xref: /haiku/src/add-ons/media/plugins/ffmpeg/AVFormatReader.cpp (revision c90684742e7361651849be4116d0e5de3a817194)
1 /*
2  * Copyright 2009-2010, Stephan Aßmus <superstippi@gmx.de>
3  * All rights reserved. Distributed under the terms of the GNU L-GPL license.
4  */
5 
6 #include "AVFormatReader.h"
7 
8 #include <stdio.h>
9 #include <string.h>
10 #include <stdlib.h>
11 
12 #include <new>
13 
14 #include <AutoDeleter.h>
15 #include <Autolock.h>
16 #include <ByteOrder.h>
17 #include <DataIO.h>
18 #include <MediaDefs.h>
19 #include <MediaFormats.h>
20 
21 extern "C" {
22 	#include "avcodec.h"
23 	#include "avformat.h"
24 }
25 
26 #include "DemuxerTable.h"
27 #include "gfx_util.h"
28 
29 
30 //#define TRACE_AVFORMAT_READER
31 #ifdef TRACE_AVFORMAT_READER
32 #	define TRACE printf
33 #	define TRACE_IO(a...)
34 #	define TRACE_SEEK(a...) printf(a)
35 #	define TRACE_FIND(a...)
36 #	define TRACE_PACKET(a...)
37 #else
38 #	define TRACE(a...)
39 #	define TRACE_IO(a...)
40 #	define TRACE_SEEK(a...)
41 #	define TRACE_FIND(a...)
42 #	define TRACE_PACKET(a...)
43 #endif
44 
45 #define ERROR(a...) fprintf(stderr, a)
46 
47 
static const int64 kNoPTSValue = 0x8000000000000000LL;
	// Local mirror of FFmpeg's AV_NOPTS_VALUE, used throughout this file to
	// detect missing PTS/DTS/duration values.
	// NOTE: For some reason, I have trouble with the avcodec.h define:
	// #define AV_NOPTS_VALUE          INT64_C(0x8000000000000000)
	// INT64_C is not defined here.
52 
53 
54 static uint32
55 avformat_to_beos_format(SampleFormat format)
56 {
57 	switch (format) {
58 		case SAMPLE_FMT_U8: return media_raw_audio_format::B_AUDIO_UCHAR;
59 		case SAMPLE_FMT_S16: return media_raw_audio_format::B_AUDIO_SHORT;
60 		case SAMPLE_FMT_S32: return media_raw_audio_format::B_AUDIO_INT;
61 		case SAMPLE_FMT_FLT: return media_raw_audio_format::B_AUDIO_FLOAT;
62 		case SAMPLE_FMT_DBL: return media_raw_audio_format::B_AUDIO_DOUBLE;
63 		default:
64 			break;
65 	}
66 	return 0;
67 }
68 
69 
70 static uint32
71 avformat_to_beos_byte_order(SampleFormat format)
72 {
73 	// TODO: Huh?
74 	return B_MEDIA_HOST_ENDIAN;
75 }
76 
77 
78 static void
79 avmetadata_to_message(AVMetadata* metaData, BMessage* message)
80 {
81 	if (metaData == NULL)
82 		return;
83 
84 	AVMetadataTag* tag = NULL;
85 	while ((tag = av_metadata_get(metaData, "", tag,
86 		AV_METADATA_IGNORE_SUFFIX))) {
87 		// TODO: Make sure we eventually follow a defined convention for
88 		// the names of meta-data keys.
89 		message->AddString(tag->key, tag->value);
90 	}
91 }
92 
93 
94 // #pragma mark - StreamBase
95 
96 
// Wraps one AVFormatContext around the shared BPositionIO source and
// represents a single (virtual) stream within it. Multiple StreamBase
// instances may read from the same source concurrently; see the I/O hook
// comments below for how the shared file position is handled.
class StreamBase {
public:
								StreamBase(BPositionIO* source,
									BLocker* sourceLock, BLocker* streamLock);
	virtual						~StreamBase();

	// Init an individual AVFormatContext
			status_t			Open();

	// Setup this stream to point to the AVStream at the given streamIndex.
	virtual	status_t			Init(int32 streamIndex);

	inline	const AVFormatContext* Context() const
									{ return fContext; }
			int32				Index() const;
			int32				CountStreams() const;
			int32				StreamIndexFor(int32 virtualIndex) const;
	inline	int32				VirtualIndex() const
									{ return fVirtualIndex; }

			double				FrameRate() const;
			bigtime_t			Duration() const;

	virtual	status_t			Seek(uint32 flags, int64* frame,
									bigtime_t* time);

			status_t			GetNextChunk(const void** chunkBuffer,
									size_t* chunkSize,
									media_header* mediaHeader);

protected:
	// I/O hooks for libavformat, cookie will be a Stream instance.
	// Since multiple StreamCookies use the same BPositionIO source, they
	// maintain the position individually, and may need to seek the source
	// if it does not match anymore in _Read().
	// TODO: This concept prevents the use of a plain BDataIO that is not
	// seekable. There is a version of AVFormatReader in the SVN history
	// which implements packet buffering for other streams when reading
	// packets. To support non-seekable network streams for example, this
	// code should be resurrected. It will make handling seekable streams,
	// especially from different threads that read from totally independent
	// positions in the stream (aggressive pre-buffering perhaps), a lot
	// more difficult with potentially large memory overhead.
	static	int					_Read(void* cookie, uint8* buffer,
									int bufferSize);
	static	off_t				_Seek(void* cookie, off_t offset, int whence);

	// Advance to the next packet of this stream (skipping packets that
	// belong to other streams); "reuse" marks the packet to be handed out
	// again on the next call.
			status_t			_NextPacket(bool reuse);

	// Conversion between microseconds (bigtime_t) and the stream's
	// time_base units, including the stream's start_time offset.
			int64_t				_ConvertToStreamTimeBase(bigtime_t time) const;
			bigtime_t			_ConvertFromStreamTimeBase(int64_t time) const;

protected:
			BPositionIO*		fSource;
			off_t				fPosition;
			// Since different threads may read from the source,
			// we need to protect the file position and I/O by a lock.
			BLocker*			fSourceLock;

			// Protects this object's state (not owned).
			BLocker*			fStreamLock;

			AVFormatContext*	fContext;
			AVStream*			fStream;
			int32				fVirtualIndex;
				// -1 until Init() succeeded

			media_format		fFormat;

			// I/O context handed to libavformat; its buffer is allocated
			// in Open() and released in the destructor.
			ByteIOContext		fIOContext;

			AVPacket			fPacket;
			bool				fReusePacket;

			// Both are derived from the demuxer flags in Open(); see there.
			bool				fSeekByBytes;
			bool				fStreamBuildsIndexWhileReading;
};
172 
173 
// Stores the source and lock pointers and puts the object into a
// "not yet opened" state; the real work happens in Open()/Init().
StreamBase::StreamBase(BPositionIO* source, BLocker* sourceLock,
		BLocker* streamLock)
	:
	fSource(source),
	fPosition(0),
	fSourceLock(sourceLock),

	fStreamLock(streamLock),

	fContext(NULL),
	fStream(NULL),
	fVirtualIndex(-1),

	fReusePacket(false),

	fSeekByBytes(false),
	fStreamBuildsIndexWhileReading(false)
{
	// NOTE: Don't use streamLock here, it may not yet be initialized!
	// NOTE(review): fIOContext is left uninitialized here and only set up
	// in Open(); confirm that no code path touches it (e.g. the destructor's
	// av_free(fIOContext.buffer)) before Open() ran.
	av_new_packet(&fPacket, 0);
	memset(&fFormat, 0, sizeof(media_format));
}
196 
197 
// Releases the I/O buffer, the current packet and the format context.
StreamBase::~StreamBase()
{
	// NOTE(review): fIOContext.buffer is only assigned in Open() (via
	// init_put_byte()); if Open() was never called this reads an
	// uninitialized pointer — verify, and consider zeroing fIOContext in
	// the constructor.
	av_free(fIOContext.buffer);
	av_free_packet(&fPacket);
	// NOTE(review): fContext comes from av_open_input_stream(); plain
	// av_free() may leak per-stream data that av_close_input_stream()
	// would release — confirm against the libavformat version in use.
	av_free(fContext);
}
204 
205 
// Probes the source, sets up the libavformat I/O context with our _Read()/
// _Seek() hooks and opens/analyzes the AVFormatContext. Must be called
// (successfully) before Init(). Returns B_OK, B_NO_MEMORY, B_IO_ERROR,
// B_NOT_SUPPORTED or B_ERROR.
status_t
StreamBase::Open()
{
	BAutolock _(fStreamLock);

	// Init probing data
	size_t bufferSize = 32768;
	uint8* buffer = static_cast<uint8*>(av_malloc(bufferSize));
	if (buffer == NULL)
		return B_NO_MEMORY;

	// Only the first probeSize bytes of the buffer are filled for probing;
	// the full bufferSize is later reused as the I/O context buffer.
	size_t probeSize = 2048;
	AVProbeData probeData;
	probeData.filename = "";
	probeData.buf = buffer;
	probeData.buf_size = probeSize;

	// Read a bit of the input...
	// NOTE: Even if other streams have already read from the source,
	// it is ok to not seek first, since our fPosition is 0, so the necessary
	// seek will happen automatically in _Read().
	if (_Read(this, buffer, probeSize) != (ssize_t)probeSize) {
		av_free(buffer);
		return B_IO_ERROR;
	}
	// ...and seek back to the beginning of the file. This is important
	// since libavformat will assume the stream to be at offset 0, the
	// probe data is not reused.
	_Seek(this, 0, SEEK_SET);

	// Probe the input format
	AVInputFormat* inputFormat = av_probe_input_format(&probeData, 1);

	if (inputFormat == NULL) {
		TRACE("StreamBase::Open() - av_probe_input_format() failed!\n");
		av_free(buffer);
		return B_NOT_SUPPORTED;
	}

	TRACE("StreamBase::Open() - "
		"av_probe_input_format(): %s\n", inputFormat->name);
	TRACE("  flags:%s%s%s%s%s\n",
		(inputFormat->flags & AVFMT_GLOBALHEADER) ? " AVFMT_GLOBALHEADER" : "",
		(inputFormat->flags & AVFMT_NOTIMESTAMPS) ? " AVFMT_NOTIMESTAMPS" : "",
		(inputFormat->flags & AVFMT_GENERIC_INDEX) ? " AVFMT_GENERIC_INDEX" : "",
		(inputFormat->flags & AVFMT_TS_DISCONT) ? " AVFMT_TS_DISCONT" : "",
		(inputFormat->flags & AVFMT_VARIABLE_FPS) ? " AVFMT_VARIABLE_FPS" : ""
	);

	// Init I/O context with buffer and hook functions, pass ourself as
	// cookie. From here on the buffer is owned by fIOContext and released
	// in the destructor.
	// NOTE(review): on init_put_byte() failure "buffer" is neither freed
	// here nor (reliably) owned by fIOContext — looks like a leak; verify.
	memset(buffer, 0, bufferSize);
	if (init_put_byte(&fIOContext, buffer, bufferSize, 0, this,
			_Read, 0, _Seek) != 0) {
		TRACE("StreamBase::Open() - init_put_byte() failed!\n");
		return B_ERROR;
	}

	// Initialize our context. On the failure paths below, the destructor
	// still releases fIOContext.buffer.
	if (av_open_input_stream(&fContext, &fIOContext, "", inputFormat,
			NULL) < 0) {
		TRACE("StreamBase::Open() - av_open_input_stream() failed!\n");
		return B_NOT_SUPPORTED;
	}

	// Retrieve stream information
	if (av_find_stream_info(fContext) < 0) {
		TRACE("StreamBase::Open() - av_find_stream_info() failed!\n");
		return B_NOT_SUPPORTED;
	}

	// Containers with discontinuous timestamps are seeked by byte offset;
	// such containers (and those with a generic index) build their index
	// while reading, which Seek() needs to know about.
	fSeekByBytes = (inputFormat->flags & AVFMT_TS_DISCONT) != 0;
	fStreamBuildsIndexWhileReading
		= (inputFormat->flags & AVFMT_GENERIC_INDEX) != 0
			|| fSeekByBytes;

	TRACE("StreamBase::Open() - "
		"av_find_stream_info() success! Seeking by bytes: %d\n",
		fSeekByBytes);

	return B_OK;
}
288 
289 
290 status_t
291 StreamBase::Init(int32 virtualIndex)
292 {
293 	BAutolock _(fStreamLock);
294 
295 	TRACE("StreamBase::Init(%ld)\n", virtualIndex);
296 
297 	if (fContext == NULL)
298 		return B_NO_INIT;
299 
300 	int32 streamIndex = StreamIndexFor(virtualIndex);
301 	if (streamIndex < 0) {
302 		TRACE("  bad stream index!\n");
303 		return B_BAD_INDEX;
304 	}
305 
306 	TRACE("  context stream index: %ld\n", streamIndex);
307 
308 	// We need to remember the virtual index so that
309 	// AVFormatReader::FreeCookie() can clear the correct stream entry.
310 	fVirtualIndex = virtualIndex;
311 
312 	// Make us point to the AVStream at streamIndex
313 	fStream = fContext->streams[streamIndex];
314 
315 // NOTE: Discarding other streams works for most, but not all containers,
316 // for example it does not work for the ASF demuxer. Since I don't know what
317 // other demuxer it breaks, let's just keep reading packets for unwanted
318 // streams, it just makes the _GetNextPacket() function slightly less
319 // efficient.
320 //	// Discard all other streams
321 //	for (unsigned i = 0; i < fContext->nb_streams; i++) {
322 //		if (i != (unsigned)streamIndex)
323 //			fContext->streams[i]->discard = AVDISCARD_ALL;
324 //	}
325 
326 	return B_OK;
327 }
328 
329 
330 int32
331 StreamBase::Index() const
332 {
333 	if (fStream != NULL)
334 		return fStream->index;
335 	return -1;
336 }
337 
338 
339 int32
340 StreamBase::CountStreams() const
341 {
342 	// Figure out the stream count. If the context has "AVPrograms", use
343 	// the first program (for now).
344 	// TODO: To support "programs" properly, the BMediaFile/Track API should
345 	// be extended accordingly. I guess programs are like TV channels in the
346 	// same satilite transport stream. Maybe call them "TrackGroups".
347 	if (fContext->nb_programs > 0) {
348 		// See libavformat/utils.c:dump_format()
349 		return fContext->programs[0]->nb_stream_indexes;
350 	}
351 	return fContext->nb_streams;
352 }
353 
354 
355 int32
356 StreamBase::StreamIndexFor(int32 virtualIndex) const
357 {
358 	// NOTE: See CountStreams()
359 	if (fContext->nb_programs > 0) {
360 		const AVProgram* program = fContext->programs[0];
361 		if (virtualIndex >= 0
362 			&& virtualIndex < (int32)program->nb_stream_indexes) {
363 			return program->stream_index[virtualIndex];
364 		}
365 	} else {
366 		if (virtualIndex >= 0 && virtualIndex < (int32)fContext->nb_streams)
367 			return virtualIndex;
368 	}
369 	return -1;
370 }
371 
372 
373 double
374 StreamBase::FrameRate() const
375 {
376 	// TODO: Find a way to always calculate a correct frame rate...
377 	double frameRate = 1.0;
378 	switch (fStream->codec->codec_type) {
379 		case CODEC_TYPE_AUDIO:
380 			frameRate = (double)fStream->codec->sample_rate;
381 			break;
382 		case CODEC_TYPE_VIDEO:
383 			if (fStream->avg_frame_rate.den && fStream->avg_frame_rate.num)
384 				frameRate = av_q2d(fStream->avg_frame_rate);
385 			else if (fStream->r_frame_rate.den && fStream->r_frame_rate.num)
386 				frameRate = av_q2d(fStream->r_frame_rate);
387 			else if (fStream->time_base.den && fStream->time_base.num)
388 				frameRate = 1 / av_q2d(fStream->time_base);
389 			else if (fStream->codec->time_base.den
390 				&& fStream->codec->time_base.num) {
391 				frameRate = 1 / av_q2d(fStream->codec->time_base);
392 			}
393 
394 			// TODO: Fix up interlaced video for real
395 			if (frameRate == 50.0f)
396 				frameRate = 25.0f;
397 			break;
398 		default:
399 			break;
400 	}
401 	if (frameRate <= 0.0)
402 		frameRate = 1.0;
403 	return frameRate;
404 }
405 
406 
407 bigtime_t
408 StreamBase::Duration() const
409 {
410 	// TODO: This is not working correctly for all stream types...
411 	// It seems that the calculations here are correct, because they work
412 	// for a couple of streams and are in line with the documentation, but
413 	// unfortunately, libavformat itself seems to set the time_base and
414 	// duration wrongly sometimes. :-(
415 	if ((int64)fStream->duration != kNoPTSValue)
416 		return _ConvertFromStreamTimeBase(fStream->duration);
417 	else if ((int64)fContext->duration != kNoPTSValue)
418 		return (bigtime_t)fContext->duration;
419 
420 	return 0;
421 }
422 
423 
// Seek this stream to the requested frame or time (per "flags"). On
// return, *frame and *time are updated to the position actually reached.
// Two strategies are used: a bisection search over byte offsets for
// containers with discontinuous timestamps (fSeekByBytes), and a
// timestamp/index based seek otherwise.
// Returns B_OK, B_NO_INIT, B_NOT_SUPPORTED or B_ERROR.
status_t
StreamBase::Seek(uint32 flags, int64* frame, bigtime_t* time)
{
	BAutolock _(fStreamLock);

	if (fContext == NULL || fStream == NULL)
		return B_NO_INIT;

	TRACE_SEEK("StreamBase::Seek(%ld,%s%s%s%s, %lld, "
		"%lld)\n", VirtualIndex(),
		(flags & B_MEDIA_SEEK_TO_FRAME) ? " B_MEDIA_SEEK_TO_FRAME" : "",
		(flags & B_MEDIA_SEEK_TO_TIME) ? " B_MEDIA_SEEK_TO_TIME" : "",
		(flags & B_MEDIA_SEEK_CLOSEST_BACKWARD)
			? " B_MEDIA_SEEK_CLOSEST_BACKWARD" : "",
		(flags & B_MEDIA_SEEK_CLOSEST_FORWARD)
			? " B_MEDIA_SEEK_CLOSEST_FORWARD" : "",
		*frame, *time);

	double frameRate = FrameRate();
	if ((flags & B_MEDIA_SEEK_TO_FRAME) != 0) {
		// Seeking is always based on time, initialize it when client seeks
		// based on frame.
		*time = (bigtime_t)(*frame * 1000000.0 / frameRate + 0.5);
	}

	int64_t timeStamp = *time;

	// Default to seeking to the nearest keyframe before the target;
	// B_MEDIA_SEEK_CLOSEST_FORWARD clears the backward preference.
	int searchFlags = AVSEEK_FLAG_BACKWARD;
	if ((flags & B_MEDIA_SEEK_CLOSEST_FORWARD) != 0)
		searchFlags = 0;

	if (fSeekByBytes) {
		searchFlags |= AVSEEK_FLAG_BYTE;

		BAutolock _(fSourceLock);
		int64_t fileSize;
		if (fSource->GetSize(&fileSize) != B_OK)
			return B_NOT_SUPPORTED;
		int64_t duration = Duration();
		if (duration == 0)
			return B_NOT_SUPPORTED;

		// First guess: map the target time linearly onto the file size.
		// From here on, "timeStamp" holds a byte offset, not a time.
		timeStamp = int64_t(fileSize * ((double)timeStamp / duration));
		if ((flags & B_MEDIA_SEEK_CLOSEST_BACKWARD) != 0) {
			timeStamp -= 65536;
			if (timeStamp < 0)
				timeStamp = 0;
		}

		// Bisection: seek to the guessed offset, read a packet to learn the
		// actual time there, then refine the offset until the found time is
		// close enough (or the same position is hit twice).
		bool seekAgain = true;
		bool seekForward = true;
		bigtime_t lastFoundTime = -1;
		int64_t closestTimeStampBackwards = -1;
		while (seekAgain) {
			if (avformat_seek_file(fContext, -1, INT64_MIN, timeStamp,
				INT64_MAX, searchFlags) < 0) {
				TRACE("  avformat_seek_file() (by bytes) failed.\n");
				return B_ERROR;
			}
			seekAgain = false;

			// Our last packet is toast in any case. Read the next one so we
			// know where we really seeked.
			fReusePacket = false;
			if (_NextPacket(true) == B_OK) {
				// Skip packets without a PTS; we need a time reference.
				while (fPacket.pts == kNoPTSValue) {
					fReusePacket = false;
					if (_NextPacket(true) != B_OK)
						return B_ERROR;
				}
				if (fPacket.pos >= 0)
					timeStamp = fPacket.pos;
				bigtime_t foundTime
					= _ConvertFromStreamTimeBase(fPacket.pts);
				if (foundTime != lastFoundTime) {
					lastFoundTime = foundTime;
					if (foundTime > *time) {
						// Overshot the target. If a known-good earlier
						// offset exists, fall back to it once; otherwise
						// step backwards proportionally to the time error.
						if (closestTimeStampBackwards >= 0) {
							timeStamp = closestTimeStampBackwards;
							seekAgain = true;
							seekForward = false;
							continue;
						}
						int64_t diff = int64_t(fileSize
							* ((double)(foundTime - *time) / (2 * duration)));
						if (diff < 8192)
							break;
						timeStamp -= diff;
						TRACE_SEEK("  need to seek back (%lld) (time: %.2f "
							"-> %.2f)\n", timeStamp, *time / 1000000.0,
							foundTime / 1000000.0);
						if (timeStamp < 0)
							foundTime = 0;
						else {
							seekAgain = true;
							continue;
						}
					} else if (seekForward && foundTime < *time - 100000) {
						// Undershot by more than 0.1s: remember this offset
						// as a safe fallback and step forwards.
						closestTimeStampBackwards = timeStamp;
						int64_t diff = int64_t(fileSize
							* ((double)(*time - foundTime) / (2 * duration)));
						if (diff < 8192)
							break;
						timeStamp += diff;
						TRACE_SEEK("  need to seek forward (%lld) (time: "
							"%.2f -> %.2f)\n", timeStamp, *time / 1000000.0,
							foundTime / 1000000.0);
						if (timeStamp > duration)
							foundTime = duration;
						else {
							seekAgain = true;
							continue;
						}
					}
				}
				TRACE_SEEK("  found time: %lld -> %lld (%.2f)\n", *time,
					foundTime, foundTime / 1000000.0);
				*time = foundTime;
				if ((flags & B_MEDIA_SEEK_TO_FRAME) != 0) {
					*frame = *time * frameRate / 1000000LL + 0.5;
					TRACE_SEEK("  seeked frame: %lld\n", *frame);
				}
			} else {
				TRACE_SEEK("  _NextPacket() failed!\n");
				return B_ERROR;
			}
		}
	} else {
		// We may not get a PTS from the next packet after seeking, so
		// we try to get an expected time from the index.
		int64_t streamTimeStamp = _ConvertToStreamTimeBase(*time);
		int index = av_index_search_timestamp(fStream, streamTimeStamp,
			searchFlags);
		if (index < 0) {
			TRACE("  av_index_search_timestamp() failed\n");
		} else {
			if (index > 0) {
				const AVIndexEntry& entry = fStream->index_entries[index];
				streamTimeStamp = entry.timestamp;
			} else {
				// Some demuxers use the first index entry to store some
				// other information, like the total playing time for example.
				// Assume the timeStamp of the first entry is always 0.
				// TODO: Handle start-time offset?
				streamTimeStamp = 0;
			}
			bigtime_t foundTime = _ConvertFromStreamTimeBase(streamTimeStamp);
			bigtime_t timeDiff = foundTime > *time
				? foundTime - *time : *time - foundTime;

			if (timeDiff > 1000000
				&& (fStreamBuildsIndexWhileReading
					|| index == fStream->nb_index_entries - 1)) {
				// If the stream is building the index on the fly while parsing
				// it, we only have entries in the index for positions already
				// decoded, i.e. we cannot seek into the future. In that case,
				// just assume that we can seek where we want and leave
				// time/frame unmodified. Since successfully seeking one time
				// will generate index entries for the seeked to position, we
				// need to remember this in fStreamBuildsIndexWhileReading,
				// since when seeking back there will be later index entries,
				// but we still want to ignore the found entry.
				fStreamBuildsIndexWhileReading = true;
				TRACE_SEEK("  Not trusting generic index entry. "
					"(Current count: %d)\n", fStream->nb_index_entries);
			} else {
				// If we found a reasonable time, write it into *time.
				// After seeking, we will try to read the sought time from
				// the next packet. If the packet has no PTS value, we may
				// still have a more accurate time from the index lookup.
				*time = foundTime;
			}
		}

		if (avformat_seek_file(fContext, -1, INT64_MIN, timeStamp, INT64_MAX,
				searchFlags) < 0) {
			TRACE("  avformat_seek_file() failed.\n");
			// Try to fall back to av_seek_frame()
			timeStamp = _ConvertToStreamTimeBase(timeStamp);
			if (av_seek_frame(fContext, fStream->index, timeStamp,
				searchFlags) < 0) {
				TRACE("  avformat_seek_frame() failed as well.\n");
				// Fall back to seeking to the beginning by bytes
				timeStamp = 0;
				if (av_seek_frame(fContext, fStream->index, timeStamp,
						AVSEEK_FLAG_BYTE) < 0) {
					TRACE("  avformat_seek_frame() by bytes failed as "
						"well.\n");
					// Do not propagate error in any case. We fail if we can't
					// read another packet.
				} else
					*time = 0;
			}
		}

		// Our last packet is toast in any case. Read the next one so
		// we know where we really sought.
		bigtime_t foundTime = *time;

		fReusePacket = false;
		if (_NextPacket(true) == B_OK) {
			if (fPacket.pts != kNoPTSValue)
				foundTime = _ConvertFromStreamTimeBase(fPacket.pts);
			else
				TRACE_SEEK("  no PTS in packet after seeking\n");
		} else
			TRACE_SEEK("  _NextPacket() failed!\n");

		*time = foundTime;
		TRACE_SEEK("  sought time: %.2fs\n", *time / 1000000.0);
		if ((flags & B_MEDIA_SEEK_TO_FRAME) != 0) {
			*frame = *time * frameRate / 1000000.0 + 0.5;
			TRACE_SEEK("  sought frame: %lld\n", *frame);
		}
	}

	return B_OK;
}
642 
643 
// Hand out the next packet of this stream. The returned chunk buffer points
// directly into fPacket and stays valid only until the next call (see NOTE
// below). Fills the optional media_header with timing and per-type info.
// Returns B_OK or the error from _NextPacket() (e.g. B_LAST_BUFFER_ERROR).
status_t
StreamBase::GetNextChunk(const void** chunkBuffer,
	size_t* chunkSize, media_header* mediaHeader)
{
	BAutolock _(fStreamLock);

	TRACE_PACKET("StreamBase::GetNextChunk()\n");

	// Get the last stream DTS before reading the next packet, since
	// then it points to that one.
	int64 lastStreamDTS = fStream->cur_dts;

	status_t ret = _NextPacket(false);
	if (ret != B_OK) {
		*chunkBuffer = NULL;
		*chunkSize = 0;
		return ret;
	}

	// NOTE: AVPacket has a field called "convergence_duration", for which
	// the documentation is quite interesting. It sounds like it could be
	// used to know the time until the next I-Frame in streams that don't
	// let you know the position of keyframes in another way (like through
	// the index).

	// According to libavformat documentation, fPacket is valid until the
	// next call to av_read_frame(). This is what we want and we can share
	// the memory with the least overhead.
	*chunkBuffer = fPacket.data;
	*chunkSize = fPacket.size;

	if (mediaHeader != NULL) {
		mediaHeader->type = fFormat.type;
		mediaHeader->buffer = 0;
		mediaHeader->destination = -1;
		mediaHeader->time_source = -1;
		mediaHeader->size_used = fPacket.size;
		// Prefer the packet's own PTS; fall back to the stream DTS
		// remembered above when the packet carries no PTS.
		if (fPacket.pts != kNoPTSValue) {
//TRACE("  PTS: %lld (time_base.num: %d, .den: %d), stream DTS: %lld\n",
//fPacket.pts, fStream->time_base.num, fStream->time_base.den,
//fStream->cur_dts);
			mediaHeader->start_time = _ConvertFromStreamTimeBase(fPacket.pts);
		} else {
//TRACE("  PTS (stream): %lld (time_base.num: %d, .den: %d), stream DTS: %lld\n",
//lastStreamDTS, fStream->time_base.num, fStream->time_base.den,
//fStream->cur_dts);
			mediaHeader->start_time
				= _ConvertFromStreamTimeBase(lastStreamDTS);
		}
		mediaHeader->file_pos = fPacket.pos;
		mediaHeader->data_offset = 0;
		// Fill the type-specific part of the header (keyframe flags,
		// line counts from the negotiated format).
		switch (mediaHeader->type) {
			case B_MEDIA_RAW_AUDIO:
				break;
			case B_MEDIA_ENCODED_AUDIO:
				mediaHeader->u.encoded_audio.buffer_flags
					= (fPacket.flags & PKT_FLAG_KEY) ? B_MEDIA_KEY_FRAME : 0;
				break;
			case B_MEDIA_RAW_VIDEO:
				mediaHeader->u.raw_video.line_count
					= fFormat.u.raw_video.display.line_count;
				break;
			case B_MEDIA_ENCODED_VIDEO:
				mediaHeader->u.encoded_video.field_flags
					= (fPacket.flags & PKT_FLAG_KEY) ? B_MEDIA_KEY_FRAME : 0;
				mediaHeader->u.encoded_video.line_count
					= fFormat.u.encoded_video.output.display.line_count;
				break;
			default:
				break;
		}
	}

//	static bigtime_t pts[2];
//	static bigtime_t lastPrintTime = system_time();
//	static BLocker printLock;
//	if (fStream->index < 2) {
//		if (fPacket.pts != kNoPTSValue)
//			pts[fStream->index] = _ConvertFromStreamTimeBase(fPacket.pts);
//		printLock.Lock();
//		bigtime_t now = system_time();
//		if (now - lastPrintTime > 1000000) {
//			printf("PTS: %.4f/%.4f, diff: %.4f\r", pts[0] / 1000000.0,
//				pts[1] / 1000000.0, (pts[0] - pts[1]) / 1000000.0);
//			fflush(stdout);
//			lastPrintTime = now;
//		}
//		printLock.Unlock();
//	}

	return B_OK;
}
736 
737 
738 // #pragma mark -
739 
740 
741 /*static*/ int
742 StreamBase::_Read(void* cookie, uint8* buffer, int bufferSize)
743 {
744 	StreamBase* stream = reinterpret_cast<StreamBase*>(cookie);
745 
746 	BAutolock _(stream->fSourceLock);
747 
748 	TRACE_IO("StreamBase::_Read(%p, %p, %d) position: %lld/%lld\n",
749 		cookie, buffer, bufferSize, stream->fPosition,
750 		stream->fSource->Position());
751 
752 	if (stream->fPosition != stream->fSource->Position()) {
753 		off_t position
754 			= stream->fSource->Seek(stream->fPosition, SEEK_SET);
755 		if (position != stream->fPosition)
756 			return -1;
757 	}
758 
759 	ssize_t read = stream->fSource->Read(buffer, bufferSize);
760 	if (read > 0)
761 		stream->fPosition += read;
762 
763 	TRACE_IO("  read: %ld\n", read);
764 	return (int)read;
765 
766 }
767 
768 
769 /*static*/ off_t
770 StreamBase::_Seek(void* cookie, off_t offset, int whence)
771 {
772 	TRACE_IO("StreamBase::_Seek(%p, %lld, %d)\n",
773 		cookie, offset, whence);
774 
775 	StreamBase* stream = reinterpret_cast<StreamBase*>(cookie);
776 
777 	BAutolock _(stream->fSourceLock);
778 
779 	// Support for special file size retrieval API without seeking
780 	// anywhere:
781 	if (whence == AVSEEK_SIZE) {
782 		off_t size;
783 		if (stream->fSource->GetSize(&size) == B_OK)
784 			return size;
785 		return -1;
786 	}
787 
788 	// If not requested to seek to an absolute position, we need to
789 	// confirm that the stream is currently at the position that we
790 	// think it is.
791 	if (whence != SEEK_SET
792 		&& stream->fPosition != stream->fSource->Position()) {
793 		off_t position
794 			= stream->fSource->Seek(stream->fPosition, SEEK_SET);
795 		if (position != stream->fPosition)
796 			return -1;
797 	}
798 
799 	off_t position = stream->fSource->Seek(offset, whence);
800 	TRACE_IO("  position: %lld\n", position);
801 	if (position < 0)
802 		return -1;
803 
804 	stream->fPosition = position;
805 
806 	return position;
807 }
808 
809 
810 status_t
811 StreamBase::_NextPacket(bool reuse)
812 {
813 	TRACE_PACKET("StreamBase::_NextPacket(%d)\n", reuse);
814 
815 	if (fReusePacket) {
816 		// The last packet was marked for reuse, so we keep using it.
817 		TRACE_PACKET("  re-using last packet\n");
818 		fReusePacket = reuse;
819 		return B_OK;
820 	}
821 
822 	av_free_packet(&fPacket);
823 
824 	while (true) {
825 		if (av_read_frame(fContext, &fPacket) < 0) {
826 			// NOTE: Even though we may get the error for a different stream,
827 			// av_read_frame() is not going to be successful from here on, so
828 			// it doesn't matter
829 			fReusePacket = false;
830 			return B_LAST_BUFFER_ERROR;
831 		}
832 
833 		if (fPacket.stream_index == Index())
834 			break;
835 
836 		// This is a packet from another stream, ignore it.
837 		av_free_packet(&fPacket);
838 	}
839 
840 	// Mark this packet with the new reuse flag.
841 	fReusePacket = reuse;
842 	return B_OK;
843 }
844 
845 
846 int64_t
847 StreamBase::_ConvertToStreamTimeBase(bigtime_t time) const
848 {
849 	int64 timeStamp = int64_t((double)time * fStream->time_base.den
850 		/ (1000000.0 * fStream->time_base.num) + 0.5);
851 	if (fStream->start_time != kNoPTSValue)
852 		timeStamp += fStream->start_time;
853 	return timeStamp;
854 }
855 
856 
857 bigtime_t
858 StreamBase::_ConvertFromStreamTimeBase(int64_t time) const
859 {
860 	if (fStream->start_time != kNoPTSValue)
861 		time -= fStream->start_time;
862 
863 	return bigtime_t(1000000.0 * time * fStream->time_base.num
864 		/ fStream->time_base.den + 0.5);
865 }
866 
867 
868 // #pragma mark - AVFormatReader::Stream
869 
870 
// Per-track cookie handed out by AVFormatReader. Extends StreamBase with
// its own lock, keyframe lookup state and a "ghost" StreamBase used for
// FindKeyFrame() without disturbing this stream's read position.
class AVFormatReader::Stream : public StreamBase {
public:
								Stream(BPositionIO* source,
									BLocker* streamLock);
	virtual						~Stream();

	// Setup this stream to point to the AVStream at the given streamIndex.
	// This will also initialize the media_format.
	virtual	status_t			Init(int32 streamIndex);

			status_t			GetMetaData(BMessage* data);

	// Support for AVFormatReader
			status_t			GetStreamInfo(int64* frameCount,
									bigtime_t* duration, media_format* format,
									const void** infoBuffer,
									size_t* infoSize) const;

			status_t			FindKeyFrame(uint32 flags, int64* frame,
									bigtime_t* time) const;
	virtual	status_t			Seek(uint32 flags, int64* frame,
									bigtime_t* time);

private:
	// Serializes access from const methods too, hence mutable.
	mutable	BLocker				fLock;

			// Result of the last keyframe search, so repeated queries for
			// the same position can be answered consistently.
			struct KeyframeInfo {
				bigtime_t		requestedTime;
				int64			requestedFrame;
				bigtime_t		reportedTime;
				int64			reportedFrame;
				uint32			seekFlags;
			};
	mutable	KeyframeInfo		fLastReportedKeyframe;
	mutable	StreamBase*			fGhostStream;
				// owned; NULL until needed
};
907 
908 
909 
// NOTE: The "streamLock" parameter is forwarded as StreamBase's *source*
// lock (shared by all streams on the same source), while the member fLock
// becomes the per-stream lock. Passing &fLock before fLock is constructed
// is fine, since the base class only stores the pointer (see the note in
// StreamBase's constructor).
AVFormatReader::Stream::Stream(BPositionIO* source, BLocker* streamLock)
	:
	StreamBase(source, streamLock, &fLock),
	fLock("stream lock"),
	fGhostStream(NULL)
{
	fLastReportedKeyframe.requestedTime = 0;
	fLastReportedKeyframe.requestedFrame = 0;
	fLastReportedKeyframe.reportedTime = 0;
	fLastReportedKeyframe.reportedFrame = 0;
}
921 
922 
AVFormatReader::Stream::~Stream()
{
	// fGhostStream is owned by this object; it is initialized to NULL in
	// the constructor, so deleting it unconditionally is safe.
	delete fGhostStream;
}
927 
928 
929 status_t
930 AVFormatReader::Stream::Init(int32 virtualIndex)
931 {
932 	TRACE("AVFormatReader::Stream::Init(%ld)\n", virtualIndex);
933 
934 	status_t ret = StreamBase::Init(virtualIndex);
935 	if (ret != B_OK)
936 		return ret;
937 
938 	// Get a pointer to the AVCodecContext for the stream at streamIndex.
939 	AVCodecContext* codecContext = fStream->codec;
940 
941 #if 0
942 // stippi: Here I was experimenting with the question if some fields of the
943 // AVCodecContext change (or get filled out at all), if the AVCodec is opened.
944 	class CodecOpener {
945 	public:
946 		CodecOpener(AVCodecContext* context)
947 		{
948 			fCodecContext = context;
949 			AVCodec* codec = avcodec_find_decoder(context->codec_id);
950 			fCodecOpen = avcodec_open(context, codec) >= 0;
951 			if (!fCodecOpen)
952 				TRACE("  failed to open the codec!\n");
953 		}
954 		~CodecOpener()
955 		{
956 			if (fCodecOpen)
957 				avcodec_close(fCodecContext);
958 		}
959 	private:
960 		AVCodecContext*		fCodecContext;
961 		bool				fCodecOpen;
962 	} codecOpener(codecContext);
963 #endif
964 
965 	// initialize the media_format for this stream
966 	media_format* format = &fFormat;
967 	memset(format, 0, sizeof(media_format));
968 
969 	media_format_description description;
970 
971 	// Set format family and type depending on codec_type of the stream.
972 	switch (codecContext->codec_type) {
973 		case AVMEDIA_TYPE_AUDIO:
974 			if ((codecContext->codec_id >= CODEC_ID_PCM_S16LE)
975 				&& (codecContext->codec_id <= CODEC_ID_PCM_U8)) {
976 				TRACE("  raw audio\n");
977 				format->type = B_MEDIA_RAW_AUDIO;
978 				description.family = B_ANY_FORMAT_FAMILY;
979 				// This will then apparently be handled by the (built into
980 				// BMediaTrack) RawDecoder.
981 			} else {
982 				TRACE("  encoded audio\n");
983 				format->type = B_MEDIA_ENCODED_AUDIO;
984 				description.family = B_MISC_FORMAT_FAMILY;
985 				description.u.misc.file_format = 'ffmp';
986 			}
987 			break;
988 		case AVMEDIA_TYPE_VIDEO:
989 			TRACE("  encoded video\n");
990 			format->type = B_MEDIA_ENCODED_VIDEO;
991 			description.family = B_MISC_FORMAT_FAMILY;
992 			description.u.misc.file_format = 'ffmp';
993 			break;
994 		default:
995 			TRACE("  unknown type\n");
996 			format->type = B_MEDIA_UNKNOWN_TYPE;
997 			return B_ERROR;
998 			break;
999 	}
1000 
1001 	if (format->type == B_MEDIA_RAW_AUDIO) {
1002 		// We cannot describe all raw-audio formats, some are unsupported.
1003 		switch (codecContext->codec_id) {
1004 			case CODEC_ID_PCM_S16LE:
1005 				format->u.raw_audio.format
1006 					= media_raw_audio_format::B_AUDIO_SHORT;
1007 				format->u.raw_audio.byte_order
1008 					= B_MEDIA_LITTLE_ENDIAN;
1009 				break;
1010 			case CODEC_ID_PCM_S16BE:
1011 				format->u.raw_audio.format
1012 					= media_raw_audio_format::B_AUDIO_SHORT;
1013 				format->u.raw_audio.byte_order
1014 					= B_MEDIA_BIG_ENDIAN;
1015 				break;
1016 			case CODEC_ID_PCM_U16LE:
1017 //				format->u.raw_audio.format
1018 //					= media_raw_audio_format::B_AUDIO_USHORT;
1019 //				format->u.raw_audio.byte_order
1020 //					= B_MEDIA_LITTLE_ENDIAN;
1021 				return B_NOT_SUPPORTED;
1022 				break;
1023 			case CODEC_ID_PCM_U16BE:
1024 //				format->u.raw_audio.format
1025 //					= media_raw_audio_format::B_AUDIO_USHORT;
1026 //				format->u.raw_audio.byte_order
1027 //					= B_MEDIA_BIG_ENDIAN;
1028 				return B_NOT_SUPPORTED;
1029 				break;
1030 			case CODEC_ID_PCM_S8:
1031 				format->u.raw_audio.format
1032 					= media_raw_audio_format::B_AUDIO_CHAR;
1033 				break;
1034 			case CODEC_ID_PCM_U8:
1035 				format->u.raw_audio.format
1036 					= media_raw_audio_format::B_AUDIO_UCHAR;
1037 				break;
1038 			default:
1039 				return B_NOT_SUPPORTED;
1040 				break;
1041 		}
1042 	} else {
1043 		if (description.family == B_MISC_FORMAT_FAMILY)
1044 			description.u.misc.codec = codecContext->codec_id;
1045 
1046 		BMediaFormats formats;
1047 		status_t status = formats.GetFormatFor(description, format);
1048 		if (status < B_OK)
1049 			TRACE("  formats.GetFormatFor() error: %s\n", strerror(status));
1050 
1051 		format->user_data_type = B_CODEC_TYPE_INFO;
1052 		*(uint32*)format->user_data = codecContext->codec_tag;
1053 		format->user_data[4] = 0;
1054 	}
1055 
1056 	format->require_flags = 0;
1057 	format->deny_flags = B_MEDIA_MAUI_UNDEFINED_FLAGS;
1058 
1059 	switch (format->type) {
1060 		case B_MEDIA_RAW_AUDIO:
1061 			format->u.raw_audio.frame_rate = (float)codecContext->sample_rate;
1062 			format->u.raw_audio.channel_count = codecContext->channels;
1063 			format->u.raw_audio.channel_mask = codecContext->channel_layout;
1064 			format->u.raw_audio.byte_order
1065 				= avformat_to_beos_byte_order(codecContext->sample_fmt);
1066 			format->u.raw_audio.format
1067 				= avformat_to_beos_format(codecContext->sample_fmt);
1068 			format->u.raw_audio.buffer_size = 0;
1069 
1070 			// Read one packet and mark it for later re-use. (So our first
1071 			// GetNextChunk() call does not read another packet.)
1072 			if (_NextPacket(true) == B_OK) {
1073 				TRACE("  successfully determined audio buffer size: %d\n",
1074 					fPacket.size);
1075 				format->u.raw_audio.buffer_size = fPacket.size;
1076 			}
1077 			break;
1078 
1079 		case B_MEDIA_ENCODED_AUDIO:
1080 			format->u.encoded_audio.bit_rate = codecContext->bit_rate;
1081 			format->u.encoded_audio.frame_size = codecContext->frame_size;
1082 			// Fill in some info about possible output format
1083 			format->u.encoded_audio.output
1084 				= media_multi_audio_format::wildcard;
1085 			format->u.encoded_audio.output.frame_rate
1086 				= (float)codecContext->sample_rate;
1087 			// Channel layout bits match in Be API and FFmpeg.
1088 			format->u.encoded_audio.output.channel_count
1089 				= codecContext->channels;
1090 			format->u.encoded_audio.multi_info.channel_mask
1091 				= codecContext->channel_layout;
1092 			format->u.encoded_audio.output.byte_order
1093 				= avformat_to_beos_byte_order(codecContext->sample_fmt);
1094 			format->u.encoded_audio.output.format
1095 				= avformat_to_beos_format(codecContext->sample_fmt);
1096 			if (codecContext->block_align > 0) {
1097 				format->u.encoded_audio.output.buffer_size
1098 					= codecContext->block_align;
1099 			} else {
1100 				format->u.encoded_audio.output.buffer_size
1101 					= codecContext->frame_size * codecContext->channels
1102 						* (format->u.encoded_audio.output.format
1103 							& media_raw_audio_format::B_AUDIO_SIZE_MASK);
1104 			}
1105 			break;
1106 
1107 		case B_MEDIA_ENCODED_VIDEO:
1108 // TODO: Specifying any of these seems to throw off the format matching
1109 // later on.
1110 //			format->u.encoded_video.avg_bit_rate = codecContext->bit_rate;
1111 //			format->u.encoded_video.max_bit_rate = codecContext->bit_rate
1112 //				+ codecContext->bit_rate_tolerance;
1113 
1114 //			format->u.encoded_video.encoding
1115 //				= media_encoded_video_format::B_ANY;
1116 
1117 //			format->u.encoded_video.frame_size = 1;
1118 //			format->u.encoded_video.forward_history = 0;
1119 //			format->u.encoded_video.backward_history = 0;
1120 
1121 			format->u.encoded_video.output.field_rate = FrameRate();
1122 			format->u.encoded_video.output.interlace = 1;
1123 
1124 			format->u.encoded_video.output.first_active = 0;
1125 			format->u.encoded_video.output.last_active
1126 				= codecContext->height - 1;
1127 				// TODO: Maybe libavformat actually provides that info
1128 				// somewhere...
1129 			format->u.encoded_video.output.orientation
1130 				= B_VIDEO_TOP_LEFT_RIGHT;
1131 
1132 			// Calculate the display aspect ratio
1133 			AVRational displayAspectRatio;
1134 		    if (codecContext->sample_aspect_ratio.num != 0) {
1135 				av_reduce(&displayAspectRatio.num, &displayAspectRatio.den,
1136 					codecContext->width
1137 						* codecContext->sample_aspect_ratio.num,
1138 					codecContext->height
1139 						* codecContext->sample_aspect_ratio.den,
1140 					1024 * 1024);
1141 				TRACE("  pixel aspect ratio: %d/%d, "
1142 					"display aspect ratio: %d/%d\n",
1143 					codecContext->sample_aspect_ratio.num,
1144 					codecContext->sample_aspect_ratio.den,
1145 					displayAspectRatio.num, displayAspectRatio.den);
1146 		    } else {
1147 				av_reduce(&displayAspectRatio.num, &displayAspectRatio.den,
1148 					codecContext->width, codecContext->height, 1024 * 1024);
1149 				TRACE("  no display aspect ratio (%d/%d)\n",
1150 					displayAspectRatio.num, displayAspectRatio.den);
1151 		    }
1152 			format->u.encoded_video.output.pixel_width_aspect
1153 				= displayAspectRatio.num;
1154 			format->u.encoded_video.output.pixel_height_aspect
1155 				= displayAspectRatio.den;
1156 
1157 			format->u.encoded_video.output.display.format
1158 				= pixfmt_to_colorspace(codecContext->pix_fmt);
1159 			format->u.encoded_video.output.display.line_width
1160 				= codecContext->width;
1161 			format->u.encoded_video.output.display.line_count
1162 				= codecContext->height;
1163 			TRACE("  width/height: %d/%d\n", codecContext->width,
1164 				codecContext->height);
1165 			format->u.encoded_video.output.display.bytes_per_row = 0;
1166 			format->u.encoded_video.output.display.pixel_offset = 0;
1167 			format->u.encoded_video.output.display.line_offset = 0;
1168 			format->u.encoded_video.output.display.flags = 0; // TODO
1169 
1170 			break;
1171 
1172 		default:
1173 			// This is an unknown format to us.
1174 			break;
1175 	}
1176 
1177 	// Add the meta data, if any
1178 	if (codecContext->extradata_size > 0) {
1179 		format->SetMetaData(codecContext->extradata,
1180 			codecContext->extradata_size);
1181 		TRACE("  extradata: %p\n", format->MetaData());
1182 	}
1183 
1184 	TRACE("  extradata_size: %d\n", codecContext->extradata_size);
1185 //	TRACE("  intra_matrix: %p\n", codecContext->intra_matrix);
1186 //	TRACE("  inter_matrix: %p\n", codecContext->inter_matrix);
1187 //	TRACE("  get_buffer(): %p\n", codecContext->get_buffer);
1188 //	TRACE("  release_buffer(): %p\n", codecContext->release_buffer);
1189 
1190 #ifdef TRACE_AVFORMAT_READER
1191 	char formatString[512];
1192 	if (string_for_format(*format, formatString, sizeof(formatString)))
1193 		TRACE("  format: %s\n", formatString);
1194 
1195 	uint32 encoding = format->Encoding();
1196 	TRACE("  encoding '%.4s'\n", (char*)&encoding);
1197 #endif
1198 
1199 	return B_OK;
1200 }
1201 
1202 
1203 status_t
1204 AVFormatReader::Stream::GetMetaData(BMessage* data)
1205 {
1206 	BAutolock _(&fLock);
1207 
1208 	avmetadata_to_message(fStream->metadata, data);
1209 
1210 	return B_OK;
1211 }
1212 
1213 
// Fills out the duration, frame count, media_format and codec extradata
// for this stream, as needed by the BMediaTrack API.
// NOTE: The frame count is always derived from duration * frameRate; the
// original nb_frames-based path is intentionally commented out below.
status_t
AVFormatReader::Stream::GetStreamInfo(int64* frameCount,
	bigtime_t* duration, media_format* format, const void** infoBuffer,
	size_t* infoSize) const
{
	BAutolock _(&fLock);

	TRACE("AVFormatReader::Stream::GetStreamInfo(%ld)\n",
		VirtualIndex());

	double frameRate = FrameRate();
	TRACE("  frameRate: %.4f\n", frameRate);

	#ifdef TRACE_AVFORMAT_READER
	// Tracing only: report the stream's start time in microseconds.
	if (fStream->start_time != kNoPTSValue) {
		bigtime_t startTime = _ConvertFromStreamTimeBase(fStream->start_time);
		TRACE("  start_time: %lld or %.5fs\n", startTime,
			startTime / 1000000.0);
		// TODO: Handle start time in FindKeyFrame() and Seek()?!
	}
	#endif // TRACE_AVFORMAT_READER

	*duration = Duration();

	TRACE("  duration: %lld or %.5fs\n", *duration, *duration / 1000000.0);

	#if 0
	// Disabled debugging aid: dump the first and last few demuxer index
	// entries with their native and global timestamps.
	if (fStream->nb_index_entries > 0) {
		TRACE("  dump of index entries:\n");
		int count = 5;
		int firstEntriesCount = min_c(fStream->nb_index_entries, count);
		int i = 0;
		for (; i < firstEntriesCount; i++) {
			AVIndexEntry& entry = fStream->index_entries[i];
			bigtime_t timeGlobal = entry.timestamp;
			bigtime_t timeNative = _ConvertFromStreamTimeBase(timeGlobal);
			TRACE("    [%d] native: %.5fs global: %.5fs\n", i,
				timeNative / 1000000.0f, timeGlobal / 1000000.0f);
		}
		if (fStream->nb_index_entries - count > i) {
			i = fStream->nb_index_entries - count;
			TRACE("    ...\n");
			for (; i < fStream->nb_index_entries; i++) {
				AVIndexEntry& entry = fStream->index_entries[i];
				bigtime_t timeGlobal = entry.timestamp;
				bigtime_t timeNative = _ConvertFromStreamTimeBase(timeGlobal);
				TRACE("    [%d] native: %.5fs global: %.5fs\n", i,
					timeNative / 1000000.0f, timeGlobal / 1000000.0f);
			}
		}
	}
	#endif

	*frameCount = fStream->nb_frames;
//	if (*frameCount == 0) {
		// Calculate from duration and frame rate
		*frameCount = (int64)(*duration * frameRate / 1000000LL);
		TRACE("  frameCount calculated: %lld, from context: %lld\n",
			*frameCount, fStream->nb_frames);
//	} else
//		TRACE("  frameCount: %lld\n", *frameCount);

	// fFormat was built during Init(); hand out a copy.
	*format = fFormat;

	// Extradata (e.g. codec headers) straight from the codec context.
	*infoBuffer = fStream->codec->extradata;
	*infoSize = fStream->codec->extradata_size;

	return B_OK;
}
1283 
1284 
1285 status_t
1286 AVFormatReader::Stream::FindKeyFrame(uint32 flags, int64* frame,
1287 	bigtime_t* time) const
1288 {
1289 	BAutolock _(&fLock);
1290 
1291 	if (fContext == NULL || fStream == NULL)
1292 		return B_NO_INIT;
1293 
1294 	TRACE_FIND("AVFormatReader::Stream::FindKeyFrame(%ld,%s%s%s%s, "
1295 		"%lld, %lld)\n", VirtualIndex(),
1296 		(flags & B_MEDIA_SEEK_TO_FRAME) ? " B_MEDIA_SEEK_TO_FRAME" : "",
1297 		(flags & B_MEDIA_SEEK_TO_TIME) ? " B_MEDIA_SEEK_TO_TIME" : "",
1298 		(flags & B_MEDIA_SEEK_CLOSEST_BACKWARD)
1299 			? " B_MEDIA_SEEK_CLOSEST_BACKWARD" : "",
1300 		(flags & B_MEDIA_SEEK_CLOSEST_FORWARD)
1301 			? " B_MEDIA_SEEK_CLOSEST_FORWARD" : "",
1302 		*frame, *time);
1303 
1304 	bool inLastRequestedRange = false;
1305 	if ((flags & B_MEDIA_SEEK_TO_FRAME) != 0) {
1306 		if (fLastReportedKeyframe.reportedFrame
1307 			<= fLastReportedKeyframe.requestedFrame) {
1308 			inLastRequestedRange
1309 				= *frame >= fLastReportedKeyframe.reportedFrame
1310 					&& *frame <= fLastReportedKeyframe.requestedFrame;
1311 		} else {
1312 			inLastRequestedRange
1313 				= *frame >= fLastReportedKeyframe.requestedFrame
1314 					&& *frame <= fLastReportedKeyframe.reportedFrame;
1315 		}
1316 	} else if ((flags & B_MEDIA_SEEK_TO_FRAME) == 0) {
1317 		if (fLastReportedKeyframe.reportedTime
1318 			<= fLastReportedKeyframe.requestedTime) {
1319 			inLastRequestedRange
1320 				= *time >= fLastReportedKeyframe.reportedTime
1321 					&& *time <= fLastReportedKeyframe.requestedTime;
1322 		} else {
1323 			inLastRequestedRange
1324 				= *time >= fLastReportedKeyframe.requestedTime
1325 					&& *time <= fLastReportedKeyframe.reportedTime;
1326 		}
1327 	}
1328 
1329 	if (inLastRequestedRange) {
1330 		*frame = fLastReportedKeyframe.reportedFrame;
1331 		*time = fLastReportedKeyframe.reportedTime;
1332 		TRACE_FIND("  same as last reported keyframe\n");
1333 		return B_OK;
1334 	}
1335 
1336 	double frameRate = FrameRate();
1337 	if ((flags & B_MEDIA_SEEK_TO_FRAME) != 0)
1338 		*time = (bigtime_t)(*frame * 1000000.0 / frameRate + 0.5);
1339 
1340 	status_t ret;
1341 	if (fGhostStream == NULL) {
1342 		BAutolock _(fSourceLock);
1343 
1344 		fGhostStream = new(std::nothrow) StreamBase(fSource, fSourceLock,
1345 			&fLock);
1346 		if (fGhostStream == NULL) {
1347 			TRACE("  failed to allocate ghost stream\n");
1348 			return B_NO_MEMORY;
1349 		}
1350 
1351 		ret = fGhostStream->Open();
1352 		if (ret != B_OK) {
1353 			TRACE("  ghost stream failed to open: %s\n", strerror(ret));
1354 			return B_ERROR;
1355 		}
1356 
1357 		ret = fGhostStream->Init(fVirtualIndex);
1358 		if (ret != B_OK) {
1359 			TRACE("  ghost stream failed to init: %s\n", strerror(ret));
1360 			return B_ERROR;
1361 		}
1362 	}
1363 	fLastReportedKeyframe.requestedFrame = *frame;
1364 	fLastReportedKeyframe.requestedTime = *time;
1365 	fLastReportedKeyframe.seekFlags = flags;
1366 
1367 	ret = fGhostStream->Seek(flags, frame, time);
1368 	if (ret != B_OK) {
1369 		TRACE("  ghost stream failed to seek: %s\n", strerror(ret));
1370 		return B_ERROR;
1371 	}
1372 
1373 	fLastReportedKeyframe.reportedFrame = *frame;
1374 	fLastReportedKeyframe.reportedTime = *time;
1375 
1376 	TRACE_FIND("  found time: %.2fs\n", *time / 1000000.0);
1377 	if ((flags & B_MEDIA_SEEK_TO_FRAME) != 0) {
1378 		*frame = int64_t(*time * FrameRate() / 1000000.0 + 0.5);
1379 		TRACE_FIND("  found frame: %lld\n", *frame);
1380 	}
1381 
1382 	return B_OK;
1383 }
1384 
1385 
1386 status_t
1387 AVFormatReader::Stream::Seek(uint32 flags, int64* frame, bigtime_t* time)
1388 {
1389 	BAutolock _(&fLock);
1390 
1391 	if (fContext == NULL || fStream == NULL)
1392 		return B_NO_INIT;
1393 
1394 	// Put the old requested values into frame/time, since we already know
1395 	// that the sought frame/time will then match the reported values.
1396 	// TODO: Will not work if client changes seek flags (from backwards to
1397 	// forward or vice versa)!!
1398 	bool inLastRequestedRange = false;
1399 	if ((flags & B_MEDIA_SEEK_TO_FRAME) != 0) {
1400 		if (fLastReportedKeyframe.reportedFrame
1401 			<= fLastReportedKeyframe.requestedFrame) {
1402 			inLastRequestedRange
1403 				= *frame >= fLastReportedKeyframe.reportedFrame
1404 					&& *frame <= fLastReportedKeyframe.requestedFrame;
1405 		} else {
1406 			inLastRequestedRange
1407 				= *frame >= fLastReportedKeyframe.requestedFrame
1408 					&& *frame <= fLastReportedKeyframe.reportedFrame;
1409 		}
1410 	} else if ((flags & B_MEDIA_SEEK_TO_FRAME) == 0) {
1411 		if (fLastReportedKeyframe.reportedTime
1412 			<= fLastReportedKeyframe.requestedTime) {
1413 			inLastRequestedRange
1414 				= *time >= fLastReportedKeyframe.reportedTime
1415 					&& *time <= fLastReportedKeyframe.requestedTime;
1416 		} else {
1417 			inLastRequestedRange
1418 				= *time >= fLastReportedKeyframe.requestedTime
1419 					&& *time <= fLastReportedKeyframe.reportedTime;
1420 		}
1421 	}
1422 
1423 	if (inLastRequestedRange) {
1424 		*frame = fLastReportedKeyframe.requestedFrame;
1425 		*time = fLastReportedKeyframe.requestedTime;
1426 		flags = fLastReportedKeyframe.seekFlags;
1427 	}
1428 
1429 	return StreamBase::Seek(flags, frame, time);
1430 }
1431 
1432 
1433 // #pragma mark - AVFormatReader
1434 
1435 
// Creates the reader with no streams allocated yet; the stream array is
// set up lazily by Sniff().
AVFormatReader::AVFormatReader()
	:
	fStreams(NULL),
	fSourceLock("source I/O lock")
{
	TRACE("AVFormatReader::AVFormatReader\n");
}
1443 
1444 
1445 AVFormatReader::~AVFormatReader()
1446 {
1447 	TRACE("AVFormatReader::~AVFormatReader\n");
1448 	if (fStreams != NULL) {
1449 		// The client was supposed to call FreeCookie() on all
1450 		// allocated streams. Deleting the first stream is always
1451 		// prevented, we delete the other ones just in case.
1452 		int32 count = fStreams[0]->CountStreams();
1453 		for (int32 i = 0; i < count; i++)
1454 			delete fStreams[i];
1455 		delete[] fStreams;
1456 	}
1457 }
1458 
1459 
1460 // #pragma mark -
1461 
1462 
// Returns a static copyright string for the plugin itself, not for the
// currently opened file (see TODOs below).
const char*
AVFormatReader::Copyright()
{
// TODO: Could not find the equivalent in libavformat >= version 53.
// Use metadata API instead!
//	if (fStreams != NULL && fStreams[0] != NULL)
//		return fStreams[0]->Context()->copyright;
	// TODO: Return copyright of the file instead!
	return "Copyright 2009, Stephan Aßmus";
}
1473 
1474 
1475 status_t
1476 AVFormatReader::Sniff(int32* _streamCount)
1477 {
1478 	TRACE("AVFormatReader::Sniff\n");
1479 
1480 	BPositionIO* source = dynamic_cast<BPositionIO*>(Source());
1481 	if (source == NULL) {
1482 		TRACE("  not a BPositionIO, but we need it to be one.\n");
1483 		return B_NOT_SUPPORTED;
1484 	}
1485 
1486 	Stream* stream = new(std::nothrow) Stream(source,
1487 		&fSourceLock);
1488 	if (stream == NULL) {
1489 		ERROR("AVFormatReader::Sniff() - failed to allocate Stream\n");
1490 		return B_NO_MEMORY;
1491 	}
1492 
1493 	ObjectDeleter<Stream> streamDeleter(stream);
1494 
1495 	status_t ret = stream->Open();
1496 	if (ret != B_OK) {
1497 		TRACE("  failed to detect stream: %s\n", strerror(ret));
1498 		return ret;
1499 	}
1500 
1501 	delete[] fStreams;
1502 	fStreams = NULL;
1503 
1504 	int32 streamCount = stream->CountStreams();
1505 	if (streamCount == 0) {
1506 		TRACE("  failed to detect any streams: %s\n", strerror(ret));
1507 		return B_ERROR;
1508 	}
1509 
1510 	fStreams = new(std::nothrow) Stream*[streamCount];
1511 	if (fStreams == NULL) {
1512 		ERROR("AVFormatReader::Sniff() - failed to allocate streams\n");
1513 		return B_NO_MEMORY;
1514 	}
1515 
1516 	memset(fStreams, 0, sizeof(Stream*) * streamCount);
1517 	fStreams[0] = stream;
1518 	streamDeleter.Detach();
1519 
1520 	#ifdef TRACE_AVFORMAT_READER
1521 	dump_format(const_cast<AVFormatContext*>(stream->Context()), 0, "", 0);
1522 	#endif
1523 
1524 	if (_streamCount != NULL)
1525 		*_streamCount = streamCount;
1526 
1527 	return B_OK;
1528 }
1529 
1530 
1531 void
1532 AVFormatReader::GetFileFormatInfo(media_file_format* mff)
1533 {
1534 	TRACE("AVFormatReader::GetFileFormatInfo\n");
1535 
1536 	if (fStreams == NULL)
1537 		return;
1538 
1539 	// The first cookie is always there!
1540 	const AVFormatContext* context = fStreams[0]->Context();
1541 
1542 	if (context == NULL || context->iformat == NULL) {
1543 		TRACE("  no AVFormatContext or AVInputFormat!\n");
1544 		return;
1545 	}
1546 
1547 	const DemuxerFormat* format = demuxer_format_for(context->iformat);
1548 
1549 	mff->capabilities = media_file_format::B_READABLE
1550 		| media_file_format::B_KNOWS_ENCODED_VIDEO
1551 		| media_file_format::B_KNOWS_ENCODED_AUDIO
1552 		| media_file_format::B_IMPERFECTLY_SEEKABLE;
1553 
1554 	if (format != NULL) {
1555 		// TODO: Check if AVInputFormat has audio only and then use
1556 		// format->audio_family!
1557 		mff->family = format->video_family;
1558 	} else {
1559 		TRACE("  no DemuxerFormat for AVInputFormat!\n");
1560 		mff->family = B_MISC_FORMAT_FAMILY;
1561 	}
1562 
1563 	mff->version = 100;
1564 
1565 	if (format != NULL) {
1566 		strcpy(mff->mime_type, format->mime_type);
1567 	} else {
1568 		// TODO: Would be nice to be able to provide this from AVInputFormat,
1569 		// maybe by extending the FFmpeg code itself (all demuxers).
1570 		strcpy(mff->mime_type, "");
1571 	}
1572 
1573 	if (context->iformat->extensions != NULL)
1574 		strcpy(mff->file_extension, context->iformat->extensions);
1575 	else {
1576 		TRACE("  no file extensions for AVInputFormat.\n");
1577 		strcpy(mff->file_extension, "");
1578 	}
1579 
1580 	if (context->iformat->name != NULL)
1581 		strcpy(mff->short_name,  context->iformat->name);
1582 	else {
1583 		TRACE("  no short name for AVInputFormat.\n");
1584 		strcpy(mff->short_name, "");
1585 	}
1586 
1587 	if (context->iformat->long_name != NULL)
1588 		sprintf(mff->pretty_name, "%s (FFmpeg)", context->iformat->long_name);
1589 	else {
1590 		if (format != NULL)
1591 			sprintf(mff->pretty_name, "%s (FFmpeg)", format->pretty_name);
1592 		else
1593 			strcpy(mff->pretty_name, "Unknown (FFmpeg)");
1594 	}
1595 }
1596 
1597 
1598 status_t
1599 AVFormatReader::GetMetaData(BMessage* _data)
1600 {
1601 	// The first cookie is always there!
1602 	const AVFormatContext* context = fStreams[0]->Context();
1603 
1604 	if (context == NULL)
1605 		return B_NO_INIT;
1606 
1607 	avmetadata_to_message(context->metadata, _data);
1608 
1609 	// Add chapter info
1610 	for (unsigned i = 0; i < context->nb_chapters; i++) {
1611 		AVChapter* chapter = context->chapters[i];
1612 		BMessage chapterData;
1613 		chapterData.AddInt64("start", bigtime_t(1000000.0
1614 			* chapter->start * chapter->time_base.num
1615 			/ chapter->time_base.den + 0.5));
1616 		chapterData.AddInt64("end", bigtime_t(1000000.0
1617 			* chapter->end * chapter->time_base.num
1618 			/ chapter->time_base.den + 0.5));
1619 
1620 		avmetadata_to_message(chapter->metadata, &chapterData);
1621 		_data->AddMessage("be:chapter", &chapterData);
1622 	}
1623 
1624 	// Add program info
1625 	for (unsigned i = 0; i < context->nb_programs; i++) {
1626 		BMessage progamData;
1627 		avmetadata_to_message(context->programs[i]->metadata, &progamData);
1628 		_data->AddMessage("be:program", &progamData);
1629 	}
1630 
1631 	return B_OK;
1632 }
1633 
1634 
1635 // #pragma mark -
1636 
1637 
1638 status_t
1639 AVFormatReader::AllocateCookie(int32 streamIndex, void** _cookie)
1640 {
1641 	TRACE("AVFormatReader::AllocateCookie(%ld)\n", streamIndex);
1642 
1643 	BAutolock _(fSourceLock);
1644 
1645 	if (fStreams == NULL)
1646 		return B_NO_INIT;
1647 
1648 	if (streamIndex < 0 || streamIndex >= fStreams[0]->CountStreams())
1649 		return B_BAD_INDEX;
1650 
1651 	if (_cookie == NULL)
1652 		return B_BAD_VALUE;
1653 
1654 	Stream* cookie = fStreams[streamIndex];
1655 	if (cookie == NULL) {
1656 		// Allocate the cookie
1657 		BPositionIO* source = dynamic_cast<BPositionIO*>(Source());
1658 		if (source == NULL) {
1659 			TRACE("  not a BPositionIO, but we need it to be one.\n");
1660 			return B_NOT_SUPPORTED;
1661 		}
1662 
1663 		cookie = new(std::nothrow) Stream(source, &fSourceLock);
1664 		if (cookie == NULL) {
1665 			ERROR("AVFormatReader::Sniff() - failed to allocate "
1666 				"Stream\n");
1667 			return B_NO_MEMORY;
1668 		}
1669 
1670 		status_t ret = cookie->Open();
1671 		if (ret != B_OK) {
1672 			TRACE("  stream failed to open: %s\n", strerror(ret));
1673 			delete cookie;
1674 			return ret;
1675 		}
1676 	}
1677 
1678 	status_t ret = cookie->Init(streamIndex);
1679 	if (ret != B_OK) {
1680 		TRACE("  stream failed to initialize: %s\n", strerror(ret));
1681 		// NOTE: Never delete the first stream!
1682 		if (streamIndex != 0)
1683 			delete cookie;
1684 		return ret;
1685 	}
1686 
1687 	fStreams[streamIndex] = cookie;
1688 	*_cookie = cookie;
1689 
1690 	return B_OK;
1691 }
1692 
1693 
1694 status_t
1695 AVFormatReader::FreeCookie(void *_cookie)
1696 {
1697 	BAutolock _(fSourceLock);
1698 
1699 	Stream* cookie = reinterpret_cast<Stream*>(_cookie);
1700 
1701 	// NOTE: Never delete the first cookie!
1702 	if (cookie != NULL && cookie->VirtualIndex() != 0) {
1703 		if (fStreams != NULL)
1704 			fStreams[cookie->VirtualIndex()] = NULL;
1705 		delete cookie;
1706 	}
1707 
1708 	return B_OK;
1709 }
1710 
1711 
1712 // #pragma mark -
1713 
1714 
1715 status_t
1716 AVFormatReader::GetStreamInfo(void* _cookie, int64* frameCount,
1717 	bigtime_t* duration, media_format* format, const void** infoBuffer,
1718 	size_t* infoSize)
1719 {
1720 	Stream* cookie = reinterpret_cast<Stream*>(_cookie);
1721 	return cookie->GetStreamInfo(frameCount, duration, format, infoBuffer,
1722 		infoSize);
1723 }
1724 
1725 
1726 status_t
1727 AVFormatReader::GetStreamMetaData(void* _cookie, BMessage* _data)
1728 {
1729 	Stream* cookie = reinterpret_cast<Stream*>(_cookie);
1730 	return cookie->GetMetaData(_data);
1731 }
1732 
1733 
1734 status_t
1735 AVFormatReader::Seek(void* _cookie, uint32 seekTo, int64* frame,
1736 	bigtime_t* time)
1737 {
1738 	Stream* cookie = reinterpret_cast<Stream*>(_cookie);
1739 	return cookie->Seek(seekTo, frame, time);
1740 }
1741 
1742 
1743 status_t
1744 AVFormatReader::FindKeyFrame(void* _cookie, uint32 flags, int64* frame,
1745 	bigtime_t* time)
1746 {
1747 	Stream* cookie = reinterpret_cast<Stream*>(_cookie);
1748 	return cookie->FindKeyFrame(flags, frame, time);
1749 }
1750 
1751 
1752 status_t
1753 AVFormatReader::GetNextChunk(void* _cookie, const void** chunkBuffer,
1754 	size_t* chunkSize, media_header* mediaHeader)
1755 {
1756 	Stream* cookie = reinterpret_cast<Stream*>(_cookie);
1757 	return cookie->GetNextChunk(chunkBuffer, chunkSize, mediaHeader);
1758 }
1759 
1760 
1761