xref: /haiku/src/add-ons/media/plugins/ffmpeg/AVFormatReader.cpp (revision 8a6724a0ee3803f1e9f487d8111bb3f6cb8d16db)
1 /*
2  * Copyright 2009-2010, Stephan Aßmus <superstippi@gmx.de>
3  * Copyright 2014, Colin Günther <coling@gmx.de>
4  * All rights reserved. Distributed under the terms of the GNU L-GPL license.
5  */
6 
7 #include "AVFormatReader.h"
8 
9 #include <stdio.h>
10 #include <string.h>
11 #include <stdlib.h>
12 
13 #include <new>
14 
15 #include <AutoDeleter.h>
16 #include <Autolock.h>
17 #include <ByteOrder.h>
18 #include <MediaIO.h>
19 #include <MediaDefs.h>
20 #include <MediaFormats.h>
21 
22 extern "C" {
23 	#include "avcodec.h"
24 	#include "avformat.h"
25 }
26 
27 #include "DemuxerTable.h"
28 #include "gfx_util.h"
29 #include "Utilities.h"
30 
31 
32 //#define TRACE_AVFORMAT_READER
33 #ifdef TRACE_AVFORMAT_READER
34 #	define TRACE printf
35 #	define TRACE_IO(a...)
36 #	define TRACE_SEEK(a...) printf(a)
37 #	define TRACE_FIND(a...)
38 #	define TRACE_PACKET(a...)
39 #else
40 #	define TRACE(a...)
41 #	define TRACE_IO(a...)
42 #	define TRACE_SEEK(a...)
43 #	define TRACE_FIND(a...)
44 #	define TRACE_PACKET(a...)
45 #endif
46 
47 #define ERROR(a...) fprintf(stderr, a)
48 
49 #if LIBAVCODEC_VERSION_INT < ((54 << 16) | (50 << 8))
50 #define AV_CODEC_ID_PCM_S16BE CODEC_ID_PCM_S16BE
51 #define AV_CODEC_ID_PCM_S16LE CODEC_ID_PCM_S16LE
52 #define AV_CODEC_ID_PCM_U16BE CODEC_ID_PCM_U16BE
53 #define AV_CODEC_ID_PCM_U16LE CODEC_ID_PCM_U16LE
54 #define AV_CODEC_ID_PCM_S8 CODEC_ID_PCM_S8
55 #define AV_CODEC_ID_PCM_U8 CODEC_ID_PCM_U8
56 #endif
57 
58 static const int64 kNoPTSValue = AV_NOPTS_VALUE;
59 
60 
static uint32
avformat_to_beos_byte_order(AVSampleFormat format)
{
	// TODO: Huh?
	// The AVSampleFormat does not encode endianess; FFmpeg delivers raw
	// samples in native byte order, so host endian is reported for all
	// formats and the parameter is (currently) unused.
	return B_MEDIA_HOST_ENDIAN;
}
67 
68 
69 static void
70 avdictionary_to_message(AVDictionary* dictionary, BMessage* message)
71 {
72 	if (dictionary == NULL)
73 		return;
74 
75 	AVDictionaryEntry* entry = NULL;
76 	while ((entry = av_dict_get(dictionary, "", entry,
77 		AV_DICT_IGNORE_SUFFIX))) {
78 		// convert entry keys into something more meaningful using the names from
79 		// id3v2.c
80 		if (strcmp(entry->key, "TALB") == 0 || strcmp(entry->key, "TAL") == 0)
81 			message->AddString("album", entry->value);
82 		else if (strcmp(entry->key, "TCOM") == 0)
83 			message->AddString("composer", entry->value);
84 		else if (strcmp(entry->key, "TCON") == 0 || strcmp(entry->key, "TCO") == 0)
85 			message->AddString("genre", entry->value);
86 		else if (strcmp(entry->key, "TCOP") == 0)
87 			message->AddString("copyright", entry->value);
88 		else if (strcmp(entry->key, "TDRL") == 0 || strcmp(entry->key, "TDRC") == 0)
89 			message->AddString("date", entry->value);
90 		else if (strcmp(entry->key, "TENC") == 0 || strcmp(entry->key, "TEN") == 0)
91 			message->AddString("encoded_by", entry->value);
92 		else if (strcmp(entry->key, "TIT2") == 0 || strcmp(entry->key, "TT2") == 0)
93 			message->AddString("title", entry->value);
94 		else if (strcmp(entry->key, "TLAN") == 0)
95 			message->AddString("language", entry->value);
96 		else if (strcmp(entry->key, "TPE1") == 0 || strcmp(entry->key, "TP1") == 0)
97 			message->AddString("artist", entry->value);
98 		else if (strcmp(entry->key, "TPE2") == 0 || strcmp(entry->key, "TP2") == 0)
99 			message->AddString("album_artist", entry->value);
100 		else if (strcmp(entry->key, "TPE3") == 0 || strcmp(entry->key, "TP3") == 0)
101 			message->AddString("performer", entry->value);
102 		else if (strcmp(entry->key, "TPOS") == 0)
103 			message->AddString("disc", entry->value);
104 		else if (strcmp(entry->key, "TPUB") == 0)
105 			message->AddString("publisher", entry->value);
106 		else if (strcmp(entry->key, "TRCK") == 0 || strcmp(entry->key, "TRK") == 0)
107 			message->AddString("track", entry->value);
108 		else if (strcmp(entry->key, "TSOA") == 0)
109 			message->AddString("album-sort", entry->value);
110 		else if (strcmp(entry->key, "TSOP") == 0)
111 			message->AddString("artist-sort", entry->value);
112 		else if (strcmp(entry->key, "TSOT") == 0)
113 			message->AddString("title-sort", entry->value);
114 		else if (strcmp(entry->key, "TSSE") == 0)
115 			message->AddString("encoder", entry->value);
116 		else if (strcmp(entry->key, "TYER") == 0)
117 			message->AddString("year", entry->value);
118 		else
119 			message->AddString(entry->key, entry->value);
120 	}
121 }
122 
123 
124 // #pragma mark - StreamBase
125 
126 
// Wraps one AVFormatContext around the shared BMediaIO source. Several
// StreamBase instances may read from the same source; each one tracks its
// own file position and re-seeks the source in _Read()/_Seek() as needed.
class StreamBase {
public:
								StreamBase(BMediaIO* source,
									BLocker* sourceLock, BLocker* streamLock);
	virtual						~StreamBase();

	// Init an individual AVFormatContext
			status_t			Open();

	// Setup this stream to point to the AVStream at the given streamIndex.
	virtual	status_t			Init(int32 streamIndex);

	inline	const AVFormatContext* Context() const
									{ return fContext; }
			int32				Index() const;
			int32				CountStreams() const;
			int32				StreamIndexFor(int32 virtualIndex) const;
	inline	int32				VirtualIndex() const
									{ return fVirtualIndex; }

			double				FrameRate() const;
			bigtime_t			Duration() const;

	// frame and time are in/out parameters; on success they are updated
	// to the position actually reached.
	virtual	status_t			Seek(uint32 flags, int64* frame,
									bigtime_t* time);

			status_t			GetNextChunk(const void** chunkBuffer,
									size_t* chunkSize,
									media_header* mediaHeader);

protected:
	// I/O hooks for libavformat, cookie will be a Stream instance.
	// Since multiple StreamCookies use the same BMediaIO source, they
	// maintain the position individually, and may need to seek the source
	// if it does not match anymore in _Read().
	static	int					_Read(void* cookie, uint8* buffer,
									int bufferSize);
	static	off_t				_Seek(void* cookie, off_t offset, int whence);

	// Reads the next packet for this stream into fPacket; when reuse is
	// true the packet is kept around for the following call.
			status_t			_NextPacket(bool reuse);

	// Conversions between microseconds and the stream's time_base units.
			int64_t				_ConvertToStreamTimeBase(bigtime_t time) const;
			bigtime_t			_ConvertFromStreamTimeBase(int64_t time) const;

protected:
			BMediaIO*			fSource;
			off_t				fPosition;
			// Since different threads may read from the source,
			// we need to protect the file position and I/O by a lock.
			BLocker*			fSourceLock;

			BLocker*			fStreamLock;

			AVFormatContext*	fContext;
			AVStream*			fStream;
			int32				fVirtualIndex;

			media_format		fFormat;

			AVIOContext*		fIOContext;

			AVPacket			fPacket;
			bool				fReusePacket;

			// Set in Open(): true for formats with discontinuous
			// timestamps (AVFMT_TS_DISCONT), seeking is then byte based.
			bool				fSeekByBytes;
			bool				fStreamBuildsIndexWhileReading;
};
194 
195 
StreamBase::StreamBase(BMediaIO* source, BLocker* sourceLock,
		BLocker* streamLock)
	:
	fSource(source),
	fPosition(0),
	fSourceLock(sourceLock),

	fStreamLock(streamLock),

	fContext(NULL),
	fStream(NULL),
	fVirtualIndex(-1),
	fIOContext(NULL),

	fReusePacket(false),

	fSeekByBytes(false),
	fStreamBuildsIndexWhileReading(false)
{
	// NOTE: Don't use streamLock here, it may not yet be initialized!

	// Start out with a valid (empty) packet and a zeroed media_format, so
	// the destructor and GetNextChunk() always see defined state.
	av_new_packet(&fPacket, 0);
	memset(&fFormat, 0, sizeof(media_format));
}
220 
221 
222 StreamBase::~StreamBase()
223 {
224 	if (fContext != NULL)
225 		avformat_close_input(&fContext);
226 	av_free_packet(&fPacket);
227 	av_free(fContext);
228 	if (fIOContext != NULL)
229 		av_free(fIOContext->buffer);
230 	av_free(fIOContext);
231 }
232 
233 
234 status_t
235 StreamBase::Open()
236 {
237 	BAutolock _(fStreamLock);
238 
239 	// Init probing data
240 	size_t bufferSize = 32768;
241 	uint8* buffer = static_cast<uint8*>(av_malloc(bufferSize));
242 	if (buffer == NULL)
243 		return B_NO_MEMORY;
244 
245 	// Allocate I/O context with buffer and hook functions, pass ourself as
246 	// cookie.
247 	memset(buffer, 0, bufferSize);
248 	fIOContext = avio_alloc_context(buffer, bufferSize, 0, this, _Read, 0,
249 		_Seek);
250 	if (fIOContext == NULL) {
251 		TRACE("StreamBase::Open() - avio_alloc_context() failed!\n");
252 		av_free(buffer);
253 		return B_ERROR;
254 	}
255 
256 	fContext = avformat_alloc_context();
257 	fContext->pb = fIOContext;
258 
259 	// Allocate our context and probe the input format
260 	if (avformat_open_input(&fContext, ".mod", NULL, NULL) < 0) {
261 		TRACE("StreamBase::Open() - avformat_open_input() failed!\n");
262 		// avformat_open_input() frees the context in case of failure
263 		fContext = NULL;
264 		av_free(fIOContext);
265 		fIOContext = NULL;
266 		return B_NOT_SUPPORTED;
267 	}
268 
269 	TRACE("StreamBase::Open() - "
270 		"avformat_open_input(): %s\n", fContext->iformat->name);
271 	TRACE("  flags:%s%s%s%s%s\n",
272 		(fContext->iformat->flags & AVFMT_GLOBALHEADER) ? " AVFMT_GLOBALHEADER" : "",
273 		(fContext->iformat->flags & AVFMT_NOTIMESTAMPS) ? " AVFMT_NOTIMESTAMPS" : "",
274 		(fContext->iformat->flags & AVFMT_GENERIC_INDEX) ? " AVFMT_GENERIC_INDEX" : "",
275 		(fContext->iformat->flags & AVFMT_TS_DISCONT) ? " AVFMT_TS_DISCONT" : "",
276 		(fContext->iformat->flags & AVFMT_VARIABLE_FPS) ? " AVFMT_VARIABLE_FPS" : ""
277 	);
278 
279 
280 	// Retrieve stream information
281 	if (avformat_find_stream_info(fContext, NULL) < 0) {
282 		TRACE("StreamBase::Open() - avformat_find_stream_info() failed!\n");
283 		return B_NOT_SUPPORTED;
284 	}
285 
286 	fSeekByBytes = (fContext->iformat->flags & AVFMT_TS_DISCONT) != 0;
287 	fStreamBuildsIndexWhileReading
288 		= (fContext->iformat->flags & AVFMT_GENERIC_INDEX) != 0
289 			|| fSeekByBytes;
290 
291 	TRACE("StreamBase::Open() - "
292 		"av_find_stream_info() success! Seeking by bytes: %d\n",
293 		fSeekByBytes);
294 
295 	return B_OK;
296 }
297 
298 
299 status_t
300 StreamBase::Init(int32 virtualIndex)
301 {
302 	BAutolock _(fStreamLock);
303 
304 	TRACE("StreamBase::Init(%ld)\n", virtualIndex);
305 
306 	if (fContext == NULL)
307 		return B_NO_INIT;
308 
309 	int32 streamIndex = StreamIndexFor(virtualIndex);
310 	if (streamIndex < 0) {
311 		TRACE("  bad stream index!\n");
312 		return B_BAD_INDEX;
313 	}
314 
315 	TRACE("  context stream index: %ld\n", streamIndex);
316 
317 	// We need to remember the virtual index so that
318 	// AVFormatReader::FreeCookie() can clear the correct stream entry.
319 	fVirtualIndex = virtualIndex;
320 
321 	// Make us point to the AVStream at streamIndex
322 	fStream = fContext->streams[streamIndex];
323 
324 // NOTE: Discarding other streams works for most, but not all containers,
325 // for example it does not work for the ASF demuxer. Since I don't know what
326 // other demuxer it breaks, let's just keep reading packets for unwanted
327 // streams, it just makes the _GetNextPacket() function slightly less
328 // efficient.
329 //	// Discard all other streams
330 //	for (unsigned i = 0; i < fContext->nb_streams; i++) {
331 //		if (i != (unsigned)streamIndex)
332 //			fContext->streams[i]->discard = AVDISCARD_ALL;
333 //	}
334 
335 	return B_OK;
336 }
337 
338 
339 int32
340 StreamBase::Index() const
341 {
342 	if (fStream != NULL)
343 		return fStream->index;
344 	return -1;
345 }
346 
347 
348 int32
349 StreamBase::CountStreams() const
350 {
351 	// Figure out the stream count. If the context has "AVPrograms", use
352 	// the first program (for now).
353 	// TODO: To support "programs" properly, the BMediaFile/Track API should
354 	// be extended accordingly. I guess programs are like TV channels in the
355 	// same satilite transport stream. Maybe call them "TrackGroups".
356 	if (fContext->nb_programs > 0) {
357 		// See libavformat/utils.c:dump_format()
358 		return fContext->programs[0]->nb_stream_indexes;
359 	}
360 	return fContext->nb_streams;
361 }
362 
363 
364 int32
365 StreamBase::StreamIndexFor(int32 virtualIndex) const
366 {
367 	// NOTE: See CountStreams()
368 	if (fContext->nb_programs > 0) {
369 		const AVProgram* program = fContext->programs[0];
370 		if (virtualIndex >= 0
371 			&& virtualIndex < (int32)program->nb_stream_indexes) {
372 			return program->stream_index[virtualIndex];
373 		}
374 	} else {
375 		if (virtualIndex >= 0 && virtualIndex < (int32)fContext->nb_streams)
376 			return virtualIndex;
377 	}
378 	return -1;
379 }
380 
381 
// Returns a frame rate for the stream: the sample rate for audio streams,
// the best available frames-per-second guess for video streams, and 1.0
// as a safe fallback (never returns <= 0).
double
StreamBase::FrameRate() const
{
	// TODO: Find a way to always calculate a correct frame rate...
	double frameRate = 1.0;
	switch (fStream->codec->codec_type) {
		case AVMEDIA_TYPE_AUDIO:
			frameRate = (double)fStream->codec->sample_rate;
			break;
		case AVMEDIA_TYPE_VIDEO:
			// Prefer the demuxer's average frame rate, then the "real"
			// base frame rate, then invert the stream or codec time base.
			if (fStream->avg_frame_rate.den && fStream->avg_frame_rate.num)
				frameRate = av_q2d(fStream->avg_frame_rate);
			else if (fStream->r_frame_rate.den && fStream->r_frame_rate.num)
				frameRate = av_q2d(fStream->r_frame_rate);
			else if (fStream->time_base.den && fStream->time_base.num)
				frameRate = 1 / av_q2d(fStream->time_base);
			else if (fStream->codec->time_base.den
				&& fStream->codec->time_base.num) {
				frameRate = 1 / av_q2d(fStream->codec->time_base);
			}

			// TODO: Fix up interlaced video for real
			// (50 fields/s is reported for 25 fps interlaced material)
			if (frameRate == 50.0f)
				frameRate = 25.0f;
			break;
		default:
			break;
	}
	if (frameRate <= 0.0)
		frameRate = 1.0;
	return frameRate;
}
414 
415 
416 bigtime_t
417 StreamBase::Duration() const
418 {
419 	// TODO: This is not working correctly for all stream types...
420 	// It seems that the calculations here are correct, because they work
421 	// for a couple of streams and are in line with the documentation, but
422 	// unfortunately, libavformat itself seems to set the time_base and
423 	// duration wrongly sometimes. :-(
424 	if ((int64)fStream->duration != kNoPTSValue)
425 		return _ConvertFromStreamTimeBase(fStream->duration);
426 	else if ((int64)fContext->duration != kNoPTSValue)
427 		return (bigtime_t)fContext->duration;
428 
429 	return 0;
430 }
431 
432 
/*!	Seeks the demuxer to the requested frame/time. Both frame and time are
	in/out: on input one of them (selected by flags) is the target, on
	output both reflect the position actually reached. For formats with
	discontinuous timestamps (fSeekByBytes) a bisection over byte positions
	is performed; otherwise the stream index and avformat_seek_file() are
	used, with av_seek_frame() fallbacks.
*/
status_t
StreamBase::Seek(uint32 flags, int64* frame, bigtime_t* time)
{
	BAutolock _(fStreamLock);

	if (fContext == NULL || fStream == NULL)
		return B_NO_INIT;

	TRACE_SEEK("StreamBase::Seek(%ld,%s%s%s%s, %lld, "
		"%lld)\n", VirtualIndex(),
		(flags & B_MEDIA_SEEK_TO_FRAME) ? " B_MEDIA_SEEK_TO_FRAME" : "",
		(flags & B_MEDIA_SEEK_TO_TIME) ? " B_MEDIA_SEEK_TO_TIME" : "",
		(flags & B_MEDIA_SEEK_CLOSEST_BACKWARD)
			? " B_MEDIA_SEEK_CLOSEST_BACKWARD" : "",
		(flags & B_MEDIA_SEEK_CLOSEST_FORWARD)
			? " B_MEDIA_SEEK_CLOSEST_FORWARD" : "",
		*frame, *time);

	double frameRate = FrameRate();
	if ((flags & B_MEDIA_SEEK_TO_FRAME) != 0) {
		// Seeking is always based on time, initialize it when client seeks
		// based on frame.
		*time = (bigtime_t)(*frame * 1000000.0 / frameRate + 0.5);
	}

	int64_t timeStamp = *time;

	int searchFlags = AVSEEK_FLAG_BACKWARD;
	if ((flags & B_MEDIA_SEEK_CLOSEST_FORWARD) != 0)
		searchFlags = 0;

	if (fSeekByBytes) {
		searchFlags |= AVSEEK_FLAG_BYTE;

		BAutolock _(fSourceLock);
		int64_t fileSize;

		if (fSource->GetSize(&fileSize) != B_OK)
			return B_NOT_SUPPORTED;

		int64_t duration = Duration();
		if (duration == 0)
			return B_NOT_SUPPORTED;

		// Interpolate a first byte position from the time/duration ratio,
		// then refine it by bisection below.
		timeStamp = int64_t(fileSize * ((double)timeStamp / duration));
		if ((flags & B_MEDIA_SEEK_CLOSEST_BACKWARD) != 0) {
			timeStamp -= 65536;
			if (timeStamp < 0)
				timeStamp = 0;
		}

		bool seekAgain = true;
		bool seekForward = true;
		bigtime_t lastFoundTime = -1;
		int64_t closestTimeStampBackwards = -1;
		while (seekAgain) {
			if (avformat_seek_file(fContext, -1, INT64_MIN, timeStamp,
				INT64_MAX, searchFlags) < 0) {
				TRACE("  avformat_seek_file() (by bytes) failed.\n");
				return B_ERROR;
			}
			seekAgain = false;

			// Our last packet is toast in any case. Read the next one so we
			// know where we really seeked.
			fReusePacket = false;
			if (_NextPacket(true) == B_OK) {
				// Skip packets without a PTS, we need one to know where
				// we landed.
				while (fPacket.pts == kNoPTSValue) {
					fReusePacket = false;
					if (_NextPacket(true) != B_OK)
						return B_ERROR;
				}
				if (fPacket.pos >= 0)
					timeStamp = fPacket.pos;
				bigtime_t foundTime
					= _ConvertFromStreamTimeBase(fPacket.pts);
				if (foundTime != lastFoundTime) {
					lastFoundTime = foundTime;
					if (foundTime > *time) {
						// Overshot the target. If we already know a good
						// earlier position, go back to it and stop probing
						// forward.
						if (closestTimeStampBackwards >= 0) {
							timeStamp = closestTimeStampBackwards;
							seekAgain = true;
							seekForward = false;
							continue;
						}
						int64_t diff = int64_t(fileSize
							* ((double)(foundTime - *time) / (2 * duration)));
						if (diff < 8192)
							break;
						timeStamp -= diff;
						TRACE_SEEK("  need to seek back (%lld) (time: %.2f "
							"-> %.2f)\n", timeStamp, *time / 1000000.0,
							foundTime / 1000000.0);
						if (timeStamp < 0)
							foundTime = 0;
						else {
							seekAgain = true;
							continue;
						}
					} else if (seekForward && foundTime < *time - 100000) {
						// Undershot by more than 0.1s; remember this spot
						// as a fallback and probe forward.
						closestTimeStampBackwards = timeStamp;
						int64_t diff = int64_t(fileSize
							* ((double)(*time - foundTime) / (2 * duration)));
						if (diff < 8192)
							break;
						timeStamp += diff;
						TRACE_SEEK("  need to seek forward (%lld) (time: "
							"%.2f -> %.2f)\n", timeStamp, *time / 1000000.0,
							foundTime / 1000000.0);
						if (timeStamp > duration)
							foundTime = duration;
						else {
							seekAgain = true;
							continue;
						}
					}
				}
				TRACE_SEEK("  found time: %lld -> %lld (%.2f)\n", *time,
					foundTime, foundTime / 1000000.0);
				*time = foundTime;
				*frame = (uint64)(*time * frameRate / 1000000LL + 0.5);
				TRACE_SEEK("  seeked frame: %lld\n", *frame);
			} else {
				TRACE_SEEK("  _NextPacket() failed!\n");
				return B_ERROR;
			}
		}
	} else {
		// We may not get a PTS from the next packet after seeking, so
		// we try to get an expected time from the index.
		int64_t streamTimeStamp = _ConvertToStreamTimeBase(*time);
		int index = av_index_search_timestamp(fStream, streamTimeStamp,
			searchFlags);
		if (index < 0) {
			TRACE("  av_index_search_timestamp() failed\n");
		} else {
			if (index > 0) {
				const AVIndexEntry& entry = fStream->index_entries[index];
				streamTimeStamp = entry.timestamp;
			} else {
				// Some demuxers use the first index entry to store some
				// other information, like the total playing time for example.
				// Assume the timeStamp of the first entry is always 0.
				// TODO: Handle start-time offset?
				streamTimeStamp = 0;
			}
			bigtime_t foundTime = _ConvertFromStreamTimeBase(streamTimeStamp);
			bigtime_t timeDiff = foundTime > *time
				? foundTime - *time : *time - foundTime;

			if (timeDiff > 1000000
				&& (fStreamBuildsIndexWhileReading
					|| index == fStream->nb_index_entries - 1)) {
				// If the stream is building the index on the fly while parsing
				// it, we only have entries in the index for positions already
				// decoded, i.e. we cannot seek into the future. In that case,
				// just assume that we can seek where we want and leave
				// time/frame unmodified. Since successfully seeking one time
				// will generate index entries for the seeked to position, we
				// need to remember this in fStreamBuildsIndexWhileReading,
				// since when seeking back there will be later index entries,
				// but we still want to ignore the found entry.
				fStreamBuildsIndexWhileReading = true;
				TRACE_SEEK("  Not trusting generic index entry. "
					"(Current count: %d)\n", fStream->nb_index_entries);
			} else {
				// If we found a reasonably time, write it into *time.
				// After seeking, we will try to read the sought time from
				// the next packet. If the packet has no PTS value, we may
				// still have a more accurate time from the index lookup.
				*time = foundTime;
			}
		}

		if (avformat_seek_file(fContext, -1, INT64_MIN, timeStamp, INT64_MAX,
				searchFlags) < 0) {
			TRACE("  avformat_seek_file() failed.\n");
			// Try to fall back to av_seek_frame()
			timeStamp = _ConvertToStreamTimeBase(timeStamp);
			if (av_seek_frame(fContext, fStream->index, timeStamp,
				searchFlags) < 0) {
				TRACE("  avformat_seek_frame() failed as well.\n");
				// Fall back to seeking to the beginning by bytes
				timeStamp = 0;
				if (av_seek_frame(fContext, fStream->index, timeStamp,
						AVSEEK_FLAG_BYTE) < 0) {
					TRACE("  avformat_seek_frame() by bytes failed as "
						"well.\n");
					// Do not propagate error in any case. We fail if we can't
					// read another packet.
				} else
					*time = 0;
			}
		}

		// Our last packet is toast in any case. Read the next one so
		// we know where we really sought.
		bigtime_t foundTime = *time;

		fReusePacket = false;
		if (_NextPacket(true) == B_OK) {
			if (fPacket.pts != kNoPTSValue)
				foundTime = _ConvertFromStreamTimeBase(fPacket.pts);
			else
				TRACE_SEEK("  no PTS in packet after seeking\n");
		} else
			TRACE_SEEK("  _NextPacket() failed!\n");

		*time = foundTime;
		TRACE_SEEK("  sought time: %.2fs\n", *time / 1000000.0);
		*frame = (uint64)(*time * frameRate / 1000000.0 + 0.5);
		TRACE_SEEK("  sought frame: %lld\n", *frame);
	}

	return B_OK;
}
649 
650 
/*!	Returns a pointer to the next packet's data for this stream. The
	buffer is owned by fPacket and remains valid only until the next call
	(the next av_read_frame()). On error the out parameters are cleared
	and the error (e.g. B_LAST_BUFFER_ERROR at end of stream) is returned.
	mediaHeader, if given, is filled with start time, file position and
	keyframe flags derived from the packet.
*/
status_t
StreamBase::GetNextChunk(const void** chunkBuffer,
	size_t* chunkSize, media_header* mediaHeader)
{
	BAutolock _(fStreamLock);

	TRACE_PACKET("StreamBase::GetNextChunk()\n");

	// Get the last stream DTS before reading the next packet, since
	// then it points to that one.
	int64 lastStreamDTS = fStream->cur_dts;

	status_t ret = _NextPacket(false);
	if (ret != B_OK) {
		*chunkBuffer = NULL;
		*chunkSize = 0;
		return ret;
	}

	// NOTE: AVPacket has a field called "convergence_duration", for which
	// the documentation is quite interesting. It sounds like it could be
	// used to know the time until the next I-Frame in streams that don't
	// let you know the position of keyframes in another way (like through
	// the index).

	// According to libavformat documentation, fPacket is valid until the
	// next call to av_read_frame(). This is what we want and we can share
	// the memory with the least overhead.
	*chunkBuffer = fPacket.data;
	*chunkSize = fPacket.size;

	if (mediaHeader != NULL) {
		mediaHeader->type = fFormat.type;
		mediaHeader->buffer = 0;
		mediaHeader->destination = -1;
		mediaHeader->time_source = -1;
		mediaHeader->size_used = fPacket.size;

		// FFmpeg recommends to use the decoding time stamps as primary source
		// for presentation time stamps, especially for video formats that are
		// using frame reordering. More over this way it is ensured that the
		// returned start times are ordered in a monotonically increasing time
		// series (even for videos that contain B-frames).
		// \see http://git.videolan.org/?p=ffmpeg.git;a=blob;f=libavformat/avformat.h;h=1e8a6294890d580cd9ebc684eaf4ce57c8413bd8;hb=9153b33a742c4e2a85ff6230aea0e75f5a8b26c2#l1623
		bigtime_t presentationTimeStamp;
		if (fPacket.dts != kNoPTSValue)
			presentationTimeStamp = fPacket.dts;
		else if (fPacket.pts != kNoPTSValue)
			presentationTimeStamp = fPacket.pts;
		else
			presentationTimeStamp = lastStreamDTS;

		mediaHeader->start_time	= _ConvertFromStreamTimeBase(presentationTimeStamp);
		mediaHeader->file_pos = fPacket.pos;
		mediaHeader->data_offset = 0;
		// Fill the type specific part of the header (keyframe flag,
		// line count) depending on the media type.
		switch (mediaHeader->type) {
			case B_MEDIA_RAW_AUDIO:
				break;
			case B_MEDIA_ENCODED_AUDIO:
				mediaHeader->u.encoded_audio.buffer_flags
					= (fPacket.flags & AV_PKT_FLAG_KEY) ? B_MEDIA_KEY_FRAME : 0;
				break;
			case B_MEDIA_RAW_VIDEO:
				mediaHeader->u.raw_video.line_count
					= fFormat.u.raw_video.display.line_count;
				break;
			case B_MEDIA_ENCODED_VIDEO:
				mediaHeader->u.encoded_video.field_flags
					= (fPacket.flags & AV_PKT_FLAG_KEY) ? B_MEDIA_KEY_FRAME : 0;
				mediaHeader->u.encoded_video.line_count
					= fFormat.u.encoded_video.output.display.line_count;
				break;
			default:
				break;
		}
	}

//	static bigtime_t pts[2];
//	static bigtime_t lastPrintTime = system_time();
//	static BLocker printLock;
//	if (fStream->index < 2) {
//		if (fPacket.pts != kNoPTSValue)
//			pts[fStream->index] = _ConvertFromStreamTimeBase(fPacket.pts);
//		printLock.Lock();
//		bigtime_t now = system_time();
//		if (now - lastPrintTime > 1000000) {
//			printf("PTS: %.4f/%.4f, diff: %.4f\r", pts[0] / 1000000.0,
//				pts[1] / 1000000.0, (pts[0] - pts[1]) / 1000000.0);
//			fflush(stdout);
//			lastPrintTime = now;
//		}
//		printLock.Unlock();
//	}

	return B_OK;
}
747 
748 
749 // #pragma mark -
750 
751 
752 /*static*/ int
753 StreamBase::_Read(void* cookie, uint8* buffer, int bufferSize)
754 {
755 	StreamBase* stream = reinterpret_cast<StreamBase*>(cookie);
756 
757 	BAutolock _(stream->fSourceLock);
758 
759 	TRACE_IO("StreamBase::_Read(%p, %p, %d) position: %lld\n",
760 		cookie, buffer, bufferSize, stream->fPosition);
761 
762 	if (stream->fPosition != stream->fSource->Position()) {
763 		TRACE_IO("StreamBase::_Read fSource position: %lld\n",
764 			stream->fSource->Position());
765 
766 		off_t position
767 			= stream->fSource->Seek(stream->fPosition, SEEK_SET);
768 		if (position != stream->fPosition)
769 			return -1;
770 	}
771 
772 	ssize_t read = stream->fSource->Read(buffer, bufferSize);
773 	if (read > 0)
774 		stream->fPosition += read;
775 
776 	TRACE_IO("  read: %ld\n", read);
777 	return (int)read;
778 
779 }
780 
781 
782 /*static*/ off_t
783 StreamBase::_Seek(void* cookie, off_t offset, int whence)
784 {
785 	TRACE_IO("StreamBase::_Seek(%p, %lld, %d)\n",
786 		cookie, offset, whence);
787 
788 	StreamBase* stream = reinterpret_cast<StreamBase*>(cookie);
789 
790 	BAutolock _(stream->fSourceLock);
791 
792 	// Support for special file size retrieval API without seeking
793 	// anywhere:
794 	if (whence == AVSEEK_SIZE) {
795 		off_t size;
796 		if (stream->fSource->GetSize(&size) == B_OK)
797 			return size;
798 		return -1;
799 	}
800 
801 	// If not requested to seek to an absolute position, we need to
802 	// confirm that the stream is currently at the position that we
803 	// think it is.
804 	if (whence != SEEK_SET
805 		&& stream->fPosition != stream->fSource->Position()) {
806 		off_t position
807 			= stream->fSource->Seek(stream->fPosition, SEEK_SET);
808 		if (position != stream->fPosition)
809 			return -1;
810 	}
811 
812 	off_t position = stream->fSource->Seek(offset, whence);
813 	TRACE_IO("  position: %lld\n", position);
814 	if (position < 0)
815 		return -1;
816 
817 	stream->fPosition = position;
818 
819 	return position;
820 }
821 
822 
823 status_t
824 StreamBase::_NextPacket(bool reuse)
825 {
826 	TRACE_PACKET("StreamBase::_NextPacket(%d)\n", reuse);
827 
828 	if (fReusePacket) {
829 		// The last packet was marked for reuse, so we keep using it.
830 		TRACE_PACKET("  re-using last packet\n");
831 		fReusePacket = reuse;
832 		return B_OK;
833 	}
834 
835 	av_free_packet(&fPacket);
836 
837 	while (true) {
838 		if (av_read_frame(fContext, &fPacket) < 0) {
839 			// NOTE: Even though we may get the error for a different stream,
840 			// av_read_frame() is not going to be successful from here on, so
841 			// it doesn't matter
842 			fReusePacket = false;
843 			return B_LAST_BUFFER_ERROR;
844 		}
845 
846 		if (fPacket.stream_index == Index())
847 			break;
848 
849 		// This is a packet from another stream, ignore it.
850 		av_free_packet(&fPacket);
851 	}
852 
853 	// Mark this packet with the new reuse flag.
854 	fReusePacket = reuse;
855 	return B_OK;
856 }
857 
858 
859 int64_t
860 StreamBase::_ConvertToStreamTimeBase(bigtime_t time) const
861 {
862 	int64 timeStamp = int64_t((double)time * fStream->time_base.den
863 		/ (1000000.0 * fStream->time_base.num) + 0.5);
864 	if (fStream->start_time != kNoPTSValue)
865 		timeStamp += fStream->start_time;
866 	return timeStamp;
867 }
868 
869 
870 bigtime_t
871 StreamBase::_ConvertFromStreamTimeBase(int64_t time) const
872 {
873 	if (fStream->start_time != kNoPTSValue)
874 		time -= fStream->start_time;
875 
876 	return bigtime_t(1000000.0 * time * fStream->time_base.num
877 		/ fStream->time_base.den + 0.5);
878 }
879 
880 
881 // #pragma mark - AVFormatReader::Stream
882 
883 
// Concrete per-track stream: adds media_format initialization, metadata
// retrieval and keyframe finding on top of StreamBase.
class AVFormatReader::Stream : public StreamBase {
public:
								Stream(BMediaIO* source,
									BLocker* streamLock);
	virtual						~Stream();

	// Setup this stream to point to the AVStream at the given streamIndex.
	// This will also initialize the media_format.
	virtual	status_t			Init(int32 streamIndex);

			status_t			GetMetaData(BMessage* data);

	// Support for AVFormatReader
			status_t			GetStreamInfo(int64* frameCount,
									bigtime_t* duration, media_format* format,
									const void** infoBuffer,
									size_t* infoSize) const;

			status_t			FindKeyFrame(uint32 flags, int64* frame,
									bigtime_t* time) const;
	virtual	status_t			Seek(uint32 flags, int64* frame,
									bigtime_t* time);

private:
	// Serializes access to this stream (passed to StreamBase as the
	// stream lock).
	mutable	BLocker				fLock;

			// Caches the request/result of the last keyframe lookup;
			// presumably used to avoid redundant FindKeyFrame()/Seek()
			// work — confirm against the method implementations.
			struct KeyframeInfo {
				bigtime_t		requestedTime;
				int64			requestedFrame;
				bigtime_t		reportedTime;
				int64			reportedFrame;
				uint32			seekFlags;
			};
	mutable	KeyframeInfo		fLastReportedKeyframe;
	// Lazily created helper stream (NULL until needed, deleted in the
	// destructor).
	mutable	StreamBase*			fGhostStream;
};
920 
921 
922 
923 AVFormatReader::Stream::Stream(BMediaIO* source, BLocker* streamLock)
924 	:
925 	StreamBase(source, streamLock, &fLock),
926 	fLock("stream lock"),
927 	fGhostStream(NULL)
928 {
929 	fLastReportedKeyframe.requestedTime = 0;
930 	fLastReportedKeyframe.requestedFrame = 0;
931 	fLastReportedKeyframe.reportedTime = 0;
932 	fLastReportedKeyframe.reportedFrame = 0;
933 }
934 
935 
AVFormatReader::Stream::~Stream()
{
	// fGhostStream is created lazily and may still be NULL here;
	// deleting NULL is safe.
	delete fGhostStream;
}
940 
941 
// Initializes this stream for the given virtual stream index and fills out
// fFormat (the Media Kit media_format) from the stream's AVCodecContext.
// Returns B_OK on success, B_ERROR for unknown media types, and
// B_NOT_SUPPORTED for raw-audio sample formats we cannot describe.
status_t
AVFormatReader::Stream::Init(int32 virtualIndex)
{
	TRACE("AVFormatReader::Stream::Init(%ld)\n", virtualIndex);

	status_t ret = StreamBase::Init(virtualIndex);
	if (ret != B_OK)
		return ret;

	// Get a pointer to the AVCodecContext for the stream at streamIndex.
	AVCodecContext* codecContext = fStream->codec;

#if 0
// stippi: Here I was experimenting with the question if some fields of the
// AVCodecContext change (or get filled out at all), if the AVCodec is opened.
	class CodecOpener {
	public:
		CodecOpener(AVCodecContext* context)
		{
			fCodecContext = context;
			AVCodec* codec = avcodec_find_decoder(context->codec_id);
			fCodecOpen = avcodec_open(context, codec) >= 0;
			if (!fCodecOpen)
				TRACE("  failed to open the codec!\n");
		}
		~CodecOpener()
		{
			if (fCodecOpen)
				avcodec_close(fCodecContext);
		}
	private:
		AVCodecContext*		fCodecContext;
		bool				fCodecOpen;
	} codecOpener(codecContext);
#endif

	// initialize the media_format for this stream
	media_format* format = &fFormat;
	memset(format, 0, sizeof(media_format));

	media_format_description description;

	// Set format family and type depending on codec_type of the stream.
	switch (codecContext->codec_type) {
		case AVMEDIA_TYPE_AUDIO:
			// NOTE: This range check relies on the PCM codec IDs being
			// contiguous in FFmpeg's enum (S16LE .. U8).
			if ((codecContext->codec_id >= AV_CODEC_ID_PCM_S16LE)
				&& (codecContext->codec_id <= AV_CODEC_ID_PCM_U8)) {
				TRACE("  raw audio\n");
				format->type = B_MEDIA_RAW_AUDIO;
				description.family = B_ANY_FORMAT_FAMILY;
				// This will then apparently be handled by the (built into
				// BMediaTrack) RawDecoder.
			} else {
				TRACE("  encoded audio\n");
				format->type = B_MEDIA_ENCODED_AUDIO;
				description.family = B_MISC_FORMAT_FAMILY;
				description.u.misc.file_format = 'ffmp';
			}
			break;
		case AVMEDIA_TYPE_VIDEO:
			TRACE("  encoded video\n");
			format->type = B_MEDIA_ENCODED_VIDEO;
			description.family = B_MISC_FORMAT_FAMILY;
			description.u.misc.file_format = 'ffmp';
			break;
		default:
			TRACE("  unknown type\n");
			format->type = B_MEDIA_UNKNOWN_TYPE;
			return B_ERROR;
			break;
	}

	if (format->type == B_MEDIA_RAW_AUDIO) {
		// We cannot describe all raw-audio formats, some are unsupported.
		switch (codecContext->codec_id) {
			case AV_CODEC_ID_PCM_S16LE:
				format->u.raw_audio.format
					= media_raw_audio_format::B_AUDIO_SHORT;
				format->u.raw_audio.byte_order
					= B_MEDIA_LITTLE_ENDIAN;
				break;
			case AV_CODEC_ID_PCM_S16BE:
				format->u.raw_audio.format
					= media_raw_audio_format::B_AUDIO_SHORT;
				format->u.raw_audio.byte_order
					= B_MEDIA_BIG_ENDIAN;
				break;
			case AV_CODEC_ID_PCM_U16LE:
//				format->u.raw_audio.format
//					= media_raw_audio_format::B_AUDIO_USHORT;
//				format->u.raw_audio.byte_order
//					= B_MEDIA_LITTLE_ENDIAN;
				return B_NOT_SUPPORTED;
				break;
			case AV_CODEC_ID_PCM_U16BE:
//				format->u.raw_audio.format
//					= media_raw_audio_format::B_AUDIO_USHORT;
//				format->u.raw_audio.byte_order
//					= B_MEDIA_BIG_ENDIAN;
				return B_NOT_SUPPORTED;
				break;
			case AV_CODEC_ID_PCM_S8:
				format->u.raw_audio.format
					= media_raw_audio_format::B_AUDIO_CHAR;
				break;
			case AV_CODEC_ID_PCM_U8:
				format->u.raw_audio.format
					= media_raw_audio_format::B_AUDIO_UCHAR;
				break;
			default:
				return B_NOT_SUPPORTED;
				break;
		}
	} else {
		if (description.family == B_MISC_FORMAT_FAMILY)
			description.u.misc.codec = codecContext->codec_id;

		// Let the Media Kit resolve the description into a media_format.
		BMediaFormats formats;
		status_t status = formats.GetFormatFor(description, format);
		if (status < B_OK)
			TRACE("  formats.GetFormatFor() error: %s\n", strerror(status));

		// Stash the FFmpeg codec tag (FourCC) in the format's user data,
		// NUL-terminated, so decoders can retrieve it later.
		format->user_data_type = B_CODEC_TYPE_INFO;
		*(uint32*)format->user_data = codecContext->codec_tag;
		format->user_data[4] = 0;
	}

	format->require_flags = 0;
	format->deny_flags = B_MEDIA_MAUI_UNDEFINED_FLAGS;

	switch (format->type) {
		case B_MEDIA_RAW_AUDIO:
			format->u.raw_audio.frame_rate = (float)codecContext->sample_rate;
			format->u.raw_audio.channel_count = codecContext->channels;
			format->u.raw_audio.channel_mask = codecContext->channel_layout;
			ConvertAVSampleFormatToRawAudioFormat(codecContext->sample_fmt,
				format->u.raw_audio.format);
			format->u.raw_audio.buffer_size = 0;

			// Read one packet and mark it for later re-use. (So our first
			// GetNextChunk() call does not read another packet.)
			if (_NextPacket(true) == B_OK) {
				TRACE("  successfully determined audio buffer size: %d\n",
					fPacket.size);
				format->u.raw_audio.buffer_size = fPacket.size;
			}
			break;

		case B_MEDIA_ENCODED_AUDIO:
			format->u.encoded_audio.bit_rate = codecContext->bit_rate;
			format->u.encoded_audio.frame_size = codecContext->frame_size;
			// Fill in some info about possible output format
			format->u.encoded_audio.output
				= media_multi_audio_format::wildcard;
			format->u.encoded_audio.output.frame_rate
				= (float)codecContext->sample_rate;
			// Channel layout bits match in Be API and FFmpeg.
			format->u.encoded_audio.output.channel_count
				= codecContext->channels;
			format->u.encoded_audio.multi_info.channel_mask
				= codecContext->channel_layout;
			format->u.encoded_audio.output.byte_order
				= avformat_to_beos_byte_order(codecContext->sample_fmt);
			ConvertAVSampleFormatToRawAudioFormat(codecContext->sample_fmt,
				format->u.encoded_audio.output.format);
			// Prefer the container's block alignment for the output buffer
			// size; otherwise derive it from frame size, channel count and
			// sample size.
			if (codecContext->block_align > 0) {
				format->u.encoded_audio.output.buffer_size
					= codecContext->block_align;
			} else {
				format->u.encoded_audio.output.buffer_size
					= codecContext->frame_size * codecContext->channels
						* (format->u.encoded_audio.output.format
							& media_raw_audio_format::B_AUDIO_SIZE_MASK);
			}
			break;

		case B_MEDIA_ENCODED_VIDEO:
// TODO: Specifying any of these seems to throw off the format matching
// later on.
//			format->u.encoded_video.avg_bit_rate = codecContext->bit_rate;
//			format->u.encoded_video.max_bit_rate = codecContext->bit_rate
//				+ codecContext->bit_rate_tolerance;

//			format->u.encoded_video.encoding
//				= media_encoded_video_format::B_ANY;

//			format->u.encoded_video.frame_size = 1;
//			format->u.encoded_video.forward_history = 0;
//			format->u.encoded_video.backward_history = 0;

			format->u.encoded_video.output.field_rate = FrameRate();
			format->u.encoded_video.output.interlace = 1;

			format->u.encoded_video.output.first_active = 0;
			format->u.encoded_video.output.last_active
				= codecContext->height - 1;
				// TODO: Maybe libavformat actually provides that info
				// somewhere...
			format->u.encoded_video.output.orientation
				= B_VIDEO_TOP_LEFT_RIGHT;

			ConvertAVCodecContextToVideoAspectWidthAndHeight(*codecContext,
				format->u.encoded_video.output.pixel_width_aspect,
				format->u.encoded_video.output.pixel_height_aspect);

			format->u.encoded_video.output.display.format
				= pixfmt_to_colorspace(codecContext->pix_fmt);
			format->u.encoded_video.output.display.line_width
				= codecContext->width;
			format->u.encoded_video.output.display.line_count
				= codecContext->height;
			TRACE("  width/height: %d/%d\n", codecContext->width,
				codecContext->height);
			format->u.encoded_video.output.display.bytes_per_row = 0;
			format->u.encoded_video.output.display.pixel_offset = 0;
			format->u.encoded_video.output.display.line_offset = 0;
			format->u.encoded_video.output.display.flags = 0; // TODO

			break;

		default:
			// This is an unknown format to us.
			break;
	}

	// Add the meta data, if any
	if (codecContext->extradata_size > 0) {
		format->SetMetaData(codecContext->extradata,
			codecContext->extradata_size);
		TRACE("  extradata: %p\n", format->MetaData());
	}

	TRACE("  extradata_size: %d\n", codecContext->extradata_size);
//	TRACE("  intra_matrix: %p\n", codecContext->intra_matrix);
//	TRACE("  inter_matrix: %p\n", codecContext->inter_matrix);
//	TRACE("  get_buffer(): %p\n", codecContext->get_buffer);
//	TRACE("  release_buffer(): %p\n", codecContext->release_buffer);

#ifdef TRACE_AVFORMAT_READER
	char formatString[512];
	if (string_for_format(*format, formatString, sizeof(formatString)))
		TRACE("  format: %s\n", formatString);

	uint32 encoding = format->Encoding();
	TRACE("  encoding '%.4s'\n", (char*)&encoding);
#endif

	return B_OK;
}
1191 
1192 
1193 status_t
1194 AVFormatReader::Stream::GetMetaData(BMessage* data)
1195 {
1196 	BAutolock _(&fLock);
1197 
1198 	avdictionary_to_message(fStream->metadata, data);
1199 
1200 	return B_OK;
1201 }
1202 
1203 
// Reports this stream's frame count, duration, media_format and codec
// extradata (as info buffer) to the caller. Always returns B_OK.
status_t
AVFormatReader::Stream::GetStreamInfo(int64* frameCount,
	bigtime_t* duration, media_format* format, const void** infoBuffer,
	size_t* infoSize) const
{
	BAutolock _(&fLock);

	TRACE("AVFormatReader::Stream::GetStreamInfo(%ld)\n",
		VirtualIndex());

	double frameRate = FrameRate();
	TRACE("  frameRate: %.4f\n", frameRate);

	#ifdef TRACE_AVFORMAT_READER
	if (fStream->start_time != kNoPTSValue) {
		bigtime_t startTime = _ConvertFromStreamTimeBase(fStream->start_time);
		TRACE("  start_time: %lld or %.5fs\n", startTime,
			startTime / 1000000.0);
		// TODO: Handle start time in FindKeyFrame() and Seek()?!
	}
	#endif // TRACE_AVFORMAT_READER

	*duration = Duration();

	TRACE("  duration: %lld or %.5fs\n", *duration, *duration / 1000000.0);

	#if 0
	if (fStream->nb_index_entries > 0) {
		TRACE("  dump of index entries:\n");
		int count = 5;
		int firstEntriesCount = min_c(fStream->nb_index_entries, count);
		int i = 0;
		for (; i < firstEntriesCount; i++) {
			AVIndexEntry& entry = fStream->index_entries[i];
			bigtime_t timeGlobal = entry.timestamp;
			bigtime_t timeNative = _ConvertFromStreamTimeBase(timeGlobal);
			TRACE("    [%d] native: %.5fs global: %.5fs\n", i,
				timeNative / 1000000.0f, timeGlobal / 1000000.0f);
		}
		if (fStream->nb_index_entries - count > i) {
			i = fStream->nb_index_entries - count;
			TRACE("    ...\n");
			for (; i < fStream->nb_index_entries; i++) {
				AVIndexEntry& entry = fStream->index_entries[i];
				bigtime_t timeGlobal = entry.timestamp;
				bigtime_t timeNative = _ConvertFromStreamTimeBase(timeGlobal);
				TRACE("    [%d] native: %.5fs global: %.5fs\n", i,
					timeNative / 1000000.0f, timeGlobal / 1000000.0f);
			}
		}
	}
	#endif

	// NOTE: fStream->nb_frames is fetched but then unconditionally
	// overwritten below -- the frame count is always derived from duration
	// and frame rate (the nb_frames path is commented out).
	*frameCount = fStream->nb_frames;
//	if (*frameCount == 0) {
		// Calculate from duration and frame rate
		*frameCount = (int64)(*duration * frameRate / 1000000LL);
		TRACE("  frameCount calculated: %lld, from context: %lld\n",
			*frameCount, fStream->nb_frames);
//	} else
//		TRACE("  frameCount: %lld\n", *frameCount);

	*format = fFormat;

	// Expose the codec extradata directly; the pointer stays owned by
	// FFmpeg and remains valid while this stream is open.
	*infoBuffer = fStream->codec->extradata;
	*infoSize = fStream->codec->extradata_size;

	return B_OK;
}
1273 
1274 
1275 status_t
1276 AVFormatReader::Stream::FindKeyFrame(uint32 flags, int64* frame,
1277 	bigtime_t* time) const
1278 {
1279 	BAutolock _(&fLock);
1280 
1281 	if (fContext == NULL || fStream == NULL)
1282 		return B_NO_INIT;
1283 
1284 	TRACE_FIND("AVFormatReader::Stream::FindKeyFrame(%ld,%s%s%s%s, "
1285 		"%lld, %lld)\n", VirtualIndex(),
1286 		(flags & B_MEDIA_SEEK_TO_FRAME) ? " B_MEDIA_SEEK_TO_FRAME" : "",
1287 		(flags & B_MEDIA_SEEK_TO_TIME) ? " B_MEDIA_SEEK_TO_TIME" : "",
1288 		(flags & B_MEDIA_SEEK_CLOSEST_BACKWARD)
1289 			? " B_MEDIA_SEEK_CLOSEST_BACKWARD" : "",
1290 		(flags & B_MEDIA_SEEK_CLOSEST_FORWARD)
1291 			? " B_MEDIA_SEEK_CLOSEST_FORWARD" : "",
1292 		*frame, *time);
1293 
1294 	bool inLastRequestedRange = false;
1295 	if ((flags & B_MEDIA_SEEK_TO_FRAME) != 0) {
1296 		if (fLastReportedKeyframe.reportedFrame
1297 			<= fLastReportedKeyframe.requestedFrame) {
1298 			inLastRequestedRange
1299 				= *frame >= fLastReportedKeyframe.reportedFrame
1300 					&& *frame <= fLastReportedKeyframe.requestedFrame;
1301 		} else {
1302 			inLastRequestedRange
1303 				= *frame >= fLastReportedKeyframe.requestedFrame
1304 					&& *frame <= fLastReportedKeyframe.reportedFrame;
1305 		}
1306 	} else if ((flags & B_MEDIA_SEEK_TO_FRAME) == 0) {
1307 		if (fLastReportedKeyframe.reportedTime
1308 			<= fLastReportedKeyframe.requestedTime) {
1309 			inLastRequestedRange
1310 				= *time >= fLastReportedKeyframe.reportedTime
1311 					&& *time <= fLastReportedKeyframe.requestedTime;
1312 		} else {
1313 			inLastRequestedRange
1314 				= *time >= fLastReportedKeyframe.requestedTime
1315 					&& *time <= fLastReportedKeyframe.reportedTime;
1316 		}
1317 	}
1318 
1319 	if (inLastRequestedRange) {
1320 		*frame = fLastReportedKeyframe.reportedFrame;
1321 		*time = fLastReportedKeyframe.reportedTime;
1322 		TRACE_FIND("  same as last reported keyframe\n");
1323 		return B_OK;
1324 	}
1325 
1326 	double frameRate = FrameRate();
1327 	if ((flags & B_MEDIA_SEEK_TO_FRAME) != 0)
1328 		*time = (bigtime_t)(*frame * 1000000.0 / frameRate + 0.5);
1329 
1330 	status_t ret;
1331 	if (fGhostStream == NULL) {
1332 		BAutolock _(fSourceLock);
1333 
1334 		fGhostStream = new(std::nothrow) StreamBase(fSource, fSourceLock,
1335 			&fLock);
1336 		if (fGhostStream == NULL) {
1337 			TRACE("  failed to allocate ghost stream\n");
1338 			return B_NO_MEMORY;
1339 		}
1340 
1341 		ret = fGhostStream->Open();
1342 		if (ret != B_OK) {
1343 			TRACE("  ghost stream failed to open: %s\n", strerror(ret));
1344 			return B_ERROR;
1345 		}
1346 
1347 		ret = fGhostStream->Init(fVirtualIndex);
1348 		if (ret != B_OK) {
1349 			TRACE("  ghost stream failed to init: %s\n", strerror(ret));
1350 			return B_ERROR;
1351 		}
1352 	}
1353 	fLastReportedKeyframe.requestedFrame = *frame;
1354 	fLastReportedKeyframe.requestedTime = *time;
1355 	fLastReportedKeyframe.seekFlags = flags;
1356 
1357 	ret = fGhostStream->Seek(flags, frame, time);
1358 	if (ret != B_OK) {
1359 		TRACE("  ghost stream failed to seek: %s\n", strerror(ret));
1360 		return B_ERROR;
1361 	}
1362 
1363 	fLastReportedKeyframe.reportedFrame = *frame;
1364 	fLastReportedKeyframe.reportedTime = *time;
1365 
1366 	TRACE_FIND("  found time: %.2fs\n", *time / 1000000.0);
1367 	if ((flags & B_MEDIA_SEEK_TO_FRAME) != 0) {
1368 		*frame = int64_t(*time * FrameRate() / 1000000.0 + 0.5);
1369 		TRACE_FIND("  found frame: %lld\n", *frame);
1370 	}
1371 
1372 	return B_OK;
1373 }
1374 
1375 
// Seeks this stream to *frame/*time per flags. If the request lies in the
// range covered by the last FindKeyFrame() answer, the cached requested
// values (and cached seek flags) are substituted so the seek lands exactly
// on the previously reported keyframe.
status_t
AVFormatReader::Stream::Seek(uint32 flags, int64* frame, bigtime_t* time)
{
	BAutolock _(&fLock);

	if (fContext == NULL || fStream == NULL)
		return B_NO_INIT;

	// Put the old requested values into frame/time, since we already know
	// that the sought frame/time will then match the reported values.
	// TODO: Will not work if client changes seek flags (from backwards to
	// forward or vice versa)!!
	bool inLastRequestedRange = false;
	if ((flags & B_MEDIA_SEEK_TO_FRAME) != 0) {
		// Frame-based request: range check on frames (in either order).
		if (fLastReportedKeyframe.reportedFrame
			<= fLastReportedKeyframe.requestedFrame) {
			inLastRequestedRange
				= *frame >= fLastReportedKeyframe.reportedFrame
					&& *frame <= fLastReportedKeyframe.requestedFrame;
		} else {
			inLastRequestedRange
				= *frame >= fLastReportedKeyframe.requestedFrame
					&& *frame <= fLastReportedKeyframe.reportedFrame;
		}
	} else if ((flags & B_MEDIA_SEEK_TO_FRAME) == 0) {
		// Time-based request: same check on times. (The condition is the
		// complement of the branch above, so this always matches here.)
		if (fLastReportedKeyframe.reportedTime
			<= fLastReportedKeyframe.requestedTime) {
			inLastRequestedRange
				= *time >= fLastReportedKeyframe.reportedTime
					&& *time <= fLastReportedKeyframe.requestedTime;
		} else {
			inLastRequestedRange
				= *time >= fLastReportedKeyframe.requestedTime
					&& *time <= fLastReportedKeyframe.reportedTime;
		}
	}

	if (inLastRequestedRange) {
		// Replay the cached request so the underlying seek reproduces the
		// keyframe FindKeyFrame() reported.
		*frame = fLastReportedKeyframe.requestedFrame;
		*time = fLastReportedKeyframe.requestedTime;
		flags = fLastReportedKeyframe.seekFlags;
	}

	return StreamBase::Seek(flags, frame, time);
}
1421 
1422 
1423 // #pragma mark - AVFormatReader
1424 
1425 
// Constructs an empty reader; streams are created later in Sniff() and
// AllocateCookie(). fSourceLock serializes all I/O on the shared source.
AVFormatReader::AVFormatReader()
	:
	fCopyright(""),
	fStreams(NULL),
	fSourceLock("source I/O lock")
{
	TRACE("AVFormatReader::AVFormatReader\n");
}
1434 
1435 
1436 AVFormatReader::~AVFormatReader()
1437 {
1438 	TRACE("AVFormatReader::~AVFormatReader\n");
1439 	if (fStreams != NULL) {
1440 		// The client was supposed to call FreeCookie() on all
1441 		// allocated streams. Deleting the first stream is always
1442 		// prevented, we delete the other ones just in case.
1443 		int32 count = fStreams[0]->CountStreams();
1444 		for (int32 i = 0; i < count; i++)
1445 			delete fStreams[i];
1446 		delete[] fStreams;
1447 	}
1448 }
1449 
1450 
1451 // #pragma mark -
1452 
1453 
1454 const char*
1455 AVFormatReader::Copyright()
1456 {
1457 	if (fCopyright.Length() <= 0) {
1458 		BMessage message;
1459 		if (GetMetaData(&message) == B_OK)
1460 			message.FindString("copyright", &fCopyright);
1461 	}
1462 	return fCopyright.String();
1463 }
1464 
1465 
1466 status_t
1467 AVFormatReader::Sniff(int32* _streamCount)
1468 {
1469 	TRACE("AVFormatReader::Sniff\n");
1470 
1471 	BMediaIO* source = dynamic_cast<BMediaIO*>(Source());
1472 	if (source == NULL) {
1473 		TRACE("  not a BMediaIO, but we need it to be one.\n");
1474 		return B_NOT_SUPPORTED;
1475 	}
1476 
1477 	Stream* stream = new(std::nothrow) Stream(source,
1478 		&fSourceLock);
1479 	if (stream == NULL) {
1480 		ERROR("AVFormatReader::Sniff() - failed to allocate Stream\n");
1481 		return B_NO_MEMORY;
1482 	}
1483 
1484 	ObjectDeleter<Stream> streamDeleter(stream);
1485 
1486 	status_t ret = stream->Open();
1487 	if (ret != B_OK) {
1488 		TRACE("  failed to detect stream: %s\n", strerror(ret));
1489 		return ret;
1490 	}
1491 
1492 	delete[] fStreams;
1493 	fStreams = NULL;
1494 
1495 	int32 streamCount = stream->CountStreams();
1496 	if (streamCount == 0) {
1497 		TRACE("  failed to detect any streams: %s\n", strerror(ret));
1498 		return B_ERROR;
1499 	}
1500 
1501 	fStreams = new(std::nothrow) Stream*[streamCount];
1502 	if (fStreams == NULL) {
1503 		ERROR("AVFormatReader::Sniff() - failed to allocate streams\n");
1504 		return B_NO_MEMORY;
1505 	}
1506 
1507 	memset(fStreams, 0, sizeof(Stream*) * streamCount);
1508 	fStreams[0] = stream;
1509 	streamDeleter.Detach();
1510 
1511 	#ifdef TRACE_AVFORMAT_READER
1512 	av_dump_format(const_cast<AVFormatContext*>(stream->Context()), 0, "", 0);
1513 	#endif
1514 
1515 	if (_streamCount != NULL)
1516 		*_streamCount = streamCount;
1517 
1518 	return B_OK;
1519 }
1520 
1521 
1522 void
1523 AVFormatReader::GetFileFormatInfo(media_file_format* mff)
1524 {
1525 	TRACE("AVFormatReader::GetFileFormatInfo\n");
1526 
1527 	if (fStreams == NULL)
1528 		return;
1529 
1530 	// The first cookie is always there!
1531 	const AVFormatContext* context = fStreams[0]->Context();
1532 
1533 	if (context == NULL || context->iformat == NULL) {
1534 		TRACE("  no AVFormatContext or AVInputFormat!\n");
1535 		return;
1536 	}
1537 
1538 	const media_file_format* format = demuxer_format_for(context->iformat);
1539 
1540 	mff->capabilities = media_file_format::B_READABLE
1541 		| media_file_format::B_KNOWS_ENCODED_VIDEO
1542 		| media_file_format::B_KNOWS_ENCODED_AUDIO
1543 		| media_file_format::B_IMPERFECTLY_SEEKABLE;
1544 
1545 	if (format != NULL) {
1546 		mff->family = format->family;
1547 	} else {
1548 		TRACE("  no DemuxerFormat for AVInputFormat!\n");
1549 		mff->family = B_MISC_FORMAT_FAMILY;
1550 	}
1551 
1552 	mff->version = 100;
1553 
1554 	if (format != NULL) {
1555 		strcpy(mff->mime_type, format->mime_type);
1556 	} else {
1557 		// TODO: Would be nice to be able to provide this from AVInputFormat,
1558 		// maybe by extending the FFmpeg code itself (all demuxers).
1559 		strcpy(mff->mime_type, "");
1560 	}
1561 
1562 	if (context->iformat->extensions != NULL)
1563 		strcpy(mff->file_extension, context->iformat->extensions);
1564 	else {
1565 		TRACE("  no file extensions for AVInputFormat.\n");
1566 		strcpy(mff->file_extension, "");
1567 	}
1568 
1569 	if (context->iformat->name != NULL)
1570 		strcpy(mff->short_name,  context->iformat->name);
1571 	else {
1572 		TRACE("  no short name for AVInputFormat.\n");
1573 		strcpy(mff->short_name, "");
1574 	}
1575 
1576 	if (context->iformat->long_name != NULL)
1577 		sprintf(mff->pretty_name, "%s (FFmpeg)", context->iformat->long_name);
1578 	else {
1579 		if (format != NULL)
1580 			sprintf(mff->pretty_name, "%s (FFmpeg)", format->pretty_name);
1581 		else
1582 			strcpy(mff->pretty_name, "Unknown (FFmpeg)");
1583 	}
1584 }
1585 
1586 
1587 status_t
1588 AVFormatReader::GetMetaData(BMessage* _data)
1589 {
1590 	// The first cookie is always there!
1591 	const AVFormatContext* context = fStreams[0]->Context();
1592 
1593 	if (context == NULL)
1594 		return B_NO_INIT;
1595 
1596 	avdictionary_to_message(context->metadata, _data);
1597 
1598 	// Add chapter info
1599 	for (unsigned i = 0; i < context->nb_chapters; i++) {
1600 		AVChapter* chapter = context->chapters[i];
1601 		BMessage chapterData;
1602 		chapterData.AddInt64("start", bigtime_t(1000000.0
1603 			* chapter->start * chapter->time_base.num
1604 			/ chapter->time_base.den + 0.5));
1605 		chapterData.AddInt64("end", bigtime_t(1000000.0
1606 			* chapter->end * chapter->time_base.num
1607 			/ chapter->time_base.den + 0.5));
1608 
1609 		avdictionary_to_message(chapter->metadata, &chapterData);
1610 		_data->AddMessage("be:chapter", &chapterData);
1611 	}
1612 
1613 	// Add program info
1614 	for (unsigned i = 0; i < context->nb_programs; i++) {
1615 		BMessage programData;
1616 		avdictionary_to_message(context->programs[i]->metadata, &programData);
1617 		_data->AddMessage("be:program", &programData);
1618 	}
1619 
1620 	return B_OK;
1621 }
1622 
1623 
1624 // #pragma mark -
1625 
1626 
1627 status_t
1628 AVFormatReader::AllocateCookie(int32 streamIndex, void** _cookie)
1629 {
1630 	TRACE("AVFormatReader::AllocateCookie(%ld)\n", streamIndex);
1631 
1632 	BAutolock _(fSourceLock);
1633 
1634 	if (fStreams == NULL)
1635 		return B_NO_INIT;
1636 
1637 	if (streamIndex < 0 || streamIndex >= fStreams[0]->CountStreams())
1638 		return B_BAD_INDEX;
1639 
1640 	if (_cookie == NULL)
1641 		return B_BAD_VALUE;
1642 
1643 	Stream* cookie = fStreams[streamIndex];
1644 	if (cookie == NULL) {
1645 		// Allocate the cookie
1646 		BMediaIO* source = dynamic_cast<BMediaIO*>(Source());
1647 		if (source == NULL) {
1648 			TRACE("  not a BMediaIO, but we need it to be one.\n");
1649 			return B_NOT_SUPPORTED;
1650 		}
1651 
1652 		cookie = new(std::nothrow) Stream(source, &fSourceLock);
1653 		if (cookie == NULL) {
1654 			ERROR("AVFormatReader::Sniff() - failed to allocate "
1655 				"Stream\n");
1656 			return B_NO_MEMORY;
1657 		}
1658 
1659 		status_t ret = cookie->Open();
1660 		if (ret != B_OK) {
1661 			TRACE("  stream failed to open: %s\n", strerror(ret));
1662 			delete cookie;
1663 			return ret;
1664 		}
1665 	}
1666 
1667 	status_t ret = cookie->Init(streamIndex);
1668 	if (ret != B_OK) {
1669 		TRACE("  stream failed to initialize: %s\n", strerror(ret));
1670 		// NOTE: Never delete the first stream!
1671 		if (streamIndex != 0)
1672 			delete cookie;
1673 		return ret;
1674 	}
1675 
1676 	fStreams[streamIndex] = cookie;
1677 	*_cookie = cookie;
1678 
1679 	return B_OK;
1680 }
1681 
1682 
1683 status_t
1684 AVFormatReader::FreeCookie(void *_cookie)
1685 {
1686 	BAutolock _(fSourceLock);
1687 
1688 	Stream* cookie = reinterpret_cast<Stream*>(_cookie);
1689 
1690 	// NOTE: Never delete the first cookie!
1691 	if (cookie != NULL && cookie->VirtualIndex() != 0) {
1692 		if (fStreams != NULL)
1693 			fStreams[cookie->VirtualIndex()] = NULL;
1694 		delete cookie;
1695 	}
1696 
1697 	return B_OK;
1698 }
1699 
1700 
1701 // #pragma mark -
1702 
1703 
1704 status_t
1705 AVFormatReader::GetStreamInfo(void* _cookie, int64* frameCount,
1706 	bigtime_t* duration, media_format* format, const void** infoBuffer,
1707 	size_t* infoSize)
1708 {
1709 	Stream* cookie = reinterpret_cast<Stream*>(_cookie);
1710 	return cookie->GetStreamInfo(frameCount, duration, format, infoBuffer,
1711 		infoSize);
1712 }
1713 
1714 
1715 status_t
1716 AVFormatReader::GetStreamMetaData(void* _cookie, BMessage* _data)
1717 {
1718 	Stream* cookie = reinterpret_cast<Stream*>(_cookie);
1719 	return cookie->GetMetaData(_data);
1720 }
1721 
1722 
1723 status_t
1724 AVFormatReader::Seek(void* _cookie, uint32 seekTo, int64* frame,
1725 	bigtime_t* time)
1726 {
1727 	Stream* cookie = reinterpret_cast<Stream*>(_cookie);
1728 	return cookie->Seek(seekTo, frame, time);
1729 }
1730 
1731 
1732 status_t
1733 AVFormatReader::FindKeyFrame(void* _cookie, uint32 flags, int64* frame,
1734 	bigtime_t* time)
1735 {
1736 	Stream* cookie = reinterpret_cast<Stream*>(_cookie);
1737 	return cookie->FindKeyFrame(flags, frame, time);
1738 }
1739 
1740 
1741 status_t
1742 AVFormatReader::GetNextChunk(void* _cookie, const void** chunkBuffer,
1743 	size_t* chunkSize, media_header* mediaHeader)
1744 {
1745 	Stream* cookie = reinterpret_cast<Stream*>(_cookie);
1746 	return cookie->GetNextChunk(chunkBuffer, chunkSize, mediaHeader);
1747 }
1748