xref: /haiku/src/add-ons/media/plugins/ffmpeg/AVFormatReader.cpp (revision 4b918abdb02a26a770d898594eaaccc6f1726e9b)
1 /*
2  * Copyright 2009-2010, Stephan Aßmus <superstippi@gmx.de>
3  * Copyright 2014, Colin Günther <coling@gmx.de>
4  * Copyright 2018, Dario Casalinuovo
5  * All rights reserved. Distributed under the terms of the GNU L-GPL license.
6  */
7 
8 #include "AVFormatReader.h"
9 
10 #include <stdio.h>
11 #include <string.h>
12 #include <stdlib.h>
13 
14 #include <new>
15 
16 #include <AutoDeleter.h>
17 #include <Autolock.h>
18 #include <ByteOrder.h>
19 #include <MediaIO.h>
20 #include <MediaDefs.h>
21 #include <MediaFormats.h>
22 #include <MimeType.h>
23 
24 extern "C" {
25 	#include "avcodec.h"
26 	#include "avformat.h"
27 }
28 
29 #include "DemuxerTable.h"
30 #include "gfx_util.h"
31 #include "Utilities.h"
32 
33 
34 //#define TRACE_AVFORMAT_READER
35 #ifdef TRACE_AVFORMAT_READER
36 #	define TRACE printf
37 #	define TRACE_IO(a...)
38 #	define TRACE_SEEK(a...) printf(a)
39 #	define TRACE_FIND(a...)
40 #	define TRACE_PACKET(a...)
41 #else
42 #	define TRACE(a...)
43 #	define TRACE_IO(a...)
44 #	define TRACE_SEEK(a...)
45 #	define TRACE_FIND(a...)
46 #	define TRACE_PACKET(a...)
47 #endif
48 
49 #define ERROR(a...) fprintf(stderr, a)
50 
51 
52 static uint32
53 avformat_to_beos_byte_order(AVSampleFormat format)
54 {
55 	// TODO: Huh?
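	// (Presumably returning the host byte order is fine here, since FFmpeg's
	// AVSampleFormat always describes samples in the host's native byte
	// order, so this should match what the decoders deliver.)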
56 	return B_MEDIA_HOST_ENDIAN;
57 }
58 
59 
60 static void
61 avdictionary_to_message(AVDictionary* dictionary, BMessage* message)
62 {
63 	if (dictionary == NULL)
64 		return;
65 
66 	AVDictionaryEntry* entry = NULL;
67 	while ((entry = av_dict_get(dictionary, "", entry,
68 		AV_DICT_IGNORE_SUFFIX))) {
69 		// convert entry keys into something more meaningful using the names from
70 		// id3v2.c
71 		if (strcmp(entry->key, "TALB") == 0 || strcmp(entry->key, "TAL") == 0)
72 			message->AddString("album", entry->value);
73 		else if (strcmp(entry->key, "TCOM") == 0)
74 			message->AddString("composer", entry->value);
75 		else if (strcmp(entry->key, "TCON") == 0 || strcmp(entry->key, "TCO") == 0)
76 			message->AddString("genre", entry->value);
77 		else if (strcmp(entry->key, "TCOP") == 0)
78 			message->AddString("copyright", entry->value);
79 		else if (strcmp(entry->key, "TDRL") == 0 || strcmp(entry->key, "TDRC") == 0)
80 			message->AddString("date", entry->value);
81 		else if (strcmp(entry->key, "TENC") == 0 || strcmp(entry->key, "TEN") == 0)
82 			message->AddString("encoded_by", entry->value);
83 		else if (strcmp(entry->key, "TIT2") == 0 || strcmp(entry->key, "TT2") == 0)
84 			message->AddString("title", entry->value);
85 		else if (strcmp(entry->key, "TLAN") == 0)
86 			message->AddString("language", entry->value);
87 		else if (strcmp(entry->key, "TPE1") == 0 || strcmp(entry->key, "TP1") == 0)
88 			message->AddString("artist", entry->value);
89 		else if (strcmp(entry->key, "TPE2") == 0 || strcmp(entry->key, "TP2") == 0)
90 			message->AddString("album_artist", entry->value);
91 		else if (strcmp(entry->key, "TPE3") == 0 || strcmp(entry->key, "TP3") == 0)
92 			message->AddString("performer", entry->value);
93 		else if (strcmp(entry->key, "TPOS") == 0)
94 			message->AddString("disc", entry->value);
95 		else if (strcmp(entry->key, "TPUB") == 0)
96 			message->AddString("publisher", entry->value);
97 		else if (strcmp(entry->key, "TRCK") == 0 || strcmp(entry->key, "TRK") == 0)
98 			message->AddString("track", entry->value);
99 		else if (strcmp(entry->key, "TSOA") == 0)
100 			message->AddString("album-sort", entry->value);
101 		else if (strcmp(entry->key, "TSOP") == 0)
102 			message->AddString("artist-sort", entry->value);
103 		else if (strcmp(entry->key, "TSOT") == 0)
104 			message->AddString("title-sort", entry->value);
105 		else if (strcmp(entry->key, "TSSE") == 0)
106 			message->AddString("encoder", entry->value);
107 		else if (strcmp(entry->key, "TYER") == 0)
108 			message->AddString("year", entry->value);
109 		else
110 			message->AddString(entry->key, entry->value);
111 	}
112 }
113 
114 
115 // #pragma mark - StreamBase
116 
117 
118 class StreamBase {
119 public:
120 								StreamBase(BMediaIO* source,
121 									BLocker* sourceLock, BLocker* streamLock);
122 	virtual						~StreamBase();
123 
124 	// Init an individual AVFormatContext
125 			status_t			Open();
126 
127 	// Set up this stream to point to the AVStream at the given streamIndex.
128 	virtual	status_t			Init(int32 streamIndex);
129 
130 	inline	const AVFormatContext* Context() const
131 									{ return fContext; }
132 			int32				Index() const;
133 			int32				CountStreams() const;
134 			int32				StreamIndexFor(int32 virtualIndex) const;
135 	inline	int32				VirtualIndex() const
136 									{ return fVirtualIndex; }
137 
138 			double				FrameRate() const;
139 			bigtime_t			Duration() const;
140 
141 	virtual	status_t			Seek(uint32 flags, int64* frame,
142 									bigtime_t* time);
143 
144 			status_t			GetNextChunk(const void** chunkBuffer,
145 									size_t* chunkSize,
146 									media_header* mediaHeader);
147 
148 protected:
149 	// I/O hooks for libavformat; the cookie is the StreamBase instance.
150 	// Since multiple StreamCookies use the same BMediaIO source, they
151 	// maintain the position individually, and may need to seek the source
152 	// in _Read() if its position no longer matches.
153 	static	int					_Read(void* cookie, uint8* buffer,
154 									int bufferSize);
155 	static	off_t				_Seek(void* cookie, off_t offset, int whence);
156 
157 			status_t			_NextPacket(bool reuse);
158 
159 			int64_t				_ConvertToStreamTimeBase(bigtime_t time) const;
160 			bigtime_t			_ConvertFromStreamTimeBase(int64_t time) const;
161 
162 protected:
163 			BMediaIO*			fSource;
164 			off_t				fPosition;
165 			// Since different threads may read from the source,
166 			// we need to protect the file position and I/O with a lock.
167 			BLocker*			fSourceLock;
168 
169 			BLocker*			fStreamLock;
170 
171 			AVFormatContext*	fContext;
172 			AVStream*			fStream;
173 			int32				fVirtualIndex;
174 
175 			media_format		fFormat;
176 
177 			AVIOContext*		fIOContext;
178 
179 			AVPacket			fPacket;
180 			bool				fReusePacket;
181 
182 			bool				fSeekByBytes;
183 			bool				fStreamBuildsIndexWhileReading;
184 };
185 
186 
187 StreamBase::StreamBase(BMediaIO* source, BLocker* sourceLock,
188 		BLocker* streamLock)
189 	:
190 	fSource(source),
191 	fPosition(0),
192 	fSourceLock(sourceLock),
193 
194 	fStreamLock(streamLock),
195 
196 	fContext(NULL),
197 	fStream(NULL),
198 	fVirtualIndex(-1),
199 	fIOContext(NULL),
200 
201 	fReusePacket(false),
202 
203 	fSeekByBytes(false),
204 	fStreamBuildsIndexWhileReading(false)
205 {
206 	// NOTE: Don't use streamLock here, it may not yet be initialized!
207 
208 	av_new_packet(&fPacket, 0);
209 	fFormat.Clear();
210 }
211 
212 
213 StreamBase::~StreamBase()
214 {
215 	avformat_close_input(&fContext);
216 	av_packet_unref(&fPacket);
217 	if (fIOContext != NULL)
218 		av_free(fIOContext->buffer);
219 	av_free(fIOContext);
220 }
221 
222 
223 status_t
224 StreamBase::Open()
225 {
226 	BAutolock _(fStreamLock);
227 
228 	// Init probing data
229 	size_t bufferSize = 32768;
230 	uint8* buffer = static_cast<uint8*>(av_malloc(bufferSize));
231 	if (buffer == NULL)
232 		return B_NO_MEMORY;
233 
234 	// First try to identify the file using the MIME database, as ffmpeg
235 	// is not very good at this and relies on us to give it the file extension
236 	// as a hint.
237 	// For this we need some valid data in the buffer; the first 512 bytes
238 	// should do because our MIME sniffing never uses more.
239 	const char* extension = NULL;
240 	BMessage message;
241 	if (fSource->Read(buffer, 512) == 512) {
242 		BMimeType type;
243 		if (BMimeType::GuessMimeType(buffer, 512, &type) == B_OK) {
244 			if (type.GetFileExtensions(&message) == B_OK) {
245 				extension = message.FindString("extensions");
246 			}
247 		}
248 	}
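	// Note that this probe read advances the BMediaIO's position while
	// fPosition is still 0, so the first _Read() callback will seek the
	// source back to the beginning before handing data to libavformat.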
249 
250 	// Allocate the I/O context with buffer and hook functions, passing
251 	// ourselves as the cookie.
252 	memset(buffer, 0, bufferSize);
253 	fIOContext = avio_alloc_context(buffer, bufferSize, 0, this, _Read, 0,
254 		_Seek);
255 	if (fIOContext == NULL) {
256 		TRACE("StreamBase::Open() - avio_alloc_context() failed!\n");
257 		av_free(buffer);
258 		return B_ERROR;
259 	}
260 
261 	fContext = avformat_alloc_context();
262 	fContext->pb = fIOContext;
263 
264 	// Allocate our context and probe the input format
265 	if (avformat_open_input(&fContext, extension, NULL, NULL) < 0) {
266 		TRACE("StreamBase::Open() - avformat_open_input() failed!\n");
267 		// avformat_open_input() frees the context in case of failure
268 		fContext = NULL;
269 		av_free(fIOContext->buffer);
270 		av_free(fIOContext);
271 		fIOContext = NULL;
272 		return B_NOT_SUPPORTED;
273 	}
274 
275 	TRACE("StreamBase::Open() - "
276 		"avformat_open_input(): %s\n", fContext->iformat->name);
277 	TRACE("  flags:%s%s%s%s%s\n",
278 		(fContext->iformat->flags & AVFMT_GLOBALHEADER) ? " AVFMT_GLOBALHEADER" : "",
279 		(fContext->iformat->flags & AVFMT_NOTIMESTAMPS) ? " AVFMT_NOTIMESTAMPS" : "",
280 		(fContext->iformat->flags & AVFMT_GENERIC_INDEX) ? " AVFMT_GENERIC_INDEX" : "",
281 		(fContext->iformat->flags & AVFMT_TS_DISCONT) ? " AVFMT_TS_DISCONT" : "",
282 		(fContext->iformat->flags & AVFMT_VARIABLE_FPS) ? " AVFMT_VARIABLE_FPS" : ""
283 	);
284 
285 
286 	// Retrieve stream information
287 	if (avformat_find_stream_info(fContext, NULL) < 0) {
288 		TRACE("StreamBase::Open() - avformat_find_stream_info() failed!\n");
289 		return B_NOT_SUPPORTED;
290 	}
291 
292 	fSeekByBytes = (fContext->iformat->flags & AVFMT_TS_DISCONT) != 0;
293 	fStreamBuildsIndexWhileReading
294 		= (fContext->iformat->flags & AVFMT_GENERIC_INDEX) != 0
295 			|| fSeekByBytes;
296 
297 	TRACE("StreamBase::Open() - "
298 		"avformat_find_stream_info() success! Seeking by bytes: %d\n",
299 		fSeekByBytes);
300 
301 	return B_OK;
302 }
303 
304 
305 status_t
306 StreamBase::Init(int32 virtualIndex)
307 {
308 	BAutolock _(fStreamLock);
309 
310 	TRACE("StreamBase::Init(%ld)\n", virtualIndex);
311 
312 	if (fContext == NULL)
313 		return B_NO_INIT;
314 
315 	int32 streamIndex = StreamIndexFor(virtualIndex);
316 	if (streamIndex < 0) {
317 		TRACE("  bad stream index!\n");
318 		return B_BAD_INDEX;
319 	}
320 
321 	TRACE("  context stream index: %ld\n", streamIndex);
322 
323 	// We need to remember the virtual index so that
324 	// AVFormatReader::FreeCookie() can clear the correct stream entry.
325 	fVirtualIndex = virtualIndex;
326 
327 	// Make us point to the AVStream at streamIndex
328 	fStream = fContext->streams[streamIndex];
329 
330 // NOTE: Discarding other streams works for most, but not all containers,
331 // for example it does not work for the ASF demuxer. Since I don't know what
332 // other demuxers it breaks, let's just keep reading packets for unwanted
333 // streams, it just makes the _GetNextPacket() function slightly less
334 // efficient.
335 //	// Discard all other streams
336 //	for (unsigned i = 0; i < fContext->nb_streams; i++) {
337 //		if (i != (unsigned)streamIndex)
338 //			fContext->streams[i]->discard = AVDISCARD_ALL;
339 //	}
340 
341 	return B_OK;
342 }
343 
344 
345 int32
346 StreamBase::Index() const
347 {
348 	if (fStream != NULL)
349 		return fStream->index;
350 	return -1;
351 }
352 
353 
354 int32
355 StreamBase::CountStreams() const
356 {
357 	// Figure out the stream count. If the context has "AVPrograms", use
358 	// the first program (for now).
359 	// TODO: To support "programs" properly, the BMediaFile/Track API should
360 	// be extended accordingly. I guess programs are like TV channels in the
361 	// same satellite transport stream. Maybe call them "TrackGroups".
362 	if (fContext->nb_programs > 0) {
363 		// See libavformat/utils.c:dump_format()
364 		return fContext->programs[0]->nb_stream_indexes;
365 	}
366 	return fContext->nb_streams;
367 }
368 
369 
370 int32
371 StreamBase::StreamIndexFor(int32 virtualIndex) const
372 {
373 	// NOTE: See CountStreams()
374 	if (fContext->nb_programs > 0) {
375 		const AVProgram* program = fContext->programs[0];
376 		if (virtualIndex >= 0
377 			&& virtualIndex < (int32)program->nb_stream_indexes) {
378 			return program->stream_index[virtualIndex];
379 		}
380 	} else {
381 		if (virtualIndex >= 0 && virtualIndex < (int32)fContext->nb_streams)
382 			return virtualIndex;
383 	}
384 	return -1;
385 }
386 
387 
388 double
389 StreamBase::FrameRate() const
390 {
391 	// TODO: Find a way to always calculate a correct frame rate...
392 	double frameRate = 1.0;
393 	switch (fStream->codecpar->codec_type) {
394 		case AVMEDIA_TYPE_AUDIO:
395 			frameRate = (double)fStream->codecpar->sample_rate;
396 			break;
397 		case AVMEDIA_TYPE_VIDEO:
398 			if (fStream->avg_frame_rate.den && fStream->avg_frame_rate.num)
399 				frameRate = av_q2d(fStream->avg_frame_rate);
400 			else if (fStream->r_frame_rate.den && fStream->r_frame_rate.num)
401 				frameRate = av_q2d(fStream->r_frame_rate);
402 			else if (fStream->time_base.den && fStream->time_base.num)
403 				frameRate = 1 / av_q2d(fStream->time_base);
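			// (The inverse of the time base is only a rough last-resort
			// guess; many containers use a much finer time base than the
			// actual frame rate.)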
404 
405 			// TODO: Fix up interlaced video for real
406 			if (frameRate == 50.0f)
407 				frameRate = 25.0f;
408 			break;
409 		default:
410 			break;
411 	}
412 	if (frameRate <= 0.0)
413 		frameRate = 1.0;
414 	return frameRate;
415 }
416 
417 
418 bigtime_t
419 StreamBase::Duration() const
420 {
421 	// TODO: This is not working correctly for all stream types...
422 	// It seems that the calculations here are correct, because they work
423 	// for a couple of streams and are in line with the documentation, but
424 	// unfortunately, libavformat itself seems to set the time_base and
425 	// duration wrongly sometimes. :-(
426 
427 	int32 flags;
428 	fSource->GetFlags(&flags);
429 
430 	// "Mutable Size" (i.e. HTTP streams) means we can't realistically compute
431 	// a duration. So don't let ffmpeg give a (wrong) estimate in this case.
432 	if ((flags & B_MEDIA_MUTABLE_SIZE) != 0)
433 		return 0;
434 
435 	if ((int64)fStream->duration != AV_NOPTS_VALUE)
436 		return _ConvertFromStreamTimeBase(fStream->duration);
437 	else if ((int64)fContext->duration != AV_NOPTS_VALUE)
438 		return (bigtime_t)fContext->duration;
439 
440 	return 0;
441 }
442 
443 
444 status_t
445 StreamBase::Seek(uint32 flags, int64* frame, bigtime_t* time)
446 {
447 	BAutolock _(fStreamLock);
448 
449 	if (fContext == NULL || fStream == NULL)
450 		return B_NO_INIT;
451 
452 	TRACE_SEEK("StreamBase::Seek(%ld,%s%s%s%s, %lld, "
453 		"%lld)\n", VirtualIndex(),
454 		(flags & B_MEDIA_SEEK_TO_FRAME) ? " B_MEDIA_SEEK_TO_FRAME" : "",
455 		(flags & B_MEDIA_SEEK_TO_TIME) ? " B_MEDIA_SEEK_TO_TIME" : "",
456 		(flags & B_MEDIA_SEEK_CLOSEST_BACKWARD)
457 			? " B_MEDIA_SEEK_CLOSEST_BACKWARD" : "",
458 		(flags & B_MEDIA_SEEK_CLOSEST_FORWARD)
459 			? " B_MEDIA_SEEK_CLOSEST_FORWARD" : "",
460 		*frame, *time);
461 
462 	double frameRate = FrameRate();
463 	if ((flags & B_MEDIA_SEEK_TO_FRAME) != 0) {
464 		// Seeking is always based on time, initialize it when client seeks
465 		// based on frame.
466 		*time = (bigtime_t)(*frame * 1000000.0 / frameRate + 0.5);
467 	}
468 
469 	int64_t timeStamp = *time;
470 
471 	int searchFlags = AVSEEK_FLAG_BACKWARD;
472 	if ((flags & B_MEDIA_SEEK_CLOSEST_FORWARD) != 0)
473 		searchFlags = 0;
474 
475 	if (fSeekByBytes) {
476 		searchFlags |= AVSEEK_FLAG_BYTE;
477 
478 		BAutolock _(fSourceLock);
479 		int64_t fileSize;
480 
481 		if (fSource->GetSize(&fileSize) != B_OK)
482 			return B_NOT_SUPPORTED;
483 
484 		int64_t duration = Duration();
485 		if (duration == 0)
486 			return B_NOT_SUPPORTED;
487 
488 		timeStamp = int64_t(fileSize * ((double)timeStamp / duration));
489 		if ((flags & B_MEDIA_SEEK_CLOSEST_BACKWARD) != 0) {
490 			timeStamp -= 65536;
491 			if (timeStamp < 0)
492 				timeStamp = 0;
493 		}
494 
495 		bool seekAgain = true;
496 		bool seekForward = true;
497 		bigtime_t lastFoundTime = -1;
498 		int64_t closestTimeStampBackwards = -1;
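		// The loop below iteratively refines the byte position: after each
		// seek we read a packet to find out which time we actually landed on,
		// then adjust the byte offset proportionally to the remaining time
		// error, stopping once the correction would be smaller than 8 KiB.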
499 		while (seekAgain) {
500 			if (avformat_seek_file(fContext, -1, INT64_MIN, timeStamp,
501 				INT64_MAX, searchFlags) < 0) {
502 				TRACE("  avformat_seek_file() (by bytes) failed.\n");
503 				return B_ERROR;
504 			}
505 			seekAgain = false;
506 
507 			// Our last packet is toast in any case. Read the next one so we
508 			// know where we really sought.
509 			fReusePacket = false;
510 			if (_NextPacket(true) == B_OK) {
511 				while (fPacket.pts == AV_NOPTS_VALUE) {
512 					fReusePacket = false;
513 					if (_NextPacket(true) != B_OK)
514 						return B_ERROR;
515 				}
516 				if (fPacket.pos >= 0)
517 					timeStamp = fPacket.pos;
518 				bigtime_t foundTime
519 					= _ConvertFromStreamTimeBase(fPacket.pts);
520 				if (foundTime != lastFoundTime) {
521 					lastFoundTime = foundTime;
522 					if (foundTime > *time) {
523 						if (closestTimeStampBackwards >= 0) {
524 							timeStamp = closestTimeStampBackwards;
525 							seekAgain = true;
526 							seekForward = false;
527 							continue;
528 						}
529 						int64_t diff = int64_t(fileSize
530 							* ((double)(foundTime - *time) / (2 * duration)));
531 						if (diff < 8192)
532 							break;
533 						timeStamp -= diff;
534 						TRACE_SEEK("  need to seek back (%lld) (time: %.2f "
535 							"-> %.2f)\n", timeStamp, *time / 1000000.0,
536 							foundTime / 1000000.0);
537 						if (timeStamp < 0)
538 							foundTime = 0;
539 						else {
540 							seekAgain = true;
541 							continue;
542 						}
543 					} else if (seekForward && foundTime < *time - 100000) {
544 						closestTimeStampBackwards = timeStamp;
545 						int64_t diff = int64_t(fileSize
546 							* ((double)(*time - foundTime) / (2 * duration)));
547 						if (diff < 8192)
548 							break;
549 						timeStamp += diff;
550 						TRACE_SEEK("  need to seek forward (%lld) (time: "
551 							"%.2f -> %.2f)\n", timeStamp, *time / 1000000.0,
552 							foundTime / 1000000.0);
553 						if (timeStamp > duration)
554 							foundTime = duration;
555 						else {
556 							seekAgain = true;
557 							continue;
558 						}
559 					}
560 				}
561 				TRACE_SEEK("  found time: %lld -> %lld (%.2f)\n", *time,
562 					foundTime, foundTime / 1000000.0);
563 				*time = foundTime;
564 				*frame = (uint64)(*time * frameRate / 1000000LL + 0.5);
565 				TRACE_SEEK("  seeked frame: %lld\n", *frame);
566 			} else {
567 				TRACE_SEEK("  _NextPacket() failed!\n");
568 				return B_ERROR;
569 			}
570 		}
571 	} else {
572 		// We may not get a PTS from the next packet after seeking, so
573 		// we try to get an expected time from the index.
574 		int64_t streamTimeStamp = _ConvertToStreamTimeBase(*time);
575 		int index = av_index_search_timestamp(fStream, streamTimeStamp,
576 			searchFlags);
577 		if (index < 0) {
578 			TRACE("  av_index_search_timestamp() failed\n");
579 		} else {
580 			if (index > 0) {
581 				const AVIndexEntry& entry = fStream->index_entries[index];
582 				streamTimeStamp = entry.timestamp;
583 			} else {
584 				// Some demuxers use the first index entry to store some
585 				// other information, such as the total playing time.
586 				// Assume the timeStamp of the first entry is always 0.
587 				// TODO: Handle start-time offset?
588 				streamTimeStamp = 0;
589 			}
590 			bigtime_t foundTime = _ConvertFromStreamTimeBase(streamTimeStamp);
591 			bigtime_t timeDiff = foundTime > *time
592 				? foundTime - *time : *time - foundTime;
593 
594 			if (timeDiff > 1000000
595 				&& (fStreamBuildsIndexWhileReading
596 					|| index == fStream->nb_index_entries - 1)) {
597 				// If the stream is building the index on the fly while parsing
598 				// it, we only have entries in the index for positions already
599 				// decoded, i.e. we cannot seek into the future. In that case,
600 				// just assume that we can seek where we want and leave
601 				// time/frame unmodified. Since successfully seeking one time
602 				// will generate index entries for the sought-to position, we
603 				// need to remember this in fStreamBuildsIndexWhileReading,
604 				// since when seeking back there will be later index entries,
605 				// but we still want to ignore the found entry.
606 				fStreamBuildsIndexWhileReading = true;
607 				TRACE_SEEK("  Not trusting generic index entry. "
608 					"(Current count: %d)\n", fStream->nb_index_entries);
609 			} else {
610 				// If we found a reasonable time, write it into *time.
611 				// After seeking, we will try to read the sought time from
612 				// the next packet. If the packet has no PTS value, we may
613 				// still have a more accurate time from the index lookup.
614 				*time = foundTime;
615 			}
616 		}
617 
618 		if (avformat_seek_file(fContext, -1, INT64_MIN, timeStamp, INT64_MAX,
619 				searchFlags) < 0) {
620 			TRACE("  avformat_seek_file() failed.\n");
621 			// Try to fall back to av_seek_frame()
622 			timeStamp = _ConvertToStreamTimeBase(timeStamp);
623 			if (av_seek_frame(fContext, fStream->index, timeStamp,
624 				searchFlags) < 0) {
625 				TRACE("  av_seek_frame() failed as well.\n");
626 				// Fall back to seeking to the beginning by bytes
627 				timeStamp = 0;
628 				if (av_seek_frame(fContext, fStream->index, timeStamp,
629 						AVSEEK_FLAG_BYTE) < 0) {
630 					TRACE("  av_seek_frame() by bytes failed as "
631 						"well.\n");
632 					// Do not propagate error in any case. We fail if we can't
633 					// read another packet.
634 				} else
635 					*time = 0;
636 			}
637 		}
638 
639 		// Our last packet is toast in any case. Read the next one so
640 		// we know where we really sought.
641 		bigtime_t foundTime = *time;
642 
643 		fReusePacket = false;
644 		if (_NextPacket(true) == B_OK) {
645 			if (fPacket.pts != AV_NOPTS_VALUE)
646 				foundTime = _ConvertFromStreamTimeBase(fPacket.pts);
647 			else
648 				TRACE_SEEK("  no PTS in packet after seeking\n");
649 		} else
650 			TRACE_SEEK("  _NextPacket() failed!\n");
651 
652 		*time = foundTime;
653 		TRACE_SEEK("  sought time: %.2fs\n", *time / 1000000.0);
654 		*frame = (uint64)(*time * frameRate / 1000000.0 + 0.5);
655 		TRACE_SEEK("  sought frame: %lld\n", *frame);
656 	}
657 
658 	return B_OK;
659 }
660 
661 
662 status_t
663 StreamBase::GetNextChunk(const void** chunkBuffer,
664 	size_t* chunkSize, media_header* mediaHeader)
665 {
666 	BAutolock _(fStreamLock);
667 
668 	TRACE_PACKET("StreamBase::GetNextChunk()\n");
669 
670 	// Remember the stream's current DTS before reading the next packet,
671 	// since afterwards it refers to that new packet.
672 	int64 lastStreamDTS = fStream->cur_dts;
673 
674 	status_t ret = _NextPacket(false);
675 	if (ret != B_OK) {
676 		*chunkBuffer = NULL;
677 		*chunkSize = 0;
678 		return ret;
679 	}
680 
681 	// According to libavformat documentation, fPacket is valid until the
682 	// next call to av_read_frame(). This is what we want and we can share
683 	// the memory with the least overhead.
684 	*chunkBuffer = fPacket.data;
685 	*chunkSize = fPacket.size;
686 
687 	if (mediaHeader != NULL) {
688 		mediaHeader->type = fFormat.type;
689 		mediaHeader->buffer = 0;
690 		mediaHeader->destination = -1;
691 		mediaHeader->time_source = -1;
692 		mediaHeader->size_used = fPacket.size;
693 
694 		// FFmpeg recommends using the decoding time stamps as the primary
695 		// source for presentation time stamps, especially for video formats
696 		// that use frame reordering. Moreover, this way it is ensured that
697 		// the returned start times form a monotonically increasing time
698 		// series (even for videos that contain B-frames).
699 		// \see http://git.videolan.org/?p=ffmpeg.git;a=blob;f=libavformat/avformat.h;h=1e8a6294890d580cd9ebc684eaf4ce57c8413bd8;hb=9153b33a742c4e2a85ff6230aea0e75f5a8b26c2#l1623
700 		bigtime_t presentationTimeStamp;
701 		if (fPacket.dts != AV_NOPTS_VALUE)
702 			presentationTimeStamp = fPacket.dts;
703 		else if (fPacket.pts != AV_NOPTS_VALUE)
704 			presentationTimeStamp = fPacket.pts;
705 		else
706 			presentationTimeStamp = lastStreamDTS;
707 
708 		mediaHeader->start_time	= _ConvertFromStreamTimeBase(presentationTimeStamp);
709 		mediaHeader->file_pos = fPacket.pos;
710 		mediaHeader->data_offset = 0;
711 		switch (mediaHeader->type) {
712 			case B_MEDIA_RAW_AUDIO:
713 				break;
714 			case B_MEDIA_ENCODED_AUDIO:
715 				mediaHeader->u.encoded_audio.buffer_flags
716 					= (fPacket.flags & AV_PKT_FLAG_KEY) ? B_MEDIA_KEY_FRAME : 0;
717 				break;
718 			case B_MEDIA_RAW_VIDEO:
719 				mediaHeader->u.raw_video.line_count
720 					= fFormat.u.raw_video.display.line_count;
721 				break;
722 			case B_MEDIA_ENCODED_VIDEO:
723 				mediaHeader->u.encoded_video.field_flags
724 					= (fPacket.flags & AV_PKT_FLAG_KEY) ? B_MEDIA_KEY_FRAME : 0;
725 				mediaHeader->u.encoded_video.line_count
726 					= fFormat.u.encoded_video.output.display.line_count;
727 				break;
728 			default:
729 				break;
730 		}
731 	}
732 
733 //	static bigtime_t pts[2];
734 //	static bigtime_t lastPrintTime = system_time();
735 //	static BLocker printLock;
736 //	if (fStream->index < 2) {
737 //		if (fPacket.pts != AV_NOPTS_VALUE)
738 //			pts[fStream->index] = _ConvertFromStreamTimeBase(fPacket.pts);
739 //		printLock.Lock();
740 //		bigtime_t now = system_time();
741 //		if (now - lastPrintTime > 1000000) {
742 //			printf("PTS: %.4f/%.4f, diff: %.4f\r", pts[0] / 1000000.0,
743 //				pts[1] / 1000000.0, (pts[0] - pts[1]) / 1000000.0);
744 //			fflush(stdout);
745 //			lastPrintTime = now;
746 //		}
747 //		printLock.Unlock();
748 //	}
749 
750 	return B_OK;
751 }
752 
753 
754 // #pragma mark -
755 
756 
757 /*static*/ int
758 StreamBase::_Read(void* cookie, uint8* buffer, int bufferSize)
759 {
760 	StreamBase* stream = reinterpret_cast<StreamBase*>(cookie);
761 
762 	BAutolock _(stream->fSourceLock);
763 
764 	TRACE_IO("StreamBase::_Read(%p, %p, %d) position: %lld\n",
765 		cookie, buffer, bufferSize, stream->fPosition);
766 
767 	if (stream->fPosition != stream->fSource->Position()) {
768 		TRACE_IO("StreamBase::_Read fSource position: %lld\n",
769 			stream->fSource->Position());
770 
771 		off_t position
772 			= stream->fSource->Seek(stream->fPosition, SEEK_SET);
773 		if (position != stream->fPosition)
774 			return -1;
775 	}
776 
777 	ssize_t read = stream->fSource->Read(buffer, bufferSize);
778 	if (read > 0)
779 		stream->fPosition += read;
780 
781 	TRACE_IO("  read: %ld\n", read);
782 	return (int)read;
783 
784 }
785 
786 
787 /*static*/ off_t
788 StreamBase::_Seek(void* cookie, off_t offset, int whence)
789 {
790 	TRACE_IO("StreamBase::_Seek(%p, %lld, %d)\n",
791 		cookie, offset, whence);
792 
793 	StreamBase* stream = reinterpret_cast<StreamBase*>(cookie);
794 
795 	BAutolock _(stream->fSourceLock);
796 
797 	// Support for special file size retrieval API without seeking
798 	// anywhere:
799 	if (whence == AVSEEK_SIZE) {
800 		off_t size;
801 		if (stream->fSource->GetSize(&size) == B_OK)
802 			return size;
803 		return -1;
804 	}
805 
806 	// If not requested to seek to an absolute position, we need to
807 	// confirm that the stream is currently at the position that we
808 	// think it is.
809 	if (whence != SEEK_SET
810 		&& stream->fPosition != stream->fSource->Position()) {
811 		off_t position
812 			= stream->fSource->Seek(stream->fPosition, SEEK_SET);
813 		if (position != stream->fPosition)
814 			return -1;
815 	}
816 
817 	off_t position = stream->fSource->Seek(offset, whence);
818 	TRACE_IO("  position: %lld\n", position);
819 	if (position < 0)
820 		return -1;
821 
822 	stream->fPosition = position;
823 
824 	return position;
825 }
826 
827 
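// Fetch the next packet that belongs to this stream into fPacket. If "reuse"
// is true, the packet is kept around so that the following call returns the
// very same packet again; Seek() and Stream::Init() use this to peek at a
// packet without consuming it.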
828 status_t
829 StreamBase::_NextPacket(bool reuse)
830 {
831 	TRACE_PACKET("StreamBase::_NextPacket(%d)\n", reuse);
832 
833 	if (fReusePacket) {
834 		// The last packet was marked for reuse, so we keep using it.
835 		TRACE_PACKET("  re-using last packet\n");
836 		fReusePacket = reuse;
837 		return B_OK;
838 	}
839 
840 	av_packet_unref(&fPacket);
841 
842 	while (true) {
843 		if (av_read_frame(fContext, &fPacket) < 0) {
844 			// NOTE: Even though we may get the error for a different stream,
845 			// av_read_frame() is not going to be successful from here on, so
846 			// it doesn't matter
847 			fReusePacket = false;
848 			return B_LAST_BUFFER_ERROR;
849 		}
850 
851 		if (fPacket.stream_index == Index())
852 			break;
853 
854 		// This is a packet from another stream, ignore it.
855 		av_packet_unref(&fPacket);
856 	}
857 
858 	// Mark this packet with the new reuse flag.
859 	fReusePacket = reuse;
860 	return B_OK;
861 }
862 
863 
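// Convert between media time in microseconds (bigtime_t) and the stream's
// own time base. Illustrative example, assuming time_base = 1/90000 and
// start_time = 0: a media time of 1 s (1000000 µs) maps to the stream
// timestamp 90000, and converting 90000 back yields 1000000 µs again.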
864 int64_t
865 StreamBase::_ConvertToStreamTimeBase(bigtime_t time) const
866 {
867 	int64 timeStamp = int64_t((double)time * fStream->time_base.den
868 		/ (1000000.0 * fStream->time_base.num) + 0.5);
869 	if (fStream->start_time != AV_NOPTS_VALUE)
870 		timeStamp += fStream->start_time;
871 	return timeStamp;
872 }
873 
874 
875 bigtime_t
876 StreamBase::_ConvertFromStreamTimeBase(int64_t time) const
877 {
878 	if (fStream->start_time != AV_NOPTS_VALUE)
879 		time -= fStream->start_time;
880 
881 	return bigtime_t(1000000.0 * time * fStream->time_base.num
882 		/ fStream->time_base.den + 0.5);
883 }
884 
885 
886 // #pragma mark - AVFormatReader::Stream
887 
888 
889 class AVFormatReader::Stream : public StreamBase {
890 public:
891 								Stream(BMediaIO* source,
892 									BLocker* streamLock);
893 	virtual						~Stream();
894 
895 	// Set up this stream to point to the AVStream at the given streamIndex.
896 	// This will also initialize the media_format.
897 	virtual	status_t			Init(int32 streamIndex);
898 
899 			status_t			GetMetaData(BMessage* data);
900 
901 	// Support for AVFormatReader
902 			status_t			GetStreamInfo(int64* frameCount,
903 									bigtime_t* duration, media_format* format,
904 									const void** infoBuffer,
905 									size_t* infoSize) const;
906 
907 			status_t			FindKeyFrame(uint32 flags, int64* frame,
908 									bigtime_t* time) const;
909 	virtual	status_t			Seek(uint32 flags, int64* frame,
910 									bigtime_t* time);
911 
912 private:
913 	mutable	BLocker				fLock;
914 
915 			struct KeyframeInfo {
916 				bigtime_t		requestedTime;
917 				int64			requestedFrame;
918 				bigtime_t		reportedTime;
919 				int64			reportedFrame;
920 				uint32			seekFlags;
921 			};
922 	mutable	KeyframeInfo		fLastReportedKeyframe;
923 	mutable	StreamBase*			fGhostStream;
924 };
925 
926 
927 
928 AVFormatReader::Stream::Stream(BMediaIO* source, BLocker* streamLock)
929 	:
930 	StreamBase(source, streamLock, &fLock),
931 	fLock("stream lock"),
932 	fGhostStream(NULL)
933 {
934 	fLastReportedKeyframe.requestedTime = 0;
935 	fLastReportedKeyframe.requestedFrame = 0;
936 	fLastReportedKeyframe.reportedTime = 0;
937 	fLastReportedKeyframe.reportedFrame = 0;
938 }
939 
940 
941 AVFormatReader::Stream::~Stream()
942 {
943 	delete fGhostStream;
944 }
945 
946 
947 status_t
948 AVFormatReader::Stream::Init(int32 virtualIndex)
949 {
950 	TRACE("AVFormatReader::Stream::Init(%ld)\n", virtualIndex);
951 
952 	status_t ret = StreamBase::Init(virtualIndex);
953 	if (ret != B_OK)
954 		return ret;
955 
956 	// Get a pointer to the AVCodecParameters for the stream at streamIndex.
957 	AVCodecParameters* codecParams = fStream->codecpar;
958 
959 	// initialize the media_format for this stream
960 	media_format* format = &fFormat;
961 	format->Clear();
962 
963 	media_format_description description;
964 
965 	// Set format family and type depending on codec_type of the stream.
966 	switch (codecParams->codec_type) {
967 		case AVMEDIA_TYPE_AUDIO:
968 			if ((codecParams->codec_id >= AV_CODEC_ID_PCM_S16LE)
969 				&& (codecParams->codec_id <= AV_CODEC_ID_PCM_U8)) {
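				// (This assumes the PCM codec IDs from AV_CODEC_ID_PCM_S16LE
				// to AV_CODEC_ID_PCM_U8 form a contiguous range in FFmpeg's
				// AVCodecID enum; only the variants handled in the switch
				// below are actually accepted.)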
970 				TRACE("  raw audio\n");
971 				format->type = B_MEDIA_RAW_AUDIO;
972 				description.family = B_ANY_FORMAT_FAMILY;
973 				// This will then apparently be handled by the RawDecoder that
974 				// is built into BMediaTrack.
975 			} else {
976 				TRACE("  encoded audio\n");
977 				format->type = B_MEDIA_ENCODED_AUDIO;
978 				description.family = B_MISC_FORMAT_FAMILY;
979 				description.u.misc.file_format = 'ffmp';
980 			}
981 			break;
982 		case AVMEDIA_TYPE_VIDEO:
983 			TRACE("  encoded video\n");
984 			format->type = B_MEDIA_ENCODED_VIDEO;
985 			description.family = B_MISC_FORMAT_FAMILY;
986 			description.u.misc.file_format = 'ffmp';
987 			break;
988 		default:
989 			TRACE("  unknown type\n");
990 			format->type = B_MEDIA_UNKNOWN_TYPE;
991 			return B_ERROR;
992 			break;
993 	}
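	// For everything that is not raw PCM audio, the format description uses
	// B_MISC_FORMAT_FAMILY with 'ffmp' as the file_format and (further below)
	// the FFmpeg codec ID as the codec. This appears to be the convention the
	// FFmpeg decoder add-on registers with BMediaFormats, which lets
	// GetFormatFor() find a matching format for this stream.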
994 
995 	if (format->type == B_MEDIA_RAW_AUDIO) {
996 		// We cannot describe all raw-audio formats; some are unsupported.
997 		switch (codecParams->codec_id) {
998 			case AV_CODEC_ID_PCM_S16LE:
999 				format->u.raw_audio.format
1000 					= media_raw_audio_format::B_AUDIO_SHORT;
1001 				format->u.raw_audio.byte_order
1002 					= B_MEDIA_LITTLE_ENDIAN;
1003 				break;
1004 			case AV_CODEC_ID_PCM_S16BE:
1005 				format->u.raw_audio.format
1006 					= media_raw_audio_format::B_AUDIO_SHORT;
1007 				format->u.raw_audio.byte_order
1008 					= B_MEDIA_BIG_ENDIAN;
1009 				break;
1010 			case AV_CODEC_ID_PCM_U16LE:
1011 //				format->u.raw_audio.format
1012 //					= media_raw_audio_format::B_AUDIO_USHORT;
1013 //				format->u.raw_audio.byte_order
1014 //					= B_MEDIA_LITTLE_ENDIAN;
1015 				return B_NOT_SUPPORTED;
1016 				break;
1017 			case AV_CODEC_ID_PCM_U16BE:
1018 //				format->u.raw_audio.format
1019 //					= media_raw_audio_format::B_AUDIO_USHORT;
1020 //				format->u.raw_audio.byte_order
1021 //					= B_MEDIA_BIG_ENDIAN;
1022 				return B_NOT_SUPPORTED;
1023 				break;
1024 			case AV_CODEC_ID_PCM_S8:
1025 				format->u.raw_audio.format
1026 					= media_raw_audio_format::B_AUDIO_CHAR;
1027 				break;
1028 			case AV_CODEC_ID_PCM_U8:
1029 				format->u.raw_audio.format
1030 					= media_raw_audio_format::B_AUDIO_UCHAR;
1031 				break;
1032 			default:
1033 				return B_NOT_SUPPORTED;
1034 				break;
1035 		}
1036 	} else {
1037 		if (description.family == B_MISC_FORMAT_FAMILY)
1038 			description.u.misc.codec = codecParams->codec_id;
1039 
1040 		BMediaFormats formats;
1041 		status_t status = formats.GetFormatFor(description, format);
1042 		if (status < B_OK)
1043 			TRACE("  formats.GetFormatFor() error: %s\n", strerror(status));
1044 
1045 		format->user_data_type = B_CODEC_TYPE_INFO;
1046 		*(uint32*)format->user_data = codecParams->codec_tag;
1047 		format->user_data[4] = 0;
1048 	}
1049 
1050 	format->require_flags = 0;
1051 	format->deny_flags = B_MEDIA_MAUI_UNDEFINED_FLAGS;
1052 
1053 	switch (format->type) {
1054 		case B_MEDIA_RAW_AUDIO:
1055 			format->u.raw_audio.frame_rate = (float)codecParams->sample_rate;
1056 			format->u.raw_audio.channel_count = codecParams->channels;
1057 			format->u.raw_audio.channel_mask = codecParams->channel_layout;
1058 			ConvertAVSampleFormatToRawAudioFormat(
1059 				(AVSampleFormat)codecParams->format,
1060 				format->u.raw_audio.format);
1061 			format->u.raw_audio.buffer_size = 0;
1062 
1063 			// Read one packet and mark it for later re-use. (So our first
1064 			// GetNextChunk() call does not read another packet.)
1065 			if (_NextPacket(true) == B_OK) {
1066 				TRACE("  successfully determined audio buffer size: %d\n",
1067 					fPacket.size);
1068 				format->u.raw_audio.buffer_size = fPacket.size;
1069 			}
1070 			break;
1071 
1072 		case B_MEDIA_ENCODED_AUDIO:
1073 			format->u.encoded_audio.bit_rate = codecParams->bit_rate;
1074 			format->u.encoded_audio.frame_size = codecParams->frame_size;
1075 			// Fill in some info about possible output format
1076 			format->u.encoded_audio.output
1077 				= media_multi_audio_format::wildcard;
1078 			format->u.encoded_audio.output.frame_rate
1079 				= (float)codecParams->sample_rate;
1080 			// Channel layout bits match in Be API and FFmpeg.
1081 			format->u.encoded_audio.output.channel_count
1082 				= codecParams->channels;
1083 			format->u.encoded_audio.multi_info.channel_mask
1084 				= codecParams->channel_layout;
1085 			format->u.encoded_audio.output.byte_order
1086 				= avformat_to_beos_byte_order(
1087 					(AVSampleFormat)codecParams->format);
1088 
1089 			ConvertAVSampleFormatToRawAudioFormat(
1090 					(AVSampleFormat)codecParams->format,
1091 				format->u.encoded_audio.output.format);
1092 
1093 			if (codecParams->block_align > 0) {
1094 				format->u.encoded_audio.output.buffer_size
1095 					= codecParams->block_align;
1096 			} else {
1097 				format->u.encoded_audio.output.buffer_size
1098 					= codecParams->frame_size * codecParams->channels
1099 						* (format->u.encoded_audio.output.format
1100 							& media_raw_audio_format::B_AUDIO_SIZE_MASK);
1101 			}
1102 			break;
1103 
1104 		case B_MEDIA_ENCODED_VIDEO:
1105 // TODO: Specifying any of these seems to throw off the format matching
1106 // later on.
1107 //			format->u.encoded_video.avg_bit_rate = codecParams->bit_rate;
1108 //			format->u.encoded_video.max_bit_rate = codecParams->bit_rate
1109 //				+ codecParams->bit_rate_tolerance;
1110 
1111 //			format->u.encoded_video.encoding
1112 //				= media_encoded_video_format::B_ANY;
1113 
1114 //			format->u.encoded_video.frame_size = 1;
1115 //			format->u.encoded_video.forward_history = 0;
1116 //			format->u.encoded_video.backward_history = 0;
1117 
1118 			format->u.encoded_video.output.field_rate = FrameRate();
1119 			format->u.encoded_video.output.interlace = 1;
1120 
1121 			format->u.encoded_video.output.first_active = 0;
1122 			format->u.encoded_video.output.last_active
1123 				= codecParams->height - 1;
1124 				// TODO: Maybe libavformat actually provides that info
1125 				// somewhere...
1126 			format->u.encoded_video.output.orientation
1127 				= B_VIDEO_TOP_LEFT_RIGHT;
1128 
1129 			ConvertAVCodecParametersToVideoAspectWidthAndHeight(*codecParams,
1130 				format->u.encoded_video.output.pixel_width_aspect,
1131 				format->u.encoded_video.output.pixel_height_aspect);
1132 
1133 			format->u.encoded_video.output.display.format
1134 				= pixfmt_to_colorspace(codecParams->format);
1135 			format->u.encoded_video.output.display.line_width
1136 				= codecParams->width;
1137 			format->u.encoded_video.output.display.line_count
1138 				= codecParams->height;
1139 			TRACE("  width/height: %d/%d\n", codecParams->width,
1140 				codecParams->height);
1141 			format->u.encoded_video.output.display.bytes_per_row = 0;
1142 			format->u.encoded_video.output.display.pixel_offset = 0;
1143 			format->u.encoded_video.output.display.line_offset = 0;
1144 			format->u.encoded_video.output.display.flags = 0; // TODO
1145 
1146 			break;
1147 
1148 		default:
1149 			// This is an unknown format to us.
1150 			break;
1151 	}
1152 
1153 	// Add the meta data, if any
1154 	if (codecParams->extradata_size > 0) {
1155 		format->SetMetaData(codecParams->extradata,
1156 			codecParams->extradata_size);
1157 		TRACE("  extradata: %p\n", format->MetaData());
1158 	}
1159 
1160 	TRACE("  extradata_size: %d\n", codecParams->extradata_size);
1161 //	TRACE("  intra_matrix: %p\n", codecParams->intra_matrix);
1162 //	TRACE("  inter_matrix: %p\n", codecParams->inter_matrix);
1163 //	TRACE("  get_buffer(): %p\n", codecParams->get_buffer);
1164 //	TRACE("  release_buffer(): %p\n", codecParams->release_buffer);
1165 
1166 #ifdef TRACE_AVFORMAT_READER
1167 	char formatString[512];
1168 	if (string_for_format(*format, formatString, sizeof(formatString)))
1169 		TRACE("  format: %s\n", formatString);
1170 
1171 	uint32 encoding = format->Encoding();
1172 	TRACE("  encoding '%.4s'\n", (char*)&encoding);
1173 #endif
1174 
1175 	return B_OK;
1176 }
1177 
1178 
1179 status_t
1180 AVFormatReader::Stream::GetMetaData(BMessage* data)
1181 {
1182 	BAutolock _(&fLock);
1183 
1184 	avdictionary_to_message(fStream->metadata, data);
1185 
1186 	return B_OK;
1187 }
1188 
1189 
1190 status_t
1191 AVFormatReader::Stream::GetStreamInfo(int64* frameCount,
1192 	bigtime_t* duration, media_format* format, const void** infoBuffer,
1193 	size_t* infoSize) const
1194 {
1195 	BAutolock _(&fLock);
1196 
1197 	TRACE("AVFormatReader::Stream::GetStreamInfo(%ld)\n",
1198 		VirtualIndex());
1199 
1200 	double frameRate = FrameRate();
1201 	TRACE("  frameRate: %.4f\n", frameRate);
1202 
1203 	#ifdef TRACE_AVFORMAT_READER
1204 	if (fStream->start_time != AV_NOPTS_VALUE) {
1205 		bigtime_t startTime = _ConvertFromStreamTimeBase(fStream->start_time);
1206 		TRACE("  start_time: %lld or %.5fs\n", startTime,
1207 			startTime / 1000000.0);
1208 		// TODO: Handle start time in FindKeyFrame() and Seek()?!
1209 	}
1210 	#endif // TRACE_AVFORMAT_READER
1211 
1212 	*duration = Duration();
1213 
1214 	TRACE("  duration: %lld or %.5fs\n", *duration, *duration / 1000000.0);
1215 
1216 	#if 0
1217 	if (fStream->nb_index_entries > 0) {
1218 		TRACE("  dump of index entries:\n");
1219 		int count = 5;
1220 		int firstEntriesCount = min_c(fStream->nb_index_entries, count);
1221 		int i = 0;
1222 		for (; i < firstEntriesCount; i++) {
1223 			AVIndexEntry& entry = fStream->index_entries[i];
1224 			bigtime_t timeGlobal = entry.timestamp;
1225 			bigtime_t timeNative = _ConvertFromStreamTimeBase(timeGlobal);
1226 			TRACE("    [%d] native: %.5fs global: %.5fs\n", i,
1227 				timeNative / 1000000.0f, timeGlobal / 1000000.0f);
1228 		}
1229 		if (fStream->nb_index_entries - count > i) {
1230 			i = fStream->nb_index_entries - count;
1231 			TRACE("    ...\n");
1232 			for (; i < fStream->nb_index_entries; i++) {
1233 				AVIndexEntry& entry = fStream->index_entries[i];
1234 				bigtime_t timeGlobal = entry.timestamp;
1235 				bigtime_t timeNative = _ConvertFromStreamTimeBase(timeGlobal);
1236 				TRACE("    [%d] native: %.5fs global: %.5fs\n", i,
1237 					timeNative / 1000000.0f, timeGlobal / 1000000.0f);
1238 			}
1239 		}
1240 	}
1241 	#endif
1242 
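	// For audio streams, nb_frames counts encoded frames and frame_size is
	// the number of samples per frame, so the product is a sample count. For
	// video, frame_size is typically 0, in which case we fall back to
	// estimating the frame count from duration and frame rate below.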
1243 	*frameCount = fStream->nb_frames * fStream->codecpar->frame_size;
1244 	if (*frameCount == 0) {
1245 		// Calculate from duration and frame rate
1246 		*frameCount = (int64)(*duration * frameRate / 1000000LL);
1247 		TRACE("  frameCount calculated: %lld, from context: %lld\n",
1248 			*frameCount, fStream->nb_frames);
1249 	} else
1250 		TRACE("  frameCount: %lld\n", *frameCount);
1251 
1252 	*format = fFormat;
1253 
1254 	*infoBuffer = fStream->codecpar->extradata;
1255 	*infoSize = fStream->codecpar->extradata_size;
1256 
1257 	return B_OK;
1258 }
1259 
1260 
1261 status_t
1262 AVFormatReader::Stream::FindKeyFrame(uint32 flags, int64* frame,
1263 	bigtime_t* time) const
1264 {
1265 	BAutolock _(&fLock);
1266 
1267 	if (fContext == NULL || fStream == NULL)
1268 		return B_NO_INIT;
1269 
1270 	TRACE_FIND("AVFormatReader::Stream::FindKeyFrame(%ld,%s%s%s%s, "
1271 		"%lld, %lld)\n", VirtualIndex(),
1272 		(flags & B_MEDIA_SEEK_TO_FRAME) ? " B_MEDIA_SEEK_TO_FRAME" : "",
1273 		(flags & B_MEDIA_SEEK_TO_TIME) ? " B_MEDIA_SEEK_TO_TIME" : "",
1274 		(flags & B_MEDIA_SEEK_CLOSEST_BACKWARD)
1275 			? " B_MEDIA_SEEK_CLOSEST_BACKWARD" : "",
1276 		(flags & B_MEDIA_SEEK_CLOSEST_FORWARD)
1277 			? " B_MEDIA_SEEK_CLOSEST_FORWARD" : "",
1278 		*frame, *time);
1279 
1280 	bool inLastRequestedRange = false;
1281 	if ((flags & B_MEDIA_SEEK_TO_FRAME) != 0) {
1282 		if (fLastReportedKeyframe.reportedFrame
1283 			<= fLastReportedKeyframe.requestedFrame) {
1284 			inLastRequestedRange
1285 				= *frame >= fLastReportedKeyframe.reportedFrame
1286 					&& *frame <= fLastReportedKeyframe.requestedFrame;
1287 		} else {
1288 			inLastRequestedRange
1289 				= *frame >= fLastReportedKeyframe.requestedFrame
1290 					&& *frame <= fLastReportedKeyframe.reportedFrame;
1291 		}
1292 	} else if ((flags & B_MEDIA_SEEK_TO_FRAME) == 0) {
1293 		if (fLastReportedKeyframe.reportedTime
1294 			<= fLastReportedKeyframe.requestedTime) {
1295 			inLastRequestedRange
1296 				= *time >= fLastReportedKeyframe.reportedTime
1297 					&& *time <= fLastReportedKeyframe.requestedTime;
1298 		} else {
1299 			inLastRequestedRange
1300 				= *time >= fLastReportedKeyframe.requestedTime
1301 					&& *time <= fLastReportedKeyframe.reportedTime;
1302 		}
1303 	}
1304 
1305 	if (inLastRequestedRange) {
1306 		*frame = fLastReportedKeyframe.reportedFrame;
1307 		*time = fLastReportedKeyframe.reportedTime;
1308 		TRACE_FIND("  same as last reported keyframe\n");
1309 		return B_OK;
1310 	}
1311 
1312 	double frameRate = FrameRate();
1313 	if ((flags & B_MEDIA_SEEK_TO_FRAME) != 0)
1314 		*time = (bigtime_t)(*frame * 1000000.0 / frameRate + 0.5);
1315 
1316 	status_t ret;
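	// The keyframe lookup is done on a separate "ghost" StreamBase that
	// shares the BMediaIO source but has its own demuxing context, so seeking
	// around here does not disturb this stream's current read position.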
1317 	if (fGhostStream == NULL) {
1318 		BAutolock _(fSourceLock);
1319 
1320 		fGhostStream = new(std::nothrow) StreamBase(fSource, fSourceLock,
1321 			&fLock);
1322 		if (fGhostStream == NULL) {
1323 			TRACE("  failed to allocate ghost stream\n");
1324 			return B_NO_MEMORY;
1325 		}
1326 
1327 		ret = fGhostStream->Open();
1328 		if (ret != B_OK) {
1329 			TRACE("  ghost stream failed to open: %s\n", strerror(ret));
1330 			return B_ERROR;
1331 		}
1332 
1333 		ret = fGhostStream->Init(fVirtualIndex);
1334 		if (ret != B_OK) {
1335 			TRACE("  ghost stream failed to init: %s\n", strerror(ret));
1336 			return B_ERROR;
1337 		}
1338 	}
1339 	fLastReportedKeyframe.requestedFrame = *frame;
1340 	fLastReportedKeyframe.requestedTime = *time;
1341 	fLastReportedKeyframe.seekFlags = flags;
1342 
1343 	ret = fGhostStream->Seek(flags, frame, time);
1344 	if (ret != B_OK) {
1345 		TRACE("  ghost stream failed to seek: %s\n", strerror(ret));
1346 		return B_ERROR;
1347 	}
1348 
1349 	fLastReportedKeyframe.reportedFrame = *frame;
1350 	fLastReportedKeyframe.reportedTime = *time;
1351 
1352 	TRACE_FIND("  found time: %.2fs\n", *time / 1000000.0);
1353 	if ((flags & B_MEDIA_SEEK_TO_FRAME) != 0) {
1354 		*frame = int64_t(*time * FrameRate() / 1000000.0 + 0.5);
1355 		TRACE_FIND("  found frame: %lld\n", *frame);
1356 	}
1357 
1358 	return B_OK;
1359 }
1360 
1361 
1362 status_t
1363 AVFormatReader::Stream::Seek(uint32 flags, int64* frame, bigtime_t* time)
1364 {
1365 	BAutolock _(&fLock);
1366 
1367 	if (fContext == NULL || fStream == NULL)
1368 		return B_NO_INIT;
1369 
1370 	// Put the old requested values into frame/time, since we already know
1371 	// that the sought frame/time will then match the reported values.
1372 	// TODO: Will not work if client changes seek flags (from backwards to
1373 	// forward or vice versa)!!
1374 	bool inLastRequestedRange = false;
1375 	if ((flags & B_MEDIA_SEEK_TO_FRAME) != 0) {
1376 		if (fLastReportedKeyframe.reportedFrame
1377 			<= fLastReportedKeyframe.requestedFrame) {
1378 			inLastRequestedRange
1379 				= *frame >= fLastReportedKeyframe.reportedFrame
1380 					&& *frame <= fLastReportedKeyframe.requestedFrame;
1381 		} else {
1382 			inLastRequestedRange
1383 				= *frame >= fLastReportedKeyframe.requestedFrame
1384 					&& *frame <= fLastReportedKeyframe.reportedFrame;
1385 		}
1386 	} else if ((flags & B_MEDIA_SEEK_TO_FRAME) == 0) {
1387 		if (fLastReportedKeyframe.reportedTime
1388 			<= fLastReportedKeyframe.requestedTime) {
1389 			inLastRequestedRange
1390 				= *time >= fLastReportedKeyframe.reportedTime
1391 					&& *time <= fLastReportedKeyframe.requestedTime;
1392 		} else {
1393 			inLastRequestedRange
1394 				= *time >= fLastReportedKeyframe.requestedTime
1395 					&& *time <= fLastReportedKeyframe.reportedTime;
1396 		}
1397 	}
1398 
1399 	if (inLastRequestedRange) {
1400 		*frame = fLastReportedKeyframe.requestedFrame;
1401 		*time = fLastReportedKeyframe.requestedTime;
1402 		flags = fLastReportedKeyframe.seekFlags;
1403 	}
1404 
1405 	return StreamBase::Seek(flags, frame, time);
1406 }
1407 
1408 
1409 // #pragma mark - AVFormatReader
1410 
1411 
1412 AVFormatReader::AVFormatReader()
1413 	:
1414 	fCopyright(""),
1415 	fStreams(NULL),
1416 	fSourceLock("source I/O lock")
1417 {
1418 	TRACE("AVFormatReader::AVFormatReader\n");
1419 }
1420 
1421 
1422 AVFormatReader::~AVFormatReader()
1423 {
1424 	TRACE("AVFormatReader::~AVFormatReader\n");
1425 	if (fStreams != NULL) {
1426 		// The client was supposed to call FreeCookie() on all
1427 		// allocated streams. Deleting the first stream is always
1428 		// prevented; we delete the other ones just in case.
1429 		int32 count = fStreams[0]->CountStreams();
1430 		for (int32 i = 0; i < count; i++)
1431 			delete fStreams[i];
1432 		delete[] fStreams;
1433 	}
1434 }
1435 
1436 
1437 // #pragma mark -
1438 
1439 
1440 const char*
1441 AVFormatReader::Copyright()
1442 {
1443 	if (fCopyright.Length() <= 0) {
1444 		BMessage message;
1445 		if (GetMetaData(&message) == B_OK)
1446 			message.FindString("copyright", &fCopyright);
1447 	}
1448 	return fCopyright.String();
1449 }
1450 
1451 
1452 status_t
1453 AVFormatReader::Sniff(int32* _streamCount)
1454 {
1455 	TRACE("AVFormatReader::Sniff\n");
1456 
1457 	BMediaIO* source = dynamic_cast<BMediaIO*>(Source());
1458 	if (source == NULL) {
1459 		TRACE("  not a BMediaIO, but we need it to be one.\n");
1460 		return B_NOT_SUPPORTED;
1461 	}
1462 
1463 	Stream* stream = new(std::nothrow) Stream(source,
1464 		&fSourceLock);
1465 	if (stream == NULL) {
1466 		ERROR("AVFormatReader::Sniff() - failed to allocate Stream\n");
1467 		return B_NO_MEMORY;
1468 	}
1469 
1470 	ObjectDeleter<Stream> streamDeleter(stream);
1471 
1472 	status_t ret = stream->Open();
1473 	if (ret != B_OK) {
1474 		TRACE("  failed to detect stream: %s\n", strerror(ret));
1475 		return ret;
1476 	}
1477 
1478 	delete[] fStreams;
1479 	fStreams = NULL;
1480 
1481 	int32 streamCount = stream->CountStreams();
1482 	if (streamCount == 0) {
1483 		TRACE("  failed to detect any streams\n");
1484 		return B_ERROR;
1485 	}
1486 
1487 	fStreams = new(std::nothrow) Stream*[streamCount];
1488 	if (fStreams == NULL) {
1489 		ERROR("AVFormatReader::Sniff() - failed to allocate streams\n");
1490 		return B_NO_MEMORY;
1491 	}
1492 
1493 	memset(fStreams, 0, sizeof(Stream*) * streamCount);
1494 	fStreams[0] = stream;
1495 	streamDeleter.Detach();
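	// The stream opened for sniffing is kept as fStreams[0]; it provides the
	// shared AVFormatContext used by GetFileFormatInfo() and GetMetaData(),
	// and AllocateCookie(0) will reuse it as the first cookie.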
1496 
1497 	#ifdef TRACE_AVFORMAT_READER
1498 	av_dump_format(const_cast<AVFormatContext*>(stream->Context()), 0, "", 0);
1499 	#endif
1500 
1501 	if (_streamCount != NULL)
1502 		*_streamCount = streamCount;
1503 
1504 	return B_OK;
1505 }
1506 
1507 
1508 void
1509 AVFormatReader::GetFileFormatInfo(media_file_format* mff)
1510 {
1511 	TRACE("AVFormatReader::GetFileFormatInfo\n");
1512 
1513 	if (fStreams == NULL)
1514 		return;
1515 
1516 	// The first cookie is always there!
1517 	const AVFormatContext* context = fStreams[0]->Context();
1518 
1519 	if (context == NULL || context->iformat == NULL) {
1520 		TRACE("  no AVFormatContext or AVInputFormat!\n");
1521 		return;
1522 	}
1523 
1524 	const media_file_format* format = demuxer_format_for(context->iformat);
1525 
1526 	mff->capabilities = media_file_format::B_READABLE
1527 		| media_file_format::B_KNOWS_ENCODED_VIDEO
1528 		| media_file_format::B_KNOWS_ENCODED_AUDIO
1529 		| media_file_format::B_IMPERFECTLY_SEEKABLE;
1530 
1531 	if (format != NULL) {
1532 		mff->family = format->family;
1533 	} else {
1534 		TRACE("  no DemuxerFormat for AVInputFormat!\n");
1535 		mff->family = B_MISC_FORMAT_FAMILY;
1536 	}
1537 
1538 	mff->version = 100;
1539 
1540 	if (format != NULL) {
1541 		strcpy(mff->mime_type, format->mime_type);
1542 	} else {
1543 		// TODO: Would be nice to be able to provide this from AVInputFormat,
1544 		// maybe by extending the FFmpeg code itself (all demuxers).
1545 		strcpy(mff->mime_type, "");
1546 	}
1547 
1548 	if (context->iformat->extensions != NULL)
1549 		strcpy(mff->file_extension, context->iformat->extensions);
1550 	else {
1551 		TRACE("  no file extensions for AVInputFormat.\n");
1552 		strcpy(mff->file_extension, "");
1553 	}
1554 
1555 	if (context->iformat->name != NULL)
1556 		strcpy(mff->short_name,  context->iformat->name);
1557 	else {
1558 		TRACE("  no short name for AVInputFormat.\n");
1559 		strcpy(mff->short_name, "");
1560 	}
1561 
1562 	if (context->iformat->long_name != NULL)
1563 		sprintf(mff->pretty_name, "%s (FFmpeg)", context->iformat->long_name);
1564 	else {
1565 		if (format != NULL)
1566 			sprintf(mff->pretty_name, "%s (FFmpeg)", format->pretty_name);
1567 		else
1568 			strcpy(mff->pretty_name, "Unknown (FFmpeg)");
1569 	}
1570 }
1571 
1572 
1573 status_t
1574 AVFormatReader::GetMetaData(BMessage* _data)
1575 {
1576 	// The first cookie is always there!
1577 	const AVFormatContext* context = fStreams[0]->Context();
1578 
1579 	if (context == NULL)
1580 		return B_NO_INIT;
1581 
1582 	avdictionary_to_message(context->metadata, _data);
1583 
1584 	// Add chapter info
1585 	for (unsigned i = 0; i < context->nb_chapters; i++) {
1586 		AVChapter* chapter = context->chapters[i];
1587 		BMessage chapterData;
1588 		chapterData.AddInt64("start", bigtime_t(1000000.0
1589 			* chapter->start * chapter->time_base.num
1590 			/ chapter->time_base.den + 0.5));
1591 		chapterData.AddInt64("end", bigtime_t(1000000.0
1592 			* chapter->end * chapter->time_base.num
1593 			/ chapter->time_base.den + 0.5));
1594 
1595 		avdictionary_to_message(chapter->metadata, &chapterData);
1596 		_data->AddMessage("be:chapter", &chapterData);
1597 	}
1598 
1599 	// Add program info
1600 	for (unsigned i = 0; i < context->nb_programs; i++) {
1601 		BMessage programData;
1602 		avdictionary_to_message(context->programs[i]->metadata, &programData);
1603 		_data->AddMessage("be:program", &programData);
1604 	}
1605 
1606 	return B_OK;
1607 }
1608 
1609 
1610 // #pragma mark -
1611 
1612 
1613 status_t
1614 AVFormatReader::AllocateCookie(int32 streamIndex, void** _cookie)
1615 {
1616 	TRACE("AVFormatReader::AllocateCookie(%ld)\n", streamIndex);
1617 
1618 	BAutolock _(fSourceLock);
1619 
1620 	if (fStreams == NULL)
1621 		return B_NO_INIT;
1622 
1623 	if (streamIndex < 0 || streamIndex >= fStreams[0]->CountStreams())
1624 		return B_BAD_INDEX;
1625 
1626 	if (_cookie == NULL)
1627 		return B_BAD_VALUE;
1628 
1629 	Stream* cookie = fStreams[streamIndex];
1630 	if (cookie == NULL) {
1631 		// Allocate the cookie
1632 		BMediaIO* source = dynamic_cast<BMediaIO*>(Source());
1633 		if (source == NULL) {
1634 			TRACE("  not a BMediaIO, but we need it to be one.\n");
1635 			return B_NOT_SUPPORTED;
1636 		}
1637 
1638 		cookie = new(std::nothrow) Stream(source, &fSourceLock);
1639 		if (cookie == NULL) {
1640 			ERROR("AVFormatReader::AllocateCookie() - failed to allocate "
1641 				"Stream\n");
1642 			return B_NO_MEMORY;
1643 		}
1644 
1645 		status_t ret = cookie->Open();
1646 		if (ret != B_OK) {
1647 			TRACE("  stream failed to open: %s\n", strerror(ret));
1648 			delete cookie;
1649 			return ret;
1650 		}
1651 	}
1652 
1653 	status_t ret = cookie->Init(streamIndex);
1654 	if (ret != B_OK) {
1655 		TRACE("  stream failed to initialize: %s\n", strerror(ret));
1656 		// NOTE: Never delete the first stream!
1657 		if (streamIndex != 0)
1658 			delete cookie;
1659 		return ret;
1660 	}
1661 
1662 	fStreams[streamIndex] = cookie;
1663 	*_cookie = cookie;
1664 
1665 	return B_OK;
1666 }
1667 
1668 
1669 status_t
1670 AVFormatReader::FreeCookie(void *_cookie)
1671 {
1672 	BAutolock _(fSourceLock);
1673 
1674 	Stream* cookie = reinterpret_cast<Stream*>(_cookie);
1675 
1676 	// NOTE: Never delete the first cookie!
1677 	if (cookie != NULL && cookie->VirtualIndex() != 0) {
1678 		if (fStreams != NULL)
1679 			fStreams[cookie->VirtualIndex()] = NULL;
1680 		delete cookie;
1681 	}
1682 
1683 	return B_OK;
1684 }
1685 
1686 
1687 // #pragma mark -
1688 
1689 
1690 status_t
1691 AVFormatReader::GetStreamInfo(void* _cookie, int64* frameCount,
1692 	bigtime_t* duration, media_format* format, const void** infoBuffer,
1693 	size_t* infoSize)
1694 {
1695 	Stream* cookie = reinterpret_cast<Stream*>(_cookie);
1696 	return cookie->GetStreamInfo(frameCount, duration, format, infoBuffer,
1697 		infoSize);
1698 }
1699 
1700 
1701 status_t
1702 AVFormatReader::GetStreamMetaData(void* _cookie, BMessage* _data)
1703 {
1704 	Stream* cookie = reinterpret_cast<Stream*>(_cookie);
1705 	return cookie->GetMetaData(_data);
1706 }
1707 
1708 
1709 status_t
1710 AVFormatReader::Seek(void* _cookie, uint32 seekTo, int64* frame,
1711 	bigtime_t* time)
1712 {
1713 	Stream* cookie = reinterpret_cast<Stream*>(_cookie);
1714 	return cookie->Seek(seekTo, frame, time);
1715 }
1716 
1717 
1718 status_t
1719 AVFormatReader::FindKeyFrame(void* _cookie, uint32 flags, int64* frame,
1720 	bigtime_t* time)
1721 {
1722 	Stream* cookie = reinterpret_cast<Stream*>(_cookie);
1723 	return cookie->FindKeyFrame(flags, frame, time);
1724 }
1725 
1726 
1727 status_t
1728 AVFormatReader::GetNextChunk(void* _cookie, const void** chunkBuffer,
1729 	size_t* chunkSize, media_header* mediaHeader)
1730 {
1731 	Stream* cookie = reinterpret_cast<Stream*>(_cookie);
1732 	return cookie->GetNextChunk(chunkBuffer, chunkSize, mediaHeader);
1733 }
1734