xref: /haiku/src/add-ons/media/plugins/ffmpeg/AVFormatReader.cpp (revision dd2a1e350b303b855a50fd64e6cb55618be1ae6a)
1 /*
2  * Copyright 2009-2010, Stephan Aßmus <superstippi@gmx.de>
3  * Copyright 2014, Colin Günther <coling@gmx.de>
4  * Copyright 2018, Dario Casalinuovo
5  * All rights reserved. Distributed under the terms of the GNU L-GPL license.
6  */
7 
8 #include "AVFormatReader.h"
9 
10 #include <stdio.h>
11 #include <string.h>
12 #include <stdlib.h>
13 
14 #include <new>
15 
16 #include <AutoDeleter.h>
17 #include <Autolock.h>
18 #include <ByteOrder.h>
19 #include <MediaIO.h>
20 #include <MediaDefs.h>
21 #include <MediaFormats.h>
22 #include <MimeType.h>
23 
24 extern "C" {
25 	#include "avcodec.h"
26 	#include "avformat.h"
27 }
28 
29 #include "DemuxerTable.h"
30 #include "gfx_util.h"
31 #include "Utilities.h"
32 
33 
34 //#define TRACE_AVFORMAT_READER
35 #ifdef TRACE_AVFORMAT_READER
36 #	define TRACE printf
37 #	define TRACE_IO(a...)
38 #	define TRACE_SEEK(a...) printf(a)
39 #	define TRACE_FIND(a...)
40 #	define TRACE_PACKET(a...)
41 #else
42 #	define TRACE(a...)
43 #	define TRACE_IO(a...)
44 #	define TRACE_SEEK(a...)
45 #	define TRACE_FIND(a...)
46 #	define TRACE_PACKET(a...)
47 #endif
48 
49 #define ERROR(a...) fprintf(stderr, a)
50 
51 // Compatibility with old ffmpeg 4.x, where the getters didn't exist yet
52 #if LIBAVCODEC_VERSION_MAJOR < 60
53 #define avformat_index_get_entry(stream, index) (&(stream)->index_entries[(index)])
54 #define avformat_index_get_entries_count(stream) ((stream)->nb_index_entries)
55 #endif
56 
57 
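// FFmpeg's packed sample formats (AV_SAMPLE_FMT_S16 and friends) are defined
// in the native byte order of the machine, which is presumably why the format
// argument is ignored and host endianness is always reported below.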
58 static uint32
59 avformat_to_beos_byte_order(AVSampleFormat format)
60 {
61 	// TODO: Huh?
62 	return B_MEDIA_HOST_ENDIAN;
63 }
64 
65 
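// Translate an FFmpeg metadata dictionary into a BMessage, mapping the raw
// ID3v2 frame names (TALB, TPE1, ...) to friendlier attribute names; unknown
// keys are passed through unchanged. For a typical MP3 the resulting message
// might contain, for example, "album", "artist", "title" and "track" strings.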
66 static void
67 avdictionary_to_message(AVDictionary* dictionary, BMessage* message)
68 {
69 	if (dictionary == NULL)
70 		return;
71 
72 	AVDictionaryEntry* entry = NULL;
73 	while ((entry = av_dict_get(dictionary, "", entry,
74 		AV_DICT_IGNORE_SUFFIX))) {
75 		// convert entry keys into something more meaningful using the names from
76 		// id3v2.c
77 		if (strcmp(entry->key, "TALB") == 0 || strcmp(entry->key, "TAL") == 0)
78 			message->AddString("album", entry->value);
79 		else if (strcmp(entry->key, "TCOM") == 0)
80 			message->AddString("composer", entry->value);
81 		else if (strcmp(entry->key, "TCON") == 0 || strcmp(entry->key, "TCO") == 0)
82 			message->AddString("genre", entry->value);
83 		else if (strcmp(entry->key, "TCOP") == 0)
84 			message->AddString("copyright", entry->value);
85 		else if (strcmp(entry->key, "TDRL") == 0 || strcmp(entry->key, "TDRC") == 0)
86 			message->AddString("date", entry->value);
87 		else if (strcmp(entry->key, "TENC") == 0 || strcmp(entry->key, "TEN") == 0)
88 			message->AddString("encoded_by", entry->value);
89 		else if (strcmp(entry->key, "TIT2") == 0 || strcmp(entry->key, "TT2") == 0)
90 			message->AddString("title", entry->value);
91 		else if (strcmp(entry->key, "TLAN") == 0)
92 			message->AddString("language", entry->value);
93 		else if (strcmp(entry->key, "TPE1") == 0 || strcmp(entry->key, "TP1") == 0)
94 			message->AddString("artist", entry->value);
95 		else if (strcmp(entry->key, "TPE2") == 0 || strcmp(entry->key, "TP2") == 0)
96 			message->AddString("album_artist", entry->value);
97 		else if (strcmp(entry->key, "TPE3") == 0 || strcmp(entry->key, "TP3") == 0)
98 			message->AddString("performer", entry->value);
99 		else if (strcmp(entry->key, "TPOS") == 0)
100 			message->AddString("disc", entry->value);
101 		else if (strcmp(entry->key, "TPUB") == 0)
102 			message->AddString("publisher", entry->value);
103 		else if (strcmp(entry->key, "TRCK") == 0 || strcmp(entry->key, "TRK") == 0)
104 			message->AddString("track", entry->value);
105 		else if (strcmp(entry->key, "TSOA") == 0)
106 			message->AddString("album-sort", entry->value);
107 		else if (strcmp(entry->key, "TSOP") == 0)
108 			message->AddString("artist-sort", entry->value);
109 		else if (strcmp(entry->key, "TSOT") == 0)
110 			message->AddString("title-sort", entry->value);
111 		else if (strcmp(entry->key, "TSSE") == 0)
112 			message->AddString("encoder", entry->value);
113 		else if (strcmp(entry->key, "TYER") == 0)
114 			message->AddString("year", entry->value);
115 		else
116 			message->AddString(entry->key, entry->value);
117 	}
118 }
119 
120 
121 // #pragma mark - StreamBase
122 
123 
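// StreamBase wraps one AVFormatContext demuxing from a BMediaIO source that
// may be shared with other streams. Each instance keeps its own read position
// (fPosition), while access to the source itself is serialized through the
// shared fSourceLock.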
124 class StreamBase {
125 public:
126 								StreamBase(BMediaIO* source,
127 									BLocker* sourceLock, BLocker* streamLock);
128 	virtual						~StreamBase();
129 
130 	// Init an individual AVFormatContext
131 			status_t			Open();
132 
133 	// Setup this stream to point to the AVStream at the given streamIndex.
134 	virtual	status_t			Init(int32 streamIndex);
135 
136 	inline	const AVFormatContext* Context() const
137 									{ return fContext; }
138 			int32				Index() const;
139 			int32				CountStreams() const;
140 			int32				StreamIndexFor(int32 virtualIndex) const;
141 	inline	int32				VirtualIndex() const
142 									{ return fVirtualIndex; }
143 
144 			double				FrameRate() const;
145 			bigtime_t			Duration() const;
146 
147 	virtual	status_t			Seek(uint32 flags, int64* frame,
148 									bigtime_t* time);
149 
150 			status_t			GetNextChunk(const void** chunkBuffer,
151 									size_t* chunkSize,
152 									media_header* mediaHeader);
153 
154 protected:
155 	// I/O hooks for libavformat; the cookie will be a Stream instance.
156 	// Since multiple streams share the same BMediaIO source, each one
157 	// maintains its own position and may need to re-seek the source in
158 	// _Read() if the actual position no longer matches.
159 	static	int					_Read(void* cookie, uint8* buffer,
160 									int bufferSize);
161 	static	off_t				_Seek(void* cookie, off_t offset, int whence);
162 
163 			status_t			_NextPacket(bool reuse);
164 
165 			int64_t				_ConvertToStreamTimeBase(bigtime_t time) const;
166 			bigtime_t			_ConvertFromStreamTimeBase(int64_t time) const;
167 
168 protected:
169 			BMediaIO*			fSource;
170 			off_t				fPosition;
171 			// Since different threads may read from the source,
172 			// we need to protect the file position and I/O by a lock.
173 			BLocker*			fSourceLock;
174 
175 			BLocker*			fStreamLock;
176 
177 			AVFormatContext*	fContext;
178 			AVStream*			fStream;
179 			int32				fVirtualIndex;
180 
181 			media_format		fFormat;
182 
183 			AVIOContext*		fIOContext;
184 
185 			AVPacket			fPacket;
186 			bool				fReusePacket;
187 
188 			bool				fSeekByBytes;
189 			bool				fStreamBuildsIndexWhileReading;
190 };
191 
192 
193 StreamBase::StreamBase(BMediaIO* source, BLocker* sourceLock,
194 		BLocker* streamLock)
195 	:
196 	fSource(source),
197 	fPosition(0),
198 	fSourceLock(sourceLock),
199 
200 	fStreamLock(streamLock),
201 
202 	fContext(NULL),
203 	fStream(NULL),
204 	fVirtualIndex(-1),
205 	fIOContext(NULL),
206 
207 	fReusePacket(false),
208 
209 	fSeekByBytes(false),
210 	fStreamBuildsIndexWhileReading(false)
211 {
212 	// NOTE: Don't use streamLock here, it may not yet be initialized!
213 
214 	av_new_packet(&fPacket, 0);
215 	fFormat.Clear();
216 }
217 
218 
219 StreamBase::~StreamBase()
220 {
221 	avformat_close_input(&fContext);
222 	av_packet_unref(&fPacket);
223 	if (fIOContext != NULL)
224 		av_free(fIOContext->buffer);
225 	av_free(fIOContext);
226 }
227 
228 
229 status_t
230 StreamBase::Open()
231 {
232 	BAutolock _(fStreamLock);
233 
234 	// Init probing data
235 	size_t bufferSize = 32768;
236 	uint8* buffer = static_cast<uint8*>(av_malloc(bufferSize));
237 	if (buffer == NULL)
238 		return B_NO_MEMORY;
239 
240 	// First try to identify the file using the MIME database, as ffmpeg
241 	// is not very good at this and relies on us to give it the file extension
242 	// as a hint.
243 	// For this we need some valid data in the buffer; the first 512 bytes
244 	// should do, since our MIME sniffing never uses more.
245 	const char* extension = NULL;
246 	BMessage message;
247 	if (fSource->Read(buffer, 512) == 512) {
248 		BMimeType type;
249 		if (BMimeType::GuessMimeType(buffer, 512, &type) == B_OK) {
250 			if (type.GetFileExtensions(&message) == B_OK) {
251 				extension = message.FindString("extensions");
252 			}
253 		}
254 	}
255 
256 	// Allocate I/O context with buffer and hook functions, pass ourself as
257 	// cookie.
258 	memset(buffer, 0, bufferSize);
259 	fIOContext = avio_alloc_context(buffer, bufferSize, 0, this, _Read, 0,
260 		_Seek);
261 	if (fIOContext == NULL) {
262 		TRACE("StreamBase::Open() - avio_alloc_context() failed!\n");
263 		av_free(buffer);
264 		return B_ERROR;
265 	}
266 
267 	fContext = avformat_alloc_context();
268 	fContext->pb = fIOContext;
269 
270 	// Open the input and probe the format
271 	if (avformat_open_input(&fContext, extension, NULL, NULL) < 0) {
272 		TRACE("StreamBase::Open() - avformat_open_input() failed!\n");
273 		// avformat_open_input() frees the context in case of failure
274 		fContext = NULL;
275 		av_free(fIOContext->buffer);
276 		av_free(fIOContext);
277 		fIOContext = NULL;
278 		return B_NOT_SUPPORTED;
279 	}
280 
281 	TRACE("StreamBase::Open() - "
282 		"avformat_open_input(): %s\n", fContext->iformat->name);
283 	TRACE("  flags:%s%s%s%s%s\n",
284 		(fContext->iformat->flags & AVFMT_GLOBALHEADER) ? " AVFMT_GLOBALHEADER" : "",
285 		(fContext->iformat->flags & AVFMT_NOTIMESTAMPS) ? " AVFMT_NOTIMESTAMPS" : "",
286 		(fContext->iformat->flags & AVFMT_GENERIC_INDEX) ? " AVFMT_GENERIC_INDEX" : "",
287 		(fContext->iformat->flags & AVFMT_TS_DISCONT) ? " AVFMT_TS_DISCONT" : "",
288 		(fContext->iformat->flags & AVFMT_VARIABLE_FPS) ? " AVFMT_VARIABLE_FPS" : ""
289 	);
290 
291 
292 	// Retrieve stream information
293 	if (avformat_find_stream_info(fContext, NULL) < 0) {
294 		TRACE("StreamBase::Open() - avformat_find_stream_info() failed!\n");
295 		return B_NOT_SUPPORTED;
296 	}
297 
298 	fSeekByBytes = (fContext->iformat->flags & AVFMT_TS_DISCONT) != 0;
299 	fStreamBuildsIndexWhileReading
300 		= (fContext->iformat->flags & AVFMT_GENERIC_INDEX) != 0
301 			|| fSeekByBytes;
302 
303 	TRACE("StreamBase::Open() - "
304 		"avformat_find_stream_info() success! Seeking by bytes: %d\n",
305 		fSeekByBytes);
306 
307 	return B_OK;
308 }
309 
310 
311 status_t
312 StreamBase::Init(int32 virtualIndex)
313 {
314 	BAutolock _(fStreamLock);
315 
316 	TRACE("StreamBase::Init(%" B_PRId32 ")\n", virtualIndex);
317 
318 	if (fContext == NULL)
319 		return B_NO_INIT;
320 
321 	int32 streamIndex = StreamIndexFor(virtualIndex);
322 	if (streamIndex < 0) {
323 		TRACE("  bad stream index!\n");
324 		return B_BAD_INDEX;
325 	}
326 
327 	TRACE("  context stream index: %" B_PRId32 "\n", streamIndex);
328 
329 	// We need to remember the virtual index so that
330 	// AVFormatReader::FreeCookie() can clear the correct stream entry.
331 	fVirtualIndex = virtualIndex;
332 
333 	// Make us point to the AVStream at streamIndex
334 	fStream = fContext->streams[streamIndex];
335 
336 // NOTE: Discarding other streams works for most, but not all containers;
337 // for example, it does not work for the ASF demuxer. Since I don't know
338 // which other demuxers it breaks, let's just keep reading packets for the
339 // unwanted streams, it just makes the _NextPacket() function slightly less
340 // efficient.
341 //	// Discard all other streams
342 //	for (unsigned i = 0; i < fContext->nb_streams; i++) {
343 //		if (i != (unsigned)streamIndex)
344 //			fContext->streams[i]->discard = AVDISCARD_ALL;
345 //	}
346 
347 	return B_OK;
348 }
349 
350 
351 int32
352 StreamBase::Index() const
353 {
354 	if (fStream != NULL)
355 		return fStream->index;
356 	return -1;
357 }
358 
359 
360 int32
361 StreamBase::CountStreams() const
362 {
363 	// Figure out the stream count. If the context has "AVPrograms", use
364 	// the first program (for now).
365 	// TODO: To support "programs" properly, the BMediaFile/Track API should
366 	// be extended accordingly. I guess programs are like TV channels in the
367 	// same satellite transport stream. Maybe call them "TrackGroups".
368 	if (fContext->nb_programs > 0) {
369 		// See libavformat/utils.c:dump_format()
370 		return fContext->programs[0]->nb_stream_indexes;
371 	}
372 	return fContext->nb_streams;
373 }
374 
375 
376 int32
377 StreamBase::StreamIndexFor(int32 virtualIndex) const
378 {
379 	// NOTE: See CountStreams()
380 	if (fContext->nb_programs > 0) {
381 		const AVProgram* program = fContext->programs[0];
382 		if (virtualIndex >= 0
383 			&& virtualIndex < (int32)program->nb_stream_indexes) {
384 			return program->stream_index[virtualIndex];
385 		}
386 	} else {
387 		if (virtualIndex >= 0 && virtualIndex < (int32)fContext->nb_streams)
388 			return virtualIndex;
389 	}
390 	return -1;
391 }
392 
393 
394 double
395 StreamBase::FrameRate() const
396 {
397 	// TODO: Find a way to always calculate a correct frame rate...
398 	double frameRate = 1.0;
399 	switch (fStream->codecpar->codec_type) {
400 		case AVMEDIA_TYPE_AUDIO:
401 			frameRate = (double)fStream->codecpar->sample_rate;
402 			break;
403 		case AVMEDIA_TYPE_VIDEO:
404 		{
405 			AVRational frameRateFrac = av_guess_frame_rate(NULL, fStream, NULL);
406 			if (frameRateFrac.den != 0 && frameRateFrac.num != 0)
407 				frameRate = av_q2d(frameRateFrac);
408 			else if (fStream->time_base.den != 0 && fStream->time_base.num != 0)
409 				frameRate = 1 / av_q2d(fStream->time_base);
410 
411 			// Catch the obviously wrong default frame rate reported when ffmpeg
412 			// cannot guess anything because there are fewer than two frames to
413 			// compute one from (90000.0 is simply the MPEG 90 kHz clock rate).
414 			if (fStream->nb_frames < 2 && frameRate == 90000.0f)
415 				return 0.0f;
416 			break;
417 		}
418 		default:
419 			break;
420 	}
421 	if (frameRate <= 0.0)
422 		frameRate = 1.0;
423 	return frameRate;
424 }
425 
426 
427 bigtime_t
428 StreamBase::Duration() const
429 {
430 	// TODO: This is not working correctly for all stream types...
431 	// It seems that the calculations here are correct, because they work
432 	// for a couple of streams and are in line with the documentation, but
433 	// unfortunately, libavformat itself seems to set the time_base and
434 	// duration wrongly sometimes. :-(
435 
436 	int32 flags;
437 	fSource->GetFlags(&flags);
438 
439 	// "Mutable Size" (i.e. HTTP streams) means we can't realistically compute
440 	// a duration. So don't let ffmpeg give a (wrong) estimate in this case.
441 	if ((flags & B_MEDIA_MUTABLE_SIZE) != 0)
442 		return 0;
443 
444 	if ((int64)fStream->duration != AV_NOPTS_VALUE) {
445 		int64_t time = fStream->duration;
446 		if (fStream->start_time != AV_NOPTS_VALUE)
447 			time += fStream->start_time;
448 		return _ConvertFromStreamTimeBase(time);
449 	} else if ((int64)fContext->duration != AV_NOPTS_VALUE)
450 		return (bigtime_t)fContext->duration;
451 
452 	return 0;
453 }
454 
455 
456 status_t
457 StreamBase::Seek(uint32 flags, int64* frame, bigtime_t* time)
458 {
459 	BAutolock _(fStreamLock);
460 
461 	if (fContext == NULL || fStream == NULL)
462 		return B_NO_INIT;
463 
464 	TRACE_SEEK("StreamBase::Seek(%" B_PRId32 ",%s%s%s%s, %" B_PRId64 ", "
465 		"%" B_PRId64 ")\n", VirtualIndex(),
466 		(flags & B_MEDIA_SEEK_TO_FRAME) ? " B_MEDIA_SEEK_TO_FRAME" : "",
467 		(flags & B_MEDIA_SEEK_TO_TIME) ? " B_MEDIA_SEEK_TO_TIME" : "",
468 		(flags & B_MEDIA_SEEK_CLOSEST_BACKWARD)
469 			? " B_MEDIA_SEEK_CLOSEST_BACKWARD" : "",
470 		(flags & B_MEDIA_SEEK_CLOSEST_FORWARD)
471 			? " B_MEDIA_SEEK_CLOSEST_FORWARD" : "",
472 		*frame, *time);
473 
474 	double frameRate = FrameRate();
475 	if ((flags & B_MEDIA_SEEK_TO_FRAME) != 0) {
476 		// Seeking is always based on time, so initialize it when the client
477 		// seeks based on frame.
478 		*time = (bigtime_t)(*frame * 1000000.0 / frameRate + 0.5);
479 	}
480 
481 	int64_t timeStamp = *time;
482 
483 	int searchFlags = AVSEEK_FLAG_BACKWARD;
484 	if ((flags & B_MEDIA_SEEK_CLOSEST_FORWARD) != 0)
485 		searchFlags = 0;
486 
487 	if (fSeekByBytes) {
488 		searchFlags |= AVSEEK_FLAG_BYTE;
489 
490 		BAutolock _(fSourceLock);
491 		int64_t fileSize;
492 
493 		if (fSource->GetSize(&fileSize) != B_OK)
494 			return B_NOT_SUPPORTED;
495 
496 		int64_t duration = Duration();
497 		if (duration == 0)
498 			return B_NOT_SUPPORTED;
499 
500 		timeStamp = int64_t(fileSize * ((double)timeStamp / duration));
501 		if ((flags & B_MEDIA_SEEK_CLOSEST_BACKWARD) != 0) {
502 			timeStamp -= 65536;
503 			if (timeStamp < 0)
504 				timeStamp = 0;
505 		}
506 
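		// The byte offset is only an estimate: start proportional to the
		// requested time, read a packet at the landing spot to learn its PTS,
		// and nudge the estimate forward or backward (scaled by the remaining
		// time difference) until the correction becomes small enough.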
507 		bool seekAgain = true;
508 		bool seekForward = true;
509 		bigtime_t lastFoundTime = -1;
510 		int64_t closestTimeStampBackwards = -1;
511 		while (seekAgain) {
512 			if (avformat_seek_file(fContext, -1, INT64_MIN, timeStamp,
513 				INT64_MAX, searchFlags) < 0) {
514 				TRACE("  avformat_seek_file() (by bytes) failed.\n");
515 				return B_ERROR;
516 			}
517 			seekAgain = false;
518 
519 			// Our last packet is toast in any case. Read the next one so we
520 			// know where we really sought.
521 			fReusePacket = false;
522 			if (_NextPacket(true) == B_OK) {
523 				while (fPacket.pts == AV_NOPTS_VALUE) {
524 					fReusePacket = false;
525 					if (_NextPacket(true) != B_OK)
526 						return B_ERROR;
527 				}
528 				if (fPacket.pos >= 0)
529 					timeStamp = fPacket.pos;
530 				bigtime_t foundTime
531 					= _ConvertFromStreamTimeBase(fPacket.pts);
532 				if (foundTime != lastFoundTime) {
533 					lastFoundTime = foundTime;
534 					if (foundTime > *time) {
535 						if (closestTimeStampBackwards >= 0) {
536 							timeStamp = closestTimeStampBackwards;
537 							seekAgain = true;
538 							seekForward = false;
539 							continue;
540 						}
541 						int64_t diff = int64_t(fileSize
542 							* ((double)(foundTime - *time) / (2 * duration)));
543 						if (diff < 8192)
544 							break;
545 						timeStamp -= diff;
546 						TRACE_SEEK("  need to seek back (%" B_PRIdBIGTIME ") (time: %.2f "
547 							"-> %.2f)\n", timeStamp, *time / 1000000.0,
548 							foundTime / 1000000.0);
549 						if (timeStamp < 0)
550 							foundTime = 0;
551 						else {
552 							seekAgain = true;
553 							continue;
554 						}
555 					} else if (seekForward && foundTime < *time - 100000) {
556 						closestTimeStampBackwards = timeStamp;
557 						int64_t diff = int64_t(fileSize
558 							* ((double)(*time - foundTime) / (2 * duration)));
559 						if (diff < 8192)
560 							break;
561 						timeStamp += diff;
562 						TRACE_SEEK("  need to seek forward (%" B_PRId64 ") (time: "
563 							"%.2f -> %.2f)\n", timeStamp, *time / 1000000.0,
564 							foundTime / 1000000.0);
565 						if (timeStamp > duration)
566 							foundTime = duration;
567 						else {
568 							seekAgain = true;
569 							continue;
570 						}
571 					}
572 				}
573 				TRACE_SEEK("  found time: %" B_PRIdBIGTIME " -> %" B_PRIdBIGTIME " (%.2f)\n", *time,
574 					foundTime, foundTime / 1000000.0);
575 				*time = foundTime;
576 				*frame = (uint64)(*time * frameRate / 1000000LL + 0.5);
577 				TRACE_SEEK("  seeked frame: %" B_PRId64 "\n", *frame);
578 			} else {
579 				TRACE_SEEK("  _NextPacket() failed!\n");
580 				return B_ERROR;
581 			}
582 		}
583 	} else {
584 		// We may not get a PTS from the next packet after seeking, so
585 		// we try to get an expected time from the index.
586 		int64_t streamTimeStamp = _ConvertToStreamTimeBase(*time);
587 		int index = av_index_search_timestamp(fStream, streamTimeStamp,
588 			searchFlags);
589 		if (index < 0) {
590 			TRACE("  av_index_search_timestamp() failed\n");
591 		} else {
592 			if (index > 0) {
593 				const AVIndexEntry* entry = avformat_index_get_entry(fStream, index);
594 				streamTimeStamp = entry->timestamp;
595 			} else {
596 				// Some demuxers use the first index entry to store some
597 				// other information, such as the total playing time.
598 				// Assume the timestamp of the first entry is always 0.
599 				// TODO: Handle start-time offset?
600 				streamTimeStamp = 0;
601 			}
602 			bigtime_t foundTime = _ConvertFromStreamTimeBase(streamTimeStamp);
603 			bigtime_t timeDiff = foundTime > *time
604 				? foundTime - *time : *time - foundTime;
605 
606 			if (timeDiff > 1000000
607 				&& (fStreamBuildsIndexWhileReading
608 					|| index == avformat_index_get_entries_count(fStream) - 1)) {
609 				// If the stream is building the index on the fly while parsing
610 				// it, we only have entries in the index for positions already
611 				// decoded, i.e. we cannot seek into the future. In that case,
612 				// just assume that we can seek where we want and leave
613 				// time/frame unmodified. Since successfully seeking once
614 				// will generate index entries for the sought position, we
615 				// need to remember this in fStreamBuildsIndexWhileReading,
616 				// since when seeking back there will be later index entries,
617 				// but we still want to ignore the found entry.
618 				fStreamBuildsIndexWhileReading = true;
619 				TRACE_SEEK("  Not trusting generic index entry. "
620 					"(Current count: %d)\n", avformat_index_get_entries_count(fStream));
621 			} else {
622 				// If we found a reasonable time, write it into *time.
623 				// After seeking, we will try to read the sought time from
624 				// the next packet. If the packet has no PTS value, we may
625 				// still have a more accurate time from the index lookup.
626 				*time = foundTime;
627 			}
628 		}
629 
630 		if (avformat_seek_file(fContext, -1, INT64_MIN, timeStamp, INT64_MAX,
631 				searchFlags) < 0) {
632 			TRACE("  avformat_seek_file() failed.\n");
633 			// Try to fall back to av_seek_frame()
634 			timeStamp = _ConvertToStreamTimeBase(timeStamp);
635 			if (av_seek_frame(fContext, fStream->index, timeStamp,
636 				searchFlags) < 0) {
637 				TRACE("  av_seek_frame() failed as well.\n");
638 				// Fall back to seeking to the beginning by bytes
639 				timeStamp = 0;
640 				if (av_seek_frame(fContext, fStream->index, timeStamp,
641 						AVSEEK_FLAG_BYTE) < 0) {
642 					TRACE("  av_seek_frame() by bytes failed as "
643 						"well.\n");
644 					// Do not propagate error in any case. We fail if we can't
645 					// read another packet.
646 				} else
647 					*time = 0;
648 			}
649 		}
650 
651 		// Our last packet is toast in any case. Read the next one so
652 		// we know where we really sought.
653 		bigtime_t foundTime = *time;
654 
655 		fReusePacket = false;
656 		if (_NextPacket(true) == B_OK) {
657 			if (fPacket.pts != AV_NOPTS_VALUE)
658 				foundTime = _ConvertFromStreamTimeBase(fPacket.pts);
659 			else
660 				TRACE_SEEK("  no PTS in packet after seeking\n");
661 		} else
662 			TRACE_SEEK("  _NextPacket() failed!\n");
663 
664 		*time = foundTime;
665 		TRACE_SEEK("  sought time: %.2fs\n", *time / 1000000.0);
666 		*frame = (uint64)(*time * frameRate / 1000000.0 + 0.5);
667 		TRACE_SEEK("  sought frame: %" B_PRId64 "\n", *frame);
668 	}
669 
670 	return B_OK;
671 }
672 
673 
674 status_t
675 StreamBase::GetNextChunk(const void** chunkBuffer,
676 	size_t* chunkSize, media_header* mediaHeader)
677 {
678 	BAutolock _(fStreamLock);
679 
680 	TRACE_PACKET("StreamBase::GetNextChunk()\n");
681 
682 	status_t ret = _NextPacket(false);
683 	if (ret != B_OK) {
684 		*chunkBuffer = NULL;
685 		*chunkSize = 0;
686 		return ret;
687 	}
688 
689 	// According to libavformat documentation, fPacket is valid until the
690 	// next call to av_read_frame(). This is what we want and we can share
691 	// the memory with the least overhead.
692 	*chunkBuffer = fPacket.data;
693 	*chunkSize = fPacket.size;
694 
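	// Pass the packet's timing, flags and file position on to the consumer
	// through media_header::user_data, so that a matching decoder can
	// reconstruct the AVPacket metadata on its side.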
695 	if (mediaHeader != NULL) {
696 #if __GNUC__ != 2
697 		static_assert(sizeof(avpacket_user_data) <= sizeof(mediaHeader->user_data),
698 			"avpacket user data too large");
699 #endif
700 		mediaHeader->user_data_type = AVPACKET_USER_DATA_TYPE;
701 		avpacket_user_data* data = (avpacket_user_data*)mediaHeader->user_data;
702 		data->pts = fPacket.pts;
703 		data->dts = fPacket.dts;
704 		data->stream_index = fPacket.stream_index;
705 		data->flags = fPacket.flags;
706 		data->duration = fPacket.duration;
707 		data->pos = fPacket.pos;
708 
709 		mediaHeader->type = fFormat.type;
710 		mediaHeader->buffer = 0;
711 		mediaHeader->destination = -1;
712 		mediaHeader->time_source = -1;
713 		mediaHeader->size_used = fPacket.size;
714 
715 		// Use the presentation timestamp if available (that is not always the case),
716 		// falling back to the decoding timestamp, which av_read_frame() guarantees to set.
717 		bigtime_t presentationTimeStamp;
718 		if (fPacket.pts != AV_NOPTS_VALUE)
719 			presentationTimeStamp = fPacket.pts;
720 		else
721 			presentationTimeStamp = fPacket.dts;
722 
723 		mediaHeader->start_time	= _ConvertFromStreamTimeBase(presentationTimeStamp);
724 		mediaHeader->file_pos = fPacket.pos;
725 		mediaHeader->data_offset = 0;
726 		switch (mediaHeader->type) {
727 			case B_MEDIA_RAW_AUDIO:
728 				break;
729 			case B_MEDIA_ENCODED_AUDIO:
730 				mediaHeader->u.encoded_audio.buffer_flags
731 					= (fPacket.flags & AV_PKT_FLAG_KEY) ? B_MEDIA_KEY_FRAME : 0;
732 				break;
733 			case B_MEDIA_RAW_VIDEO:
734 				mediaHeader->u.raw_video.line_count
735 					= fFormat.u.raw_video.display.line_count;
736 				break;
737 			case B_MEDIA_ENCODED_VIDEO:
738 				mediaHeader->u.encoded_video.field_flags
739 					= (fPacket.flags & AV_PKT_FLAG_KEY) ? B_MEDIA_KEY_FRAME : 0;
740 				mediaHeader->u.encoded_video.line_count
741 					= fFormat.u.encoded_video.output.display.line_count;
742 				break;
743 			default:
744 				break;
745 		}
746 	}
747 
748 //	static bigtime_t pts[2];
749 //	static bigtime_t lastPrintTime = system_time();
750 //	static BLocker printLock;
751 //	if (fStream->index < 2) {
752 //		if (fPacket.pts != AV_NOPTS_VALUE)
753 //			pts[fStream->index] = _ConvertFromStreamTimeBase(fPacket.pts);
754 //		printLock.Lock();
755 //		bigtime_t now = system_time();
756 //		if (now - lastPrintTime > 1000000) {
757 //			printf("PTS: %.4f/%.4f, diff: %.4f\r", pts[0] / 1000000.0,
758 //				pts[1] / 1000000.0, (pts[0] - pts[1]) / 1000000.0);
759 //			fflush(stdout);
760 //			lastPrintTime = now;
761 //		}
762 //		printLock.Unlock();
763 //	}
764 
765 	return B_OK;
766 }
767 
768 
769 // #pragma mark -
770 
771 
772 /*static*/ int
773 StreamBase::_Read(void* cookie, uint8* buffer, int bufferSize)
774 {
775 	StreamBase* stream = reinterpret_cast<StreamBase*>(cookie);
776 
777 	BAutolock _(stream->fSourceLock);
778 
779 	TRACE_IO("StreamBase::_Read(%p, %p, %d) position: %lld\n",
780 		cookie, buffer, bufferSize, stream->fPosition);
781 
782 	if (stream->fPosition != stream->fSource->Position()) {
783 		TRACE_IO("StreamBase::_Read fSource position: %lld\n",
784 			stream->fSource->Position());
785 
786 		off_t position
787 			= stream->fSource->Seek(stream->fPosition, SEEK_SET);
788 		if (position != stream->fPosition)
789 			return -1;
790 	}
791 
792 	ssize_t read = stream->fSource->Read(buffer, bufferSize);
793 	if (read > 0)
794 		stream->fPosition += read;
795 
796 	TRACE_IO("  read: %ld\n", read);
797 
798 	if (read == 0)
799 		return AVERROR_EOF;
800 
801 	return (int)read;
802 
803 }
804 
805 
806 /*static*/ off_t
807 StreamBase::_Seek(void* cookie, off_t offset, int whence)
808 {
809 	TRACE_IO("StreamBase::_Seek(%p, %lld, %d)\n",
810 		cookie, offset, whence);
811 
812 	StreamBase* stream = reinterpret_cast<StreamBase*>(cookie);
813 
814 	BAutolock _(stream->fSourceLock);
815 
816 	// Support for special file size retrieval API without seeking
817 	// anywhere:
818 	if (whence == AVSEEK_SIZE) {
819 		off_t size;
820 		if (stream->fSource->GetSize(&size) == B_OK)
821 			return size;
822 		return -1;
823 	}
824 
825 	// If not requested to seek to an absolute position, we need to
826 	// confirm that the stream is currently at the position that we
827 	// think it is.
828 	if (whence != SEEK_SET
829 		&& stream->fPosition != stream->fSource->Position()) {
830 		off_t position
831 			= stream->fSource->Seek(stream->fPosition, SEEK_SET);
832 		if (position != stream->fPosition)
833 			return -1;
834 	}
835 
836 	off_t position = stream->fSource->Seek(offset, whence);
837 	TRACE_IO("  position: %lld\n", position);
838 	if (position < 0)
839 		return -1;
840 
841 	stream->fPosition = position;
842 
843 	return position;
844 }
845 
846 
847 status_t
848 StreamBase::_NextPacket(bool reuse)
849 {
850 	TRACE_PACKET("StreamBase::_NextPacket(%d)\n", reuse);
851 
852 	if (fReusePacket) {
853 		// The last packet was marked for reuse, so we keep using it.
854 		TRACE_PACKET("  re-using last packet\n");
855 		fReusePacket = reuse;
856 		return B_OK;
857 	}
858 
859 	av_packet_unref(&fPacket);
860 
861 	while (true) {
862 		if (av_read_frame(fContext, &fPacket) < 0) {
863 			// NOTE: Even though we may get the error for a different stream,
864 			// av_read_frame() is not going to be successful from here on, so
865 			// it doesn't matter
866 			fReusePacket = false;
867 			return B_LAST_BUFFER_ERROR;
868 		}
869 
870 		if (fPacket.stream_index == Index())
871 			break;
872 
873 		// This is a packet from another stream, ignore it.
874 		av_packet_unref(&fPacket);
875 	}
876 
877 	// Mark this packet with the new reuse flag.
878 	fReusePacket = reuse;
879 	return B_OK;
880 }
881 
882 
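// Convert a time in microseconds to ticks of the stream's time base (and the
// reverse below). For example, with a time_base of 1/90000 and a start_time
// of 0, 2000000 microseconds (2 seconds) map to 180000 stream ticks.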
883 int64_t
884 StreamBase::_ConvertToStreamTimeBase(bigtime_t time) const
885 {
886 	int64 timeStamp = int64_t((double)time * fStream->time_base.den
887 		/ (1000000.0 * fStream->time_base.num) + 0.5);
888 	if (fStream->start_time != AV_NOPTS_VALUE)
889 		timeStamp += fStream->start_time;
890 	return timeStamp;
891 }
892 
893 
894 bigtime_t
895 StreamBase::_ConvertFromStreamTimeBase(int64_t time) const
896 {
897 	if (fStream->start_time != AV_NOPTS_VALUE)
898 		time -= fStream->start_time;
899 
900 	return bigtime_t(1000000LL * time
901 		* fStream->time_base.num / fStream->time_base.den);
902 }
903 
904 
905 // #pragma mark - AVFormatReader::Stream
906 
907 
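// AVFormatReader::Stream adds the BMediaTrack facing parts on top of
// StreamBase: it builds the media_format in Init(), remembers the last
// reported keyframe, and uses a separate "ghost" StreamBase instance so that
// FindKeyFrame() does not disturb the actual demuxing position.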
908 class AVFormatReader::Stream : public StreamBase {
909 public:
910 								Stream(BMediaIO* source,
911 									BLocker* streamLock);
912 	virtual						~Stream();
913 
914 	// Setup this stream to point to the AVStream at the given streamIndex.
915 	// This will also initialize the media_format.
916 	virtual	status_t			Init(int32 streamIndex);
917 
918 			status_t			GetMetaData(BMessage* data);
919 
920 	// Support for AVFormatReader
921 			status_t			GetStreamInfo(int64* frameCount,
922 									bigtime_t* duration, media_format* format,
923 									const void** infoBuffer,
924 									size_t* infoSize) const;
925 
926 			status_t			FindKeyFrame(uint32 flags, int64* frame,
927 									bigtime_t* time) const;
928 	virtual	status_t			Seek(uint32 flags, int64* frame,
929 									bigtime_t* time);
930 
931 private:
932 	mutable	BLocker				fLock;
933 
934 			struct KeyframeInfo {
935 				bigtime_t		requestedTime;
936 				int64			requestedFrame;
937 				bigtime_t		reportedTime;
938 				int64			reportedFrame;
939 				uint32			seekFlags;
940 			};
941 	mutable	KeyframeInfo		fLastReportedKeyframe;
942 	mutable	StreamBase*			fGhostStream;
943 };
944 
945 
946 
947 AVFormatReader::Stream::Stream(BMediaIO* source, BLocker* streamLock)
948 	:
949 	StreamBase(source, streamLock, &fLock),
950 	fLock("stream lock"),
951 	fGhostStream(NULL)
952 {
953 	fLastReportedKeyframe.requestedTime = 0;
954 	fLastReportedKeyframe.requestedFrame = 0;
955 	fLastReportedKeyframe.reportedTime = 0;
956 	fLastReportedKeyframe.reportedFrame = 0;
957 }
958 
959 
960 AVFormatReader::Stream::~Stream()
961 {
962 	delete fGhostStream;
963 }
964 
965 
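// Newer FFmpeg versions (libavcodec 60, i.e. FFmpeg 6) describe the channel
// setup via AVChannelLayout, while older ones used the plain channels and
// channel_layout fields; these helpers hide the difference.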
966 static int
967 get_channel_count(AVCodecParameters* context)
968 {
969 #if LIBAVCODEC_VERSION_MAJOR >= 60
970 	return context->ch_layout.nb_channels;
971 #else
972 	return context->channels;
973 #endif
974 }
975 
976 
977 static int
978 get_channel_mask(AVCodecParameters* context)
979 {
980 #if LIBAVCODEC_VERSION_MAJOR >= 60
981 	return context->ch_layout.u.mask;
982 #else
983 	return context->channel_layout;
984 #endif
985 }
986 
987 
988 status_t
989 AVFormatReader::Stream::Init(int32 virtualIndex)
990 {
991 	TRACE("AVFormatReader::Stream::Init(%" B_PRId32 ")\n", virtualIndex);
992 
993 	status_t ret = StreamBase::Init(virtualIndex);
994 	if (ret != B_OK)
995 		return ret;
996 
997 	// Get a pointer to the AVCodecParameters for the stream at streamIndex.
998 	AVCodecParameters* codecParams = fStream->codecpar;
999 
1000 	// initialize the media_format for this stream
1001 	media_format* format = &fFormat;
1002 	format->Clear();
1003 
1004 	media_format_description description;
1005 
1006 	// Set format family and type depending on codec_type of the stream.
1007 	switch (codecParams->codec_type) {
1008 		case AVMEDIA_TYPE_AUDIO:
1009 			if ((codecParams->codec_id >= AV_CODEC_ID_PCM_S16LE)
1010 				&& (codecParams->codec_id <= AV_CODEC_ID_PCM_U8)) {
1011 				TRACE("  raw audio\n");
1012 				format->type = B_MEDIA_RAW_AUDIO;
1013 				description.family = B_ANY_FORMAT_FAMILY;
1014 				// This will then apparently be handled by the RawDecoder that
1015 				// is built into BMediaTrack.
1016 			} else {
1017 				TRACE("  encoded audio\n");
1018 				format->type = B_MEDIA_ENCODED_AUDIO;
1019 				description.family = B_MISC_FORMAT_FAMILY;
1020 				description.u.misc.file_format = 'ffmp';
1021 			}
1022 			break;
1023 		case AVMEDIA_TYPE_VIDEO:
1024 			TRACE("  encoded video\n");
1025 			format->type = B_MEDIA_ENCODED_VIDEO;
1026 			description.family = B_MISC_FORMAT_FAMILY;
1027 			description.u.misc.file_format = 'ffmp';
1028 			break;
1029 		default:
1030 			TRACE("  unknown type\n");
1031 			format->type = B_MEDIA_UNKNOWN_TYPE;
1032 			return B_ERROR;
1033 			break;
1034 	}
1035 
1036 	if (format->type == B_MEDIA_RAW_AUDIO) {
1037 		// We cannot describe all raw-audio formats; some are unsupported.
1038 		switch (codecParams->codec_id) {
1039 			case AV_CODEC_ID_PCM_S16LE:
1040 				format->u.raw_audio.format
1041 					= media_raw_audio_format::B_AUDIO_SHORT;
1042 				format->u.raw_audio.byte_order
1043 					= B_MEDIA_LITTLE_ENDIAN;
1044 				break;
1045 			case AV_CODEC_ID_PCM_S16BE:
1046 				format->u.raw_audio.format
1047 					= media_raw_audio_format::B_AUDIO_SHORT;
1048 				format->u.raw_audio.byte_order
1049 					= B_MEDIA_BIG_ENDIAN;
1050 				break;
1051 			case AV_CODEC_ID_PCM_U16LE:
1052 //				format->u.raw_audio.format
1053 //					= media_raw_audio_format::B_AUDIO_USHORT;
1054 //				format->u.raw_audio.byte_order
1055 //					= B_MEDIA_LITTLE_ENDIAN;
1056 				return B_NOT_SUPPORTED;
1057 				break;
1058 			case AV_CODEC_ID_PCM_U16BE:
1059 //				format->u.raw_audio.format
1060 //					= media_raw_audio_format::B_AUDIO_USHORT;
1061 //				format->u.raw_audio.byte_order
1062 //					= B_MEDIA_BIG_ENDIAN;
1063 				return B_NOT_SUPPORTED;
1064 				break;
1065 			case AV_CODEC_ID_PCM_S8:
1066 				format->u.raw_audio.format
1067 					= media_raw_audio_format::B_AUDIO_CHAR;
1068 				break;
1069 			case AV_CODEC_ID_PCM_U8:
1070 				format->u.raw_audio.format
1071 					= media_raw_audio_format::B_AUDIO_UCHAR;
1072 				break;
1073 			default:
1074 				return B_NOT_SUPPORTED;
1075 				break;
1076 		}
1077 	} else {
1078 		if (description.family == B_MISC_FORMAT_FAMILY)
1079 			description.u.misc.codec = codecParams->codec_id;
1080 
1081 		BMediaFormats formats;
1082 		status_t status = formats.GetFormatFor(description, format);
1083 		if (status < B_OK)
1084 			TRACE("  formats.GetFormatFor() error: %s\n", strerror(status));
1085 
1086 		format->user_data_type = B_CODEC_TYPE_INFO;
1087 		*(uint32*)format->user_data = codecParams->codec_tag;
1088 		format->user_data[4] = 0;
1089 	}
1090 
1091 	format->require_flags = 0;
1092 	format->deny_flags = B_MEDIA_MAUI_UNDEFINED_FLAGS;
1093 
1094 	switch (format->type) {
1095 		case B_MEDIA_RAW_AUDIO:
1096 			format->u.raw_audio.frame_rate = (float)codecParams->sample_rate;
1097 			format->u.raw_audio.channel_count = get_channel_count(codecParams);
1098 			format->u.raw_audio.channel_mask = get_channel_mask(codecParams);
1099 			ConvertAVSampleFormatToRawAudioFormat(
1100 				(AVSampleFormat)codecParams->format,
1101 				format->u.raw_audio.format);
1102 			format->u.raw_audio.buffer_size = 0;
1103 
1104 			// Read one packet and mark it for later re-use. (So our first
1105 			// GetNextChunk() call does not read another packet.)
1106 			if (_NextPacket(true) == B_OK) {
1107 				TRACE("  successfully determined audio buffer size: %d\n",
1108 					fPacket.size);
1109 				format->u.raw_audio.buffer_size = fPacket.size;
1110 			}
1111 			break;
1112 
1113 		case B_MEDIA_ENCODED_AUDIO:
1114 			format->u.encoded_audio.bit_rate = codecParams->bit_rate;
1115 			format->u.encoded_audio.frame_size = codecParams->frame_size;
1116 			// Fill in some info about possible output format
1117 			format->u.encoded_audio.output
1118 				= media_multi_audio_format::wildcard;
1119 			format->u.encoded_audio.output.frame_rate
1120 				= (float)codecParams->sample_rate;
1121 			// Channel layout bits match between the Be API and FFmpeg.
1122 			format->u.encoded_audio.output.channel_count = get_channel_count(codecParams);
1123 			format->u.encoded_audio.multi_info.channel_mask = get_channel_mask(codecParams);
1124 			format->u.encoded_audio.output.byte_order
1125 				= avformat_to_beos_byte_order(
1126 					(AVSampleFormat)codecParams->format);
1127 
1128 			ConvertAVSampleFormatToRawAudioFormat(
1129 					(AVSampleFormat)codecParams->format,
1130 				format->u.encoded_audio.output.format);
1131 
1132 			if (codecParams->block_align > 0) {
1133 				format->u.encoded_audio.output.buffer_size
1134 					= codecParams->block_align;
1135 			} else {
1136 				format->u.encoded_audio.output.buffer_size
1137 					= codecParams->frame_size * get_channel_count(codecParams)
1138 						* (format->u.encoded_audio.output.format
1139 							& media_raw_audio_format::B_AUDIO_SIZE_MASK);
1140 			}
1141 			break;
1142 
1143 		case B_MEDIA_ENCODED_VIDEO:
1144 // TODO: Specifying any of these seems to throw off the format matching
1145 // later on.
1146 //			format->u.encoded_video.avg_bit_rate = codecParams->bit_rate;
1147 //			format->u.encoded_video.max_bit_rate = codecParams->bit_rate
1148 //				+ codecParams->bit_rate_tolerance;
1149 
1150 //			format->u.encoded_video.encoding
1151 //				= media_encoded_video_format::B_ANY;
1152 
1153 //			format->u.encoded_video.frame_size = 1;
1154 //			format->u.encoded_video.forward_history = 0;
1155 //			format->u.encoded_video.backward_history = 0;
1156 
1157 			format->u.encoded_video.output.field_rate = FrameRate();
1158 			format->u.encoded_video.output.interlace = 1;
1159 
1160 			format->u.encoded_video.output.first_active = 0;
1161 			format->u.encoded_video.output.last_active
1162 				= codecParams->height - 1;
1163 				// TODO: Maybe libavformat actually provides that info
1164 				// somewhere...
1165 			format->u.encoded_video.output.orientation
1166 				= B_VIDEO_TOP_LEFT_RIGHT;
1167 
1168 			ConvertAVCodecParametersToVideoAspectWidthAndHeight(*codecParams,
1169 				format->u.encoded_video.output.pixel_width_aspect,
1170 				format->u.encoded_video.output.pixel_height_aspect);
1171 
1172 			format->u.encoded_video.output.display.format
1173 				= pixfmt_to_colorspace(codecParams->format);
1174 			format->u.encoded_video.output.display.line_width
1175 				= codecParams->width;
1176 			format->u.encoded_video.output.display.line_count
1177 				= codecParams->height;
1178 			TRACE("  width/height: %d/%d\n", codecParams->width,
1179 				codecParams->height);
1180 			format->u.encoded_video.output.display.bytes_per_row = 0;
1181 			format->u.encoded_video.output.display.pixel_offset = 0;
1182 			format->u.encoded_video.output.display.line_offset = 0;
1183 			format->u.encoded_video.output.display.flags = 0; // TODO
1184 
1185 			break;
1186 
1187 		default:
1188 			// This is an unknown format to us.
1189 			break;
1190 	}
1191 
1192 	// Add the meta data, if any
1193 	if (codecParams->extradata_size > 0) {
1194 		format->SetMetaData(codecParams->extradata,
1195 			codecParams->extradata_size);
1196 		TRACE("  extradata: %p\n", format->MetaData());
1197 	}
1198 
1199 	TRACE("  extradata_size: %d\n", codecParams->extradata_size);
1200 //	TRACE("  intra_matrix: %p\n", codecParams->intra_matrix);
1201 //	TRACE("  inter_matrix: %p\n", codecParams->inter_matrix);
1202 //	TRACE("  get_buffer(): %p\n", codecParams->get_buffer);
1203 //	TRACE("  release_buffer(): %p\n", codecParams->release_buffer);
1204 
1205 #ifdef TRACE_AVFORMAT_READER
1206 	char formatString[512];
1207 	if (string_for_format(*format, formatString, sizeof(formatString)))
1208 		TRACE("  format: %s\n", formatString);
1209 
1210 	uint32 encoding = format->Encoding();
1211 	TRACE("  encoding '%.4s'\n", (char*)&encoding);
1212 #endif
1213 
1214 	return B_OK;
1215 }
1216 
1217 
1218 status_t
1219 AVFormatReader::Stream::GetMetaData(BMessage* data)
1220 {
1221 	BAutolock _(&fLock);
1222 
1223 	avdictionary_to_message(fStream->metadata, data);
1224 
1225 	return B_OK;
1226 }
1227 
1228 
1229 status_t
1230 AVFormatReader::Stream::GetStreamInfo(int64* frameCount,
1231 	bigtime_t* duration, media_format* format, const void** infoBuffer,
1232 	size_t* infoSize) const
1233 {
1234 	BAutolock _(&fLock);
1235 
1236 	TRACE("AVFormatReader::Stream::GetStreamInfo(%" B_PRId32 ")\n",
1237 		VirtualIndex());
1238 
1239 	double frameRate = FrameRate();
1240 	TRACE("  frameRate: %.4f\n", frameRate);
1241 
1242 	#ifdef TRACE_AVFORMAT_READER
1243 	if (fStream->start_time != AV_NOPTS_VALUE) {
1244 		bigtime_t startTime = _ConvertFromStreamTimeBase(fStream->start_time);
1245 		TRACE("  start_time: %" B_PRIdBIGTIME " or %.5fs\n", startTime,
1246 			startTime / 1000000.0);
1247 		// TODO: Handle start time in FindKeyFrame() and Seek()?!
1248 	}
1249 	#endif // TRACE_AVFORMAT_READER
1250 
1251 	*duration = Duration();
1252 
1253 	TRACE("  duration: %" B_PRIdBIGTIME " or %.5fs\n", *duration, *duration / 1000000.0);
1254 
1255 	#if 0
1256 	if (fStream->nb_index_entries > 0) {
1257 		TRACE("  dump of index entries:\n");
1258 		int count = 5;
1259 		int firstEntriesCount = min_c(fStream->nb_index_entries, count);
1260 		int i = 0;
1261 		for (; i < firstEntriesCount; i++) {
1262 			AVIndexEntry& entry = fStream->index_entries[i];
1263 			bigtime_t timeGlobal = entry.timestamp;
1264 			bigtime_t timeNative = _ConvertFromStreamTimeBase(timeGlobal);
1265 			TRACE("    [%d] native: %.5fs global: %.5fs\n", i,
1266 				timeNative / 1000000.0f, timeGlobal / 1000000.0f);
1267 		}
1268 		if (fStream->nb_index_entries - count > i) {
1269 			i = fStream->nb_index_entries - count;
1270 			TRACE("    ...\n");
1271 			for (; i < fStream->nb_index_entries; i++) {
1272 				AVIndexEntry& entry = fStream->index_entries[i];
1273 				bigtime_t timeGlobal = entry.timestamp;
1274 				bigtime_t timeNative = _ConvertFromStreamTimeBase(timeGlobal);
1275 				TRACE("    [%d] native: %.5fs global: %.5fs\n", i,
1276 					timeNative / 1000000.0f, timeGlobal / 1000000.0f);
1277 			}
1278 		}
1279 	}
1280 	#endif
1281 
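	// nb_frames counts codec frames; for audio each frame presumably holds
	// frame_size samples, so the product is the total number of sample
	// frames. For video frame_size is 0, which forces the duration based
	// estimate below.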
1282 	*frameCount = fStream->nb_frames * fStream->codecpar->frame_size;
1283 	if (*frameCount == 0) {
1284 		// Calculate from duration and frame rate
1285 		if (fStream->duration != AV_NOPTS_VALUE) {
1286 			*frameCount = (int64)(fStream->duration * frameRate
1287 				* fStream->time_base.num / fStream->time_base.den);
1288 		} else if (fContext->duration != AV_NOPTS_VALUE) {
1289 			*frameCount = (int64)(fContext->duration * frameRate);
1290 		}
1291 		TRACE("  frameCount calculated: %" B_PRIu64 ", from context: %" B_PRIu64 "\n",
1292 			*frameCount, fStream->nb_frames);
1293 	} else
1294 		TRACE("  frameCount: %" B_PRId64 "\n", *frameCount);
1295 
1296 	*format = fFormat;
1297 
1298 	*infoBuffer = fStream->codecpar->extradata;
1299 	*infoSize = fStream->codecpar->extradata_size;
1300 
1301 	return B_OK;
1302 }
1303 
1304 
1305 status_t
1306 AVFormatReader::Stream::FindKeyFrame(uint32 flags, int64* frame,
1307 	bigtime_t* time) const
1308 {
1309 	BAutolock _(&fLock);
1310 
1311 	if (fContext == NULL || fStream == NULL)
1312 		return B_NO_INIT;
1313 
1314 	TRACE_FIND("AVFormatReader::Stream::FindKeyFrame(%ld,%s%s%s%s, "
1315 		"%lld, %lld)\n", VirtualIndex(),
1316 		(flags & B_MEDIA_SEEK_TO_FRAME) ? " B_MEDIA_SEEK_TO_FRAME" : "",
1317 		(flags & B_MEDIA_SEEK_TO_TIME) ? " B_MEDIA_SEEK_TO_TIME" : "",
1318 		(flags & B_MEDIA_SEEK_CLOSEST_BACKWARD)
1319 			? " B_MEDIA_SEEK_CLOSEST_BACKWARD" : "",
1320 		(flags & B_MEDIA_SEEK_CLOSEST_FORWARD)
1321 			? " B_MEDIA_SEEK_CLOSEST_FORWARD" : "",
1322 		*frame, *time);
1323 
1324 	bool inLastRequestedRange = false;
1325 	if ((flags & B_MEDIA_SEEK_TO_FRAME) != 0) {
1326 		if (fLastReportedKeyframe.reportedFrame
1327 			<= fLastReportedKeyframe.requestedFrame) {
1328 			inLastRequestedRange
1329 				= *frame >= fLastReportedKeyframe.reportedFrame
1330 					&& *frame <= fLastReportedKeyframe.requestedFrame;
1331 		} else {
1332 			inLastRequestedRange
1333 				= *frame >= fLastReportedKeyframe.requestedFrame
1334 					&& *frame <= fLastReportedKeyframe.reportedFrame;
1335 		}
1336 	} else if ((flags & B_MEDIA_SEEK_TO_FRAME) == 0) {
1337 		if (fLastReportedKeyframe.reportedTime
1338 			<= fLastReportedKeyframe.requestedTime) {
1339 			inLastRequestedRange
1340 				= *time >= fLastReportedKeyframe.reportedTime
1341 					&& *time <= fLastReportedKeyframe.requestedTime;
1342 		} else {
1343 			inLastRequestedRange
1344 				= *time >= fLastReportedKeyframe.requestedTime
1345 					&& *time <= fLastReportedKeyframe.reportedTime;
1346 		}
1347 	}
1348 
1349 	if (inLastRequestedRange) {
1350 		*frame = fLastReportedKeyframe.reportedFrame;
1351 		*time = fLastReportedKeyframe.reportedTime;
1352 		TRACE_FIND("  same as last reported keyframe\n");
1353 		return B_OK;
1354 	}
1355 
1356 	double frameRate = FrameRate();
1357 	if ((flags & B_MEDIA_SEEK_TO_FRAME) != 0)
1358 		*time = (bigtime_t)(*frame * 1000000.0 / frameRate + 0.5);
1359 
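	// The lookup must not move the actual demuxing position of this stream,
	// so it is done on a second, independent demuxer instance (the "ghost
	// stream") that shares the same source and locks.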
1360 	status_t ret;
1361 	if (fGhostStream == NULL) {
1362 		BAutolock _(fSourceLock);
1363 
1364 		fGhostStream = new(std::nothrow) StreamBase(fSource, fSourceLock,
1365 			&fLock);
1366 		if (fGhostStream == NULL) {
1367 			TRACE("  failed to allocate ghost stream\n");
1368 			return B_NO_MEMORY;
1369 		}
1370 
1371 		ret = fGhostStream->Open();
1372 		if (ret != B_OK) {
1373 			TRACE("  ghost stream failed to open: %s\n", strerror(ret));
1374 			return B_ERROR;
1375 		}
1376 
1377 		ret = fGhostStream->Init(fVirtualIndex);
1378 		if (ret != B_OK) {
1379 			TRACE("  ghost stream failed to init: %s\n", strerror(ret));
1380 			return B_ERROR;
1381 		}
1382 	}
1383 	fLastReportedKeyframe.requestedFrame = *frame;
1384 	fLastReportedKeyframe.requestedTime = *time;
1385 	fLastReportedKeyframe.seekFlags = flags;
1386 
1387 	ret = fGhostStream->Seek(flags, frame, time);
1388 	if (ret != B_OK) {
1389 		TRACE("  ghost stream failed to seek: %s\n", strerror(ret));
1390 		return B_ERROR;
1391 	}
1392 
1393 	fLastReportedKeyframe.reportedFrame = *frame;
1394 	fLastReportedKeyframe.reportedTime = *time;
1395 
1396 	TRACE_FIND("  found time: %.2fs\n", *time / 1000000.0);
1397 	if ((flags & B_MEDIA_SEEK_TO_FRAME) != 0) {
1398 		*frame = int64_t(*time * FrameRate() / 1000000.0 + 0.5);
1399 		TRACE_FIND("  found frame: %lld\n", *frame);
1400 	}
1401 
1402 	return B_OK;
1403 }
1404 
1405 
1406 status_t
1407 AVFormatReader::Stream::Seek(uint32 flags, int64* frame, bigtime_t* time)
1408 {
1409 	BAutolock _(&fLock);
1410 
1411 	if (fContext == NULL || fStream == NULL)
1412 		return B_NO_INIT;
1413 
1414 	// Put the old requested values into frame/time, since we already know
1415 	// that the sought frame/time will then match the reported values.
1416 	// TODO: Will not work if client changes seek flags (from backwards to
1417 	// forward or vice versa)!!
1418 	bool inLastRequestedRange = false;
1419 	if ((flags & B_MEDIA_SEEK_TO_FRAME) != 0) {
1420 		if (fLastReportedKeyframe.reportedFrame
1421 			<= fLastReportedKeyframe.requestedFrame) {
1422 			inLastRequestedRange
1423 				= *frame >= fLastReportedKeyframe.reportedFrame
1424 					&& *frame <= fLastReportedKeyframe.requestedFrame;
1425 		} else {
1426 			inLastRequestedRange
1427 				= *frame >= fLastReportedKeyframe.requestedFrame
1428 					&& *frame <= fLastReportedKeyframe.reportedFrame;
1429 		}
1430 	} else if ((flags & B_MEDIA_SEEK_TO_FRAME) == 0) {
1431 		if (fLastReportedKeyframe.reportedTime
1432 			<= fLastReportedKeyframe.requestedTime) {
1433 			inLastRequestedRange
1434 				= *time >= fLastReportedKeyframe.reportedTime
1435 					&& *time <= fLastReportedKeyframe.requestedTime;
1436 		} else {
1437 			inLastRequestedRange
1438 				= *time >= fLastReportedKeyframe.requestedTime
1439 					&& *time <= fLastReportedKeyframe.reportedTime;
1440 		}
1441 	}
1442 
1443 	if (inLastRequestedRange) {
1444 		*frame = fLastReportedKeyframe.requestedFrame;
1445 		*time = fLastReportedKeyframe.requestedTime;
1446 		flags = fLastReportedKeyframe.seekFlags;
1447 	}
1448 
1449 	return StreamBase::Seek(flags, frame, time);
1450 }
1451 
1452 
1453 // #pragma mark - AVFormatReader
1454 
1455 
1456 AVFormatReader::AVFormatReader()
1457 	:
1458 	fCopyright(""),
1459 	fStreams(NULL),
1460 	fSourceLock("source I/O lock")
1461 {
1462 	TRACE("AVFormatReader::AVFormatReader\n");
1463 }
1464 
1465 
1466 AVFormatReader::~AVFormatReader()
1467 {
1468 	TRACE("AVFormatReader::~AVFormatReader\n");
1469 	if (fStreams != NULL) {
1470 		// The client was supposed to call FreeCookie() on all
1471 		// allocated streams. FreeCookie() never deletes the first stream,
1472 		// so we delete it here, and the other ones just in case.
1473 		int32 count = fStreams[0]->CountStreams();
1474 		for (int32 i = 0; i < count; i++)
1475 			delete fStreams[i];
1476 		delete[] fStreams;
1477 	}
1478 }
1479 
1480 
1481 // #pragma mark -
1482 
1483 
1484 const char*
1485 AVFormatReader::Copyright()
1486 {
1487 	if (fCopyright.Length() <= 0) {
1488 		BMessage message;
1489 		if (GetMetaData(&message) == B_OK)
1490 			message.FindString("copyright", &fCopyright);
1491 	}
1492 	return fCopyright.String();
1493 }
1494 
1495 
1496 status_t
1497 AVFormatReader::Sniff(int32* _streamCount)
1498 {
1499 	TRACE("AVFormatReader::Sniff\n");
1500 
1501 	BMediaIO* source = dynamic_cast<BMediaIO*>(Source());
1502 	if (source == NULL) {
1503 		TRACE("  not a BMediaIO, but we need it to be one.\n");
1504 		return B_NOT_SUPPORTED;
1505 	}
1506 
1507 	Stream* stream = new(std::nothrow) Stream(source,
1508 		&fSourceLock);
1509 	if (stream == NULL) {
1510 		ERROR("AVFormatReader::Sniff() - failed to allocate Stream\n");
1511 		return B_NO_MEMORY;
1512 	}
1513 
1514 	ObjectDeleter<Stream> streamDeleter(stream);
1515 
1516 	status_t ret = stream->Open();
1517 	if (ret != B_OK) {
1518 		TRACE("  failed to detect stream: %s\n", strerror(ret));
1519 		return ret;
1520 	}
1521 
1522 	delete[] fStreams;
1523 	fStreams = NULL;
1524 
1525 	int32 streamCount = stream->CountStreams();
1526 	if (streamCount == 0) {
1527 		TRACE("  failed to detect any streams: %s\n", strerror(ret));
1528 		return B_ERROR;
1529 	}
1530 
1531 	fStreams = new(std::nothrow) Stream*[streamCount];
1532 	if (fStreams == NULL) {
1533 		ERROR("AVFormatReader::Sniff() - failed to allocate streams\n");
1534 		return B_NO_MEMORY;
1535 	}
1536 
1537 	memset(fStreams, 0, sizeof(Stream*) * streamCount);
1538 	fStreams[0] = stream;
1539 	streamDeleter.Detach();
1540 
1541 	#ifdef TRACE_AVFORMAT_READER
1542 	av_dump_format(const_cast<AVFormatContext*>(stream->Context()), 0, "", 0);
1543 	#endif
1544 
1545 	if (_streamCount != NULL)
1546 		*_streamCount = streamCount;
1547 
1548 	return B_OK;
1549 }
1550 
1551 
1552 void
1553 AVFormatReader::GetFileFormatInfo(media_file_format* mff)
1554 {
1555 	TRACE("AVFormatReader::GetFileFormatInfo\n");
1556 
1557 	if (fStreams == NULL)
1558 		return;
1559 
1560 	// The first cookie is always there!
1561 	const AVFormatContext* context = fStreams[0]->Context();
1562 
1563 	if (context == NULL || context->iformat == NULL) {
1564 		TRACE("  no AVFormatContext or AVInputFormat!\n");
1565 		return;
1566 	}
1567 
1568 	const media_file_format* format = demuxer_format_for(context->iformat);
1569 
1570 	mff->capabilities = media_file_format::B_READABLE
1571 		| media_file_format::B_KNOWS_ENCODED_VIDEO
1572 		| media_file_format::B_KNOWS_ENCODED_AUDIO
1573 		| media_file_format::B_IMPERFECTLY_SEEKABLE;
1574 
1575 	if (format != NULL) {
1576 		mff->family = format->family;
1577 	} else {
1578 		TRACE("  no DemuxerFormat for AVInputFormat!\n");
1579 		mff->family = B_MISC_FORMAT_FAMILY;
1580 	}
1581 
1582 	mff->version = 100;
1583 
1584 	if (format != NULL) {
1585 		strlcpy(mff->mime_type, format->mime_type, sizeof(mff->mime_type));
1586 	} else {
1587 		// TODO: Would be nice to be able to provide this from AVInputFormat,
1588 		// maybe by extending the FFmpeg code itself (all demuxers).
1589 		mff->mime_type[0] = '\0';
1590 	}
1591 
1592 	if (context->iformat->extensions != NULL)
1593 		strlcpy(mff->file_extension, context->iformat->extensions, sizeof(mff->file_extension));
1594 	else {
1595 		TRACE("  no file extensions for AVInputFormat.\n");
1596 		mff->file_extension[0] = '\0';
1597 	}
1598 
1599 	if (context->iformat->name != NULL)
1600 		strlcpy(mff->short_name,  context->iformat->name, sizeof(mff->short_name));
1601 	else {
1602 		TRACE("  no short name for AVInputFormat.\n");
1603 		mff->short_name[0] = '\0';
1604 	}
1605 
1606 	if (context->iformat->long_name != NULL) {
1607 		snprintf(mff->pretty_name, sizeof(mff->pretty_name), "%s (FFmpeg)",
1608 			context->iformat->long_name);
1609 	} else if (format != NULL)
1610 		snprintf(mff->pretty_name, sizeof(mff->pretty_name), "%.54s (FFmpeg)", format->pretty_name);
1611 	else
1612 		strlcpy(mff->pretty_name, "Unknown (FFmpeg)", sizeof(mff->pretty_name));
1613 }
1614 
1615 
1616 status_t
1617 AVFormatReader::GetMetaData(BMessage* _data)
1618 {
1619 	// The first cookie is always there!
1620 	const AVFormatContext* context = fStreams[0]->Context();
1621 
1622 	if (context == NULL)
1623 		return B_NO_INIT;
1624 
1625 	avdictionary_to_message(context->metadata, _data);
1626 
1627 	// Add chapter info
1628 	for (unsigned i = 0; i < context->nb_chapters; i++) {
1629 		AVChapter* chapter = context->chapters[i];
1630 		BMessage chapterData;
1631 		chapterData.AddInt64("start", bigtime_t(1000000.0
1632 			* chapter->start * chapter->time_base.num
1633 			/ chapter->time_base.den + 0.5));
1634 		chapterData.AddInt64("end", bigtime_t(1000000.0
1635 			* chapter->end * chapter->time_base.num
1636 			/ chapter->time_base.den + 0.5));
1637 
1638 		avdictionary_to_message(chapter->metadata, &chapterData);
1639 		_data->AddMessage("be:chapter", &chapterData);
1640 	}
1641 
1642 	// Add program info
1643 	for (unsigned i = 0; i < context->nb_programs; i++) {
1644 		BMessage programData;
1645 		avdictionary_to_message(context->programs[i]->metadata, &programData);
1646 		_data->AddMessage("be:program", &programData);
1647 	}
1648 
1649 	return B_OK;
1650 }
1651 
1652 
1653 // #pragma mark -
1654 
1655 
1656 status_t
1657 AVFormatReader::AllocateCookie(int32 streamIndex, void** _cookie)
1658 {
1659 	TRACE("AVFormatReader::AllocateCookie(%" B_PRId32 ")\n", streamIndex);
1660 
1661 	BAutolock _(fSourceLock);
1662 
1663 	if (fStreams == NULL)
1664 		return B_NO_INIT;
1665 
1666 	if (streamIndex < 0 || streamIndex >= fStreams[0]->CountStreams())
1667 		return B_BAD_INDEX;
1668 
1669 	if (_cookie == NULL)
1670 		return B_BAD_VALUE;
1671 
1672 	Stream* cookie = fStreams[streamIndex];
1673 	if (cookie == NULL) {
1674 		// Allocate the cookie
1675 		BMediaIO* source = dynamic_cast<BMediaIO*>(Source());
1676 		if (source == NULL) {
1677 			TRACE("  not a BMediaIO, but we need it to be one.\n");
1678 			return B_NOT_SUPPORTED;
1679 		}
1680 
1681 		cookie = new(std::nothrow) Stream(source, &fSourceLock);
1682 		if (cookie == NULL) {
1683 			ERROR("AVFormatReader::AllocateCookie() - failed to allocate "
1684 				"Stream\n");
1685 			return B_NO_MEMORY;
1686 		}
1687 
1688 		status_t ret = cookie->Open();
1689 		if (ret != B_OK) {
1690 			TRACE("  stream failed to open: %s\n", strerror(ret));
1691 			delete cookie;
1692 			return ret;
1693 		}
1694 	}
1695 
1696 	status_t ret = cookie->Init(streamIndex);
1697 	if (ret != B_OK) {
1698 		TRACE("  stream failed to initialize: %s\n", strerror(ret));
1699 		// NOTE: Never delete the first stream!
1700 		if (streamIndex != 0)
1701 			delete cookie;
1702 		return ret;
1703 	}
1704 
1705 	fStreams[streamIndex] = cookie;
1706 	*_cookie = cookie;
1707 
1708 	return B_OK;
1709 }
1710 
1711 
1712 status_t
1713 AVFormatReader::FreeCookie(void *_cookie)
1714 {
1715 	BAutolock _(fSourceLock);
1716 
1717 	Stream* cookie = reinterpret_cast<Stream*>(_cookie);
1718 
1719 	// NOTE: Never delete the first cookie!
1720 	if (cookie != NULL && cookie->VirtualIndex() != 0) {
1721 		if (fStreams != NULL)
1722 			fStreams[cookie->VirtualIndex()] = NULL;
1723 		delete cookie;
1724 	}
1725 
1726 	return B_OK;
1727 }
1728 
1729 
1730 // #pragma mark -
1731 
1732 
1733 status_t
1734 AVFormatReader::GetStreamInfo(void* _cookie, int64* frameCount,
1735 	bigtime_t* duration, media_format* format, const void** infoBuffer,
1736 	size_t* infoSize)
1737 {
1738 	Stream* cookie = reinterpret_cast<Stream*>(_cookie);
1739 	return cookie->GetStreamInfo(frameCount, duration, format, infoBuffer,
1740 		infoSize);
1741 }
1742 
1743 
1744 status_t
1745 AVFormatReader::GetStreamMetaData(void* _cookie, BMessage* _data)
1746 {
1747 	Stream* cookie = reinterpret_cast<Stream*>(_cookie);
1748 	return cookie->GetMetaData(_data);
1749 }
1750 
1751 
1752 status_t
1753 AVFormatReader::Seek(void* _cookie, uint32 seekTo, int64* frame,
1754 	bigtime_t* time)
1755 {
1756 	Stream* cookie = reinterpret_cast<Stream*>(_cookie);
1757 	return cookie->Seek(seekTo, frame, time);
1758 }
1759 
1760 
1761 status_t
1762 AVFormatReader::FindKeyFrame(void* _cookie, uint32 flags, int64* frame,
1763 	bigtime_t* time)
1764 {
1765 	Stream* cookie = reinterpret_cast<Stream*>(_cookie);
1766 	return cookie->FindKeyFrame(flags, frame, time);
1767 }
1768 
1769 
1770 status_t
1771 AVFormatReader::GetNextChunk(void* _cookie, const void** chunkBuffer,
1772 	size_t* chunkSize, media_header* mediaHeader)
1773 {
1774 	Stream* cookie = reinterpret_cast<Stream*>(_cookie);
1775 	return cookie->GetNextChunk(chunkBuffer, chunkSize, mediaHeader);
1776 }
1777 }