xref: /haiku/src/add-ons/media/plugins/ffmpeg/AVFormatReader.cpp (revision 1149fa6ece3567c466008a04ae8a830a63bafdaa)
1 /*
2  * Copyright 2009-2010, Stephan Aßmus <superstippi@gmx.de>
3  * All rights reserved. Distributed under the terms of the GNU L-GPL license.
4  */
5 
6 #include "AVFormatReader.h"
7 
8 #include <stdio.h>
9 #include <string.h>
10 #include <stdlib.h>
11 
12 #include <new>
13 
14 #include <AutoDeleter.h>
15 #include <Autolock.h>
16 #include <ByteOrder.h>
17 #include <DataIO.h>
18 #include <MediaDefs.h>
19 #include <MediaFormats.h>
20 
21 extern "C" {
22 	#include "avcodec.h"
23 	#include "avformat.h"
24 }
25 
26 #include "DemuxerTable.h"
27 #include "gfx_util.h"
28 
29 
30 //#define TRACE_AVFORMAT_READER
31 #ifdef TRACE_AVFORMAT_READER
32 #	define TRACE printf
33 #	define TRACE_IO(a...)
34 #	define TRACE_SEEK(a...) printf(a)
35 #	define TRACE_FIND(a...)
36 #	define TRACE_PACKET(a...)
37 #else
38 #	define TRACE(a...)
39 #	define TRACE_IO(a...)
40 #	define TRACE_SEEK(a...)
41 #	define TRACE_FIND(a...)
42 #	define TRACE_PACKET(a...)
43 #endif
44 
45 #define ERROR(a...) fprintf(stderr, a)
46 
47 
48 static const int64 kNoPTSValue = 0x8000000000000000LL;
	// NOTE: For some reason, I have trouble with the avcodec.h define:
50 	// #define AV_NOPTS_VALUE          INT64_C(0x8000000000000000)
51 	// INT64_C is not defined here.
52 
53 
54 static uint32
55 avformat_to_beos_format(SampleFormat format)
56 {
57 	switch (format) {
58 		case SAMPLE_FMT_U8: return media_raw_audio_format::B_AUDIO_UCHAR;
59 		case SAMPLE_FMT_S16: return media_raw_audio_format::B_AUDIO_SHORT;
60 		case SAMPLE_FMT_S32: return media_raw_audio_format::B_AUDIO_INT;
61 		case SAMPLE_FMT_FLT: return media_raw_audio_format::B_AUDIO_FLOAT;
62 		case SAMPLE_FMT_DBL: return media_raw_audio_format::B_AUDIO_DOUBLE;
63 		default:
64 			break;
65 	}
66 	return 0;
67 }
68 
69 
static uint32
avformat_to_beos_byte_order(SampleFormat format)
{
	// TODO: Huh?
	// The sample format is currently ignored; every format is reported as
	// host endian — presumably because FFmpeg delivers decoded samples in
	// native byte order, but confirm before relying on this.
	return B_MEDIA_HOST_ENDIAN;
}
76 
77 
78 static void
79 avdictionary_to_message(AVDictionary* dictionary, BMessage* message)
80 {
81 	if (dictionary == NULL)
82 		return;
83 
84 	AVDictionaryEntry* entry = NULL;
85 	while ((entry = av_dict_get(dictionary, "", entry,
86 		AV_METADATA_IGNORE_SUFFIX))) {
87 		// convert entry keys into something more meaningful using the names from
88 		// id3v2.c
89 		if (strcmp(entry->key, "TALB") == 0 || strcmp(entry->key, "TAL") == 0)
90 			message->AddString("album", entry->value);
91 		else if (strcmp(entry->key, "TCOM") == 0)
92 			message->AddString("composer", entry->value);
93 		else if (strcmp(entry->key, "TCON") == 0 || strcmp(entry->key, "TCO") == 0)
94 			message->AddString("genre", entry->value);
95 		else if (strcmp(entry->key, "TCOP") == 0)
96 			message->AddString("copyright", entry->value);
97 		else if (strcmp(entry->key, "TDRL") == 0 || strcmp(entry->key, "TDRC") == 0)
98 			message->AddString("date", entry->value);
99 		else if (strcmp(entry->key, "TENC") == 0 || strcmp(entry->key, "TEN") == 0)
100 			message->AddString("encoded_by", entry->value);
101 		else if (strcmp(entry->key, "TIT2") == 0 || strcmp(entry->key, "TT2") == 0)
102 			message->AddString("title", entry->value);
103 		else if (strcmp(entry->key, "TLAN") == 0)
104 			message->AddString("language", entry->value);
105 		else if (strcmp(entry->key, "TPE1") == 0 || strcmp(entry->key, "TP1") == 0)
106 			message->AddString("artist", entry->value);
107 		else if (strcmp(entry->key, "TPE2") == 0 || strcmp(entry->key, "TP2") == 0)
108 			message->AddString("album_artist", entry->value);
109 		else if (strcmp(entry->key, "TPE3") == 0 || strcmp(entry->key, "TP3") == 0)
110 			message->AddString("performer", entry->value);
111 		else if (strcmp(entry->key, "TPOS") == 0)
112 			message->AddString("disc", entry->value);
113 		else if (strcmp(entry->key, "TPUB") == 0)
114 			message->AddString("publisher", entry->value);
115 		else if (strcmp(entry->key, "TRCK") == 0 || strcmp(entry->key, "TRK") == 0)
116 			message->AddString("track", entry->value);
117 		else if (strcmp(entry->key, "TSOA") == 0)
118 			message->AddString("album-sort", entry->value);
119 		else if (strcmp(entry->key, "TSOP") == 0)
120 			message->AddString("artist-sort", entry->value);
121 		else if (strcmp(entry->key, "TSOT") == 0)
122 			message->AddString("title-sort", entry->value);
123 		else if (strcmp(entry->key, "TSSE") == 0)
124 			message->AddString("encoder", entry->value);
125 		else if (strcmp(entry->key, "TYER") == 0)
126 			message->AddString("year", entry->value);
127 		else
128 			message->AddString(entry->key, entry->value);
129 	}
130 }
131 
132 
133 // #pragma mark - StreamBase
134 
135 
// Wraps one AVFormatContext/AVStream pair and adapts libavformat's pull
// model (av_read_frame) plus its custom I/O hooks to a BPositionIO source
// shared between multiple streams.
class StreamBase {
public:
								StreamBase(BPositionIO* source,
									BLocker* sourceLock, BLocker* streamLock);
	virtual						~StreamBase();

	// Init an individual AVFormatContext
			status_t			Open();

	// Setup this stream to point to the AVStream at the given streamIndex.
	virtual	status_t			Init(int32 streamIndex);

	inline	const AVFormatContext* Context() const
									{ return fContext; }
			int32				Index() const;
			int32				CountStreams() const;
			int32				StreamIndexFor(int32 virtualIndex) const;
	inline	int32				VirtualIndex() const
									{ return fVirtualIndex; }

			double				FrameRate() const;
			bigtime_t			Duration() const;

	// Seeks to the given frame/time (depending on flags); on return
	// *frame and *time reflect the position actually reached.
	virtual	status_t			Seek(uint32 flags, int64* frame,
									bigtime_t* time);

	// Hands out the data of the next packet of this stream; the buffer
	// is only valid until the next call (see implementation).
			status_t			GetNextChunk(const void** chunkBuffer,
									size_t* chunkSize,
									media_header* mediaHeader);

protected:
	// I/O hooks for libavformat, cookie will be a Stream instance.
	// Since multiple StreamCookies use the same BPositionIO source, they
	// maintain the position individually, and may need to seek the source
	// if it does not match anymore in _Read().
	// TODO: This concept prevents the use of a plain BDataIO that is not
	// seekable. There is a version of AVFormatReader in the SVN history
	// which implements packet buffering for other streams when reading
	// packets. To support non-seekable network streams for example, this
	// code should be resurrected. It will make handling seekable streams,
	// especially from different threads that read from totally independent
	// positions in the stream (aggressive pre-buffering perhaps), a lot
	// more difficult with potentially large memory overhead.
	static	int					_Read(void* cookie, uint8* buffer,
									int bufferSize);
	static	off_t				_Seek(void* cookie, off_t offset, int whence);

	// Reads the next packet of this stream into fPacket; with reuse set,
	// the packet is kept for the next call instead of being consumed.
			status_t			_NextPacket(bool reuse);

	// Conversion between microseconds and the stream's time_base units.
			int64_t				_ConvertToStreamTimeBase(bigtime_t time) const;
			bigtime_t			_ConvertFromStreamTimeBase(int64_t time) const;

protected:
			BPositionIO*		fSource;
			off_t				fPosition;
			// Since different threads may read from the source,
			// we need to protect the file position and I/O by a lock.
			BLocker*			fSourceLock;

			BLocker*			fStreamLock;

			AVFormatContext*	fContext;
			AVStream*			fStream;
			int32				fVirtualIndex;

			media_format		fFormat;

			AVIOContext*		fIOContext;

			// The most recently read packet and whether it should be
			// handed out again by the next _NextPacket() call.
			AVPacket			fPacket;
			bool				fReusePacket;

			// Set from the input format's flags in Open().
			bool				fSeekByBytes;
			bool				fStreamBuildsIndexWhileReading;
};
211 
212 
StreamBase::StreamBase(BPositionIO* source, BLocker* sourceLock,
		BLocker* streamLock)
	:
	fSource(source),
	fPosition(0),
	fSourceLock(sourceLock),

	fStreamLock(streamLock),

	fContext(NULL),
	fStream(NULL),
	fVirtualIndex(-1),
	fIOContext(NULL),

	fReusePacket(false),

	fSeekByBytes(false),
	fStreamBuildsIndexWhileReading(false)
{
	// NOTE: Don't use streamLock here, it may not yet be initialized!

	// Start with a valid, empty packet; fFormat is zeroed here and filled
	// in later (see Stream::Init()).
	av_new_packet(&fPacket, 0);
	memset(&fFormat, 0, sizeof(media_format));
}
237 
238 
239 StreamBase::~StreamBase()
240 {
241 	if (fContext != NULL)
242 		avformat_close_input(&fContext);
243 	av_free_packet(&fPacket);
244 	av_free(fContext);
245 	av_free(fIOContext->buffer);
246 	av_free(fIOContext);
247 }
248 
249 
250 status_t
251 StreamBase::Open()
252 {
253 	BAutolock _(fStreamLock);
254 
255 	// Init probing data
256 	size_t bufferSize = 32768;
257 	uint8* buffer = static_cast<uint8*>(av_malloc(bufferSize));
258 	if (buffer == NULL)
259 		return B_NO_MEMORY;
260 
261 	// Allocate I/O context with buffer and hook functions, pass ourself as
262 	// cookie.
263 	memset(buffer, 0, bufferSize);
264 	fIOContext = avio_alloc_context(buffer, bufferSize, 0, this, _Read, 0,
265 		_Seek);
266 	if (fIOContext == NULL) {
267 		TRACE("StreamBase::Open() - avio_alloc_context() failed!\n");
268 		av_free(buffer);
269 		return B_ERROR;
270 	}
271 
272 	fContext = avformat_alloc_context();
273 	fContext->pb = fIOContext;
274 
275 	// Allocate our context and probe the input format
276 	if (avformat_open_input(&fContext, "", NULL, NULL) < 0) {
277 		TRACE("StreamBase::Open() - avformat_open_input() failed!\n");
278 		// avformat_open_input() frees the context in case of failure
279 		fContext = NULL;
280 		av_free(fIOContext);
281 		fIOContext = NULL;
282 		return B_NOT_SUPPORTED;
283 	}
284 
285 	TRACE("StreamBase::Open() - "
286 		"avformat_open_input(): %s\n", fContext->iformat->name);
287 	TRACE("  flags:%s%s%s%s%s\n",
288 		(fContext->iformat->flags & AVFMT_GLOBALHEADER) ? " AVFMT_GLOBALHEADER" : "",
289 		(fContext->iformat->flags & AVFMT_NOTIMESTAMPS) ? " AVFMT_NOTIMESTAMPS" : "",
290 		(fContext->iformat->flags & AVFMT_GENERIC_INDEX) ? " AVFMT_GENERIC_INDEX" : "",
291 		(fContext->iformat->flags & AVFMT_TS_DISCONT) ? " AVFMT_TS_DISCONT" : "",
292 		(fContext->iformat->flags & AVFMT_VARIABLE_FPS) ? " AVFMT_VARIABLE_FPS" : ""
293 	);
294 
295 
296 	// Retrieve stream information
297 	if (avformat_find_stream_info(fContext, NULL) < 0) {
298 		TRACE("StreamBase::Open() - avformat_find_stream_info() failed!\n");
299 		return B_NOT_SUPPORTED;
300 	}
301 
302 	fSeekByBytes = (fContext->iformat->flags & AVFMT_TS_DISCONT) != 0;
303 	fStreamBuildsIndexWhileReading
304 		= (fContext->iformat->flags & AVFMT_GENERIC_INDEX) != 0
305 			|| fSeekByBytes;
306 
307 	TRACE("StreamBase::Open() - "
308 		"av_find_stream_info() success! Seeking by bytes: %d\n",
309 		fSeekByBytes);
310 
311 	return B_OK;
312 }
313 
314 
315 status_t
316 StreamBase::Init(int32 virtualIndex)
317 {
318 	BAutolock _(fStreamLock);
319 
320 	TRACE("StreamBase::Init(%ld)\n", virtualIndex);
321 
322 	if (fContext == NULL)
323 		return B_NO_INIT;
324 
325 	int32 streamIndex = StreamIndexFor(virtualIndex);
326 	if (streamIndex < 0) {
327 		TRACE("  bad stream index!\n");
328 		return B_BAD_INDEX;
329 	}
330 
331 	TRACE("  context stream index: %ld\n", streamIndex);
332 
333 	// We need to remember the virtual index so that
334 	// AVFormatReader::FreeCookie() can clear the correct stream entry.
335 	fVirtualIndex = virtualIndex;
336 
337 	// Make us point to the AVStream at streamIndex
338 	fStream = fContext->streams[streamIndex];
339 
340 // NOTE: Discarding other streams works for most, but not all containers,
341 // for example it does not work for the ASF demuxer. Since I don't know what
342 // other demuxer it breaks, let's just keep reading packets for unwanted
343 // streams, it just makes the _GetNextPacket() function slightly less
344 // efficient.
345 //	// Discard all other streams
346 //	for (unsigned i = 0; i < fContext->nb_streams; i++) {
347 //		if (i != (unsigned)streamIndex)
348 //			fContext->streams[i]->discard = AVDISCARD_ALL;
349 //	}
350 
351 	return B_OK;
352 }
353 
354 
355 int32
356 StreamBase::Index() const
357 {
358 	if (fStream != NULL)
359 		return fStream->index;
360 	return -1;
361 }
362 
363 
364 int32
365 StreamBase::CountStreams() const
366 {
367 	// Figure out the stream count. If the context has "AVPrograms", use
368 	// the first program (for now).
369 	// TODO: To support "programs" properly, the BMediaFile/Track API should
370 	// be extended accordingly. I guess programs are like TV channels in the
371 	// same satilite transport stream. Maybe call them "TrackGroups".
372 	if (fContext->nb_programs > 0) {
373 		// See libavformat/utils.c:dump_format()
374 		return fContext->programs[0]->nb_stream_indexes;
375 	}
376 	return fContext->nb_streams;
377 }
378 
379 
380 int32
381 StreamBase::StreamIndexFor(int32 virtualIndex) const
382 {
383 	// NOTE: See CountStreams()
384 	if (fContext->nb_programs > 0) {
385 		const AVProgram* program = fContext->programs[0];
386 		if (virtualIndex >= 0
387 			&& virtualIndex < (int32)program->nb_stream_indexes) {
388 			return program->stream_index[virtualIndex];
389 		}
390 	} else {
391 		if (virtualIndex >= 0 && virtualIndex < (int32)fContext->nb_streams)
392 			return virtualIndex;
393 	}
394 	return -1;
395 }
396 
397 
398 double
399 StreamBase::FrameRate() const
400 {
401 	// TODO: Find a way to always calculate a correct frame rate...
402 	double frameRate = 1.0;
403 	switch (fStream->codec->codec_type) {
404 		case AVMEDIA_TYPE_AUDIO:
405 			frameRate = (double)fStream->codec->sample_rate;
406 			break;
407 		case AVMEDIA_TYPE_VIDEO:
408 			if (fStream->avg_frame_rate.den && fStream->avg_frame_rate.num)
409 				frameRate = av_q2d(fStream->avg_frame_rate);
410 			else if (fStream->r_frame_rate.den && fStream->r_frame_rate.num)
411 				frameRate = av_q2d(fStream->r_frame_rate);
412 			else if (fStream->time_base.den && fStream->time_base.num)
413 				frameRate = 1 / av_q2d(fStream->time_base);
414 			else if (fStream->codec->time_base.den
415 				&& fStream->codec->time_base.num) {
416 				frameRate = 1 / av_q2d(fStream->codec->time_base);
417 			}
418 
419 			// TODO: Fix up interlaced video for real
420 			if (frameRate == 50.0f)
421 				frameRate = 25.0f;
422 			break;
423 		default:
424 			break;
425 	}
426 	if (frameRate <= 0.0)
427 		frameRate = 1.0;
428 	return frameRate;
429 }
430 
431 
432 bigtime_t
433 StreamBase::Duration() const
434 {
435 	// TODO: This is not working correctly for all stream types...
436 	// It seems that the calculations here are correct, because they work
437 	// for a couple of streams and are in line with the documentation, but
438 	// unfortunately, libavformat itself seems to set the time_base and
439 	// duration wrongly sometimes. :-(
440 	if ((int64)fStream->duration != kNoPTSValue)
441 		return _ConvertFromStreamTimeBase(fStream->duration);
442 	else if ((int64)fContext->duration != kNoPTSValue)
443 		return (bigtime_t)fContext->duration;
444 
445 	return 0;
446 }
447 
448 
// Seek the demuxer to the requested frame or time (selected via flags).
// On return *frame and *time are updated to the position actually reached.
// Two strategies are used: a byte-position bisection for formats with
// discontinuous timestamps (fSeekByBytes), and a regular timestamp seek
// with index-assisted correction otherwise.
status_t
StreamBase::Seek(uint32 flags, int64* frame, bigtime_t* time)
{
	BAutolock _(fStreamLock);

	if (fContext == NULL || fStream == NULL)
		return B_NO_INIT;

	TRACE_SEEK("StreamBase::Seek(%ld,%s%s%s%s, %lld, "
		"%lld)\n", VirtualIndex(),
		(flags & B_MEDIA_SEEK_TO_FRAME) ? " B_MEDIA_SEEK_TO_FRAME" : "",
		(flags & B_MEDIA_SEEK_TO_TIME) ? " B_MEDIA_SEEK_TO_TIME" : "",
		(flags & B_MEDIA_SEEK_CLOSEST_BACKWARD)
			? " B_MEDIA_SEEK_CLOSEST_BACKWARD" : "",
		(flags & B_MEDIA_SEEK_CLOSEST_FORWARD)
			? " B_MEDIA_SEEK_CLOSEST_FORWARD" : "",
		*frame, *time);

	double frameRate = FrameRate();
	if ((flags & B_MEDIA_SEEK_TO_FRAME) != 0) {
		// Seeking is always based on time, initialize it when client seeks
		// based on frame.
		*time = (bigtime_t)(*frame * 1000000.0 / frameRate + 0.5);
	}

	int64_t timeStamp = *time;

	int searchFlags = AVSEEK_FLAG_BACKWARD;
	if ((flags & B_MEDIA_SEEK_CLOSEST_FORWARD) != 0)
		searchFlags = 0;

	if (fSeekByBytes) {
		searchFlags |= AVSEEK_FLAG_BYTE;

		BAutolock _(fSourceLock);
		int64_t fileSize;
		if (fSource->GetSize(&fileSize) != B_OK)
			return B_NOT_SUPPORTED;
		int64_t duration = Duration();
		if (duration == 0)
			return B_NOT_SUPPORTED;

		// Initial guess: linear interpolation of the byte position from
		// the requested time relative to the total duration.
		timeStamp = int64_t(fileSize * ((double)timeStamp / duration));
		if ((flags & B_MEDIA_SEEK_CLOSEST_BACKWARD) != 0) {
			timeStamp -= 65536;
			if (timeStamp < 0)
				timeStamp = 0;
		}

		// Bisect over byte positions: seek, read a packet to learn the
		// time actually reached, and adjust the byte position until the
		// reached time is close enough to the requested one.
		bool seekAgain = true;
		bool seekForward = true;
		bigtime_t lastFoundTime = -1;
		int64_t closestTimeStampBackwards = -1;
		while (seekAgain) {
			if (avformat_seek_file(fContext, -1, INT64_MIN, timeStamp,
				INT64_MAX, searchFlags) < 0) {
				TRACE("  avformat_seek_file() (by bytes) failed.\n");
				return B_ERROR;
			}
			seekAgain = false;

			// Our last packet is toast in any case. Read the next one so we
			// know where we really seeked.
			fReusePacket = false;
			if (_NextPacket(true) == B_OK) {
				// Skip packets without a PTS; we need one to know where
				// we landed.
				while (fPacket.pts == kNoPTSValue) {
					fReusePacket = false;
					if (_NextPacket(true) != B_OK)
						return B_ERROR;
				}
				if (fPacket.pos >= 0)
					timeStamp = fPacket.pos;
				bigtime_t foundTime
					= _ConvertFromStreamTimeBase(fPacket.pts);
				if (foundTime != lastFoundTime) {
					lastFoundTime = foundTime;
					if (foundTime > *time) {
						// Overshot. If a suitable earlier position was
						// already found, settle for it.
						if (closestTimeStampBackwards >= 0) {
							timeStamp = closestTimeStampBackwards;
							seekAgain = true;
							seekForward = false;
							continue;
						}
						int64_t diff = int64_t(fileSize
							* ((double)(foundTime - *time) / (2 * duration)));
						if (diff < 8192)
							break;
						timeStamp -= diff;
						TRACE_SEEK("  need to seek back (%lld) (time: %.2f "
							"-> %.2f)\n", timeStamp, *time / 1000000.0,
							foundTime / 1000000.0);
						if (timeStamp < 0)
							foundTime = 0;
						else {
							seekAgain = true;
							continue;
						}
					} else if (seekForward && foundTime < *time - 100000) {
						// Undershot by more than 100ms; remember this
						// position as the closest one before the target.
						closestTimeStampBackwards = timeStamp;
						int64_t diff = int64_t(fileSize
							* ((double)(*time - foundTime) / (2 * duration)));
						if (diff < 8192)
							break;
						timeStamp += diff;
						TRACE_SEEK("  need to seek forward (%lld) (time: "
							"%.2f -> %.2f)\n", timeStamp, *time / 1000000.0,
							foundTime / 1000000.0);
						if (timeStamp > duration)
							foundTime = duration;
						else {
							seekAgain = true;
							continue;
						}
					}
				}
				TRACE_SEEK("  found time: %lld -> %lld (%.2f)\n", *time,
					foundTime, foundTime / 1000000.0);
				*time = foundTime;
				*frame = (uint64)(*time * frameRate / 1000000LL + 0.5);
				TRACE_SEEK("  seeked frame: %lld\n", *frame);
			} else {
				TRACE_SEEK("  _NextPacket() failed!\n");
				return B_ERROR;
			}
		}
	} else {
		// We may not get a PTS from the next packet after seeking, so
		// we try to get an expected time from the index.
		int64_t streamTimeStamp = _ConvertToStreamTimeBase(*time);
		int index = av_index_search_timestamp(fStream, streamTimeStamp,
			searchFlags);
		if (index < 0) {
			TRACE("  av_index_search_timestamp() failed\n");
		} else {
			if (index > 0) {
				const AVIndexEntry& entry = fStream->index_entries[index];
				streamTimeStamp = entry.timestamp;
			} else {
				// Some demuxers use the first index entry to store some
				// other information, like the total playing time for example.
				// Assume the timeStamp of the first entry is always 0.
				// TODO: Handle start-time offset?
				streamTimeStamp = 0;
			}
			bigtime_t foundTime = _ConvertFromStreamTimeBase(streamTimeStamp);
			bigtime_t timeDiff = foundTime > *time
				? foundTime - *time : *time - foundTime;

			if (timeDiff > 1000000
				&& (fStreamBuildsIndexWhileReading
					|| index == fStream->nb_index_entries - 1)) {
				// If the stream is building the index on the fly while parsing
				// it, we only have entries in the index for positions already
				// decoded, i.e. we cannot seek into the future. In that case,
				// just assume that we can seek where we want and leave
				// time/frame unmodified. Since successfully seeking one time
				// will generate index entries for the seeked to position, we
				// need to remember this in fStreamBuildsIndexWhileReading,
				// since when seeking back there will be later index entries,
				// but we still want to ignore the found entry.
				fStreamBuildsIndexWhileReading = true;
				TRACE_SEEK("  Not trusting generic index entry. "
					"(Current count: %d)\n", fStream->nb_index_entries);
			} else {
				// If we found a reasonably time, write it into *time.
				// After seeking, we will try to read the sought time from
				// the next packet. If the packet has no PTS value, we may
				// still have a more accurate time from the index lookup.
				*time = foundTime;
			}
		}

		if (avformat_seek_file(fContext, -1, INT64_MIN, timeStamp, INT64_MAX,
				searchFlags) < 0) {
			TRACE("  avformat_seek_file() failed.\n");
			// Try to fall back to av_seek_frame()
			timeStamp = _ConvertToStreamTimeBase(timeStamp);
			if (av_seek_frame(fContext, fStream->index, timeStamp,
				searchFlags) < 0) {
				TRACE("  avformat_seek_frame() failed as well.\n");
				// Fall back to seeking to the beginning by bytes
				timeStamp = 0;
				if (av_seek_frame(fContext, fStream->index, timeStamp,
						AVSEEK_FLAG_BYTE) < 0) {
					TRACE("  avformat_seek_frame() by bytes failed as "
						"well.\n");
					// Do not propagate error in any case. We fail if we can't
					// read another packet.
				} else
					*time = 0;
			}
		}

		// Our last packet is toast in any case. Read the next one so
		// we know where we really sought.
		bigtime_t foundTime = *time;

		fReusePacket = false;
		if (_NextPacket(true) == B_OK) {
			if (fPacket.pts != kNoPTSValue)
				foundTime = _ConvertFromStreamTimeBase(fPacket.pts);
			else
				TRACE_SEEK("  no PTS in packet after seeking\n");
		} else
			TRACE_SEEK("  _NextPacket() failed!\n");

		*time = foundTime;
		TRACE_SEEK("  sought time: %.2fs\n", *time / 1000000.0);
		*frame = (uint64)(*time * frameRate / 1000000.0 + 0.5);
		TRACE_SEEK("  sought frame: %lld\n", *frame);
	}

	return B_OK;
}
663 
664 
// Hand out the data of the next packet belonging to this stream and fill
// in the media_header (start time, key-frame flags, line counts) when one
// is provided. The returned chunk buffer aliases fPacket's data and is
// only valid until the next GetNextChunk()/Seek() call.
status_t
StreamBase::GetNextChunk(const void** chunkBuffer,
	size_t* chunkSize, media_header* mediaHeader)
{
	BAutolock _(fStreamLock);

	TRACE_PACKET("StreamBase::GetNextChunk()\n");

	// Get the last stream DTS before reading the next packet, since
	// then it points to that one.
	int64 lastStreamDTS = fStream->cur_dts;

	status_t ret = _NextPacket(false);
	if (ret != B_OK) {
		*chunkBuffer = NULL;
		*chunkSize = 0;
		return ret;
	}

	// NOTE: AVPacket has a field called "convergence_duration", for which
	// the documentation is quite interesting. It sounds like it could be
	// used to know the time until the next I-Frame in streams that don't
	// let you know the position of keyframes in another way (like through
	// the index).

	// According to libavformat documentation, fPacket is valid until the
	// next call to av_read_frame(). This is what we want and we can share
	// the memory with the least overhead.
	*chunkBuffer = fPacket.data;
	*chunkSize = fPacket.size;

	if (mediaHeader != NULL) {
		mediaHeader->type = fFormat.type;
		mediaHeader->buffer = 0;
		mediaHeader->destination = -1;
		mediaHeader->time_source = -1;
		mediaHeader->size_used = fPacket.size;
		// Prefer the packet's own PTS; fall back to the stream DTS
		// recorded above when the packet carries none.
		if (fPacket.pts != kNoPTSValue) {
//TRACE("  PTS: %lld (time_base.num: %d, .den: %d), stream DTS: %lld\n",
//fPacket.pts, fStream->time_base.num, fStream->time_base.den,
//fStream->cur_dts);
			mediaHeader->start_time = _ConvertFromStreamTimeBase(fPacket.pts);
		} else {
//TRACE("  PTS (stream): %lld (time_base.num: %d, .den: %d), stream DTS: %lld\n",
//lastStreamDTS, fStream->time_base.num, fStream->time_base.den,
//fStream->cur_dts);
			mediaHeader->start_time
				= _ConvertFromStreamTimeBase(lastStreamDTS);
		}
		mediaHeader->file_pos = fPacket.pos;
		mediaHeader->data_offset = 0;
		// Fill in the type-specific header fields.
		switch (mediaHeader->type) {
			case B_MEDIA_RAW_AUDIO:
				break;
			case B_MEDIA_ENCODED_AUDIO:
				mediaHeader->u.encoded_audio.buffer_flags
					= (fPacket.flags & AV_PKT_FLAG_KEY) ? B_MEDIA_KEY_FRAME : 0;
				break;
			case B_MEDIA_RAW_VIDEO:
				mediaHeader->u.raw_video.line_count
					= fFormat.u.raw_video.display.line_count;
				break;
			case B_MEDIA_ENCODED_VIDEO:
				mediaHeader->u.encoded_video.field_flags
					= (fPacket.flags & AV_PKT_FLAG_KEY) ? B_MEDIA_KEY_FRAME : 0;
				mediaHeader->u.encoded_video.line_count
					= fFormat.u.encoded_video.output.display.line_count;
				break;
			default:
				break;
		}
	}

//	static bigtime_t pts[2];
//	static bigtime_t lastPrintTime = system_time();
//	static BLocker printLock;
//	if (fStream->index < 2) {
//		if (fPacket.pts != kNoPTSValue)
//			pts[fStream->index] = _ConvertFromStreamTimeBase(fPacket.pts);
//		printLock.Lock();
//		bigtime_t now = system_time();
//		if (now - lastPrintTime > 1000000) {
//			printf("PTS: %.4f/%.4f, diff: %.4f\r", pts[0] / 1000000.0,
//				pts[1] / 1000000.0, (pts[0] - pts[1]) / 1000000.0);
//			fflush(stdout);
//			lastPrintTime = now;
//		}
//		printLock.Unlock();
//	}

	return B_OK;
}
757 
758 
759 // #pragma mark -
760 
761 
762 /*static*/ int
763 StreamBase::_Read(void* cookie, uint8* buffer, int bufferSize)
764 {
765 	StreamBase* stream = reinterpret_cast<StreamBase*>(cookie);
766 
767 	BAutolock _(stream->fSourceLock);
768 
769 	TRACE_IO("StreamBase::_Read(%p, %p, %d) position: %lld/%lld\n",
770 		cookie, buffer, bufferSize, stream->fPosition,
771 		stream->fSource->Position());
772 
773 	if (stream->fPosition != stream->fSource->Position()) {
774 		off_t position
775 			= stream->fSource->Seek(stream->fPosition, SEEK_SET);
776 		if (position != stream->fPosition)
777 			return -1;
778 	}
779 
780 	ssize_t read = stream->fSource->Read(buffer, bufferSize);
781 	if (read > 0)
782 		stream->fPosition += read;
783 
784 	TRACE_IO("  read: %ld\n", read);
785 	return (int)read;
786 
787 }
788 
789 
790 /*static*/ off_t
791 StreamBase::_Seek(void* cookie, off_t offset, int whence)
792 {
793 	TRACE_IO("StreamBase::_Seek(%p, %lld, %d)\n",
794 		cookie, offset, whence);
795 
796 	StreamBase* stream = reinterpret_cast<StreamBase*>(cookie);
797 
798 	BAutolock _(stream->fSourceLock);
799 
800 	// Support for special file size retrieval API without seeking
801 	// anywhere:
802 	if (whence == AVSEEK_SIZE) {
803 		off_t size;
804 		if (stream->fSource->GetSize(&size) == B_OK)
805 			return size;
806 		return -1;
807 	}
808 
809 	// If not requested to seek to an absolute position, we need to
810 	// confirm that the stream is currently at the position that we
811 	// think it is.
812 	if (whence != SEEK_SET
813 		&& stream->fPosition != stream->fSource->Position()) {
814 		off_t position
815 			= stream->fSource->Seek(stream->fPosition, SEEK_SET);
816 		if (position != stream->fPosition)
817 			return -1;
818 	}
819 
820 	off_t position = stream->fSource->Seek(offset, whence);
821 	TRACE_IO("  position: %lld\n", position);
822 	if (position < 0)
823 		return -1;
824 
825 	stream->fPosition = position;
826 
827 	return position;
828 }
829 
830 
831 status_t
832 StreamBase::_NextPacket(bool reuse)
833 {
834 	TRACE_PACKET("StreamBase::_NextPacket(%d)\n", reuse);
835 
836 	if (fReusePacket) {
837 		// The last packet was marked for reuse, so we keep using it.
838 		TRACE_PACKET("  re-using last packet\n");
839 		fReusePacket = reuse;
840 		return B_OK;
841 	}
842 
843 	av_free_packet(&fPacket);
844 
845 	while (true) {
846 		if (av_read_frame(fContext, &fPacket) < 0) {
847 			// NOTE: Even though we may get the error for a different stream,
848 			// av_read_frame() is not going to be successful from here on, so
849 			// it doesn't matter
850 			fReusePacket = false;
851 			return B_LAST_BUFFER_ERROR;
852 		}
853 
854 		if (fPacket.stream_index == Index())
855 			break;
856 
857 		// This is a packet from another stream, ignore it.
858 		av_free_packet(&fPacket);
859 	}
860 
861 	// Mark this packet with the new reuse flag.
862 	fReusePacket = reuse;
863 	return B_OK;
864 }
865 
866 
867 int64_t
868 StreamBase::_ConvertToStreamTimeBase(bigtime_t time) const
869 {
870 	int64 timeStamp = int64_t((double)time * fStream->time_base.den
871 		/ (1000000.0 * fStream->time_base.num) + 0.5);
872 	if (fStream->start_time != kNoPTSValue)
873 		timeStamp += fStream->start_time;
874 	return timeStamp;
875 }
876 
877 
878 bigtime_t
879 StreamBase::_ConvertFromStreamTimeBase(int64_t time) const
880 {
881 	if (fStream->start_time != kNoPTSValue)
882 		time -= fStream->start_time;
883 
884 	return bigtime_t(1000000.0 * time * fStream->time_base.num
885 		/ fStream->time_base.den + 0.5);
886 }
887 
888 
889 // #pragma mark - AVFormatReader::Stream
890 
891 
// Concrete per-track stream: adds media_format initialization, metadata
// retrieval and keyframe finding on top of StreamBase.
class AVFormatReader::Stream : public StreamBase {
public:
								Stream(BPositionIO* source,
									BLocker* streamLock);
	virtual						~Stream();

	// Setup this stream to point to the AVStream at the given streamIndex.
	// This will also initialize the media_format.
	virtual	status_t			Init(int32 streamIndex);

			status_t			GetMetaData(BMessage* data);

	// Support for AVFormatReader
			status_t			GetStreamInfo(int64* frameCount,
									bigtime_t* duration, media_format* format,
									const void** infoBuffer,
									size_t* infoSize) const;

			status_t			FindKeyFrame(uint32 flags, int64* frame,
									bigtime_t* time) const;
	virtual	status_t			Seek(uint32 flags, int64* frame,
									bigtime_t* time);

private:
	// Protects this stream; passed to StreamBase as the stream lock.
	mutable	BLocker				fLock;

			// Record of the last keyframe lookup: what the client asked
			// for and what was reported back, plus the seek flags used.
			struct KeyframeInfo {
				bigtime_t		requestedTime;
				int64			requestedFrame;
				bigtime_t		reportedTime;
				int64			reportedFrame;
				uint32			seekFlags;
			};
	mutable	KeyframeInfo		fLastReportedKeyframe;
			// NOTE(review): presumably a second demuxer instance used to
			// probe keyframes without disturbing this stream's read
			// position — confirm against FindKeyFrame()/Seek().
	mutable	StreamBase*			fGhostStream;
};
928 
929 
930 
931 AVFormatReader::Stream::Stream(BPositionIO* source, BLocker* streamLock)
932 	:
933 	StreamBase(source, streamLock, &fLock),
934 	fLock("stream lock"),
935 	fGhostStream(NULL)
936 {
937 	fLastReportedKeyframe.requestedTime = 0;
938 	fLastReportedKeyframe.requestedFrame = 0;
939 	fLastReportedKeyframe.reportedTime = 0;
940 	fLastReportedKeyframe.reportedFrame = 0;
941 }
942 
943 
AVFormatReader::Stream::~Stream()
{
	// The ghost stream is created lazily; deleting NULL is harmless.
	delete fGhostStream;
}
948 
949 
/*!	Initializes this stream for the stream at the given virtual index and
	fills in fFormat (the Media Kit media_format) from the stream's
	AVCodecContext.
	\param virtualIndex Index among the streams of a supported type
		(see StreamBase::Init()).
	\return B_OK on success, B_ERROR for unknown codec types,
		B_NOT_SUPPORTED for raw audio formats we cannot describe, or the
		error from StreamBase::Init().
*/
status_t
AVFormatReader::Stream::Init(int32 virtualIndex)
{
	TRACE("AVFormatReader::Stream::Init(%ld)\n", virtualIndex);

	status_t ret = StreamBase::Init(virtualIndex);
	if (ret != B_OK)
		return ret;

	// Get a pointer to the AVCodecContext for the stream at streamIndex.
	AVCodecContext* codecContext = fStream->codec;

#if 0
// stippi: Here I was experimenting with the question if some fields of the
// AVCodecContext change (or get filled out at all), if the AVCodec is opened.
	class CodecOpener {
	public:
		CodecOpener(AVCodecContext* context)
		{
			fCodecContext = context;
			AVCodec* codec = avcodec_find_decoder(context->codec_id);
			fCodecOpen = avcodec_open(context, codec) >= 0;
			if (!fCodecOpen)
				TRACE("  failed to open the codec!\n");
		}
		~CodecOpener()
		{
			if (fCodecOpen)
				avcodec_close(fCodecContext);
		}
	private:
		AVCodecContext*		fCodecContext;
		bool				fCodecOpen;
	} codecOpener(codecContext);
#endif

	// initialize the media_format for this stream
	media_format* format = &fFormat;
	memset(format, 0, sizeof(media_format));

	media_format_description description;

	// Set format family and type depending on codec_type of the stream.
	switch (codecContext->codec_type) {
		case AVMEDIA_TYPE_AUDIO:
			// The CODEC_ID_PCM_* range covers uncompressed PCM variants,
			// which map to B_MEDIA_RAW_AUDIO below.
			if ((codecContext->codec_id >= CODEC_ID_PCM_S16LE)
				&& (codecContext->codec_id <= CODEC_ID_PCM_U8)) {
				TRACE("  raw audio\n");
				format->type = B_MEDIA_RAW_AUDIO;
				description.family = B_ANY_FORMAT_FAMILY;
				// This will then apparently be handled by the (built into
				// BMediaTrack) RawDecoder.
			} else {
				TRACE("  encoded audio\n");
				format->type = B_MEDIA_ENCODED_AUDIO;
				description.family = B_MISC_FORMAT_FAMILY;
				description.u.misc.file_format = 'ffmp';
			}
			break;
		case AVMEDIA_TYPE_VIDEO:
			TRACE("  encoded video\n");
			format->type = B_MEDIA_ENCODED_VIDEO;
			description.family = B_MISC_FORMAT_FAMILY;
			description.u.misc.file_format = 'ffmp';
			break;
		default:
			TRACE("  unknown type\n");
			format->type = B_MEDIA_UNKNOWN_TYPE;
			return B_ERROR;
			break;
	}

	if (format->type == B_MEDIA_RAW_AUDIO) {
		// We cannot describe all raw-audio formats, some are unsupported.
		switch (codecContext->codec_id) {
			case CODEC_ID_PCM_S16LE:
				format->u.raw_audio.format
					= media_raw_audio_format::B_AUDIO_SHORT;
				format->u.raw_audio.byte_order
					= B_MEDIA_LITTLE_ENDIAN;
				break;
			case CODEC_ID_PCM_S16BE:
				format->u.raw_audio.format
					= media_raw_audio_format::B_AUDIO_SHORT;
				format->u.raw_audio.byte_order
					= B_MEDIA_BIG_ENDIAN;
				break;
			case CODEC_ID_PCM_U16LE:
				// There is no unsigned 16 bit format in the Media Kit.
//				format->u.raw_audio.format
//					= media_raw_audio_format::B_AUDIO_USHORT;
//				format->u.raw_audio.byte_order
//					= B_MEDIA_LITTLE_ENDIAN;
				return B_NOT_SUPPORTED;
				break;
			case CODEC_ID_PCM_U16BE:
//				format->u.raw_audio.format
//					= media_raw_audio_format::B_AUDIO_USHORT;
//				format->u.raw_audio.byte_order
//					= B_MEDIA_BIG_ENDIAN;
				return B_NOT_SUPPORTED;
				break;
			case CODEC_ID_PCM_S8:
				format->u.raw_audio.format
					= media_raw_audio_format::B_AUDIO_CHAR;
				break;
			case CODEC_ID_PCM_U8:
				format->u.raw_audio.format
					= media_raw_audio_format::B_AUDIO_UCHAR;
				break;
			default:
				return B_NOT_SUPPORTED;
				break;
		}
	} else {
		if (description.family == B_MISC_FORMAT_FAMILY)
			description.u.misc.codec = codecContext->codec_id;

		BMediaFormats formats;
		status_t status = formats.GetFormatFor(description, format);
		if (status < B_OK)
			TRACE("  formats.GetFormatFor() error: %s\n", strerror(status));

		// Stash the FFmpeg codec tag in the user data so the decoder can
		// retrieve it later.
		format->user_data_type = B_CODEC_TYPE_INFO;
		*(uint32*)format->user_data = codecContext->codec_tag;
		format->user_data[4] = 0;
	}

	format->require_flags = 0;
	format->deny_flags = B_MEDIA_MAUI_UNDEFINED_FLAGS;

	switch (format->type) {
		case B_MEDIA_RAW_AUDIO:
			format->u.raw_audio.frame_rate = (float)codecContext->sample_rate;
			format->u.raw_audio.channel_count = codecContext->channels;
			format->u.raw_audio.channel_mask = codecContext->channel_layout;
			format->u.raw_audio.byte_order
				= avformat_to_beos_byte_order(codecContext->sample_fmt);
			format->u.raw_audio.format
				= avformat_to_beos_format(codecContext->sample_fmt);
			format->u.raw_audio.buffer_size = 0;

			// Read one packet and mark it for later re-use. (So our first
			// GetNextChunk() call does not read another packet.)
			if (_NextPacket(true) == B_OK) {
				TRACE("  successfully determined audio buffer size: %d\n",
					fPacket.size);
				format->u.raw_audio.buffer_size = fPacket.size;
			}
			break;

		case B_MEDIA_ENCODED_AUDIO:
			format->u.encoded_audio.bit_rate = codecContext->bit_rate;
			format->u.encoded_audio.frame_size = codecContext->frame_size;
			// Fill in some info about possible output format
			format->u.encoded_audio.output
				= media_multi_audio_format::wildcard;
			format->u.encoded_audio.output.frame_rate
				= (float)codecContext->sample_rate;
			// Channel layout bits match in Be API and FFmpeg.
			format->u.encoded_audio.output.channel_count
				= codecContext->channels;
			format->u.encoded_audio.multi_info.channel_mask
				= codecContext->channel_layout;
			format->u.encoded_audio.output.byte_order
				= avformat_to_beos_byte_order(codecContext->sample_fmt);
			format->u.encoded_audio.output.format
				= avformat_to_beos_format(codecContext->sample_fmt);
			if (codecContext->block_align > 0) {
				format->u.encoded_audio.output.buffer_size
					= codecContext->block_align;
			} else {
				// No fixed block size; derive a buffer size from the frame
				// size, channel count and the bytes-per-sample encoded in
				// the format constant.
				format->u.encoded_audio.output.buffer_size
					= codecContext->frame_size * codecContext->channels
						* (format->u.encoded_audio.output.format
							& media_raw_audio_format::B_AUDIO_SIZE_MASK);
			}
			break;

		case B_MEDIA_ENCODED_VIDEO:
// TODO: Specifying any of these seems to throw off the format matching
// later on.
//			format->u.encoded_video.avg_bit_rate = codecContext->bit_rate;
//			format->u.encoded_video.max_bit_rate = codecContext->bit_rate
//				+ codecContext->bit_rate_tolerance;

//			format->u.encoded_video.encoding
//				= media_encoded_video_format::B_ANY;

//			format->u.encoded_video.frame_size = 1;
//			format->u.encoded_video.forward_history = 0;
//			format->u.encoded_video.backward_history = 0;

			format->u.encoded_video.output.field_rate = FrameRate();
			format->u.encoded_video.output.interlace = 1;

			format->u.encoded_video.output.first_active = 0;
			format->u.encoded_video.output.last_active
				= codecContext->height - 1;
				// TODO: Maybe libavformat actually provides that info
				// somewhere...
			format->u.encoded_video.output.orientation
				= B_VIDEO_TOP_LEFT_RIGHT;

			// Calculate the display aspect ratio
			AVRational displayAspectRatio;
		    if (codecContext->sample_aspect_ratio.num != 0) {
				av_reduce(&displayAspectRatio.num, &displayAspectRatio.den,
					codecContext->width
						* codecContext->sample_aspect_ratio.num,
					codecContext->height
						* codecContext->sample_aspect_ratio.den,
					1024 * 1024);
				TRACE("  pixel aspect ratio: %d/%d, "
					"display aspect ratio: %d/%d\n",
					codecContext->sample_aspect_ratio.num,
					codecContext->sample_aspect_ratio.den,
					displayAspectRatio.num, displayAspectRatio.den);
		    } else {
				av_reduce(&displayAspectRatio.num, &displayAspectRatio.den,
					codecContext->width, codecContext->height, 1024 * 1024);
				TRACE("  no display aspect ratio (%d/%d)\n",
					displayAspectRatio.num, displayAspectRatio.den);
		    }
			format->u.encoded_video.output.pixel_width_aspect
				= displayAspectRatio.num;
			format->u.encoded_video.output.pixel_height_aspect
				= displayAspectRatio.den;

			format->u.encoded_video.output.display.format
				= pixfmt_to_colorspace(codecContext->pix_fmt);
			format->u.encoded_video.output.display.line_width
				= codecContext->width;
			format->u.encoded_video.output.display.line_count
				= codecContext->height;
			TRACE("  width/height: %d/%d\n", codecContext->width,
				codecContext->height);
			format->u.encoded_video.output.display.bytes_per_row = 0;
			format->u.encoded_video.output.display.pixel_offset = 0;
			format->u.encoded_video.output.display.line_offset = 0;
			format->u.encoded_video.output.display.flags = 0; // TODO

			break;

		default:
			// This is an unknown format to us.
			break;
	}

	// Add the meta data, if any
	if (codecContext->extradata_size > 0) {
		format->SetMetaData(codecContext->extradata,
			codecContext->extradata_size);
		TRACE("  extradata: %p\n", format->MetaData());
	}

	TRACE("  extradata_size: %d\n", codecContext->extradata_size);
//	TRACE("  intra_matrix: %p\n", codecContext->intra_matrix);
//	TRACE("  inter_matrix: %p\n", codecContext->inter_matrix);
//	TRACE("  get_buffer(): %p\n", codecContext->get_buffer);
//	TRACE("  release_buffer(): %p\n", codecContext->release_buffer);

#ifdef TRACE_AVFORMAT_READER
	char formatString[512];
	if (string_for_format(*format, formatString, sizeof(formatString)))
		TRACE("  format: %s\n", formatString);

	uint32 encoding = format->Encoding();
	TRACE("  encoding '%.4s'\n", (char*)&encoding);
#endif

	return B_OK;
}
1222 
1223 
/*!	Copies this stream's metadata (FFmpeg AVDictionary) into \a data.
	Always returns B_OK.
*/
status_t
AVFormatReader::Stream::GetMetaData(BMessage* data)
{
	BAutolock _(&fLock);

	avdictionary_to_message(fStream->metadata, data);

	return B_OK;
}
1233 
1234 
/*!	Fills out frame count, duration, media_format and codec extradata
	for this stream.
	\param frameCount Set to the frame count computed from duration and
		frame rate (the container's nb_frames is intentionally ignored,
		see comment below).
	\param duration Set to the stream duration in microseconds.
	\param format Set to the media_format initialized in Init().
	\param infoBuffer Set to point at the codec extradata; owned by FFmpeg.
	\param infoSize Set to the extradata size in bytes.
*/
status_t
AVFormatReader::Stream::GetStreamInfo(int64* frameCount,
	bigtime_t* duration, media_format* format, const void** infoBuffer,
	size_t* infoSize) const
{
	BAutolock _(&fLock);

	TRACE("AVFormatReader::Stream::GetStreamInfo(%ld)\n",
		VirtualIndex());

	double frameRate = FrameRate();
	TRACE("  frameRate: %.4f\n", frameRate);

	#ifdef TRACE_AVFORMAT_READER
	if (fStream->start_time != kNoPTSValue) {
		bigtime_t startTime = _ConvertFromStreamTimeBase(fStream->start_time);
		TRACE("  start_time: %lld or %.5fs\n", startTime,
			startTime / 1000000.0);
		// TODO: Handle start time in FindKeyFrame() and Seek()?!
	}
	#endif // TRACE_AVFORMAT_READER

	*duration = Duration();

	TRACE("  duration: %lld or %.5fs\n", *duration, *duration / 1000000.0);

	#if 0
	if (fStream->nb_index_entries > 0) {
		TRACE("  dump of index entries:\n");
		int count = 5;
		int firstEntriesCount = min_c(fStream->nb_index_entries, count);
		int i = 0;
		for (; i < firstEntriesCount; i++) {
			AVIndexEntry& entry = fStream->index_entries[i];
			bigtime_t timeGlobal = entry.timestamp;
			bigtime_t timeNative = _ConvertFromStreamTimeBase(timeGlobal);
			TRACE("    [%d] native: %.5fs global: %.5fs\n", i,
				timeNative / 1000000.0f, timeGlobal / 1000000.0f);
		}
		if (fStream->nb_index_entries - count > i) {
			i = fStream->nb_index_entries - count;
			TRACE("    ...\n");
			for (; i < fStream->nb_index_entries; i++) {
				AVIndexEntry& entry = fStream->index_entries[i];
				bigtime_t timeGlobal = entry.timestamp;
				bigtime_t timeNative = _ConvertFromStreamTimeBase(timeGlobal);
				TRACE("    [%d] native: %.5fs global: %.5fs\n", i,
					timeNative / 1000000.0f, timeGlobal / 1000000.0f);
			}
		}
	}
	#endif

	// NOTE: The nb_frames-based path is deliberately disabled; the frame
	// count is always derived from duration and frame rate instead.
	*frameCount = fStream->nb_frames;
//	if (*frameCount == 0) {
		// Calculate from duration and frame rate
		*frameCount = (int64)(*duration * frameRate / 1000000LL);
		TRACE("  frameCount calculated: %lld, from context: %lld\n",
			*frameCount, fStream->nb_frames);
//	} else
//		TRACE("  frameCount: %lld\n", *frameCount);

	*format = fFormat;

	// Extradata is owned by the AVCodecContext and stays valid as long as
	// this stream is open.
	*infoBuffer = fStream->codec->extradata;
	*infoSize = fStream->codec->extradata_size;

	return B_OK;
}
1304 
1305 
1306 status_t
1307 AVFormatReader::Stream::FindKeyFrame(uint32 flags, int64* frame,
1308 	bigtime_t* time) const
1309 {
1310 	BAutolock _(&fLock);
1311 
1312 	if (fContext == NULL || fStream == NULL)
1313 		return B_NO_INIT;
1314 
1315 	TRACE_FIND("AVFormatReader::Stream::FindKeyFrame(%ld,%s%s%s%s, "
1316 		"%lld, %lld)\n", VirtualIndex(),
1317 		(flags & B_MEDIA_SEEK_TO_FRAME) ? " B_MEDIA_SEEK_TO_FRAME" : "",
1318 		(flags & B_MEDIA_SEEK_TO_TIME) ? " B_MEDIA_SEEK_TO_TIME" : "",
1319 		(flags & B_MEDIA_SEEK_CLOSEST_BACKWARD)
1320 			? " B_MEDIA_SEEK_CLOSEST_BACKWARD" : "",
1321 		(flags & B_MEDIA_SEEK_CLOSEST_FORWARD)
1322 			? " B_MEDIA_SEEK_CLOSEST_FORWARD" : "",
1323 		*frame, *time);
1324 
1325 	bool inLastRequestedRange = false;
1326 	if ((flags & B_MEDIA_SEEK_TO_FRAME) != 0) {
1327 		if (fLastReportedKeyframe.reportedFrame
1328 			<= fLastReportedKeyframe.requestedFrame) {
1329 			inLastRequestedRange
1330 				= *frame >= fLastReportedKeyframe.reportedFrame
1331 					&& *frame <= fLastReportedKeyframe.requestedFrame;
1332 		} else {
1333 			inLastRequestedRange
1334 				= *frame >= fLastReportedKeyframe.requestedFrame
1335 					&& *frame <= fLastReportedKeyframe.reportedFrame;
1336 		}
1337 	} else if ((flags & B_MEDIA_SEEK_TO_FRAME) == 0) {
1338 		if (fLastReportedKeyframe.reportedTime
1339 			<= fLastReportedKeyframe.requestedTime) {
1340 			inLastRequestedRange
1341 				= *time >= fLastReportedKeyframe.reportedTime
1342 					&& *time <= fLastReportedKeyframe.requestedTime;
1343 		} else {
1344 			inLastRequestedRange
1345 				= *time >= fLastReportedKeyframe.requestedTime
1346 					&& *time <= fLastReportedKeyframe.reportedTime;
1347 		}
1348 	}
1349 
1350 	if (inLastRequestedRange) {
1351 		*frame = fLastReportedKeyframe.reportedFrame;
1352 		*time = fLastReportedKeyframe.reportedTime;
1353 		TRACE_FIND("  same as last reported keyframe\n");
1354 		return B_OK;
1355 	}
1356 
1357 	double frameRate = FrameRate();
1358 	if ((flags & B_MEDIA_SEEK_TO_FRAME) != 0)
1359 		*time = (bigtime_t)(*frame * 1000000.0 / frameRate + 0.5);
1360 
1361 	status_t ret;
1362 	if (fGhostStream == NULL) {
1363 		BAutolock _(fSourceLock);
1364 
1365 		fGhostStream = new(std::nothrow) StreamBase(fSource, fSourceLock,
1366 			&fLock);
1367 		if (fGhostStream == NULL) {
1368 			TRACE("  failed to allocate ghost stream\n");
1369 			return B_NO_MEMORY;
1370 		}
1371 
1372 		ret = fGhostStream->Open();
1373 		if (ret != B_OK) {
1374 			TRACE("  ghost stream failed to open: %s\n", strerror(ret));
1375 			return B_ERROR;
1376 		}
1377 
1378 		ret = fGhostStream->Init(fVirtualIndex);
1379 		if (ret != B_OK) {
1380 			TRACE("  ghost stream failed to init: %s\n", strerror(ret));
1381 			return B_ERROR;
1382 		}
1383 	}
1384 	fLastReportedKeyframe.requestedFrame = *frame;
1385 	fLastReportedKeyframe.requestedTime = *time;
1386 	fLastReportedKeyframe.seekFlags = flags;
1387 
1388 	ret = fGhostStream->Seek(flags, frame, time);
1389 	if (ret != B_OK) {
1390 		TRACE("  ghost stream failed to seek: %s\n", strerror(ret));
1391 		return B_ERROR;
1392 	}
1393 
1394 	fLastReportedKeyframe.reportedFrame = *frame;
1395 	fLastReportedKeyframe.reportedTime = *time;
1396 
1397 	TRACE_FIND("  found time: %.2fs\n", *time / 1000000.0);
1398 	if ((flags & B_MEDIA_SEEK_TO_FRAME) != 0) {
1399 		*frame = int64_t(*time * FrameRate() / 1000000.0 + 0.5);
1400 		TRACE_FIND("  found frame: %lld\n", *frame);
1401 	}
1402 
1403 	return B_OK;
1404 }
1405 
1406 
/*!	Seeks the stream to the given frame/time.
	If the request falls inside the range covered by the last
	FindKeyFrame() call, the original requested values (and flags) of that
	call are re-used, so that the seek lands on the exact position that
	was reported back then.
*/
status_t
AVFormatReader::Stream::Seek(uint32 flags, int64* frame, bigtime_t* time)
{
	BAutolock _(&fLock);

	if (fContext == NULL || fStream == NULL)
		return B_NO_INIT;

	// Put the old requested values into frame/time, since we already know
	// that the sought frame/time will then match the reported values.
	// TODO: Will not work if client changes seek flags (from backwards to
	// forward or vice versa)!!
	bool inLastRequestedRange = false;
	if ((flags & B_MEDIA_SEEK_TO_FRAME) != 0) {
		if (fLastReportedKeyframe.reportedFrame
			<= fLastReportedKeyframe.requestedFrame) {
			inLastRequestedRange
				= *frame >= fLastReportedKeyframe.reportedFrame
					&& *frame <= fLastReportedKeyframe.requestedFrame;
		} else {
			inLastRequestedRange
				= *frame >= fLastReportedKeyframe.requestedFrame
					&& *frame <= fLastReportedKeyframe.reportedFrame;
		}
	} else if ((flags & B_MEDIA_SEEK_TO_FRAME) == 0) {
		if (fLastReportedKeyframe.reportedTime
			<= fLastReportedKeyframe.requestedTime) {
			inLastRequestedRange
				= *time >= fLastReportedKeyframe.reportedTime
					&& *time <= fLastReportedKeyframe.requestedTime;
		} else {
			inLastRequestedRange
				= *time >= fLastReportedKeyframe.requestedTime
					&& *time <= fLastReportedKeyframe.reportedTime;
		}
	}

	if (inLastRequestedRange) {
		// Replay the request exactly as it was given to FindKeyFrame().
		*frame = fLastReportedKeyframe.requestedFrame;
		*time = fLastReportedKeyframe.requestedTime;
		flags = fLastReportedKeyframe.seekFlags;
	}

	return StreamBase::Seek(flags, frame, time);
}
1452 
1453 
1454 // #pragma mark - AVFormatReader
1455 
1456 
AVFormatReader::AVFormatReader()
	:
	fCopyright(""),
	fStreams(NULL),
		// allocated lazily in Sniff()
	fSourceLock("source I/O lock")
{
	TRACE("AVFormatReader::AVFormatReader\n");
}
1465 
1466 
1467 AVFormatReader::~AVFormatReader()
1468 {
1469 	TRACE("AVFormatReader::~AVFormatReader\n");
1470 	if (fStreams != NULL) {
1471 		// The client was supposed to call FreeCookie() on all
1472 		// allocated streams. Deleting the first stream is always
1473 		// prevented, we delete the other ones just in case.
1474 		int32 count = fStreams[0]->CountStreams();
1475 		for (int32 i = 0; i < count; i++)
1476 			delete fStreams[i];
1477 		delete[] fStreams;
1478 	}
1479 }
1480 
1481 
1482 // #pragma mark -
1483 
1484 
1485 const char*
1486 AVFormatReader::Copyright()
1487 {
1488 	if (fCopyright.Length() <= 0) {
1489 		BMessage message;
1490 		if (GetMetaData(&message) == B_OK)
1491 			message.FindString("copyright", &fCopyright);
1492 	}
1493 	return fCopyright.String();
1494 }
1495 
1496 
1497 status_t
1498 AVFormatReader::Sniff(int32* _streamCount)
1499 {
1500 	TRACE("AVFormatReader::Sniff\n");
1501 
1502 	BPositionIO* source = dynamic_cast<BPositionIO*>(Source());
1503 	if (source == NULL) {
1504 		TRACE("  not a BPositionIO, but we need it to be one.\n");
1505 		return B_NOT_SUPPORTED;
1506 	}
1507 
1508 	Stream* stream = new(std::nothrow) Stream(source,
1509 		&fSourceLock);
1510 	if (stream == NULL) {
1511 		ERROR("AVFormatReader::Sniff() - failed to allocate Stream\n");
1512 		return B_NO_MEMORY;
1513 	}
1514 
1515 	ObjectDeleter<Stream> streamDeleter(stream);
1516 
1517 	status_t ret = stream->Open();
1518 	if (ret != B_OK) {
1519 		TRACE("  failed to detect stream: %s\n", strerror(ret));
1520 		return ret;
1521 	}
1522 
1523 	delete[] fStreams;
1524 	fStreams = NULL;
1525 
1526 	int32 streamCount = stream->CountStreams();
1527 	if (streamCount == 0) {
1528 		TRACE("  failed to detect any streams: %s\n", strerror(ret));
1529 		return B_ERROR;
1530 	}
1531 
1532 	fStreams = new(std::nothrow) Stream*[streamCount];
1533 	if (fStreams == NULL) {
1534 		ERROR("AVFormatReader::Sniff() - failed to allocate streams\n");
1535 		return B_NO_MEMORY;
1536 	}
1537 
1538 	memset(fStreams, 0, sizeof(Stream*) * streamCount);
1539 	fStreams[0] = stream;
1540 	streamDeleter.Detach();
1541 
1542 	#ifdef TRACE_AVFORMAT_READER
1543 	dump_format(const_cast<AVFormatContext*>(stream->Context()), 0, "", 0);
1544 	#endif
1545 
1546 	if (_streamCount != NULL)
1547 		*_streamCount = streamCount;
1548 
1549 	return B_OK;
1550 }
1551 
1552 
1553 void
1554 AVFormatReader::GetFileFormatInfo(media_file_format* mff)
1555 {
1556 	TRACE("AVFormatReader::GetFileFormatInfo\n");
1557 
1558 	if (fStreams == NULL)
1559 		return;
1560 
1561 	// The first cookie is always there!
1562 	const AVFormatContext* context = fStreams[0]->Context();
1563 
1564 	if (context == NULL || context->iformat == NULL) {
1565 		TRACE("  no AVFormatContext or AVInputFormat!\n");
1566 		return;
1567 	}
1568 
1569 	const DemuxerFormat* format = demuxer_format_for(context->iformat);
1570 
1571 	mff->capabilities = media_file_format::B_READABLE
1572 		| media_file_format::B_KNOWS_ENCODED_VIDEO
1573 		| media_file_format::B_KNOWS_ENCODED_AUDIO
1574 		| media_file_format::B_IMPERFECTLY_SEEKABLE;
1575 
1576 	if (format != NULL) {
1577 		// TODO: Check if AVInputFormat has audio only and then use
1578 		// format->audio_family!
1579 		mff->family = format->video_family;
1580 	} else {
1581 		TRACE("  no DemuxerFormat for AVInputFormat!\n");
1582 		mff->family = B_MISC_FORMAT_FAMILY;
1583 	}
1584 
1585 	mff->version = 100;
1586 
1587 	if (format != NULL) {
1588 		strcpy(mff->mime_type, format->mime_type);
1589 	} else {
1590 		// TODO: Would be nice to be able to provide this from AVInputFormat,
1591 		// maybe by extending the FFmpeg code itself (all demuxers).
1592 		strcpy(mff->mime_type, "");
1593 	}
1594 
1595 	if (context->iformat->extensions != NULL)
1596 		strcpy(mff->file_extension, context->iformat->extensions);
1597 	else {
1598 		TRACE("  no file extensions for AVInputFormat.\n");
1599 		strcpy(mff->file_extension, "");
1600 	}
1601 
1602 	if (context->iformat->name != NULL)
1603 		strcpy(mff->short_name,  context->iformat->name);
1604 	else {
1605 		TRACE("  no short name for AVInputFormat.\n");
1606 		strcpy(mff->short_name, "");
1607 	}
1608 
1609 	if (context->iformat->long_name != NULL)
1610 		sprintf(mff->pretty_name, "%s (FFmpeg)", context->iformat->long_name);
1611 	else {
1612 		if (format != NULL)
1613 			sprintf(mff->pretty_name, "%s (FFmpeg)", format->pretty_name);
1614 		else
1615 			strcpy(mff->pretty_name, "Unknown (FFmpeg)");
1616 	}
1617 }
1618 
1619 
1620 status_t
1621 AVFormatReader::GetMetaData(BMessage* _data)
1622 {
1623 	// The first cookie is always there!
1624 	const AVFormatContext* context = fStreams[0]->Context();
1625 
1626 	if (context == NULL)
1627 		return B_NO_INIT;
1628 
1629 	avdictionary_to_message(context->metadata, _data);
1630 
1631 	// Add chapter info
1632 	for (unsigned i = 0; i < context->nb_chapters; i++) {
1633 		AVChapter* chapter = context->chapters[i];
1634 		BMessage chapterData;
1635 		chapterData.AddInt64("start", bigtime_t(1000000.0
1636 			* chapter->start * chapter->time_base.num
1637 			/ chapter->time_base.den + 0.5));
1638 		chapterData.AddInt64("end", bigtime_t(1000000.0
1639 			* chapter->end * chapter->time_base.num
1640 			/ chapter->time_base.den + 0.5));
1641 
1642 		avdictionary_to_message(chapter->metadata, &chapterData);
1643 		_data->AddMessage("be:chapter", &chapterData);
1644 	}
1645 
1646 	// Add program info
1647 	for (unsigned i = 0; i < context->nb_programs; i++) {
1648 		BMessage programData;
1649 		avdictionary_to_message(context->programs[i]->metadata, &programData);
1650 		_data->AddMessage("be:program", &programData);
1651 	}
1652 
1653 	return B_OK;
1654 }
1655 
1656 
1657 // #pragma mark -
1658 
1659 
1660 status_t
1661 AVFormatReader::AllocateCookie(int32 streamIndex, void** _cookie)
1662 {
1663 	TRACE("AVFormatReader::AllocateCookie(%ld)\n", streamIndex);
1664 
1665 	BAutolock _(fSourceLock);
1666 
1667 	if (fStreams == NULL)
1668 		return B_NO_INIT;
1669 
1670 	if (streamIndex < 0 || streamIndex >= fStreams[0]->CountStreams())
1671 		return B_BAD_INDEX;
1672 
1673 	if (_cookie == NULL)
1674 		return B_BAD_VALUE;
1675 
1676 	Stream* cookie = fStreams[streamIndex];
1677 	if (cookie == NULL) {
1678 		// Allocate the cookie
1679 		BPositionIO* source = dynamic_cast<BPositionIO*>(Source());
1680 		if (source == NULL) {
1681 			TRACE("  not a BPositionIO, but we need it to be one.\n");
1682 			return B_NOT_SUPPORTED;
1683 		}
1684 
1685 		cookie = new(std::nothrow) Stream(source, &fSourceLock);
1686 		if (cookie == NULL) {
1687 			ERROR("AVFormatReader::Sniff() - failed to allocate "
1688 				"Stream\n");
1689 			return B_NO_MEMORY;
1690 		}
1691 
1692 		status_t ret = cookie->Open();
1693 		if (ret != B_OK) {
1694 			TRACE("  stream failed to open: %s\n", strerror(ret));
1695 			delete cookie;
1696 			return ret;
1697 		}
1698 	}
1699 
1700 	status_t ret = cookie->Init(streamIndex);
1701 	if (ret != B_OK) {
1702 		TRACE("  stream failed to initialize: %s\n", strerror(ret));
1703 		// NOTE: Never delete the first stream!
1704 		if (streamIndex != 0)
1705 			delete cookie;
1706 		return ret;
1707 	}
1708 
1709 	fStreams[streamIndex] = cookie;
1710 	*_cookie = cookie;
1711 
1712 	return B_OK;
1713 }
1714 
1715 
1716 status_t
1717 AVFormatReader::FreeCookie(void *_cookie)
1718 {
1719 	BAutolock _(fSourceLock);
1720 
1721 	Stream* cookie = reinterpret_cast<Stream*>(_cookie);
1722 
1723 	// NOTE: Never delete the first cookie!
1724 	if (cookie != NULL && cookie->VirtualIndex() != 0) {
1725 		if (fStreams != NULL)
1726 			fStreams[cookie->VirtualIndex()] = NULL;
1727 		delete cookie;
1728 	}
1729 
1730 	return B_OK;
1731 }
1732 
1733 
1734 // #pragma mark -
1735 
1736 
1737 status_t
1738 AVFormatReader::GetStreamInfo(void* _cookie, int64* frameCount,
1739 	bigtime_t* duration, media_format* format, const void** infoBuffer,
1740 	size_t* infoSize)
1741 {
1742 	Stream* cookie = reinterpret_cast<Stream*>(_cookie);
1743 	return cookie->GetStreamInfo(frameCount, duration, format, infoBuffer,
1744 		infoSize);
1745 }
1746 
1747 
1748 status_t
1749 AVFormatReader::GetStreamMetaData(void* _cookie, BMessage* _data)
1750 {
1751 	Stream* cookie = reinterpret_cast<Stream*>(_cookie);
1752 	return cookie->GetMetaData(_data);
1753 }
1754 
1755 
1756 status_t
1757 AVFormatReader::Seek(void* _cookie, uint32 seekTo, int64* frame,
1758 	bigtime_t* time)
1759 {
1760 	Stream* cookie = reinterpret_cast<Stream*>(_cookie);
1761 	return cookie->Seek(seekTo, frame, time);
1762 }
1763 
1764 
1765 status_t
1766 AVFormatReader::FindKeyFrame(void* _cookie, uint32 flags, int64* frame,
1767 	bigtime_t* time)
1768 {
1769 	Stream* cookie = reinterpret_cast<Stream*>(_cookie);
1770 	return cookie->FindKeyFrame(flags, frame, time);
1771 }
1772 
1773 
1774 status_t
1775 AVFormatReader::GetNextChunk(void* _cookie, const void** chunkBuffer,
1776 	size_t* chunkSize, media_header* mediaHeader)
1777 {
1778 	Stream* cookie = reinterpret_cast<Stream*>(_cookie);
1779 	return cookie->GetNextChunk(chunkBuffer, chunkSize, mediaHeader);
1780 }
1781