xref: /haiku/src/add-ons/media/plugins/ffmpeg/AVFormatReader.cpp (revision 991dadd6324f7b7a68e94743a39ebae789823228)
1 /*
2  * Copyright 2009-2010, Stephan Aßmus <superstippi@gmx.de>
3  * All rights reserved. Distributed under the terms of the GNU L-GPL license.
4  */
5 
6 #include "AVFormatReader.h"
7 
8 #include <stdio.h>
9 #include <string.h>
10 #include <stdlib.h>
11 
12 #include <new>
13 
14 #include <AutoDeleter.h>
15 #include <Autolock.h>
16 #include <ByteOrder.h>
17 #include <DataIO.h>
18 #include <MediaDefs.h>
19 #include <MediaFormats.h>
20 
21 extern "C" {
22 	#include "avcodec.h"
23 	#include "avformat.h"
24 }
25 
26 #include "DemuxerTable.h"
27 #include "gfx_util.h"
28 
29 
// Debug tracing. Define TRACE_AVFORMAT_READER to enable general and seek
// tracing. NOTE: Even with tracing enabled, TRACE_IO, TRACE_FIND and
// TRACE_PACKET remain no-ops here; switch their definitions to printf(a)
// manually to enable those (very verbose) categories.
//#define TRACE_AVFORMAT_READER
#ifdef TRACE_AVFORMAT_READER
#	define TRACE printf
#	define TRACE_IO(a...)
#	define TRACE_SEEK(a...) printf(a)
#	define TRACE_FIND(a...)
#	define TRACE_PACKET(a...)
#else
#	define TRACE(a...)
#	define TRACE_IO(a...)
#	define TRACE_SEEK(a...)
#	define TRACE_FIND(a...)
#	define TRACE_PACKET(a...)
#endif

// Errors are always printed, regardless of the tracing configuration.
#define ERROR(a...) fprintf(stderr, a)
46 
47 
// Local stand-in for FFmpeg's AV_NOPTS_VALUE ("no presentation timestamp").
// Must stay bit-identical to AV_NOPTS_VALUE (INT64_MIN).
static const int64 kNoPTSValue = 0x8000000000000000LL;
	// NOTE: For some reasons, I have trouble with the avcodec.h define:
	// #define AV_NOPTS_VALUE          INT64_C(0x8000000000000000)
	// INT64_C is not defined here.
52 
53 
54 static uint32
55 avformat_to_beos_format(SampleFormat format)
56 {
57 	switch (format) {
58 		case SAMPLE_FMT_U8: return media_raw_audio_format::B_AUDIO_UCHAR;
59 		case SAMPLE_FMT_S16: return media_raw_audio_format::B_AUDIO_SHORT;
60 		case SAMPLE_FMT_S32: return media_raw_audio_format::B_AUDIO_INT;
61 		case SAMPLE_FMT_FLT: return media_raw_audio_format::B_AUDIO_FLOAT;
62 		case SAMPLE_FMT_DBL: return media_raw_audio_format::B_AUDIO_DOUBLE;
63 		default:
64 			break;
65 	}
66 	return 0;
67 }
68 
69 
70 static uint32
71 avformat_to_beos_byte_order(SampleFormat format)
72 {
73 	// TODO: Huh?
74 	return B_MEDIA_HOST_ENDIAN;
75 }
76 
77 
78 static void
79 avdictionary_to_message(AVDictionary* dictionary, BMessage* message)
80 {
81 	if (dictionary == NULL)
82 		return;
83 
84 	AVDictionaryEntry* entry = NULL;
85 	while ((entry = av_dict_get(dictionary, "", entry,
86 		AV_METADATA_IGNORE_SUFFIX))) {
87 		// convert entry keys into something more meaningful using the names from
88 		// id3v2.c
89 		if (strcmp(entry->key, "TALB") == 0 || strcmp(entry->key, "TAL") == 0)
90 			message->AddString("album", entry->value);
91 		else if (strcmp(entry->key, "TCOM") == 0)
92 			message->AddString("composer", entry->value);
93 		else if (strcmp(entry->key, "TCON") == 0 || strcmp(entry->key, "TCO") == 0)
94 			message->AddString("genre", entry->value);
95 		else if (strcmp(entry->key, "TCOP") == 0)
96 			message->AddString("copyright", entry->value);
97 		else if (strcmp(entry->key, "TDRL") == 0 || strcmp(entry->key, "TDRC") == 0)
98 			message->AddString("date", entry->value);
99 		else if (strcmp(entry->key, "TENC") == 0 || strcmp(entry->key, "TEN") == 0)
100 			message->AddString("encoded_by", entry->value);
101 		else if (strcmp(entry->key, "TIT2") == 0 || strcmp(entry->key, "TT2") == 0)
102 			message->AddString("title", entry->value);
103 		else if (strcmp(entry->key, "TLAN") == 0)
104 			message->AddString("language", entry->value);
105 		else if (strcmp(entry->key, "TPE1") == 0 || strcmp(entry->key, "TP1") == 0)
106 			message->AddString("artist", entry->value);
107 		else if (strcmp(entry->key, "TPE2") == 0 || strcmp(entry->key, "TP2") == 0)
108 			message->AddString("album_artist", entry->value);
109 		else if (strcmp(entry->key, "TPE3") == 0 || strcmp(entry->key, "TP3") == 0)
110 			message->AddString("performer", entry->value);
111 		else if (strcmp(entry->key, "TPOS") == 0)
112 			message->AddString("disc", entry->value);
113 		else if (strcmp(entry->key, "TPUB") == 0)
114 			message->AddString("publisher", entry->value);
115 		else if (strcmp(entry->key, "TRCK") == 0 || strcmp(entry->key, "TRK") == 0)
116 			message->AddString("track", entry->value);
117 		else if (strcmp(entry->key, "TSOA") == 0)
118 			message->AddString("album-sort", entry->value);
119 		else if (strcmp(entry->key, "TSOP") == 0)
120 			message->AddString("artist-sort", entry->value);
121 		else if (strcmp(entry->key, "TSOT") == 0)
122 			message->AddString("title-sort", entry->value);
123 		else if (strcmp(entry->key, "TSSE") == 0)
124 			message->AddString("encoder", entry->value);
125 		else if (strcmp(entry->key, "TYER") == 0)
126 			message->AddString("year", entry->value);
127 		else
128 			message->AddString(entry->key, entry->value);
129 	}
130 }
131 
132 
133 // #pragma mark - StreamBase
134 
135 
/*!	Base class wrapping one AVFormatContext around the shared BPositionIO
	source. Each StreamBase maintains its own read position into the shared
	source and represents one (virtual) stream of the container.
*/
class StreamBase {
public:
								StreamBase(BPositionIO* source,
									BLocker* sourceLock, BLocker* streamLock);
	virtual						~StreamBase();

	// Init an individual AVFormatContext
			status_t			Open();

	// Setup this stream to point to the AVStream at the given streamIndex.
	virtual	status_t			Init(int32 streamIndex);

	inline	const AVFormatContext* Context() const
									{ return fContext; }
			int32				Index() const;
			int32				CountStreams() const;
			int32				StreamIndexFor(int32 virtualIndex) const;
	inline	int32				VirtualIndex() const
									{ return fVirtualIndex; }

			double				FrameRate() const;
			bigtime_t			Duration() const;

	virtual	status_t			Seek(uint32 flags, int64* frame,
									bigtime_t* time);

			status_t			GetNextChunk(const void** chunkBuffer,
									size_t* chunkSize,
									media_header* mediaHeader);

protected:
	// I/O hooks for libavformat, cookie will be a Stream instance.
	// Since multiple StreamCookies use the same BPositionIO source, they
	// maintain the position individually, and may need to seek the source
	// if it does not match anymore in _Read().
	// TODO: This concept prevents the use of a plain BDataIO that is not
	// seekable. There is a version of AVFormatReader in the SVN history
	// which implements packet buffering for other streams when reading
	// packets. To support non-seekable network streams for example, this
	// code should be resurrected. It will make handling seekable streams,
	// especially from different threads that read from totally independent
	// positions in the stream (aggressive pre-buffering perhaps), a lot
	// more difficult with potentially large memory overhead.
	static	int					_Read(void* cookie, uint8* buffer,
									int bufferSize);
	static	off_t				_Seek(void* cookie, off_t offset, int whence);

	// Reads the next packet of this stream into fPacket, skipping packets
	// of other streams. If reuse is true, the packet is kept for the next
	// call instead of being consumed.
			status_t			_NextPacket(bool reuse);

	// Conversion between microseconds and the stream's own time base,
	// compensating for the stream's start-time offset.
			int64_t				_ConvertToStreamTimeBase(bigtime_t time) const;
			bigtime_t			_ConvertFromStreamTimeBase(int64_t time) const;

protected:
			BPositionIO*		fSource;
			off_t				fPosition;		// this stream's view of the source position
			// Since different threads may read from the source,
			// we need to protect the file position and I/O by a lock.
			BLocker*			fSourceLock;

			BLocker*			fStreamLock;	// serializes operations on this stream

			AVFormatContext*	fContext;		// owned; NULL until Open() succeeds
			AVStream*			fStream;		// points into fContext; set by Init()
			int32				fVirtualIndex;	// index as exposed to the Media Kit

			media_format		fFormat;

			AVIOContext*		fIOContext;		// owned; wraps _Read()/_Seek()

			AVPacket			fPacket;		// most recently read packet
			bool				fReusePacket;	// deliver fPacket again on next read

			bool				fSeekByBytes;	// container has discontinuous timestamps
			bool				fStreamBuildsIndexWhileReading;
};
211 
212 
/*!	Creates an unopened stream; Open() and Init() have to be called before
	the object is usable.
	\param source The container data, shared between all streams; must
		outlive this object.
	\param sourceLock Protects the shared source position and I/O.
	\param streamLock Serializes all operations on this stream.
*/
StreamBase::StreamBase(BPositionIO* source, BLocker* sourceLock,
		BLocker* streamLock)
	:
	fSource(source),
	fPosition(0),
	fSourceLock(sourceLock),

	fStreamLock(streamLock),

	fContext(NULL),
	fStream(NULL),
	fVirtualIndex(-1),
	fIOContext(NULL),

	fReusePacket(false),

	fSeekByBytes(false),
	fStreamBuildsIndexWhileReading(false)
{
	// NOTE: Don't use streamLock here, it may not yet be initialized!
	// (The derived Stream class passes a member that is constructed only
	// after this base constructor has run.)

	av_new_packet(&fPacket, 0);
	memset(&fFormat, 0, sizeof(media_format));
}
237 
238 
239 StreamBase::~StreamBase()
240 {
241 	if (fContext != NULL)
242 		avformat_close_input(&fContext);
243 	av_free_packet(&fPacket);
244 	av_free(fContext);
245 	if (fIOContext != NULL)
246 		av_free(fIOContext->buffer);
247 	av_free(fIOContext);
248 }
249 
250 
251 status_t
252 StreamBase::Open()
253 {
254 	BAutolock _(fStreamLock);
255 
256 	// Init probing data
257 	size_t bufferSize = 32768;
258 	uint8* buffer = static_cast<uint8*>(av_malloc(bufferSize));
259 	if (buffer == NULL)
260 		return B_NO_MEMORY;
261 
262 	// Allocate I/O context with buffer and hook functions, pass ourself as
263 	// cookie.
264 	memset(buffer, 0, bufferSize);
265 	fIOContext = avio_alloc_context(buffer, bufferSize, 0, this, _Read, 0,
266 		_Seek);
267 	if (fIOContext == NULL) {
268 		TRACE("StreamBase::Open() - avio_alloc_context() failed!\n");
269 		av_free(buffer);
270 		return B_ERROR;
271 	}
272 
273 	fContext = avformat_alloc_context();
274 	fContext->pb = fIOContext;
275 
276 	// Allocate our context and probe the input format
277 	if (avformat_open_input(&fContext, "", NULL, NULL) < 0) {
278 		TRACE("StreamBase::Open() - avformat_open_input() failed!\n");
279 		// avformat_open_input() frees the context in case of failure
280 		fContext = NULL;
281 		av_free(fIOContext);
282 		fIOContext = NULL;
283 		return B_NOT_SUPPORTED;
284 	}
285 
286 	TRACE("StreamBase::Open() - "
287 		"avformat_open_input(): %s\n", fContext->iformat->name);
288 	TRACE("  flags:%s%s%s%s%s\n",
289 		(fContext->iformat->flags & AVFMT_GLOBALHEADER) ? " AVFMT_GLOBALHEADER" : "",
290 		(fContext->iformat->flags & AVFMT_NOTIMESTAMPS) ? " AVFMT_NOTIMESTAMPS" : "",
291 		(fContext->iformat->flags & AVFMT_GENERIC_INDEX) ? " AVFMT_GENERIC_INDEX" : "",
292 		(fContext->iformat->flags & AVFMT_TS_DISCONT) ? " AVFMT_TS_DISCONT" : "",
293 		(fContext->iformat->flags & AVFMT_VARIABLE_FPS) ? " AVFMT_VARIABLE_FPS" : ""
294 	);
295 
296 
297 	// Retrieve stream information
298 	if (avformat_find_stream_info(fContext, NULL) < 0) {
299 		TRACE("StreamBase::Open() - avformat_find_stream_info() failed!\n");
300 		return B_NOT_SUPPORTED;
301 	}
302 
303 	fSeekByBytes = (fContext->iformat->flags & AVFMT_TS_DISCONT) != 0;
304 	fStreamBuildsIndexWhileReading
305 		= (fContext->iformat->flags & AVFMT_GENERIC_INDEX) != 0
306 			|| fSeekByBytes;
307 
308 	TRACE("StreamBase::Open() - "
309 		"av_find_stream_info() success! Seeking by bytes: %d\n",
310 		fSeekByBytes);
311 
312 	return B_OK;
313 }
314 
315 
316 status_t
317 StreamBase::Init(int32 virtualIndex)
318 {
319 	BAutolock _(fStreamLock);
320 
321 	TRACE("StreamBase::Init(%ld)\n", virtualIndex);
322 
323 	if (fContext == NULL)
324 		return B_NO_INIT;
325 
326 	int32 streamIndex = StreamIndexFor(virtualIndex);
327 	if (streamIndex < 0) {
328 		TRACE("  bad stream index!\n");
329 		return B_BAD_INDEX;
330 	}
331 
332 	TRACE("  context stream index: %ld\n", streamIndex);
333 
334 	// We need to remember the virtual index so that
335 	// AVFormatReader::FreeCookie() can clear the correct stream entry.
336 	fVirtualIndex = virtualIndex;
337 
338 	// Make us point to the AVStream at streamIndex
339 	fStream = fContext->streams[streamIndex];
340 
341 // NOTE: Discarding other streams works for most, but not all containers,
342 // for example it does not work for the ASF demuxer. Since I don't know what
343 // other demuxer it breaks, let's just keep reading packets for unwanted
344 // streams, it just makes the _GetNextPacket() function slightly less
345 // efficient.
346 //	// Discard all other streams
347 //	for (unsigned i = 0; i < fContext->nb_streams; i++) {
348 //		if (i != (unsigned)streamIndex)
349 //			fContext->streams[i]->discard = AVDISCARD_ALL;
350 //	}
351 
352 	return B_OK;
353 }
354 
355 
356 int32
357 StreamBase::Index() const
358 {
359 	if (fStream != NULL)
360 		return fStream->index;
361 	return -1;
362 }
363 
364 
365 int32
366 StreamBase::CountStreams() const
367 {
368 	// Figure out the stream count. If the context has "AVPrograms", use
369 	// the first program (for now).
370 	// TODO: To support "programs" properly, the BMediaFile/Track API should
371 	// be extended accordingly. I guess programs are like TV channels in the
372 	// same satilite transport stream. Maybe call them "TrackGroups".
373 	if (fContext->nb_programs > 0) {
374 		// See libavformat/utils.c:dump_format()
375 		return fContext->programs[0]->nb_stream_indexes;
376 	}
377 	return fContext->nb_streams;
378 }
379 
380 
381 int32
382 StreamBase::StreamIndexFor(int32 virtualIndex) const
383 {
384 	// NOTE: See CountStreams()
385 	if (fContext->nb_programs > 0) {
386 		const AVProgram* program = fContext->programs[0];
387 		if (virtualIndex >= 0
388 			&& virtualIndex < (int32)program->nb_stream_indexes) {
389 			return program->stream_index[virtualIndex];
390 		}
391 	} else {
392 		if (virtualIndex >= 0 && virtualIndex < (int32)fContext->nb_streams)
393 			return virtualIndex;
394 	}
395 	return -1;
396 }
397 
398 
/*!	Returns a best-effort frame rate (Hz) for the attached stream.
	For audio streams one "frame" is one sample, so the sample rate is
	returned. For video, several rate sources are tried in decreasing order
	of reliability. The result is always > 0 (callers divide by it).
*/
double
StreamBase::FrameRate() const
{
	// TODO: Find a way to always calculate a correct frame rate...
	double frameRate = 1.0;
	switch (fStream->codec->codec_type) {
		case AVMEDIA_TYPE_AUDIO:
			frameRate = (double)fStream->codec->sample_rate;
			break;
		case AVMEDIA_TYPE_VIDEO:
			// Prefer the averaged rate, then the "real" base frame rate,
			// then fall back to the inverse of the stream/codec time base.
			if (fStream->avg_frame_rate.den && fStream->avg_frame_rate.num)
				frameRate = av_q2d(fStream->avg_frame_rate);
			else if (fStream->r_frame_rate.den && fStream->r_frame_rate.num)
				frameRate = av_q2d(fStream->r_frame_rate);
			else if (fStream->time_base.den && fStream->time_base.num)
				frameRate = 1 / av_q2d(fStream->time_base);
			else if (fStream->codec->time_base.den
				&& fStream->codec->time_base.num) {
				frameRate = 1 / av_q2d(fStream->codec->time_base);
			}

			// TODO: Fix up interlaced video for real
			// NOTE(review): assumes a reported 50 Hz is the field rate of
			// 25 fps interlaced material — confirm; this also halves real
			// 50 fps progressive content.
			if (frameRate == 50.0f)
				frameRate = 25.0f;
			break;
		default:
			break;
	}
	// Never return 0 or a negative rate.
	if (frameRate <= 0.0)
		frameRate = 1.0;
	return frameRate;
}
431 
432 
/*!	Returns the stream duration in microseconds, preferring the per-stream
	duration (converted from the stream time base) over the container-wide
	duration (already in AV_TIME_BASE == microseconds). Returns 0 if
	neither is known.
*/
bigtime_t
StreamBase::Duration() const
{
	// TODO: This is not working correctly for all stream types...
	// It seems that the calculations here are correct, because they work
	// for a couple of streams and are in line with the documentation, but
	// unfortunately, libavformat itself seems to set the time_base and
	// duration wrongly sometimes. :-(
	if ((int64)fStream->duration != kNoPTSValue)
		return _ConvertFromStreamTimeBase(fStream->duration);
	else if ((int64)fContext->duration != kNoPTSValue)
		return (bigtime_t)fContext->duration;

	return 0;
}
448 
449 
/*!	Seeks the stream to the given frame or time (depending on \a flags).
	Two strategies are used: for containers with discontinuous timestamps
	(fSeekByBytes) a bisection over byte positions homes in on the target
	time; otherwise FFmpeg's timestamp-based seeking is used, guided by the
	stream index. In both cases the next packet is read immediately so that
	*time and *frame can be updated to reflect where the stream actually
	landed.
	\param flags B_MEDIA_SEEK_TO_FRAME/TIME and CLOSEST_FORWARD/BACKWARD.
	\param frame In: target frame (if seeking by frame). Out: frame
		actually reached.
	\param time In: target time in µs (if seeking by time). Out: time
		actually reached.
*/
status_t
StreamBase::Seek(uint32 flags, int64* frame, bigtime_t* time)
{
	BAutolock _(fStreamLock);

	if (fContext == NULL || fStream == NULL)
		return B_NO_INIT;

	TRACE_SEEK("StreamBase::Seek(%ld,%s%s%s%s, %lld, "
		"%lld)\n", VirtualIndex(),
		(flags & B_MEDIA_SEEK_TO_FRAME) ? " B_MEDIA_SEEK_TO_FRAME" : "",
		(flags & B_MEDIA_SEEK_TO_TIME) ? " B_MEDIA_SEEK_TO_TIME" : "",
		(flags & B_MEDIA_SEEK_CLOSEST_BACKWARD)
			? " B_MEDIA_SEEK_CLOSEST_BACKWARD" : "",
		(flags & B_MEDIA_SEEK_CLOSEST_FORWARD)
			? " B_MEDIA_SEEK_CLOSEST_FORWARD" : "",
		*frame, *time);

	double frameRate = FrameRate();
	if ((flags & B_MEDIA_SEEK_TO_FRAME) != 0) {
		// Seeking is always based on time, initialize it when client seeks
		// based on frame.
		*time = (bigtime_t)(*frame * 1000000.0 / frameRate + 0.5);
	}

	int64_t timeStamp = *time;

	int searchFlags = AVSEEK_FLAG_BACKWARD;
	if ((flags & B_MEDIA_SEEK_CLOSEST_FORWARD) != 0)
		searchFlags = 0;

	if (fSeekByBytes) {
		searchFlags |= AVSEEK_FLAG_BYTE;

		BAutolock _(fSourceLock);
		int64_t fileSize;
		if (fSource->GetSize(&fileSize) != B_OK)
			return B_NOT_SUPPORTED;
		int64_t duration = Duration();
		if (duration == 0)
			return B_NOT_SUPPORTED;

		// Initial guess: linear interpolation of the byte position from
		// the target time relative to the total duration.
		timeStamp = int64_t(fileSize * ((double)timeStamp / duration));
		if ((flags & B_MEDIA_SEEK_CLOSEST_BACKWARD) != 0) {
			timeStamp -= 65536;
			if (timeStamp < 0)
				timeStamp = 0;
		}

		// Bisection: seek, read a packet to see where we landed, then move
		// the byte position forward/backward until the found time is close
		// enough to the requested one.
		bool seekAgain = true;
		bool seekForward = true;
		bigtime_t lastFoundTime = -1;
		int64_t closestTimeStampBackwards = -1;
		while (seekAgain) {
			if (avformat_seek_file(fContext, -1, INT64_MIN, timeStamp,
				INT64_MAX, searchFlags) < 0) {
				TRACE("  avformat_seek_file() (by bytes) failed.\n");
				return B_ERROR;
			}
			seekAgain = false;

			// Our last packet is toast in any case. Read the next one so we
			// know where we really seeked.
			fReusePacket = false;
			if (_NextPacket(true) == B_OK) {
				// Skip packets without a PTS; we need one to know where we
				// landed.
				while (fPacket.pts == kNoPTSValue) {
					fReusePacket = false;
					if (_NextPacket(true) != B_OK)
						return B_ERROR;
				}
				if (fPacket.pos >= 0)
					timeStamp = fPacket.pos;
				bigtime_t foundTime
					= _ConvertFromStreamTimeBase(fPacket.pts);
				if (foundTime != lastFoundTime) {
					lastFoundTime = foundTime;
					if (foundTime > *time) {
						// Overshot. If we already know a position before the
						// target, go back there, otherwise step backwards
						// proportionally to the time error.
						if (closestTimeStampBackwards >= 0) {
							timeStamp = closestTimeStampBackwards;
							seekAgain = true;
							seekForward = false;
							continue;
						}
						int64_t diff = int64_t(fileSize
							* ((double)(foundTime - *time) / (2 * duration)));
						if (diff < 8192)
							break;
						timeStamp -= diff;
						TRACE_SEEK("  need to seek back (%lld) (time: %.2f "
							"-> %.2f)\n", timeStamp, *time / 1000000.0,
							foundTime / 1000000.0);
						if (timeStamp < 0)
							foundTime = 0;
						else {
							seekAgain = true;
							continue;
						}
					} else if (seekForward && foundTime < *time - 100000) {
						// Undershot by more than 0.1 s; remember this
						// position as a fallback and step forward.
						closestTimeStampBackwards = timeStamp;
						int64_t diff = int64_t(fileSize
							* ((double)(*time - foundTime) / (2 * duration)));
						if (diff < 8192)
							break;
						timeStamp += diff;
						TRACE_SEEK("  need to seek forward (%lld) (time: "
							"%.2f -> %.2f)\n", timeStamp, *time / 1000000.0,
							foundTime / 1000000.0);
						if (timeStamp > duration)
							foundTime = duration;
						else {
							seekAgain = true;
							continue;
						}
					}
				}
				TRACE_SEEK("  found time: %lld -> %lld (%.2f)\n", *time,
					foundTime, foundTime / 1000000.0);
				*time = foundTime;
				*frame = (uint64)(*time * frameRate / 1000000LL + 0.5);
				TRACE_SEEK("  seeked frame: %lld\n", *frame);
			} else {
				TRACE_SEEK("  _NextPacket() failed!\n");
				return B_ERROR;
			}
		}
	} else {
		// We may not get a PTS from the next packet after seeking, so
		// we try to get an expected time from the index.
		int64_t streamTimeStamp = _ConvertToStreamTimeBase(*time);
		int index = av_index_search_timestamp(fStream, streamTimeStamp,
			searchFlags);
		if (index < 0) {
			TRACE("  av_index_search_timestamp() failed\n");
		} else {
			if (index > 0) {
				const AVIndexEntry& entry = fStream->index_entries[index];
				streamTimeStamp = entry.timestamp;
			} else {
				// Some demuxers use the first index entry to store some
				// other information, like the total playing time for example.
				// Assume the timeStamp of the first entry is always 0.
				// TODO: Handle start-time offset?
				streamTimeStamp = 0;
			}
			bigtime_t foundTime = _ConvertFromStreamTimeBase(streamTimeStamp);
			bigtime_t timeDiff = foundTime > *time
				? foundTime - *time : *time - foundTime;

			if (timeDiff > 1000000
				&& (fStreamBuildsIndexWhileReading
					|| index == fStream->nb_index_entries - 1)) {
				// If the stream is building the index on the fly while parsing
				// it, we only have entries in the index for positions already
				// decoded, i.e. we cannot seek into the future. In that case,
				// just assume that we can seek where we want and leave
				// time/frame unmodified. Since successfully seeking one time
				// will generate index entries for the seeked to position, we
				// need to remember this in fStreamBuildsIndexWhileReading,
				// since when seeking back there will be later index entries,
				// but we still want to ignore the found entry.
				fStreamBuildsIndexWhileReading = true;
				TRACE_SEEK("  Not trusting generic index entry. "
					"(Current count: %d)\n", fStream->nb_index_entries);
			} else {
				// If we found a reasonably time, write it into *time.
				// After seeking, we will try to read the sought time from
				// the next packet. If the packet has no PTS value, we may
				// still have a more accurate time from the index lookup.
				*time = foundTime;
			}
		}

		// timeStamp here is in AV_TIME_BASE units (µs), as expected by
		// avformat_seek_file() with stream_index == -1.
		if (avformat_seek_file(fContext, -1, INT64_MIN, timeStamp, INT64_MAX,
				searchFlags) < 0) {
			TRACE("  avformat_seek_file() failed.\n");
			// Try to fall back to av_seek_frame()
			timeStamp = _ConvertToStreamTimeBase(timeStamp);
			if (av_seek_frame(fContext, fStream->index, timeStamp,
				searchFlags) < 0) {
				TRACE("  avformat_seek_frame() failed as well.\n");
				// Fall back to seeking to the beginning by bytes
				timeStamp = 0;
				if (av_seek_frame(fContext, fStream->index, timeStamp,
						AVSEEK_FLAG_BYTE) < 0) {
					TRACE("  avformat_seek_frame() by bytes failed as "
						"well.\n");
					// Do not propagate error in any case. We fail if we can't
					// read another packet.
				} else
					*time = 0;
			}
		}

		// Our last packet is toast in any case. Read the next one so
		// we know where we really sought.
		bigtime_t foundTime = *time;

		fReusePacket = false;
		if (_NextPacket(true) == B_OK) {
			if (fPacket.pts != kNoPTSValue)
				foundTime = _ConvertFromStreamTimeBase(fPacket.pts);
			else
				TRACE_SEEK("  no PTS in packet after seeking\n");
		} else
			TRACE_SEEK("  _NextPacket() failed!\n");

		*time = foundTime;
		TRACE_SEEK("  sought time: %.2fs\n", *time / 1000000.0);
		*frame = (uint64)(*time * frameRate / 1000000.0 + 0.5);
		TRACE_SEEK("  sought frame: %lld\n", *frame);
	}

	return B_OK;
}
664 
665 
/*!	Reads the next packet of this stream and hands out a pointer to its
	data, filling in the media_header if given.
	\param chunkBuffer Out: points into fPacket's data; valid only until
		the next call (the memory is owned by libavformat).
	\param chunkSize Out: size of the chunk in bytes; 0 on failure.
	\param mediaHeader Optional out: start time, file position and
		type-specific fields for the chunk.
	\return B_OK, or the error from _NextPacket() (e.g.
		B_LAST_BUFFER_ERROR at end of stream).
*/
status_t
StreamBase::GetNextChunk(const void** chunkBuffer,
	size_t* chunkSize, media_header* mediaHeader)
{
	BAutolock _(fStreamLock);

	TRACE_PACKET("StreamBase::GetNextChunk()\n");

	// Get the last stream DTS before reading the next packet, since
	// then it points to that one.
	int64 lastStreamDTS = fStream->cur_dts;

	status_t ret = _NextPacket(false);
	if (ret != B_OK) {
		*chunkBuffer = NULL;
		*chunkSize = 0;
		return ret;
	}

	// NOTE: AVPacket has a field called "convergence_duration", for which
	// the documentation is quite interesting. It sounds like it could be
	// used to know the time until the next I-Frame in streams that don't
	// let you know the position of keyframes in another way (like through
	// the index).

	// According to libavformat documentation, fPacket is valid until the
	// next call to av_read_frame(). This is what we want and we can share
	// the memory with the least overhead.
	*chunkBuffer = fPacket.data;
	*chunkSize = fPacket.size;

	if (mediaHeader != NULL) {
		mediaHeader->type = fFormat.type;
		mediaHeader->buffer = 0;
		mediaHeader->destination = -1;
		mediaHeader->time_source = -1;
		mediaHeader->size_used = fPacket.size;
		// Prefer the packet's own PTS; fall back to the DTS the stream had
		// before this packet was read.
		if (fPacket.pts != kNoPTSValue) {
//TRACE("  PTS: %lld (time_base.num: %d, .den: %d), stream DTS: %lld\n",
//fPacket.pts, fStream->time_base.num, fStream->time_base.den,
//fStream->cur_dts);
			mediaHeader->start_time = _ConvertFromStreamTimeBase(fPacket.pts);
		} else {
//TRACE("  PTS (stream): %lld (time_base.num: %d, .den: %d), stream DTS: %lld\n",
//lastStreamDTS, fStream->time_base.num, fStream->time_base.den,
//fStream->cur_dts);
			mediaHeader->start_time
				= _ConvertFromStreamTimeBase(lastStreamDTS);
		}
		mediaHeader->file_pos = fPacket.pos;
		mediaHeader->data_offset = 0;
		// Fill in the type-specific header fields (keyframe flag, line
		// count for video).
		switch (mediaHeader->type) {
			case B_MEDIA_RAW_AUDIO:
				break;
			case B_MEDIA_ENCODED_AUDIO:
				mediaHeader->u.encoded_audio.buffer_flags
					= (fPacket.flags & AV_PKT_FLAG_KEY) ? B_MEDIA_KEY_FRAME : 0;
				break;
			case B_MEDIA_RAW_VIDEO:
				mediaHeader->u.raw_video.line_count
					= fFormat.u.raw_video.display.line_count;
				break;
			case B_MEDIA_ENCODED_VIDEO:
				mediaHeader->u.encoded_video.field_flags
					= (fPacket.flags & AV_PKT_FLAG_KEY) ? B_MEDIA_KEY_FRAME : 0;
				mediaHeader->u.encoded_video.line_count
					= fFormat.u.encoded_video.output.display.line_count;
				break;
			default:
				break;
		}
	}

//	static bigtime_t pts[2];
//	static bigtime_t lastPrintTime = system_time();
//	static BLocker printLock;
//	if (fStream->index < 2) {
//		if (fPacket.pts != kNoPTSValue)
//			pts[fStream->index] = _ConvertFromStreamTimeBase(fPacket.pts);
//		printLock.Lock();
//		bigtime_t now = system_time();
//		if (now - lastPrintTime > 1000000) {
//			printf("PTS: %.4f/%.4f, diff: %.4f\r", pts[0] / 1000000.0,
//				pts[1] / 1000000.0, (pts[0] - pts[1]) / 1000000.0);
//			fflush(stdout);
//			lastPrintTime = now;
//		}
//		printLock.Unlock();
//	}

	return B_OK;
}
758 
759 
760 // #pragma mark -
761 
762 
763 /*static*/ int
764 StreamBase::_Read(void* cookie, uint8* buffer, int bufferSize)
765 {
766 	StreamBase* stream = reinterpret_cast<StreamBase*>(cookie);
767 
768 	BAutolock _(stream->fSourceLock);
769 
770 	TRACE_IO("StreamBase::_Read(%p, %p, %d) position: %lld/%lld\n",
771 		cookie, buffer, bufferSize, stream->fPosition,
772 		stream->fSource->Position());
773 
774 	if (stream->fPosition != stream->fSource->Position()) {
775 		off_t position
776 			= stream->fSource->Seek(stream->fPosition, SEEK_SET);
777 		if (position != stream->fPosition)
778 			return -1;
779 	}
780 
781 	ssize_t read = stream->fSource->Read(buffer, bufferSize);
782 	if (read > 0)
783 		stream->fPosition += read;
784 
785 	TRACE_IO("  read: %ld\n", read);
786 	return (int)read;
787 
788 }
789 
790 
791 /*static*/ off_t
792 StreamBase::_Seek(void* cookie, off_t offset, int whence)
793 {
794 	TRACE_IO("StreamBase::_Seek(%p, %lld, %d)\n",
795 		cookie, offset, whence);
796 
797 	StreamBase* stream = reinterpret_cast<StreamBase*>(cookie);
798 
799 	BAutolock _(stream->fSourceLock);
800 
801 	// Support for special file size retrieval API without seeking
802 	// anywhere:
803 	if (whence == AVSEEK_SIZE) {
804 		off_t size;
805 		if (stream->fSource->GetSize(&size) == B_OK)
806 			return size;
807 		return -1;
808 	}
809 
810 	// If not requested to seek to an absolute position, we need to
811 	// confirm that the stream is currently at the position that we
812 	// think it is.
813 	if (whence != SEEK_SET
814 		&& stream->fPosition != stream->fSource->Position()) {
815 		off_t position
816 			= stream->fSource->Seek(stream->fPosition, SEEK_SET);
817 		if (position != stream->fPosition)
818 			return -1;
819 	}
820 
821 	off_t position = stream->fSource->Seek(offset, whence);
822 	TRACE_IO("  position: %lld\n", position);
823 	if (position < 0)
824 		return -1;
825 
826 	stream->fPosition = position;
827 
828 	return position;
829 }
830 
831 
832 status_t
833 StreamBase::_NextPacket(bool reuse)
834 {
835 	TRACE_PACKET("StreamBase::_NextPacket(%d)\n", reuse);
836 
837 	if (fReusePacket) {
838 		// The last packet was marked for reuse, so we keep using it.
839 		TRACE_PACKET("  re-using last packet\n");
840 		fReusePacket = reuse;
841 		return B_OK;
842 	}
843 
844 	av_free_packet(&fPacket);
845 
846 	while (true) {
847 		if (av_read_frame(fContext, &fPacket) < 0) {
848 			// NOTE: Even though we may get the error for a different stream,
849 			// av_read_frame() is not going to be successful from here on, so
850 			// it doesn't matter
851 			fReusePacket = false;
852 			return B_LAST_BUFFER_ERROR;
853 		}
854 
855 		if (fPacket.stream_index == Index())
856 			break;
857 
858 		// This is a packet from another stream, ignore it.
859 		av_free_packet(&fPacket);
860 	}
861 
862 	// Mark this packet with the new reuse flag.
863 	fReusePacket = reuse;
864 	return B_OK;
865 }
866 
867 
/*!	Converts a time in microseconds into the attached stream's own time
	base units, adding the stream's start-time offset if it is known.
	The +0.5 rounds to nearest (for non-negative times).
*/
int64_t
StreamBase::_ConvertToStreamTimeBase(bigtime_t time) const
{
	int64 timeStamp = int64_t((double)time * fStream->time_base.den
		/ (1000000.0 * fStream->time_base.num) + 0.5);
	if (fStream->start_time != kNoPTSValue)
		timeStamp += fStream->start_time;
	return timeStamp;
}
877 
878 
/*!	Converts a timestamp in the attached stream's time base units into
	microseconds, subtracting the stream's start-time offset if known.
	Inverse of _ConvertToStreamTimeBase().
*/
bigtime_t
StreamBase::_ConvertFromStreamTimeBase(int64_t time) const
{
	if (fStream->start_time != kNoPTSValue)
		time -= fStream->start_time;

	return bigtime_t(1000000.0 * time * fStream->time_base.num
		/ fStream->time_base.den + 0.5);
}
888 
889 
890 // #pragma mark - AVFormatReader::Stream
891 
892 
/*!	Concrete per-track stream used by AVFormatReader. Adds the media_format
	initialization, metadata extraction and keyframe finding on top of
	StreamBase. Each Stream owns its private BLocker, which it hands to the
	base class as the stream lock.
*/
class AVFormatReader::Stream : public StreamBase {
public:
								Stream(BPositionIO* source,
									BLocker* streamLock);
	virtual						~Stream();

	// Setup this stream to point to the AVStream at the given streamIndex.
	// This will also initialize the media_format.
	virtual	status_t			Init(int32 streamIndex);

			status_t			GetMetaData(BMessage* data);

	// Support for AVFormatReader
			status_t			GetStreamInfo(int64* frameCount,
									bigtime_t* duration, media_format* format,
									const void** infoBuffer,
									size_t* infoSize) const;

			status_t			FindKeyFrame(uint32 flags, int64* frame,
									bigtime_t* time) const;
	virtual	status_t			Seek(uint32 flags, int64* frame,
									bigtime_t* time);

private:
	mutable	BLocker				fLock;

			// Caches the result of the last FindKeyFrame() query so that a
			// following Seek() with the same parameters can be answered
			// consistently.
			struct KeyframeInfo {
				bigtime_t		requestedTime;
				int64			requestedFrame;
				bigtime_t		reportedTime;
				int64			reportedFrame;
				uint32			seekFlags;
			};
	mutable	KeyframeInfo		fLastReportedKeyframe;
			// Secondary stream used to answer FindKeyFrame() without
			// disturbing this stream's read position; created lazily.
	mutable	StreamBase*			fGhostStream;
};
929 
930 
931 
/*!	\param source The shared container data.
	\param streamLock The reader-wide lock protecting the shared source;
		passed to StreamBase as the source lock. The stream lock is the
		private fLock member.
	NOTE: &fLock is handed to the base class before fLock itself is
	constructed; StreamBase's constructor deliberately does not use it.
*/
AVFormatReader::Stream::Stream(BPositionIO* source, BLocker* streamLock)
	:
	StreamBase(source, streamLock, &fLock),
	fLock("stream lock"),
	fGhostStream(NULL)
{
	fLastReportedKeyframe.requestedTime = 0;
	fLastReportedKeyframe.requestedFrame = 0;
	fLastReportedKeyframe.reportedTime = 0;
	fLastReportedKeyframe.reportedFrame = 0;
}
943 
944 
AVFormatReader::Stream::~Stream()
{
	// The ghost stream (lazily created for FindKeyFrame()) is owned by us.
	delete fGhostStream;
}
949 
950 
// Initializes fFormat (the Haiku media_format) for the stream selected by
// virtualIndex, translating the fields of the stream's AVCodecContext.
// Returns B_NOT_SUPPORTED for raw-audio codecs we cannot describe and
// B_ERROR for unknown codec types.
status_t
AVFormatReader::Stream::Init(int32 virtualIndex)
{
	TRACE("AVFormatReader::Stream::Init(%ld)\n", virtualIndex);

	status_t ret = StreamBase::Init(virtualIndex);
	if (ret != B_OK)
		return ret;

	// Get a pointer to the AVCodecContext for the stream at streamIndex.
	AVCodecContext* codecContext = fStream->codec;

#if 0
// stippi: Here I was experimenting with the question if some fields of the
// AVCodecContext change (or get filled out at all), if the AVCodec is opened.
	class CodecOpener {
	public:
		CodecOpener(AVCodecContext* context)
		{
			fCodecContext = context;
			AVCodec* codec = avcodec_find_decoder(context->codec_id);
			fCodecOpen = avcodec_open(context, codec) >= 0;
			if (!fCodecOpen)
				TRACE("  failed to open the codec!\n");
		}
		~CodecOpener()
		{
			if (fCodecOpen)
				avcodec_close(fCodecContext);
		}
	private:
		AVCodecContext*		fCodecContext;
		bool				fCodecOpen;
	} codecOpener(codecContext);
#endif

	// initialize the media_format for this stream
	media_format* format = &fFormat;
	memset(format, 0, sizeof(media_format));

	media_format_description description;

	// Set format family and type depending on codec_type of the stream.
	switch (codecContext->codec_type) {
		case AVMEDIA_TYPE_AUDIO:
			// The PCM codec ids form a contiguous range in this FFmpeg
			// version; anything inside it is uncompressed audio.
			if ((codecContext->codec_id >= CODEC_ID_PCM_S16LE)
				&& (codecContext->codec_id <= CODEC_ID_PCM_U8)) {
				TRACE("  raw audio\n");
				format->type = B_MEDIA_RAW_AUDIO;
				description.family = B_ANY_FORMAT_FAMILY;
				// This will then apparently be handled by the (built into
				// BMediaTrack) RawDecoder.
			} else {
				TRACE("  encoded audio\n");
				format->type = B_MEDIA_ENCODED_AUDIO;
				description.family = B_MISC_FORMAT_FAMILY;
				description.u.misc.file_format = 'ffmp';
			}
			break;
		case AVMEDIA_TYPE_VIDEO:
			TRACE("  encoded video\n");
			format->type = B_MEDIA_ENCODED_VIDEO;
			description.family = B_MISC_FORMAT_FAMILY;
			description.u.misc.file_format = 'ffmp';
			break;
		default:
			TRACE("  unknown type\n");
			format->type = B_MEDIA_UNKNOWN_TYPE;
			return B_ERROR;
			break;
	}

	if (format->type == B_MEDIA_RAW_AUDIO) {
		// We cannot describe all raw-audio formats, some are unsupported.
		switch (codecContext->codec_id) {
			case CODEC_ID_PCM_S16LE:
				format->u.raw_audio.format
					= media_raw_audio_format::B_AUDIO_SHORT;
				format->u.raw_audio.byte_order
					= B_MEDIA_LITTLE_ENDIAN;
				break;
			case CODEC_ID_PCM_S16BE:
				format->u.raw_audio.format
					= media_raw_audio_format::B_AUDIO_SHORT;
				format->u.raw_audio.byte_order
					= B_MEDIA_BIG_ENDIAN;
				break;
			case CODEC_ID_PCM_U16LE:
//				format->u.raw_audio.format
//					= media_raw_audio_format::B_AUDIO_USHORT;
//				format->u.raw_audio.byte_order
//					= B_MEDIA_LITTLE_ENDIAN;
				return B_NOT_SUPPORTED;
				break;
			case CODEC_ID_PCM_U16BE:
//				format->u.raw_audio.format
//					= media_raw_audio_format::B_AUDIO_USHORT;
//				format->u.raw_audio.byte_order
//					= B_MEDIA_BIG_ENDIAN;
				return B_NOT_SUPPORTED;
				break;
			case CODEC_ID_PCM_S8:
				format->u.raw_audio.format
					= media_raw_audio_format::B_AUDIO_CHAR;
				break;
			case CODEC_ID_PCM_U8:
				format->u.raw_audio.format
					= media_raw_audio_format::B_AUDIO_UCHAR;
				break;
			default:
				return B_NOT_SUPPORTED;
				break;
		}
	} else {
		if (description.family == B_MISC_FORMAT_FAMILY)
			description.u.misc.codec = codecContext->codec_id;

		// Look up the matching registered media_format, if any.
		BMediaFormats formats;
		status_t status = formats.GetFormatFor(description, format);
		if (status < B_OK)
			TRACE("  formats.GetFormatFor() error: %s\n", strerror(status));

		// Stash the FFmpeg codec tag in the format's user data so decoders
		// can retrieve it.
		format->user_data_type = B_CODEC_TYPE_INFO;
		*(uint32*)format->user_data = codecContext->codec_tag;
		format->user_data[4] = 0;
	}

	format->require_flags = 0;
	format->deny_flags = B_MEDIA_MAUI_UNDEFINED_FLAGS;

	switch (format->type) {
		case B_MEDIA_RAW_AUDIO:
			format->u.raw_audio.frame_rate = (float)codecContext->sample_rate;
			format->u.raw_audio.channel_count = codecContext->channels;
			format->u.raw_audio.channel_mask = codecContext->channel_layout;
			format->u.raw_audio.byte_order
				= avformat_to_beos_byte_order(codecContext->sample_fmt);
			format->u.raw_audio.format
				= avformat_to_beos_format(codecContext->sample_fmt);
			format->u.raw_audio.buffer_size = 0;

			// Read one packet and mark it for later re-use. (So our first
			// GetNextChunk() call does not read another packet.)
			if (_NextPacket(true) == B_OK) {
				TRACE("  successfully determined audio buffer size: %d\n",
					fPacket.size);
				format->u.raw_audio.buffer_size = fPacket.size;
			}
			break;

		case B_MEDIA_ENCODED_AUDIO:
			format->u.encoded_audio.bit_rate = codecContext->bit_rate;
			format->u.encoded_audio.frame_size = codecContext->frame_size;
			// Fill in some info about possible output format
			format->u.encoded_audio.output
				= media_multi_audio_format::wildcard;
			format->u.encoded_audio.output.frame_rate
				= (float)codecContext->sample_rate;
			// Channel layout bits match in Be API and FFmpeg.
			format->u.encoded_audio.output.channel_count
				= codecContext->channels;
			format->u.encoded_audio.multi_info.channel_mask
				= codecContext->channel_layout;
			format->u.encoded_audio.output.byte_order
				= avformat_to_beos_byte_order(codecContext->sample_fmt);
			format->u.encoded_audio.output.format
				= avformat_to_beos_format(codecContext->sample_fmt);
			if (codecContext->block_align > 0) {
				format->u.encoded_audio.output.buffer_size
					= codecContext->block_align;
			} else {
				// No fixed block size — estimate from frame size, channel
				// count and sample size (B_AUDIO_SIZE_MASK yields the
				// bytes per sample of the raw format).
				format->u.encoded_audio.output.buffer_size
					= codecContext->frame_size * codecContext->channels
						* (format->u.encoded_audio.output.format
							& media_raw_audio_format::B_AUDIO_SIZE_MASK);
			}
			break;

		case B_MEDIA_ENCODED_VIDEO:
// TODO: Specifying any of these seems to throw off the format matching
// later on.
//			format->u.encoded_video.avg_bit_rate = codecContext->bit_rate;
//			format->u.encoded_video.max_bit_rate = codecContext->bit_rate
//				+ codecContext->bit_rate_tolerance;

//			format->u.encoded_video.encoding
//				= media_encoded_video_format::B_ANY;

//			format->u.encoded_video.frame_size = 1;
//			format->u.encoded_video.forward_history = 0;
//			format->u.encoded_video.backward_history = 0;

			format->u.encoded_video.output.field_rate = FrameRate();
			format->u.encoded_video.output.interlace = 1;

			format->u.encoded_video.output.first_active = 0;
			format->u.encoded_video.output.last_active
				= codecContext->height - 1;
				// TODO: Maybe libavformat actually provides that info
				// somewhere...
			format->u.encoded_video.output.orientation
				= B_VIDEO_TOP_LEFT_RIGHT;

			// Calculate the display aspect ratio
			AVRational displayAspectRatio;
		    if (codecContext->sample_aspect_ratio.num != 0) {
				av_reduce(&displayAspectRatio.num, &displayAspectRatio.den,
					codecContext->width
						* codecContext->sample_aspect_ratio.num,
					codecContext->height
						* codecContext->sample_aspect_ratio.den,
					1024 * 1024);
				TRACE("  pixel aspect ratio: %d/%d, "
					"display aspect ratio: %d/%d\n",
					codecContext->sample_aspect_ratio.num,
					codecContext->sample_aspect_ratio.den,
					displayAspectRatio.num, displayAspectRatio.den);
		    } else {
				av_reduce(&displayAspectRatio.num, &displayAspectRatio.den,
					codecContext->width, codecContext->height, 1024 * 1024);
				TRACE("  no display aspect ratio (%d/%d)\n",
					displayAspectRatio.num, displayAspectRatio.den);
		    }
			format->u.encoded_video.output.pixel_width_aspect
				= displayAspectRatio.num;
			format->u.encoded_video.output.pixel_height_aspect
				= displayAspectRatio.den;

			format->u.encoded_video.output.display.format
				= pixfmt_to_colorspace(codecContext->pix_fmt);
			format->u.encoded_video.output.display.line_width
				= codecContext->width;
			format->u.encoded_video.output.display.line_count
				= codecContext->height;
			TRACE("  width/height: %d/%d\n", codecContext->width,
				codecContext->height);
			format->u.encoded_video.output.display.bytes_per_row = 0;
			format->u.encoded_video.output.display.pixel_offset = 0;
			format->u.encoded_video.output.display.line_offset = 0;
			format->u.encoded_video.output.display.flags = 0; // TODO

			break;

		default:
			// This is an unknown format to us.
			break;
	}

	// Add the meta data, if any
	if (codecContext->extradata_size > 0) {
		format->SetMetaData(codecContext->extradata,
			codecContext->extradata_size);
		TRACE("  extradata: %p\n", format->MetaData());
	}

	TRACE("  extradata_size: %d\n", codecContext->extradata_size);
//	TRACE("  intra_matrix: %p\n", codecContext->intra_matrix);
//	TRACE("  inter_matrix: %p\n", codecContext->inter_matrix);
//	TRACE("  get_buffer(): %p\n", codecContext->get_buffer);
//	TRACE("  release_buffer(): %p\n", codecContext->release_buffer);

#ifdef TRACE_AVFORMAT_READER
	char formatString[512];
	if (string_for_format(*format, formatString, sizeof(formatString)))
		TRACE("  format: %s\n", formatString);

	uint32 encoding = format->Encoding();
	TRACE("  encoding '%.4s'\n", (char*)&encoding);
#endif

	return B_OK;
}
1223 
1224 
1225 status_t
1226 AVFormatReader::Stream::GetMetaData(BMessage* data)
1227 {
1228 	BAutolock _(&fLock);
1229 
1230 	avdictionary_to_message(fStream->metadata, data);
1231 
1232 	return B_OK;
1233 }
1234 
1235 
// Reports the stream's frame count, duration, media_format and codec
// extradata. The frame count is always recomputed from duration and frame
// rate (the nb_frames-based path below is deliberately disabled).
status_t
AVFormatReader::Stream::GetStreamInfo(int64* frameCount,
	bigtime_t* duration, media_format* format, const void** infoBuffer,
	size_t* infoSize) const
{
	BAutolock _(&fLock);

	TRACE("AVFormatReader::Stream::GetStreamInfo(%ld)\n",
		VirtualIndex());

	double frameRate = FrameRate();
	TRACE("  frameRate: %.4f\n", frameRate);

	#ifdef TRACE_AVFORMAT_READER
	if (fStream->start_time != kNoPTSValue) {
		bigtime_t startTime = _ConvertFromStreamTimeBase(fStream->start_time);
		TRACE("  start_time: %lld or %.5fs\n", startTime,
			startTime / 1000000.0);
		// TODO: Handle start time in FindKeyFrame() and Seek()?!
	}
	#endif // TRACE_AVFORMAT_READER

	*duration = Duration();

	TRACE("  duration: %lld or %.5fs\n", *duration, *duration / 1000000.0);

	#if 0
	if (fStream->nb_index_entries > 0) {
		TRACE("  dump of index entries:\n");
		int count = 5;
		int firstEntriesCount = min_c(fStream->nb_index_entries, count);
		int i = 0;
		for (; i < firstEntriesCount; i++) {
			AVIndexEntry& entry = fStream->index_entries[i];
			bigtime_t timeGlobal = entry.timestamp;
			bigtime_t timeNative = _ConvertFromStreamTimeBase(timeGlobal);
			TRACE("    [%d] native: %.5fs global: %.5fs\n", i,
				timeNative / 1000000.0f, timeGlobal / 1000000.0f);
		}
		if (fStream->nb_index_entries - count > i) {
			i = fStream->nb_index_entries - count;
			TRACE("    ...\n");
			for (; i < fStream->nb_index_entries; i++) {
				AVIndexEntry& entry = fStream->index_entries[i];
				bigtime_t timeGlobal = entry.timestamp;
				bigtime_t timeNative = _ConvertFromStreamTimeBase(timeGlobal);
				TRACE("    [%d] native: %.5fs global: %.5fs\n", i,
					timeNative / 1000000.0f, timeGlobal / 1000000.0f);
			}
		}
	}
	#endif

	*frameCount = fStream->nb_frames;
//	if (*frameCount == 0) {
		// Calculate from duration and frame rate
		*frameCount = (int64)(*duration * frameRate / 1000000LL);
		TRACE("  frameCount calculated: %lld, from context: %lld\n",
			*frameCount, fStream->nb_frames);
//	} else
//		TRACE("  frameCount: %lld\n", *frameCount);

	*format = fFormat;

	// The returned buffer points into the codec context and stays owned
	// by FFmpeg; callers must not free it.
	*infoBuffer = fStream->codec->extradata;
	*infoSize = fStream->codec->extradata_size;

	return B_OK;
}
1305 
1306 
1307 status_t
1308 AVFormatReader::Stream::FindKeyFrame(uint32 flags, int64* frame,
1309 	bigtime_t* time) const
1310 {
1311 	BAutolock _(&fLock);
1312 
1313 	if (fContext == NULL || fStream == NULL)
1314 		return B_NO_INIT;
1315 
1316 	TRACE_FIND("AVFormatReader::Stream::FindKeyFrame(%ld,%s%s%s%s, "
1317 		"%lld, %lld)\n", VirtualIndex(),
1318 		(flags & B_MEDIA_SEEK_TO_FRAME) ? " B_MEDIA_SEEK_TO_FRAME" : "",
1319 		(flags & B_MEDIA_SEEK_TO_TIME) ? " B_MEDIA_SEEK_TO_TIME" : "",
1320 		(flags & B_MEDIA_SEEK_CLOSEST_BACKWARD)
1321 			? " B_MEDIA_SEEK_CLOSEST_BACKWARD" : "",
1322 		(flags & B_MEDIA_SEEK_CLOSEST_FORWARD)
1323 			? " B_MEDIA_SEEK_CLOSEST_FORWARD" : "",
1324 		*frame, *time);
1325 
1326 	bool inLastRequestedRange = false;
1327 	if ((flags & B_MEDIA_SEEK_TO_FRAME) != 0) {
1328 		if (fLastReportedKeyframe.reportedFrame
1329 			<= fLastReportedKeyframe.requestedFrame) {
1330 			inLastRequestedRange
1331 				= *frame >= fLastReportedKeyframe.reportedFrame
1332 					&& *frame <= fLastReportedKeyframe.requestedFrame;
1333 		} else {
1334 			inLastRequestedRange
1335 				= *frame >= fLastReportedKeyframe.requestedFrame
1336 					&& *frame <= fLastReportedKeyframe.reportedFrame;
1337 		}
1338 	} else if ((flags & B_MEDIA_SEEK_TO_FRAME) == 0) {
1339 		if (fLastReportedKeyframe.reportedTime
1340 			<= fLastReportedKeyframe.requestedTime) {
1341 			inLastRequestedRange
1342 				= *time >= fLastReportedKeyframe.reportedTime
1343 					&& *time <= fLastReportedKeyframe.requestedTime;
1344 		} else {
1345 			inLastRequestedRange
1346 				= *time >= fLastReportedKeyframe.requestedTime
1347 					&& *time <= fLastReportedKeyframe.reportedTime;
1348 		}
1349 	}
1350 
1351 	if (inLastRequestedRange) {
1352 		*frame = fLastReportedKeyframe.reportedFrame;
1353 		*time = fLastReportedKeyframe.reportedTime;
1354 		TRACE_FIND("  same as last reported keyframe\n");
1355 		return B_OK;
1356 	}
1357 
1358 	double frameRate = FrameRate();
1359 	if ((flags & B_MEDIA_SEEK_TO_FRAME) != 0)
1360 		*time = (bigtime_t)(*frame * 1000000.0 / frameRate + 0.5);
1361 
1362 	status_t ret;
1363 	if (fGhostStream == NULL) {
1364 		BAutolock _(fSourceLock);
1365 
1366 		fGhostStream = new(std::nothrow) StreamBase(fSource, fSourceLock,
1367 			&fLock);
1368 		if (fGhostStream == NULL) {
1369 			TRACE("  failed to allocate ghost stream\n");
1370 			return B_NO_MEMORY;
1371 		}
1372 
1373 		ret = fGhostStream->Open();
1374 		if (ret != B_OK) {
1375 			TRACE("  ghost stream failed to open: %s\n", strerror(ret));
1376 			return B_ERROR;
1377 		}
1378 
1379 		ret = fGhostStream->Init(fVirtualIndex);
1380 		if (ret != B_OK) {
1381 			TRACE("  ghost stream failed to init: %s\n", strerror(ret));
1382 			return B_ERROR;
1383 		}
1384 	}
1385 	fLastReportedKeyframe.requestedFrame = *frame;
1386 	fLastReportedKeyframe.requestedTime = *time;
1387 	fLastReportedKeyframe.seekFlags = flags;
1388 
1389 	ret = fGhostStream->Seek(flags, frame, time);
1390 	if (ret != B_OK) {
1391 		TRACE("  ghost stream failed to seek: %s\n", strerror(ret));
1392 		return B_ERROR;
1393 	}
1394 
1395 	fLastReportedKeyframe.reportedFrame = *frame;
1396 	fLastReportedKeyframe.reportedTime = *time;
1397 
1398 	TRACE_FIND("  found time: %.2fs\n", *time / 1000000.0);
1399 	if ((flags & B_MEDIA_SEEK_TO_FRAME) != 0) {
1400 		*frame = int64_t(*time * FrameRate() / 1000000.0 + 0.5);
1401 		TRACE_FIND("  found frame: %lld\n", *frame);
1402 	}
1403 
1404 	return B_OK;
1405 }
1406 
1407 
1408 status_t
1409 AVFormatReader::Stream::Seek(uint32 flags, int64* frame, bigtime_t* time)
1410 {
1411 	BAutolock _(&fLock);
1412 
1413 	if (fContext == NULL || fStream == NULL)
1414 		return B_NO_INIT;
1415 
1416 	// Put the old requested values into frame/time, since we already know
1417 	// that the sought frame/time will then match the reported values.
1418 	// TODO: Will not work if client changes seek flags (from backwards to
1419 	// forward or vice versa)!!
1420 	bool inLastRequestedRange = false;
1421 	if ((flags & B_MEDIA_SEEK_TO_FRAME) != 0) {
1422 		if (fLastReportedKeyframe.reportedFrame
1423 			<= fLastReportedKeyframe.requestedFrame) {
1424 			inLastRequestedRange
1425 				= *frame >= fLastReportedKeyframe.reportedFrame
1426 					&& *frame <= fLastReportedKeyframe.requestedFrame;
1427 		} else {
1428 			inLastRequestedRange
1429 				= *frame >= fLastReportedKeyframe.requestedFrame
1430 					&& *frame <= fLastReportedKeyframe.reportedFrame;
1431 		}
1432 	} else if ((flags & B_MEDIA_SEEK_TO_FRAME) == 0) {
1433 		if (fLastReportedKeyframe.reportedTime
1434 			<= fLastReportedKeyframe.requestedTime) {
1435 			inLastRequestedRange
1436 				= *time >= fLastReportedKeyframe.reportedTime
1437 					&& *time <= fLastReportedKeyframe.requestedTime;
1438 		} else {
1439 			inLastRequestedRange
1440 				= *time >= fLastReportedKeyframe.requestedTime
1441 					&& *time <= fLastReportedKeyframe.reportedTime;
1442 		}
1443 	}
1444 
1445 	if (inLastRequestedRange) {
1446 		*frame = fLastReportedKeyframe.requestedFrame;
1447 		*time = fLastReportedKeyframe.requestedTime;
1448 		flags = fLastReportedKeyframe.seekFlags;
1449 	}
1450 
1451 	return StreamBase::Seek(flags, frame, time);
1452 }
1453 
1454 
1455 // #pragma mark - AVFormatReader
1456 
1457 
// Creates an empty reader. The Stream objects are allocated later, in
// Sniff() and AllocateCookie(). fSourceLock serializes all access to the
// BPositionIO shared by the streams.
AVFormatReader::AVFormatReader()
	:
	fCopyright(""),
	fStreams(NULL),
	fSourceLock("source I/O lock")
{
	TRACE("AVFormatReader::AVFormatReader\n");
}
1466 
1467 
1468 AVFormatReader::~AVFormatReader()
1469 {
1470 	TRACE("AVFormatReader::~AVFormatReader\n");
1471 	if (fStreams != NULL) {
1472 		// The client was supposed to call FreeCookie() on all
1473 		// allocated streams. Deleting the first stream is always
1474 		// prevented, we delete the other ones just in case.
1475 		int32 count = fStreams[0]->CountStreams();
1476 		for (int32 i = 0; i < count; i++)
1477 			delete fStreams[i];
1478 		delete[] fStreams;
1479 	}
1480 }
1481 
1482 
1483 // #pragma mark -
1484 
1485 
1486 const char*
1487 AVFormatReader::Copyright()
1488 {
1489 	if (fCopyright.Length() <= 0) {
1490 		BMessage message;
1491 		if (GetMetaData(&message) == B_OK)
1492 			message.FindString("copyright", &fCopyright);
1493 	}
1494 	return fCopyright.String();
1495 }
1496 
1497 
1498 status_t
1499 AVFormatReader::Sniff(int32* _streamCount)
1500 {
1501 	TRACE("AVFormatReader::Sniff\n");
1502 
1503 	BPositionIO* source = dynamic_cast<BPositionIO*>(Source());
1504 	if (source == NULL) {
1505 		TRACE("  not a BPositionIO, but we need it to be one.\n");
1506 		return B_NOT_SUPPORTED;
1507 	}
1508 
1509 	Stream* stream = new(std::nothrow) Stream(source,
1510 		&fSourceLock);
1511 	if (stream == NULL) {
1512 		ERROR("AVFormatReader::Sniff() - failed to allocate Stream\n");
1513 		return B_NO_MEMORY;
1514 	}
1515 
1516 	ObjectDeleter<Stream> streamDeleter(stream);
1517 
1518 	status_t ret = stream->Open();
1519 	if (ret != B_OK) {
1520 		TRACE("  failed to detect stream: %s\n", strerror(ret));
1521 		return ret;
1522 	}
1523 
1524 	delete[] fStreams;
1525 	fStreams = NULL;
1526 
1527 	int32 streamCount = stream->CountStreams();
1528 	if (streamCount == 0) {
1529 		TRACE("  failed to detect any streams: %s\n", strerror(ret));
1530 		return B_ERROR;
1531 	}
1532 
1533 	fStreams = new(std::nothrow) Stream*[streamCount];
1534 	if (fStreams == NULL) {
1535 		ERROR("AVFormatReader::Sniff() - failed to allocate streams\n");
1536 		return B_NO_MEMORY;
1537 	}
1538 
1539 	memset(fStreams, 0, sizeof(Stream*) * streamCount);
1540 	fStreams[0] = stream;
1541 	streamDeleter.Detach();
1542 
1543 	#ifdef TRACE_AVFORMAT_READER
1544 	dump_format(const_cast<AVFormatContext*>(stream->Context()), 0, "", 0);
1545 	#endif
1546 
1547 	if (_streamCount != NULL)
1548 		*_streamCount = streamCount;
1549 
1550 	return B_OK;
1551 }
1552 
1553 
1554 void
1555 AVFormatReader::GetFileFormatInfo(media_file_format* mff)
1556 {
1557 	TRACE("AVFormatReader::GetFileFormatInfo\n");
1558 
1559 	if (fStreams == NULL)
1560 		return;
1561 
1562 	// The first cookie is always there!
1563 	const AVFormatContext* context = fStreams[0]->Context();
1564 
1565 	if (context == NULL || context->iformat == NULL) {
1566 		TRACE("  no AVFormatContext or AVInputFormat!\n");
1567 		return;
1568 	}
1569 
1570 	const media_file_format* format = demuxer_format_for(context->iformat);
1571 
1572 	mff->capabilities = media_file_format::B_READABLE
1573 		| media_file_format::B_KNOWS_ENCODED_VIDEO
1574 		| media_file_format::B_KNOWS_ENCODED_AUDIO
1575 		| media_file_format::B_IMPERFECTLY_SEEKABLE;
1576 
1577 	if (format != NULL) {
1578 		mff->family = format->family;
1579 	} else {
1580 		TRACE("  no DemuxerFormat for AVInputFormat!\n");
1581 		mff->family = B_MISC_FORMAT_FAMILY;
1582 	}
1583 
1584 	mff->version = 100;
1585 
1586 	if (format != NULL) {
1587 		strcpy(mff->mime_type, format->mime_type);
1588 	} else {
1589 		// TODO: Would be nice to be able to provide this from AVInputFormat,
1590 		// maybe by extending the FFmpeg code itself (all demuxers).
1591 		strcpy(mff->mime_type, "");
1592 	}
1593 
1594 	if (context->iformat->extensions != NULL)
1595 		strcpy(mff->file_extension, context->iformat->extensions);
1596 	else {
1597 		TRACE("  no file extensions for AVInputFormat.\n");
1598 		strcpy(mff->file_extension, "");
1599 	}
1600 
1601 	if (context->iformat->name != NULL)
1602 		strcpy(mff->short_name,  context->iformat->name);
1603 	else {
1604 		TRACE("  no short name for AVInputFormat.\n");
1605 		strcpy(mff->short_name, "");
1606 	}
1607 
1608 	if (context->iformat->long_name != NULL)
1609 		sprintf(mff->pretty_name, "%s (FFmpeg)", context->iformat->long_name);
1610 	else {
1611 		if (format != NULL)
1612 			sprintf(mff->pretty_name, "%s (FFmpeg)", format->pretty_name);
1613 		else
1614 			strcpy(mff->pretty_name, "Unknown (FFmpeg)");
1615 	}
1616 }
1617 
1618 
1619 status_t
1620 AVFormatReader::GetMetaData(BMessage* _data)
1621 {
1622 	// The first cookie is always there!
1623 	const AVFormatContext* context = fStreams[0]->Context();
1624 
1625 	if (context == NULL)
1626 		return B_NO_INIT;
1627 
1628 	avdictionary_to_message(context->metadata, _data);
1629 
1630 	// Add chapter info
1631 	for (unsigned i = 0; i < context->nb_chapters; i++) {
1632 		AVChapter* chapter = context->chapters[i];
1633 		BMessage chapterData;
1634 		chapterData.AddInt64("start", bigtime_t(1000000.0
1635 			* chapter->start * chapter->time_base.num
1636 			/ chapter->time_base.den + 0.5));
1637 		chapterData.AddInt64("end", bigtime_t(1000000.0
1638 			* chapter->end * chapter->time_base.num
1639 			/ chapter->time_base.den + 0.5));
1640 
1641 		avdictionary_to_message(chapter->metadata, &chapterData);
1642 		_data->AddMessage("be:chapter", &chapterData);
1643 	}
1644 
1645 	// Add program info
1646 	for (unsigned i = 0; i < context->nb_programs; i++) {
1647 		BMessage programData;
1648 		avdictionary_to_message(context->programs[i]->metadata, &programData);
1649 		_data->AddMessage("be:program", &programData);
1650 	}
1651 
1652 	return B_OK;
1653 }
1654 
1655 
1656 // #pragma mark -
1657 
1658 
// Returns the Stream object for streamIndex as an opaque cookie. Slot 0
// reuses the stream created by Sniff(); other slots get their own Stream
// (each opens the source independently) so streams can be demuxed
// concurrently.
status_t
AVFormatReader::AllocateCookie(int32 streamIndex, void** _cookie)
{
	TRACE("AVFormatReader::AllocateCookie(%ld)\n", streamIndex);

	BAutolock _(fSourceLock);

	if (fStreams == NULL)
		return B_NO_INIT;

	if (streamIndex < 0 || streamIndex >= fStreams[0]->CountStreams())
		return B_BAD_INDEX;

	if (_cookie == NULL)
		return B_BAD_VALUE;

	// Reuse an existing Stream for this slot if there is one (always the
	// case for index 0 after Sniff()).
	Stream* cookie = fStreams[streamIndex];
	if (cookie == NULL) {
		// Allocate the cookie
		BPositionIO* source = dynamic_cast<BPositionIO*>(Source());
		if (source == NULL) {
			TRACE("  not a BPositionIO, but we need it to be one.\n");
			return B_NOT_SUPPORTED;
		}

		cookie = new(std::nothrow) Stream(source, &fSourceLock);
		if (cookie == NULL) {
			ERROR("AVFormatReader::Sniff() - failed to allocate "
				"Stream\n");
			return B_NO_MEMORY;
		}

		status_t ret = cookie->Open();
		if (ret != B_OK) {
			TRACE("  stream failed to open: %s\n", strerror(ret));
			delete cookie;
			return ret;
		}
	}

	status_t ret = cookie->Init(streamIndex);
	if (ret != B_OK) {
		TRACE("  stream failed to initialize: %s\n", strerror(ret));
		// NOTE: Never delete the first stream!
		if (streamIndex != 0)
			delete cookie;
		return ret;
	}

	fStreams[streamIndex] = cookie;
	*_cookie = cookie;

	return B_OK;
}
1713 
1714 
1715 status_t
1716 AVFormatReader::FreeCookie(void *_cookie)
1717 {
1718 	BAutolock _(fSourceLock);
1719 
1720 	Stream* cookie = reinterpret_cast<Stream*>(_cookie);
1721 
1722 	// NOTE: Never delete the first cookie!
1723 	if (cookie != NULL && cookie->VirtualIndex() != 0) {
1724 		if (fStreams != NULL)
1725 			fStreams[cookie->VirtualIndex()] = NULL;
1726 		delete cookie;
1727 	}
1728 
1729 	return B_OK;
1730 }
1731 
1732 
1733 // #pragma mark -
1734 
1735 
1736 status_t
1737 AVFormatReader::GetStreamInfo(void* _cookie, int64* frameCount,
1738 	bigtime_t* duration, media_format* format, const void** infoBuffer,
1739 	size_t* infoSize)
1740 {
1741 	Stream* cookie = reinterpret_cast<Stream*>(_cookie);
1742 	return cookie->GetStreamInfo(frameCount, duration, format, infoBuffer,
1743 		infoSize);
1744 }
1745 
1746 
1747 status_t
1748 AVFormatReader::GetStreamMetaData(void* _cookie, BMessage* _data)
1749 {
1750 	Stream* cookie = reinterpret_cast<Stream*>(_cookie);
1751 	return cookie->GetMetaData(_data);
1752 }
1753 
1754 
1755 status_t
1756 AVFormatReader::Seek(void* _cookie, uint32 seekTo, int64* frame,
1757 	bigtime_t* time)
1758 {
1759 	Stream* cookie = reinterpret_cast<Stream*>(_cookie);
1760 	return cookie->Seek(seekTo, frame, time);
1761 }
1762 
1763 
1764 status_t
1765 AVFormatReader::FindKeyFrame(void* _cookie, uint32 flags, int64* frame,
1766 	bigtime_t* time)
1767 {
1768 	Stream* cookie = reinterpret_cast<Stream*>(_cookie);
1769 	return cookie->FindKeyFrame(flags, frame, time);
1770 }
1771 
1772 
1773 status_t
1774 AVFormatReader::GetNextChunk(void* _cookie, const void** chunkBuffer,
1775 	size_t* chunkSize, media_header* mediaHeader)
1776 {
1777 	Stream* cookie = reinterpret_cast<Stream*>(_cookie);
1778 	return cookie->GetNextChunk(chunkBuffer, chunkSize, mediaHeader);
1779 }
1780