xref: /haiku/src/add-ons/media/plugins/ffmpeg/AVFormatReader.cpp (revision 5ac9b506412b11afb993bb52d161efe7666958a5)
1 /*
2  * Copyright 2009-2010, Stephan Aßmus <superstippi@gmx.de>
3  * Copyright 2014, Colin Günther <coling@gmx.de>
4  * Copyright 2018, Dario Casalinuovo
5  * All rights reserved. Distributed under the terms of the GNU L-GPL license.
6  */
7 
8 #include "AVFormatReader.h"
9 
10 #include <stdio.h>
11 #include <string.h>
12 #include <stdlib.h>
13 
14 #include <new>
15 
16 #include <AutoDeleter.h>
17 #include <Autolock.h>
18 #include <ByteOrder.h>
19 #include <MediaIO.h>
20 #include <MediaDefs.h>
21 #include <MediaFormats.h>
22 #include <MimeType.h>
23 
24 extern "C" {
25 	#include "avcodec.h"
26 	#include "avformat.h"
27 }
28 
29 #include "DemuxerTable.h"
30 #include "gfx_util.h"
31 #include "Utilities.h"
32 
33 
34 //#define TRACE_AVFORMAT_READER
35 #ifdef TRACE_AVFORMAT_READER
36 #	define TRACE printf
37 #	define TRACE_IO(a...)
38 #	define TRACE_SEEK(a...) printf(a)
39 #	define TRACE_FIND(a...)
40 #	define TRACE_PACKET(a...)
41 #else
42 #	define TRACE(a...)
43 #	define TRACE_IO(a...)
44 #	define TRACE_SEEK(a...)
45 #	define TRACE_FIND(a...)
46 #	define TRACE_PACKET(a...)
47 #endif
48 
49 #define ERROR(a...) fprintf(stderr, a)
50 
51 
52 static uint32
53 avformat_to_beos_byte_order(AVSampleFormat format)
54 {
55 	// TODO: Huh?
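	// As far as I can tell, FFmpeg hands us decoded samples in native byte
	// order, so reporting the host endianness seems to be the right thing
	// regardless of the sample format.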
56 	return B_MEDIA_HOST_ENDIAN;
57 }
58 
59 
60 static void
61 avdictionary_to_message(AVDictionary* dictionary, BMetaData* data)
62 {
63 	if (dictionary == NULL)
64 		return;
65 
66 	AVDictionaryEntry* entry = NULL;
67 	while ((entry = av_dict_get(dictionary, "", entry,
68 		AV_DICT_IGNORE_SUFFIX))) {
69 		// convert entry keys into something more meaningful using the names from
70 		// id3v2.c
71 		if (strcmp(entry->key, "TALB") == 0 || strcmp(entry->key, "TAL") == 0)
72 			data->SetString(kAlbum, entry->value);
73 		else if (strcmp(entry->key, "TCOM") == 0)
74 			data->SetString(kComposer, entry->value);
75 		else if (strcmp(entry->key, "TCON") == 0 || strcmp(entry->key, "TCO") == 0)
76 			data->SetString(kGenre, entry->value);
77 		else if (strcmp(entry->key, "TCOP") == 0)
78 			data->SetString(kCopyright, entry->value);
79 		else if (strcmp(entry->key, "TDRL") == 0 || strcmp(entry->key, "TDRC") == 0)
80 			data->SetString(kDate, entry->value);
81 		else if (strcmp(entry->key, "TENC") == 0 || strcmp(entry->key, "TEN") == 0)
82 			data->SetString(kEncodedBy, entry->value);
83 		else if (strcmp(entry->key, "TIT2") == 0 || strcmp(entry->key, "TT2") == 0)
84 			data->SetString(kTitle, entry->value);
85 		else if (strcmp(entry->key, "TLAN") == 0)
86 			data->SetString(kLanguage, entry->value);
87 		else if (strcmp(entry->key, "TPE1") == 0 || strcmp(entry->key, "TP1") == 0)
88 			data->SetString(kArtist, entry->value);
89 		else if (strcmp(entry->key, "TPE2") == 0 || strcmp(entry->key, "TP2") == 0)
90 			data->SetString(kAlbumArtist, entry->value);
91 		else if (strcmp(entry->key, "TPE3") == 0 || strcmp(entry->key, "TP3") == 0)
92 			data->SetString(kPerformer, entry->value);
93 		else if (strcmp(entry->key, "TPOS") == 0)
94 			data->SetString(kDisc, entry->value);
95 		else if (strcmp(entry->key, "TPUB") == 0)
96 			data->SetString(kPublisher, entry->value);
97 		else if (strcmp(entry->key, "TRCK") == 0 || strcmp(entry->key, "TRK") == 0)
98 			data->SetString(kTrack, entry->value);
99 		else if (strcmp(entry->key, "TSOA") == 0)
100 			data->SetString("album-sort", entry->value);
101 		else if (strcmp(entry->key, "TSOP") == 0)
102 			data->SetString("artist-sort", entry->value);
103 		else if (strcmp(entry->key, "TSOT") == 0)
104 			data->SetString("title-sort", entry->value);
105 		else if (strcmp(entry->key, "TSSE") == 0)
106 			data->SetString(kEncoder, entry->value);
107 		else if (strcmp(entry->key, "TYER") == 0)
108 			data->SetString(kYear, entry->value);
109 		else
110 			data->SetString(entry->key, entry->value);
111 	}
112 }
113 
114 
115 // #pragma mark - StreamBase
116 
117 
118 class StreamBase {
119 public:
120 								StreamBase(BMediaIO* source,
121 									BLocker* sourceLock, BLocker* streamLock);
122 	virtual						~StreamBase();
123 
124 	// Init an individual AVFormatContext
125 			status_t			Open();
126 
127 	// Set up this stream to point to the AVStream at the given streamIndex.
128 	virtual	status_t			Init(int32 streamIndex);
129 
130 	inline	const AVFormatContext* Context() const
131 									{ return fContext; }
132 			int32				Index() const;
133 			int32				CountStreams() const;
134 			int32				StreamIndexFor(int32 virtualIndex) const;
135 	inline	int32				VirtualIndex() const
136 									{ return fVirtualIndex; }
137 
138 			double				FrameRate() const;
139 			bigtime_t			Duration() const;
140 
141 	virtual	status_t			Seek(uint32 flags, int64* frame,
142 									bigtime_t* time);
143 
144 			status_t			GetNextChunk(const void** chunkBuffer,
145 									size_t* chunkSize,
146 									media_header* mediaHeader);
147 
148 protected:
149 	// I/O hooks for libavformat; the cookie will be a Stream instance.
150 	// Since multiple Stream instances use the same BMediaIO source, each
151 	// maintains its own position and may need to seek the source in
152 	// _Read() if the positions no longer match.
153 	static	int					_Read(void* cookie, uint8* buffer,
154 									int bufferSize);
155 	static	off_t				_Seek(void* cookie, off_t offset, int whence);
156 
157 			status_t			_NextPacket(bool reuse);
158 
159 			int64_t				_ConvertToStreamTimeBase(bigtime_t time) const;
160 			bigtime_t			_ConvertFromStreamTimeBase(int64_t time) const;
161 
162 protected:
163 			BMediaIO*			fSource;
164 			off_t				fPosition;
165 			// Since different threads may read from the source,
166 			// we need to protect the file position and I/O with a lock.
167 			BLocker*			fSourceLock;
168 
169 			BLocker*			fStreamLock;
170 
171 			AVFormatContext*	fContext;
172 			AVStream*			fStream;
173 			int32				fVirtualIndex;
174 
175 			media_format		fFormat;
176 
177 			AVIOContext*		fIOContext;
178 
179 			AVPacket			fPacket;
180 			bool				fReusePacket;
181 
182 			bool				fSeekByBytes;
183 			bool				fStreamBuildsIndexWhileReading;
184 };
185 
186 
187 StreamBase::StreamBase(BMediaIO* source, BLocker* sourceLock,
188 		BLocker* streamLock)
189 	:
190 	fSource(source),
191 	fPosition(0),
192 	fSourceLock(sourceLock),
193 
194 	fStreamLock(streamLock),
195 
196 	fContext(NULL),
197 	fStream(NULL),
198 	fVirtualIndex(-1),
199 	fIOContext(NULL),
200 
201 	fReusePacket(false),
202 
203 	fSeekByBytes(false),
204 	fStreamBuildsIndexWhileReading(false)
205 {
206 	// NOTE: Don't use streamLock here, it may not yet be initialized!
207 
208 	av_new_packet(&fPacket, 0);
209 	memset(&fFormat, 0, sizeof(media_format));
210 }
211 
212 
213 StreamBase::~StreamBase()
214 {
215 	avformat_close_input(&fContext);
216 	av_free_packet(&fPacket);
217 	if (fIOContext != NULL)
218 		av_free(fIOContext->buffer);
219 	av_free(fIOContext);
220 }
221 
222 
223 status_t
224 StreamBase::Open()
225 {
226 	BAutolock _(fStreamLock);
227 
228 	// Init probing data
229 	size_t bufferSize = 32768;
230 	uint8* buffer = static_cast<uint8*>(av_malloc(bufferSize));
231 	if (buffer == NULL)
232 		return B_NO_MEMORY;
233 
234 	// First try to identify the file using the MIME database, as ffmpeg
235 	// (especially old versions) is not very good at this and relies on us
236 	// to give it the file extension as a hint.
237 	// For this we need some valid data in the buffer; the first 512 bytes
238 	// should do, because our MIME sniffing never uses more.
239 	const char* extension = NULL;
240 	BMessage message;
241 	if (fSource->Read(buffer, 512) == 512) {
242 		BMimeType type;
243 		if (BMimeType::GuessMimeType(buffer, 512, &type) == B_OK) {
244 			if (type.GetFileExtensions(&message) == B_OK) {
245 				extension = message.FindString("extensions");
246 			}
247 		}
248 	}
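	// Note: The sniffing read above advanced the source, but fPosition is
	// still 0, so the first _Read() will seek the source back to the start
	// before reading.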
249 
250 	// If the format is not identified, try Amiga MOD-files, because these do
251 	// not currently have a sniffing rule.
252 	if (extension == NULL)
253 		extension = ".mod";
254 
255 	// Allocate an I/O context with the buffer and hook functions, passing
256 	// ourselves as the cookie.
257 	memset(buffer, 0, bufferSize);
258 	fIOContext = avio_alloc_context(buffer, bufferSize, 0, this, _Read, 0,
259 		_Seek);
260 	if (fIOContext == NULL) {
261 		TRACE("StreamBase::Open() - avio_alloc_context() failed!\n");
262 		av_free(buffer);
263 		return B_ERROR;
264 	}
265 
266 	fContext = avformat_alloc_context();
267 	fContext->pb = fIOContext;
268 
269 	// Allocate our context and probe the input format
270 	if (avformat_open_input(&fContext, extension, NULL, NULL) < 0) {
271 		TRACE("StreamBase::Open() - avformat_open_input() failed!\n");
272 		// avformat_open_input() frees the context in case of failure
273 		fContext = NULL;
274 		av_free(fIOContext);
275 		fIOContext = NULL;
276 		return B_NOT_SUPPORTED;
277 	}
278 
279 	TRACE("StreamBase::Open() - "
280 		"avformat_open_input(): %s\n", fContext->iformat->name);
281 	TRACE("  flags:%s%s%s%s%s\n",
282 		(fContext->iformat->flags & AVFMT_GLOBALHEADER) ? " AVFMT_GLOBALHEADER" : "",
283 		(fContext->iformat->flags & AVFMT_NOTIMESTAMPS) ? " AVFMT_NOTIMESTAMPS" : "",
284 		(fContext->iformat->flags & AVFMT_GENERIC_INDEX) ? " AVFMT_GENERIC_INDEX" : "",
285 		(fContext->iformat->flags & AVFMT_TS_DISCONT) ? " AVFMT_TS_DISCONT" : "",
286 		(fContext->iformat->flags & AVFMT_VARIABLE_FPS) ? " AVFMT_VARIABLE_FPS" : ""
287 	);
288 
289 
290 	// Retrieve stream information
291 	if (avformat_find_stream_info(fContext, NULL) < 0) {
292 		TRACE("StreamBase::Open() - avformat_find_stream_info() failed!\n");
293 		return B_NOT_SUPPORTED;
294 	}
295 
296 	fSeekByBytes = (fContext->iformat->flags & AVFMT_TS_DISCONT) != 0;
297 	fStreamBuildsIndexWhileReading
298 		= (fContext->iformat->flags & AVFMT_GENERIC_INDEX) != 0
299 			|| fSeekByBytes;
300 
301 	TRACE("StreamBase::Open() - "
302 		"avformat_find_stream_info() success! Seeking by bytes: %d\n",
303 		fSeekByBytes);
304 
305 	return B_OK;
306 }
307 
308 
309 status_t
310 StreamBase::Init(int32 virtualIndex)
311 {
312 	BAutolock _(fStreamLock);
313 
314 	TRACE("StreamBase::Init(%ld)\n", virtualIndex);
315 
316 	if (fContext == NULL)
317 		return B_NO_INIT;
318 
319 	int32 streamIndex = StreamIndexFor(virtualIndex);
320 	if (streamIndex < 0) {
321 		TRACE("  bad stream index!\n");
322 		return B_BAD_INDEX;
323 	}
324 
325 	TRACE("  context stream index: %ld\n", streamIndex);
326 
327 	// We need to remember the virtual index so that
328 	// AVFormatReader::FreeCookie() can clear the correct stream entry.
329 	fVirtualIndex = virtualIndex;
330 
331 	// Make us point to the AVStream at streamIndex
332 	fStream = fContext->streams[streamIndex];
333 
334 // NOTE: Discarding other streams works for most, but not all containers,
335 // for example it does not work for the ASF demuxer. Since I don't know which
336 // other demuxers it breaks, let's just keep reading packets for unwanted
337 // streams; it just makes the _NextPacket() function slightly less
338 // efficient.
339 //	// Discard all other streams
340 //	for (unsigned i = 0; i < fContext->nb_streams; i++) {
341 //		if (i != (unsigned)streamIndex)
342 //			fContext->streams[i]->discard = AVDISCARD_ALL;
343 //	}
344 
345 	return B_OK;
346 }
347 
348 
349 int32
350 StreamBase::Index() const
351 {
352 	if (fStream != NULL)
353 		return fStream->index;
354 	return -1;
355 }
356 
357 
358 int32
359 StreamBase::CountStreams() const
360 {
361 	// Figure out the stream count. If the context has "AVPrograms", use
362 	// the first program (for now).
363 	// TODO: To support "programs" properly, the BMediaFile/Track API should
364 	// be extended accordingly. I guess programs are like TV channels in the
365 	// same satellite transport stream. Maybe call them "TrackGroups".
366 	if (fContext->nb_programs > 0) {
367 		// See libavformat/utils.c:dump_format()
368 		return fContext->programs[0]->nb_stream_indexes;
369 	}
370 	return fContext->nb_streams;
371 }
372 
373 
374 int32
375 StreamBase::StreamIndexFor(int32 virtualIndex) const
376 {
377 	// NOTE: See CountStreams()
378 	if (fContext->nb_programs > 0) {
379 		const AVProgram* program = fContext->programs[0];
380 		if (virtualIndex >= 0
381 			&& virtualIndex < (int32)program->nb_stream_indexes) {
382 			return program->stream_index[virtualIndex];
383 		}
384 	} else {
385 		if (virtualIndex >= 0 && virtualIndex < (int32)fContext->nb_streams)
386 			return virtualIndex;
387 	}
388 	return -1;
389 }
390 
391 
392 double
393 StreamBase::FrameRate() const
394 {
395 	// TODO: Find a way to always calculate a correct frame rate...
396 	double frameRate = 1.0;
397 	switch (fStream->codecpar->codec_type) {
398 		case AVMEDIA_TYPE_AUDIO:
399 			frameRate = (double)fStream->codecpar->sample_rate;
400 			break;
401 		case AVMEDIA_TYPE_VIDEO:
402 			if (fStream->avg_frame_rate.den && fStream->avg_frame_rate.num)
403 				frameRate = av_q2d(fStream->avg_frame_rate);
404 			else if (fStream->r_frame_rate.den && fStream->r_frame_rate.num)
405 				frameRate = av_q2d(fStream->r_frame_rate);
406 			else if (fStream->time_base.den && fStream->time_base.num)
407 				frameRate = 1 / av_q2d(fStream->time_base);
408 
409 			// TODO: Fix up interlaced video for real
410 			if (frameRate == 50.0f)
411 				frameRate = 25.0f;
412 			break;
413 		default:
414 			break;
415 	}
416 	if (frameRate <= 0.0)
417 		frameRate = 1.0;
418 	return frameRate;
419 }
420 
421 
422 bigtime_t
423 StreamBase::Duration() const
424 {
425 	// TODO: This is not working correctly for all stream types...
426 	// It seems that the calculations here are correct, because they work
427 	// for a couple of streams and are in line with the documentation, but
428 	// unfortunately, libavformat itself seems to set the time_base and
429 	// duration wrongly sometimes. :-(
430 
431 	int32 flags;
432 	fSource->GetFlags(&flags);
433 
434 	// "Mutable size" (i.e. HTTP streams) means we can't realistically compute
435 	// a duration, so don't let ffmpeg give a (wrong) estimate in this case.
436 	if ((flags & B_MEDIA_MUTABLE_SIZE) != 0)
437 		return 0;
438 
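	// Prefer the duration of our stream (in stream time_base units). The
	// container duration is already in AV_TIME_BASE (i.e. microsecond)
	// units, so it can be returned directly.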
439 	if ((int64)fStream->duration != AV_NOPTS_VALUE)
440 		return _ConvertFromStreamTimeBase(fStream->duration);
441 	else if ((int64)fContext->duration != AV_NOPTS_VALUE)
442 		return (bigtime_t)fContext->duration;
443 
444 	return 0;
445 }
446 
447 
448 status_t
449 StreamBase::Seek(uint32 flags, int64* frame, bigtime_t* time)
450 {
451 	BAutolock _(fStreamLock);
452 
453 	if (fContext == NULL || fStream == NULL)
454 		return B_NO_INIT;
455 
456 	TRACE_SEEK("StreamBase::Seek(%ld,%s%s%s%s, %lld, "
457 		"%lld)\n", VirtualIndex(),
458 		(flags & B_MEDIA_SEEK_TO_FRAME) ? " B_MEDIA_SEEK_TO_FRAME" : "",
459 		(flags & B_MEDIA_SEEK_TO_TIME) ? " B_MEDIA_SEEK_TO_TIME" : "",
460 		(flags & B_MEDIA_SEEK_CLOSEST_BACKWARD)
461 			? " B_MEDIA_SEEK_CLOSEST_BACKWARD" : "",
462 		(flags & B_MEDIA_SEEK_CLOSEST_FORWARD)
463 			? " B_MEDIA_SEEK_CLOSEST_FORWARD" : "",
464 		*frame, *time);
465 
466 	double frameRate = FrameRate();
467 	if ((flags & B_MEDIA_SEEK_TO_FRAME) != 0) {
468 		// Seeking is always based on time, so initialize it when the client
469 		// seeks based on frame.
470 		*time = (bigtime_t)(*frame * 1000000.0 / frameRate + 0.5);
471 	}
472 
473 	int64_t timeStamp = *time;
474 
475 	int searchFlags = AVSEEK_FLAG_BACKWARD;
476 	if ((flags & B_MEDIA_SEEK_CLOSEST_FORWARD) != 0)
477 		searchFlags = 0;
478 
479 	if (fSeekByBytes) {
480 		searchFlags |= AVSEEK_FLAG_BYTE;
481 
482 		BAutolock _(fSourceLock);
483 		int64_t fileSize;
484 
485 		if (fSource->GetSize(&fileSize) != B_OK)
486 			return B_NOT_SUPPORTED;
487 
488 		int64_t duration = Duration();
489 		if (duration == 0)
490 			return B_NOT_SUPPORTED;
491 
492 		timeStamp = int64_t(fileSize * ((double)timeStamp / duration));
493 		if ((flags & B_MEDIA_SEEK_CLOSEST_BACKWARD) != 0) {
494 			timeStamp -= 65536;
495 			if (timeStamp < 0)
496 				timeStamp = 0;
497 		}
498 
499 		bool seekAgain = true;
500 		bool seekForward = true;
501 		bigtime_t lastFoundTime = -1;
502 		int64_t closestTimeStampBackwards = -1;
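		// Iteratively refine the byte position: guess an offset proportional
		// to the requested time, read the next packet to learn the actual
		// time at that position, and step forward or backward until the
		// found time is close enough (see the tolerances below).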
503 		while (seekAgain) {
504 			if (avformat_seek_file(fContext, -1, INT64_MIN, timeStamp,
505 				INT64_MAX, searchFlags) < 0) {
506 				TRACE("  avformat_seek_file() (by bytes) failed.\n");
507 				return B_ERROR;
508 			}
509 			seekAgain = false;
510 
511 			// Our last packet is toast in any case. Read the next one so we
512 			// know where we really sought.
513 			fReusePacket = false;
514 			if (_NextPacket(true) == B_OK) {
515 				while (fPacket.pts == AV_NOPTS_VALUE) {
516 					fReusePacket = false;
517 					if (_NextPacket(true) != B_OK)
518 						return B_ERROR;
519 				}
520 				if (fPacket.pos >= 0)
521 					timeStamp = fPacket.pos;
522 				bigtime_t foundTime
523 					= _ConvertFromStreamTimeBase(fPacket.pts);
524 				if (foundTime != lastFoundTime) {
525 					lastFoundTime = foundTime;
526 					if (foundTime > *time) {
527 						if (closestTimeStampBackwards >= 0) {
528 							timeStamp = closestTimeStampBackwards;
529 							seekAgain = true;
530 							seekForward = false;
531 							continue;
532 						}
533 						int64_t diff = int64_t(fileSize
534 							* ((double)(foundTime - *time) / (2 * duration)));
535 						if (diff < 8192)
536 							break;
537 						timeStamp -= diff;
538 						TRACE_SEEK("  need to seek back (%lld) (time: %.2f "
539 							"-> %.2f)\n", timeStamp, *time / 1000000.0,
540 							foundTime / 1000000.0);
541 						if (timeStamp < 0)
542 							foundTime = 0;
543 						else {
544 							seekAgain = true;
545 							continue;
546 						}
547 					} else if (seekForward && foundTime < *time - 100000) {
548 						closestTimeStampBackwards = timeStamp;
549 						int64_t diff = int64_t(fileSize
550 							* ((double)(*time - foundTime) / (2 * duration)));
551 						if (diff < 8192)
552 							break;
553 						timeStamp += diff;
554 						TRACE_SEEK("  need to seek forward (%lld) (time: "
555 							"%.2f -> %.2f)\n", timeStamp, *time / 1000000.0,
556 							foundTime / 1000000.0);
557 						if (timeStamp > duration)
558 							foundTime = duration;
559 						else {
560 							seekAgain = true;
561 							continue;
562 						}
563 					}
564 				}
565 				TRACE_SEEK("  found time: %lld -> %lld (%.2f)\n", *time,
566 					foundTime, foundTime / 1000000.0);
567 				*time = foundTime;
568 				*frame = (uint64)(*time * frameRate / 1000000LL + 0.5);
569 				TRACE_SEEK("  sought frame: %lld\n", *frame);
570 			} else {
571 				TRACE_SEEK("  _NextPacket() failed!\n");
572 				return B_ERROR;
573 			}
574 		}
575 	} else {
576 		// We may not get a PTS from the next packet after seeking, so
577 		// we try to get an expected time from the index.
578 		int64_t streamTimeStamp = _ConvertToStreamTimeBase(*time);
579 		int index = av_index_search_timestamp(fStream, streamTimeStamp,
580 			searchFlags);
581 		if (index < 0) {
582 			TRACE("  av_index_search_timestamp() failed\n");
583 		} else {
584 			if (index > 0) {
585 				const AVIndexEntry& entry = fStream->index_entries[index];
586 				streamTimeStamp = entry.timestamp;
587 			} else {
588 				// Some demuxers use the first index entry to store some
589 				// other information, like the total playing time for example.
590 				// Assume the timeStamp of the first entry is always 0.
591 				// TODO: Handle start-time offset?
592 				streamTimeStamp = 0;
593 			}
594 			bigtime_t foundTime = _ConvertFromStreamTimeBase(streamTimeStamp);
595 			bigtime_t timeDiff = foundTime > *time
596 				? foundTime - *time : *time - foundTime;
597 
598 			if (timeDiff > 1000000
599 				&& (fStreamBuildsIndexWhileReading
600 					|| index == fStream->nb_index_entries - 1)) {
601 				// If the stream is building the index on the fly while parsing
602 				// it, we only have entries in the index for positions already
603 				// decoded, i.e. we cannot seek into the future. In that case,
604 				// just assume that we can seek where we want and leave
605 				// time/frame unmodified. Since successfully seeking once
606 				// generates index entries for the sought-to position, we need
607 				// to remember this in fStreamBuildsIndexWhileReading: when
608 				// seeking back later there will be index entries, but we
609 				// still want to ignore the found entry.
610 				fStreamBuildsIndexWhileReading = true;
611 				TRACE_SEEK("  Not trusting generic index entry. "
612 					"(Current count: %d)\n", fStream->nb_index_entries);
613 			} else {
614 				// If we found a reasonable time, write it into *time.
615 				// After seeking, we will try to read the sought time from
616 				// the next packet. If the packet has no PTS value, we may
617 				// still have a more accurate time from the index lookup.
618 				*time = foundTime;
619 			}
620 		}
621 
622 		if (avformat_seek_file(fContext, -1, INT64_MIN, timeStamp, INT64_MAX,
623 				searchFlags) < 0) {
624 			TRACE("  avformat_seek_file() failed.\n");
625 			// Try to fall back to av_seek_frame()
626 			timeStamp = _ConvertToStreamTimeBase(timeStamp);
627 			if (av_seek_frame(fContext, fStream->index, timeStamp,
628 				searchFlags) < 0) {
629 				TRACE("  av_seek_frame() failed as well.\n");
630 				// Fall back to seeking to the beginning by bytes
631 				timeStamp = 0;
632 				if (av_seek_frame(fContext, fStream->index, timeStamp,
633 						AVSEEK_FLAG_BYTE) < 0) {
634 					TRACE("  av_seek_frame() by bytes failed as "
635 						"well.\n");
636 					// Do not propagate error in any case. We fail if we can't
637 					// read another packet.
638 				} else
639 					*time = 0;
640 			}
641 		}
642 
643 		// Our last packet is toast in any case. Read the next one so
644 		// we know where we really sought.
645 		bigtime_t foundTime = *time;
646 
647 		fReusePacket = false;
648 		if (_NextPacket(true) == B_OK) {
649 			if (fPacket.pts != AV_NOPTS_VALUE)
650 				foundTime = _ConvertFromStreamTimeBase(fPacket.pts);
651 			else
652 				TRACE_SEEK("  no PTS in packet after seeking\n");
653 		} else
654 			TRACE_SEEK("  _NextPacket() failed!\n");
655 
656 		*time = foundTime;
657 		TRACE_SEEK("  sought time: %.2fs\n", *time / 1000000.0);
658 		*frame = (uint64)(*time * frameRate / 1000000.0 + 0.5);
659 		TRACE_SEEK("  sought frame: %lld\n", *frame);
660 	}
661 
662 	return B_OK;
663 }
664 
665 
666 status_t
667 StreamBase::GetNextChunk(const void** chunkBuffer,
668 	size_t* chunkSize, media_header* mediaHeader)
669 {
670 	BAutolock _(fStreamLock);
671 
672 	TRACE_PACKET("StreamBase::GetNextChunk()\n");
673 
674 	// Get the last stream DTS before reading the next packet, since
675 	// afterwards it will point to the new one.
676 	int64 lastStreamDTS = fStream->cur_dts;
677 
678 	status_t ret = _NextPacket(false);
679 	if (ret != B_OK) {
680 		*chunkBuffer = NULL;
681 		*chunkSize = 0;
682 		return ret;
683 	}
684 
685 	// According to libavformat documentation, fPacket is valid until the
686 	// next call to av_read_frame(). This is what we want and we can share
687 	// the memory with the least overhead.
688 	*chunkBuffer = fPacket.data;
689 	*chunkSize = fPacket.size;
690 
691 	if (mediaHeader != NULL) {
692 		mediaHeader->type = fFormat.type;
693 		mediaHeader->buffer = 0;
694 		mediaHeader->destination = -1;
695 		mediaHeader->time_source = -1;
696 		mediaHeader->size_used = fPacket.size;
697 
698 		// FFmpeg recommends using the decoding time stamps as the primary
699 		// source for presentation time stamps, especially for video formats
700 		// that use frame reordering. Moreover, this ensures that the returned
701 		// start times form a monotonically increasing series (even for videos
702 		// that contain B-frames).
703 		// \see http://git.videolan.org/?p=ffmpeg.git;a=blob;f=libavformat/avformat.h;h=1e8a6294890d580cd9ebc684eaf4ce57c8413bd8;hb=9153b33a742c4e2a85ff6230aea0e75f5a8b26c2#l1623
704 		bigtime_t presentationTimeStamp;
705 		if (fPacket.dts != AV_NOPTS_VALUE)
706 			presentationTimeStamp = fPacket.dts;
707 		else if (fPacket.pts != AV_NOPTS_VALUE)
708 			presentationTimeStamp = fPacket.pts;
709 		else
710 			presentationTimeStamp = lastStreamDTS;
711 
712 		mediaHeader->start_time	= _ConvertFromStreamTimeBase(presentationTimeStamp);
713 		mediaHeader->file_pos = fPacket.pos;
714 		mediaHeader->data_offset = 0;
715 		switch (mediaHeader->type) {
716 			case B_MEDIA_RAW_AUDIO:
717 				break;
718 			case B_MEDIA_ENCODED_AUDIO:
719 				mediaHeader->u.encoded_audio.buffer_flags
720 					= (fPacket.flags & AV_PKT_FLAG_KEY) ? B_MEDIA_KEY_FRAME : 0;
721 				break;
722 			case B_MEDIA_RAW_VIDEO:
723 				mediaHeader->u.raw_video.line_count
724 					= fFormat.u.raw_video.display.line_count;
725 				break;
726 			case B_MEDIA_ENCODED_VIDEO:
727 				mediaHeader->u.encoded_video.field_flags
728 					= (fPacket.flags & AV_PKT_FLAG_KEY) ? B_MEDIA_KEY_FRAME : 0;
729 				mediaHeader->u.encoded_video.line_count
730 					= fFormat.u.encoded_video.output.display.line_count;
731 				break;
732 			default:
733 				break;
734 		}
735 	}
736 
737 //	static bigtime_t pts[2];
738 //	static bigtime_t lastPrintTime = system_time();
739 //	static BLocker printLock;
740 //	if (fStream->index < 2) {
741 //		if (fPacket.pts != AV_NOPTS_VALUE)
742 //			pts[fStream->index] = _ConvertFromStreamTimeBase(fPacket.pts);
743 //		printLock.Lock();
744 //		bigtime_t now = system_time();
745 //		if (now - lastPrintTime > 1000000) {
746 //			printf("PTS: %.4f/%.4f, diff: %.4f\r", pts[0] / 1000000.0,
747 //				pts[1] / 1000000.0, (pts[0] - pts[1]) / 1000000.0);
748 //			fflush(stdout);
749 //			lastPrintTime = now;
750 //		}
751 //		printLock.Unlock();
752 //	}
753 
754 	return B_OK;
755 }
756 
757 
758 // #pragma mark -
759 
760 
761 /*static*/ int
762 StreamBase::_Read(void* cookie, uint8* buffer, int bufferSize)
763 {
764 	StreamBase* stream = reinterpret_cast<StreamBase*>(cookie);
765 
766 	BAutolock _(stream->fSourceLock);
767 
768 	TRACE_IO("StreamBase::_Read(%p, %p, %d) position: %lld\n",
769 		cookie, buffer, bufferSize, stream->fPosition);
770 
771 	if (stream->fPosition != stream->fSource->Position()) {
772 		TRACE_IO("StreamBase::_Read fSource position: %lld\n",
773 			stream->fSource->Position());
774 
775 		off_t position
776 			= stream->fSource->Seek(stream->fPosition, SEEK_SET);
777 		if (position != stream->fPosition)
778 			return -1;
779 	}
780 
781 	ssize_t read = stream->fSource->Read(buffer, bufferSize);
782 	if (read > 0)
783 		stream->fPosition += read;
784 
785 	TRACE_IO("  read: %ld\n", read);
786 	return (int)read;
787 
788 }
789 
790 
791 /*static*/ off_t
792 StreamBase::_Seek(void* cookie, off_t offset, int whence)
793 {
794 	TRACE_IO("StreamBase::_Seek(%p, %lld, %d)\n",
795 		cookie, offset, whence);
796 
797 	StreamBase* stream = reinterpret_cast<StreamBase*>(cookie);
798 
799 	BAutolock _(stream->fSourceLock);
800 
801 	// Support for special file size retrieval API without seeking
802 	// anywhere:
803 	if (whence == AVSEEK_SIZE) {
804 		off_t size;
805 		if (stream->fSource->GetSize(&size) == B_OK)
806 			return size;
807 		return -1;
808 	}
809 
810 	// If not requested to seek to an absolute position, we need to
811 	// confirm that the stream is currently at the position that we
812 	// think it is.
813 	if (whence != SEEK_SET
814 		&& stream->fPosition != stream->fSource->Position()) {
815 		off_t position
816 			= stream->fSource->Seek(stream->fPosition, SEEK_SET);
817 		if (position != stream->fPosition)
818 			return -1;
819 	}
820 
821 	off_t position = stream->fSource->Seek(offset, whence);
822 	TRACE_IO("  position: %lld\n", position);
823 	if (position < 0)
824 		return -1;
825 
826 	stream->fPosition = position;
827 
828 	return position;
829 }
830 
831 
832 status_t
833 StreamBase::_NextPacket(bool reuse)
834 {
835 	TRACE_PACKET("StreamBase::_NextPacket(%d)\n", reuse);
836 
837 	if (fReusePacket) {
838 		// The last packet was marked for reuse, so we keep using it.
839 		TRACE_PACKET("  re-using last packet\n");
840 		fReusePacket = reuse;
841 		return B_OK;
842 	}
843 
844 	av_free_packet(&fPacket);
845 
846 	while (true) {
847 		if (av_read_frame(fContext, &fPacket) < 0) {
848 			// NOTE: Even though we may get the error for a different stream,
849 			// av_read_frame() is not going to be successful from here on, so
850 			// it doesn't matter.
851 			fReusePacket = false;
852 			return B_LAST_BUFFER_ERROR;
853 		}
854 
855 		if (fPacket.stream_index == Index())
856 			break;
857 
858 		// This is a packet from another stream, ignore it.
859 		av_free_packet(&fPacket);
860 	}
861 
862 	// Mark this packet with the new reuse flag.
863 	fReusePacket = reuse;
864 	return B_OK;
865 }
866 
867 
868 int64_t
869 StreamBase::_ConvertToStreamTimeBase(bigtime_t time) const
870 {
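	// Convert microseconds into ticks of the stream's time_base
	// (time * den / (1e6 * num)) and offset by the stream's start_time so
	// the result is comparable to packet timestamps.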
871 	int64 timeStamp = int64_t((double)time * fStream->time_base.den
872 		/ (1000000.0 * fStream->time_base.num) + 0.5);
873 	if (fStream->start_time != AV_NOPTS_VALUE)
874 		timeStamp += fStream->start_time;
875 	return timeStamp;
876 }
877 
878 
879 bigtime_t
880 StreamBase::_ConvertFromStreamTimeBase(int64_t time) const
881 {
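	// Inverse of _ConvertToStreamTimeBase(): remove the start_time offset,
	// then scale time_base ticks back to microseconds.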
882 	if (fStream->start_time != AV_NOPTS_VALUE)
883 		time -= fStream->start_time;
884 
885 	return bigtime_t(1000000.0 * time * fStream->time_base.num
886 		/ fStream->time_base.den + 0.5);
887 }
888 
889 
890 // #pragma mark - AVFormatReader::Stream
891 
892 
893 class AVFormatReader::Stream : public StreamBase {
894 public:
895 								Stream(BMediaIO* source,
896 									BLocker* streamLock);
897 	virtual						~Stream();
898 
899 	// Set up this stream to point to the AVStream at the given streamIndex.
900 	// This will also initialize the media_format.
901 	virtual	status_t			Init(int32 streamIndex);
902 
903 			status_t			GetMetaData(BMetaData* data);
904 
905 	// Support for AVFormatReader
906 			status_t			GetStreamInfo(int64* frameCount,
907 									bigtime_t* duration, media_format* format,
908 									const void** infoBuffer,
909 									size_t* infoSize) const;
910 
911 			status_t			FindKeyFrame(uint32 flags, int64* frame,
912 									bigtime_t* time) const;
913 	virtual	status_t			Seek(uint32 flags, int64* frame,
914 									bigtime_t* time);
915 
916 private:
917 	mutable	BLocker				fLock;
918 
919 			struct KeyframeInfo {
920 				bigtime_t		requestedTime;
921 				int64			requestedFrame;
922 				bigtime_t		reportedTime;
923 				int64			reportedFrame;
924 				uint32			seekFlags;
925 			};
926 	mutable	KeyframeInfo		fLastReportedKeyframe;
927 	mutable	StreamBase*			fGhostStream;
928 };
929 
930 
931 
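// NOTE: The "streamLock" passed in here is the reader-wide source lock that
// is shared by all Stream instances; each Stream uses its own fLock as the
// per-stream lock handed to StreamBase.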
932 AVFormatReader::Stream::Stream(BMediaIO* source, BLocker* streamLock)
933 	:
934 	StreamBase(source, streamLock, &fLock),
935 	fLock("stream lock"),
936 	fGhostStream(NULL)
937 {
938 	fLastReportedKeyframe.requestedTime = 0;
939 	fLastReportedKeyframe.requestedFrame = 0;
940 	fLastReportedKeyframe.reportedTime = 0;
941 	fLastReportedKeyframe.reportedFrame = 0;
942 }
943 
944 
945 AVFormatReader::Stream::~Stream()
946 {
947 	delete fGhostStream;
948 }
949 
950 
951 status_t
952 AVFormatReader::Stream::Init(int32 virtualIndex)
953 {
954 	TRACE("AVFormatReader::Stream::Init(%ld)\n", virtualIndex);
955 
956 	status_t ret = StreamBase::Init(virtualIndex);
957 	if (ret != B_OK)
958 		return ret;
959 
960 	// Get a pointer to the AVCodecParameters for the stream at streamIndex.
961 	AVCodecParameters* codecParams = fStream->codecpar;
962 
963 	// initialize the media_format for this stream
964 	media_format* format = &fFormat;
965 	memset(format, 0, sizeof(media_format));
966 
967 	media_format_description description;
968 
969 	// Set format family and type depending on codec_type of the stream.
970 	switch (codecParams->codec_type) {
971 		case AVMEDIA_TYPE_AUDIO:
972 			if ((codecParams->codec_id >= AV_CODEC_ID_PCM_S16LE)
973 				&& (codecParams->codec_id <= AV_CODEC_ID_PCM_U8)) {
974 				TRACE("  raw audio\n");
975 				format->type = B_MEDIA_RAW_AUDIO;
976 				description.family = B_ANY_FORMAT_FAMILY;
977 				// This will then apparently be handled by the RawDecoder
978 				// built into BMediaTrack.
979 			} else {
980 				TRACE("  encoded audio\n");
981 				format->type = B_MEDIA_ENCODED_AUDIO;
982 				description.family = B_MISC_FORMAT_FAMILY;
983 				description.u.misc.file_format = 'ffmp';
984 			}
985 			break;
986 		case AVMEDIA_TYPE_VIDEO:
987 			TRACE("  encoded video\n");
988 			format->type = B_MEDIA_ENCODED_VIDEO;
989 			description.family = B_MISC_FORMAT_FAMILY;
990 			description.u.misc.file_format = 'ffmp';
991 			break;
992 		default:
993 			TRACE("  unknown type\n");
994 			format->type = B_MEDIA_UNKNOWN_TYPE;
995 			return B_ERROR;
996 			break;
997 	}
998 
999 	if (format->type == B_MEDIA_RAW_AUDIO) {
1000 		// We cannot describe all raw-audio formats, some are unsupported.
1001 		switch (codecParams->codec_id) {
1002 			case AV_CODEC_ID_PCM_S16LE:
1003 				format->u.raw_audio.format
1004 					= media_raw_audio_format::B_AUDIO_SHORT;
1005 				format->u.raw_audio.byte_order
1006 					= B_MEDIA_LITTLE_ENDIAN;
1007 				break;
1008 			case AV_CODEC_ID_PCM_S16BE:
1009 				format->u.raw_audio.format
1010 					= media_raw_audio_format::B_AUDIO_SHORT;
1011 				format->u.raw_audio.byte_order
1012 					= B_MEDIA_BIG_ENDIAN;
1013 				break;
1014 			case AV_CODEC_ID_PCM_U16LE:
1015 //				format->u.raw_audio.format
1016 //					= media_raw_audio_format::B_AUDIO_USHORT;
1017 //				format->u.raw_audio.byte_order
1018 //					= B_MEDIA_LITTLE_ENDIAN;
1019 				return B_NOT_SUPPORTED;
1020 				break;
1021 			case AV_CODEC_ID_PCM_U16BE:
1022 //				format->u.raw_audio.format
1023 //					= media_raw_audio_format::B_AUDIO_USHORT;
1024 //				format->u.raw_audio.byte_order
1025 //					= B_MEDIA_BIG_ENDIAN;
1026 				return B_NOT_SUPPORTED;
1027 				break;
1028 			case AV_CODEC_ID_PCM_S8:
1029 				format->u.raw_audio.format
1030 					= media_raw_audio_format::B_AUDIO_CHAR;
1031 				break;
1032 			case AV_CODEC_ID_PCM_U8:
1033 				format->u.raw_audio.format
1034 					= media_raw_audio_format::B_AUDIO_UCHAR;
1035 				break;
1036 			default:
1037 				return B_NOT_SUPPORTED;
1038 				break;
1039 		}
1040 	} else {
1041 		if (description.family == B_MISC_FORMAT_FAMILY)
1042 			description.u.misc.codec = codecParams->codec_id;
1043 
1044 		BMediaFormats formats;
1045 		status_t status = formats.GetFormatFor(description, format);
1046 		if (status < B_OK)
1047 			TRACE("  formats.GetFormatFor() error: %s\n", strerror(status));
1048 
1049 		format->user_data_type = B_CODEC_TYPE_INFO;
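		// Store the original FourCC codec tag in the format's user data
		// (tagged B_CODEC_TYPE_INFO), so it can be retrieved later, e.g. by
		// the matching decoder add-on.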
1050 		*(uint32*)format->user_data = codecParams->codec_tag;
1051 		format->user_data[4] = 0;
1052 	}
1053 
1054 	format->require_flags = 0;
1055 	format->deny_flags = B_MEDIA_MAUI_UNDEFINED_FLAGS;
1056 
1057 	switch (format->type) {
1058 		case B_MEDIA_RAW_AUDIO:
1059 			format->u.raw_audio.frame_rate = (float)codecParams->sample_rate;
1060 			format->u.raw_audio.channel_count = codecParams->channels;
1061 			format->u.raw_audio.channel_mask = codecParams->channel_layout;
1062 			ConvertAVSampleFormatToRawAudioFormat(
1063 				(AVSampleFormat)codecParams->format,
1064 				format->u.raw_audio.format);
1065 			format->u.raw_audio.buffer_size = 0;
1066 
1067 			// Read one packet and mark it for later re-use. (So our first
1068 			// GetNextChunk() call does not read another packet.)
1069 			if (_NextPacket(true) == B_OK) {
1070 				TRACE("  successfully determined audio buffer size: %d\n",
1071 					fPacket.size);
1072 				format->u.raw_audio.buffer_size = fPacket.size;
1073 			}
1074 			break;
1075 
1076 		case B_MEDIA_ENCODED_AUDIO:
1077 			format->u.encoded_audio.bit_rate = codecParams->bit_rate;
1078 			format->u.encoded_audio.frame_size = codecParams->frame_size;
1079 			// Fill in some info about possible output format
1080 			format->u.encoded_audio.output
1081 				= media_multi_audio_format::wildcard;
1082 			format->u.encoded_audio.output.frame_rate
1083 				= (float)codecParams->sample_rate;
1084 			// Channel layout bits match in the Be API and FFmpeg.
1085 			format->u.encoded_audio.output.channel_count
1086 				= codecParams->channels;
1087 			format->u.encoded_audio.multi_info.channel_mask
1088 				= codecParams->channel_layout;
1089 			format->u.encoded_audio.output.byte_order
1090 				= avformat_to_beos_byte_order(
1091 					(AVSampleFormat)codecParams->format);
1092 
1093 			ConvertAVSampleFormatToRawAudioFormat(
1094 					(AVSampleFormat)codecParams->format,
1095 				format->u.encoded_audio.output.format);
1096 
1097 			if (codecParams->block_align > 0) {
1098 				format->u.encoded_audio.output.buffer_size
1099 					= codecParams->block_align;
1100 			} else {
1101 				format->u.encoded_audio.output.buffer_size
1102 					= codecParams->frame_size * codecParams->channels
1103 						* (format->u.encoded_audio.output.format
1104 							& media_raw_audio_format::B_AUDIO_SIZE_MASK);
1105 			}
1106 			break;
1107 
1108 		case B_MEDIA_ENCODED_VIDEO:
1109 // TODO: Specifying any of these seems to throw off the format matching
1110 // later on.
1111 //			format->u.encoded_video.avg_bit_rate = codecParams->bit_rate;
1112 //			format->u.encoded_video.max_bit_rate = codecParams->bit_rate
1113 //				+ codecParams->bit_rate_tolerance;
1114 
1115 //			format->u.encoded_video.encoding
1116 //				= media_encoded_video_format::B_ANY;
1117 
1118 //			format->u.encoded_video.frame_size = 1;
1119 //			format->u.encoded_video.forward_history = 0;
1120 //			format->u.encoded_video.backward_history = 0;
1121 
1122 			format->u.encoded_video.output.field_rate = FrameRate();
1123 			format->u.encoded_video.output.interlace = 1;
1124 
1125 			format->u.encoded_video.output.first_active = 0;
1126 			format->u.encoded_video.output.last_active
1127 				= codecParams->height - 1;
1128 				// TODO: Maybe libavformat actually provides that info
1129 				// somewhere...
1130 			format->u.encoded_video.output.orientation
1131 				= B_VIDEO_TOP_LEFT_RIGHT;
1132 
1133 			ConvertAVCodecParametersToVideoAspectWidthAndHeight(*codecParams,
1134 				format->u.encoded_video.output.pixel_width_aspect,
1135 				format->u.encoded_video.output.pixel_height_aspect);
1136 
1137 			format->u.encoded_video.output.display.format
1138 				= pixfmt_to_colorspace(codecParams->format);
1139 			format->u.encoded_video.output.display.line_width
1140 				= codecParams->width;
1141 			format->u.encoded_video.output.display.line_count
1142 				= codecParams->height;
1143 			TRACE("  width/height: %d/%d\n", codecParams->width,
1144 				codecParams->height);
1145 			format->u.encoded_video.output.display.bytes_per_row = 0;
1146 			format->u.encoded_video.output.display.pixel_offset = 0;
1147 			format->u.encoded_video.output.display.line_offset = 0;
1148 			format->u.encoded_video.output.display.flags = 0; // TODO
1149 
1150 			break;
1151 
1152 		default:
1153 			// This is an unknown format to us.
1154 			break;
1155 	}
1156 
1157 	// Add the meta data, if any
1158 	if (codecParams->extradata_size > 0) {
1159 		format->SetMetaData(codecParams->extradata,
1160 			codecParams->extradata_size);
1161 		TRACE("  extradata: %p\n", format->MetaData());
1162 	}
1163 
1164 	TRACE("  extradata_size: %d\n", codecParams->extradata_size);
1165 //	TRACE("  intra_matrix: %p\n", codecParams->intra_matrix);
1166 //	TRACE("  inter_matrix: %p\n", codecParams->inter_matrix);
1167 //	TRACE("  get_buffer(): %p\n", codecParams->get_buffer);
1168 //	TRACE("  release_buffer(): %p\n", codecParams->release_buffer);
1169 
1170 #ifdef TRACE_AVFORMAT_READER
1171 	char formatString[512];
1172 	if (string_for_format(*format, formatString, sizeof(formatString)))
1173 		TRACE("  format: %s\n", formatString);
1174 
1175 	uint32 encoding = format->Encoding();
1176 	TRACE("  encoding '%.4s'\n", (char*)&encoding);
1177 #endif
1178 
1179 	return B_OK;
1180 }
1181 
1182 
1183 status_t
1184 AVFormatReader::Stream::GetMetaData(BMetaData* data)
1185 {
1186 	BAutolock _(&fLock);
1187 
1188 	avdictionary_to_message(fStream->metadata, data);
1189 
1190 	return B_OK;
1191 }
1192 
1193 
1194 status_t
1195 AVFormatReader::Stream::GetStreamInfo(int64* frameCount,
1196 	bigtime_t* duration, media_format* format, const void** infoBuffer,
1197 	size_t* infoSize) const
1198 {
1199 	BAutolock _(&fLock);
1200 
1201 	TRACE("AVFormatReader::Stream::GetStreamInfo(%ld)\n",
1202 		VirtualIndex());
1203 
1204 	double frameRate = FrameRate();
1205 	TRACE("  frameRate: %.4f\n", frameRate);
1206 
1207 	#ifdef TRACE_AVFORMAT_READER
1208 	if (fStream->start_time != AV_NOPTS_VALUE) {
1209 		bigtime_t startTime = _ConvertFromStreamTimeBase(fStream->start_time);
1210 		TRACE("  start_time: %lld or %.5fs\n", startTime,
1211 			startTime / 1000000.0);
1212 		// TODO: Handle start time in FindKeyFrame() and Seek()?!
1213 	}
1214 	#endif // TRACE_AVFORMAT_READER
1215 
1216 	*duration = Duration();
1217 
1218 	TRACE("  duration: %lld or %.5fs\n", *duration, *duration / 1000000.0);
1219 
1220 	#if 0
1221 	if (fStream->nb_index_entries > 0) {
1222 		TRACE("  dump of index entries:\n");
1223 		int count = 5;
1224 		int firstEntriesCount = min_c(fStream->nb_index_entries, count);
1225 		int i = 0;
1226 		for (; i < firstEntriesCount; i++) {
1227 			AVIndexEntry& entry = fStream->index_entries[i];
1228 			bigtime_t timeGlobal = entry.timestamp;
1229 			bigtime_t timeNative = _ConvertFromStreamTimeBase(timeGlobal);
1230 			TRACE("    [%d] native: %.5fs global: %.5fs\n", i,
1231 				timeNative / 1000000.0f, timeGlobal / 1000000.0f);
1232 		}
1233 		if (fStream->nb_index_entries - count > i) {
1234 			i = fStream->nb_index_entries - count;
1235 			TRACE("    ...\n");
1236 			for (; i < fStream->nb_index_entries; i++) {
1237 				AVIndexEntry& entry = fStream->index_entries[i];
1238 				bigtime_t timeGlobal = entry.timestamp;
1239 				bigtime_t timeNative = _ConvertFromStreamTimeBase(timeGlobal);
1240 				TRACE("    [%d] native: %.5fs global: %.5fs\n", i,
1241 					timeNative / 1000000.0f, timeGlobal / 1000000.0f);
1242 			}
1243 		}
1244 	}
1245 	#endif
1246 
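	// For audio streams nb_frames counts packets and frame_size is the
	// number of samples per packet, so the product is the total sample
	// count. For video (where frame_size is 0) or when the demuxer does not
	// know, the product is 0 and we estimate from duration and frame rate
	// below.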
1247 	*frameCount = fStream->nb_frames * fStream->codecpar->frame_size;
1248 	if (*frameCount == 0) {
1249 		// Calculate from duration and frame rate
1250 		*frameCount = (int64)(*duration * frameRate / 1000000LL);
1251 		TRACE("  frameCount calculated: %lld, from context: %lld\n",
1252 			*frameCount, fStream->nb_frames);
1253 	} else
1254 		TRACE("  frameCount: %lld\n", *frameCount);
1255 
1256 	*format = fFormat;
1257 
1258 	*infoBuffer = fStream->codecpar->extradata;
1259 	*infoSize = fStream->codecpar->extradata_size;
1260 
1261 	return B_OK;
1262 }
1263 
1264 
1265 status_t
1266 AVFormatReader::Stream::FindKeyFrame(uint32 flags, int64* frame,
1267 	bigtime_t* time) const
1268 {
1269 	BAutolock _(&fLock);
1270 
1271 	if (fContext == NULL || fStream == NULL)
1272 		return B_NO_INIT;
1273 
1274 	TRACE_FIND("AVFormatReader::Stream::FindKeyFrame(%ld,%s%s%s%s, "
1275 		"%lld, %lld)\n", VirtualIndex(),
1276 		(flags & B_MEDIA_SEEK_TO_FRAME) ? " B_MEDIA_SEEK_TO_FRAME" : "",
1277 		(flags & B_MEDIA_SEEK_TO_TIME) ? " B_MEDIA_SEEK_TO_TIME" : "",
1278 		(flags & B_MEDIA_SEEK_CLOSEST_BACKWARD)
1279 			? " B_MEDIA_SEEK_CLOSEST_BACKWARD" : "",
1280 		(flags & B_MEDIA_SEEK_CLOSEST_FORWARD)
1281 			? " B_MEDIA_SEEK_CLOSEST_FORWARD" : "",
1282 		*frame, *time);
1283 
1284 	bool inLastRequestedRange = false;
1285 	if ((flags & B_MEDIA_SEEK_TO_FRAME) != 0) {
1286 		if (fLastReportedKeyframe.reportedFrame
1287 			<= fLastReportedKeyframe.requestedFrame) {
1288 			inLastRequestedRange
1289 				= *frame >= fLastReportedKeyframe.reportedFrame
1290 					&& *frame <= fLastReportedKeyframe.requestedFrame;
1291 		} else {
1292 			inLastRequestedRange
1293 				= *frame >= fLastReportedKeyframe.requestedFrame
1294 					&& *frame <= fLastReportedKeyframe.reportedFrame;
1295 		}
1296 	} else if ((flags & B_MEDIA_SEEK_TO_FRAME) == 0) {
1297 		if (fLastReportedKeyframe.reportedTime
1298 			<= fLastReportedKeyframe.requestedTime) {
1299 			inLastRequestedRange
1300 				= *time >= fLastReportedKeyframe.reportedTime
1301 					&& *time <= fLastReportedKeyframe.requestedTime;
1302 		} else {
1303 			inLastRequestedRange
1304 				= *time >= fLastReportedKeyframe.requestedTime
1305 					&& *time <= fLastReportedKeyframe.reportedTime;
1306 		}
1307 	}
1308 
1309 	if (inLastRequestedRange) {
1310 		*frame = fLastReportedKeyframe.reportedFrame;
1311 		*time = fLastReportedKeyframe.reportedTime;
1312 		TRACE_FIND("  same as last reported keyframe\n");
1313 		return B_OK;
1314 	}
1315 
1316 	double frameRate = FrameRate();
1317 	if ((flags & B_MEDIA_SEEK_TO_FRAME) != 0)
1318 		*time = (bigtime_t)(*frame * 1000000.0 / frameRate + 0.5);
1319 
1320 	status_t ret;
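	// Use a "ghost" stream for the lookup: a second demuxer instance over
	// the same source that we can seek freely to find out where a seek
	// would actually land, without disturbing the read position of this
	// stream.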
1321 	if (fGhostStream == NULL) {
1322 		BAutolock _(fSourceLock);
1323 
1324 		fGhostStream = new(std::nothrow) StreamBase(fSource, fSourceLock,
1325 			&fLock);
1326 		if (fGhostStream == NULL) {
1327 			TRACE("  failed to allocate ghost stream\n");
1328 			return B_NO_MEMORY;
1329 		}
1330 
1331 		ret = fGhostStream->Open();
1332 		if (ret != B_OK) {
1333 			TRACE("  ghost stream failed to open: %s\n", strerror(ret));
1334 			return B_ERROR;
1335 		}
1336 
1337 		ret = fGhostStream->Init(fVirtualIndex);
1338 		if (ret != B_OK) {
1339 			TRACE("  ghost stream failed to init: %s\n", strerror(ret));
1340 			return B_ERROR;
1341 		}
1342 	}
1343 	fLastReportedKeyframe.requestedFrame = *frame;
1344 	fLastReportedKeyframe.requestedTime = *time;
1345 	fLastReportedKeyframe.seekFlags = flags;
1346 
1347 	ret = fGhostStream->Seek(flags, frame, time);
1348 	if (ret != B_OK) {
1349 		TRACE("  ghost stream failed to seek: %s\n", strerror(ret));
1350 		return B_ERROR;
1351 	}
1352 
1353 	fLastReportedKeyframe.reportedFrame = *frame;
1354 	fLastReportedKeyframe.reportedTime = *time;
1355 
1356 	TRACE_FIND("  found time: %.2fs\n", *time / 1000000.0);
1357 	if ((flags & B_MEDIA_SEEK_TO_FRAME) != 0) {
1358 		*frame = int64_t(*time * FrameRate() / 1000000.0 + 0.5);
1359 		TRACE_FIND("  found frame: %lld\n", *frame);
1360 	}
1361 
1362 	return B_OK;
1363 }
1364 
1365 
1366 status_t
1367 AVFormatReader::Stream::Seek(uint32 flags, int64* frame, bigtime_t* time)
1368 {
1369 	BAutolock _(&fLock);
1370 
1371 	if (fContext == NULL || fStream == NULL)
1372 		return B_NO_INIT;
1373 
1374 	// Put the old requested values into frame/time, since we already know
1375 	// that the sought frame/time will then match the reported values.
1376 	// TODO: Will not work if client changes seek flags (from backwards to
1377 	// forward or vice versa)!!
1378 	bool inLastRequestedRange = false;
1379 	if ((flags & B_MEDIA_SEEK_TO_FRAME) != 0) {
1380 		if (fLastReportedKeyframe.reportedFrame
1381 			<= fLastReportedKeyframe.requestedFrame) {
1382 			inLastRequestedRange
1383 				= *frame >= fLastReportedKeyframe.reportedFrame
1384 					&& *frame <= fLastReportedKeyframe.requestedFrame;
1385 		} else {
1386 			inLastRequestedRange
1387 				= *frame >= fLastReportedKeyframe.requestedFrame
1388 					&& *frame <= fLastReportedKeyframe.reportedFrame;
1389 		}
1390 	} else if ((flags & B_MEDIA_SEEK_TO_FRAME) == 0) {
1391 		if (fLastReportedKeyframe.reportedTime
1392 			<= fLastReportedKeyframe.requestedTime) {
1393 			inLastRequestedRange
1394 				= *time >= fLastReportedKeyframe.reportedTime
1395 					&& *time <= fLastReportedKeyframe.requestedTime;
1396 		} else {
1397 			inLastRequestedRange
1398 				= *time >= fLastReportedKeyframe.requestedTime
1399 					&& *time <= fLastReportedKeyframe.reportedTime;
1400 		}
1401 	}
1402 
1403 	if (inLastRequestedRange) {
1404 		*frame = fLastReportedKeyframe.requestedFrame;
1405 		*time = fLastReportedKeyframe.requestedTime;
1406 		flags = fLastReportedKeyframe.seekFlags;
1407 	}
1408 
1409 	return StreamBase::Seek(flags, frame, time);
1410 }
1411 
1412 
1413 // #pragma mark - AVFormatReader
1414 
1415 
1416 AVFormatReader::AVFormatReader()
1417 	:
1418 	fCopyright(""),
1419 	fStreams(NULL),
1420 	fSourceLock("source I/O lock")
1421 {
1422 	TRACE("AVFormatReader::AVFormatReader\n");
1423 }
1424 
1425 
1426 AVFormatReader::~AVFormatReader()
1427 {
1428 	TRACE("AVFormatReader::~AVFormatReader\n");
1429 	if (fStreams != NULL) {
1430 		// The client was supposed to call FreeCookie() on all
1431 		// allocated streams. Deleting the first stream is always
1432 		// prevented; we delete the other ones just in case.
1433 		int32 count = fStreams[0]->CountStreams();
1434 		for (int32 i = 0; i < count; i++)
1435 			delete fStreams[i];
1436 		delete[] fStreams;
1437 	}
1438 }
1439 
1440 
1441 // #pragma mark -
1442 
1443 
1444 status_t
1445 AVFormatReader::Sniff(int32* _streamCount)
1446 {
1447 	TRACE("AVFormatReader::Sniff\n");
1448 
1449 	BMediaIO* source = dynamic_cast<BMediaIO*>(Source());
1450 	if (source == NULL) {
1451 		TRACE("  not a BMediaIO, but we need it to be one.\n");
1452 		return B_NOT_SUPPORTED;
1453 	}
1454 
1455 	Stream* stream = new(std::nothrow) Stream(source,
1456 		&fSourceLock);
1457 	if (stream == NULL) {
1458 		ERROR("AVFormatReader::Sniff() - failed to allocate Stream\n");
1459 		return B_NO_MEMORY;
1460 	}
1461 
1462 	ObjectDeleter<Stream> streamDeleter(stream);
1463 
1464 	status_t ret = stream->Open();
1465 	if (ret != B_OK) {
1466 		TRACE("  failed to detect stream: %s\n", strerror(ret));
1467 		return ret;
1468 	}
1469 
1470 	delete[] fStreams;
1471 	fStreams = NULL;
1472 
1473 	int32 streamCount = stream->CountStreams();
1474 	if (streamCount == 0) {
1475 		TRACE("  failed to detect any streams\n");
1476 		return B_ERROR;
1477 	}
1478 
1479 	fStreams = new(std::nothrow) Stream*[streamCount];
1480 	if (fStreams == NULL) {
1481 		ERROR("AVFormatReader::Sniff() - failed to allocate streams\n");
1482 		return B_NO_MEMORY;
1483 	}
1484 
1485 	memset(fStreams, 0, sizeof(Stream*) * streamCount);
1486 	fStreams[0] = stream;
1487 	streamDeleter.Detach();
1488 
1489 	#ifdef TRACE_AVFORMAT_READER
1490 	av_dump_format(const_cast<AVFormatContext*>(stream->Context()), 0, "", 0);
1491 	#endif
1492 
1493 	if (_streamCount != NULL)
1494 		*_streamCount = streamCount;
1495 
1496 	return B_OK;
1497 }
1498 
1499 
1500 void
1501 AVFormatReader::GetFileFormatInfo(media_file_format* mff)
1502 {
1503 	TRACE("AVFormatReader::GetFileFormatInfo\n");
1504 
1505 	if (fStreams == NULL)
1506 		return;
1507 
1508 	// The first cookie is always there!
1509 	const AVFormatContext* context = fStreams[0]->Context();
1510 
1511 	if (context == NULL || context->iformat == NULL) {
1512 		TRACE("  no AVFormatContext or AVInputFormat!\n");
1513 		return;
1514 	}
1515 
1516 	const media_file_format* format = demuxer_format_for(context->iformat);
1517 
1518 	mff->capabilities = media_file_format::B_READABLE
1519 		| media_file_format::B_KNOWS_ENCODED_VIDEO
1520 		| media_file_format::B_KNOWS_ENCODED_AUDIO
1521 		| media_file_format::B_IMPERFECTLY_SEEKABLE;
1522 
1523 	if (format != NULL) {
1524 		mff->family = format->family;
1525 	} else {
1526 		TRACE("  no DemuxerFormat for AVInputFormat!\n");
1527 		mff->family = B_MISC_FORMAT_FAMILY;
1528 	}
1529 
1530 	mff->version = 100;
1531 
1532 	if (format != NULL) {
1533 		strcpy(mff->mime_type, format->mime_type);
1534 	} else {
1535 		// TODO: Would be nice to be able to provide this from AVInputFormat,
1536 		// maybe by extending the FFmpeg code itself (all demuxers).
1537 		strcpy(mff->mime_type, "");
1538 	}
1539 
1540 	if (context->iformat->extensions != NULL)
1541 		strcpy(mff->file_extension, context->iformat->extensions);
1542 	else {
1543 		TRACE("  no file extensions for AVInputFormat.\n");
1544 		strcpy(mff->file_extension, "");
1545 	}
1546 
1547 	if (context->iformat->name != NULL)
1548 		strcpy(mff->short_name,  context->iformat->name);
1549 	else {
1550 		TRACE("  no short name for AVInputFormat.\n");
1551 		strcpy(mff->short_name, "");
1552 	}
1553 
1554 	if (context->iformat->long_name != NULL)
1555 		sprintf(mff->pretty_name, "%s (FFmpeg)", context->iformat->long_name);
1556 	else {
1557 		if (format != NULL)
1558 			sprintf(mff->pretty_name, "%s (FFmpeg)", format->pretty_name);
1559 		else
1560 			strcpy(mff->pretty_name, "Unknown (FFmpeg)");
1561 	}
1562 }
1563 
1564 
1565 status_t
1566 AVFormatReader::GetMetaData(BMetaData* data)
1567 {
1568 	// The first cookie is always there!
1569 	const AVFormatContext* context = fStreams[0]->Context();
1570 
1571 	if (context == NULL)
1572 		return B_NO_INIT;
1573 
1574 	avdictionary_to_message(context->metadata, data);
1575 
1576 	// Add chapter info
1577 	for (unsigned i = 0; i < context->nb_chapters; i++) {
1578 		AVChapter* chapter = context->chapters[i];
1579 		BMetaData chapterData;
1580 		chapterData.SetUInt64(kChapterStart, bigtime_t(1000000.0
1581 			* chapter->start * chapter->time_base.num
1582 			/ chapter->time_base.den + 0.5));
1583 		chapterData.SetUInt64(kChapterEnd, bigtime_t(1000000.0
1584 			* chapter->end * chapter->time_base.num
1585 			/ chapter->time_base.den + 0.5));
1586 
1587 		avdictionary_to_message(chapter->metadata, &chapterData);
1588 		data->AddMetaData(kChapter, &chapterData);
1589 	}
1590 
1591 	// Add program info
1592 	for (unsigned i = 0; i < context->nb_programs; i++) {
1593 		BMetaData programData;
1594 		avdictionary_to_message(context->programs[i]->metadata, &programData);
1595 		data->AddMetaData(kProgramData, &programData);
1596 	}
1597 
1598 	return B_OK;
1599 }
1600 
1601 
1602 // #pragma mark -
1603 
1604 
1605 status_t
1606 AVFormatReader::AllocateCookie(int32 streamIndex, void** _cookie)
1607 {
1608 	TRACE("AVFormatReader::AllocateCookie(%ld)\n", streamIndex);
1609 
1610 	BAutolock _(fSourceLock);
1611 
1612 	if (fStreams == NULL)
1613 		return B_NO_INIT;
1614 
1615 	if (streamIndex < 0 || streamIndex >= fStreams[0]->CountStreams())
1616 		return B_BAD_INDEX;
1617 
1618 	if (_cookie == NULL)
1619 		return B_BAD_VALUE;
1620 
1621 	Stream* cookie = fStreams[streamIndex];
1622 	if (cookie == NULL) {
1623 		// Allocate the cookie
1624 		BMediaIO* source = dynamic_cast<BMediaIO*>(Source());
1625 		if (source == NULL) {
1626 			TRACE("  not a BMediaIO, but we need it to be one.\n");
1627 			return B_NOT_SUPPORTED;
1628 		}
1629 
1630 		cookie = new(std::nothrow) Stream(source, &fSourceLock);
1631 		if (cookie == NULL) {
1632 			ERROR("AVFormatReader::Sniff() - failed to allocate "
1633 				"Stream\n");
1634 			return B_NO_MEMORY;
1635 		}
1636 
1637 		status_t ret = cookie->Open();
1638 		if (ret != B_OK) {
1639 			TRACE("  stream failed to open: %s\n", strerror(ret));
1640 			delete cookie;
1641 			return ret;
1642 		}
1643 	}
1644 
1645 	status_t ret = cookie->Init(streamIndex);
1646 	if (ret != B_OK) {
1647 		TRACE("  stream failed to initialize: %s\n", strerror(ret));
1648 		// NOTE: Never delete the first stream!
1649 		if (streamIndex != 0)
1650 			delete cookie;
1651 		return ret;
1652 	}
1653 
1654 	fStreams[streamIndex] = cookie;
1655 	*_cookie = cookie;
1656 
1657 	return B_OK;
1658 }
1659 
1660 
1661 status_t
1662 AVFormatReader::FreeCookie(void *_cookie)
1663 {
1664 	BAutolock _(fSourceLock);
1665 
1666 	Stream* cookie = reinterpret_cast<Stream*>(_cookie);
1667 
1668 	// NOTE: Never delete the first cookie!
1669 	if (cookie != NULL && cookie->VirtualIndex() != 0) {
1670 		if (fStreams != NULL)
1671 			fStreams[cookie->VirtualIndex()] = NULL;
1672 		delete cookie;
1673 	}
1674 
1675 	return B_OK;
1676 }
1677 
1678 
1679 // #pragma mark -
1680 
1681 
1682 status_t
1683 AVFormatReader::GetStreamInfo(void* _cookie, int64* frameCount,
1684 	bigtime_t* duration, media_format* format, const void** infoBuffer,
1685 	size_t* infoSize)
1686 {
1687 	Stream* cookie = reinterpret_cast<Stream*>(_cookie);
1688 	return cookie->GetStreamInfo(frameCount, duration, format, infoBuffer,
1689 		infoSize);
1690 }
1691 
1692 
1693 status_t
1694 AVFormatReader::GetStreamMetaData(void* _cookie, BMetaData* data)
1695 {
1696 	Stream* cookie = reinterpret_cast<Stream*>(_cookie);
1697 	return cookie->GetMetaData(data);
1698 }
1699 
1700 
1701 status_t
1702 AVFormatReader::Seek(void* _cookie, uint32 seekTo, int64* frame,
1703 	bigtime_t* time)
1704 {
1705 	Stream* cookie = reinterpret_cast<Stream*>(_cookie);
1706 	return cookie->Seek(seekTo, frame, time);
1707 }
1708 
1709 
1710 status_t
1711 AVFormatReader::FindKeyFrame(void* _cookie, uint32 flags, int64* frame,
1712 	bigtime_t* time)
1713 {
1714 	Stream* cookie = reinterpret_cast<Stream*>(_cookie);
1715 	return cookie->FindKeyFrame(flags, frame, time);
1716 }
1717 
1718 
1719 status_t
1720 AVFormatReader::GetNextChunk(void* _cookie, const void** chunkBuffer,
1721 	size_t* chunkSize, media_header* mediaHeader)
1722 {
1723 	Stream* cookie = reinterpret_cast<Stream*>(_cookie);
1724 	return cookie->GetNextChunk(chunkBuffer, chunkSize, mediaHeader);
1725 }
1726