xref: /haiku/src/kits/media/SoundPlayNode.cpp (revision d5cd5d63ff0ad395989db6cf4841a64d5b545d1d)
/***********************************************************************
 * AUTHOR: Marcus Overhagen, Jérôme Duval
 *   FILE: SoundPlayNode.cpp
 *  DESCR: This is the BBufferProducer used internally by BSoundPlayer.
 *         It belongs in a private namespace, but isn't in one for
 *         compatibility reasons.
 ***********************************************************************/

#include <TimeSource.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include "SoundPlayNode.h"
#include "debug.h"

#define DPRINTF 1

#if DPRINTF
	#undef DPRINTF
	#define DPRINTF printf
#else
	#undef DPRINTF
	#define DPRINTF if (1) {} else printf
#endif

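// Private event type that drives buffer production: HandleStart() queues the
// first one at the performance start time, and SendNewBuffer() queues the next
// one after every buffer, so production continues for as long as the node runs.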
#define SEND_NEW_BUFFER_EVENT (BTimedEventQueue::B_USER_EVENT + 1)

_SoundPlayNode::_SoundPlayNode(const char *name, const media_multi_audio_format *format, BSoundPlayer *player) :
	BMediaNode(name),
	BBufferProducer(B_MEDIA_RAW_AUDIO),
	BMediaEventLooper(),
	mPlayer(player),
	mInitCheckStatus(B_OK),
	mOutputEnabled(true),
	mBufferGroup(NULL),
	mFramesSent(0),
	mTooEarlyCount(0)
{
	CALLED();
	mFormat.type = B_MEDIA_RAW_AUDIO;
	mFormat.u.raw_audio = *format;

	DPRINTF("Format Info:\n");
	DPRINTF("  frame_rate:     %f\n",mFormat.u.raw_audio.frame_rate);
	DPRINTF("  channel_count:  %ld\n",mFormat.u.raw_audio.channel_count);
	DPRINTF("  byte_order:     %ld (",mFormat.u.raw_audio.byte_order);
	switch (mFormat.u.raw_audio.byte_order) {
		case B_MEDIA_BIG_ENDIAN: DPRINTF("B_MEDIA_BIG_ENDIAN)\n"); break;
		case B_MEDIA_LITTLE_ENDIAN: DPRINTF("B_MEDIA_LITTLE_ENDIAN)\n"); break;
		default: DPRINTF("unknown)\n"); break;
	}
	DPRINTF("  buffer_size:    %ld\n",mFormat.u.raw_audio.buffer_size);
	DPRINTF("  format:         %ld (",mFormat.u.raw_audio.format);
	switch (mFormat.u.raw_audio.format) {
		case media_raw_audio_format::B_AUDIO_FLOAT: DPRINTF("B_AUDIO_FLOAT)\n"); break;
		case media_raw_audio_format::B_AUDIO_SHORT: DPRINTF("B_AUDIO_SHORT)\n"); break;
		case media_raw_audio_format::B_AUDIO_INT: DPRINTF("B_AUDIO_INT)\n"); break;
		case media_raw_audio_format::B_AUDIO_CHAR: DPRINTF("B_AUDIO_CHAR)\n"); break;
		case media_raw_audio_format::B_AUDIO_UCHAR: DPRINTF("B_AUDIO_UCHAR)\n"); break;
		default: DPRINTF("unknown)\n"); break;
	}
}


_SoundPlayNode::~_SoundPlayNode()
{
	CALLED();
	Quit();
}

bool
_SoundPlayNode::IsPlaying()
{
	return RunState() == B_STARTED;
}

bigtime_t
_SoundPlayNode::Latency()
{
	return EventLatency();
}


media_multi_audio_format
_SoundPlayNode::Format() const
{
	return mFormat.u.raw_audio;
}

// -------------------------------------------------------- //
// implementation of BMediaNode
// -------------------------------------------------------- //

BMediaAddOn * _SoundPlayNode::AddOn(int32 * internal_id) const
{
	CALLED();
	// BeBook says this only gets called if we were in an add-on.
	return NULL;
}

void _SoundPlayNode::Preroll(void)
{
	CALLED();
	// XXX:Performance opportunity
	BMediaNode::Preroll();
}

status_t _SoundPlayNode::HandleMessage(int32 message, const void * data, size_t size)
{
	CALLED();
	return B_ERROR;
}

void _SoundPlayNode::NodeRegistered(void)
{
	CALLED();

	if (mInitCheckStatus != B_OK) {
		ReportError(B_NODE_IN_DISTRESS);
		return;
	}

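	// The control thread spawned by Run() uses this priority; presumably urgent
	// so that buffers are produced in time for their performance deadlines.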
	SetPriority(B_URGENT_PRIORITY);

	mOutput.format = mFormat;
	mOutput.destination = media_destination::null;
	mOutput.source.port = ControlPort();
	mOutput.source.id = 0;
	mOutput.node = Node();
	strcpy(mOutput.name, Name());

	Run();
}

status_t _SoundPlayNode::RequestCompleted(const media_request_info &info)
{
	CALLED();
	return B_OK;
}

void _SoundPlayNode::SetTimeSource(BTimeSource *timeSource)
{
	CALLED();
	BMediaNode::SetTimeSource(timeSource);
}

void
_SoundPlayNode::SetRunMode(run_mode mode)
{
	TRACE("_SoundPlayNode::SetRunMode mode:%i\n", mode);
	BMediaNode::SetRunMode(mode);
}

// -------------------------------------------------------- //
// implementation for BBufferProducer
// -------------------------------------------------------- //

status_t
_SoundPlayNode::FormatSuggestionRequested(media_type type, int32 /*quality*/, media_format* format)
{
	// FormatSuggestionRequested() is not necessarily part of the format negotiation
	// process; it's simply an interrogation -- the caller wants to see what the node's
	// preferred data format is, given a suggestion by the caller.
	CALLED();

	// a wildcard type is okay; but we only support raw audio
	if (type != B_MEDIA_RAW_AUDIO && type != B_MEDIA_UNKNOWN_TYPE)
		return B_MEDIA_BAD_FORMAT;

	// this is the format we'll be returning (our preferred format)
	*format = mFormat;

	return B_OK;
}

status_t
_SoundPlayNode::FormatProposal(const media_source& output, media_format* format)
{
	// FormatProposal() is the first stage in the BMediaRoster::Connect() process.  We hand
	// out a suggested format, with wildcards for any variations we support.
	CALLED();

	// is this a proposal for our one output?
	if (output != mOutput.source) {
		TRACE("_SoundPlayNode::FormatProposal returning B_MEDIA_BAD_SOURCE\n");
		return B_MEDIA_BAD_SOURCE;
	}

	// we only support raw audio (in the format we were constructed with), so we
	// always return that, but we supply an error code depending on whether we
	// found the proposal acceptable.
	media_type requestedType = format->type;
	*format = mFormat;
	if ((requestedType != B_MEDIA_UNKNOWN_TYPE) && (requestedType != B_MEDIA_RAW_AUDIO)) {
		TRACE("_SoundPlayNode::FormatProposal returning B_MEDIA_BAD_FORMAT\n");
		return B_MEDIA_BAD_FORMAT;
	}
	else
		return B_OK;		// raw audio or wildcard type, either is okay by us
}

status_t
_SoundPlayNode::FormatChangeRequested(const media_source& source, const media_destination& destination, media_format* io_format, int32* _deprecated_)
{
	CALLED();

	// we don't support any other formats, so we just reject any format changes.
	return B_ERROR;
}

status_t
_SoundPlayNode::GetNextOutput(int32* cookie, media_output* out_output)
{
	CALLED();

	if (*cookie == 0) {
		*out_output = mOutput;
		*cookie += 1;
		return B_OK;
	} else {
		return B_BAD_INDEX;
	}
}

status_t
_SoundPlayNode::DisposeOutputCookie(int32 cookie)
{
	CALLED();
	// do nothing because we don't use the cookie for anything special
	return B_OK;
}

status_t
_SoundPlayNode::SetBufferGroup(const media_source& for_source, BBufferGroup* newGroup)
{
	CALLED();

	// is this our output?
	if (for_source != mOutput.source) {
		TRACE("_SoundPlayNode::SetBufferGroup returning B_MEDIA_BAD_SOURCE\n");
		return B_MEDIA_BAD_SOURCE;
	}

	// Are we being passed the buffer group we're already using?
	if (newGroup == mBufferGroup)
		return B_OK;

	// Ahh, someone wants us to use a different buffer group.  At this point we delete
	// the one we are using and use the specified one instead.  If the specified group is
	// NULL, we need to recreate one ourselves, and use *that*.  Note that if we're
	// caching a BBuffer that we requested earlier, we have to Recycle() that buffer
	// *before* deleting the buffer group, otherwise we'll deadlock waiting for that
	// buffer to be recycled!
	delete mBufferGroup;		// waits for all buffers to recycle
	if (newGroup != NULL)
	{
		// we were given a valid group; just use that one from now on
		mBufferGroup = newGroup;
	}
	else
	{
		// we were passed a NULL group pointer; that means we construct
		// our own buffer group to use from now on
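		// The count heuristic matches AllocateBuffers(): enough buffers to
		// span the downstream latency, plus two spares (presumably one in
		// transit and one being filled), and never fewer than three.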
		size_t size = mOutput.format.u.raw_audio.buffer_size;
		int32 count = int32(mLatency / BufferDuration() + 1 + 1);
		if (count < 3)
			count = 3;
		mBufferGroup = new BBufferGroup(size, count);
	}

	return B_OK;
}

status_t
_SoundPlayNode::GetLatency(bigtime_t* out_latency)
{
	CALLED();

	// report our *total* latency:  internal plus downstream plus scheduling
	*out_latency = EventLatency() + SchedulingLatency();
	return B_OK;
}

status_t
_SoundPlayNode::PrepareToConnect(const media_source& what, const media_destination& where, media_format* format, media_source* out_source, char* out_name)
{
	// PrepareToConnect() is the second stage of format negotiations that happens
	// inside BMediaRoster::Connect().  At this point, the consumer's AcceptFormat()
	// method has been called, and that node has potentially changed the proposed
	// format.  It may also have left wildcards in the format.  PrepareToConnect()
	// *must* fully specialize the format before returning!
	CALLED();

	// is this our output?
	if (what != mOutput.source)
	{
		TRACE("_SoundPlayNode::PrepareToConnect returning B_MEDIA_BAD_SOURCE\n");
		return B_MEDIA_BAD_SOURCE;
	}

	// are we already connected?
	if (mOutput.destination != media_destination::null)
		return B_MEDIA_ALREADY_CONNECTED;

	// the format may not yet be fully specialized (the consumer might have
	// passed back some wildcards).  Finish specializing it now, and return an
	// error if we don't support the requested format.
	if (format->type != B_MEDIA_RAW_AUDIO)
	{
		TRACE("\tnon-raw-audio format?!\n");
		return B_MEDIA_BAD_FORMAT;
	}
	// !!! validate all other fields except for buffer_size here, because the consumer might have
	// supplied different values from AcceptFormat()?

	// check the buffer size, which may still be wildcarded
	if (format->u.raw_audio.buffer_size == media_raw_audio_format::wildcard.buffer_size)
	{
		format->u.raw_audio.buffer_size = 2048;		// pick something comfortable to suggest
		TRACE("\tno buffer size provided, suggesting %lu\n", format->u.raw_audio.buffer_size);
	}
	else
	{
		TRACE("\tconsumer suggested buffer_size %lu\n", format->u.raw_audio.buffer_size);
	}

	// Now reserve the connection, and return information about it
	mOutput.destination = where;
	mOutput.format = *format;
	*out_source = mOutput.source;
	strcpy(out_name, Name());
	return B_OK;
}

void
_SoundPlayNode::Connect(status_t error, const media_source& source, const media_destination& destination, const media_format& format, char* io_name)
{
	CALLED();

	// is this our output?
	if (source != mOutput.source)
	{
		TRACE("_SoundPlayNode::Connect returning\n");
		return;
	}

	// If something earlier failed, Connect() might still be called, but with a non-zero
	// error code.  When that happens we simply unreserve the connection and do
	// nothing else.
	if (error)
	{
		mOutput.destination = media_destination::null;
		mOutput.format = mFormat;
		return;
	}

	// Okay, the connection has been confirmed.  Record the destination and format
	// that we agreed on, and report our connection name again.
	mOutput.destination = destination;
	mOutput.format = format;
	strcpy(io_name, Name());

	// Now that we're connected, we can determine our downstream latency.
	// Do so, then make sure we get our events early enough.
	media_node_id id;
	FindLatencyFor(mOutput.destination, &mLatency, &id);
	TRACE("_SoundPlayNode::Connect: downstream latency = %Ld\n", mLatency);

	// reset our buffer duration, etc. to avoid later calculations
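	// duration (µs) = 1e6 * frames_per_buffer / frame_rate, where
	// frames_per_buffer = buffer_size / (bytes_per_sample * channel_count).
	// The 1e6 factor is split into 10000 and 100, presumably to keep the
	// intermediate integer products in range.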
	bigtime_t duration = mOutput.format.u.raw_audio.buffer_size * 10000
			/ ( (mOutput.format.u.raw_audio.format & media_raw_audio_format::B_AUDIO_SIZE_MASK)
				* mOutput.format.u.raw_audio.channel_count)
			/ ((int32)(mOutput.format.u.raw_audio.frame_rate / 100));
	SetBufferDuration(duration);
	TRACE("_SoundPlayNode::Connect: buffer duration is %Ld\n", duration);

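	// Start out with an internal (processing) latency of 3/4 of a buffer;
	// LateNoticeReceived() may grow it later if we turn out to be running late.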
	mInternalLatency = (3 * BufferDuration()) / 4;
	TRACE("_SoundPlayNode::Connect: using %Ld as internal latency\n", mInternalLatency);
	SetEventLatency(mLatency + mInternalLatency);

	// Set up the buffer group for our connection, as long as nobody handed us a
	// buffer group (via SetBufferGroup()) prior to this.  That can happen, for example,
	// if the consumer calls SetOutputBuffersFor() on us from within its Connected()
	// method.
	if (!mBufferGroup)
		AllocateBuffers();
}

void
_SoundPlayNode::Disconnect(const media_source& what, const media_destination& where)
{
	CALLED();

	// is this our output?
	if (what != mOutput.source)
	{
		TRACE("_SoundPlayNode::Disconnect returning\n");
		return;
	}

	// Make sure that our connection is the one being disconnected
	if ((where == mOutput.destination) && (what == mOutput.source))
	{
		mOutput.destination = media_destination::null;
		mOutput.format = mFormat;
		delete mBufferGroup;
		mBufferGroup = NULL;
	}
	else
	{
		fprintf(stderr, "\tDisconnect() called with wrong source/destination (%ld/%ld), ours is (%ld/%ld)\n",
			what.id, where.id, mOutput.source.id, mOutput.destination.id);
	}
}

void
_SoundPlayNode::LateNoticeReceived(const media_source& what, bigtime_t how_much, bigtime_t performance_time)
{
	CALLED();

	printf("_SoundPlayNode::LateNoticeReceived, %Ld too late at %Ld\n", how_much, performance_time);

	// is this our output?
	if (what != mOutput.source)
	{
		TRACE("_SoundPlayNode::LateNoticeReceived returning\n");
		return;
	}

	if (RunMode() != B_DROP_DATA)
	{
		// We're late, and our run mode dictates that we try to produce buffers
		// earlier in order to catch up.  This argues that the downstream nodes are
		// not properly reporting their latency, but there's not much we can do about
		// that at the moment, so we try to start producing buffers earlier to
		// compensate.

		mInternalLatency += how_much;

		if (mInternalLatency > 30000)	// cap the added latency so it can't grow unbounded
			mInternalLatency = 30000;

		SetEventLatency(mLatency + mInternalLatency);
		TRACE("_SoundPlayNode::LateNoticeReceived: increasing latency to %Ld\n", mLatency + mInternalLatency);
	}
	else
	{
		// The other run modes dictate various strategies for sacrificing data quality
		// in the interests of timely data delivery.  The way *we* do this is to skip
		// a buffer, which catches us up in time by one buffer duration.

		size_t nFrames = mOutput.format.u.raw_audio.buffer_size
			/ ((mOutput.format.u.raw_audio.format & media_raw_audio_format::B_AUDIO_SIZE_MASK)
			* mOutput.format.u.raw_audio.channel_count);

		mFramesSent += nFrames;

		TRACE("_SoundPlayNode::LateNoticeReceived: skipping a buffer to try to catch up\n");
	}
}

void
_SoundPlayNode::EnableOutput(const media_source& what, bool enabled, int32* _deprecated_)
{
	CALLED();

	// If I had more than one output, I'd have to walk my list of output records to see
	// which one matched the given source, and then enable/disable that one.  But this
	// node only has one output, so I just make sure the given source matches, then set
	// the enable state accordingly.
	// is this our output?
	if (what != mOutput.source)
	{
		fprintf(stderr, "_SoundPlayNode::EnableOutput returning\n");
		return;
	}

	mOutputEnabled = enabled;
}

void
_SoundPlayNode::AdditionalBufferRequested(const media_source& source, media_buffer_id prev_buffer, bigtime_t prev_time, const media_seek_tag* prev_tag)
{
	CALLED();
	// we don't support offline mode
	return;
}

void
_SoundPlayNode::LatencyChanged(const media_source& source, const media_destination& destination, bigtime_t new_latency, uint32 flags)
{
	CALLED();

	printf("_SoundPlayNode::LatencyChanged: new_latency %Ld\n", new_latency);

	// something downstream changed latency, so we need to start producing
	// buffers earlier (or later) than we were previously.  Make sure that the
	// connection that changed is ours, and adjust to the new downstream
	// latency if so.
	if ((source == mOutput.source) && (destination == mOutput.destination))
	{
		mLatency = new_latency;
		SetEventLatency(mLatency + mInternalLatency);
	} else {
		printf("_SoundPlayNode::LatencyChanged: ignored\n");
	}
}

// -------------------------------------------------------- //
// implementation for BMediaEventLooper
// -------------------------------------------------------- //

void _SoundPlayNode::HandleEvent(
				const media_timed_event *event,
				bigtime_t lateness,
				bool realTimeEvent = false)
{
	CALLED();
	switch (event->type) {
		case BTimedEventQueue::B_START:
			HandleStart(event,lateness,realTimeEvent);
			break;
		case BTimedEventQueue::B_SEEK:
			HandleSeek(event,lateness,realTimeEvent);
			break;
		case BTimedEventQueue::B_WARP:
			HandleWarp(event,lateness,realTimeEvent);
			break;
		case BTimedEventQueue::B_STOP:
			HandleStop(event,lateness,realTimeEvent);
			break;
		case BTimedEventQueue::B_HANDLE_BUFFER:
			// we don't get any buffers
			break;
		case SEND_NEW_BUFFER_EVENT:
			if (RunState() == BMediaEventLooper::B_STARTED) {
				SendNewBuffer(event, lateness, realTimeEvent);
			}
			break;
		case BTimedEventQueue::B_DATA_STATUS:
			HandleDataStatus(event,lateness,realTimeEvent);
			break;
		case BTimedEventQueue::B_PARAMETER:
			HandleParameter(event,lateness,realTimeEvent);
			break;
		default:
			fprintf(stderr,"  unknown event type: %li\n",event->type);
			break;
	}
}

// protected:

// how should we handle late buffers?  drop them?
// notify the producer?
status_t
_SoundPlayNode::SendNewBuffer(const media_timed_event *event, bigtime_t lateness, bool realTimeEvent)
{
	CALLED();
	// printf("latency = %12Ld, event = %12Ld, sched = %5Ld, arrive at %12Ld, now %12Ld, current lateness %12Ld\n", EventLatency() + SchedulingLatency(), EventLatency(), SchedulingLatency(), event->event_time, TimeSource()->Now(), lateness);

	// make sure we're both started *and* connected before delivering a buffer
	if ((RunState() != BMediaEventLooper::B_STARTED) || (mOutput.destination == media_destination::null))
		return B_OK;

	// The event->event_time is the time at which the buffer we are preparing here should
	// arrive at its destination. The BMediaEventLooper should have scheduled us early enough
	// (based on EventLatency() and the SchedulingLatency()) to make this possible.
	// lateness is independent of EventLatency()!

	if (lateness > (BufferDuration() / 3) ) {
		printf("_SoundPlayNode::SendNewBuffer, event scheduled much too late, lateness is %Ld\n", lateness);
	}

	// skip buffer creation if output not enabled
	if (mOutputEnabled) {

		// Get the next buffer of data
		BBuffer* buffer = FillNextBuffer(event->event_time);

		if (buffer) {

			// If we are ready way too early, decrease internal latency
/*
			bigtime_t how_early = event->event_time - TimeSource()->Now() - mLatency - mInternalLatency;
			if (how_early > 5000) {

				printf("_SoundPlayNode::SendNewBuffer, event scheduled too early, how_early is %Ld\n", how_early);

				if (mTooEarlyCount++ == 5) {
					mInternalLatency -= how_early;
					if (mInternalLatency < 500)
						mInternalLatency = 500;
					printf("_SoundPlayNode::SendNewBuffer setting internal latency to %Ld\n", mInternalLatency);
					SetEventLatency(mLatency + mInternalLatency);
					mTooEarlyCount = 0;
				}
			}
*/
			// send the buffer downstream if and only if output is enabled
			if (B_OK != SendBuffer(buffer, mOutput.destination)) {
				// we need to recycle the buffer
				// if the call to SendBuffer() fails
				printf("_SoundPlayNode::SendNewBuffer: Buffer sending failed\n");
				buffer->Recycle();
			}
		}
	}

	// track how much media we've delivered so far
	size_t nFrames = mOutput.format.u.raw_audio.buffer_size
		/ ((mOutput.format.u.raw_audio.format & media_raw_audio_format::B_AUDIO_SIZE_MASK)
		* mOutput.format.u.raw_audio.channel_count);
	mFramesSent += nFrames;

	// The buffer is on its way; now schedule the next one to go
	// nextEvent is the time at which the buffer should arrive at its destination
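	// It is computed from the total frames sent since mStartTime, rather than by
	// repeatedly adding BufferDuration(), which keeps rounding error from accumulating.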
	bigtime_t nextEvent = mStartTime + bigtime_t((1000000LL * mFramesSent) / mOutput.format.u.raw_audio.frame_rate);
	media_timed_event nextBufferEvent(nextEvent, SEND_NEW_BUFFER_EVENT);
	EventQueue()->AddEvent(nextBufferEvent);

	return B_OK;
}

status_t
_SoundPlayNode::HandleDataStatus(
						const media_timed_event *event,
						bigtime_t lateness,
						bool realTimeEvent = false)
{
	TRACE("_SoundPlayNode::HandleDataStatus status: %li, lateness: %Li\n", event->data, lateness);
	switch(event->data) {
		case B_DATA_NOT_AVAILABLE:
			break;
		case B_DATA_AVAILABLE:
			break;
		case B_PRODUCER_STOPPED:
			break;
		default:
			break;
	}
	return B_OK;
}

status_t
_SoundPlayNode::HandleStart(
						const media_timed_event *event,
						bigtime_t lateness,
						bool realTimeEvent = false)
{
	CALLED();
	// don't do anything if we're already running
	if (RunState() != B_STARTED)
	{
		// We want to start sending buffers now, so we set up the buffer-sending bookkeeping
		// and fire off the first "produce a buffer" event.

		mFramesSent = 0;
		mStartTime = event->event_time;
		media_timed_event firstBufferEvent(event->event_time, SEND_NEW_BUFFER_EVENT);

		// Alternatively, we could call HandleEvent() directly with this event, to avoid a trip through
		// the event queue, like this:
		//
		//		this->HandleEvent(&firstBufferEvent, 0, false);
		//
		EventQueue()->AddEvent(firstBufferEvent);
	}
	return B_OK;
}

status_t
_SoundPlayNode::HandleSeek(
						const media_timed_event *event,
						bigtime_t lateness,
						bool realTimeEvent = false)
{
	CALLED();
	DPRINTF("_SoundPlayNode::HandleSeek(t=%lld,d=%li,bd=%lld)\n",event->event_time,event->data,event->bigdata);
	return B_OK;
}

status_t
_SoundPlayNode::HandleWarp(
						const media_timed_event *event,
						bigtime_t lateness,
						bool realTimeEvent = false)
{
	CALLED();
	return B_OK;
}

status_t
_SoundPlayNode::HandleStop(
						const media_timed_event *event,
						bigtime_t lateness,
						bool realTimeEvent = false)
{
	CALLED();
	// flush the queue so downstreamers don't get any more
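	// B_ALWAYS with inclusive == true drops every queued SEND_NEW_BUFFER_EVENT,
	// regardless of its event time, so no further buffers are produced.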
	EventQueue()->FlushEvents(0, BTimedEventQueue::B_ALWAYS, true, SEND_NEW_BUFFER_EVENT);

	return B_OK;
}

status_t
_SoundPlayNode::HandleParameter(
				const media_timed_event *event,
				bigtime_t lateness,
				bool realTimeEvent = false)
{
	CALLED();
	return B_OK;
}

void
_SoundPlayNode::AllocateBuffers()
{
	CALLED();

	// allocate enough buffers to span our downstream latency, plus a couple of spares
	size_t size = mOutput.format.u.raw_audio.buffer_size;
	int32 count = int32(mLatency / BufferDuration() + 1 + 1);

	DPRINTF("\tlatency = %Ld, buffer duration = %Ld, count %ld\n", mLatency, BufferDuration(), count);

	if (count < 3)
		count = 3;

	DPRINTF("\tcreating group of %ld buffers, size = %lu\n", count, size);
	mBufferGroup = new BBufferGroup(size, count);
}

BBuffer*
_SoundPlayNode::FillNextBuffer(bigtime_t event_time)
{
	CALLED();

	// get a buffer from our buffer group
	BBuffer* buf = mBufferGroup->RequestBuffer(mOutput.format.u.raw_audio.buffer_size, BufferDuration() / 2);

	// if we fail to get a buffer (for example, if the request times out), we skip this
	// buffer and go on to the next, to avoid locking up the control thread
	if (!buf) {
		ERROR("_SoundPlayNode::FillNextBuffer: RequestBuffer failed\n");
		return NULL;
	}

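	// Zero the buffer first so it carries silence whenever the player has no
	// data to contribute.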
	memset(buf->Data(), 0, mOutput.format.u.raw_audio.buffer_size);
	if (mPlayer->HasData()) {
		mPlayer->PlayBuffer(buf->Data(),
			mOutput.format.u.raw_audio.buffer_size, mOutput.format.u.raw_audio);
	}

	// fill in the buffer header
	media_header* hdr = buf->Header();
	hdr->type = B_MEDIA_RAW_AUDIO;
	hdr->size_used = mOutput.format.u.raw_audio.buffer_size;
	hdr->time_source = TimeSource()->ID();
	hdr->start_time = event_time;
	return buf;
}
762