xref: /haiku/src/kits/media/SoundPlayNode.cpp (revision 2f470aec1c92ce6917b8a903e343795dc77af41f)
1 /***********************************************************************
2  * AUTHOR: Marcus Overhagen, Jérôme Duval
3  *   FILE: SoundPlayNode.cpp
4  *  DESCR: This is the BBufferProducer, used internally by BSoundPlayer
5  *         This belongs into a private namespace, but isn't for
6  *         compatibility reasons.
7  ***********************************************************************/
8 
9 #include <TimeSource.h>
10 #include <MediaRoster.h>
11 #include <string.h>
12 #include <stdlib.h>
13 #include <unistd.h>
14 #include "SoundPlayNode.h"
15 #include "debug.h"
16 
17 #define SEND_NEW_BUFFER_EVENT (BTimedEventQueue::B_USER_EVENT + 1)
18 
// Construct the producer node used internally by BSoundPlayer.
// 'player' is the owning BSoundPlayer; FillNextBuffer() later pulls audio
// data from it via HasData()/PlayBuffer().
_SoundPlayNode::_SoundPlayNode(const char *name, BSoundPlayer *player) :
	BMediaNode(name),
	BBufferProducer(B_MEDIA_RAW_AUDIO),
	BMediaEventLooper(),
	mPlayer(player),
	mInitCheckStatus(B_OK),
	mOutputEnabled(true),
	mBufferGroup(NULL),
	mFramesSent(0),
	mTooEarlyCount(0)
{
	CALLED();
	// Start with a completely unspecialized raw-audio format; it gets
	// fully specialized during format negotiation (PrepareToConnect()).
	mOutput.format.type = B_MEDIA_RAW_AUDIO;
	mOutput.format.u.raw_audio = media_multi_audio_format::wildcard;
}
34 
35 
_SoundPlayNode::~_SoundPlayNode()
{
	CALLED();
	// Quit() shuts down the BMediaEventLooper control thread before the
	// rest of the object is torn down.
	Quit();
}
41 
42 bool
43 _SoundPlayNode::IsPlaying()
44 {
45 	return RunState() == B_STARTED;
46 }
47 
48 bigtime_t
49 _SoundPlayNode::CurrentTime()
50 {
51 	int frame_rate = (int)mOutput.format.u.raw_audio.frame_rate;
52 	return frame_rate == 0 ? 0 : bigtime_t((1000000LL * mFramesSent) / frame_rate);
53 }
54 
55 media_multi_audio_format
56 _SoundPlayNode::Format() const
57 {
58 	return mOutput.format.u.raw_audio;
59 }
60 
61 // -------------------------------------------------------- //
62 // implementation of BMediaNode
63 // -------------------------------------------------------- //
64 
// Report the add-on this node was instantiated from.
// We are created directly by BSoundPlayer, not by a media add-on, so
// there is nothing to return.
BMediaAddOn * _SoundPlayNode::AddOn(int32 * internal_id) const
{
	CALLED();
	// BeBook says this only gets called if we were in an add-on.
	return NULL;
}
71 
// Give the node a chance to get ready to start quickly; we currently just
// forward to the base class.
void _SoundPlayNode::Preroll(void)
{
	CALLED();
	// XXX:Performance opportunity
	BMediaNode::Preroll();
}
78 
// We define no private control-port messages of our own; returning B_ERROR
// lets the base classes' HandleMessage() implementations take a shot at it.
status_t _SoundPlayNode::HandleMessage(int32 message, const void * data, size_t size)
{
	CALLED();
	return B_ERROR;
}
84 
85 void _SoundPlayNode::NodeRegistered(void)
86 {
87 	CALLED();
88 
89 	if (mInitCheckStatus != B_OK) {
90 		ReportError(B_NODE_IN_DISTRESS);
91 		return;
92 	}
93 
94 	SetPriority(B_URGENT_PRIORITY);
95 
96 	mOutput.format.type = B_MEDIA_RAW_AUDIO;
97 	mOutput.format.u.raw_audio = media_multi_audio_format::wildcard;
98 	mOutput.destination = media_destination::null;
99 	mOutput.source.port = ControlPort();
100 	mOutput.source.id = 0;
101 	mOutput.node = Node();
102 	strcpy(mOutput.name, Name());
103 
104 	Run();
105 }
106 
// Notification that an asynchronous roster request finished; we issue no
// such requests, so there is nothing to do.
status_t _SoundPlayNode::RequestCompleted(const media_request_info &info)
{
	CALLED();
	return B_OK;
}
112 
// Adopt a new time source; the base class does all the bookkeeping.
void _SoundPlayNode::SetTimeSource(BTimeSource *timeSource)
{
	CALLED();
	BMediaNode::SetTimeSource(timeSource);
}
118 
// Record the run mode (B_DROP_DATA, B_INCREASE_LATENCY, ...); the mode is
// consulted later in LateNoticeReceived() to pick a catch-up strategy.
void
_SoundPlayNode::SetRunMode(run_mode mode)
{
	TRACE("_SoundPlayNode::SetRunMode mode:%i\n", mode);
	BMediaNode::SetRunMode(mode);
}
125 
126 // -------------------------------------------------------- //
127 // implementation for BBufferProducer
128 // -------------------------------------------------------- //
129 
130 status_t
131 _SoundPlayNode::FormatSuggestionRequested(media_type type, int32 /*quality*/, media_format* format)
132 {
133 	// FormatSuggestionRequested() is not necessarily part of the format negotiation
134 	// process; it's simply an interrogation -- the caller wants to see what the node's
135 	// preferred data format is, given a suggestion by the caller.
136 	CALLED();
137 
138 	// a wildcard type is okay; but we only support raw audio
139 	if (type != B_MEDIA_RAW_AUDIO && type != B_MEDIA_UNKNOWN_TYPE)
140 		return B_MEDIA_BAD_FORMAT;
141 
142 	// this is the format we'll be returning (our preferred format)
143 	format->type = B_MEDIA_RAW_AUDIO;
144 	format->u.raw_audio = media_multi_audio_format::wildcard;
145 
146 	return B_OK;
147 }
148 
149 status_t
150 _SoundPlayNode::FormatProposal(const media_source& output, media_format* format)
151 {
152 	// FormatProposal() is the first stage in the BMediaRoster::Connect() process.  We hand
153 	// out a suggested format, with wildcards for any variations we support.
154 	CALLED();
155 
156 	// is this a proposal for our one output?
157 	if (output != mOutput.source) {
158 		TRACE("_SoundPlayNode::FormatProposal returning B_MEDIA_BAD_SOURCE\n");
159 		return B_MEDIA_BAD_SOURCE;
160 	}
161 
162 	// if wildcard, change it to raw audio
163 	if (format->type == B_MEDIA_UNKNOWN_TYPE)
164 		format->type = B_MEDIA_RAW_AUDIO;
165 
166 	// if not raw audio, we can't support it
167 	if (format->type != B_MEDIA_RAW_AUDIO) {
168 		TRACE("_SoundPlayNode::FormatProposal returning B_MEDIA_BAD_FORMAT\n");
169 		return B_MEDIA_BAD_FORMAT;
170 	}
171 
172 #if DEBUG >0
173 	char buf[100];
174 	string_for_format(*format, buf, sizeof(buf));
175 	TRACE("_SoundPlayNode::FormatProposal: format %s\n", buf);
176 #endif
177 
178 	return B_OK;
179 }
180 
// A connected consumer asked us to switch to a different format mid-stream.
// We only ever produce the format negotiated at connect time, so every
// change request is rejected.
status_t
_SoundPlayNode::FormatChangeRequested(const media_source& source, const media_destination& destination, media_format* io_format, int32* _deprecated_)
{
	CALLED();

	// we don't support any other formats, so we just reject any format changes.
	return B_ERROR;
}
189 
190 status_t
191 _SoundPlayNode::GetNextOutput(int32* cookie, media_output* out_output)
192 {
193 	CALLED();
194 
195 	if (*cookie == 0) {
196 		*out_output = mOutput;
197 		*cookie += 1;
198 		return B_OK;
199 	} else {
200 		return B_BAD_INDEX;
201 	}
202 }
203 
// Release any state tied to an iteration cookie from GetNextOutput().
status_t
_SoundPlayNode::DisposeOutputCookie(int32 cookie)
{
	CALLED();
	// do nothing because we don't use the cookie for anything special
	return B_OK;
}
211 
212 status_t
213 _SoundPlayNode::SetBufferGroup(const media_source& for_source, BBufferGroup* newGroup)
214 {
215 	CALLED();
216 
217 	// is this our output?
218 	if (for_source != mOutput.source) {
219 		TRACE("_SoundPlayNode::SetBufferGroup returning B_MEDIA_BAD_SOURCE\n");
220 		return B_MEDIA_BAD_SOURCE;
221 	}
222 
223 	// Are we being passed the buffer group we're already using?
224 	if (newGroup == mBufferGroup)
225 		return B_OK;
226 
227 	// Ahh, someone wants us to use a different buffer group.  At this point we delete
228 	// the one we are using and use the specified one instead.  If the specified group is
229 	// NULL, we need to recreate one ourselves, and use *that*.  Note that if we're
230 	// caching a BBuffer that we requested earlier, we have to Recycle() that buffer
231 	// *before* deleting the buffer group, otherwise we'll deadlock waiting for that
232 	// buffer to be recycled!
233 	delete mBufferGroup;		// waits for all buffers to recycle
234 	if (newGroup != NULL)
235 	{
236 		// we were given a valid group; just use that one from now on
237 		mBufferGroup = newGroup;
238 	}
239 	else
240 	{
241 		// we were passed a NULL group pointer; that means we construct
242 		// our own buffer group to use from now on
243 		size_t size = mOutput.format.u.raw_audio.buffer_size;
244 		int32 count = int32(mLatency / BufferDuration() + 1 + 1);
245 		if (count < 3)
246 			count = 3;
247 		mBufferGroup = new BBufferGroup(size, count);
248 	}
249 
250 	return B_OK;
251 }
252 
253 status_t
254 _SoundPlayNode::GetLatency(bigtime_t* out_latency)
255 {
256 	CALLED();
257 
258 	// report our *total* latency:  internal plus downstream plus scheduling
259 	*out_latency = EventLatency() + SchedulingLatency();
260 	return B_OK;
261 }
262 
263 status_t
264 _SoundPlayNode::PrepareToConnect(const media_source& what, const media_destination& where, media_format* format, media_source* out_source, char* out_name)
265 {
266 	// PrepareToConnect() is the second stage of format negotiations that happens
267 	// inside BMediaRoster::Connect(). At this point, the consumer's AcceptFormat()
268 	// method has been called, and that node has potentially changed the proposed
269 	// format. It may also have left wildcards in the format. PrepareToConnect()
270 	// *must* fully specialize the format before returning!
271 	CALLED();
272 
273 	// is this our output?
274 	if (what != mOutput.source)	{
275 		TRACE("_SoundPlayNode::PrepareToConnect returning B_MEDIA_BAD_SOURCE\n");
276 		return B_MEDIA_BAD_SOURCE;
277 	}
278 
279 	// are we already connected?
280 	if (mOutput.destination != media_destination::null)
281 		return B_MEDIA_ALREADY_CONNECTED;
282 
283 	// the format may not yet be fully specialized (the consumer might have
284 	// passed back some wildcards). Finish specializing it now, and return an
285 	// error if we don't support the requested format.
286 
287 #if DEBUG > 0
288 	char buf[100];
289 	string_for_format(*format, buf, sizeof(buf));
290 	TRACE("_SoundPlayNode::PrepareToConnect: input format %s\n", buf);
291 #endif
292 
293 	// if not raw audio, we can't support it
294 	if (format->type != B_MEDIA_UNKNOWN_TYPE && format->type != B_MEDIA_RAW_AUDIO) {
295 		TRACE("_SoundPlayNode::PrepareToConnect: non raw format, returning B_MEDIA_BAD_FORMAT\n");
296 		return B_MEDIA_BAD_FORMAT;
297 	}
298 
299 	// the haiku mixer might have a hint
300 	// for us, so check for it
301 	#define FORMAT_USER_DATA_TYPE 		0x7294a8f3
302 	#define FORMAT_USER_DATA_MAGIC_1	0xc84173bd
303 	#define FORMAT_USER_DATA_MAGIC_2	0x4af62b7d
304 	uint32 channel_count = 0;
305 	float frame_rate = 0;
306 	if (format->user_data_type == FORMAT_USER_DATA_TYPE
307 			&& *(uint32 *)&format->user_data[0] == FORMAT_USER_DATA_MAGIC_1
308 			&& *(uint32 *)&format->user_data[44] == FORMAT_USER_DATA_MAGIC_2) {
309 		channel_count = *(uint32 *)&format->user_data[4];
310 		frame_rate = *(float *)&format->user_data[20];
311 		TRACE("_SoundPlayNode::PrepareToConnect: found mixer info: channel_count %ld, frame_rate %.1f\n", channel_count, frame_rate);
312 	}
313 
314 	media_format default_format;
315 	default_format.type = B_MEDIA_RAW_AUDIO;
316 	default_format.u.raw_audio.frame_rate = frame_rate > 0 ? frame_rate : 44100;
317 	default_format.u.raw_audio.channel_count = channel_count > 0 ? channel_count : 2;
318 	default_format.u.raw_audio.format = media_raw_audio_format::B_AUDIO_FLOAT;
319 	default_format.u.raw_audio.byte_order = B_MEDIA_HOST_ENDIAN;
320 	default_format.u.raw_audio.buffer_size = 0;
321 	format->SpecializeTo(&default_format);
322 
323 	if (format->u.raw_audio.buffer_size == 0)
324 		format->u.raw_audio.buffer_size = BMediaRoster::Roster()->AudioBufferSizeFor(
325 			format->u.raw_audio.channel_count,
326 			format->u.raw_audio.format,
327 			format->u.raw_audio.frame_rate);
328 
329 #if DEBUG > 0
330 	string_for_format(*format, buf, sizeof(buf));
331 	TRACE("_SoundPlayNode::PrepareToConnect: output format %s\n", buf);
332 #endif
333 
334 	// Now reserve the connection, and return information about it
335 	mOutput.destination = where;
336 	mOutput.format = *format;
337 	*out_source = mOutput.source;
338 	strcpy(out_name, Name());
339 	return B_OK;
340 }
341 
342 void
343 _SoundPlayNode::Connect(status_t error, const media_source& source, const media_destination& destination, const media_format& format, char* io_name)
344 {
345 	CALLED();
346 
347 	// is this our output?
348 	if (source != mOutput.source) {
349 		TRACE("_SoundPlayNode::Connect returning\n");
350 		return;
351 	}
352 
353 	// If something earlier failed, Connect() might still be called, but with a non-zero
354 	// error code.  When that happens we simply unreserve the connection and do
355 	// nothing else.
356 	if (error) {
357 		mOutput.destination = media_destination::null;
358 		mOutput.format.type = B_MEDIA_RAW_AUDIO;
359 		mOutput.format.u.raw_audio = media_multi_audio_format::wildcard;
360 		return;
361 	}
362 
363 	// Okay, the connection has been confirmed.  Record the destination and format
364 	// that we agreed on, and report our connection name again.
365 	mOutput.destination = destination;
366 	mOutput.format = format;
367 	strcpy(io_name, Name());
368 
369 	// Now that we're connected, we can determine our downstream latency.
370 	// Do so, then make sure we get our events early enough.
371 	media_node_id id;
372 	FindLatencyFor(mOutput.destination, &mLatency, &id);
373 	TRACE("_SoundPlayNode::Connect: downstream latency = %Ld\n", mLatency);
374 
375 	// reset our buffer duration, etc. to avoid later calculations
376 	bigtime_t duration = ((mOutput.format.u.raw_audio.buffer_size * 1000000LL)
377 		/ ((mOutput.format.u.raw_audio.format & media_raw_audio_format::B_AUDIO_SIZE_MASK) * mOutput.format.u.raw_audio.channel_count))
378 		/ (int32)mOutput.format.u.raw_audio.frame_rate;
379 	SetBufferDuration(duration);
380 	TRACE("_SoundPlayNode::Connect: buffer duration is %Ld\n", duration);
381 
382 	mInternalLatency = (3 * BufferDuration()) / 4;
383 	TRACE("_SoundPlayNode::Connect: using %Ld as internal latency\n", mInternalLatency);
384 	SetEventLatency(mLatency + mInternalLatency);
385 
386 	// Set up the buffer group for our connection, as long as nobody handed us a
387 	// buffer group (via SetBufferGroup()) prior to this.  That can happen, for example,
388 	// if the consumer calls SetOutputBuffersFor() on us from within its Connected()
389 	// method.
390 	if (!mBufferGroup)
391 		AllocateBuffers();
392 }
393 
394 void
395 _SoundPlayNode::Disconnect(const media_source& what, const media_destination& where)
396 {
397 	CALLED();
398 
399 	// is this our output?
400 	if (what != mOutput.source)
401 	{
402 		TRACE("_SoundPlayNode::Disconnect returning\n");
403 		return;
404 	}
405 
406 	// Make sure that our connection is the one being disconnected
407 	if ((where == mOutput.destination) && (what == mOutput.source))
408 	{
409 		mOutput.destination = media_destination::null;
410 		mOutput.format.type = B_MEDIA_RAW_AUDIO;
411 		mOutput.format.u.raw_audio = media_multi_audio_format::wildcard;
412 		delete mBufferGroup;
413 		mBufferGroup = NULL;
414 	}
415 	else
416 	{
417 		fprintf(stderr, "\tDisconnect() called with wrong source/destination (%ld/%ld), ours is (%ld/%ld)\n",
418 			what.id, where.id, mOutput.source.id, mOutput.destination.id);
419 	}
420 }
421 
// A downstream consumer told us a buffer arrived 'how_much' microseconds
// too late. Depending on the run mode we either produce earlier (raise our
// event latency) or skip ahead by one buffer's worth of frames.
void
_SoundPlayNode::LateNoticeReceived(const media_source& what, bigtime_t how_much, bigtime_t performance_time)
{
	CALLED();

	TRACE("_SoundPlayNode::LateNoticeReceived, %Ld too late at %Ld\n", how_much, performance_time);

	// is this our output?
	if (what != mOutput.source)
	{
		TRACE("_SoundPlayNode::LateNoticeReceived returning\n");
		return;
	}

	if (RunMode() != B_DROP_DATA)
	{
		// We're late, and our run mode dictates that we try to produce buffers
		// earlier in order to catch up.  This argues that the downstream nodes are
		// not properly reporting their latency, but there's not much we can do about
		// that at the moment, so we try to start producing buffers earlier to
		// compensate.

		mInternalLatency += how_much;

		// NOTE(review): 30000 us is an arbitrary cap on the internal
		// latency growth — presumably chosen to keep the total latency
		// tolerable; confirm before changing.
		if (mInternalLatency > 30000)	// avoid getting a too high latency
			mInternalLatency = 30000;

		SetEventLatency(mLatency + mInternalLatency);
		TRACE("_SoundPlayNode::LateNoticeReceived: increasing latency to %Ld\n", mLatency + mInternalLatency);
	}
	else
	{
		// The other run modes dictate various strategies for sacrificing data quality
		// in the interests of timely data delivery.  The way *we* do this is to skip
		// a buffer, which catches us up in time by one buffer duration.

		// frames per buffer = buffer size / (bytes per sample * channels)
		size_t nFrames = mOutput.format.u.raw_audio.buffer_size
			/ ((mOutput.format.u.raw_audio.format & media_raw_audio_format::B_AUDIO_SIZE_MASK)
			* mOutput.format.u.raw_audio.channel_count);

		// advancing mFramesSent shifts the next SEND_NEW_BUFFER_EVENT's
		// schedule forward by one buffer duration (see SendNewBuffer())
		mFramesSent += nFrames;

		TRACE("_SoundPlayNode::LateNoticeReceived: skipping a buffer to try to catch up\n");
	}
}
467 
468 void
469 _SoundPlayNode::EnableOutput(const media_source& what, bool enabled, int32* _deprecated_)
470 {
471 	CALLED();
472 
473 	// If I had more than one output, I'd have to walk my list of output records to see
474 	// which one matched the given source, and then enable/disable that one.  But this
475 	// node only has one output, so I just make sure the given source matches, then set
476 	// the enable state accordingly.
477 	// is this our output?
478 	if (what != mOutput.source)
479 	{
480 		fprintf(stderr, "_SoundPlayNode::EnableOutput returning\n");
481 		return;
482 	}
483 
484 	mOutputEnabled = enabled;
485 }
486 
// Offline-mode hook asking us to produce an extra buffer on demand.
void
_SoundPlayNode::AdditionalBufferRequested(const media_source& source, media_buffer_id prev_buffer, bigtime_t prev_time, const media_seek_tag* prev_tag)
{
	CALLED();
	// we don't support offline mode
	return;
}
494 
495 void
496 _SoundPlayNode::LatencyChanged(const media_source& source, const media_destination& destination, bigtime_t new_latency, uint32 flags)
497 {
498 	CALLED();
499 
500 	TRACE("_SoundPlayNode::LatencyChanged: new_latency %Ld\n", new_latency);
501 
502 	// something downstream changed latency, so we need to start producing
503 	// buffers earlier (or later) than we were previously.  Make sure that the
504 	// connection that changed is ours, and adjust to the new downstream
505 	// latency if so.
506 	if ((source == mOutput.source) && (destination == mOutput.destination))
507 	{
508 		mLatency = new_latency;
509 		SetEventLatency(mLatency + mInternalLatency);
510 	} else {
511 		TRACE("_SoundPlayNode::LatencyChanged: ignored\n");
512 	}
513 }
514 
515 // -------------------------------------------------------- //
516 // implementation for BMediaEventLooper
517 // -------------------------------------------------------- //
518 
519 void _SoundPlayNode::HandleEvent(
520 				const media_timed_event *event,
521 				bigtime_t lateness,
522 				bool realTimeEvent)
523 {
524 	CALLED();
525 	switch (event->type) {
526 		case BTimedEventQueue::B_START:
527 			HandleStart(event,lateness,realTimeEvent);
528 			break;
529 		case BTimedEventQueue::B_SEEK:
530 			HandleSeek(event,lateness,realTimeEvent);
531 			break;
532 		case BTimedEventQueue::B_WARP:
533 			HandleWarp(event,lateness,realTimeEvent);
534 			break;
535 		case BTimedEventQueue::B_STOP:
536 			HandleStop(event,lateness,realTimeEvent);
537 			break;
538 		case BTimedEventQueue::B_HANDLE_BUFFER:
539 			// we don't get any buffers
540 			break;
541 		case SEND_NEW_BUFFER_EVENT:
542 			if (RunState() == BMediaEventLooper::B_STARTED) {
543 				SendNewBuffer(event, lateness, realTimeEvent);
544 			}
545 			break;
546 		case BTimedEventQueue::B_DATA_STATUS:
547 			HandleDataStatus(event,lateness,realTimeEvent);
548 			break;
549 		case BTimedEventQueue::B_PARAMETER:
550 			HandleParameter(event,lateness,realTimeEvent);
551 			break;
552 		default:
553 			fprintf(stderr,"  unknown event type: %li\n",event->type);
554 			break;
555 	}
556 }
557 
558 // protected:
559 
560 // how should we handle late buffers?  drop them?
561 // notify the producer?
// Produce one buffer, send it downstream, and schedule the next
// SEND_NEW_BUFFER_EVENT based on the total number of frames delivered.
// how should we handle late buffers?  drop them?
// notify the producer?
status_t
_SoundPlayNode::SendNewBuffer(const media_timed_event *event, bigtime_t lateness, bool realTimeEvent)
{
	CALLED();
	// printf("latency = %12Ld, event = %12Ld, sched = %5Ld, arrive at %12Ld, now %12Ld, current lateness %12Ld\n", EventLatency() + SchedulingLatency(), EventLatency(), SchedulingLatency(), event->event_time, TimeSource()->Now(), lateness);

	// make sure we're both started *and* connected before delivering a buffer
	if ((RunState() != BMediaEventLooper::B_STARTED) || (mOutput.destination == media_destination::null))
		return B_OK;

	// The event->event_time is the time at which the buffer we are preparing here should
	// arrive at it's destination. The MediaEventLooper should have scheduled us early enough
	// (based on EventLatency() and the SchedulingLatency()) to make this possible.
	// lateness is independent of EventLatency()!

	// diagnostic only: warn when scheduling slipped by more than a third
	// of one buffer duration
	if (lateness > (BufferDuration() / 3) ) {
		printf("_SoundPlayNode::SendNewBuffer, event scheduled much too late, lateness is %Ld\n", lateness);
	}

	// skip buffer creation if output not enabled
	if (mOutputEnabled) {

		// Get the next buffer of data
		BBuffer* buffer = FillNextBuffer(event->event_time);

		if (buffer) {

			// If we are ready way too early, decrase internal latency
/*
			bigtime_t how_early = event->event_time - TimeSource()->Now() - mLatency - mInternalLatency;
			if (how_early > 5000) {

				printf("_SoundPlayNode::SendNewBuffer, event scheduled too early, how_early is %Ld\n", how_early);

				if (mTooEarlyCount++ == 5) {
					mInternalLatency -= how_early;
					if (mInternalLatency < 500)
						mInternalLatency = 500;
					printf("_SoundPlayNode::SendNewBuffer setting internal latency to %Ld\n", mInternalLatency);
					SetEventLatency(mLatency + mInternalLatency);
					mTooEarlyCount = 0;
				}
			}
*/
			// send the buffer downstream if and only if output is enabled
			if (B_OK != SendBuffer(buffer, mOutput.destination)) {
				// we need to recycle the buffer
				// if the call to SendBuffer() fails
				printf("_SoundPlayNode::SendNewBuffer: Buffer sending failed\n");
				buffer->Recycle();
			}
		}
	}

	// track how much media we've delivered so far
	// (frames per buffer = buffer size / (bytes per sample * channels));
	// note this advances even when the output is disabled, so the schedule
	// below keeps real-time pace
	size_t nFrames = mOutput.format.u.raw_audio.buffer_size
		/ ((mOutput.format.u.raw_audio.format & media_raw_audio_format::B_AUDIO_SIZE_MASK)
		* mOutput.format.u.raw_audio.channel_count);
	mFramesSent += nFrames;

	// The buffer is on its way; now schedule the next one to go
	// nextEvent is the time at which the buffer should arrive at it's destination
	bigtime_t nextEvent = mStartTime + bigtime_t((1000000LL * mFramesSent) / (int32)mOutput.format.u.raw_audio.frame_rate);
	media_timed_event nextBufferEvent(nextEvent, SEND_NEW_BUFFER_EVENT);
	EventQueue()->AddEvent(nextBufferEvent);

	return B_OK;
}
630 
631 status_t
632 _SoundPlayNode::HandleDataStatus(
633 						const media_timed_event *event,
634 						bigtime_t lateness,
635 						bool realTimeEvent)
636 {
637 	TRACE("_SoundPlayNode::HandleDataStatus status: %li, lateness: %Li\n", event->data, lateness);
638 	switch(event->data) {
639 		case B_DATA_NOT_AVAILABLE:
640 			break;
641 		case B_DATA_AVAILABLE:
642 			break;
643 		case B_PRODUCER_STOPPED:
644 			break;
645 		default:
646 			break;
647 	}
648 	return B_OK;
649 }
650 
651 status_t
652 _SoundPlayNode::HandleStart(
653 						const media_timed_event *event,
654 						bigtime_t lateness,
655 						bool realTimeEvent)
656 {
657 	CALLED();
658 	// don't do anything if we're already running
659 	if (RunState() != B_STARTED)
660 	{
661 		// We want to start sending buffers now, so we set up the buffer-sending bookkeeping
662 		// and fire off the first "produce a buffer" event.
663 
664 		mFramesSent = 0;
665 		mStartTime = event->event_time;
666 		media_timed_event firstBufferEvent(event->event_time, SEND_NEW_BUFFER_EVENT);
667 
668 		// Alternatively, we could call HandleEvent() directly with this event, to avoid a trip through
669 		// the event queue, like this:
670 		//
671 		//		this->HandleEvent(&firstBufferEvent, 0, false);
672 		//
673 		EventQueue()->AddEvent(firstBufferEvent);
674 	}
675 	return B_OK;
676 }
677 
// Seek request from the event queue; we produce a continuous stream and
// currently only trace the request.
status_t
_SoundPlayNode::HandleSeek(
						const media_timed_event *event,
						bigtime_t lateness,
						bool realTimeEvent)
{
	CALLED();
	TRACE("_SoundPlayNode::HandleSeek(t=%lld, d=%li, bd=%lld)\n", event->event_time, event->data, event->bigdata);
	return B_OK;
}
688 
// Time-warp request from the event queue; nothing to adjust here.
status_t
_SoundPlayNode::HandleWarp(
						const media_timed_event *event,
						bigtime_t lateness,
						bool realTimeEvent)
{
	CALLED();
	return B_OK;
}
698 
// Stop producing: drop every pending SEND_NEW_BUFFER_EVENT so no further
// buffers are generated after the stop time.
status_t
_SoundPlayNode::HandleStop(
						const media_timed_event *event,
						bigtime_t lateness,
						bool realTimeEvent)
{
	CALLED();
	// flush the queue so downstreamers don't get any more
	EventQueue()->FlushEvents(0, BTimedEventQueue::B_ALWAYS, true, SEND_NEW_BUFFER_EVENT);

	return B_OK;
}
711 
// Parameter-change event; this node exposes no parameters, so it is a no-op.
status_t
_SoundPlayNode::HandleParameter(
				const media_timed_event *event,
				bigtime_t lateness,
				bool realTimeEvent)
{
	CALLED();
	return B_OK;
}
721 
722 void
723 _SoundPlayNode::AllocateBuffers()
724 {
725 	CALLED();
726 
727 	// allocate enough buffers to span our downstream latency, plus one
728 	size_t size = mOutput.format.u.raw_audio.buffer_size;
729 	int32 count = int32(mLatency / BufferDuration() + 1 + 1);
730 
731 	TRACE("_SoundPlayNode::AllocateBuffers: latency = %Ld, buffer duration = %Ld, count %ld\n", mLatency, BufferDuration(), count);
732 
733 	if (count < 3)
734 		count = 3;
735 
736 	TRACE("_SoundPlayNode::AllocateBuffers: creating group of %ld buffers, size = %lu\n", count, size);
737 
738 	mBufferGroup = new BBufferGroup(size, count);
739 	if (mBufferGroup->InitCheck() != B_OK) {
740 		ERROR("_SoundPlayNode::AllocateBuffers: BufferGroup::InitCheck() failed\n");
741 	}
742 }
743 
744 BBuffer*
745 _SoundPlayNode::FillNextBuffer(bigtime_t event_time)
746 {
747 	CALLED();
748 
749 	// get a buffer from our buffer group
750 	BBuffer* buf = mBufferGroup->RequestBuffer(mOutput.format.u.raw_audio.buffer_size, BufferDuration() / 2);
751 
752 	// if we fail to get a buffer (for example, if the request times out), we skip this
753 	// buffer and go on to the next, to avoid locking up the control thread
754 	if (!buf) {
755 		ERROR("_SoundPlayNode::FillNextBuffer: RequestBuffer failed\n");
756 		return NULL;
757 	}
758 
759 	if (mPlayer->HasData()) {
760 		mPlayer->PlayBuffer(buf->Data(),
761 			mOutput.format.u.raw_audio.buffer_size, mOutput.format.u.raw_audio);
762 	} else {
763 		memset(buf->Data(), 0, mOutput.format.u.raw_audio.buffer_size);
764 	}
765 
766 	// fill in the buffer header
767 	media_header* hdr = buf->Header();
768 	hdr->type = B_MEDIA_RAW_AUDIO;
769 	hdr->size_used = mOutput.format.u.raw_audio.buffer_size;
770 	hdr->time_source = TimeSource()->ID();
771 	hdr->start_time = event_time;
772 	return buf;
773 }
774