/*
 * Copyright 2002-2010, Haiku.
 * Distributed under the terms of the MIT License.
 *
 * Authors:
 *		Marcus Overhagen
 *		Jérôme Duval
 */


/*!	This is the BBufferProducer used internally by BSoundPlayer.
*/


#include "SoundPlayNode.h"

#include <string.h>
#include <stdlib.h>
#include <unistd.h>

#include <TimeSource.h>
#include <MediaRoster.h>
#include "debug.h"


#define SEND_NEW_BUFFER_EVENT (BTimedEventQueue::B_USER_EVENT + 1)


namespace BPrivate {


SoundPlayNode::SoundPlayNode(const char* name, BSoundPlayer* player)
	:
	BMediaNode(name),
	BBufferProducer(B_MEDIA_RAW_AUDIO),
	BMediaEventLooper(),
	fPlayer(player),
	fInitStatus(B_OK),
	fOutputEnabled(true),
	fBufferGroup(NULL),
	fFramesSent(0),
	fTooEarlyCount(0)
{
	CALLED();
	fOutput.format.type = B_MEDIA_RAW_AUDIO;
	fOutput.format.u.raw_audio = media_multi_audio_format::wildcard;
}


SoundPlayNode::~SoundPlayNode()
{
	CALLED();
	Quit();
}


bool
SoundPlayNode::IsPlaying()
{
	return RunState() == B_STARTED;
}


bigtime_t
SoundPlayNode::CurrentTime()
{
	int frameRate = (int)fOutput.format.u.raw_audio.frame_rate;
	return frameRate == 0 ? 0
		: bigtime_t((1000000LL * fFramesSent) / frameRate);
}


media_multi_audio_format
SoundPlayNode::Format() const
{
	return fOutput.format.u.raw_audio;
}


// #pragma mark - implementation of BMediaNode


BMediaAddOn*
SoundPlayNode::AddOn(int32* _internalID) const
{
	CALLED();
	// This only gets called if we are in an add-on.
	return NULL;
}


void
SoundPlayNode::Preroll()
{
	CALLED();
	// TODO: Performance opportunity
	BMediaNode::Preroll();
}


status_t
SoundPlayNode::HandleMessage(int32 message, const void* data, size_t size)
{
	CALLED();
	return B_ERROR;
}


void
SoundPlayNode::NodeRegistered()
{
	CALLED();

	if (fInitStatus != B_OK) {
		ReportError(B_NODE_IN_DISTRESS);
		return;
	}

	SetPriority(B_URGENT_PRIORITY);

	fOutput.format.type = B_MEDIA_RAW_AUDIO;
	fOutput.format.u.raw_audio = media_multi_audio_format::wildcard;
	fOutput.destination = media_destination::null;
	fOutput.source.port = ControlPort();
	fOutput.source.id = 0;
	fOutput.node = Node();
	strcpy(fOutput.name, Name());

	Run();
}


status_t
SoundPlayNode::RequestCompleted(const media_request_info& info)
{
	CALLED();
	return B_OK;
}


void
SoundPlayNode::SetTimeSource(BTimeSource* timeSource)
{
	CALLED();
	BMediaNode::SetTimeSource(timeSource);
}


void
SoundPlayNode::SetRunMode(run_mode mode)
{
	TRACE("SoundPlayNode::SetRunMode mode:%i\n", mode);
	BMediaNode::SetRunMode(mode);
}


// #pragma mark - implementation for BBufferProducer


status_t
SoundPlayNode::FormatSuggestionRequested(media_type type, int32 /*quality*/,
	media_format* format)
{
	// FormatSuggestionRequested() is not necessarily part of the format
	// negotiation process; it's simply an interrogation -- the caller wants
	// to see what the node's preferred data format is, given a suggestion by
	// the caller.
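	// In short: a caller asking about B_MEDIA_RAW_AUDIO (or leaving the type
	// as a wildcard) gets back a fully wildcarded raw audio format, meaning
	// we accept any channel count, frame rate and sample format; any other
	// media type is rejected below.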
	CALLED();

	// a wildcard type is okay; but we only support raw audio
	if (type != B_MEDIA_RAW_AUDIO && type != B_MEDIA_UNKNOWN_TYPE)
		return B_MEDIA_BAD_FORMAT;

	// this is the format we'll be returning (our preferred format)
	format->type = B_MEDIA_RAW_AUDIO;
	format->u.raw_audio = media_multi_audio_format::wildcard;

	return B_OK;
}


status_t
SoundPlayNode::FormatProposal(const media_source& output, media_format* format)
{
	// FormatProposal() is the first stage in the BMediaRoster::Connect()
	// process. We hand out a suggested format, with wildcards for any
	// variations we support.
	CALLED();

	// is this a proposal for our one output?
	if (output != fOutput.source) {
		TRACE("SoundPlayNode::FormatProposal returning B_MEDIA_BAD_SOURCE\n");
		return B_MEDIA_BAD_SOURCE;
	}

	// if wildcard, change it to raw audio
	if (format->type == B_MEDIA_UNKNOWN_TYPE)
		format->type = B_MEDIA_RAW_AUDIO;

	// if not raw audio, we can't support it
	if (format->type != B_MEDIA_RAW_AUDIO) {
		TRACE("SoundPlayNode::FormatProposal returning B_MEDIA_BAD_FORMAT\n");
		return B_MEDIA_BAD_FORMAT;
	}

#if DEBUG > 0
	char buf[100];
	string_for_format(*format, buf, sizeof(buf));
	TRACE("SoundPlayNode::FormatProposal: format %s\n", buf);
#endif

	return B_OK;
}


status_t
SoundPlayNode::FormatChangeRequested(const media_source& source,
	const media_destination& destination, media_format* _format,
	int32* /* deprecated */)
{
	CALLED();

	// we don't support any other formats, so we just reject any format changes.
	return B_ERROR;
}


status_t
SoundPlayNode::GetNextOutput(int32* cookie, media_output* _output)
{
	CALLED();

	if (*cookie == 0) {
		*_output = fOutput;
		*cookie += 1;
		return B_OK;
	} else {
		return B_BAD_INDEX;
	}
}


status_t
SoundPlayNode::DisposeOutputCookie(int32 cookie)
{
	CALLED();
	// do nothing because we don't use the cookie for anything special
	return B_OK;
}


status_t
SoundPlayNode::SetBufferGroup(const media_source& forSource,
	BBufferGroup* newGroup)
{
	CALLED();

	// is this our output?
	if (forSource != fOutput.source) {
		TRACE("SoundPlayNode::SetBufferGroup returning B_MEDIA_BAD_SOURCE\n");
		return B_MEDIA_BAD_SOURCE;
	}

	// Are we being passed the buffer group we're already using?
	if (newGroup == fBufferGroup)
		return B_OK;

	// Ahh, someone wants us to use a different buffer group. At this point we
	// delete the one we are using and use the specified one instead.
	// If the specified group is NULL, we need to recreate one ourselves, and
	// use *that*. Note that if we're caching a BBuffer that we requested
	// earlier, we have to Recycle() that buffer *before* deleting the buffer
	// group, otherwise we'll deadlock waiting for that buffer to be recycled!
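	// (This node never caches a buffer between events -- in SendNewBuffer()
	// every buffer we request is either sent downstream or recycled right
	// away -- so the delete below only has to wait for buffers that are
	// still in flight downstream.)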
	delete fBufferGroup;
		// waits for all buffers to recycle

	if (newGroup != NULL) {
		// we were given a valid group; just use that one from now on
		fBufferGroup = newGroup;
		return B_OK;
	}

	// we were passed a NULL group pointer; that means we construct
	// our own buffer group to use from now on
	return AllocateBuffers();
}


status_t
SoundPlayNode::GetLatency(bigtime_t* _latency)
{
	CALLED();

	// report our *total* latency: internal plus downstream plus scheduling
	*_latency = EventLatency() + SchedulingLatency();
	return B_OK;
}


status_t
SoundPlayNode::PrepareToConnect(const media_source& what,
	const media_destination& where, media_format* format,
	media_source* _source, char* _name)
{
	// PrepareToConnect() is the second stage of format negotiations that
	// happens inside BMediaRoster::Connect(). At this point, the consumer's
	// AcceptFormat() method has been called, and that node has potentially
	// changed the proposed format. It may also have left wildcards in the
	// format. PrepareToConnect() *must* fully specialize the format before
	// returning!
	CALLED();

	// is this our output?
	if (what != fOutput.source) {
		TRACE("SoundPlayNode::PrepareToConnect returning "
			"B_MEDIA_BAD_SOURCE\n");
		return B_MEDIA_BAD_SOURCE;
	}

	// are we already connected?
	if (fOutput.destination != media_destination::null)
		return B_MEDIA_ALREADY_CONNECTED;

	// the format may not yet be fully specialized (the consumer might have
	// passed back some wildcards). Finish specializing it now, and return an
	// error if we don't support the requested format.

#if DEBUG > 0
	char buf[100];
	string_for_format(*format, buf, sizeof(buf));
	TRACE("SoundPlayNode::PrepareToConnect: input format %s\n", buf);
#endif

	// if not raw audio, we can't support it
	if (format->type != B_MEDIA_UNKNOWN_TYPE
		&& format->type != B_MEDIA_RAW_AUDIO) {
		TRACE("SoundPlayNode::PrepareToConnect: non raw format, returning "
			"B_MEDIA_BAD_FORMAT\n");
		return B_MEDIA_BAD_FORMAT;
	}

	// the haiku mixer might have a hint for us, so check for it
#define FORMAT_USER_DATA_TYPE		0x7294a8f3
#define FORMAT_USER_DATA_MAGIC_1	0xc84173bd
#define FORMAT_USER_DATA_MAGIC_2	0x4af62b7d
	uint32 channel_count = 0;
	float frame_rate = 0;
	if (format->user_data_type == FORMAT_USER_DATA_TYPE
		&& *(uint32 *)&format->user_data[0] == FORMAT_USER_DATA_MAGIC_1
		&& *(uint32 *)&format->user_data[44] == FORMAT_USER_DATA_MAGIC_2) {
		channel_count = *(uint32 *)&format->user_data[4];
		frame_rate = *(float *)&format->user_data[20];
		TRACE("SoundPlayNode::PrepareToConnect: found mixer info: "
			"channel_count %ld, frame_rate %.1f\n", channel_count, frame_rate);
	}

	media_format default_format;
	default_format.type = B_MEDIA_RAW_AUDIO;
	default_format.u.raw_audio.frame_rate = frame_rate > 0 ? frame_rate : 44100;
	default_format.u.raw_audio.channel_count = channel_count > 0
		? channel_count : 2;
	default_format.u.raw_audio.format = media_raw_audio_format::B_AUDIO_FLOAT;
	default_format.u.raw_audio.byte_order = B_MEDIA_HOST_ENDIAN;
	default_format.u.raw_audio.buffer_size = 0;
	format->SpecializeTo(&default_format);
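
	// If the consumer also left the buffer size wildcarded, ask the media
	// roster for a suitable size for this channel count, sample format and
	// frame rate. As a purely illustrative example: a 2048 frame buffer of
	// 2-channel B_AUDIO_FLOAT data would be 2048 * 2 * 4 = 16384 bytes.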
	if (format->u.raw_audio.buffer_size == 0) {
		format->u.raw_audio.buffer_size
			= BMediaRoster::Roster()->AudioBufferSizeFor(
				format->u.raw_audio.channel_count, format->u.raw_audio.format,
				format->u.raw_audio.frame_rate);
	}

#if DEBUG > 0
	string_for_format(*format, buf, sizeof(buf));
	TRACE("SoundPlayNode::PrepareToConnect: output format %s\n", buf);
#endif

	// Now reserve the connection, and return information about it
	fOutput.destination = where;
	fOutput.format = *format;
	*_source = fOutput.source;
	strcpy(_name, Name());
	return B_OK;
}


void
SoundPlayNode::Connect(status_t error, const media_source& source,
	const media_destination& destination, const media_format& format,
	char* name)
{
	CALLED();

	// is this our output?
	if (source != fOutput.source) {
		TRACE("SoundPlayNode::Connect returning\n");
		return;
	}

	// If something earlier failed, Connect() might still be called, but with
	// a non-zero error code. When that happens we simply unreserve the
	// connection and do nothing else.
	if (error) {
		fOutput.destination = media_destination::null;
		fOutput.format.type = B_MEDIA_RAW_AUDIO;
		fOutput.format.u.raw_audio = media_multi_audio_format::wildcard;
		return;
	}

	// Okay, the connection has been confirmed. Record the destination and
	// format that we agreed on, and report our connection name again.
	fOutput.destination = destination;
	fOutput.format = format;
	strcpy(name, Name());

	// Now that we're connected, we can determine our downstream latency.
	// Do so, then make sure we get our events early enough.
	media_node_id id;
	FindLatencyFor(fOutput.destination, &fLatency, &id);
	TRACE("SoundPlayNode::Connect: downstream latency = %Ld\n", fLatency);

	// reset our buffer duration, etc. to avoid later calculations
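	// (One buffer holds buffer_size / (bytes per sample * channel count)
	// frames; dividing by the frame rate gives its duration. For a
	// hypothetical 16384 byte buffer of 2-channel B_AUDIO_FLOAT data at
	// 44100 Hz that is 2048 frames, or about 46 ms.)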
	bigtime_t duration = ((fOutput.format.u.raw_audio.buffer_size * 1000000LL)
		/ ((fOutput.format.u.raw_audio.format
				& media_raw_audio_format::B_AUDIO_SIZE_MASK)
			* fOutput.format.u.raw_audio.channel_count))
		/ (int32)fOutput.format.u.raw_audio.frame_rate;
	SetBufferDuration(duration);
	TRACE("SoundPlayNode::Connect: buffer duration is %Ld\n", duration);

	fInternalLatency = (3 * BufferDuration()) / 4;
	TRACE("SoundPlayNode::Connect: using %Ld as internal latency\n",
		fInternalLatency);
	SetEventLatency(fLatency + fInternalLatency);

	// Set up the buffer group for our connection, as long as nobody handed us
	// a buffer group (via SetBufferGroup()) prior to this.
	// That can happen, for example, if the consumer calls
	// SetOutputBuffersFor() on us from within its Connected() method.
	if (!fBufferGroup)
		AllocateBuffers();
}


void
SoundPlayNode::Disconnect(const media_source& what,
	const media_destination& where)
{
	CALLED();

	// is this our output?
	if (what != fOutput.source) {
		TRACE("SoundPlayNode::Disconnect returning\n");
		return;
	}

	// Make sure that our connection is the one being disconnected
	if (where == fOutput.destination && what == fOutput.source) {
		fOutput.destination = media_destination::null;
		fOutput.format.type = B_MEDIA_RAW_AUDIO;
		fOutput.format.u.raw_audio = media_multi_audio_format::wildcard;
		delete fBufferGroup;
		fBufferGroup = NULL;
	} else {
		fprintf(stderr, "\tDisconnect() called with wrong source/destination "
			"(%" B_PRId32 "/%" B_PRId32 "), ours is (%" B_PRId32 "/%" B_PRId32
			")\n", what.id, where.id, fOutput.source.id,
			fOutput.destination.id);
	}
}


void
SoundPlayNode::LateNoticeReceived(const media_source& what, bigtime_t howMuch,
	bigtime_t performanceTime)
{
	CALLED();

	TRACE("SoundPlayNode::LateNoticeReceived, %" B_PRId64 " too late at %"
		B_PRId64 "\n", howMuch, performanceTime);

	// is this our output?
	if (what != fOutput.source) {
		TRACE("SoundPlayNode::LateNoticeReceived returning\n");
		return;
	}

	if (RunMode() != B_DROP_DATA) {
		// We're late, and our run mode dictates that we try to produce
		// buffers earlier in order to catch up. This argues that the
		// downstream nodes are not properly reporting their latency, but
		// there's not much we can do about that at the moment, so we try to
		// start producing buffers earlier to compensate.

		fInternalLatency += howMuch;

		if (fInternalLatency > 30000)	// avoid getting a too high latency
			fInternalLatency = 30000;

		SetEventLatency(fLatency + fInternalLatency);
		TRACE("SoundPlayNode::LateNoticeReceived: increasing latency to %"
			B_PRId64 "\n", fLatency + fInternalLatency);
	} else {
		// The other run modes dictate various strategies for sacrificing data
		// quality in the interests of timely data delivery. The way *we* do
		// this is to skip a buffer, which catches us up in time by one buffer
		// duration.

		size_t nFrames = fOutput.format.u.raw_audio.buffer_size
			/ ((fOutput.format.u.raw_audio.format
				& media_raw_audio_format::B_AUDIO_SIZE_MASK)
			* fOutput.format.u.raw_audio.channel_count);

		fFramesSent += nFrames;
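		// Advancing fFramesSent by one buffer's worth of frames pushes the
		// next SEND_NEW_BUFFER_EVENT (scheduled in SendNewBuffer()) one
		// buffer duration further into the future -- that is the buffer we
		// "skip".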
		TRACE("SoundPlayNode::LateNoticeReceived: skipping a buffer to try "
			"to catch up\n");
	}
}


void
SoundPlayNode::EnableOutput(const media_source& what, bool enabled,
	int32* /* deprecated */)
{
	CALLED();

	// If I had more than one output, I'd have to walk my list of output
	// records to see which one matched the given source, and then
	// enable/disable that one.
	// But this node only has one output, so I just make sure the given source
	// matches, then set the enable state accordingly.

	// is this our output?
	if (what != fOutput.source) {
		fprintf(stderr, "SoundPlayNode::EnableOutput returning\n");
		return;
	}

	fOutputEnabled = enabled;
}


void
SoundPlayNode::AdditionalBufferRequested(const media_source& source,
	media_buffer_id previousBuffer, bigtime_t previousTime,
	const media_seek_tag* previousTag)
{
	CALLED();
	// we don't support offline mode
	return;
}


void
SoundPlayNode::LatencyChanged(const media_source& source,
	const media_destination& destination, bigtime_t newLatency, uint32 flags)
{
	CALLED();

	TRACE("SoundPlayNode::LatencyChanged: new_latency %" B_PRId64 "\n",
		newLatency);

	// something downstream changed latency, so we need to start producing
	// buffers earlier (or later) than we were previously. Make sure that the
	// connection that changed is ours, and adjust to the new downstream
	// latency if so.
	if (source == fOutput.source && destination == fOutput.destination) {
		fLatency = newLatency;
		SetEventLatency(fLatency + fInternalLatency);
	} else {
		TRACE("SoundPlayNode::LatencyChanged: ignored\n");
	}
}


// #pragma mark - implementation for BMediaEventLooper


void
SoundPlayNode::HandleEvent(const media_timed_event* event, bigtime_t lateness,
	bool realTimeEvent)
{
	CALLED();
	switch (event->type) {
		case BTimedEventQueue::B_START:
			HandleStart(event, lateness, realTimeEvent);
			break;
		case BTimedEventQueue::B_SEEK:
			HandleSeek(event, lateness, realTimeEvent);
			break;
		case BTimedEventQueue::B_WARP:
			HandleWarp(event, lateness, realTimeEvent);
			break;
		case BTimedEventQueue::B_STOP:
			HandleStop(event, lateness, realTimeEvent);
			break;
		case BTimedEventQueue::B_HANDLE_BUFFER:
			// we don't get any buffers
			break;
		case SEND_NEW_BUFFER_EVENT:
			if (RunState() == BMediaEventLooper::B_STARTED)
				SendNewBuffer(event, lateness, realTimeEvent);
			break;
		case BTimedEventQueue::B_DATA_STATUS:
			HandleDataStatus(event, lateness, realTimeEvent);
			break;
		case BTimedEventQueue::B_PARAMETER:
			HandleParameter(event, lateness, realTimeEvent);
			break;
		default:
			fprintf(stderr, " unknown event type: %" B_PRId32 "\n",
				event->type);
			break;
	}
}


// #pragma mark - protected methods


// how should we handle late buffers? drop them?
// notify the producer?
status_t
SoundPlayNode::SendNewBuffer(const media_timed_event* event,
	bigtime_t lateness, bool realTimeEvent)
{
	CALLED();
	// printf("latency = %12Ld, event = %12Ld, sched = %5Ld, arrive at %12Ld, now %12Ld, current lateness %12Ld\n", EventLatency() + SchedulingLatency(), EventLatency(), SchedulingLatency(), event->event_time, TimeSource()->Now(), lateness);

	// make sure we're both started *and* connected before delivering a buffer
	if (RunState() != BMediaEventLooper::B_STARTED
		|| fOutput.destination == media_destination::null)
		return B_OK;

	// The event->event_time is the time at which the buffer we are preparing
	// here should arrive at its destination. The MediaEventLooper should have
	// scheduled us early enough (based on EventLatency() and the
	// SchedulingLatency()) to make this possible.
	// lateness is independent of EventLatency()!
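	// Note that a late event is only logged here; the buffer is still
	// produced and sent (as long as output is enabled). Actual corrective
	// action -- raising the latency or skipping a buffer -- happens in
	// LateNoticeReceived().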
	if (lateness > (BufferDuration() / 3)) {
		printf("SoundPlayNode::SendNewBuffer, event scheduled much too late, "
			"lateness is %" B_PRId64 "\n", lateness);
	}

	// skip buffer creation if output not enabled
	if (fOutputEnabled) {
		// Get the next buffer of data
		BBuffer* buffer = FillNextBuffer(event->event_time);

		if (buffer) {
			// If we are ready way too early, decrease internal latency
/*
			bigtime_t how_early = event->event_time - TimeSource()->Now()
				- fLatency - fInternalLatency;
			if (how_early > 5000) {
				printf("SoundPlayNode::SendNewBuffer, event scheduled too "
					"early, how_early is %Ld\n", how_early);

				if (fTooEarlyCount++ == 5) {
					fInternalLatency -= how_early;
					if (fInternalLatency < 500)
						fInternalLatency = 500;
					printf("SoundPlayNode::SendNewBuffer setting internal "
						"latency to %Ld\n", fInternalLatency);
					SetEventLatency(fLatency + fInternalLatency);
					fTooEarlyCount = 0;
				}
			}
*/
			// send the buffer downstream if and only if output is enabled
			if (SendBuffer(buffer, fOutput.source, fOutput.destination)
					!= B_OK) {
				// we need to recycle the buffer
				// if the call to SendBuffer() fails
				printf("SoundPlayNode::SendNewBuffer: Buffer sending "
					"failed\n");
				buffer->Recycle();
			}
		}
	}

	// track how much media we've delivered so far
	size_t nFrames = fOutput.format.u.raw_audio.buffer_size
		/ ((fOutput.format.u.raw_audio.format
			& media_raw_audio_format::B_AUDIO_SIZE_MASK)
		* fOutput.format.u.raw_audio.channel_count);
	fFramesSent += nFrames;

	// The buffer is on its way; now schedule the next one to go
	// nextEvent is the time at which the buffer should arrive at its
	// destination
	bigtime_t nextEvent = fStartTime + bigtime_t((1000000LL * fFramesSent)
		/ (int32)fOutput.format.u.raw_audio.frame_rate);
	media_timed_event nextBufferEvent(nextEvent, SEND_NEW_BUFFER_EVENT);
	EventQueue()->AddEvent(nextBufferEvent);

	return B_OK;
}


status_t
SoundPlayNode::HandleDataStatus(const media_timed_event* event,
	bigtime_t lateness, bool realTimeEvent)
{
	TRACE("SoundPlayNode::HandleDataStatus status: %" B_PRId32 ", lateness: %"
		B_PRId64 "\n", event->data, lateness);

	switch (event->data) {
		case B_DATA_NOT_AVAILABLE:
			break;
		case B_DATA_AVAILABLE:
			break;
		case B_PRODUCER_STOPPED:
			break;
		default:
			break;
	}
	return B_OK;
}


status_t
SoundPlayNode::HandleStart(const media_timed_event* event, bigtime_t lateness,
	bool realTimeEvent)
{
	CALLED();
	// don't do anything if we're already running
	if (RunState() != B_STARTED) {
		// We want to start sending buffers now, so we set up the
		// buffer-sending bookkeeping and fire off the first "produce a
		// buffer" event.
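		// fStartTime recorded here anchors all later scheduling:
		// SendNewBuffer() computes every buffer's target arrival time as
		// fStartTime + 1000000 * fFramesSent / frame_rate.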
		fFramesSent = 0;
		fStartTime = event->event_time;
		media_timed_event firstBufferEvent(event->event_time,
			SEND_NEW_BUFFER_EVENT);

		// Alternatively, we could call HandleEvent() directly with this
		// event, to avoid a trip through the event queue, like this:
		//
		//		this->HandleEvent(&firstBufferEvent, 0, false);
		//
		EventQueue()->AddEvent(firstBufferEvent);
	}
	return B_OK;
}


status_t
SoundPlayNode::HandleSeek(const media_timed_event* event, bigtime_t lateness,
	bool realTimeEvent)
{
	CALLED();
	TRACE("SoundPlayNode::HandleSeek(t=%" B_PRId64 ", d=%" B_PRId32 ", bd=%"
		B_PRId64 ")\n", event->event_time, event->data, event->bigdata);
	return B_OK;
}


status_t
SoundPlayNode::HandleWarp(const media_timed_event* event, bigtime_t lateness,
	bool realTimeEvent)
{
	CALLED();
	return B_OK;
}


status_t
SoundPlayNode::HandleStop(const media_timed_event* event, bigtime_t lateness,
	bool realTimeEvent)
{
	CALLED();
	// flush the queue so downstreamers don't get any more
	EventQueue()->FlushEvents(0, BTimedEventQueue::B_ALWAYS, true,
		SEND_NEW_BUFFER_EVENT);

	return B_OK;
}


status_t
SoundPlayNode::HandleParameter(const media_timed_event* event,
	bigtime_t lateness, bool realTimeEvent)
{
	CALLED();
	return B_OK;
}


status_t
SoundPlayNode::AllocateBuffers()
{
	CALLED();

	// allocate enough buffers to span our downstream latency, plus one
	size_t size = fOutput.format.u.raw_audio.buffer_size;
	int32 count = int32(fLatency / BufferDuration() + 1 + 1);

	TRACE("SoundPlayNode::AllocateBuffers: latency = %" B_PRId64 ", buffer "
		"duration = %" B_PRId64 ", count %" B_PRId32 "\n", fLatency,
		BufferDuration(), count);

	if (count < 3)
		count = 3;

	TRACE("SoundPlayNode::AllocateBuffers: creating group of %" B_PRId32
		" buffers, size = %" B_PRIuSIZE "\n", count, size);

	fBufferGroup = new BBufferGroup(size, count);
	if (fBufferGroup->InitCheck() != B_OK) {
		ERROR("SoundPlayNode::AllocateBuffers: BufferGroup::InitCheck() "
			"failed\n");
	}

	return fBufferGroup->InitCheck();
}


BBuffer*
SoundPlayNode::FillNextBuffer(bigtime_t eventTime)
{
	CALLED();

	// get a buffer from our buffer group
	BBuffer* buffer = fBufferGroup->RequestBuffer(
		fOutput.format.u.raw_audio.buffer_size, BufferDuration() / 2);

	// If we fail to get a buffer (for example, if the request times out), we
	// skip this buffer and go on to the next, to avoid locking up the control
	// thread
	if (buffer == NULL) {
		ERROR("SoundPlayNode::FillNextBuffer: RequestBuffer failed\n");
		return NULL;
	}

	if (fPlayer->HasData()) {
		fPlayer->PlayBuffer(buffer->Data(),
			fOutput.format.u.raw_audio.buffer_size, fOutput.format.u.raw_audio);
	} else
		memset(buffer->Data(), 0, fOutput.format.u.raw_audio.buffer_size);

	// fill in the buffer header
	media_header* header = buffer->Header();
	header->type = B_MEDIA_RAW_AUDIO;
	header->size_used = fOutput.format.u.raw_audio.buffer_size;
	header->time_source = TimeSource()->ID();
	header->start_time = eventTime;

	return buffer;
}


}	// namespace BPrivate