/*
 * Copyright 2002-2010, Haiku.
 * Distributed under the terms of the MIT License.
 *
 * Authors:
 *		Marcus Overhagen
 *		Jérôme Duval
 */


/*!	This is the BBufferProducer used internally by BSoundPlayer.
*/


#include "SoundPlayNode.h"

#include <string.h>
#include <stdlib.h>
#include <unistd.h>

#include <TimeSource.h>
#include <MediaRoster.h>
#include "debug.h"


#define SEND_NEW_BUFFER_EVENT (BTimedEventQueue::B_USER_EVENT + 1)


namespace BPrivate {


SoundPlayNode::SoundPlayNode(const char* name, BSoundPlayer* player)
	:
	BMediaNode(name),
	BBufferProducer(B_MEDIA_RAW_AUDIO),
	BMediaEventLooper(),
	fPlayer(player),
	fInitStatus(B_OK),
	fOutputEnabled(true),
	fBufferGroup(NULL),
	fFramesSent(0),
	fTooEarlyCount(0)
{
	CALLED();
	fOutput.format.type = B_MEDIA_RAW_AUDIO;
	fOutput.format.u.raw_audio = media_multi_audio_format::wildcard;
}


SoundPlayNode::~SoundPlayNode()
{
	CALLED();
	Quit();
}


bool
SoundPlayNode::IsPlaying()
{
	return RunState() == B_STARTED;
}


bigtime_t
SoundPlayNode::CurrentTime()
{
	int frameRate = (int)fOutput.format.u.raw_audio.frame_rate;
	return frameRate == 0 ? 0
		: bigtime_t((1000000LL * fFramesSent) / frameRate);
}


media_multi_audio_format
SoundPlayNode::Format() const
{
	return fOutput.format.u.raw_audio;
}


// #pragma mark - implementation of BMediaNode


BMediaAddOn*
SoundPlayNode::AddOn(int32* _internalID) const
{
	CALLED();
	// This only gets called if we are in an add-on.
	return NULL;
}


void
SoundPlayNode::Preroll()
{
	CALLED();
	// TODO: Performance opportunity
	BMediaNode::Preroll();
}


status_t
SoundPlayNode::HandleMessage(int32 message, const void* data, size_t size)
{
	CALLED();
	return B_ERROR;
}


void
SoundPlayNode::NodeRegistered()
{
	CALLED();

	if (fInitStatus != B_OK) {
		ReportError(B_NODE_IN_DISTRESS);
		return;
	}

	SetPriority(B_URGENT_PRIORITY);

	fOutput.format.type = B_MEDIA_RAW_AUDIO;
	fOutput.format.u.raw_audio = media_multi_audio_format::wildcard;
	fOutput.destination = media_destination::null;
	fOutput.source.port = ControlPort();
	fOutput.source.id = 0;
	fOutput.node = Node();
	strcpy(fOutput.name, Name());

	Run();
}


status_t
SoundPlayNode::RequestCompleted(const media_request_info& info)
{
	CALLED();
	return B_OK;
}


void
SoundPlayNode::SetTimeSource(BTimeSource* timeSource)
{
	CALLED();
	BMediaNode::SetTimeSource(timeSource);
}


void
SoundPlayNode::SetRunMode(run_mode mode)
{
	TRACE("SoundPlayNode::SetRunMode mode:%i\n", mode);
	BMediaNode::SetRunMode(mode);
}


// #pragma mark - implementation for BBufferProducer


status_t
SoundPlayNode::FormatSuggestionRequested(media_type type, int32 /*quality*/,
	media_format* format)
{
	// FormatSuggestionRequested() is not necessarily part of the format
	// negotiation process; it's simply an interrogation -- the caller wants
	// to see what the node's preferred data format is, given a suggestion by
	// the caller.
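	// Our preference is as permissive as possible: raw audio with every
	// field left as a wildcard, so the caller can fill in whatever suits it.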
	CALLED();

	// a wildcard type is okay; but we only support raw audio
	if (type != B_MEDIA_RAW_AUDIO && type != B_MEDIA_UNKNOWN_TYPE)
		return B_MEDIA_BAD_FORMAT;

	// this is the format we'll be returning (our preferred format)
	format->type = B_MEDIA_RAW_AUDIO;
	format->u.raw_audio = media_multi_audio_format::wildcard;

	return B_OK;
}


status_t
SoundPlayNode::FormatProposal(const media_source& output, media_format* format)
{
	// FormatProposal() is the first stage in the BMediaRoster::Connect()
	// process. We hand out a suggested format, with wildcards for any
	// variations we support.
	CALLED();

	// is this a proposal for our one output?
	if (output != fOutput.source) {
		TRACE("SoundPlayNode::FormatProposal returning B_MEDIA_BAD_SOURCE\n");
		return B_MEDIA_BAD_SOURCE;
	}

	// if wildcard, change it to raw audio
	if (format->type == B_MEDIA_UNKNOWN_TYPE)
		format->type = B_MEDIA_RAW_AUDIO;

	// if not raw audio, we can't support it
	if (format->type != B_MEDIA_RAW_AUDIO) {
		TRACE("SoundPlayNode::FormatProposal returning B_MEDIA_BAD_FORMAT\n");
		return B_MEDIA_BAD_FORMAT;
	}

#if DEBUG > 0
	char buf[100];
	string_for_format(*format, buf, sizeof(buf));
	TRACE("SoundPlayNode::FormatProposal: format %s\n", buf);
#endif

	return B_OK;
}


status_t
SoundPlayNode::FormatChangeRequested(const media_source& source,
	const media_destination& destination, media_format* _format,
	int32* /* deprecated */)
{
	CALLED();

	// we don't support any other formats, so we just reject any format
	// changes.
	return B_ERROR;
}


status_t
SoundPlayNode::GetNextOutput(int32* cookie, media_output* _output)
{
	CALLED();

	if (*cookie == 0) {
		*_output = fOutput;
		*cookie += 1;
		return B_OK;
	} else {
		return B_BAD_INDEX;
	}
}


status_t
SoundPlayNode::DisposeOutputCookie(int32 cookie)
{
	CALLED();
	// do nothing because we don't use the cookie for anything special
	return B_OK;
}


status_t
SoundPlayNode::SetBufferGroup(const media_source& forSource,
	BBufferGroup* newGroup)
{
	CALLED();

	// is this our output?
	if (forSource != fOutput.source) {
		TRACE("SoundPlayNode::SetBufferGroup returning B_MEDIA_BAD_SOURCE\n");
		return B_MEDIA_BAD_SOURCE;
	}

	// Are we being passed the buffer group we're already using?
	if (newGroup == fBufferGroup)
		return B_OK;

	// Ahh, someone wants us to use a different buffer group. At this point we
	// delete the one we are using and use the specified one instead. If the
	// specified group is NULL, we need to recreate one ourselves, and use
	// *that*. Note that if we're caching a BBuffer that we requested earlier,
	// we have to Recycle() that buffer *before* deleting the buffer group,
	// otherwise we'll deadlock waiting for that buffer to be recycled!
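	// Whatever group we end up with becomes ours to delete; both
	// Disconnect() and a later SetBufferGroup() call rely on that.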
	delete fBufferGroup;
		// waits for all buffers to recycle

	if (newGroup != NULL) {
		// we were given a valid group; just use that one from now on
		fBufferGroup = newGroup;
		return B_OK;
	}

	// we were passed a NULL group pointer; that means we construct
	// our own buffer group to use from now on
	return AllocateBuffers();
}


status_t
SoundPlayNode::GetLatency(bigtime_t* _latency)
{
	CALLED();

	// report our *total* latency: internal plus downstream plus scheduling
	*_latency = EventLatency() + SchedulingLatency();
	return B_OK;
}


status_t
SoundPlayNode::PrepareToConnect(const media_source& what,
	const media_destination& where, media_format* format,
	media_source* _source, char* _name)
{
	// PrepareToConnect() is the second stage of format negotiations that
	// happens inside BMediaRoster::Connect(). At this point, the consumer's
	// AcceptFormat() method has been called, and that node has potentially
	// changed the proposed format. It may also have left wildcards in the
	// format. PrepareToConnect() *must* fully specialize the format before
	// returning!
	CALLED();

	// is this our output?
	if (what != fOutput.source) {
		TRACE("SoundPlayNode::PrepareToConnect returning "
			"B_MEDIA_BAD_SOURCE\n");
		return B_MEDIA_BAD_SOURCE;
	}

	// are we already connected?
	if (fOutput.destination != media_destination::null)
		return B_MEDIA_ALREADY_CONNECTED;

	// the format may not yet be fully specialized (the consumer might have
	// passed back some wildcards). Finish specializing it now, and return an
	// error if we don't support the requested format.

#if DEBUG > 0
	char buf[100];
	string_for_format(*format, buf, sizeof(buf));
	TRACE("SoundPlayNode::PrepareToConnect: input format %s\n", buf);
#endif

	// if not raw audio, we can't support it
	if (format->type != B_MEDIA_UNKNOWN_TYPE
		&& format->type != B_MEDIA_RAW_AUDIO) {
		TRACE("SoundPlayNode::PrepareToConnect: non raw format, returning "
			"B_MEDIA_BAD_FORMAT\n");
		return B_MEDIA_BAD_FORMAT;
	}

	// the Haiku mixer might have a hint for us, so check for it
#define FORMAT_USER_DATA_TYPE		0x7294a8f3
#define FORMAT_USER_DATA_MAGIC_1	0xc84173bd
#define FORMAT_USER_DATA_MAGIC_2	0x4af62b7d
	uint32 channel_count = 0;
	float frame_rate = 0;
	if (format->user_data_type == FORMAT_USER_DATA_TYPE
		&& *(uint32*)&format->user_data[0] == FORMAT_USER_DATA_MAGIC_1
		&& *(uint32*)&format->user_data[44] == FORMAT_USER_DATA_MAGIC_2) {
		channel_count = *(uint32*)&format->user_data[4];
		frame_rate = *(float*)&format->user_data[20];
		TRACE("SoundPlayNode::PrepareToConnect: found mixer info: "
			"channel_count %ld, frame_rate %.1f\n", channel_count,
			frame_rate);
	}

	media_format default_format;
	default_format.type = B_MEDIA_RAW_AUDIO;
	default_format.u.raw_audio.frame_rate = frame_rate > 0 ? frame_rate : 44100;
	default_format.u.raw_audio.channel_count = channel_count > 0
		? channel_count : 2;
	default_format.u.raw_audio.format = media_raw_audio_format::B_AUDIO_FLOAT;
	default_format.u.raw_audio.byte_order = B_MEDIA_HOST_ENDIAN;
	default_format.u.raw_audio.buffer_size = 0;
	format->SpecializeTo(&default_format);

	if (format->u.raw_audio.buffer_size == 0) {
		format->u.raw_audio.buffer_size
			= BMediaRoster::Roster()->AudioBufferSizeFor(
				format->u.raw_audio.channel_count, format->u.raw_audio.format,
				format->u.raw_audio.frame_rate);
	}

#if DEBUG > 0
	string_for_format(*format, buf, sizeof(buf));
	TRACE("SoundPlayNode::PrepareToConnect: output format %s\n", buf);
#endif

	// Now reserve the connection, and return information about it
	fOutput.destination = where;
	fOutput.format = *format;
	*_source = fOutput.source;
	strcpy(_name, Name());
	return B_OK;
}


void
SoundPlayNode::Connect(status_t error, const media_source& source,
	const media_destination& destination, const media_format& format,
	char* name)
{
	CALLED();

	// is this our output?
	if (source != fOutput.source) {
		TRACE("SoundPlayNode::Connect returning\n");
		return;
	}

	// If something earlier failed, Connect() might still be called, but with
	// a non-zero error code. When that happens we simply unreserve the
	// connection and do nothing else.
	if (error) {
		fOutput.destination = media_destination::null;
		fOutput.format.type = B_MEDIA_RAW_AUDIO;
		fOutput.format.u.raw_audio = media_multi_audio_format::wildcard;
		return;
	}

	// Okay, the connection has been confirmed. Record the destination and
	// format that we agreed on, and report our connection name again.
	fOutput.destination = destination;
	fOutput.format = format;
	strcpy(name, Name());

	// Now that we're connected, we can determine our downstream latency.
	// Do so, then make sure we get our events early enough.
	media_node_id id;
	FindLatencyFor(fOutput.destination, &fLatency, &id);
	TRACE("SoundPlayNode::Connect: downstream latency = %Ld\n", fLatency);

	// reset our buffer duration, etc. to avoid later calculations
	bigtime_t duration = ((fOutput.format.u.raw_audio.buffer_size * 1000000LL)
		/ ((fOutput.format.u.raw_audio.format
				& media_raw_audio_format::B_AUDIO_SIZE_MASK)
			* fOutput.format.u.raw_audio.channel_count))
		/ (int32)fOutput.format.u.raw_audio.frame_rate;
	SetBufferDuration(duration);
	TRACE("SoundPlayNode::Connect: buffer duration is %Ld\n", duration);

	fInternalLatency = (3 * BufferDuration()) / 4;
	TRACE("SoundPlayNode::Connect: using %Ld as internal latency\n",
		fInternalLatency);
	SetEventLatency(fLatency + fInternalLatency);

	// Set up the buffer group for our connection, as long as nobody handed
	// us a buffer group (via SetBufferGroup()) prior to this. That can
	// happen, for example, if the consumer calls SetOutputBuffersFor() on us
	// from within its Connected() method.
	if (!fBufferGroup)
		AllocateBuffers();
}


void
SoundPlayNode::Disconnect(const media_source& what,
	const media_destination& where)
{
	CALLED();

	// is this our output?
	if (what != fOutput.source) {
		TRACE("SoundPlayNode::Disconnect returning\n");
		return;
	}

	// Make sure that our connection is the one being disconnected
	if (where == fOutput.destination && what == fOutput.source) {
		fOutput.destination = media_destination::null;
		fOutput.format.type = B_MEDIA_RAW_AUDIO;
		fOutput.format.u.raw_audio = media_multi_audio_format::wildcard;
		delete fBufferGroup;
		fBufferGroup = NULL;
	} else {
		fprintf(stderr, "\tDisconnect() called with wrong source/destination "
			"(%ld/%ld), ours is (%ld/%ld)\n", what.id, where.id,
			fOutput.source.id, fOutput.destination.id);
	}
}


void
SoundPlayNode::LateNoticeReceived(const media_source& what, bigtime_t howMuch,
	bigtime_t performanceTime)
{
	CALLED();

	TRACE("SoundPlayNode::LateNoticeReceived, %Ld too late at %Ld\n", howMuch,
		performanceTime);

	// is this our output?
	if (what != fOutput.source) {
		TRACE("SoundPlayNode::LateNoticeReceived returning\n");
		return;
	}

	if (RunMode() != B_DROP_DATA) {
		// We're late, and our run mode dictates that we try to produce
		// buffers earlier in order to catch up. This argues that the
		// downstream nodes are not properly reporting their latency, but
		// there's not much we can do about that at the moment, so we try to
		// start producing buffers earlier to compensate.

		fInternalLatency += howMuch;

		// don't let the internal latency grow too high
		if (fInternalLatency > 30000)
			fInternalLatency = 30000;

		SetEventLatency(fLatency + fInternalLatency);
		TRACE("SoundPlayNode::LateNoticeReceived: increasing latency to "
			"%Ld\n", fLatency + fInternalLatency);
	} else {
		// In B_DROP_DATA mode we sacrifice data quality in the interest of
		// timely data delivery. The way *we* do this is to skip a buffer,
		// which catches us up in time by one buffer duration.

		size_t nFrames = fOutput.format.u.raw_audio.buffer_size
			/ ((fOutput.format.u.raw_audio.format
					& media_raw_audio_format::B_AUDIO_SIZE_MASK)
				* fOutput.format.u.raw_audio.channel_count);

		fFramesSent += nFrames;

		TRACE("SoundPlayNode::LateNoticeReceived: skipping a buffer to try "
			"to catch up\n");
	}
}


void
SoundPlayNode::EnableOutput(const media_source& what, bool enabled,
	int32* /* deprecated */)
{
	CALLED();

	// If I had more than one output, I'd have to walk my list of output
	// records to see which one matched the given source, and then
	// enable/disable that one. But this node only has one output, so I just
	// make sure the given source matches, then set the enable state
	// accordingly.

	// is this our output?
	if (what != fOutput.source) {
		fprintf(stderr, "SoundPlayNode::EnableOutput returning\n");
		return;
	}

	fOutputEnabled = enabled;
}


void
SoundPlayNode::AdditionalBufferRequested(const media_source& source,
	media_buffer_id previousBuffer, bigtime_t previousTime,
	const media_seek_tag* previousTag)
{
	CALLED();
	// we don't support offline mode
	return;
}


void
SoundPlayNode::LatencyChanged(const media_source& source,
	const media_destination& destination, bigtime_t newLatency, uint32 flags)
{
	CALLED();

	TRACE("SoundPlayNode::LatencyChanged: new_latency %Ld\n", newLatency);

	// something downstream changed latency, so we need to start producing
	// buffers earlier (or later) than we were previously. Make sure that the
	// connection that changed is ours, and adjust to the new downstream
	// latency if so.
	if (source == fOutput.source && destination == fOutput.destination) {
		fLatency = newLatency;
		SetEventLatency(fLatency + fInternalLatency);
	} else {
		TRACE("SoundPlayNode::LatencyChanged: ignored\n");
	}
}


// #pragma mark - implementation for BMediaEventLooper


void
SoundPlayNode::HandleEvent(const media_timed_event* event, bigtime_t lateness,
	bool realTimeEvent)
{
	CALLED();
	switch (event->type) {
		case BTimedEventQueue::B_START:
			HandleStart(event, lateness, realTimeEvent);
			break;
		case BTimedEventQueue::B_SEEK:
			HandleSeek(event, lateness, realTimeEvent);
			break;
		case BTimedEventQueue::B_WARP:
			HandleWarp(event, lateness, realTimeEvent);
			break;
		case BTimedEventQueue::B_STOP:
			HandleStop(event, lateness, realTimeEvent);
			break;
		case BTimedEventQueue::B_HANDLE_BUFFER:
			// we don't get any buffers
			break;
		case SEND_NEW_BUFFER_EVENT:
			if (RunState() == BMediaEventLooper::B_STARTED)
				SendNewBuffer(event, lateness, realTimeEvent);
			break;
		case BTimedEventQueue::B_DATA_STATUS:
			HandleDataStatus(event, lateness, realTimeEvent);
			break;
		case BTimedEventQueue::B_PARAMETER:
			HandleParameter(event, lateness, realTimeEvent);
			break;
		default:
			fprintf(stderr, " unknown event type: %li\n", event->type);
			break;
	}
}


// #pragma mark - protected methods


// how should we handle late buffers? drop them?
// notify the producer?
status_t
SoundPlayNode::SendNewBuffer(const media_timed_event* event,
	bigtime_t lateness, bool realTimeEvent)
{
	CALLED();
	// printf("latency = %12Ld, event = %12Ld, sched = %5Ld, arrive at %12Ld, now %12Ld, current lateness %12Ld\n", EventLatency() + SchedulingLatency(), EventLatency(), SchedulingLatency(), event->event_time, TimeSource()->Now(), lateness);

	// make sure we're both started *and* connected before delivering a buffer
	if (RunState() != BMediaEventLooper::B_STARTED
		|| fOutput.destination == media_destination::null)
		return B_OK;

	// The event->event_time is the time at which the buffer we are preparing
	// here should arrive at its destination. The MediaEventLooper should
	// have scheduled us early enough (based on EventLatency() and the
	// SchedulingLatency()) to make this possible.
	// lateness is independent of EventLatency()!
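	// (A lateness of more than a third of a buffer duration is treated as
	// noteworthy below and logged, since it usually means the reported
	// latencies are too small.)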

	if (lateness > (BufferDuration() / 3)) {
		printf("SoundPlayNode::SendNewBuffer, event scheduled much too late, "
			"lateness is %Ld\n", lateness);
	}

	// skip buffer creation if output not enabled
	if (fOutputEnabled) {
		// Get the next buffer of data
		BBuffer* buffer = FillNextBuffer(event->event_time);

		if (buffer) {
			// If we are ready way too early, decrease internal latency
/*
			bigtime_t how_early = event->event_time - TimeSource()->Now()
				- fLatency - fInternalLatency;
			if (how_early > 5000) {
				printf("SoundPlayNode::SendNewBuffer, event scheduled too "
					"early, how_early is %Ld\n", how_early);

				if (fTooEarlyCount++ == 5) {
					fInternalLatency -= how_early;
					if (fInternalLatency < 500)
						fInternalLatency = 500;
					printf("SoundPlayNode::SendNewBuffer setting internal "
						"latency to %Ld\n", fInternalLatency);
					SetEventLatency(fLatency + fInternalLatency);
					fTooEarlyCount = 0;
				}
			}
*/
			// send the buffer downstream if and only if output is enabled
			if (SendBuffer(buffer, fOutput.source, fOutput.destination)
					!= B_OK) {
				// we need to recycle the buffer if the call to SendBuffer()
				// fails
				printf("SoundPlayNode::SendNewBuffer: Buffer sending "
					"failed\n");
				buffer->Recycle();
			}
		}
	}

	// track how much media we've delivered so far
	size_t nFrames = fOutput.format.u.raw_audio.buffer_size
		/ ((fOutput.format.u.raw_audio.format
				& media_raw_audio_format::B_AUDIO_SIZE_MASK)
			* fOutput.format.u.raw_audio.channel_count);
	fFramesSent += nFrames;

	// The buffer is on its way; now schedule the next one to go.
	// nextEvent is the time at which that buffer should arrive at its
	// destination.
	bigtime_t nextEvent = fStartTime + bigtime_t((1000000LL * fFramesSent)
		/ (int32)fOutput.format.u.raw_audio.frame_rate);
	media_timed_event nextBufferEvent(nextEvent, SEND_NEW_BUFFER_EVENT);
	EventQueue()->AddEvent(nextBufferEvent);

	return B_OK;
}


status_t
SoundPlayNode::HandleDataStatus(const media_timed_event* event,
	bigtime_t lateness, bool realTimeEvent)
{
	TRACE("SoundPlayNode::HandleDataStatus status: %li, lateness: %Ld\n",
		event->data, lateness);

	switch (event->data) {
		case B_DATA_NOT_AVAILABLE:
			break;
		case B_DATA_AVAILABLE:
			break;
		case B_PRODUCER_STOPPED:
			break;
		default:
			break;
	}
	return B_OK;
}


status_t
SoundPlayNode::HandleStart(const media_timed_event* event, bigtime_t lateness,
	bool realTimeEvent)
{
	CALLED();
	// don't do anything if we're already running
	if (RunState() != B_STARTED) {
		// We want to start sending buffers now, so we set up the
		// buffer-sending bookkeeping and fire off the first "produce a
		// buffer" event.
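		// fStartTime anchors the performance times of every buffer we send:
		// SendNewBuffer() computes each event time from it plus the number
		// of frames sent so far.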

		fFramesSent = 0;
		fStartTime = event->event_time;
		media_timed_event firstBufferEvent(event->event_time,
			SEND_NEW_BUFFER_EVENT);

		// Alternatively, we could call HandleEvent() directly with this
		// event, to avoid a trip through the event queue, like this:
		//
		// this->HandleEvent(&firstBufferEvent, 0, false);
		//
		EventQueue()->AddEvent(firstBufferEvent);
	}
	return B_OK;
}


status_t
SoundPlayNode::HandleSeek(const media_timed_event* event, bigtime_t lateness,
	bool realTimeEvent)
{
	CALLED();
	TRACE("SoundPlayNode::HandleSeek(t=%lld, d=%li, bd=%lld)\n",
		event->event_time, event->data, event->bigdata);
	return B_OK;
}


status_t
SoundPlayNode::HandleWarp(const media_timed_event* event, bigtime_t lateness,
	bool realTimeEvent)
{
	CALLED();
	return B_OK;
}


status_t
SoundPlayNode::HandleStop(const media_timed_event* event, bigtime_t lateness,
	bool realTimeEvent)
{
	CALLED();
	// flush the queue so downstreamers don't get any more
	EventQueue()->FlushEvents(0, BTimedEventQueue::B_ALWAYS, true,
		SEND_NEW_BUFFER_EVENT);

	return B_OK;
}


status_t
SoundPlayNode::HandleParameter(const media_timed_event* event,
	bigtime_t lateness, bool realTimeEvent)
{
	CALLED();
	return B_OK;
}


status_t
SoundPlayNode::AllocateBuffers()
{
	CALLED();

	// allocate enough buffers to span our downstream latency, plus one
	size_t size = fOutput.format.u.raw_audio.buffer_size;
	int32 count = int32(fLatency / BufferDuration() + 1 + 1);

	TRACE("SoundPlayNode::AllocateBuffers: latency = %Ld, buffer duration "
		"= %Ld, count %ld\n", fLatency, BufferDuration(), count);

	if (count < 3)
		count = 3;

	TRACE("SoundPlayNode::AllocateBuffers: creating group of %ld buffers, "
		"size = %lu\n", count, size);

	fBufferGroup = new BBufferGroup(size, count);
	if (fBufferGroup->InitCheck() != B_OK) {
		ERROR("SoundPlayNode::AllocateBuffers: BufferGroup::InitCheck() "
			"failed\n");
	}

	return fBufferGroup->InitCheck();
}


BBuffer*
SoundPlayNode::FillNextBuffer(bigtime_t eventTime)
{
	CALLED();

	// get a buffer from our buffer group
	BBuffer* buffer = fBufferGroup->RequestBuffer(
		fOutput.format.u.raw_audio.buffer_size, BufferDuration() / 2);

	// If we fail to get a buffer (for example, if the request times out), we
	// skip this buffer and go on to the next, to avoid locking up the control
	// thread
	if (buffer == NULL) {
		ERROR("SoundPlayNode::FillNextBuffer: RequestBuffer failed\n");
		return NULL;
	}

	if (fPlayer->HasData()) {
		fPlayer->PlayBuffer(buffer->Data(),
			fOutput.format.u.raw_audio.buffer_size, fOutput.format.u.raw_audio);
	} else
		memset(buffer->Data(), 0, fOutput.format.u.raw_audio.buffer_size);

	// fill in the buffer header
	media_header* header = buffer->Header();
	header->type = B_MEDIA_RAW_AUDIO;
	header->size_used = fOutput.format.u.raw_audio.buffer_size;
	header->time_source = TimeSource()->ID();
	header->start_time = eventTime;

	return buffer;
}


}	// namespace BPrivate