/*
 * Copyright (c) 1998-99, Be Incorporated, All Rights Reserved.
 * Distributed under the terms of the Be Sample Code license.
 *
 * Copyright (c) 2000-2008, Ingo Weinhold,
 * Copyright (c) 2000-2008, Stephan Aßmus,
 * All Rights Reserved. Distributed under the terms of the MIT license.
 */

#include "AudioProducer.h"

// NOTE(review): the original system header names were lost in a mangled
// paste (bare "#include" directives). The list below was reconstructed from
// the symbols this file uses (printf/strcmp/fabs, BBuffer, BBufferGroup,
// media_* types, TimeSource()) -- confirm against the repository history.
#include <math.h>
#include <stdio.h>
#include <string.h>

#include <Buffer.h>
#include <BufferGroup.h>
#include <MediaDefs.h>
#include <MediaNode.h>
#include <TimeSource.h>

#include "AudioSupplier.h"
#include "EventQueue.h"
#include "MessageEvent.h"

// When enabled, every produced buffer is also written to a WAV file on the
// desktop for offline inspection.
#define DEBUG_TO_FILE 0

#if DEBUG_TO_FILE
#	include <Entry.h>
#	include <MediaFormats.h>
#	include <MediaFile.h>
#	include <MediaTrack.h>
#endif // DEBUG_TO_FILE


// debugging
//#define TRACE_AUDIO_PRODUCER
#ifdef TRACE_AUDIO_PRODUCER
#	define TRACE(x...)	printf(x)
#	define ERROR(x...)	fprintf(stderr, x)
#else
#	define TRACE(x...)
#	define ERROR(x...)	fprintf(stderr, x)
#endif


#if DEBUG_TO_FILE
// init_media_file
/*!	Lazily creates (once) a BMediaFile/BMediaTrack pair writing raw audio in
	the given format to /boot/home/Desktop/test.wav. Returns the file (and the
	track via \a _track) or NULL if track creation failed.
*/
static BMediaFile*
init_media_file(media_format format, BMediaTrack** _track)
{
	static BMediaFile* file = NULL;
	static BMediaTrack* track = NULL;
	if (file == NULL) {
		entry_ref ref;
		get_ref_for_path("/boot/home/Desktop/test.wav", &ref);

		// find the "wav" file format
		media_file_format fileFormat;
		int32 cookie = 0;
		while (get_next_file_format(&cookie, &fileFormat) == B_OK) {
			if (strcmp(fileFormat.short_name, "wav") == 0) {
				break;
			}
		}
		file = new BMediaFile(&ref, &fileFormat);

		// find the raw audio encoder
		media_codec_info info;
		cookie = 0;
		while (get_next_encoder(&cookie, &info) == B_OK) {
			if (strcmp(info.short_name, "raw-audio") == 0)
				break;
		}
		track = file->CreateTrack(&format, &info);
		if (!track)
			printf("failed to create track\n");

		file->CommitHeader();
	}
	*_track = track;
	return track != NULL ? file : NULL;
}
#endif // DEBUG_TO_FILE


// constructor
/*!	Creates the producer node. \a supplier delivers the actual audio data
	(may be NULL -> silence), \a lowLatency selects a small buffer size
	(1/100 s) instead of a six times larger one.
*/
AudioProducer::AudioProducer(const char* name, AudioSupplier* supplier,
		bool lowLatency)
	:
	BMediaNode(name),
	BBufferProducer(B_MEDIA_RAW_AUDIO),
	BMediaEventLooper(),
	fBufferGroup(NULL),
	fUsingOurBuffers(false),
		// FIX: was never initialized; it is read in SetBufferGroup() and
		// _AllocateBuffers() to decide whether fBufferGroup may be deleted.
	fLatency(0),
	fInternalLatency(0),
	fLowLatency(lowLatency),
	fOutputEnabled(true),
	fFramesSent(0),
	fStartTime(0),
	fSupplier(supplier),
	fRunning(false),
	fPeakListener(NULL)
{
	TRACE("%p->AudioProducer::AudioProducer(%s, %p, %d)\n", this, name,
		supplier, lowLatency);

	// initialize our preferred format object
	fPreferredFormat.type = B_MEDIA_RAW_AUDIO;
	fPreferredFormat.u.raw_audio.format = media_raw_audio_format::B_AUDIO_FLOAT;
//	fPreferredFormat.u.raw_audio.format = media_raw_audio_format::B_AUDIO_SHORT;
	fPreferredFormat.u.raw_audio.channel_count = 2;
	fPreferredFormat.u.raw_audio.frame_rate = 44100.0;
	fPreferredFormat.u.raw_audio.byte_order
		= (B_HOST_IS_BENDIAN) ? B_MEDIA_BIG_ENDIAN : B_MEDIA_LITTLE_ENDIAN;
	// NOTE: the (buffer_size * 1000000) needs to be dividable by
	// fPreferredFormat.u.raw_audio.frame_rate!
	// 441 frames * 2 channels * bytes-per-sample = 1/100 second of audio
	fPreferredFormat.u.raw_audio.buffer_size = 441 * 2
		* (fPreferredFormat.u.raw_audio.format
			& media_raw_audio_format::B_AUDIO_SIZE_MASK);
	if (!fLowLatency)
		fPreferredFormat.u.raw_audio.buffer_size *= 6;

	// we're not connected yet
	fOutput.destination = media_destination::null;
	fOutput.format = fPreferredFormat;

	// init the audio supplier
	if (fSupplier) {
		fSupplier->SetAudioProducer(this);
	}
}


// destructor
AudioProducer::~AudioProducer()
{
	TRACE("%p->AudioProducer::~AudioProducer()\n", this);

#if DEBUG_TO_FILE
	// flush and close the debug dump file
	BMediaTrack* track;
	if (BMediaFile* file = init_media_file(fPreferredFormat, &track)) {
		printf("deleting file...\n");
		track->Flush();
		file->ReleaseTrack(track);
		file->CloseFile();
		delete file;
	}
#endif // DEBUG_TO_FILE

	// Stop the BMediaEventLooper thread
	Quit();
	TRACE("AudioProducer::~AudioProducer() done\n");
}


// AddOn
/*!	This node is not instantiated by an add-on, so there is none to report. */
BMediaAddOn*
AudioProducer::AddOn(int32* internalId) const
{
	return NULL;
}


// FormatSuggestionRequested
/*!	Returns our preferred format; only raw audio (or wildcard) is accepted. */
status_t
AudioProducer::FormatSuggestionRequested(media_type type, int32 quality,
	media_format* _format)
{
	TRACE("%p->AudioProducer::FormatSuggestionRequested()\n", this);

	if (!_format)
		return B_BAD_VALUE;

	// this is the format we'll be returning (our preferred format)
	*_format = fPreferredFormat;

	// a wildcard type is okay; we can specialize it
	if (type == B_MEDIA_UNKNOWN_TYPE)
		type = B_MEDIA_RAW_AUDIO;

	// we only support raw audio
	if (type != B_MEDIA_RAW_AUDIO)
		return B_MEDIA_BAD_FORMAT;

	return B_OK;
}


// FormatProposal
/*!	Overwrites \a format with our preferred format and reports whether the
	proposed type was acceptable (raw audio or wildcard).
*/
status_t
AudioProducer::FormatProposal(const media_source& output, media_format* format)
{
	TRACE("%p->AudioProducer::FormatProposal()\n", this);

	// is this a proposal for our one output?
	if (output != fOutput.source) {
		TRACE("  -> B_MEDIA_BAD_SOURCE\n");
		return B_MEDIA_BAD_SOURCE;
	}

	// TODO: we might want to support different audio formats
	// we only support floating-point raw audio, so we always return that, but
	// we supply an error code depending on whether we found the proposal
	// acceptable.
	media_type requestedType = format->type;
	*format = fPreferredFormat;
	// raw audio or wildcard type, either is okay by us
	if (requestedType != B_MEDIA_UNKNOWN_TYPE
		&& requestedType != B_MEDIA_RAW_AUDIO) {
		TRACE("  -> B_MEDIA_BAD_FORMAT\n");
		return B_MEDIA_BAD_FORMAT;
	}

	return B_OK;
}


// FormatChangeRequested
/*!	Accepts a format change on our established connection, forwards the new
	format to the audio supplier and re-allocates the buffer group to match.
*/
status_t
AudioProducer::FormatChangeRequested(const media_source& source,
	const media_destination& destination, media_format* ioFormat,
	int32* _deprecated_)
{
	TRACE("%p->AudioProducer::FormatChangeRequested()\n", this);

	if (destination != fOutput.destination) {
		TRACE("  -> B_MEDIA_BAD_DESTINATION\n");
		return B_MEDIA_BAD_DESTINATION;
	}
	if (source != fOutput.source) {
		TRACE("  -> B_MEDIA_BAD_SOURCE\n");
		return B_MEDIA_BAD_SOURCE;
	}

	fOutput.format = *ioFormat;

	// notify our audio supplier of the format change
	if (fSupplier)
		fSupplier->SetFormat(fOutput.format);

	return _AllocateBuffers(ioFormat);
}


// GetNextOutput
/*!	Iterates our single output: cookie 0 yields it, anything else is
	B_BAD_INDEX.
*/
status_t
AudioProducer::GetNextOutput(int32* cookie, media_output* _output)
{
	TRACE("%p->AudioProducer::GetNextOutput(%ld)\n", this, *cookie);

	// we have only a single output; if we supported multiple outputs, we'd
	// iterate over whatever data structure we were using to keep track of
	// them.
	if (0 == *cookie) {
		*_output = fOutput;
		*cookie += 1;
		return B_OK;
	}

	return B_BAD_INDEX;
}


// DisposeOutputCookie
status_t
AudioProducer::DisposeOutputCookie(int32 cookie)
{
	// do nothing because we don't use the cookie for anything special
	return B_OK;
}


// SetBufferGroup
/*!	Adopts the consumer-provided buffer group, or builds our own when
	\a newGroup is NULL. Our own group (if any) is deleted first.
*/
status_t
AudioProducer::SetBufferGroup(const media_source& forSource,
	BBufferGroup* newGroup)
{
	TRACE("%p->AudioProducer::SetBufferGroup()\n", this);

	if (forSource != fOutput.source)
		return B_MEDIA_BAD_SOURCE;

	if (newGroup == fBufferGroup)
		return B_OK;

	if (fUsingOurBuffers && fBufferGroup)
		delete fBufferGroup;
			// waits for all buffers to recycle

	if (newGroup != NULL) {
		// we were given a valid group; just use that one from now on
		fBufferGroup = newGroup;
		fUsingOurBuffers = false;
	} else {
		// we were passed a NULL group pointer; that means we construct
		// our own buffer group to use from now on
		size_t size = fOutput.format.u.raw_audio.buffer_size;
		// enough buffers to cover the latency, plus two in flight
		int32 count = int32(fLatency / BufferDuration() + 1 + 1);
		fBufferGroup = new BBufferGroup(size, count);
		fUsingOurBuffers = true;
	}

	return B_OK;
}


// GetLatency
status_t
AudioProducer::GetLatency(bigtime_t* _latency)
{
	TRACE("%p->AudioProducer::GetLatency()\n", this);

	// report our *total* latency:  internal plus downstream plus scheduling
	*_latency = EventLatency() + SchedulingLatency();
	return B_OK;
}


// PrepareToConnect
/*!	First half of the connection protocol: validates and specializes the
	format (filling in a wildcarded buffer size), then reserves the
	connection by recording the destination.
*/
status_t
AudioProducer::PrepareToConnect(const media_source& what,
	const media_destination& where, media_format* format,
	media_source* _source, char* _name)
{
	TRACE("%p->AudioProducer::PrepareToConnect()\n", this);

	// trying to connect something that isn't our source?
	if (what != fOutput.source) {
		TRACE("  -> B_MEDIA_BAD_SOURCE\n");
		return B_MEDIA_BAD_SOURCE;
	}

	// are we already connected?
	if (fOutput.destination != media_destination::null) {
		TRACE("  -> B_MEDIA_ALREADY_CONNECTED\n");
		return B_MEDIA_ALREADY_CONNECTED;
	}

	// the format may not yet be fully specialized (the consumer might have
	// passed back some wildcards). Finish specializing it now, and return an
	// error if we don't support the requested format.
	if (format->type != B_MEDIA_RAW_AUDIO) {
		TRACE("  -> B_MEDIA_BAD_FORMAT\n");
		return B_MEDIA_BAD_FORMAT;
		// TODO: we might want to support different audio formats
	} else if (format->u.raw_audio.format
			!= fPreferredFormat.u.raw_audio.format) {
		TRACE("  -> B_MEDIA_BAD_FORMAT\n");
		return B_MEDIA_BAD_FORMAT;
	}

	// !!! validate all other fields except for buffer_size here, because the
	// consumer might have supplied different values from AcceptFormat()?

	// check the buffer size, which may still be wildcarded
	if (format->u.raw_audio.buffer_size
			== media_raw_audio_format::wildcard.buffer_size) {
		// pick something comfortable to suggest
		// NOTE: the (buffer_size * 1000000) needs to be dividable by
		// fPreferredFormat.u.raw_audio.frame_rate!
		// TODO: this needs to depend on the other parameters
		// (but it doesn't matter sincer the AudioProducer is not
		// currently used like that)
		TRACE("  -> adjusting buffer size, it was wildcard\n");
		format->u.raw_audio.buffer_size = 441 * 2 * sizeof(float);
	}

	// Now reserve the connection, and return information about it
	fOutput.destination = where;
	fOutput.format = *format;
	*_source = fOutput.source;
	strncpy(_name, fOutput.name, B_MEDIA_NAME_LENGTH);
	TRACE("  -> B_OK\n");
	return B_OK;
}


// estimate_internal_latency
/*!	Dry run: allocates, fills and mixes ten float buffers of the connection's
	size and returns the wall-clock time that took, as an estimate of how
	long producing one real buffer will take.
*/
static bigtime_t
estimate_internal_latency(const media_format& format)
{
	bigtime_t startTime = system_time();

	// calculate the number of samples per buffer
	int32 sampleSize = format.u.raw_audio.format
		& media_raw_audio_format::B_AUDIO_SIZE_MASK;
	int32 sampleCount = format.u.raw_audio.buffer_size / sampleSize;
	// alloc float buffers of this size
	const int bufferCount = 10;
		// number of input buffers
	float* buffers[bufferCount + 1];
	for (int32 i = 0; i < bufferCount + 1; i++)
		buffers[i] = new float[sampleCount];
	float* outBuffer = buffers[bufferCount];
	// fill all buffers save the last one with arbitrary data and merge them
	// into the last one
	for (int32 i = 0; i < bufferCount; i++) {
		for (int32 k = 0; k < sampleCount; k++) {
			buffers[i][k] = ((float)i * (float)k)
				/ float(bufferCount * sampleCount);
		}
	}
	for (int32 k = 0; k < sampleCount; k++) {
		outBuffer[k] = 0;
		for (int32 i = 0; i < bufferCount; i++)
			outBuffer[k] += buffers[i][k];
		outBuffer[k] /= bufferCount;
	}
	// cleanup
	for (int32 i = 0; i < bufferCount + 1; i++)
		delete[] buffers[i];

	return system_time() - startTime;
}


// Connect
/*!	Second half of the connection protocol: confirms (or unreserves on error)
	the connection, propagates the format to the supplier, measures
	downstream and internal latency, sets the buffer duration and allocates
	buffers unless the consumer already handed us a group.
*/
void
AudioProducer::Connect(status_t error, const media_source& source,
	const media_destination& destination, const media_format& format,
	char* _name)
{
	TRACE("AudioProducer::Connect(%s)\n", strerror(error));

	// If something earlier failed, Connect() might still be called, but with
	// a non-zero error code.  When that happens we simply unreserve the
	// connection and do nothing else.
	if (error != B_OK) {
		fOutput.destination = media_destination::null;
		fOutput.format = fPreferredFormat;
		return;
	}

	// Okay, the connection has been confirmed.  Record the destination and
	// format that we agreed on, and report our connection name again.
	fOutput.destination = destination;
	fOutput.format = format;
	strncpy(_name, fOutput.name, B_MEDIA_NAME_LENGTH);

	// tell our audio supplier about the format
	if (fSupplier) {
		TRACE("AudioProducer::Connect() fSupplier->SetFormat()\n");
		fSupplier->SetFormat(fOutput.format);
	}

	TRACE("AudioProducer::Connect() FindLatencyFor()\n");

	// Now that we're connected, we can determine our downstream latency.
	// Do so, then make sure we get our events early enough.
	media_node_id id;
	FindLatencyFor(fOutput.destination, &fLatency, &id);

	// Use a dry run to see how long it takes me to fill a buffer of data
	size_t sampleSize = fOutput.format.u.raw_audio.format
		& media_raw_audio_format::B_AUDIO_SIZE_MASK;
	size_t samplesPerBuffer
		= fOutput.format.u.raw_audio.buffer_size / sampleSize;
	fInternalLatency = estimate_internal_latency(fOutput.format);
	if (!fLowLatency)
		fInternalLatency *= 32;
	SetEventLatency(fLatency + fInternalLatency);

	// reset our buffer duration, etc. to avoid later calculations
	// (samplesPerBuffer counts all channels, hence the extra division)
	bigtime_t duration = bigtime_t(1000000) * samplesPerBuffer
		/ bigtime_t(fOutput.format.u.raw_audio.frame_rate
			* fOutput.format.u.raw_audio.channel_count);
	TRACE("AudioProducer::Connect() SetBufferDuration(%lld)\n", duration);
	SetBufferDuration(duration);

	TRACE("AudioProducer::Connect() _AllocateBuffers()\n");

	// Set up the buffer group for our connection, as long as nobody handed
	// us a buffer group (via SetBufferGroup()) prior to this.  That can
	// happen, for example, if the consumer calls SetOutputBuffersFor() on
	// us from within its Connected() method.
	if (!fBufferGroup)
		_AllocateBuffers(&fOutput.format);

	TRACE("AudioProducer::Connect() done\n");
}


// Disconnect
/*!	Tears down our one connection and deletes the buffer group. */
void
AudioProducer::Disconnect(const media_source& what,
	const media_destination& where)
{
	TRACE("%p->AudioProducer::Disconnect()\n", this);

	// Make sure that our connection is the one being disconnected
	if ((where == fOutput.destination) && (what == fOutput.source)) {
		fOutput.destination = media_destination::null;
		fOutput.format = fPreferredFormat;
		TRACE("AudioProducer: deleting buffer group...\n");
		// Always delete the buffer group, even if it is not ours.
		// (See BeBook::SetBufferGroup()).
		delete fBufferGroup;
		TRACE("AudioProducer: buffer group deleted\n");
		fBufferGroup = NULL;
	}

	TRACE("%p->AudioProducer::Disconnect() done\n", this);
}


// LateNoticeReceived
/*!	Reacts to a late notice according to the current run mode: increase
	latency, or (default) skip ahead by one buffer's worth of samples.
*/
void
AudioProducer::LateNoticeReceived(const media_source& what, bigtime_t howMuch,
	bigtime_t performanceTime)
{
	ERROR("%p->AudioProducer::LateNoticeReceived(%lld, %lld)\n", this, howMuch,
		performanceTime);
	// If we're late, we need to catch up. Respond in a manner appropriate
	// to our current run mode.
	if (what == fOutput.source) {
		if (RunMode() == B_RECORDING) {
			// ...
		} else if (RunMode() == B_INCREASE_LATENCY) {
			fInternalLatency += howMuch;
			SetEventLatency(fLatency + fInternalLatency);
		} else {
			// skip one buffer ahead
			size_t sampleSize = fOutput.format.u.raw_audio.format
				& media_raw_audio_format::B_AUDIO_SIZE_MASK;
			size_t nSamples = fOutput.format.u.raw_audio.buffer_size
				/ sampleSize;
			fFramesSent += nSamples;
		}
	}
}


// EnableOutput
void
AudioProducer::EnableOutput(const media_source& what, bool enabled,
	int32* _deprecated_)
{
	TRACE("%p->AudioProducer::EnableOutput(%d)\n", this, enabled);

	if (what == fOutput.source)
		fOutputEnabled = enabled;
}


// SetPlayRate
/*!	Play-rate changes are not supported. */
status_t
AudioProducer::SetPlayRate(int32 numer, int32 denom)
{
	return B_ERROR;
}


// HandleMessage
/*!	We handle no custom node messages. */
status_t
AudioProducer::HandleMessage(int32 message, const void *data, size_t size)
{
	TRACE("%p->AudioProducer::HandleMessage()\n", this);
	return B_ERROR;
}


// AdditionalBufferRequested
void
AudioProducer::AdditionalBufferRequested(const media_source& source,
	media_buffer_id prevBuffer, bigtime_t prevTime,
	const media_seek_tag *prevTag)
{
	TRACE("%p->AudioProducer::AdditionalBufferRequested()\n", this);
}


// LatencyChanged
/*!	Adopts a new downstream latency for our connection. */
void
AudioProducer::LatencyChanged(const media_source& source,
	const media_destination& destination, bigtime_t newLatency, uint32 flags)
{
	TRACE("%p->AudioProducer::LatencyChanged(%lld)\n", this, newLatency);

	if ((source == fOutput.source)
		&& (destination == fOutput.destination)) {
		fLatency = newLatency;
		SetEventLatency(fLatency + fInternalLatency);
	}
}


// NodeRegistered
/*!	Starts the event loop at real-time priority and finishes initializing
	the output now that ControlPort()/Node() are valid.
*/
void
AudioProducer::NodeRegistered()
{
	TRACE("%p->AudioProducer::NodeRegistered()\n", this);

	// Start the BMediaEventLooper thread
	SetPriority(B_REAL_TIME_PRIORITY);
	Run();

	// set up as much information about our output as we can
	fOutput.source.port = ControlPort();
	fOutput.source.id = 0;
	fOutput.node = Node();
	::strcpy(fOutput.name, "MediaPlayer Sound Output");
}


// SetRunMode
/*!	Accepts any run mode except B_OFFLINE, which is reported as an error. */
void
AudioProducer::SetRunMode(run_mode mode)
{
	TRACE("%p->AudioProducer::SetRunMode()\n", this);

	if (B_OFFLINE == mode)
		ReportError(B_NODE_FAILED_SET_RUN_MODE);
	else
		BBufferProducer::SetRunMode(mode);
}


// HandleEvent
/*!	Event loop dispatcher: B_START seeds the first B_HANDLE_BUFFER event,
	B_STOP flushes pending buffer events, and each B_HANDLE_BUFFER produces
	and sends one buffer, then schedules the next one based on the frame
	count sent so far.
*/
void
AudioProducer::HandleEvent(const media_timed_event* event, bigtime_t lateness,
	bool realTimeEvent)
{
	TRACE("%p->AudioProducer::HandleEvent()\n", this);

	switch (event->type) {
		case BTimedEventQueue::B_START:
			TRACE("AudioProducer::HandleEvent(B_START)\n");
			if (RunState() != B_STARTED) {
				fFramesSent = 0;
				fStartTime = event->event_time;
				printf("B_START: start time: %lld\n", fStartTime);
				media_timed_event firstBufferEvent(fStartTime,
					BTimedEventQueue::B_HANDLE_BUFFER);
				EventQueue()->AddEvent(firstBufferEvent);
			}
			TRACE("AudioProducer::HandleEvent(B_START) done\n");
			break;

		case BTimedEventQueue::B_STOP:
			TRACE("AudioProducer::HandleEvent(B_STOP)\n");
			EventQueue()->FlushEvents(0, BTimedEventQueue::B_ALWAYS, true,
				BTimedEventQueue::B_HANDLE_BUFFER);
			TRACE("AudioProducer::HandleEvent(B_STOP) done\n");
			break;

		case BTimedEventQueue::B_HANDLE_BUFFER:
		{
			TRACE("AudioProducer::HandleEvent(B_HANDLE_BUFFER)\n");
			if ((RunState() == BMediaEventLooper::B_STARTED)
				&& (fOutput.destination != media_destination::null)) {
				BBuffer* buffer = _FillNextBuffer(event->event_time);
				if (buffer) {
					status_t err = B_ERROR;
					if (fOutputEnabled)
						err = SendBuffer(buffer, fOutput.destination);
					// we still own the buffer if it was not sent
					if (err != B_OK)
						buffer->Recycle();
				}
				size_t sampleSize = fOutput.format.u.raw_audio.format
					& media_raw_audio_format::B_AUDIO_SIZE_MASK;
				size_t nFrames = fOutput.format.u.raw_audio.buffer_size
					/ (sampleSize * fOutput.format.u.raw_audio.channel_count);
				fFramesSent += nFrames;

				// schedule the next buffer at the performance time the
				// frames sent so far will have been played
				bigtime_t nextEvent = fStartTime + bigtime_t(
					double(fFramesSent) * 1000000.0
					/ double(fOutput.format.u.raw_audio.frame_rate));
				media_timed_event nextBufferEvent(nextEvent,
					BTimedEventQueue::B_HANDLE_BUFFER);
				EventQueue()->AddEvent(nextBufferEvent);
			} else {
				fprintf(stderr, "B_HANDLE_BUFFER, but not started!\n");
			}
			TRACE("AudioProducer::HandleEvent(B_HANDLE_BUFFER) done\n");
			break;
		}
		default:
			break;
	}
}


// SetRunning
void
AudioProducer::SetRunning(bool running)
{
	TRACE("%p->AudioProducer::SetRunning(%d)\n", this, running);

	fRunning = running;
}


// SetPeakListener
void
AudioProducer::SetPeakListener(BHandler* handler)
{
	fPeakListener = handler;
}
// #pragma mark -


// _AllocateBuffers
/*!	Replaces our own buffer group (if any) with a freshly allocated one whose
	buffer size matches \a format and whose buffer count covers the total
	latency plus two buffers in flight. A consumer-provided group (i.e. when
	fUsingOurBuffers is false) is never deleted here.
	Returns the new group's InitCheck() result.
*/
status_t
AudioProducer::_AllocateBuffers(media_format* format)
{
	TRACE("%p->AudioProducer::_AllocateBuffers()\n", this);

	// only dispose of a group we allocated ourselves
	if (fBufferGroup && fUsingOurBuffers) {
		delete fBufferGroup;
		fBufferGroup = NULL;
	}
	size_t size = format->u.raw_audio.buffer_size;
	int32 bufferDuration = BufferDuration();
	int32 count = 0;
	// guard against division by zero when no buffer duration is set yet
	if (bufferDuration > 0) {
		count = (int32)((fLatency + fInternalLatency)
			/ bufferDuration + 2);
	}
	fBufferGroup = new BBufferGroup(size, count);
	fUsingOurBuffers = true;
	return fBufferGroup->InitCheck();
}


// _FillNextBuffer
/*!	Requests a buffer from the group, fills it with the next \c frameCount
	frames from the audio supplier (or silence on failure), stamps its
	start_time (\a eventTime when recording, otherwise our performance time
	derived from fFramesSent), optionally dumps it to the debug file, and
	notifies the peak listener with per-channel peak values for float audio.
	Returns NULL if no buffer was available.
*/
BBuffer*
AudioProducer::_FillNextBuffer(bigtime_t eventTime)
{
	BBuffer* buffer = fBufferGroup->RequestBuffer(
		fOutput.format.u.raw_audio.buffer_size, BufferDuration());

	if (!buffer) {
		ERROR("AudioProducer::_FillNextBuffer() - no buffer\n");
		return NULL;
	}

	size_t sampleSize = fOutput.format.u.raw_audio.format
		& media_raw_audio_format::B_AUDIO_SIZE_MASK;
	size_t numSamples = fOutput.format.u.raw_audio.buffer_size / sampleSize;
		// number of samples in the buffer (all channels)

	// fill in the buffer header
	media_header* hdr = buffer->Header();
	hdr->type = B_MEDIA_RAW_AUDIO;
	hdr->time_source = TimeSource()->ID();
	buffer->SetSizeUsed(fOutput.format.u.raw_audio.buffer_size);

	// performance time of the first frame in this buffer, relative to start
	bigtime_t performanceTime = bigtime_t(double(fFramesSent)
		* 1000000.0 / double(fOutput.format.u.raw_audio.frame_rate));

	// fill in data from audio supplier
	int64 frameCount = numSamples / fOutput.format.u.raw_audio.channel_count;
	bigtime_t startTime = performanceTime;
	bigtime_t endTime = bigtime_t(double(fFramesSent + frameCount)
		* 1000000.0 / fOutput.format.u.raw_audio.frame_rate);

	// fall back to silence if there is no usable supplier or it fails
	if (!fSupplier || fSupplier->InitCheck() != B_OK
		|| fSupplier->GetFrames(buffer->Data(), frameCount, startTime,
			endTime) != B_OK) {
		ERROR("AudioProducer::_FillNextBuffer() - supplier error -> silence\n");
		memset(buffer->Data(), 0, buffer->SizeUsed());
	}

	// stamp buffer
	if (RunMode() == B_RECORDING) {
		hdr->start_time = eventTime;
	} else {
		hdr->start_time = fStartTime + performanceTime;
	}

#if DEBUG_TO_FILE
	// also append the produced frames to the debug WAV file
	BMediaTrack* track;
	if (BMediaFile* file = init_media_file(fOutput.format, &track)) {
		track->WriteFrames(buffer->Data(), frameCount);
	}
#endif // DEBUG_TO_FILE

	if (fPeakListener
		&& fOutput.format.u.raw_audio.format
			== media_raw_audio_format::B_AUDIO_FLOAT) {
		// TODO: extend the peak notifier for other sample formats
		int32 channels = fOutput.format.u.raw_audio.channel_count;
		// NOTE: variable-length arrays (GCC extension); the initial values
		// assume samples lie within [-1.0, 1.0]
		float max[channels];
		float min[channels];
		for (int32 i = 0; i < channels; i++) {
			max[i] = -1.0;
			min[i] = 1.0;
		}

		// scan the interleaved samples for per-channel min/max
		float* sample = (float*)buffer->Data();
		for (uint32 i = 0; i < frameCount; i++) {
			for (int32 k = 0; k < channels; k++) {
				if (*sample < min[k])
					min[k] = *sample;
				if (*sample > max[k])
					max[k] = *sample;
				sample++;
			}
		}
		// one "max" field per channel: the largest absolute peak
		BMessage message(MSG_PEAK_NOTIFICATION);
		for (int32 i = 0; i < channels; i++) {
			float maxAbs = max_c(fabs(min[i]), fabs(max[i]));
			message.AddFloat("max", maxAbs);
		}
		// deliver the notification when this buffer is actually audible
		bigtime_t realTime = TimeSource()->RealTimeFor(
			fStartTime + performanceTime, 0);
		MessageEvent* event = new (std::nothrow) MessageEvent(realTime,
			fPeakListener, message);
		if (event != NULL)
			EventQueue::Default().AddEvent(event);
	}

	return buffer;
}