MediaStreamNote
MediaStream from getUserMedia
Call flow of getUserMedia:
Navigator::MozGetUserMedia(const MediaStreamConstraints& aConstraints,
NavigatorUserMediaSuccessCallback& aOnSuccess,
NavigatorUserMediaErrorCallback& aOnError,
ErrorResult& aRv)
{
...
MediaManager* manager = MediaManager::Get();
aRv = manager->GetUserMedia(privileged, mWindow, aConstraints,
onsuccess, onerror);
}
nsresult
MediaManager::GetUserMedia(bool aPrivileged,
nsPIDOMWindow* aWindow, const MediaStreamConstraints& aConstraints,
nsIDOMGetUserMediaSuccessCallback* aOnSuccess,
nsIDOMGetUserMediaErrorCallback* aOnError)
{
...
StreamListeners* listeners = GetActiveWindows()->Get(windowID);
if (!listeners) {
listeners = new StreamListeners;
GetActiveWindows()->Put(windowID, listeners);
}
...
// Create a disabled listener to act as a placeholder
GetUserMediaCallbackMediaStreamListener* listener =
new GetUserMediaCallbackMediaStreamListener(mediaThread, windowID);
// No need for locking because we always do this in the main thread.
listeners->AppendElement(listener);
...
// Pass callbacks and MediaStreamListener along to GetUserMediaRunnable.
nsRefPtr<GetUserMediaRunnable> runnable;
if (c.mFake) {
// Fake stream from default backend.
runnable = new GetUserMediaRunnable(c, onSuccess.forget(),
onError.forget(), windowID, listener, mPrefs, new MediaEngineDefault());
} else {
// Stream from the default device via the WebRTC backend.
runnable = new GetUserMediaRunnable(c, onSuccess.forget(),
onError.forget(), windowID, listener, mPrefs);
}
...
#ifdef MOZ_B2G_CAMERA
if (mCameraManager == nullptr) {
mCameraManager = nsDOMCameraManager::CreateInstance(aWindow);
}
#endif
...
if (aPrivileged ||
(c.mFake && !Preferences::GetBool("media.navigator.permission.fake"))) {
mMediaThread->Dispatch(runnable, NS_DISPATCH_NORMAL);
} ...
}
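GetUserMedia does no capture work on the main thread; it packages the request into a GetUserMediaRunnable and hands it to the media thread. The sketch below is a rough plain-C++ analogy of that dispatch pattern (an invented DispatchThread class, not the Gecko nsIThread API): runnables queued from one thread are drained in order by a single worker, just as mMediaThread->Dispatch(runnable, NS_DISPATCH_NORMAL) queues work above.

#include <condition_variable>
#include <functional>
#include <mutex>
#include <queue>
#include <thread>

class DispatchThread {
public:
  DispatchThread() : mWorker([this] { Drain(); }) {}
  ~DispatchThread() {
    {
      std::lock_guard<std::mutex> lock(mMutex);
      mDone = true;
    }
    mCond.notify_one();
    mWorker.join();
  }
  // Called from the main thread, like mMediaThread->Dispatch(runnable, ...).
  void Dispatch(std::function<void()> aRunnable) {
    {
      std::lock_guard<std::mutex> lock(mMutex);
      mQueue.push(std::move(aRunnable));
    }
    mCond.notify_one();
  }

private:
  void Drain() {
    for (;;) {
      std::function<void()> runnable;
      {
        std::unique_lock<std::mutex> lock(mMutex);
        mCond.wait(lock, [this] { return mDone || !mQueue.empty(); });
        if (mQueue.empty()) {
          return;  // shut down once all queued runnables have run
        }
        runnable = std::move(mQueue.front());
        mQueue.pop();
      }
      runnable();  // GetUserMediaRunnable::Run() would execute here
    }
  }

  std::mutex mMutex;
  std::condition_variable mCond;
  std::queue<std::function<void()>> mQueue;
  bool mDone = false;
  std::thread mWorker;  // declared last so the other members exist before Drain() starts
};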
NS_IMETHOD
GetUserMediaRunnable::Run()
{
MediaEngine* backend = mBackend;
// Was a backend provided?
if (!backend) {
// backend will be a MediaEngineWebRTC instance.
backend = mManager->GetBackend(mWindowID);
}
// Was a device provided?
if (!mDeviceChosen) {
// SelectDevice will call MediaEngineWebRTC::EnumerateVideoDevices(MediaSourceType aMediaSource,
//                                                                 nsTArray<nsRefPtr<MediaEngineVideoSource> >* aVSources),
// which creates MediaEngineWebRTCVideoSource objects and appends them to aVSources.
// SelectDevice then picks the first device.
nsresult rv = SelectDevice(backend);
if (rv != NS_OK) {
return rv;
}
}
...
ProcessGetUserMedia(((IsOn(mConstraints.mAudio) && mAudioDevice) ?
mAudioDevice->GetSource() : nullptr),
((IsOn(mConstraints.mVideo) && mVideoDevice) ?
mVideoDevice->GetSource() : nullptr));
return NS_OK;
}
void
ProcessGetUserMedia(MediaEngineAudioSource* aAudioSource,
MediaEngineVideoSource* aVideoSource)
{
...
if (aVideoSource) {
rv = aVideoSource->Allocate(GetInvariant(mConstraints.mVideo), mPrefs);
...
}
...
NS_DispatchToMainThread(new GetUserMediaStreamRunnable(
mSuccess, mError, mWindowID, mListener, aAudioSource, aVideoSource,
peerIdentity
));
...
}
NS_IMETHOD
GetUserMediaStreamRunnable::Run()
{
...
// Create a media stream.
nsRefPtr<nsDOMUserMediaStream> trackunion =
nsDOMUserMediaStream::CreateTrackUnionStream(window, mAudioSource,
mVideoSource);
...
MediaStreamGraph* gm = MediaStreamGraph::GetInstance();
nsRefPtr<SourceMediaStream> stream = gm->CreateSourceStream(nullptr);
...
trackunion->mSourceStream = stream;
...
// The listener was added at the beginning in an inactive state.
// Activate our listener. We'll call Start() on the source when we get a callback
// that the MediaStream has started consuming. The listener is freed
// when the page is invalidated (on navigation or close).
mListener->Activate(stream.forget(), mAudioSource, mVideoSource);
...
// This will end up calling MediaEngineWebRTCVideoSource::Start(SourceMediaStream* aStream, TrackID aID).
nsRefPtr<MediaOperationRunnable> runnable(
new MediaOperationRunnable(MEDIA_START, mListener, trackunion,
tracksAvailableCallback,
mAudioSource, mVideoSource, false, mWindowID,
mError.forget()));
mediaThread->Dispatch(runnable, NS_DISPATCH_NORMAL);
// We won't need mError now.
mError = nullptr;
return NS_OK;
}
SourceMediaStream*
MediaStreamGraph::CreateSourceStream(DOMMediaStream* aWrapper)
{
SourceMediaStream* stream = new SourceMediaStream(aWrapper);
NS_ADDREF(stream);
MediaStreamGraphImpl* graph = static_cast<MediaStreamGraphImpl*>(this);
stream->SetGraphImpl(graph);
graph->AppendMessage(new CreateMessage(stream));
return stream;
}
virtual void CreateMessage::Run() MOZ_OVERRIDE
{
mStream->GraphImpl()->AddStream(mStream); // It will call mStreams.AppendElement(aStream);
mStream->Init();
}
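CreateSourceStream returns the new stream immediately, but the graph itself only learns about it when the graph thread executes the queued CreateMessage. AppendMessage is the general mechanism for moving work onto the graph thread: the main thread queues ControlMessages, and the graph thread runs the accumulated batch at the start of its next iteration. A minimal sketch of that idea (invented GraphImpl fields, not the real MediaStreamGraphImpl):

#include <memory>
#include <mutex>
#include <vector>

// Stand-in for ControlMessage: Run() executes on the graph thread.
struct ControlMessage {
  virtual ~ControlMessage() {}
  virtual void Run() = 0;
};

class GraphImpl {
public:
  // Main thread: queue a message for the next iteration.
  void AppendMessage(std::unique_ptr<ControlMessage> aMessage) {
    std::lock_guard<std::mutex> lock(mMonitor);
    mMessageQueue.push_back(std::move(aMessage));
  }

  // Graph thread: called at the top of each iteration (cf. RunThread() below).
  void RunMessagesInQueue() {
    std::vector<std::unique_ptr<ControlMessage>> batch;
    {
      std::lock_guard<std::mutex> lock(mMonitor);
      batch.swap(mMessageQueue);  // take the whole batch under the lock
    }
    for (auto& msg : batch) {
      msg->Run();  // e.g. CreateMessage::Run() adds the stream to mStreams
    }
  }

private:
  std::mutex mMonitor;
  std::vector<std::unique_ptr<ControlMessage>> mMessageQueue;
};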
MediaStreamGraphImpl::ExtractPendingInput
void
MediaStreamGraphImpl::ExtractPendingInput(SourceMediaStream* aStream,
GraphTime aDesiredUpToTime,
bool* aEnsureNextIteration)
{
bool finished;
{
MutexAutoLock lock(aStream->mMutex);
if (aStream->mPullEnabled && !aStream->mFinished &&
!aStream->mListeners.IsEmpty()) {
// Compute how much stream time we'll need assuming we don't block
// the stream at all between mBlockingDecisionsMadeUntilTime and
// aDesiredUpToTime.
StreamTime t =
GraphTimeToStreamTime(aStream, mStateComputedTime) +
(aDesiredUpToTime - mStateComputedTime);
STREAM_LOG(PR_LOG_DEBUG+1, ("Calling NotifyPull aStream=%p t=%f current end=%f", aStream,
MediaTimeToSeconds(t),
MediaTimeToSeconds(aStream->mBuffer.GetEnd())));
if (t > aStream->mBuffer.GetEnd()) {
*aEnsureNextIteration = true;
for (uint32_t j = 0; j < aStream->mListeners.Length(); ++j) {
MediaStreamListener* l = aStream->mListeners[j];
{
MutexAutoUnlock unlock(aStream->mMutex);
l->NotifyPull(this, t);
}
}
}
}
finished = aStream->mUpdateFinished;
for (int32_t i = aStream->mUpdateTracks.Length() - 1; i >= 0; --i) {
SourceMediaStream::TrackData* data = &aStream->mUpdateTracks[i];
aStream->ApplyTrackDisabling(data->mID, data->mData);
for (uint32_t j = 0; j < aStream->mListeners.Length(); ++j) {
MediaStreamListener* l = aStream->mListeners[j];
TrackTicks offset = (data->mCommands & SourceMediaStream::TRACK_CREATE)
? data->mStart : aStream->mBuffer.FindTrack(data->mID)->GetSegment()->GetDuration();
l->NotifyQueuedTrackChanges(this, data->mID, data->mOutputRate,
offset, data->mCommands, *data->mData);
}
if (data->mCommands & SourceMediaStream::TRACK_CREATE) {
...
} else if (data->mData->GetDuration() > 0) {
MediaSegment* dest = aStream->mBuffer.FindTrack(data->mID)->GetSegment();
dest->AppendFrom(data->mData);
}
if (data->mCommands & SourceMediaStream::TRACK_END) {
aStream->mBuffer.FindTrack(data->mID)->SetEnded();
aStream->mUpdateTracks.RemoveElementAt(i);
}
}
if (!aStream->mFinished) {
aStream->mBuffer.AdvanceKnownTracksTime(aStream->mUpdateKnownTracksTime);
}
}
if (aStream->mBuffer.GetEnd() > 0) {
aStream->mHasCurrentData = true;
}
if (finished) {
FinishStream(aStream);
}
}
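The size of each pull request follows from the two clocks involved: graph time is shared by the whole graph, while stream time excludes any time the stream spent blocked. A worked example with made-up numbers:

// Suppose mStateComputedTime corresponds to 400 ms of graph time,
// aDesiredUpToTime to 430 ms, and the stream has been blocked for 50 ms so
// far, so GraphTimeToStreamTime(aStream, mStateComputedTime) = 350 ms of
// stream time. Then
//   t = 350 ms + (430 ms - 400 ms) = 380 ms.
// If aStream->mBuffer currently ends at 360 ms, t > GetEnd() holds and each
// listener's NotifyPull is asked to fill the stream out to 380 ms, i.e.
// 20 ms of new data.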
void
MediaEngineWebRTCVideoSource::NotifyPull(MediaStreamGraph* aGraph,
SourceMediaStream *aSource,
TrackID aID,
StreamTime aDesiredTime,
TrackTicks &aLastEndTime)
{
...
if (delta > 0) {
// nullptr images are allowed
IntSize size(image ? mWidth : 0, image ? mHeight : 0);
segment.AppendFrame(image.forget(), delta, size);
// This can fail if either a) we haven't added the track yet, or b)
// we've removed or finished the track.
if (aSource->AppendToTrack(aID, &(segment))) {
aLastEndTime = target;
}
}
}
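The contract a pulled source must satisfy is simple: on every NotifyPull, append enough data to cover aDesiredTime, which for video usually means repeating the most recent captured frame. A self-contained sketch of that bookkeeping (simplified types, not the real MediaEngine classes):

#include <cstdint>

typedef int64_t TrackTicks;

class PulledVideoSource {
public:
  void NotifyPull(TrackTicks aDesiredTime) {
    TrackTicks delta = aDesiredTime - mLastEndTime;
    if (delta <= 0) {
      return;  // the track already has enough data queued
    }
    // In the real code this is segment.AppendFrame(image.forget(), delta, size)
    // followed by aSource->AppendToTrack(aID, &segment).
    if (AppendLastFrameFor(delta)) {
      mLastEndTime = aDesiredTime;  // only advance on a successful append
    }
  }

private:
  bool AppendLastFrameFor(TrackTicks aDuration) {
    // Elided: stretch the last captured image over aDuration ticks.
    return true;
  }
  TrackTicks mLastEndTime = 0;
};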
bool
SourceMediaStream::AppendToTrack(TrackID aID, MediaSegment* aSegment, MediaSegment *aRawSegment)
{
MutexAutoLock lock(mMutex);
// ::EndAllTrackAndFinished() can end these before the sources notice
bool appended = false;
if (!mFinished) {
TrackData *track = FindDataForTrack(aID);
if (track) {
...
// Must notify first, since AppendFrom() will empty out aSegment
NotifyDirectConsumers(track, aRawSegment ? aRawSegment : aSegment);
track->mData->AppendFrom(aSegment); // note: aSegment is now dead
appended = true;
} else {
aSegment->Clear();
}
}
if (!mDestroyed) {
GraphImpl()->EnsureNextIteration();
}
return appended;
}
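Note the two-stage buffering: AppendToTrack only stages data in the track's pending segment (TrackData::mData, under mMutex); ExtractPendingInput on the graph thread later moves that staged data into the stream's real buffer. A rough sketch of the idea (invented names):

#include <mutex>
#include <vector>

// Hypothetical segment: a run of media data.
struct Segment {
  std::vector<float> samples;
  void AppendFrom(Segment* aOther) {  // steals aOther's contents
    samples.insert(samples.end(), aOther->samples.begin(), aOther->samples.end());
    aOther->samples.clear();
  }
};

class TwoStageTrack {
public:
  // Producer thread (capture callback): stage data under the lock.
  void AppendToTrack(Segment* aSegment) {
    std::lock_guard<std::mutex> lock(mMutex);
    mPending.AppendFrom(aSegment);  // aSegment is now empty
  }

  // Graph thread (cf. ExtractPendingInput): commit the staged data.
  void ExtractPendingInput() {
    std::lock_guard<std::mutex> lock(mMutex);
    mCommitted.AppendFrom(&mPending);
  }

private:
  std::mutex mMutex;
  Segment mPending;    // like SourceMediaStream::TrackData::mData
  Segment mCommitted;  // like the track's segment in mBuffer
};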
void
SourceMediaStream::NotifyDirectConsumers(TrackData *aTrack,
MediaSegment *aSegment)
{
// Call with mMutex locked
MOZ_ASSERT(aTrack);
for (uint32_t j = 0; j < mDirectListeners.Length(); ++j) {
MediaStreamDirectListener* l = mDirectListeners[j];
TrackTicks offset = 0; // FIX! need a separate TrackTicks.... or the end of the internal buffer
l->NotifyRealtimeData(static_cast<MediaStreamGraph*>(GraphImpl()), aTrack->mID, aTrack->mOutputRate,
offset, aTrack->mCommands, *aSegment);
}
}
MediaStreamGraphImpl::RunThread
void
MediaStreamGraphImpl::RunThread()
{
...
// Update mCurrentTime to the min of the playing audio times, or advance it
// by the wall-clock time change if no audio is playing.
UpdateCurrentTime();
...
// Grab pending stream input.
for (uint32_t i = 0; i < mStreams.Length(); ++i) {
SourceMediaStream* is = mStreams[i]->AsSourceStream();
if (is) {
UpdateConsumptionState(is);
ExtractPendingInput(is, endBlockingDecisions, &ensureNextIteration);
}
}
...
// Figure out what each stream wants to do
for (uint32_t i = 0; i < mStreams.Length(); ++i) {
MediaStream* stream = mStreams[i];
if (!doneAllProducing) {
ProcessedMediaStream* ps = stream->AsProcessedStream();
if (ps) {
AudioNodeStream* n = stream->AsAudioNodeStream();
if (n) {
// Since an AudioNodeStream is present, go ahead and
// produce audio block by block for all the rest of the streams.
ProduceDataForStreamsBlockByBlock(i, n->SampleRate(), prevComputedTime, mStateComputedTime);
ticksProcessed += TimeToTicksRoundDown(n->SampleRate(), mStateComputedTime - prevComputedTime);
doneAllProducing = true;
} else {
ps->ProcessInput(prevComputedTime, mStateComputedTime,
ProcessedMediaStream::ALLOW_FINISH);
NS_WARN_IF_FALSE(stream->mBuffer.GetEnd() >=
GraphTimeToStreamTime(stream, mStateComputedTime),
"Stream did not produce enough data");
}
}
}
NotifyHasCurrentData(stream);
if (mRealtime) {
// Only play back audio and video in real-time mode
CreateOrDestroyAudioStreams(prevComputedTime, stream);
TrackTicks ticksPlayedForThisStream = PlayAudio(stream, prevComputedTime, mStateComputedTime);
if (!ticksPlayed) {
ticksPlayed = ticksPlayedForThisStream;
} else {
MOZ_ASSERT(!ticksPlayedForThisStream || ticksPlayedForThisStream == ticksPlayed,
"Each stream should have the same number of frame.");
}
PlayVideo(stream);
}
SourceMediaStream* is = stream->AsSourceStream();
if (is) {
UpdateBufferSufficiencyState(is);
}
GraphTime end;
if (!stream->mBlocked.GetAt(mCurrentTime, &end) || end < GRAPH_TIME_MAX) {
allBlockedForever = false;
}
}
...
}
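Put together, one pass of the graph thread boils down to a fixed sequence. A compressed sketch (real method names, but control flow heavily simplified):

// One simplified iteration of MediaStreamGraphImpl::RunThread():
//   1. run queued ControlMessages (RunMessagesInQueue)
//   2. UpdateCurrentTime()                  -- advance mCurrentTime
//   3. for each SourceMediaStream:
//        UpdateConsumptionState() + ExtractPendingInput()
//   4. recompute blocking out to endBlockingDecisions
//   5. for each stream: ProcessInput() on processed streams, switching to
//      block-by-block production once an AudioNodeStream is seen
//   6. for each stream (real-time graphs only): PlayAudio() + PlayVideo()
//   7. sleep until more data is needed, then repeat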
void
MediaStreamGraphImpl::ExtractPendingInput(SourceMediaStream* aStream,
GraphTime aDesiredUpToTime,
bool* aEnsureNextIteration)
{
...
for (uint32_t j = 0; j < aStream->mListeners.Length(); ++j) {
MediaStreamListener* l = aStream->mListeners[j];
{
MutexAutoUnlock unlock(aStream->mMutex);
// This will call into |MediaEngineWebRTCVideoSource::NotifyPull|.
l->NotifyPull(this, t);
}
}
...
for (int32_t i = aStream->mUpdateTracks.Length() - 1; i >= 0; --i) {
SourceMediaStream::TrackData* data = &aStream->mUpdateTracks[i];
aStream->ApplyTrackDisabling(data->mID, data->mData);
for (uint32_t j = 0; j < aStream->mListeners.Length(); ++j) {
MediaStreamListener* l = aStream->mListeners[j];
TrackTicks offset = (data->mCommands & SourceMediaStream::TRACK_CREATE)
? data->mStart : aStream->mBuffer.FindTrack(data->mID)->GetSegment()->GetDuration();
l->NotifyQueuedTrackChanges(this, data->mID, data->mOutputRate,
offset, data->mCommands, *data->mData);
}
...
}
...
}
void
MediaStreamGraphImpl::PlayVideo(MediaStream* aStream)
{
...
for (uint32_t i = 0; i < aStream->mVideoOutputs.Length(); ++i) {
VideoFrameContainer* output = aStream->mVideoOutputs[i];
...
output->SetCurrentFrame(frame->GetIntrinsicSize(), frame->GetImage(),
targetTime);
}
...
}
AudioContext::CreateXXX
already_AddRefed<MediaElementAudioSourceNode>
AudioContext::CreateMediaElementSource(HTMLMediaElement& aMediaElement,
ErrorResult& aRv)
{
...
nsRefPtr<DOMMediaStream> stream = aMediaElement.MozCaptureStream(aRv);
...
nsRefPtr<MediaElementAudioSourceNode> mediaElementAudioSourceNode =
new MediaElementAudioSourceNode(this, stream);
return mediaElementAudioSourceNode.forget();
}
already_AddRefed<MediaStreamAudioSourceNode>
AudioContext::CreateMediaStreamSource(DOMMediaStream& aMediaStream,
ErrorResult& aRv)
{
...
nsRefPtr<MediaStreamAudioSourceNode> mediaStreamAudioSourceNode =
new MediaStreamAudioSourceNode(this, &aMediaStream);
return mediaStreamAudioSourceNode.forget();
}
already_AddRefed<ScriptProcessorNode>
AudioContext::CreateScriptProcessor(uint32_t aBufferSize,
uint32_t aNumberOfInputChannels,
uint32_t aNumberOfOutputChannels,
ErrorResult& aRv)
{
...
nsRefPtr<ScriptProcessorNode> scriptProcessor =
new ScriptProcessorNode(this, aBufferSize, aNumberOfInputChannels,
aNumberOfOutputChannels);
return scriptProcessor.forget();
}
already_AddRefed<ChannelMergerNode>
AudioContext::CreateChannelMerger(uint32_t aNumberOfInputs, ErrorResult& aRv)
{
if (aNumberOfInputs == 0 ||
aNumberOfInputs > WebAudioUtils::MaxChannelCount) {
aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
return nullptr;
}
nsRefPtr<ChannelMergerNode> mergerNode =
new ChannelMergerNode(this, aNumberOfInputs);
return mergerNode.forget();
}
already_AddRefed<DynamicsCompressorNode>
AudioContext::CreateDynamicsCompressor()
{
nsRefPtr<DynamicsCompressorNode> compressorNode =
new DynamicsCompressorNode(this);
return compressorNode.forget();
}
already_AddRefed<GainNode>
AudioContext::CreateGain()
{
nsRefPtr<GainNode> gainNode = new GainNode(this);
return gainNode.forget();
}
GainNode::GainNode(AudioContext* aContext)
: AudioNode(aContext,
2,
ChannelCountMode::Max,
ChannelInterpretation::Speakers)
, mGain(new AudioParam(MOZ_THIS_IN_INITIALIZER_LIST(),
SendGainToStream, 1.0f))
{
GainNodeEngine* engine = new GainNodeEngine(this, aContext->Destination());
mStream = aContext->Graph()->CreateAudioNodeStream(engine, MediaStreamGraph::INTERNAL_STREAM);
engine->SetSourceStream(static_cast<AudioNodeStream*> (mStream.get()));
}
void
GainNode::SendGainToStream(AudioNode* aNode)
{
GainNode* This = static_cast<GainNode*>(aNode);
// This will eventually call GainNodeEngine::SetTimelineParameter.
SendTimelineParameterToStream(This, GainNodeEngine::GAIN, *This->mGain);
}
void GainNodeEngine::SetTimelineParameter(uint32_t aIndex,
const AudioParamTimeline& aValue,
TrackRate aSampleRate) MOZ_OVERRIDE
{
switch (aIndex) {
case GAIN:
MOZ_ASSERT(mSource && mDestination);
mGain = aValue;
WebAudioUtils::ConvertAudioParamToTicks(mGain, mSource, mDestination);
break;
default:
NS_ERROR("Bad GainNodeEngine TimelineParameter");
}
}
AudioNodeStream*
MediaStreamGraph::CreateAudioNodeStream(AudioNodeEngine* aEngine,
AudioNodeStreamKind aKind,
TrackRate aSampleRate)
{
MOZ_ASSERT(NS_IsMainThread());
if (!aSampleRate) {
aSampleRate = aEngine->NodeMainThread()->Context()->SampleRate();
}
AudioNodeStream* stream = new AudioNodeStream(aEngine, aKind, aSampleRate);
NS_ADDREF(stream);
MediaStreamGraphImpl* graph = static_cast<MediaStreamGraphImpl*>(this);
stream->SetGraphImpl(graph);
if (aEngine->HasNode()) {
stream->SetChannelMixingParametersImpl(aEngine->NodeMainThread()->ChannelCount(),
aEngine->NodeMainThread()->ChannelCountModeValue(),
aEngine->NodeMainThread()->ChannelInterpretationValue());
}
graph->AppendMessage(new CreateMessage(stream));
return stream;
}
AudioNode::Connect
void
AudioNode::Connect(AudioNode& aDestination, uint32_t aOutput,
uint32_t aInput, ErrorResult& aRv)
{
// The MediaStreamGraph will handle cycle detection. We don't need to do it
// here.
mOutputNodes.AppendElement(&aDestination);
InputNode* input = aDestination.mInputNodes.AppendElement();
input->mInputNode = this;
input->mInputPort = aInput;
input->mOutputPort = aOutput;
if (aDestination.mStream) {
// Connect streams in the MediaStreamGraph
MOZ_ASSERT(aDestination.mStream->AsProcessedStream());
ProcessedMediaStream* ps =
static_cast<ProcessedMediaStream*>(aDestination.mStream.get());
MOZ_ASSERT(aInput <= UINT16_MAX, "Unexpected large input port number");
MOZ_ASSERT(aOutput <= UINT16_MAX, "Unexpected large output port number");
input->mStreamPort =
ps->AllocateInputPort(mStream, MediaInputPort::FLAG_BLOCK_INPUT,
static_cast<uint16_t>(aInput),
static_cast<uint16_t>(aOutput));
}
// This connection may have connected a panner and a source.
Context()->UpdatePannerSource();
}
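Connect keeps two parallel representations in sync: the DOM-side adjacency lists (mOutputNodes / mInputNodes), used for lifetime management and bookkeeping such as the panner update, and the MSG-side MediaInputPort that actually routes audio between the two AudioNodeStreams. A toy sketch of the DOM-side half (invented types; the real routing is the AllocateInputPort call above):

#include <cstdint>
#include <vector>

struct ToyNode {
  struct Input {
    ToyNode* source;
    uint16_t inputPort;   // like InputNode::mInputPort
    uint16_t outputPort;  // like InputNode::mOutputPort
  };
  std::vector<ToyNode*> outputs;  // like mOutputNodes
  std::vector<Input> inputs;      // like mInputNodes

  void Connect(ToyNode& aDest, uint16_t aOutput, uint16_t aInput) {
    outputs.push_back(&aDest);
    aDest.inputs.push_back(Input{this, aInput, aOutput});
    // The MSG-side connection (AllocateInputPort) would be made here too.
  }
};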
AudioNodeStream::ProcessInput
// The MediaStreamGraph guarantees that this is actually one block, for
// AudioNodeStreams.
void
AudioNodeStream::ProcessInput(GraphTime aFrom, GraphTime aTo, uint32_t aFlags)
{
...
// We need to generate at least one input
uint16_t maxInputs = std::max(uint16_t(1), mEngine->InputCount());
OutputChunks inputChunks;
inputChunks.SetLength(maxInputs);
for (uint16_t i = 0; i < maxInputs; ++i) {
ObtainInputBlock(inputChunks[i], i);
}
bool finished = false;
if (maxInputs <= 1 && mEngine->OutputCount() <= 1) {
mEngine->ProcessBlock(this, inputChunks[0], &mLastChunks[0], &finished);
} else {
mEngine->ProcessBlocksOnPorts(this, inputChunks, mLastChunks, &finished);
}
...
}
virtual void GainNodeEngine::ProcessBlock(AudioNodeStream* aStream,
const AudioChunk& aInput,
AudioChunk* aOutput,
bool* aFinished)
{
...
if (mGain.HasSimpleValue()) {
// Optimize the case where we only have a single value set as the volume
float gain = mGain.GetValue();
if (gain == 0.0f) {
aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
} else {
*aOutput = aInput;
aOutput->mVolume *= gain;
}
} else {
// First, compute a vector of gains for each track tick based on the
// timeline at hand, and then for each channel, multiply the values
// in the buffer with the gain vector.
AllocateAudioBlock(aInput.mChannelData.Length(), aOutput);
// Compute the gain values for the duration of the input AudioChunk
// XXX we need to add a method to AudioEventTimeline to compute this buffer directly.
float computedGain[WEBAUDIO_BLOCK_SIZE];
for (size_t counter = 0; counter < WEBAUDIO_BLOCK_SIZE; ++counter) {
TrackTicks tick = aStream->GetCurrentPosition();
computedGain[counter] = mGain.GetValueAtTime(tick, counter) * aInput.mVolume;
}
// Apply the gain to the output buffer
for (size_t channel = 0; channel < aOutput->mChannelData.Length(); ++channel) {
const float* inputBuffer = static_cast<const float*> (aInput.mChannelData[channel]);
float* buffer = static_cast<float*> (const_cast<void*>
(aOutput->mChannelData[channel]));
AudioBlockCopyChannelWithScale(inputBuffer, computedGain, buffer);
}
}
}
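The else-branch above is the general shape of Web Audio block processing: compute one parameter value per tick across the 128-frame block, then scale each channel by that vector. A standalone sketch of the same loops, with a hypothetical linear gain ramp standing in for the AudioParamTimeline:

#include <cstddef>

const size_t WEBAUDIO_BLOCK_SIZE = 128;

// Scale one channel sample-by-sample, like AudioBlockCopyChannelWithScale.
static void CopyChannelWithScale(const float* aInput, const float* aScale,
                                 float* aOutput) {
  for (size_t i = 0; i < WEBAUDIO_BLOCK_SIZE; ++i) {
    aOutput[i] = aInput[i] * aScale[i];
  }
}

// Apply a hypothetical linear ramp from aStart to aEnd across one block;
// the real engine asks mGain.GetValueAtTime(tick, counter) instead.
static void ApplyGainRamp(const float* const* aInput, float* const* aOutput,
                          size_t aChannels, float aStart, float aEnd) {
  float computedGain[WEBAUDIO_BLOCK_SIZE];
  for (size_t i = 0; i < WEBAUDIO_BLOCK_SIZE; ++i) {
    computedGain[i] = aStart + (aEnd - aStart) * i / (WEBAUDIO_BLOCK_SIZE - 1);
  }
  for (size_t c = 0; c < aChannels; ++c) {
    CopyChannelWithScale(aInput[c], computedGain, aOutput[c]);
  }
}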
HTMLImageElement
nsLayoutUtils::SurfaceFromElementResult
nsLayoutUtils::SurfaceFromElement(nsIImageLoadingContent* aElement,
uint32_t aSurfaceFlags,
DrawTarget* aTarget)
{
SurfaceFromElementResult result;
nsresult rv;
nsCOMPtr<imgIRequest> imgRequest;
rv = aElement->GetRequest(nsIImageLoadingContent::CURRENT_REQUEST,
getter_AddRefs(imgRequest));
if (NS_FAILED(rv) || !imgRequest)
return result;
uint32_t status;
imgRequest->GetImageStatus(&status);
if ((status & imgIRequest::STATUS_LOAD_COMPLETE) == 0) {
// Spec says to use GetComplete, but that only works on
// nsIDOMHTMLImageElement, and we support all sorts of other stuff
// here. Do this for now pending spec clarification.
result.mIsStillLoading = (status & imgIRequest::STATUS_ERROR) == 0;
return result;
}
...
if (!noRasterize || imgContainer->GetType() == imgIContainer::TYPE_RASTER) {
if (aSurfaceFlags & SFE_WANT_IMAGE_SURFACE) {
frameFlags |= imgIContainer::FLAG_WANT_DATA_SURFACE;
}
result.mSourceSurface = imgContainer->GetFrame(whichFrame, frameFlags);
if (!result.mSourceSurface) {
return result;
}
...
} else {
result.mDrawInfo.mImgContainer = imgContainer;
result.mDrawInfo.mWhichFrame = whichFrame;
result.mDrawInfo.mDrawingFlags = frameFlags;
}
...
return result;
}