Cherry-pick 278306@main (9f7fcac). https://bugs.webkit.org/show_bug.cgi?id=273555

    [GStreamer][WebAudio] Misc improvements in playback pipeline
    https://bugs.webkit.org/show_bug.cgi?id=273555

    Reviewed by Xabier Rodriguez-Calvar.

    A small queue is needed before the sink, by design. In the webaudiosrc element, timestamping was
    improved, silent buffers are now flagged as droppable, and the render task now starts only when
    going to the PLAYING state.

    * Source/WebCore/platform/audio/gstreamer/AudioDestinationGStreamer.cpp:
    (WebCore::AudioDestination::create):
    (WebCore::AudioDestinationGStreamer::AudioDestinationGStreamer):
    * Source/WebCore/platform/audio/gstreamer/WebKitWebAudioSourceGStreamer.cpp:
    (webKitWebAudioSrcConstructed):
    (webKitWebAudioSrcRenderAndPushFrames):
    (webKitWebAudioSrcChangeState):

    Canonical link: https://commits.webkit.org/278306@main

Canonical link: https://commits.webkit.org/274313.288@webkitglib/2.44
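
For context, here is a rough, self-contained sketch of the playback topology this change produces: a live source feeding audioconvert ! audioresample ! queue ! sink, with the small two-buffer queue placed directly in front of the sink. This is illustrative only: a plain appsrc stands in for WebKit's internal webkitwebaudiosrc element, autoaudiosink stands in for whichever sink WebKit actually selects, and the buffer-pushing loop and error handling are omitted; the queue property values are the ones set in the patch.

    // Rough sketch of the WebAudio playback pipeline after this change (illustrative, not WebKit code):
    // source ! audioconvert ! audioresample ! queue ! sink, with a two-buffer queue before the sink.
    #include <gst/gst.h>

    int main(int argc, char** argv)
    {
        gst_init(&argc, &argv);

        GstElement* pipeline = gst_pipeline_new("webaudio-playback");
        GstElement* source = gst_element_factory_make("appsrc", "webaudioSrc");        // stand-in for webkitwebaudiosrc
        GstElement* audioConvert = gst_element_factory_make("audioconvert", nullptr);
        GstElement* audioResample = gst_element_factory_make("audioresample", nullptr);
        GstElement* queue = gst_element_factory_make("queue", nullptr);
        GstElement* sink = gst_element_factory_make("autoaudiosink", nullptr);          // assumed sink

        // Live, time-based source, as in webKitWebAudioSrcConstructed().
        g_object_set(source, "is-live", TRUE, "format", GST_FORMAT_TIME, nullptr);

        // The small queue before the sink: at most two buffers, no byte or time limit.
        g_object_set(queue, "max-size-buffers", 2, "max-size-bytes", 0, "max-size-time", static_cast<guint64>(0), nullptr);

        gst_bin_add_many(GST_BIN(pipeline), source, audioConvert, audioResample, queue, sink, nullptr);
        gst_element_link_many(source, audioConvert, audioResample, queue, sink, nullptr);

        gst_element_set_state(pipeline, GST_STATE_PLAYING);
        // ... push timestamped buffers into the appsrc and run a main loop here ...
        gst_element_set_state(pipeline, GST_STATE_NULL);
        gst_object_unref(pipeline);
        return 0;
    }

The two-buffer cap keeps the added buffering to a couple of render quanta while still decoupling the rendering thread from the sink.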
philn authored and aperezdc committed May 29, 2024
1 parent 2d57016 commit 90db18f
Showing 2 changed files with 24 additions and 13 deletions.
Source/WebCore/platform/audio/gstreamer/AudioDestinationGStreamer.cpp
@@ -23,7 +23,6 @@

#include "AudioDestinationGStreamer.h"

#include "AudioChannel.h"
#include "AudioSourceProvider.h"
#include "AudioUtilities.h"
#include "GStreamerCommon.h"
@@ -170,12 +169,17 @@ AudioDestinationGStreamer::AudioDestinationGStreamer(AudioIOCallback& callback,

GstElement* audioConvert = makeGStreamerElement("audioconvert", nullptr);
GstElement* audioResample = makeGStreamerElement("audioresample", nullptr);
-gst_bin_add_many(GST_BIN_CAST(m_pipeline.get()), m_src.get(), audioConvert, audioResample, audioSink.get(), nullptr);

-// Link src pads from webkitAudioSrc to audioConvert ! audioResample ! autoaudiosink.
+auto queue = gst_element_factory_make("queue", nullptr);
+g_object_set(queue, "max-size-buffers", 2, "max-size-bytes", 0, "max-size-time", 0, nullptr);

+gst_bin_add_many(GST_BIN_CAST(m_pipeline.get()), m_src.get(), audioConvert, audioResample, queue, audioSink.get(), nullptr);

+// Link src pads from webkitAudioSrc to audioConvert ! audioResample ! queue ! autoaudiosink.
gst_element_link_pads_full(m_src.get(), "src", audioConvert, "sink", GST_PAD_LINK_CHECK_NOTHING);
gst_element_link_pads_full(audioConvert, "src", audioResample, "sink", GST_PAD_LINK_CHECK_NOTHING);
-gst_element_link_pads_full(audioResample, "src", audioSink.get(), "sink", GST_PAD_LINK_CHECK_NOTHING);
+gst_element_link_pads_full(audioResample, "src", queue, "sink", GST_PAD_LINK_CHECK_NOTHING);
+gst_element_link_pads_full(queue, "src", audioSink.get(), "sink", GST_PAD_LINK_CHECK_NOTHING);
}

AudioDestinationGStreamer::~AudioDestinationGStreamer()
Source/WebCore/platform/audio/gstreamer/WebKitWebAudioSourceGStreamer.cpp
@@ -216,7 +216,7 @@ static void webKitWebAudioSrcConstructed(GObject* object)
priv->source = makeGStreamerElement("appsrc", "webaudioSrc");

// Configure the appsrc for minimal latency.
-g_object_set(priv->source.get(), "block", TRUE, "blocksize", priv->bufferSize, "format", GST_FORMAT_TIME, nullptr);
+g_object_set(priv->source.get(), "block", TRUE, "blocksize", priv->bufferSize, "format", GST_FORMAT_TIME, "is-live", TRUE, nullptr);

gst_bin_add(GST_BIN(src), priv->source.get());
// appsrc's src pad is the only visible pad of our element.
@@ -314,6 +314,9 @@ static void webKitWebAudioSrcRenderAndPushFrames(const GRefPtr<GstElement>& elem
priv->dispatchCondition.notifyOne();
});

+if (GST_STATE(element.get()) < GST_STATE_PAUSED)
+return;

if (!priv->destination)
return;

@@ -323,14 +326,12 @@

GstClockTime timestamp = gst_util_uint64_scale(priv->numberOfSamples, GST_SECOND, priv->sampleRate);
priv->numberOfSamples += priv->framesToPull;
-GstClockTime duration = gst_util_uint64_scale(priv->numberOfSamples, GST_SECOND, priv->sampleRate) - timestamp;
+GstClockTime duration = gst_util_uint64_scale(priv->framesToPull, GST_SECOND, priv->sampleRate);

AudioIOPosition outputTimestamp;
-if (auto clock = adoptGRef(gst_element_get_clock(element.get()))) {
-auto clockTime = gst_clock_get_time(clock.get());
-outputTimestamp.position = Seconds::fromNanoseconds(timestamp);
-outputTimestamp.timestamp = MonotonicTime::fromRawSeconds(static_cast<double>((g_get_monotonic_time() + GST_TIME_AS_USECONDS(clockTime)) / 1000000.0));
-}
+outputTimestamp.position = Seconds::fromNanoseconds(timestamp);
+auto now = static_cast<double>((g_get_monotonic_time() + GST_TIME_AS_USECONDS(timestamp)) / 1000000.0);
+outputTimestamp.timestamp = MonotonicTime::fromRawSeconds(now);

// FIXME: Add support for local/live audio input.
if (priv->bus)
@@ -341,11 +342,13 @@ static void webKitWebAudioSrcRenderAndPushFrames(const GRefPtr<GstElement>& elem
priv->hasRenderedAudibleFrame = true;
}

-GST_BUFFER_TIMESTAMP(buffer.get()) = outputTimestamp.position.nanoseconds();
+GST_BUFFER_TIMESTAMP(buffer.get()) = timestamp;
GST_BUFFER_DURATION(buffer.get()) = duration;

-if (priv->bus->isSilent())
+if (priv->bus->isSilent()) {
GST_BUFFER_FLAG_SET(buffer.get(), GST_BUFFER_FLAG_GAP);
+GST_BUFFER_FLAG_SET(buffer.get(), GST_BUFFER_FLAG_DROPPABLE);
+}

// Leak the buffer ref, because gst_app_src_push_buffer steals it.
GstFlowReturn ret = gst_app_src_push_buffer(GST_APP_SRC(priv->source.get()), buffer.leakRef());
@@ -412,6 +415,10 @@ static GstStateChangeReturn webKitWebAudioSrcChangeState(GstElement* element, Gs
gst_buffer_pool_set_config(priv->pool.get(), config);
if (!gst_buffer_pool_set_active(priv->pool.get(), TRUE))
return GST_STATE_CHANGE_FAILURE;
+webKitWebAudioSrcRenderIteration(src);
+break;
+}
+case GST_STATE_CHANGE_PAUSED_TO_PLAYING: {
if (!gst_task_start(priv->task.get()))
return GST_STATE_CHANGE_FAILURE;
break;
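
As a sanity check on the timestamping logic above, here is a small standalone sketch of the same arithmetic. The 128-frame render quantum and the 44100 Hz sample rate are illustrative assumptions standing in for priv->framesToPull and priv->sampleRate, which the patch takes from the element's configuration.

    // Illustrates the buffer timestamp/duration math used in webKitWebAudioSrcRenderAndPushFrames().
    // The 128-frame quantum and 44100 Hz rate are assumed values, for demonstration only.
    #include <gst/gst.h>

    int main(int argc, char** argv)
    {
        gst_init(&argc, &argv);

        guint64 numberOfSamples = 0;
        const guint64 framesToPull = 128; // assumed render quantum
        const guint64 sampleRate = 44100; // assumed sample rate

        for (int i = 0; i < 3; i++) {
            // Timestamp comes from the running sample count; duration is the quantum converted to clock time.
            GstClockTime timestamp = gst_util_uint64_scale(numberOfSamples, GST_SECOND, sampleRate);
            GstClockTime duration = gst_util_uint64_scale(framesToPull, GST_SECOND, sampleRate);
            numberOfSamples += framesToPull;
            g_print("buffer %d: pts=%" GST_TIME_FORMAT " duration=%" GST_TIME_FORMAT "\n",
                i, GST_TIME_ARGS(timestamp), GST_TIME_ARGS(duration));
        }
        return 0;
    }

At 44100 Hz a 128-frame buffer lasts roughly 2.9 ms, so the two-buffer queue added in AudioDestinationGStreamer.cpp holds only a few milliseconds of audio.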
