diff --git a/CMakeLists.txt b/CMakeLists.txt
index e41f87f..912357d 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -23,10 +23,13 @@ add_library(gstprojectm SHARED
src/enums.h
src/plugin.h
src/plugin.c
- src/projectm.h
- src/projectm.c
src/gstglbaseaudiovisualizer.h
src/gstglbaseaudiovisualizer.c
+ src/gstpmaudiovisualizer.h
+ src/gstpmaudiovisualizer.c
+ src/pluginbase.h
+ src/pluginbase.c
+ src/register.c
)
target_include_directories(gstprojectm
@@ -73,3 +76,5 @@ target_link_libraries(gstprojectm
${GLIB2_LIBRARIES}
${GLIB2_GOBJECT_LIBRARIES}
)
+
+add_subdirectory(example)
\ No newline at end of file
diff --git a/README.md b/README.md
index 1b43b93..9cad71e 100644
--- a/README.md
+++ b/README.md
@@ -57,25 +57,50 @@ The documentation has been organized into distinct files, each dedicated to a sp
- **[OSX](docs/OSX.md)**
- **[Windows](docs/WINDOWS.md)**
-Once the plugin has been installed, you can use it something like this:
+Once the plugin has been installed, you can use it something like this to render in real-time to an OpenGL window:
```shell
-gst-launch pipewiresrc ! queue ! audioconvert ! projectm preset=/usr/local/share/projectM/presets preset-duration=5 ! video/x-raw,width=2048,height=1440,framerate=60/1 ! videoconvert ! xvimagesink sync=false
+gst-launch pipewiresrc ! queue ! audioconvert ! "audio/x-raw, format=S16LE, rate=44100, channels=2, layout=interleaved" ! projectm preset=/usr/local/share/projectM/presets preset-duration=5 mesh-size=48,32 ! 'video/x-raw(memory:GLMemory),width=2048,height=1440,framerate=60/1' ! queue leaky=downstream max-size-buffers=1 ! glimagesink sync=true
```
-Or to convert an audio file to video:
+To render from a live source in real-time to a gl window, an identity element can be used to set up a proper timestamp source for the pipeline. This example also includes a texture directory:
+```shell
+gst-launch souphttpsrc location=http://your-radio-stream is-live=true ! queue ! decodebin ! audioconvert ! "audio/x-raw, format=S16LE, rate=44100, channels=2, layout=interleaved" ! identity single-segment=true sync=true ! projectm preset=/usr/local/share/projectM/presets preset-duration=5 mesh-size=48,32 texture-dir=/usr/local/share/projectM/presets-milkdrop-texture-pack ! 'video/x-raw(memory:GLMemory),width=1920,height=1080,framerate=60/1' ! queue leaky=downstream max-size-buffers=1 ! glimagesink sync=true
+```
+
+Or to convert an audio file to video using offline rendering:
```shell
+gst-launch-1.0 -e \
filesrc location=input.mp3 ! decodebin name=dec \
decodebin ! tee name=t \
t. ! queue ! audioconvert ! audioresample ! \
capsfilter caps="audio/x-raw, format=F32LE, channels=2, rate=44100" ! avenc_aac bitrate=256000 ! queue ! mux. \
- t. ! queue ! audioconvert ! projectm preset=/usr/local/share/projectM/presets preset-duration=3 mesh-size=1024,576 ! \
- identity sync=false ! videoconvert ! videorate ! video/x-raw,framerate=60/1,width=3840,height=2160 ! \
+ t. ! queue ! audioconvert ! capsfilter caps="audio/x-raw, format=S16LE, channels=2, rate=44100" ! \
+ projectm preset=/usr/local/share/projectM/presets preset-duration=3 mesh-size=1024,576 ! \
+ identity sync=false ! videoconvert ! videorate ! video/x-raw\(memory:GLMemory\),framerate=60/1,width=3840,height=2160 ! \
+  gldownload ! \
x264enc bitrate=35000 key-int-max=300 speed-preset=veryslow ! video/x-h264,stream-format=avc,alignment=au ! queue ! mux. \
mp4mux name=mux ! filesink location=render.mp4;
```
+Or converting an audio file with the nVidia optimized encoder, directly from GL memory:
+```shell
+gst-launch-1.0 -e \
+ filesrc location=input.mp3 ! \
+ decodebin ! tee name=t \
+ t. ! queue ! audioconvert ! audioresample ! \
+ capsfilter caps="audio/x-raw, format=F32LE, channels=2, rate=44100" ! \
+ avenc_aac bitrate=320000 ! queue ! mux. \
+ t. ! queue ! audioconvert ! capsfilter caps="audio/x-raw, format=S16LE, channels=2, rate=44100" ! projectm \
+ preset=/usr/local/share/projectM/presets preset-duration=3 mesh-size=1024,576 ! \
+ identity sync=false ! videoconvert ! videorate ! \
+ video/x-raw\(memory:GLMemory\),framerate=60/1,width=1920,height=1080 ! \
+ nvh264enc ! h264parse ! \
+ video/x-h264,stream-format=avc,alignment=au ! queue ! mux. \
+ mp4mux name=mux ! filesink location=render.mp4;
+```
+
Available options
```shell
@@ -200,14 +225,16 @@ Or to convert an audio file to video:
```shell
gst-launch-1.0 -e \
- filesrc location=input.mp3 ! \
+   filesrc location=input.mp3 ! \
decodebin ! tee name=t \
t. ! queue ! audioconvert ! audioresample ! \
- capsfilter caps="audio/x-raw, format=F32LE, channels=2, rate=44100" ! avenc_aac bitrate=320000 ! queue ! mux. \
- t. ! queue ! audioconvert ! projectm preset=/usr/local/share/projectM/presets texture-dir=/usr/local/share/projectM/textures preset-duration=6 mesh-size=1024,576 ! \
- identity sync=false ! videoconvert ! videorate ! video/x-raw,framerate=60/1,width=3840,height=2160 ! \
- x264enc bitrate=50000 key-int-max=200 speed-preset=veryslow ! video/x-h264,stream-format=avc,alignment=au ! queue ! mux. \
- mp4mux name=mux ! filesink location=output.mp4
+ capsfilter caps="audio/x-raw, format=F32LE, channels=2, rate=44100" ! avenc_aac bitrate=256000 ! queue ! mux. \
+ t. ! queue ! audioconvert ! capsfilter caps="audio/x-raw, format=S16LE, channels=2, rate=44100" ! \
+ projectm preset=/usr/local/share/projectM/presets preset-duration=3 mesh-size=1024,576 ! \
+ identity sync=false ! videoconvert ! videorate ! video/x-raw\(memory:GLMemory\),framerate=60/1,width=3840,height=2160 ! \
+  gldownload ! \
+ x264enc bitrate=35000 key-int-max=300 speed-preset=veryslow ! video/x-h264,stream-format=avc,alignment=au ! queue ! mux. \
+ mp4mux name=mux ! filesink location=render.mp4;
```
You may need to adjust some elements which may or may not be present in your GStreamer installation, such as x264enc, avenc_aac, etc.
@@ -220,6 +247,58 @@ gst-inspect projectm
(back to top)
+## ⚙️ Technical Details and Considerations
+
+This plugin integrates [projectM](https://github.com/projectM-visualizer/projectm) with GStreamer using an audio-driven video generation approach.
+Each video frame is rendered based on a fixed number of audio samples received on a sink pad.
+
+projectM visuals are rendered to a pooled OpenGL texture via an FBO (framebuffer object).
+The resulting textures are wrapped as video buffers and pushed on the plugin’s source pad. All rendering and buffer data stay in GPU memory, ensuring efficient performance in GL-based pipelines.
+
+The plugin synchronizes rendering to the GStreamer pipeline clock using audio PTS as the master reference. It supports both real-time playback and offline (faster-than-real-time) rendering depending on the pipeline configuration.
+
+### 🔁 Audio-Driven Video Frame Generation
+
+- A **fixed number of audio samples per video frame** determines the visualization framerate (e.g., 735 samples per frame at 44.1 kHz = ~60 FPS).
+- Audio is consumed from a **sink pad** (e.g. from `pulsesrc`, `filesrc`, or a decoded audio stream).
+- Video frame PTS is derived from the **first audio buffer PTS** or **segment event** plus accumulated samples, ensuring alignment with audio timing.
+
+### 🖼️ OpenGL Rendering and Buffer Handling
+
+- projectM output is rendered to an OpenGL texture via an FBO.
+- Textures are **pooled** and reused across frames to avoid excessive GPU memory allocation and de-allocation.
+- Each rendered texture becomes a GStreamer video buffer pushed downstream.
+- All rendering happens in GPU memory.
+
+### ⏱️ Timing and Synchronization
+
+| Timing Source | Purpose |
+|-----------------|------------------------------------------------------------|
+| Audio PTS | Drives video buffer timestamps. |
+| Sample Rate | Maps audio samples to video frames based on requested fps. |
+| GStreamer Clock | Maintains global pipeline sync. |
+| QoS Event | Triggers frame drops based on QoS reported lag. |
+
+Timestamps are independent of rendering time — they **remain aligned to audio**, even when rendering is slower or faster.
+
+---
+
+## 📉 Performance Trade-offs and Real-Time Considerations
+
+- Rendering is done in **OpenGL**, and **not offloaded to a separate thread**.
+- If frame rendering exceeds the expected framerate budget (e.g. >16.6ms at 60 FPS), the plugin **blocks audio consumption**.
+- This can lead to:
+ - **Backpressure** in the pipeline
+ - **Dropped audio samples** (as seen from sources like `pulsesrc`)
+ - **Dropped video buffers** (especially in sinks like `glimagesink`)
+ - **QoS events** that may fail to recover the stall if rendering is consistently slow
+
+> This is **not an issue** during offline rendering, where timing pressure from real-time sinks is absent.
+
+(back to top)
+
+---
+
## Contributing
diff --git a/build.sh b/build.sh
index 74a1148..3fd9d8c 100755
--- a/build.sh
+++ b/build.sh
@@ -99,7 +99,7 @@ prompt_install() {
# Print example command
echo
echo "Done! Here's an example command:"
- echo "gst-launch-1.0 audiotestsrc ! queue ! audioconvert ! projectm ! "video/x-raw,width=512,height=512,framerate=60/1" ! videoconvert ! $VIDEO_SINK sync=false"
+    echo "gst-launch-1.0 audiotestsrc ! queue ! audioconvert ! projectm ! \"video/x-raw,width=512,height=512,framerate=60/1\" ! videoconvert ! $VIDEO_SINK sync=true"
else
echo
echo "Done!"
diff --git a/convert.sh b/convert.sh
index 79bff18..c709008 100644
--- a/convert.sh
+++ b/convert.sh
@@ -154,13 +154,15 @@ gst-launch-1.0 -e \
t. ! queue ! audioconvert ! audioresample ! \
capsfilter caps="audio/x-raw, format=F32LE, channels=2, rate=44100" ! \
avenc_aac bitrate=320000 ! queue ! mux. \
- t. ! queue ! audioconvert ! projectm \
+ t. ! queue ! audioconvert ! capsfilter caps="audio/x-raw, format=S16LE, channels=2, rate=44100" ! \
+ projectm \
preset=$PRESET_PATH \
texture-dir=$TEXTURE_DIR \
preset-duration=$PRESET_DURATION \
mesh-size=${MESH_X},${MESH_Y} ! \
identity sync=false ! videoconvert ! videorate ! \
- video/x-raw,framerate=$FRAMERATE/1,width=$VIDEO_WIDTH,height=$VIDEO_HEIGHT ! \
+ video/x-raw\(memory:GLMemory\),framerate=$FRAMERATE/1,width=$VIDEO_WIDTH,height=$VIDEO_HEIGHT ! \
+ gldownload ! \
x264enc bitrate=$(($BITRATE * 1000)) key-int-max=200 speed-preset=$SPEED_PRESET ! \
video/x-h264,stream-format=avc,alignment=au ! queue ! mux. \
mp4mux name=mux ! filesink location=$OUTPUT_FILE &
diff --git a/example/CMakeLists.txt b/example/CMakeLists.txt
new file mode 100644
index 0000000..0d70a02
--- /dev/null
+++ b/example/CMakeLists.txt
@@ -0,0 +1,30 @@
+
+add_executable(dyn-pads-example
+ dynpads.c
+)
+
+target_include_directories(dyn-pads-example
+ PUBLIC
+ ${GSTREAMER_INCLUDE_DIRS}
+ ${GSTREAMER_BASE_INCLUDE_DIRS}
+ ${GSTREAMER_AUDIO_INCLUDE_DIRS}
+ ${GSTREAMER_GL_INCLUDE_DIRS}
+ ${GLIB2_INCLUDE_DIR}
+ ${CMAKE_CURRENT_SOURCE_DIR}
+)
+
+target_link_libraries(dyn-pads-example
+ PRIVATE
+ libprojectM::projectM
+ libprojectM::playlist
+ PUBLIC
+ ${GSTREAMER_LIBRARIES}
+ ${GSTREAMER_BASE_LIBRARIES}
+ ${GSTREAMER_AUDIO_LIBRARIES}
+ ${GSTREAMER_VIDEO_LIBRARIES}
+ ${GSTREAMER_GL_LIBRARIES}
+ ${GSTREAMER_PBUTILS_LIBRARIES}
+ ${GLIB2_LIBRARIES}
+ ${GLIB2_GOBJECT_LIBRARIES}
+ gstprojectm
+)
\ No newline at end of file
diff --git a/example/dynpads.c b/example/dynpads.c
new file mode 100644
index 0000000..6c5b2e6
--- /dev/null
+++ b/example/dynpads.c
@@ -0,0 +1,238 @@
+#include <gst/gst.h>
+
+#include <gst/gl/gl.h>
+
+/**
+ * Example for a "pad added" signal callback handler for handling gst
+ * demuxer-like elements.
+ *
+ * @param element Callback param for the gst element receiving the event.
+ * @param new_pad The pad being added.
+ * @param data The gst element adding the pad (e.g. demuxer).
+ */
+static void on_pad_added(GstElement *element, GstPad *new_pad, gpointer data) {
+
+  GstPad *sink_pad = NULL;
+ GstElement *downstream_element = GST_ELEMENT(data);
+ GstPadLinkReturn ret;
+ GstCaps *new_pad_caps = NULL;
+ GstStructure *new_pad_struct = NULL;
+ const gchar *new_pad_type = NULL;
+
+ g_print("Received new pad '%s' from '%s':\n", GST_PAD_NAME(new_pad),
+ GST_ELEMENT_NAME(element));
+
+ /* Check the new pad's capabilities to determine its media type */
+ new_pad_caps = gst_pad_get_current_caps(new_pad);
+ new_pad_struct = gst_caps_get_structure(new_pad_caps, 0);
+ new_pad_type = gst_structure_get_name(new_pad_struct);
+
+ /* Get the sink pad from the downstream element (either audio or video queue)
+ */
+ if (g_str_has_prefix(new_pad_type, "video/x-raw")) {
+ sink_pad = gst_element_get_static_pad(downstream_element, "sink");
+ } else if (g_str_has_prefix(new_pad_type, "audio/x-raw")) {
+ sink_pad = gst_element_get_static_pad(downstream_element, "sink");
+ } else {
+ g_print(" It has type '%s', which we don't handle. Ignoring.\n",
+ new_pad_type);
+ goto exit;
+ }
+
+ /* Check if the pads are already linked */
+ if (gst_pad_is_linked(sink_pad)) {
+ g_print(" We already linked pad %s. Ignoring.\n", GST_PAD_NAME(new_pad));
+ goto exit;
+ }
+
+ /* Link the new pad to the sink pad */
+ ret = gst_pad_link(new_pad, sink_pad);
+ if (GST_PAD_LINK_FAILED(ret)) {
+ g_print(" Type is '%s' but link failed.\n", new_pad_type);
+ } else {
+ g_print(" Link succeeded (type '%s').\n", new_pad_type);
+ }
+
+exit:
+ /* Clean up */
+ if (new_pad_caps != NULL)
+ gst_caps_unref(new_pad_caps);
+
+ if (sink_pad != NULL)
+ gst_object_unref(sink_pad);
+}
+
+/**
+ * Main function to build and run the pipeline to consume a live audio stream
+ * and render projectM to an OpenGL window in real-time.
+ *
+ * souphttpsrc location=... is-live=true ! queue ! decodebin ! audioconvert !
+ * "audio/x-raw, format=S16LE, rate=44100, channels=2, layout=interleaved" !
+ * projectm preset=... preset-duration=... mesh-size=48,32 texture-dir=... !
+ * video/x-raw(memory:GLMemory),width=1920,height=1080,framerate=60/1 ! queue
+ * leaky=downstream max-size-buffers=1 ! glimagesink sync=true
+ */
+int main(int argc, char *argv[]) {
+ GstElement *source, *demuxer, *queue, *audioconvert, *audio_capsfilter,
+ *identity, *projectm_plugin, *video_capsfilter, *sync_queue, *sink;
+ GstBus *bus;
+ GstElement *pipeline;
+
+ gst_init(&argc, &argv);
+
+ // make audio caps
+ GstCaps *audio_caps =
+ gst_caps_new_simple("audio/x-raw", "format", G_TYPE_STRING, "S16LE",
+ "rate", G_TYPE_INT, 44100, "channels", G_TYPE_INT, 2,
+ "layout", G_TYPE_STRING, "interleaved", NULL);
+
+ // make video caps
+ // todo: adjust caps as desired, keep in mind the hardware needs to be able to
+ // keep up in order for this plugin to work flawlessly.
+ GstCaps *video_caps = gst_caps_new_simple(
+ "video/x-raw", "format", G_TYPE_STRING, "RGBA", "width", G_TYPE_INT, 1920,
+ "height", G_TYPE_INT, 1080, "framerate", GST_TYPE_FRACTION, 60, 1, NULL);
+
+ // Create the GL memory feature set.
+ GstCapsFeatures *features =
+ gst_caps_features_new_single(GST_CAPS_FEATURE_MEMORY_GL_MEMORY);
+
+ // Add the GL memory feature set to the structure.
+ gst_caps_set_features(video_caps, 0, features);
+
+ // Create pipeline elements
+ source = gst_element_factory_make("souphttpsrc", "source");
+ g_object_set(source,
+ // todo: configure your stream here..
+ "location", "http://your-stream-url", "is-live", TRUE, NULL);
+
+ // basic stream buffering
+ queue = gst_element_factory_make("queue", "queue");
+
+ // decodebin to decode the stream audio format
+ demuxer = gst_element_factory_make("decodebin", "demuxer");
+  g_object_set(G_OBJECT(demuxer), "max-size-time", (guint64)100000000, NULL);
+
+ // convert the audio stream to something we can understand (if needed)
+ audioconvert = gst_element_factory_make("audioconvert", "audioconvert");
+
+ // tell pipeline which audio format we need
+ audio_capsfilter = gst_element_factory_make("capsfilter", "audio_capsfilter");
+ g_object_set(G_OBJECT(audio_capsfilter), "caps", audio_caps, NULL);
+
+  // create an identity element to provide a stream clock, since we won't get one
+ // from souphttpsrc
+ identity = gst_element_factory_make("identity", "identity");
+ g_object_set(G_OBJECT(identity), "single-segment", TRUE, "sync", TRUE, NULL);
+
+ // configure projectM plugin
+ projectm_plugin = gst_element_factory_make("projectm", "projectm");
+
+ // todo: configure your settings here..
+ g_object_set(G_OBJECT(projectm_plugin), "preset-duration", 10.0,
+ //"preset", "/your/presets/directory",
+ "mesh-size", "48,32",
+ //"texture-dir", "/your/presets-milkdrop-texture-pack-directory",
+ NULL);
+
+ // set video caps we want
+ video_capsfilter = gst_element_factory_make("capsfilter", "video_capsfilter");
+ g_object_set(G_OBJECT(video_capsfilter), "caps", video_caps, NULL);
+
+ // optional: create a queue in front of the glimagesink to throw out buffers
+  // that pile up in front of rendering; just keep the latest one, the others
+ // will most likely be late
+ sync_queue = gst_element_factory_make("queue", "sync_queue");
+ // 0 (no): The default behavior. The queue is not leaky and will block when
+ // full. 1 (upstream): The queue drops new incoming buffers when it is full.
+ // 2 (downstream): The queue drops the oldest buffers in the queue when it is
+ // full.
+ g_object_set(G_OBJECT(sync_queue), "leaky", 2, "max-size-buffers", 1, NULL);
+
+ // create sink for real-time rendering (synced to the pipeline clock)
+ sink = gst_element_factory_make("glimagesink", "sink");
+ g_object_set(G_OBJECT(sink), "sync", TRUE, NULL);
+
+ pipeline = gst_pipeline_new("test-pipeline");
+
+  if (!pipeline || !source || !demuxer || !queue || !audioconvert ||
+      !audio_capsfilter || !identity || !projectm_plugin || !video_capsfilter || !sync_queue || !sink) {
+ g_printerr("One or more elements could not be created. Exiting.\n");
+ return -1;
+ }
+
+ /* Set up the pipeline */
+ gst_bin_add_many(GST_BIN(pipeline), source, queue, demuxer, audioconvert,
+ audio_capsfilter, identity, projectm_plugin,
+ video_capsfilter, sync_queue, sink, NULL);
+
+ /* Link the elements (but not the demuxer's dynamic pad yet) */
+ if (!gst_element_link(source, queue)) {
+ g_printerr("Elements could not be linked (source to queue). Exiting.\n");
+ return -1;
+ }
+ if (!gst_element_link(queue, demuxer)) {
+ g_printerr(
+        "Elements could not be linked (queue to demuxer). Exiting.\n");
+ return -1;
+ }
+ /* not yet!
+ if (!gst_element_link(demuxer, audioconvert)) {
+ g_printerr("Elements could not be linked (demuxer, queue). Exiting.\n");
+ return -1;
+ }
+ */
+ if (!gst_element_link(audioconvert, audio_capsfilter)) {
+ g_printerr("Elements could not be linked (audioconvert to "
+ "audio_capsfilter). Exiting.\n");
+ return -1;
+ }
+ if (!gst_element_link(audio_capsfilter, identity)) {
+ g_printerr("Elements could not be linked (audio_capsfilter to identity). "
+ "Exiting.\n");
+ return -1;
+ }
+ if (!gst_element_link(identity, projectm_plugin)) {
+ g_printerr("Elements could not be linked (identity to projectm_plugin). "
+ "Exiting.\n");
+ return -1;
+ }
+ if (!gst_element_link(projectm_plugin, video_capsfilter)) {
+ g_printerr("Elements could not be linked (projectm_plugin to capsfilter). "
+ "Exiting.\n");
+ return -1;
+ }
+ if (!gst_element_link(video_capsfilter, sync_queue)) {
+ g_printerr("Elements could not be linked (video_capsfilter to sync_queue). "
+ "Exiting.\n");
+ return -1;
+ }
+ if (!gst_element_link(sync_queue, sink)) {
+ g_printerr("Elements could not be linked (sync_queue to sink). Exiting.\n");
+ return -1;
+ }
+
+ gst_caps_unref(video_caps);
+ gst_caps_unref(audio_caps);
+
+ /* Connect the "pad-added" signal */
+ g_signal_connect(demuxer, "pad-added", G_CALLBACK(on_pad_added),
+ audioconvert);
+
+ /* Set the pipeline to the PLAYING state */
+  /* NOTE(review): a GMainLoop here never returns (no bus watch quits it),
+   * making the bus wait below unreachable; block on the bus instead. */
+  gst_element_set_state(pipeline, GST_STATE_PLAYING);
+
+ /* Wait until error or EOS */
+ bus = gst_element_get_bus(pipeline);
+ gst_bus_timed_pop_filtered(bus, GST_CLOCK_TIME_NONE,
+ GST_MESSAGE_ERROR | GST_MESSAGE_EOS);
+
+ /* Clean up */
+ gst_element_set_state(pipeline, GST_STATE_NULL);
+ gst_object_unref(bus);
+ gst_object_unref(pipeline);
+
+ return 0;
+}
\ No newline at end of file
diff --git a/src/caps.c b/src/caps.c
index 09b5add..0e8bd1e 100644
--- a/src/caps.c
+++ b/src/caps.c
@@ -12,40 +12,16 @@
GST_DEBUG_CATEGORY_STATIC(gst_projectm_caps_debug);
#define GST_CAT_DEFAULT gst_projectm_caps_debug
-const gchar *get_audio_sink_cap(unsigned int type) {
- const char *format;
-
- switch (type) {
- case 0:
- format =
- GST_AUDIO_CAPS_MAKE("audio/x-raw, "
- "format = (string) " GST_AUDIO_NE(
- S16) ", "
- "layout = (string) interleaved, "
- "channels = (int) { 2 }, "
- "rate = (int) { 44100 }, "
- "channel-mask = (bitmask) { 0x0003 }");
- break;
- default:
- format = NULL;
- break;
- }
-
- return format;
+const gchar *get_audio_sink_cap() {
+ return GST_AUDIO_CAPS_MAKE("audio/x-raw, "
+ "format = (string) " GST_AUDIO_NE(
+ S16) ", "
+ "layout = (string) interleaved, "
+ "channels = (int) { 2 }, "
+ "rate = (int) { 44100 }, "
+ "channel-mask = (bitmask) { 0x0003 }");
}
-const gchar *get_video_src_cap(unsigned int type) {
- const char *format;
-
- switch (type) {
- case 0:
- format = GST_VIDEO_CAPS_MAKE("video/x-raw, format = (string) { ABGR }, "
- "framerate=(fraction)[0/1,MAX]");
- break;
- default:
- format = NULL;
- break;
- }
-
- return format;
+const gchar *get_video_src_cap() {
+ return GST_VIDEO_CAPS_MAKE_WITH_FEATURES("memory:GLMemory", "RGBA");
}
\ No newline at end of file
diff --git a/src/caps.h b/src/caps.h
index 070d281..2b3f32d 100644
--- a/src/caps.h
+++ b/src/caps.h
@@ -10,18 +10,16 @@ G_BEGIN_DECLS
/**
* @brief Get audio sink caps based on the given type.
*
- * @param type - The type of audio caps to retrieve.
* @return The audio caps format string.
*/
-const gchar *get_audio_sink_cap(unsigned int type);
+const gchar *get_audio_sink_cap();
/**
* Get video source caps based on the given type.
*
- * @param type - The type of video caps to retrieve.
* @return The video caps format string.
*/
-const gchar *get_video_src_cap(unsigned int type);
+const gchar *get_video_src_cap();
G_END_DECLS
diff --git a/src/debug.c b/src/debug.c
index 9a948aa..776fa3a 100644
--- a/src/debug.c
+++ b/src/debug.c
@@ -7,7 +7,7 @@
#include "debug.h"
-void gl_error_handler(GstGLContext *context, gpointer data) {
+void gl_error_handler(GstGLContext *context) {
GLuint error = context->gl_vtable->GetError();
switch (error) {
diff --git a/src/debug.h b/src/debug.h
index c0b3969..deaae5e 100644
--- a/src/debug.h
+++ b/src/debug.h
@@ -26,7 +26,7 @@ G_BEGIN_DECLS
* @param context The OpenGL context.
* @param data Unused.
*/
-void gl_error_handler(GstGLContext *context, gpointer data);
+void gl_error_handler(GstGLContext *context);
G_END_DECLS
diff --git a/src/gstglbaseaudiovisualizer.c b/src/gstglbaseaudiovisualizer.c
index e1f786a..cb76fd4 100644
--- a/src/gstglbaseaudiovisualizer.c
+++ b/src/gstglbaseaudiovisualizer.c
@@ -35,35 +35,50 @@
#endif
#include "gstglbaseaudiovisualizer.h"
+#include "gstpmaudiovisualizer.h"
#include
/**
* SECTION:GstGLBaseAudioVisualizer
- * @short_description: #GstAudioVisualizer subclass for injecting OpenGL
+ * @short_description: #GstPMAudioVisualizer subclass for injecting OpenGL
* resources in a pipeline
* @title: GstGLBaseAudioVisualizer
- * @see_also: #GstAudioVisualizer
+ * @see_also: #GstPMAudioVisualizer
*
- * Wrapper for GstAudioVisualizer for handling OpenGL contexts.
+ * Wrapper for GstPMAudioVisualizer for handling OpenGL contexts.
*
* #GstGLBaseAudioVisualizer handles the nitty gritty details of retrieving an
* OpenGL context. It also provides `gl_start()` and `gl_stop()` virtual methods
* that ensure an OpenGL context is available and current in the calling thread
- * for initializing and cleaning up OpenGL dependent resources. The `gl_render`
- * virtual method is used to perform OpenGL rendering.
+ * for initializing and cleaning up OpenGL resources. The `render`
+ * virtual method of the GstPMAudioVisualizer is implemented to perform OpenGL
+ * rendering. The implementer provides an implementation for fill_gl_memory to
+ * render directly to gl memory.
+ *
+ * Typical plug-in call order for implementer-provided functions:
+ * - setup (once)
+ * - gl_start (once)
+ * - fill_gl_memory (once for each frame)
+ * - gl_stop (once)
*/
#define GST_CAT_DEFAULT gst_gl_base_audio_visualizer_debug
-GST_DEBUG_CATEGORY_STATIC(GST_CAT_DEFAULT);
+GST_DEBUG_CATEGORY_STATIC(gst_gl_base_audio_visualizer_debug);
+
+#define DEFAULT_TIMESTAMP_OFFSET 0
struct _GstGLBaseAudioVisualizerPrivate {
GstGLContext *other_context;
+ GstBuffer *out_buf;
+ GstBuffer *in_audio;
gint64 n_frames; /* total frames sent */
+
gboolean gl_result;
gboolean gl_started;
GRecMutex context_lock;
+ guint64 frame_duration;
};
/* Properties */
@@ -72,7 +87,7 @@ enum { PROP_0 };
#define gst_gl_base_audio_visualizer_parent_class parent_class
G_DEFINE_ABSTRACT_TYPE_WITH_CODE(
GstGLBaseAudioVisualizer, gst_gl_base_audio_visualizer,
- GST_TYPE_AUDIO_VISUALIZER,
+ GST_TYPE_PM_AUDIO_VISUALIZER,
G_ADD_PRIVATE(GstGLBaseAudioVisualizer)
GST_DEBUG_CATEGORY_INIT(gst_gl_base_audio_visualizer_debug,
"glbaseaudiovisualizer", 0,
@@ -88,39 +103,64 @@ static void gst_gl_base_audio_visualizer_get_property(GObject *object,
GValue *value,
GParamSpec *pspec);
+/* discover gl context / display from gst */
static void gst_gl_base_audio_visualizer_set_context(GstElement *element,
GstContext *context);
+/* handle pipeline state changes */
static GstStateChangeReturn
gst_gl_base_audio_visualizer_change_state(GstElement *element,
GstStateChange transition);
-static gboolean gst_gl_base_audio_visualizer_render(GstAudioVisualizer *bscope,
- GstBuffer *audio,
- GstVideoFrame *video);
+/* renders a video frame using gl, impl for parent class
+ * GstPMAudioVisualizerClass. */
+static GstFlowReturn
+gst_gl_base_audio_visualizer_parent_render(GstPMAudioVisualizer *bscope,
+ GstBuffer *audio, GstBuffer **video,
+ GstClockTime pts);
+
+/* internal utility for resetting state on start */
static void gst_gl_base_audio_visualizer_start(GstGLBaseAudioVisualizer *glav);
+
+/* internal utility for cleaning up gl context on stop */
static void gst_gl_base_audio_visualizer_stop(GstGLBaseAudioVisualizer *glav);
-static gboolean
-gst_gl_base_audio_visualizer_decide_allocation(GstAudioVisualizer *gstav,
- GstQuery *query);
+/* gl memory pool allocation impl for parent class GstPMAudioVisualizerClass */
+static gboolean gst_gl_base_audio_visualizer_parent_decide_allocation(
+ GstPMAudioVisualizer *gstav, GstQuery *query);
+
+/* called when format changes, default empty v-impl for this class. can be
+ * overwritten by implementer. */
static gboolean
gst_gl_base_audio_visualizer_default_setup(GstGLBaseAudioVisualizer *glav);
+
+/* gl context is started and usable. called from gl thread. default empty v-impl
+ * for this class, can be overwritten by implementer. */
static gboolean
gst_gl_base_audio_visualizer_default_gl_start(GstGLBaseAudioVisualizer *glav);
+
+/* gl context is shutting down. called from gl thread. default empty v-impl for
+ * this class. can be overwritten by implementer. */
static void
gst_gl_base_audio_visualizer_default_gl_stop(GstGLBaseAudioVisualizer *glav);
-static gboolean gst_gl_base_audio_visualizer_default_gl_render(
- GstGLBaseAudioVisualizer *glav, GstBuffer *audio, GstVideoFrame *video);
+/* default empty v-impl for rendering a frame. called from gl thread. can be
+ * overwritten by implementer. */
+static gboolean gst_gl_base_audio_visualizer_default_fill_gl_memory(
+ GstGLBaseAudioVisualizer *glav, GstBuffer *in_audio, GstGLMemory *mem);
+
+/* find a valid gl context. lock must have already been acquired. */
static gboolean gst_gl_base_audio_visualizer_find_gl_context_unlocked(
GstGLBaseAudioVisualizer *glav);
-static gboolean gst_gl_base_audio_visualizer_setup(GstAudioVisualizer *gstav);
+/* called whenever the format changes, impl for parent class
+ * GstPMAudioVisualizerClass */
+static gboolean
+gst_gl_base_audio_visualizer_parent_setup(GstPMAudioVisualizer *gstav);
static void
gst_gl_base_audio_visualizer_class_init(GstGLBaseAudioVisualizerClass *klass) {
GObjectClass *gobject_class = G_OBJECT_CLASS(klass);
- GstAudioVisualizerClass *gstav_class = GST_AUDIO_VISUALIZER_CLASS(klass);
+ GstPMAudioVisualizerClass *gstav_class = GST_PM_AUDIO_VISUALIZER_CLASS(klass);
GstElementClass *element_class = GST_ELEMENT_CLASS(klass);
gobject_class->finalize = gst_gl_base_audio_visualizer_finalize;
@@ -130,30 +170,40 @@ gst_gl_base_audio_visualizer_class_init(GstGLBaseAudioVisualizerClass *klass) {
element_class->set_context =
GST_DEBUG_FUNCPTR(gst_gl_base_audio_visualizer_set_context);
- element_class->change_state =
+ gstav_class->change_state =
GST_DEBUG_FUNCPTR(gst_gl_base_audio_visualizer_change_state);
gstav_class->decide_allocation =
- GST_DEBUG_FUNCPTR(gst_gl_base_audio_visualizer_decide_allocation);
- gstav_class->setup = GST_DEBUG_FUNCPTR(gst_gl_base_audio_visualizer_setup);
+ GST_DEBUG_FUNCPTR(gst_gl_base_audio_visualizer_parent_decide_allocation);
+
+ gstav_class->setup =
+ GST_DEBUG_FUNCPTR(gst_gl_base_audio_visualizer_parent_setup);
- gstav_class->render = GST_DEBUG_FUNCPTR(gst_gl_base_audio_visualizer_render);
+ gstav_class->render =
+ GST_DEBUG_FUNCPTR(gst_gl_base_audio_visualizer_parent_render);
klass->supported_gl_api = GST_GL_API_ANY;
+
klass->gl_start =
GST_DEBUG_FUNCPTR(gst_gl_base_audio_visualizer_default_gl_start);
+
klass->gl_stop =
GST_DEBUG_FUNCPTR(gst_gl_base_audio_visualizer_default_gl_stop);
- klass->gl_render =
- GST_DEBUG_FUNCPTR(gst_gl_base_audio_visualizer_default_gl_render);
+
klass->setup = GST_DEBUG_FUNCPTR(gst_gl_base_audio_visualizer_default_setup);
+
+ klass->fill_gl_memory =
+ GST_DEBUG_FUNCPTR(gst_gl_base_audio_visualizer_default_fill_gl_memory);
}
static void gst_gl_base_audio_visualizer_init(GstGLBaseAudioVisualizer *glav) {
glav->priv = gst_gl_base_audio_visualizer_get_instance_private(glav);
glav->priv->gl_started = FALSE;
glav->priv->gl_result = TRUE;
+ glav->priv->in_audio = NULL;
+ glav->priv->out_buf = NULL;
glav->context = NULL;
+ glav->pts = 0;
g_rec_mutex_init(&glav->priv->context_lock);
gst_gl_base_audio_visualizer_start(glav);
}
@@ -174,6 +224,7 @@ static void gst_gl_base_audio_visualizer_set_property(GObject *object,
GstGLBaseAudioVisualizer *glav = GST_GL_BASE_AUDIO_VISUALIZER(object);
switch (prop_id) {
+
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID(object, prop_id, pspec);
break;
@@ -187,6 +238,7 @@ static void gst_gl_base_audio_visualizer_get_property(GObject *object,
GstGLBaseAudioVisualizer *glav = GST_GL_BASE_AUDIO_VISUALIZER(object);
switch (prop_id) {
+
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID(object, prop_id, pspec);
break;
@@ -212,8 +264,7 @@ static void gst_gl_base_audio_visualizer_set_context(GstElement *element,
if (old_display != new_display) {
gst_clear_object(&glav->context);
if (gst_gl_base_audio_visualizer_find_gl_context_unlocked(glav)) {
- // TODO does this need to be handled ?
- // gst_pad_mark_reconfigure (GST_BASE_SRC_PAD (glav));
+ gst_pad_mark_reconfigure(GST_BASE_SRC_PAD(glav));
}
}
}
@@ -266,77 +317,152 @@ static void gst_gl_base_audio_visualizer_gl_stop(GstGLContext *context,
glav->priv->gl_started = FALSE;
}
-static gboolean gst_gl_base_audio_visualizer_setup(GstAudioVisualizer *gstav) {
- GstGLBaseAudioVisualizer *glav = GST_GL_BASE_AUDIO_VISUALIZER(gstav);
- GstGLBaseAudioVisualizerClass *glav_class =
- GST_GL_BASE_AUDIO_VISUALIZER_GET_CLASS(gstav);
-
- // cascade setup to the derived plugin after gl initialization has been
- // completed
- return glav_class->setup(glav);
-}
-
-static gboolean gst_gl_base_audio_visualizer_default_gl_render(
- GstGLBaseAudioVisualizer *glav, GstBuffer *audio, GstVideoFrame *video) {
+static gboolean gst_gl_base_audio_visualizer_default_fill_gl_memory(
+ GstGLBaseAudioVisualizer *glav, GstBuffer *in_audio, GstGLMemory *mem) {
return TRUE;
}
-typedef struct {
- GstGLBaseAudioVisualizer *glav;
- GstBuffer *in_audio;
- GstVideoFrame *out_video;
-} GstGLRenderCallbackParams;
+static void _fill_gl(GstGLContext *context, GstGLBaseAudioVisualizer *glav) {
+
+ // we're inside the gl thread!
-static void
-gst_gl_base_audio_visualizer_gl_thread_render_callback(gpointer params) {
- GstGLRenderCallbackParams *cb_params = (GstGLRenderCallbackParams *)params;
GstGLBaseAudioVisualizerClass *klass =
- GST_GL_BASE_AUDIO_VISUALIZER_GET_CLASS(cb_params->glav);
+ GST_GL_BASE_AUDIO_VISUALIZER_GET_CLASS(glav);
+
+ GstPMAudioVisualizer *pmav = GST_PM_AUDIO_VISUALIZER(glav);
+
+ GstBuffer *out_buf;
+ GstVideoFrame out_video;
+
+ // GstClockTime start = gst_util_get_timestamp();
+
+ // obtain output buffer from the (GL texture backed) pool
+ gst_pm_audio_visualizer_util_prepare_output_buffer(pmav, &out_buf);
+
+ // GstClockTime after_prepare = gst_util_get_timestamp();
+
+ // map output video frame to buffer outbuf with gl flags
+ gst_video_frame_map(&out_video, &pmav->vinfo, out_buf,
+ GST_MAP_WRITE | GST_MAP_GL |
+ GST_VIDEO_FRAME_MAP_FLAG_NO_REF);
+
+ // GstClockTime after_map = gst_util_get_timestamp();
+
+ GstGLMemory *out_tex = GST_GL_MEMORY_CAST(gst_buffer_peek_memory(out_buf, 0));
+
+ GST_TRACE_OBJECT(glav, "filling gl memory %p", out_tex);
+
+ // call virtual render function with audio and video
+ glav->priv->gl_result =
+ klass->fill_gl_memory(glav, glav->priv->in_audio, out_tex);
- // inside gl thread: call virtual render function with audio and video
- cb_params->glav->priv->gl_result = klass->gl_render(
- cb_params->glav, cb_params->in_audio, cb_params->out_video);
+ gst_video_frame_unmap(&out_video);
+
+ // GstClockTime after_render = gst_util_get_timestamp();
+
+ GstGLSyncMeta *sync_meta = gst_buffer_get_gl_sync_meta(out_buf);
+ if (sync_meta)
+ gst_gl_sync_meta_set_sync_point(sync_meta, glav->context);
+
+ glav->priv->out_buf = out_buf;
+ out_buf = NULL;
+
+ /*GstClockTime end = gst_util_get_timestamp();
+
+ GstClockTime duration = end - start;
+
+ if (duration > glav->priv->frame_duration) {
+ GST_WARNING("Render GL frame took too long: %" GST_TIME_FORMAT ", prepare:
+ %" GST_TIME_FORMAT ", map: %" GST_TIME_FORMAT ", render: %" GST_TIME_FORMAT,
+ GST_TIME_ARGS(duration), GST_TIME_ARGS(after_prepare - start),
+ GST_TIME_ARGS(after_map - after_prepare), GST_TIME_ARGS(after_render -
+ after_map));
+ }
+ */
}
-static gboolean gst_gl_base_audio_visualizer_render(GstAudioVisualizer *bscope,
- GstBuffer *audio,
- GstVideoFrame *video) {
- GstGLBaseAudioVisualizer *glav = GST_GL_BASE_AUDIO_VISUALIZER(bscope);
- GstGLRenderCallbackParams cb_params;
- GstGLWindow *window;
+static GstFlowReturn gst_gl_base_audio_visualizer_fill(
+ GstPMAudioVisualizer *bscope, GstGLBaseAudioVisualizer *glav,
+ GstBuffer *audio, GstBuffer **video, GstClockTime pts) {
g_rec_mutex_lock(&glav->priv->context_lock);
+ if (G_UNLIKELY(!glav->context))
+ goto not_negotiated;
- // wrap params into cb_params struct to pass them to the GL window/thread via
- // userdata pointer
- cb_params.glav = glav;
- cb_params.in_audio = audio;
- cb_params.out_video = video;
+ /* 0 framerate and we are at the second frame, eos */
+ if (G_UNLIKELY(GST_VIDEO_INFO_FPS_N(&bscope->vinfo) == 0 &&
+ glav->priv->n_frames == 1))
+ goto eos;
- window = gst_gl_context_get_window(glav->context);
+ // the following vars are params for passing values to _fill_gl()
+ // video is mapped to gl memory
- // dispatch render call through the gl thread
- // call is blocking, accessing audio and video params from gl thread *should*
- // be safe
- gst_gl_window_send_message(
- window,
- GST_GL_WINDOW_CB(gst_gl_base_audio_visualizer_gl_thread_render_callback),
- &cb_params);
+ glav->priv->in_audio = audio != NULL ? gst_buffer_ref(audio) : NULL;
- gst_object_unref(window);
+ // make current presentation timestamp accessible before rendering
+ glav->pts = pts;
- g_rec_mutex_unlock(&glav->priv->context_lock);
+ glav->priv->frame_duration = bscope->frame_duration;
- if (glav->priv->gl_result) {
- glav->priv->n_frames++;
- } else {
- // gl error
- GST_ELEMENT_ERROR(glav, RESOURCE, NOT_FOUND,
- (("failed to render audio visualizer")),
- (("A GL error occurred")));
+ // dispatch _fill_gl to the gl thread, blocking call
+ gst_gl_context_thread_add(glav->context, (GstGLContextThreadFunc)_fill_gl,
+ glav);
+
+ // clear param refs, these pointers never owned the data
+ if (glav->priv->in_audio != NULL) {
+ gst_buffer_unref(glav->priv->in_audio);
+ glav->priv->in_audio = NULL;
}
+ *video = glav->priv->out_buf;
+ glav->priv->out_buf = NULL;
- return glav->priv->gl_result;
+ if (!glav->priv->gl_result)
+ goto gl_error;
+
+ glav->priv->n_frames++;
+
+ g_rec_mutex_unlock(&glav->priv->context_lock);
+
+ return GST_FLOW_OK;
+
+gl_error: {
+ g_rec_mutex_unlock(&glav->priv->context_lock);
+ GST_ELEMENT_ERROR(glav, RESOURCE, NOT_FOUND, (("failed to fill gl buffer")),
+ (("A GL error occurred")));
+ return GST_FLOW_ERROR;
+}
+not_negotiated: {
+ g_rec_mutex_unlock(&glav->priv->context_lock);
+ GST_ELEMENT_ERROR(glav, CORE, NEGOTIATION, (NULL),
+ (("format wasn't negotiated before get function")));
+ return GST_FLOW_NOT_NEGOTIATED;
+}
+eos: {
+ g_rec_mutex_unlock(&glav->priv->context_lock);
+ GST_DEBUG_OBJECT(glav, "eos: 0 framerate, frame %d",
+ (gint)glav->priv->n_frames);
+ return GST_FLOW_EOS;
+}
+}
+
+static gboolean
+gst_gl_base_audio_visualizer_parent_setup(GstPMAudioVisualizer *gstav) {
+ GstGLBaseAudioVisualizer *glav = GST_GL_BASE_AUDIO_VISUALIZER(gstav);
+ GstGLBaseAudioVisualizerClass *glav_class =
+ GST_GL_BASE_AUDIO_VISUALIZER_GET_CLASS(gstav);
+
+ // cascade setup to the derived plugin after gl initialization has been
+ // completed
+ return glav_class->setup(glav);
+}
+
+static GstFlowReturn
+gst_gl_base_audio_visualizer_parent_render(GstPMAudioVisualizer *bscope,
+ GstBuffer *audio, GstBuffer **video,
+ GstClockTime pts) {
+ GstGLBaseAudioVisualizer *glav = GST_GL_BASE_AUDIO_VISUALIZER(bscope);
+
+ return gst_gl_base_audio_visualizer_fill(bscope, glav, audio, video, pts);
}
static void gst_gl_base_audio_visualizer_start(GstGLBaseAudioVisualizer *glav) {
@@ -493,9 +619,8 @@ error: {
}
}
-static gboolean
-gst_gl_base_audio_visualizer_decide_allocation(GstAudioVisualizer *gstav,
- GstQuery *query) {
+static gboolean gst_gl_base_audio_visualizer_parent_decide_allocation(
+ GstPMAudioVisualizer *gstav, GstQuery *query) {
GstGLBaseAudioVisualizer *glav = GST_GL_BASE_AUDIO_VISUALIZER(gstav);
GstGLContext *context;
GstBufferPool *pool = NULL;
@@ -524,7 +649,8 @@ gst_gl_base_audio_visualizer_decide_allocation(GstAudioVisualizer *gstav,
gst_video_info_init(&vinfo);
gst_video_info_from_caps(&vinfo, caps);
size = vinfo.size;
- min = max = 0;
+ min = 0;
+ max = 0;
update_pool = FALSE;
}
@@ -536,6 +662,10 @@ gst_gl_base_audio_visualizer_decide_allocation(GstAudioVisualizer *gstav,
}
config = gst_buffer_pool_get_config(pool);
+  // TODO: expose pool config values (e.g. minimum buffer count) as properties
+ if (min < 2) {
+ min = 2;
+ }
gst_buffer_pool_config_set_params(config, caps, size, min, max);
gst_buffer_pool_config_add_option(config, GST_BUFFER_POOL_OPTION_VIDEO_META);
if (gst_query_find_allocation_meta(query, GST_GL_SYNC_META_API_TYPE, NULL))
@@ -544,6 +674,9 @@ gst_gl_base_audio_visualizer_decide_allocation(GstAudioVisualizer *gstav,
gst_buffer_pool_config_add_option(
config, GST_BUFFER_POOL_OPTION_VIDEO_GL_TEXTURE_UPLOAD_META);
+ gst_buffer_pool_config_add_option(
+ config, GST_BUFFER_POOL_OPTION_GL_TEXTURE_TARGET_2D);
+
gst_buffer_pool_set_config(pool, config);
if (update_pool)
@@ -568,10 +701,6 @@ gst_gl_base_audio_visualizer_change_state(GstElement *element,
gst_element_state_get_name(GST_STATE_TRANSITION_CURRENT(transition)),
gst_element_state_get_name(GST_STATE_TRANSITION_NEXT(transition)));
- ret = GST_ELEMENT_CLASS(parent_class)->change_state(element, transition);
- if (ret == GST_STATE_CHANGE_FAILURE)
- return ret;
-
switch (transition) {
case GST_STATE_CHANGE_READY_TO_NULL:
g_rec_mutex_lock(&glav->priv->context_lock);
diff --git a/src/gstglbaseaudiovisualizer.h b/src/gstglbaseaudiovisualizer.h
index a48781b..0ccde78 100644
--- a/src/gstglbaseaudiovisualizer.h
+++ b/src/gstglbaseaudiovisualizer.h
@@ -32,9 +32,8 @@
#ifndef __GST_GL_BASE_AUDIO_VISUALIZER_H__
#define __GST_GL_BASE_AUDIO_VISUALIZER_H__
+#include "gstpmaudiovisualizer.h"
#include
-#include
-#include
#include
typedef struct _GstGLBaseAudioVisualizer GstGLBaseAudioVisualizer;
@@ -72,12 +71,15 @@ GType gst_gl_base_audio_visualizer_get_type(void);
* The parent instance type of a base GL Audio Visualizer.
*/
struct _GstGLBaseAudioVisualizer {
- GstAudioVisualizer parent;
+ GstPMAudioVisualizer parent;
/*< public >*/
GstGLDisplay *display;
GstGLContext *context;
+ /* current buffer presentation timestamp */
+ guint64 pts;
+
/*< private >*/
gpointer _padding[GST_PADDING];
@@ -91,21 +93,29 @@ struct _GstGLBaseAudioVisualizer {
* @gl_stop: called in the GL thread to clean up the element GL state.
* @gl_render: called in the GL thread to fill the current video texture.
* @setup: called when the format changes (delegate from
- * GstAudioVisualizer.setup)
+ * GstPMAudioVisualizer.setup)
*
* The base class for OpenGL based audio visualizers.
- *
+ * Extends GstPMAudioVisualizer to add GL rendering callbacks.
+ * Handles GL context and render buffers.
*/
struct _GstGLBaseAudioVisualizerClass {
- GstAudioVisualizerClass parent_class;
+ GstPMAudioVisualizerClass parent_class;
/*< public >*/
GstGLAPI supported_gl_api;
+ /* called from gl thread once the gl context can be used for initializing gl
+ * resources */
gboolean (*gl_start)(GstGLBaseAudioVisualizer *glav);
+ /* called from gl thread when gl context is being closed for gl resource clean
+ * up */
void (*gl_stop)(GstGLBaseAudioVisualizer *glav);
- gboolean (*gl_render)(GstGLBaseAudioVisualizer *glav, GstBuffer *audio,
- GstVideoFrame *video);
+ /* called when caps have been set for the pipeline */
gboolean (*setup)(GstGLBaseAudioVisualizer *glav);
+ /* called to render each frame, in_audio is optional */
+ gboolean (*fill_gl_memory)(GstGLBaseAudioVisualizer *glav,
+ GstBuffer *in_audio, GstGLMemory *mem);
+
/*< private >*/
gpointer _padding[GST_PADDING];
};
diff --git a/src/gstpmaudiovisualizer.c b/src/gstpmaudiovisualizer.c
new file mode 100644
index 0000000..51275db
--- /dev/null
+++ b/src/gstpmaudiovisualizer.c
@@ -0,0 +1,1153 @@
+/* GStreamer
+ * Copyright (C) <2011> Stefan Kost
+ * Copyright (C) <2015> Luis de Bethencourt
+ *
+ * gstpmaudiovisualizer.c: base class for audio visualisation elements
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ */
+/**
+ * SECTION:gstpmaudiovisualizer
+ * @title: GstPMAudioVisualizer
+ * @short_description: Base class for visualizers.
+ *
+ * A base class for scopes (visualizers). It takes care of re-fitting the
+ * audio-rate to video-rate and handles renegotiation (downstream video size
+ * changes).
+ *
+ * It also provides several background shading effects. These effects are
+ * applied to a previous picture before the `render()` implementation can draw a
+ * new frame.
+ */
+
+/*
+ * The code in this file is based on
+ * GStreamer / gst-plugins-base, latest version as of 2025/05/29.
+ * gst-libs/gst/pbutils/gstaudiovisualizer.c Git Repository:
+ * https://gitlab.freedesktop.org/gstreamer/gstreamer/-/blob/main/subprojects/gst-plugins-base/gst-libs/gst/pbutils/gstaudiovisualizer.c
+ * Original copyright notice has been retained at the top of this file.
+ *
+ * The code has been modified to improve compatibility with projectM and OpenGL.
+ *
+ * - New apis for implementer-provided memory allocation and video frame
+ * buffer mapping. Used by gl plugins for mapping video frames directly to gl
+ * memory.
+ *
+ * - Main memory based video frame buffers have been removed.
+ *
+ * - Cpu based transition shaders have been removed.
+ *
+ * - Bugfix for the number of bytes flushed from the audio input buffer for a
+ * single video frame.
+ *
+ * - Bugfix for repeated qos frame drops while real-time rendering.
+ *
+ * - Uses a sample-count based approach for pts/dts timestamps instead of
+ * GstAdapter-derived timestamps.
+ *
+ * - Added more locking and fixed several race conditions.
+ *
+ * Typical plug-in call order for implementer-provided functions:
+ * - decide_allocation (once)
+ * - setup (once)
+ * - prepare_output_buffer (once for each frame)
+ * - map_output_buffer (once for each frame)
+ * - render (once for each frame)
+ */
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <string.h> /* NOTE(review): include target garbled in source - verify */
+
+#include <gst/video/video.h> /* NOTE(review): include target garbled in source - verify */
+
+#include "gstpmaudiovisualizer.h"
+#include <gst/base/gstadapter.h> /* NOTE(review): include target garbled in source - verify */
+
+GST_DEBUG_CATEGORY_STATIC(pm_audio_visualizer_debug);
+#define GST_CAT_DEFAULT (pm_audio_visualizer_debug)
+
+enum { PROP_0 };
+
+static GstBaseTransformClass *parent_class = NULL;
+static gint private_offset = 0;
+
+static void
+gst_pm_audio_visualizer_class_init(GstPMAudioVisualizerClass *klass);
+static void gst_pm_audio_visualizer_init(GstPMAudioVisualizer *scope,
+ GstPMAudioVisualizerClass *g_class);
+static void gst_pm_audio_visualizer_set_property(GObject *object, guint prop_id,
+ const GValue *value,
+ GParamSpec *pspec);
+static void gst_pm_audio_visualizer_get_property(GObject *object, guint prop_id,
+ GValue *value,
+ GParamSpec *pspec);
+static void gst_pm_audio_visualizer_dispose(GObject *object);
+
+static gboolean
+gst_pm_audio_visualizer_src_negotiate(GstPMAudioVisualizer *scope);
+static gboolean gst_pm_audio_visualizer_src_setcaps(GstPMAudioVisualizer *scope,
+ GstCaps *caps);
+static gboolean
+gst_pm_audio_visualizer_sink_setcaps(GstPMAudioVisualizer *scope,
+ GstCaps *caps);
+
+static GstFlowReturn gst_pm_audio_visualizer_chain(GstPad *pad,
+ GstObject *parent,
+ GstBuffer *buffer);
+
+static gboolean gst_pm_audio_visualizer_src_event(GstPad *pad,
+ GstObject *parent,
+ GstEvent *event);
+static gboolean gst_pm_audio_visualizer_sink_event(GstPad *pad,
+ GstObject *parent,
+ GstEvent *event);
+
+static gboolean gst_pm_audio_visualizer_src_query(GstPad *pad,
+ GstObject *parent,
+ GstQuery *query);
+
+static GstStateChangeReturn
+gst_pm_audio_visualizer_parent_change_state(GstElement *element,
+ GstStateChange transition);
+
+static GstStateChangeReturn
+gst_pm_audio_visualizer_default_change_state(GstElement *element,
+ GstStateChange transition);
+
+static gboolean
+gst_pm_audio_visualizer_do_bufferpool(GstPMAudioVisualizer *scope,
+ GstCaps *outcaps);
+
+static gboolean
+gst_pm_audio_visualizer_default_decide_allocation(GstPMAudioVisualizer *scope,
+ GstQuery *query);
+
+struct _GstPMAudioVisualizerPrivate {
+ gboolean negotiated;
+
+ GstBufferPool *pool;
+ gboolean pool_active;
+ GstAllocator *allocator;
+ GstAllocationParams params;
+ GstQuery *query;
+
+ /* pads */
+ GstPad *srcpad, *sinkpad;
+
+ GstAdapter *adapter;
+
+ GstBuffer *inbuf;
+
+ guint spf; /* samples per video frame */
+
+ /* QoS stuff */ /* with LOCK */
+ gdouble proportion;
+ /* qos: earliest time to render the next frame, the render loop will skip
+ * frames until this time */
+ GstClockTime earliest_time;
+
+ guint dropped; /* frames dropped / not dropped */
+ guint processed;
+
+ /* samples consumed, relative to the current segment. Basis for timestamps. */
+ guint64 samples_consumed;
+
+ /* configuration mutex */
+ GMutex config_lock;
+
+ GstSegment segment;
+
+ /* ready flag and condition triggered once the plugin is ready to process
+ * buffers, triggers every time a caps event is processed */
+ GCond ready_cond;
+ gboolean ready;
+
+ /* have src caps been setup */
+ gboolean src_ready;
+
+ /* have sink caps been setup */
+ gboolean sink_ready;
+
+ /* clock timestamp pts offset, either from first audio buffer pts or segment
+ * event */
+ gboolean offset_initialized;
+ GstClockTime clock_offset_pts;
+};
+
+/* base class */
+
+GType gst_pm_audio_visualizer_get_type(void) {
+ static gsize audio_visualizer_type = 0;
+
+ if (g_once_init_enter(&audio_visualizer_type)) {
+ static const GTypeInfo audio_visualizer_info = {
+ sizeof(GstPMAudioVisualizerClass),
+ NULL,
+ NULL,
+ (GClassInitFunc)gst_pm_audio_visualizer_class_init,
+ NULL,
+ NULL,
+ sizeof(GstPMAudioVisualizer),
+ 0,
+ (GInstanceInitFunc)gst_pm_audio_visualizer_init,
+ };
+ GType _type;
+
+ /* TODO: rename when exporting it as a library */
+ _type =
+ g_type_register_static(GST_TYPE_ELEMENT, "GstPMAudioVisualizer",
+ &audio_visualizer_info, G_TYPE_FLAG_ABSTRACT);
+
+ private_offset =
+ g_type_add_instance_private(_type, sizeof(GstPMAudioVisualizerPrivate));
+
+ g_once_init_leave(&audio_visualizer_type, _type);
+ }
+ return (GType)audio_visualizer_type;
+}
+
+static inline GstPMAudioVisualizerPrivate *
+gst_pm_audio_visualizer_get_instance_private(GstPMAudioVisualizer *self) {
+ return (G_STRUCT_MEMBER_P(self, private_offset));
+}
+
+static void
+gst_pm_audio_visualizer_class_init(GstPMAudioVisualizerClass *klass) {
+ GObjectClass *gobject_class = (GObjectClass *)klass;
+ GstElementClass *element_class = (GstElementClass *)klass;
+
+ if (private_offset != 0)
+ g_type_class_adjust_private_offset(klass, &private_offset);
+
+ parent_class = g_type_class_peek_parent(klass);
+
+ GST_DEBUG_CATEGORY_INIT(pm_audio_visualizer_debug, "pmaudiovisualizer", 0,
+ "projectm audio visualisation base class");
+
+ gobject_class->set_property = gst_pm_audio_visualizer_set_property;
+ gobject_class->get_property = gst_pm_audio_visualizer_get_property;
+ gobject_class->dispose = gst_pm_audio_visualizer_dispose;
+
+ element_class->change_state =
+ GST_DEBUG_FUNCPTR(gst_pm_audio_visualizer_parent_change_state);
+
+ klass->change_state =
+ GST_DEBUG_FUNCPTR(gst_pm_audio_visualizer_default_change_state);
+
+ klass->decide_allocation =
+ GST_DEBUG_FUNCPTR(gst_pm_audio_visualizer_default_decide_allocation);
+}
+
+static void gst_pm_audio_visualizer_init(GstPMAudioVisualizer *scope,
+ GstPMAudioVisualizerClass *g_class) {
+ GstPadTemplate *pad_template;
+
+ scope->priv = gst_pm_audio_visualizer_get_instance_private(scope);
+
+ /* create the sink and src pads */
+ pad_template =
+ gst_element_class_get_pad_template(GST_ELEMENT_CLASS(g_class), "sink");
+ g_return_if_fail(pad_template != NULL);
+ scope->priv->sinkpad = gst_pad_new_from_template(pad_template, "sink");
+ gst_pad_set_chain_function(scope->priv->sinkpad,
+ GST_DEBUG_FUNCPTR(gst_pm_audio_visualizer_chain));
+ gst_pad_set_event_function(
+ scope->priv->sinkpad,
+ GST_DEBUG_FUNCPTR(gst_pm_audio_visualizer_sink_event));
+ gst_element_add_pad(GST_ELEMENT(scope), scope->priv->sinkpad);
+
+ pad_template =
+ gst_element_class_get_pad_template(GST_ELEMENT_CLASS(g_class), "src");
+ g_return_if_fail(pad_template != NULL);
+ scope->priv->srcpad = gst_pad_new_from_template(pad_template, "src");
+ gst_pad_set_event_function(
+ scope->priv->srcpad,
+ GST_DEBUG_FUNCPTR(gst_pm_audio_visualizer_src_event));
+ gst_pad_set_query_function(
+ scope->priv->srcpad,
+ GST_DEBUG_FUNCPTR(gst_pm_audio_visualizer_src_query));
+ gst_element_add_pad(GST_ELEMENT(scope), scope->priv->srcpad);
+ scope->priv->adapter = gst_adapter_new();
+ scope->priv->inbuf = gst_buffer_new();
+ g_cond_init(&scope->priv->ready_cond);
+
+ scope->priv->dropped = 0;
+ scope->priv->earliest_time = 0;
+ scope->priv->processed = 0;
+ scope->priv->samples_consumed = 0;
+ scope->priv->src_ready = FALSE;
+ scope->priv->sink_ready = FALSE;
+ scope->priv->ready = FALSE;
+ scope->priv->offset_initialized = FALSE;
+ scope->priv->clock_offset_pts = GST_CLOCK_TIME_NONE;
+
+ /* properties */
+
+ /* reset the initial video state */
+ gst_video_info_init(&scope->vinfo);
+ scope->frame_duration = GST_CLOCK_TIME_NONE;
+
+ /* reset the initial state */
+ gst_audio_info_init(&scope->ainfo);
+ gst_video_info_init(&scope->vinfo);
+
+ g_mutex_init(&scope->priv->config_lock);
+}
+
+static void gst_pm_audio_visualizer_set_property(GObject *object, guint prop_id,
+ const GValue *value,
+ GParamSpec *pspec) {
+ GstPMAudioVisualizer *scope = GST_PM_AUDIO_VISUALIZER(object);
+
+ switch (prop_id) {
+ default:
+ G_OBJECT_WARN_INVALID_PROPERTY_ID(object, prop_id, pspec);
+ break;
+ }
+}
+
+static void gst_pm_audio_visualizer_get_property(GObject *object, guint prop_id,
+ GValue *value,
+ GParamSpec *pspec) {
+ GstPMAudioVisualizer *scope = GST_PM_AUDIO_VISUALIZER(object);
+
+ switch (prop_id) {
+ default:
+ G_OBJECT_WARN_INVALID_PROPERTY_ID(object, prop_id, pspec);
+ break;
+ }
+}
+
+static void gst_pm_audio_visualizer_dispose(GObject *object) {
+ GstPMAudioVisualizer *scope = GST_PM_AUDIO_VISUALIZER(object);
+
+ if (scope->priv->adapter) {
+ g_object_unref(scope->priv->adapter);
+ scope->priv->adapter = NULL;
+ }
+ if (scope->priv->inbuf) {
+ gst_buffer_unref(scope->priv->inbuf);
+ scope->priv->inbuf = NULL;
+ }
+ if (scope->priv->config_lock.p) {
+ g_mutex_clear(&scope->priv->config_lock);
+ scope->priv->config_lock.p = NULL;
+ }
+ if (scope->priv->ready_cond.p) {
+ g_cond_clear(&scope->priv->ready_cond);
+ scope->priv->ready_cond.p = NULL;
+ }
+
+ G_OBJECT_CLASS(parent_class)->dispose(object);
+}
+
+static void
+gst_pm_audio_visualizer_reset_unlocked(GstPMAudioVisualizer *scope) {
+
+ GST_OBJECT_LOCK(scope);
+ gst_adapter_clear(scope->priv->adapter);
+ gst_segment_init(&scope->priv->segment, GST_FORMAT_UNDEFINED);
+
+ scope->priv->proportion = 1.0;
+ scope->priv->earliest_time = 0;
+ scope->priv->dropped = 0;
+ scope->priv->processed = 0;
+ scope->priv->samples_consumed = 0;
+ GST_OBJECT_UNLOCK(scope);
+}
+
+/* */
+static gboolean gst_pm_audio_visualizer_do_setup(GstPMAudioVisualizer *scope) {
+
+ GstPMAudioVisualizerClass *klass =
+ GST_PM_AUDIO_VISUALIZER_CLASS(G_OBJECT_GET_CLASS(scope));
+
+ GST_OBJECT_LOCK(scope);
+ scope->priv->earliest_time = 0;
+ GST_OBJECT_UNLOCK(scope);
+
+ g_mutex_lock(&scope->priv->config_lock);
+
+ scope->priv->spf = gst_util_uint64_scale_int(
+ GST_AUDIO_INFO_RATE(&scope->ainfo), GST_VIDEO_INFO_FPS_D(&scope->vinfo),
+ GST_VIDEO_INFO_FPS_N(&scope->vinfo));
+ scope->req_spf = scope->priv->spf;
+
+ g_mutex_unlock(&scope->priv->config_lock);
+
+ if (klass->setup && !klass->setup(scope))
+ return FALSE;
+
+ GST_INFO_OBJECT(
+ scope, "video: dimension %dx%d, framerate %d/%d",
+ GST_VIDEO_INFO_WIDTH(&scope->vinfo), GST_VIDEO_INFO_HEIGHT(&scope->vinfo),
+ GST_VIDEO_INFO_FPS_N(&scope->vinfo), GST_VIDEO_INFO_FPS_D(&scope->vinfo));
+
+ GST_INFO_OBJECT(scope, "audio: rate %d, channels: %d, bpf: %d",
+ GST_AUDIO_INFO_RATE(&scope->ainfo),
+ GST_AUDIO_INFO_CHANNELS(&scope->ainfo),
+ GST_AUDIO_INFO_BPF(&scope->ainfo));
+
+ GST_INFO_OBJECT(scope, "blocks: spf %u, req_spf %u", scope->priv->spf,
+ scope->req_spf);
+
+ g_mutex_lock(&scope->priv->config_lock);
+ scope->priv->ready = TRUE;
+ g_cond_broadcast(&scope->priv->ready_cond);
+ g_mutex_unlock(&scope->priv->config_lock);
+
+ return TRUE;
+}
+
+static gboolean
+gst_pm_audio_visualizer_sink_setcaps(GstPMAudioVisualizer *scope,
+ GstCaps *caps) {
+ GstAudioInfo info;
+
+ if (!gst_audio_info_from_caps(&info, caps))
+ goto wrong_caps;
+
+ g_mutex_lock(&scope->priv->config_lock);
+ scope->ainfo = info;
+ g_mutex_unlock(&scope->priv->config_lock);
+
+ GST_DEBUG_OBJECT(scope, "audio: channels %d, rate %d",
+ GST_AUDIO_INFO_CHANNELS(&info), GST_AUDIO_INFO_RATE(&info));
+
+ if (!gst_pm_audio_visualizer_src_negotiate(scope)) {
+ goto not_negotiated;
+ }
+
+ g_mutex_lock(&scope->priv->config_lock);
+ scope->priv->sink_ready = TRUE;
+ g_mutex_unlock(&scope->priv->config_lock);
+
+ if (scope->priv->src_ready) {
+ gst_pm_audio_visualizer_do_setup(scope);
+ }
+
+ return TRUE;
+
+ /* Errors */
+wrong_caps: {
+ GST_WARNING_OBJECT(scope, "could not parse caps");
+ return FALSE;
+}
+not_negotiated: {
+ GST_WARNING_OBJECT(scope, "failed to negotiate");
+ return FALSE;
+}
+}
+
+static gboolean gst_pm_audio_visualizer_src_setcaps(GstPMAudioVisualizer *scope,
+ GstCaps *caps) {
+ GstVideoInfo info;
+ gboolean res;
+
+ if (!gst_video_info_from_caps(&info, caps))
+ goto wrong_caps;
+
+ g_mutex_lock(&scope->priv->config_lock);
+
+ scope->vinfo = info;
+
+ scope->frame_duration = gst_util_uint64_scale_int(
+ GST_SECOND, GST_VIDEO_INFO_FPS_D(&info), GST_VIDEO_INFO_FPS_N(&info));
+
+ g_mutex_unlock(&scope->priv->config_lock);
+
+ gst_pad_set_caps(scope->priv->srcpad, caps);
+
+ /* find a pool for the negotiated caps now */
+ res = gst_pm_audio_visualizer_do_bufferpool(scope, caps);
+ gst_caps_unref(caps);
+
+ g_mutex_lock(&scope->priv->config_lock);
+ scope->priv->src_ready = TRUE;
+ g_mutex_unlock(&scope->priv->config_lock);
+ if (scope->priv->sink_ready) {
+ if (!gst_pm_audio_visualizer_do_setup(scope)) {
+ goto setup_failed;
+ }
+ }
+
+ return res;
+
+ /* ERRORS */
+wrong_caps: {
+ gst_caps_unref(caps);
+ GST_DEBUG_OBJECT(scope, "error parsing caps");
+ return FALSE;
+}
+
+setup_failed: {
+ GST_WARNING_OBJECT(scope, "failed to set up");
+ return FALSE;
+}
+}
+
+static gboolean
+gst_pm_audio_visualizer_src_negotiate(GstPMAudioVisualizer *scope) {
+ GstCaps *othercaps, *target;
+ GstStructure *structure;
+ GstCaps *templ;
+ gboolean ret;
+
+ templ = gst_pad_get_pad_template_caps(scope->priv->srcpad);
+
+ GST_DEBUG_OBJECT(scope, "performing negotiation");
+
+ /* see what the peer can do */
+ othercaps = gst_pad_peer_query_caps(scope->priv->srcpad, NULL);
+ if (othercaps) {
+ target = gst_caps_intersect(othercaps, templ);
+ gst_caps_unref(othercaps);
+ gst_caps_unref(templ);
+
+ if (gst_caps_is_empty(target))
+ goto no_format;
+
+ target = gst_caps_truncate(target);
+ } else {
+ target = templ;
+ }
+
+ target = gst_caps_make_writable(target);
+ structure = gst_caps_get_structure(target, 0);
+ gst_structure_fixate_field_nearest_int(structure, "width", 320);
+ gst_structure_fixate_field_nearest_int(structure, "height", 200);
+ gst_structure_fixate_field_nearest_fraction(structure, "framerate", 25, 1);
+ if (gst_structure_has_field(structure, "pixel-aspect-ratio"))
+ gst_structure_fixate_field_nearest_fraction(structure, "pixel-aspect-ratio",
+ 1, 1);
+
+ target = gst_caps_fixate(target);
+
+ GST_DEBUG_OBJECT(scope, "final caps are %" GST_PTR_FORMAT, target);
+
+ ret = gst_pm_audio_visualizer_src_setcaps(scope, target);
+
+ return ret;
+
+no_format: {
+ gst_caps_unref(target);
+ return FALSE;
+}
+}
+
+/* takes ownership of the pool, allocator and query */
+static gboolean gst_pm_audio_visualizer_set_allocation(
+ GstPMAudioVisualizer *scope, GstBufferPool *pool, GstAllocator *allocator,
+ const GstAllocationParams *params, GstQuery *query) {
+ GstAllocator *oldalloc;
+ GstBufferPool *oldpool;
+ GstQuery *oldquery;
+ GstPMAudioVisualizerPrivate *priv = scope->priv;
+
+ GST_OBJECT_LOCK(scope);
+ oldpool = priv->pool;
+ priv->pool = pool;
+ priv->pool_active = FALSE;
+
+ oldalloc = priv->allocator;
+ priv->allocator = allocator;
+
+ oldquery = priv->query;
+ priv->query = query;
+
+ if (params)
+ priv->params = *params;
+ else
+ gst_allocation_params_init(&priv->params);
+ GST_OBJECT_UNLOCK(scope);
+
+ if (oldpool) {
+ GST_DEBUG_OBJECT(scope, "deactivating old pool %p", oldpool);
+ gst_buffer_pool_set_active(oldpool, FALSE);
+ gst_object_unref(oldpool);
+ }
+ if (oldalloc) {
+ gst_object_unref(oldalloc);
+ }
+ if (oldquery) {
+ gst_query_unref(oldquery);
+ }
+ return TRUE;
+}
+
+static gboolean
+gst_pm_audio_visualizer_do_bufferpool(GstPMAudioVisualizer *scope,
+ GstCaps *outcaps) {
+ GstQuery *query;
+ gboolean result = TRUE;
+ GstBufferPool *pool = NULL;
+ GstPMAudioVisualizerClass *klass;
+ GstAllocator *allocator;
+ GstAllocationParams params;
+
+ /* not passthrough, we need to allocate */
+ /* find a pool for the negotiated caps now */
+ GST_DEBUG_OBJECT(scope, "doing allocation query");
+ query = gst_query_new_allocation(outcaps, TRUE);
+
+ if (!gst_pad_peer_query(scope->priv->srcpad, query)) {
+ /* not a problem, we use the query defaults */
+ GST_DEBUG_OBJECT(scope, "allocation query failed");
+ }
+
+ klass = GST_PM_AUDIO_VISUALIZER_GET_CLASS(scope);
+
+ GST_DEBUG_OBJECT(scope, "calling decide_allocation");
+ g_assert(klass->decide_allocation != NULL);
+ result = klass->decide_allocation(scope, query);
+
+ GST_DEBUG_OBJECT(scope, "ALLOCATION (%d) params: %" GST_PTR_FORMAT, result,
+ query);
+
+ if (!result)
+ goto no_decide_allocation;
+
+ /* we got configuration from our peer or the decide_allocation method,
+ * parse them */
+ if (gst_query_get_n_allocation_params(query) > 0) {
+ gst_query_parse_nth_allocation_param(query, 0, &allocator, ¶ms);
+ } else {
+ allocator = NULL;
+ gst_allocation_params_init(¶ms);
+ }
+
+ if (gst_query_get_n_allocation_pools(query) > 0)
+ gst_query_parse_nth_allocation_pool(query, 0, &pool, NULL, NULL, NULL);
+
+ /* now store */
+ result = gst_pm_audio_visualizer_set_allocation(scope, pool, allocator,
+ ¶ms, query);
+
+ return result;
+
+ /* Errors */
+no_decide_allocation: {
+ GST_WARNING_OBJECT(scope, "Subclass failed to decide allocation");
+ gst_query_unref(query);
+
+ return result;
+}
+}
+
+static gboolean
+gst_pm_audio_visualizer_default_decide_allocation(GstPMAudioVisualizer *scope,
+ GstQuery *query) {
+ /* removed main memory pool implementation. This vmethod is overridden for
+ * using gl memory by gstglbaseaudiovisualizer. */
+ g_error("vmethod gst_pm_audio_visualizer_default_decide_allocation is not "
+ "implemented");
+}
+
+GstFlowReturn
+gst_pm_audio_visualizer_util_prepare_output_buffer(GstPMAudioVisualizer *scope,
+ GstBuffer **outbuf) {
+ GstPMAudioVisualizerPrivate *priv;
+
+ priv = scope->priv;
+
+ g_assert(priv->pool != NULL);
+
+ /* we can't reuse the input buffer */
+ if (!priv->pool_active) {
+ GST_DEBUG_OBJECT(scope, "setting pool %p active", priv->pool);
+ if (!gst_buffer_pool_set_active(priv->pool, TRUE))
+ goto activate_failed;
+ priv->pool_active = TRUE;
+ }
+ GST_DEBUG_OBJECT(scope, "using pool alloc");
+
+ return gst_buffer_pool_acquire_buffer(priv->pool, outbuf, NULL);
+
+ /* ERRORS */
+activate_failed: {
+ GST_ELEMENT_ERROR(scope, RESOURCE, SETTINGS,
+ ("failed to activate bufferpool"),
+ ("failed to activate bufferpool"));
+ return GST_FLOW_ERROR;
+}
+}
+
+static GstFlowReturn gst_pm_audio_visualizer_chain(GstPad *pad,
+ GstObject *parent,
+ GstBuffer *buffer) {
+ GstFlowReturn ret = GST_FLOW_OK;
+ GstPMAudioVisualizer *scope = GST_PM_AUDIO_VISUALIZER(parent);
+ GstPMAudioVisualizerClass *klass;
+ guint64 ts;
+ guint avail, sbpf;
+  // databuf is a temporary buffer holding one video frame's worth of audio
+  // data, used only for copying out of the adapter
+  // inbuf is a plugin-scoped buffer holding a copy of that one frame's worth
+  // of audio data from the adapter, handed on for processing
+ GstBuffer *databuf, *inbuf;
+ gint bpf;
+
+ klass = GST_PM_AUDIO_VISUALIZER_CLASS(G_OBJECT_GET_CLASS(scope));
+
+ // ensure caps have been setup for sink and src pads, and plugin init code is
+ // done
+ g_mutex_lock(&scope->priv->config_lock);
+ while (!scope->priv->ready) {
+ g_cond_wait(&scope->priv->ready_cond, &scope->priv->config_lock);
+ }
+ g_mutex_unlock(&scope->priv->config_lock);
+
+ /*
+ if (!scope->priv->first_buffer) {
+ GstPoll *poll;
+ poll = gst_poll_new_timer();
+
+ // Wait for 1 second
+ gst_poll_wait(poll, 1 * GST_SECOND);
+
+ gst_poll_free(poll);
+ scope->priv->first_buffer = TRUE;
+ }*/
+
+ if (buffer == NULL) {
+ return GST_FLOW_OK;
+ }
+
+ /* remember pts timestamp of the first audio buffer as stream clock offset
+ * timestamp */
+ g_mutex_lock(&scope->priv->config_lock);
+ if (!scope->priv->offset_initialized) {
+ scope->priv->offset_initialized = TRUE;
+ scope->priv->clock_offset_pts = GST_BUFFER_PTS(buffer);
+
+ GstClock *clock = gst_element_get_clock(GST_ELEMENT(scope));
+ GstClockTime running_time = gst_clock_get_time(clock) -
+ gst_element_get_base_time(GST_ELEMENT(scope));
+
+ GST_DEBUG_OBJECT(scope,
+ "Buffer ts: %" GST_TIME_FORMAT
+ ", running_time: %" GST_TIME_FORMAT,
+ GST_TIME_ARGS(scope->priv->clock_offset_pts),
+ GST_TIME_ARGS(running_time));
+ }
+ g_mutex_unlock(&scope->priv->config_lock);
+
+ /* resync on DISCONT */
+ if (GST_BUFFER_FLAG_IS_SET(buffer, GST_BUFFER_FLAG_DISCONT)) {
+ gst_adapter_clear(scope->priv->adapter);
+ }
+
+ /* Make sure have an output format */
+ if (gst_pad_check_reconfigure(scope->priv->srcpad)) {
+ if (!gst_pm_audio_visualizer_src_negotiate(scope)) {
+ gst_pad_mark_reconfigure(scope->priv->srcpad);
+ goto not_negotiated;
+ }
+ }
+
+ bpf = GST_AUDIO_INFO_BPF(&scope->ainfo);
+
+ if (bpf == 0) {
+ ret = GST_FLOW_NOT_NEGOTIATED;
+ goto beach;
+ }
+
+ GST_TRACE_OBJECT(scope, "Chain func pushing %lu bytes to adapter",
+ gst_buffer_get_size(buffer));
+
+ gst_adapter_push(scope->priv->adapter, buffer);
+
+ g_mutex_lock(&scope->priv->config_lock);
+
+ /* this is what we want */
+ /* number of audio bytes to process for one video frame */
+ /* samples per video frame * audio bytes per frame for both channels */
+ sbpf = scope->req_spf * bpf;
+
+ inbuf = scope->priv->inbuf;
+ /* original code FIXME: the timestamp in the adapter would be different - this
+ * should be fixed now by deriving timestamps from the number of samples
+ * consumed. */
+ gst_buffer_copy_into(inbuf, buffer, GST_BUFFER_COPY_METADATA, 0, -1);
+
+ /* this is what we have */
+ avail = gst_adapter_available(scope->priv->adapter);
+ // GST_LOG_OBJECT(scope, "avail: %u, bpf: %u", avail, sbpf);
+ while (avail >= sbpf) {
+ GstBuffer *outbuf;
+
+ GstClockTime start = gst_util_get_timestamp();
+
+ /* calculate timestamp based on audio input samples already processed to
+ * avoid clock drift */
+ ts = scope->priv->clock_offset_pts +
+ gst_util_uint64_scale_int(scope->priv->samples_consumed, GST_SECOND,
+ GST_AUDIO_INFO_RATE(&scope->ainfo));
+
+ scope->priv->samples_consumed += scope->req_spf;
+
+ /* check for QoS, don't compute buffers that are known to be late */
+ if (GST_CLOCK_TIME_IS_VALID(ts)) {
+ GstClockTime earliest_time;
+ gdouble proportion;
+ guint64 qostime;
+
+ qostime = gst_segment_to_running_time(&scope->priv->segment,
+ GST_FORMAT_TIME, ts) +
+ scope->frame_duration;
+
+ earliest_time = scope->priv->earliest_time;
+ proportion = scope->priv->proportion;
+
+ if (scope->priv->segment.format != GST_FORMAT_TIME) {
+ GST_WARNING_OBJECT(scope,
+ "Segment format not TIME, skipping QoS checks");
+ } else if (GST_CLOCK_TIME_IS_VALID(earliest_time) &&
+ qostime <= earliest_time) {
+ GstClockTime stream_time, jitter;
+ GstMessage *qos_msg;
+
+ GST_DEBUG_OBJECT(scope,
+ "QoS: skip ts: %" GST_TIME_FORMAT
+ ", earliest: %" GST_TIME_FORMAT,
+ GST_TIME_ARGS(qostime), GST_TIME_ARGS(earliest_time));
+
+ ++scope->priv->dropped;
+ stream_time = gst_segment_to_stream_time(&scope->priv->segment,
+ GST_FORMAT_TIME, ts);
+ jitter = GST_CLOCK_DIFF(qostime, earliest_time);
+ qos_msg =
+ gst_message_new_qos(GST_OBJECT(scope), FALSE, qostime, stream_time,
+ ts, GST_BUFFER_DURATION(buffer));
+ gst_message_set_qos_values(qos_msg, jitter, proportion, 1000000);
+ gst_message_set_qos_stats(qos_msg, GST_FORMAT_BUFFERS,
+ scope->priv->processed, scope->priv->dropped);
+ gst_element_post_message(GST_ELEMENT(scope), qos_msg);
+
+ goto skip;
+ }
+ }
+
+ /* map ts via segment for general use */
+ ts = gst_segment_to_stream_time(&scope->priv->segment, GST_FORMAT_TIME, ts);
+
+ ++scope->priv->processed;
+
+ /* recheck as the value could have changed */
+ sbpf = scope->req_spf * bpf;
+
+ /* no buffer allocated, we don't care why. */
+ if (ret != GST_FLOW_OK)
+ break;
+
+ /* sync controlled properties */
+ if (GST_CLOCK_TIME_IS_VALID(ts))
+ gst_object_sync_values(GST_OBJECT(scope), ts);
+
+ /* this can fail as the data size we need could have changed */
+ if (!(databuf = gst_adapter_get_buffer(scope->priv->adapter, sbpf)))
+ break;
+
+ /* place sbpf number of bytes of audio data into inbuf */
+ gst_buffer_remove_all_memory(inbuf);
+ gst_buffer_copy_into(inbuf, databuf, GST_BUFFER_COPY_MEMORY, 0, sbpf);
+ gst_buffer_unref(databuf);
+
+ /* call class->render() vmethod */
+ g_mutex_unlock(&scope->priv->config_lock);
+
+ GstClockTime before_render = gst_util_get_timestamp();
+
+ if (klass->render) {
+ ret = klass->render(scope, inbuf, &outbuf, ts);
+ if (ret != GST_FLOW_OK) {
+ goto beach;
+ }
+ }
+
+ GstClockTime after_render = gst_util_get_timestamp();
+
+ if (gst_buffer_get_size(outbuf) == 0) {
+ GST_WARNING_OBJECT(scope, "Empty or invalid buffer, dropping.");
+ return GST_FLOW_OK;
+ }
+
+ /* populate timestamps after rendering so they can't be changed by accident
+ */
+ GST_TRACE_OBJECT(scope, "Pushing buffer to src with %d bytes avail", avail);
+ GST_BUFFER_PTS(outbuf) = ts;
+ GST_BUFFER_DTS(outbuf) = ts;
+ GST_BUFFER_DURATION(outbuf) = scope->frame_duration;
+
+ GstClockTime end = gst_util_get_timestamp();
+ GstClockTime duration = end - start;
+
+ if (duration > scope->frame_duration) {
+ GST_WARNING("Generating frame from audio took too long: %" GST_TIME_FORMAT
+ ", before_render: %" GST_TIME_FORMAT
+ ", render: %" GST_TIME_FORMAT,
+ GST_TIME_ARGS(duration), GST_TIME_ARGS(before_render - start),
+ GST_TIME_ARGS(after_render - before_render));
+ }
+
+ ret = gst_pad_push(scope->priv->srcpad, outbuf);
+ if (ret != GST_FLOW_OK) {
+ GST_WARNING_OBJECT(scope, "Failed to push buffer to pad");
+ }
+ outbuf = NULL;
+ g_mutex_lock(&scope->priv->config_lock);
+
+ skip:
+ /* recheck as the value could have changed */
+ sbpf = scope->req_spf * bpf;
+ // GST_LOG_OBJECT(scope, "avail: %u, bpf: %u", avail, sbpf);
+ /* we want to take less or more, depending on spf : req_spf */
+ if (avail - sbpf >= sbpf) {
+ // enough audio data for more frames is available
+ gst_adapter_unmap(scope->priv->adapter);
+ gst_adapter_flush(scope->priv->adapter, sbpf);
+ } else if (avail >= sbpf) {
+ // was just enough audio data for one frame
+ /* just flush a bit and stop */
+ // rendering. seems like a bug in the original code
+ // gst_adapter_flush(scope->priv->adapter, (avail - sbpf));
+
+ // instead just flush one video frame worth of audio data from the buffer
+ // and stop
+ gst_adapter_unmap(scope->priv->adapter);
+ gst_adapter_flush(scope->priv->adapter, sbpf);
+ break;
+ }
+ avail = gst_adapter_available(scope->priv->adapter);
+
+ if (ret != GST_FLOW_OK)
+ break;
+ }
+
+ g_mutex_unlock(&scope->priv->config_lock);
+
+beach:
+ return ret;
+
+ /* ERRORS */
+not_negotiated: {
+ GST_DEBUG_OBJECT(scope, "Failed to renegotiate");
+ return GST_FLOW_NOT_NEGOTIATED;
+}
+}
+
+static gboolean gst_pm_audio_visualizer_src_event(GstPad *pad,
+ GstObject *parent,
+ GstEvent *event) {
+ gboolean res;
+ GstPMAudioVisualizer *scope;
+
+ scope = GST_PM_AUDIO_VISUALIZER(parent);
+
+ switch (GST_EVENT_TYPE(event)) {
+ case GST_EVENT_QOS: {
+ gdouble proportion;
+ GstClockTimeDiff diff;
+ GstClockTime timestamp;
+
+    gst_event_parse_qos(event, NULL, &proportion, &diff, &timestamp);
+
+ /* save stuff for the _chain() function */
+ g_mutex_lock(&scope->priv->config_lock);
+ scope->priv->proportion = proportion;
+ if (diff > 0)
+ /* we're late, this is a good estimate for next displayable
+ * frame (see part-qos.txt) */
+ scope->priv->earliest_time =
+ timestamp + MIN(3 * diff, GST_SECOND) + scope->frame_duration;
+ else
+ scope->priv->earliest_time = timestamp + diff;
+ g_mutex_unlock(&scope->priv->config_lock);
+
+ res = gst_pad_push_event(scope->priv->sinkpad, event);
+ break;
+ }
+ case GST_EVENT_RECONFIGURE:
+ /* don't forward */
+ gst_event_unref(event);
+ res = TRUE;
+ break;
+ default:
+ res = gst_pad_event_default(pad, parent, event);
+ break;
+ }
+
+ return res;
+}
+
+static gboolean gst_pm_audio_visualizer_sink_event(GstPad *pad,
+ GstObject *parent,
+ GstEvent *event) {
+ gboolean res;
+ GstPMAudioVisualizer *scope;
+
+ scope = GST_PM_AUDIO_VISUALIZER(parent);
+
+ switch (GST_EVENT_TYPE(event)) {
+ case GST_EVENT_CAPS: {
+ GstCaps *caps;
+
+ gst_event_parse_caps(event, &caps);
+ res = gst_pm_audio_visualizer_sink_setcaps(scope, caps);
+ gst_event_unref(event);
+ break;
+ }
+ case GST_EVENT_FLUSH_STOP:
+ g_mutex_lock(&scope->priv->config_lock);
+ gst_pm_audio_visualizer_reset_unlocked(scope);
+ g_mutex_unlock(&scope->priv->config_lock);
+ res = gst_pad_push_event(scope->priv->srcpad, event);
+ break;
+ case GST_EVENT_SEGMENT: {
+ /* the newsegment values are used to clip the input samples
+ * and to convert the incoming timestamps to running time so
+ * we can do QoS */
+ g_mutex_lock(&scope->priv->config_lock);
+ gst_event_copy_segment(event, &scope->priv->segment);
+ if (scope->priv->segment.format != GST_FORMAT_TIME) {
+ GST_WARNING_OBJECT(scope, "Unexpected segment format: %d",
+ scope->priv->segment.format);
+ }
+ scope->priv->clock_offset_pts =
+ scope->priv->segment.start; // or segment.position if it's a live seek
+ scope->priv->offset_initialized = TRUE;
+ scope->priv->samples_consumed = 0;
+ g_mutex_unlock(&scope->priv->config_lock);
+
+ res = gst_pad_push_event(scope->priv->srcpad, event);
+ GST_DEBUG_OBJECT(
+ scope, "Segment start: %" GST_TIME_FORMAT ", stop: %" GST_TIME_FORMAT,
+ GST_TIME_ARGS(scope->priv->segment.start),
+ GST_TIME_ARGS(scope->priv->segment.stop));
+ break;
+ }
+ default:
+ res = gst_pad_event_default(pad, parent, event);
+ break;
+ }
+
+ return res;
+}
+
+static gboolean gst_pm_audio_visualizer_src_query(GstPad *pad,
+ GstObject *parent,
+ GstQuery *query) {
+ gboolean res = FALSE;
+ GstPMAudioVisualizer *scope;
+
+ scope = GST_PM_AUDIO_VISUALIZER(parent);
+
+ switch (GST_QUERY_TYPE(query)) {
+ case GST_QUERY_LATENCY: {
+ /* We need to send the query upstream and add the returned latency to our
+ * own */
+ GstClockTime min_latency, max_latency;
+ gboolean us_live;
+ GstClockTime our_latency;
+ guint max_samples;
+ gint rate = GST_AUDIO_INFO_RATE(&scope->ainfo);
+
+ if (rate == 0)
+ break;
+
+ if ((res = gst_pad_peer_query(scope->priv->sinkpad, query))) {
+ gst_query_parse_latency(query, &us_live, &min_latency, &max_latency);
+
+ GST_DEBUG_OBJECT(
+ scope, "Peer latency: min %" GST_TIME_FORMAT " max %" GST_TIME_FORMAT,
+ GST_TIME_ARGS(min_latency), GST_TIME_ARGS(max_latency));
+
+      /* the max samples we must buffer */
+ max_samples = MAX(scope->req_spf, scope->priv->spf);
+ our_latency = gst_util_uint64_scale_int(max_samples, GST_SECOND, rate);
+
+ GST_DEBUG_OBJECT(scope, "Our latency: %" GST_TIME_FORMAT,
+ GST_TIME_ARGS(our_latency));
+
+ /* we add some latency but only if we need to buffer more than what
+ * upstream gives us */
+ min_latency += our_latency;
+ if (max_latency != -1)
+ max_latency += our_latency;
+
+ GST_DEBUG_OBJECT(scope,
+ "Calculated total latency : min %" GST_TIME_FORMAT
+ " max %" GST_TIME_FORMAT,
+ GST_TIME_ARGS(min_latency), GST_TIME_ARGS(max_latency));
+
+ gst_query_set_latency(query, TRUE, min_latency, max_latency);
+ }
+ break;
+ }
+ default:
+ res = gst_pad_query_default(pad, parent, query);
+ break;
+ }
+
+ return res;
+}
+
+static GstStateChangeReturn
+gst_pm_audio_visualizer_parent_change_state(GstElement *element,
+ GstStateChange transition) {
+ GstStateChangeReturn ret;
+ GstPMAudioVisualizer *scope;
+
+ scope = GST_PM_AUDIO_VISUALIZER(element);
+
+ switch (transition) {
+ case GST_STATE_CHANGE_READY_TO_PAUSED:
+ g_mutex_lock(&scope->priv->config_lock);
+ gst_pm_audio_visualizer_reset_unlocked(scope);
+ g_mutex_unlock(&scope->priv->config_lock);
+ break;
+ default:
+ break;
+ }
+
+ ret = GST_ELEMENT_CLASS(parent_class)->change_state(element, transition);
+ if (ret == GST_STATE_CHANGE_FAILURE)
+ return ret;
+
+ switch (transition) {
+ case GST_STATE_CHANGE_PAUSED_TO_READY:
+ gst_pm_audio_visualizer_set_allocation(scope, NULL, NULL, NULL, NULL);
+ break;
+ case GST_STATE_CHANGE_READY_TO_NULL:
+ break;
+ default:
+ break;
+ }
+
+ GstPMAudioVisualizerClass *klass =
+ GST_PM_AUDIO_VISUALIZER_CLASS(G_OBJECT_GET_CLASS(scope));
+ ret = klass->change_state(element, transition);
+
+ return ret;
+}
+
+static GstStateChangeReturn
+gst_pm_audio_visualizer_default_change_state(GstElement *element,
+ GstStateChange transition) {
+ return GST_STATE_CHANGE_SUCCESS;
+}
\ No newline at end of file
diff --git a/src/gstpmaudiovisualizer.h b/src/gstpmaudiovisualizer.h
new file mode 100644
index 0000000..c7c4b61
--- /dev/null
+++ b/src/gstpmaudiovisualizer.h
@@ -0,0 +1,121 @@
+/* GStreamer
+ * Copyright (C) <2011> Stefan Kost
+ * Copyright (C) <2015> Luis de Bethencourt
+ *
+ * gstaudiovisualizer.c: base class for audio visualisation elements
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ */
+
+/*
+ * The code in this file is based on
+ * GStreamer / gst-plugins-base, latest version as of 2025/05/29.
+ * gst-libs/gst/pbutils/gstaudiovisualizer.h Git Repository:
+ * https://gitlab.freedesktop.org/gstreamer/gstreamer/-/blob/main/subprojects/gst-plugins-base/gst-libs/gst/pbutils/gstaudiovisualizer.h
+ *
+ * Original copyright notice has been retained at the top of this file.
+ * The code has been modified to improve compatibility with projectM and OpenGL.
+ * See impl for details.
+ */
+
+#ifndef __GST_PM_AUDIO_VISUALIZER_H__
+#define __GST_PM_AUDIO_VISUALIZER_H__
+
+#include <gst/gst.h>
+
+#include <gst/audio/audio.h>
+#include <gst/video/video.h>
+
+G_BEGIN_DECLS
+#define GST_TYPE_PM_AUDIO_VISUALIZER (gst_pm_audio_visualizer_get_type())
+#define GST_PM_AUDIO_VISUALIZER(obj) \
+ (G_TYPE_CHECK_INSTANCE_CAST((obj), GST_TYPE_PM_AUDIO_VISUALIZER, \
+ GstPMAudioVisualizer))
+#define GST_PM_AUDIO_VISUALIZER_CLASS(klass) \
+ (G_TYPE_CHECK_CLASS_CAST((klass), GST_TYPE_PM_AUDIO_VISUALIZER, \
+ GstPMAudioVisualizerClass))
+#define GST_PM_AUDIO_VISUALIZER_GET_CLASS(obj) \
+ (G_TYPE_INSTANCE_GET_CLASS((obj), GST_TYPE_PM_AUDIO_VISUALIZER, \
+ GstPMAudioVisualizerClass))
+#define GST_PM_IS_SYNAESTHESIA(obj) \
+ (G_TYPE_CHECK_INSTANCE_TYPE((obj), GST_TYPE_PM_AUDIO_VISUALIZER))
+#define GST_PM_IS_SYNAESTHESIA_CLASS(klass) \
+ (G_TYPE_CHECK_CLASS_TYPE((klass), GST_TYPE_PM_AUDIO_VISUALIZER))
+typedef struct _GstPMAudioVisualizer GstPMAudioVisualizer;
+typedef struct _GstPMAudioVisualizerClass GstPMAudioVisualizerClass;
+typedef struct _GstPMAudioVisualizerPrivate GstPMAudioVisualizerPrivate;
+
+struct _GstPMAudioVisualizer {
+ GstElement parent;
+
+ /* min samples per frame wanted by the subclass (one channel) */
+ guint req_spf;
+
+ /* video state */
+ GstVideoInfo vinfo;
+
+ /* audio state */
+ GstAudioInfo ainfo;
+
+ /*< private >*/
+ GstPMAudioVisualizerPrivate *priv;
+
+ guint64 frame_duration;
+};
+
+/**
+ * GstPMAudioVisualizerClass:
+ * @decide_allocation: buffer pool allocation
+ * @prepare_output_buffer: allocate a buffer for rendering a frame.
+ * @map_output_buffer: map video frame to memory buffer.
+ * @render: render a frame from an audio buffer.
+ * @setup: called whenever the format changes.
+ *
+ * Base class for audio visualizers, derived from gstreamer
+ * GstAudioVisualizerClass. This plugin handles rendering video frames with a
+ * fixed framerate from audio input samples.
+ */
+struct _GstPMAudioVisualizerClass {
+ /*< private >*/
+ GstElementClass parent_class;
+
+ /*< public >*/
+ /* virtual function, called whenever the format changes */
+ gboolean (*setup)(GstPMAudioVisualizer *scope);
+
+ /* virtual function for rendering a frame */
+ GstFlowReturn (*render)(GstPMAudioVisualizer *scope, GstBuffer *audio,
+ GstBuffer **video, GstClockTime pts);
+
+ /* virtual function for buffer pool allocation */
+ gboolean (*decide_allocation)(GstPMAudioVisualizer *scope, GstQuery *query);
+
+ /* virtual function to allow overridden change_state, cascading to GstElement
+ */
+ GstStateChangeReturn (*change_state)(GstElement *element,
+ GstStateChange transition);
+};
+
+GType gst_pm_audio_visualizer_get_type(void);
+
+GstFlowReturn
+gst_pm_audio_visualizer_util_prepare_output_buffer(GstPMAudioVisualizer *scope,
+ GstBuffer **outbuf);
+
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(GstPMAudioVisualizer, gst_object_unref)
+
+G_END_DECLS
+#endif /* __GST_PM_AUDIO_VISUALIZER_H__ */
diff --git a/src/plugin.c b/src/plugin.c
index 125b2ed..a79f055 100644
--- a/src/plugin.c
+++ b/src/plugin.c
@@ -1,4 +1,3 @@
-#include
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
@@ -6,29 +5,25 @@
#ifdef USE_GLEW
#include
#endif
+
+#include "plugin.h"
+
#include
#include
-#include
-
-#include
#include "caps.h"
-#include "config.h"
#include "debug.h"
-#include "enums.h"
#include "gstglbaseaudiovisualizer.h"
-#include "plugin.h"
-#include "projectm.h"
GST_DEBUG_CATEGORY_STATIC(gst_projectm_debug);
#define GST_CAT_DEFAULT gst_projectm_debug
struct _GstProjectMPrivate {
- GLenum gl_format;
- projectm_handle handle;
- GstClockTime first_frame_time;
- gboolean first_frame_received;
+ GstBaseProjectMPrivate base;
+
+ GstBuffer *in_audio;
+ GstGLMemory *mem;
};
G_DEFINE_TYPE_WITH_CODE(GstProjectM, gst_projectm,
@@ -40,330 +35,139 @@ G_DEFINE_TYPE_WITH_CODE(GstProjectM, gst_projectm,
void gst_projectm_set_property(GObject *object, guint property_id,
const GValue *value, GParamSpec *pspec) {
+
GstProjectM *plugin = GST_PROJECTM(object);
- const gchar *property_name = g_param_spec_get_name(pspec);
- GST_DEBUG_OBJECT(plugin, "set-property <%s>", property_name);
-
- switch (property_id) {
- case PROP_PRESET_PATH:
- plugin->preset_path = g_strdup(g_value_get_string(value));
- break;
- case PROP_TEXTURE_DIR_PATH:
- plugin->texture_dir_path = g_strdup(g_value_get_string(value));
- break;
- case PROP_BEAT_SENSITIVITY:
- plugin->beat_sensitivity = g_value_get_float(value);
- break;
- case PROP_HARD_CUT_DURATION:
- plugin->hard_cut_duration = g_value_get_double(value);
- break;
- case PROP_HARD_CUT_ENABLED:
- plugin->hard_cut_enabled = g_value_get_boolean(value);
- break;
- case PROP_HARD_CUT_SENSITIVITY:
- plugin->hard_cut_sensitivity = g_value_get_float(value);
- break;
- case PROP_SOFT_CUT_DURATION:
- plugin->soft_cut_duration = g_value_get_double(value);
- break;
- case PROP_PRESET_DURATION:
- plugin->preset_duration = g_value_get_double(value);
- break;
- case PROP_MESH_SIZE: {
- const gchar *meshSizeStr = g_value_get_string(value);
- gint width, height;
-
- gchar **parts = g_strsplit(meshSizeStr, ",", 2);
-
- if (parts && g_strv_length(parts) == 2) {
- width = atoi(parts[0]);
- height = atoi(parts[1]);
-
- plugin->mesh_width = width;
- plugin->mesh_height = height;
-
- g_strfreev(parts);
- }
- } break;
- case PROP_ASPECT_CORRECTION:
- plugin->aspect_correction = g_value_get_boolean(value);
- break;
- case PROP_EASTER_EGG:
- plugin->easter_egg = g_value_get_float(value);
- break;
- case PROP_PRESET_LOCKED:
- plugin->preset_locked = g_value_get_boolean(value);
- break;
- case PROP_ENABLE_PLAYLIST:
- plugin->enable_playlist = g_value_get_boolean(value);
- break;
- case PROP_SHUFFLE_PRESETS:
- plugin->shuffle_presets = g_value_get_boolean(value);
- break;
- default:
- G_OBJECT_WARN_INVALID_PROPERTY_ID(object, property_id, pspec);
- break;
- }
+ gst_projectm_base_set_property(object, &plugin->settings, property_id, value,
+ pspec);
}
void gst_projectm_get_property(GObject *object, guint property_id,
GValue *value, GParamSpec *pspec) {
GstProjectM *plugin = GST_PROJECTM(object);
- const gchar *property_name = g_param_spec_get_name(pspec);
- GST_DEBUG_OBJECT(plugin, "get-property <%s>", property_name);
-
- switch (property_id) {
- case PROP_PRESET_PATH:
- g_value_set_string(value, plugin->preset_path);
- break;
- case PROP_TEXTURE_DIR_PATH:
- g_value_set_string(value, plugin->texture_dir_path);
- break;
- case PROP_BEAT_SENSITIVITY:
- g_value_set_float(value, plugin->beat_sensitivity);
- break;
- case PROP_HARD_CUT_DURATION:
- g_value_set_double(value, plugin->hard_cut_duration);
- break;
- case PROP_HARD_CUT_ENABLED:
- g_value_set_boolean(value, plugin->hard_cut_enabled);
- break;
- case PROP_HARD_CUT_SENSITIVITY:
- g_value_set_float(value, plugin->hard_cut_sensitivity);
- break;
- case PROP_SOFT_CUT_DURATION:
- g_value_set_double(value, plugin->soft_cut_duration);
- break;
- case PROP_PRESET_DURATION:
- g_value_set_double(value, plugin->preset_duration);
- break;
- case PROP_MESH_SIZE: {
- gchar *meshSizeStr =
- g_strdup_printf("%lu,%lu", plugin->mesh_width, plugin->mesh_height);
- g_value_set_string(value, meshSizeStr);
- g_free(meshSizeStr);
- break;
- }
- case PROP_ASPECT_CORRECTION:
- g_value_set_boolean(value, plugin->aspect_correction);
- break;
- case PROP_EASTER_EGG:
- g_value_set_float(value, plugin->easter_egg);
- break;
- case PROP_PRESET_LOCKED:
- g_value_set_boolean(value, plugin->preset_locked);
- break;
- case PROP_ENABLE_PLAYLIST:
- g_value_set_boolean(value, plugin->enable_playlist);
- break;
- case PROP_SHUFFLE_PRESETS:
- g_value_set_boolean(value, plugin->shuffle_presets);
- break;
- default:
- G_OBJECT_WARN_INVALID_PROPERTY_ID(object, property_id, pspec);
- break;
- }
+ gst_projectm_base_get_property(object, &plugin->settings, property_id, value,
+ pspec);
}
static void gst_projectm_init(GstProjectM *plugin) {
plugin->priv = gst_projectm_get_instance_private(plugin);
- // Set default values for properties
- plugin->preset_path = DEFAULT_PRESET_PATH;
- plugin->texture_dir_path = DEFAULT_TEXTURE_DIR_PATH;
- plugin->beat_sensitivity = DEFAULT_BEAT_SENSITIVITY;
- plugin->hard_cut_duration = DEFAULT_HARD_CUT_DURATION;
- plugin->hard_cut_enabled = DEFAULT_HARD_CUT_ENABLED;
- plugin->hard_cut_sensitivity = DEFAULT_HARD_CUT_SENSITIVITY;
- plugin->soft_cut_duration = DEFAULT_SOFT_CUT_DURATION;
- plugin->preset_duration = DEFAULT_PRESET_DURATION;
- plugin->enable_playlist = DEFAULT_ENABLE_PLAYLIST;
- plugin->shuffle_presets = DEFAULT_SHUFFLE_PRESETS;
-
- const gchar *meshSizeStr = DEFAULT_MESH_SIZE;
- gint width, height;
-
- gchar **parts = g_strsplit(meshSizeStr, ",", 2);
-
- if (parts && g_strv_length(parts) == 2) {
- width = atoi(parts[0]);
- height = atoi(parts[1]);
-
- plugin->mesh_width = width;
- plugin->mesh_height = height;
-
- g_strfreev(parts);
- }
-
- plugin->aspect_correction = DEFAULT_ASPECT_CORRECTION;
- plugin->easter_egg = DEFAULT_EASTER_EGG;
- plugin->preset_locked = DEFAULT_PRESET_LOCKED;
- plugin->priv->handle = NULL;
+ gst_gl_memory_init_once();
+
+ gst_projectm_base_init(&plugin->settings, &plugin->priv->base);
+
+ plugin->priv->in_audio = NULL;
+ plugin->priv->mem = NULL;
}
static void gst_projectm_finalize(GObject *object) {
+
GstProjectM *plugin = GST_PROJECTM(object);
- g_free(plugin->preset_path);
- g_free(plugin->texture_dir_path);
+
+ gst_projectm_base_finalize(&plugin->settings, &plugin->priv->base);
G_OBJECT_CLASS(gst_projectm_parent_class)->finalize(object);
}
static void gst_projectm_gl_stop(GstGLBaseAudioVisualizer *src) {
+
GstProjectM *plugin = GST_PROJECTM(src);
- if (plugin->priv->handle) {
- GST_DEBUG_OBJECT(plugin, "Destroying ProjectM instance");
- projectm_destroy(plugin->priv->handle);
- plugin->priv->handle = NULL;
- }
+
+ GST_PROJECTM_BASE_LOCK(plugin);
+
+ gst_projectm_base_gl_stop(G_OBJECT(src), &plugin->priv->base);
+
+ GST_PROJECTM_BASE_UNLOCK(plugin);
}
static gboolean gst_projectm_gl_start(GstGLBaseAudioVisualizer *glav) {
// Cast the audio visualizer to the ProjectM plugin
GstProjectM *plugin = GST_PROJECTM(glav);
+ GstPMAudioVisualizer *gstav = GST_PM_AUDIO_VISUALIZER(glav);
-#ifdef USE_GLEW
- GST_DEBUG_OBJECT(plugin, "Initializing GLEW");
- GLenum err = glewInit();
- if (GLEW_OK != err) {
- GST_ERROR_OBJECT(plugin, "GLEW initialization failed");
- return FALSE;
- }
-#endif
+ GST_PROJECTM_BASE_LOCK(plugin);
- // Check if ProjectM instance exists, and create if not
- if (!plugin->priv->handle) {
- // Create ProjectM instance
- plugin->priv->handle = projectm_init(plugin);
- if (!plugin->priv->handle) {
- GST_ERROR_OBJECT(plugin, "ProjectM could not be initialized");
- return FALSE;
- }
- gl_error_handler(glav->context, plugin);
- }
+ gst_projectm_base_gl_start(G_OBJECT(glav), &plugin->priv->base,
+ &plugin->settings, glav->context, &gstav->vinfo);
+
+ GST_PROJECTM_BASE_UNLOCK(plugin);
+
+ GST_INFO_OBJECT(plugin, "GL start complete");
return TRUE;
}
static gboolean gst_projectm_setup(GstGLBaseAudioVisualizer *glav) {
- GstAudioVisualizer *bscope = GST_AUDIO_VISUALIZER(glav);
- GstProjectM *plugin = GST_PROJECTM(glav);
- // Calculate depth based on pixel stride and bits
- gint depth = bscope->vinfo.finfo->pixel_stride[0] *
- ((bscope->vinfo.finfo->bits >= 8) ? 8 : 1);
-
- // Calculate required samples per frame
- bscope->req_spf =
- (bscope->ainfo.channels * bscope->ainfo.rate * 2) / bscope->vinfo.fps_n;
-
- // get GStreamer video format and map it to the corresponding OpenGL pixel
- // format
- const GstVideoFormat video_format = GST_VIDEO_INFO_FORMAT(&bscope->vinfo);
-
- // TODO: why is the reversed byte order needed when copying pixel data from
- // OpenGL ?
- switch (video_format) {
- case GST_VIDEO_FORMAT_ABGR:
- plugin->priv->gl_format = GL_RGBA;
- break;
-
- case GST_VIDEO_FORMAT_RGBA:
- // GL_ABGR_EXT does not seem to be well-supported, does not work on Windows
- plugin->priv->gl_format = GL_ABGR_EXT;
- break;
-
- default:
- GST_ERROR_OBJECT(plugin, "Unsupported video format: %d", video_format);
- return FALSE;
- }
+ GstPMAudioVisualizer *gstav = GST_PM_AUDIO_VISUALIZER(glav);
// Log audio info
GST_DEBUG_OBJECT(
glav, "Audio Information ",
- bscope->ainfo.channels, bscope->ainfo.rate,
- bscope->ainfo.finfo->description);
+ gstav->ainfo.channels, gstav->ainfo.rate,
+ gstav->ainfo.finfo->description);
// Log video info
- GST_DEBUG_OBJECT(glav,
- "Video Information ",
- GST_VIDEO_INFO_WIDTH(&bscope->vinfo),
- GST_VIDEO_INFO_HEIGHT(&bscope->vinfo), bscope->vinfo.fps_n,
- bscope->vinfo.fps_d, depth, bscope->req_spf);
+ GST_DEBUG_OBJECT(
+ glav,
+ "Video Information ",
+ GST_VIDEO_INFO_WIDTH(&gstav->vinfo), GST_VIDEO_INFO_HEIGHT(&gstav->vinfo),
+ gstav->vinfo.fps_n, gstav->vinfo.fps_d, gstav->req_spf);
return TRUE;
}
-static double get_seconds_since_first_frame(GstProjectM *plugin,
- GstVideoFrame *frame) {
- if (!plugin->priv->first_frame_received) {
- // Store the timestamp of the first frame
- plugin->priv->first_frame_time = GST_BUFFER_PTS(frame->buffer);
- plugin->priv->first_frame_received = TRUE;
- return 0.0;
- }
+static gboolean gst_projectm_fill_gl_memory_callback(gpointer stuff) {
- // Calculate elapsed time
- GstClockTime current_time = GST_BUFFER_PTS(frame->buffer);
- GstClockTime elapsed_time = current_time - plugin->priv->first_frame_time;
-
- // Convert to fractional seconds
- gdouble elapsed_seconds = (gdouble)elapsed_time / GST_SECOND;
-
- return elapsed_seconds;
-}
-
-// TODO: CLEANUP & ADD DEBUGGING
-static gboolean gst_projectm_render(GstGLBaseAudioVisualizer *glav,
- GstBuffer *audio, GstVideoFrame *video) {
- GstProjectM *plugin = GST_PROJECTM(glav);
-
- GstMapInfo audioMap;
+ GstProjectM *plugin = GST_PROJECTM(stuff);
+ GstGLBaseAudioVisualizer *glav = GST_GL_BASE_AUDIO_VISUALIZER(stuff);
gboolean result = TRUE;
- // get current gst (PTS) time and set projectM time
- double seconds_since_first_frame =
- get_seconds_since_first_frame(plugin, video);
- projectm_set_frame_time(plugin->priv->handle, seconds_since_first_frame);
+ // VIDEO
+ GST_TRACE_OBJECT(plugin, "rendering projectM to fbo %d",
+ plugin->priv->base.fbo->fbo_id);
- // AUDIO
- gst_buffer_map(audio, &audioMap, GST_MAP_READ);
+ /*
+ const GstGLFuncs *glFunctions = glav->context->gl_vtable;
- // GST_DEBUG_OBJECT(plugin, "Audio Samples: %u, Offset: %lu, Offset End: %lu,
- // Sample Rate: %d, FPS: %d, Required Samples Per Frame: %d",
- // audioMap.size / 8, audio->offset, audio->offset_end,
- // bscope->ainfo.rate, bscope->vinfo.fps_n, bscope->req_spf);
+ GLuint tex_id = gst_gl_memory_get_texture_id(plugin->priv->mem);
- projectm_pcm_add_int16(plugin->priv->handle, (gint16 *)audioMap.data,
- audioMap.size / 4, PROJECTM_STEREO);
+ glFunctions->BindFramebuffer(GL_FRAMEBUFFER, plugin->priv->base.fbo->fbo_id);
+ glFunctions->FramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D, tex_id, 0); glFunctions->BindTexture(GL_TEXTURE_2D, 0);
+*/
+ gst_projectm_base_fill_gl_memory_callback(
+ GST_OBJECT(plugin), &plugin->priv->base, &plugin->settings, glav->context,
+ glav->pts, plugin->priv->in_audio);
- // GST_DEBUG_OBJECT(plugin, "Audio Data: %d %d %d %d", ((gint16
- // *)audioMap.data)[100], ((gint16 *)audioMap.data)[101], ((gint16
- // *)audioMap.data)[102], ((gint16 *)audioMap.data)[103]);
+ // GST_DEBUG_OBJECT(plugin, "Video Data: %d %d\n",
+ // GST_VIDEO_FRAME_N_PLANES(video), ((uint8_t
+ // *)(GST_VIDEO_FRAME_PLANE_DATA(video, 0)))[0]);
- // VIDEO
- const GstGLFuncs *glFunctions = glav->context->gl_vtable;
+ // GST_DEBUG_OBJECT(plugin, "Rendered one frame");
- size_t windowWidth, windowHeight;
+ return result;
+}
- projectm_get_window_size(plugin->priv->handle, &windowWidth, &windowHeight);
+static gboolean gst_projectm_fill_gl_memory(GstGLBaseAudioVisualizer *glav,
+ GstBuffer *in_audio,
+ GstGLMemory *mem) {
- projectm_opengl_render_frame(plugin->priv->handle);
- gl_error_handler(glav->context, plugin);
+ GstProjectM *plugin = GST_PROJECTM(glav);
- glFunctions->ReadPixels(0, 0, windowWidth, windowHeight,
- plugin->priv->gl_format, GL_UNSIGNED_INT_8_8_8_8,
- (guint8 *)GST_VIDEO_FRAME_PLANE_DATA(video, 0));
+ GST_PROJECTM_BASE_LOCK(plugin);
- gst_buffer_unmap(audio, &audioMap);
+ plugin->priv->in_audio = in_audio;
+ plugin->priv->mem = mem;
- // GST_DEBUG_OBJECT(plugin, "Video Data: %d %d\n",
- // GST_VIDEO_FRAME_N_PLANES(video), ((uint8_t
- // *)(GST_VIDEO_FRAME_PLANE_DATA(video, 0)))[0]);
+ gboolean result = gst_gl_framebuffer_draw_to_texture(
+ plugin->priv->base.fbo, mem, gst_projectm_fill_gl_memory_callback,
+ plugin);
- // GST_DEBUG_OBJECT(plugin, "Rendered one frame");
+ plugin->priv->in_audio = NULL;
+ plugin->priv->mem = NULL;
+
+ GST_PROJECTM_BASE_UNLOCK(plugin);
return result;
}
@@ -375,8 +179,8 @@ static void gst_projectm_class_init(GstProjectMClass *klass) {
GST_GL_BASE_AUDIO_VISUALIZER_CLASS(klass);
// Setup audio and video caps
- const gchar *audio_sink_caps = get_audio_sink_cap(0);
- const gchar *video_src_caps = get_video_src_cap(0);
+ const gchar *audio_sink_caps = get_audio_sink_cap();
+ const gchar *video_src_caps = get_video_src_cap();
gst_element_class_add_pad_template(
GST_ELEMENT_CLASS(klass),
@@ -397,143 +201,13 @@ static void gst_projectm_class_init(GstProjectMClass *klass) {
gobject_class->set_property = gst_projectm_set_property;
gobject_class->get_property = gst_projectm_get_property;
- g_object_class_install_property(
- gobject_class, PROP_PRESET_PATH,
- g_param_spec_string(
- "preset", "Preset",
- "Specifies the path to the preset file. The preset file determines "
- "the visual style and behavior of the audio visualizer.",
- DEFAULT_PRESET_PATH, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
-
- g_object_class_install_property(
- gobject_class, PROP_TEXTURE_DIR_PATH,
- g_param_spec_string("texture-dir", "Texture Directory",
- "Sets the path to the directory containing textures "
- "used in the visualizer.",
- DEFAULT_TEXTURE_DIR_PATH,
- G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
-
- g_object_class_install_property(
- gobject_class, PROP_BEAT_SENSITIVITY,
- g_param_spec_float(
- "beat-sensitivity", "Beat Sensitivity",
- "Controls the sensitivity to audio beats. Higher values make the "
- "visualizer respond more strongly to beats.",
- 0.0, 5.0, DEFAULT_BEAT_SENSITIVITY,
- G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
-
- g_object_class_install_property(
- gobject_class, PROP_HARD_CUT_DURATION,
- g_param_spec_double("hard-cut-duration", "Hard Cut Duration",
- "Sets the duration, in seconds, for hard cuts. Hard "
- "cuts are abrupt transitions in the visualizer.",
- 0.0, 999999.0, DEFAULT_HARD_CUT_DURATION,
- G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
-
- g_object_class_install_property(
- gobject_class, PROP_HARD_CUT_ENABLED,
- g_param_spec_boolean(
- "hard-cut-enabled", "Hard Cut Enabled",
- "Enables or disables hard cuts. When enabled, the visualizer may "
- "exhibit sudden transitions based on the audio input.",
- DEFAULT_HARD_CUT_ENABLED,
- G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
-
- g_object_class_install_property(
- gobject_class, PROP_HARD_CUT_SENSITIVITY,
- g_param_spec_float(
- "hard-cut-sensitivity", "Hard Cut Sensitivity",
- "Adjusts the sensitivity of the visualizer to hard cuts. Higher "
- "values increase the responsiveness to abrupt changes in audio.",
- 0.0, 1.0, DEFAULT_HARD_CUT_SENSITIVITY,
- G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
-
- g_object_class_install_property(
- gobject_class, PROP_SOFT_CUT_DURATION,
- g_param_spec_double(
- "soft-cut-duration", "Soft Cut Duration",
- "Sets the duration, in seconds, for soft cuts. Soft cuts are "
- "smoother transitions between visualizer states.",
- 0.0, 999999.0, DEFAULT_SOFT_CUT_DURATION,
- G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
-
- g_object_class_install_property(
- gobject_class, PROP_PRESET_DURATION,
- g_param_spec_double("preset-duration", "Preset Duration",
- "Sets the duration, in seconds, for each preset. A "
- "zero value causes the preset to play indefinitely.",
- 0.0, 999999.0, DEFAULT_PRESET_DURATION,
- G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
-
- g_object_class_install_property(
- gobject_class, PROP_MESH_SIZE,
- g_param_spec_string("mesh-size", "Mesh Size",
- "Sets the size of the mesh used in rendering. The "
- "format is 'width,height'.",
- DEFAULT_MESH_SIZE,
- G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
-
- g_object_class_install_property(
- gobject_class, PROP_ASPECT_CORRECTION,
- g_param_spec_boolean(
- "aspect-correction", "Aspect Correction",
- "Enables or disables aspect ratio correction. When enabled, the "
- "visualizer adjusts for aspect ratio differences in rendering.",
- DEFAULT_ASPECT_CORRECTION,
- G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
-
- g_object_class_install_property(
- gobject_class, PROP_EASTER_EGG,
- g_param_spec_float(
- "easter-egg", "Easter Egg",
- "Controls the activation of an Easter Egg feature. The value "
- "determines the likelihood of triggering the Easter Egg.",
- 0.0, 1.0, DEFAULT_EASTER_EGG,
- G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
-
- g_object_class_install_property(
- gobject_class, PROP_PRESET_LOCKED,
- g_param_spec_boolean(
- "preset-locked", "Preset Locked",
- "Locks or unlocks the current preset. When locked, the visualizer "
- "remains on the current preset without automatic changes.",
- DEFAULT_PRESET_LOCKED, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
-
- g_object_class_install_property(
- gobject_class, PROP_ENABLE_PLAYLIST,
- g_param_spec_boolean(
- "enable-playlist", "Enable Playlist",
- "Enables or disables the playlist feature. When enabled, the "
- "visualizer can switch between presets based on a provided playlist.",
- DEFAULT_ENABLE_PLAYLIST, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
-
- g_object_class_install_property(
- gobject_class, PROP_SHUFFLE_PRESETS,
- g_param_spec_boolean(
- "shuffle-presets", "Shuffle Presets",
- "Enables or disables preset shuffling. When enabled, the visualizer "
- "randomly selects presets from the playlist if presets are provided "
- "and not locked. Playlist must be enabled for this to take effect.",
- DEFAULT_SHUFFLE_PRESETS, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+ gst_projectm_base_install_properties(gobject_class);
gobject_class->finalize = gst_projectm_finalize;
scope_class->supported_gl_api = GST_GL_API_OPENGL3 | GST_GL_API_GLES2;
scope_class->gl_start = GST_DEBUG_FUNCPTR(gst_projectm_gl_start);
scope_class->gl_stop = GST_DEBUG_FUNCPTR(gst_projectm_gl_stop);
- scope_class->gl_render = GST_DEBUG_FUNCPTR(gst_projectm_render);
+ scope_class->fill_gl_memory = GST_DEBUG_FUNCPTR(gst_projectm_fill_gl_memory);
scope_class->setup = GST_DEBUG_FUNCPTR(gst_projectm_setup);
}
-
-static gboolean plugin_init(GstPlugin *plugin) {
- GST_DEBUG_CATEGORY_INIT(gst_projectm_debug, "projectm", 0,
- "projectM visualizer plugin");
-
- return gst_element_register(plugin, "projectm", GST_RANK_NONE,
- GST_TYPE_PROJECTM);
-}
-
-GST_PLUGIN_DEFINE(GST_VERSION_MAJOR, GST_VERSION_MINOR, projectm,
- "plugin to visualize audio using the ProjectM library",
- plugin_init, PACKAGE_VERSION, PACKAGE_LICENSE, PACKAGE_NAME,
- PACKAGE_ORIGIN)
diff --git a/src/plugin.h b/src/plugin.h
index de1acff..3f01b3e 100644
--- a/src/plugin.h
+++ b/src/plugin.h
@@ -2,7 +2,7 @@
#define __GST_PROJECTM_H__
#include "gstglbaseaudiovisualizer.h"
-#include
+#include "pluginbase.h"
typedef struct _GstProjectMPrivate GstProjectMPrivate;
@@ -12,31 +12,23 @@ G_BEGIN_DECLS
G_DECLARE_FINAL_TYPE(GstProjectM, gst_projectm, GST, PROJECTM,
GstGLBaseAudioVisualizer)
+/*
+ * Main plug-in. Handles interactions with projectM.
+ * Uses GstPMAudioVisualizer for handling audio-visualization (audio input,
+ * timing, video frame data). GstGLBaseAudioVisualizer extends
+ * GstPMAudioVisualizer to add gl context handling and is used by this plugin
+ * directly. GstProjectM -> GstGLBaseAudioVisualizer -> GstPMAudioVisualizer.
+ */
struct _GstProjectM {
GstGLBaseAudioVisualizer element;
- gchar *preset_path;
- gchar *texture_dir_path;
-
- gfloat beat_sensitivity;
- gdouble hard_cut_duration;
- gboolean hard_cut_enabled;
- gfloat hard_cut_sensitivity;
- gdouble soft_cut_duration;
- gdouble preset_duration;
- gulong mesh_width;
- gulong mesh_height;
- gboolean aspect_correction;
- gfloat easter_egg;
- gboolean preset_locked;
- gboolean enable_playlist;
- gboolean shuffle_presets;
+ GstBaseProjectMSettings settings;
GstProjectMPrivate *priv;
};
struct _GstProjectMClass {
- GstAudioVisualizerClass parent_class;
+ GstGLBaseAudioVisualizerClass parent_class;
};
static void gst_projectm_set_property(GObject *object, guint prop_id,
@@ -53,8 +45,9 @@ static gboolean gst_projectm_gl_start(GstGLBaseAudioVisualizer *glav);
static void gst_projectm_gl_stop(GstGLBaseAudioVisualizer *glav);
-static gboolean gst_projectm_render(GstGLBaseAudioVisualizer *glav,
- GstBuffer *audio, GstVideoFrame *video);
+static gboolean gst_projectm_fill_gl_memory(GstGLBaseAudioVisualizer *glav,
+ GstBuffer *in_audio,
+ GstGLMemory *mem);
static void gst_projectm_class_init(GstProjectMClass *klass);
diff --git a/src/pluginbase.c b/src/pluginbase.c
new file mode 100644
index 0000000..4d74987
--- /dev/null
+++ b/src/pluginbase.c
@@ -0,0 +1,538 @@
+
+#include "pluginbase.h"
+
+#include "enums.h"
+
+#include "config.h"
+// config.h is included unconditionally here (no HAVE_CONFIG_H guard)
+#include "debug.h"
+#include "gstglbaseaudiovisualizer.h"
+
+#include <math.h>
+#include <stdlib.h>
+
+GST_DEBUG_CATEGORY_STATIC(gst_projectm_base_debug);
+#define GST_CAT_DEFAULT gst_projectm_base_debug
+
+void projectm_base_init_once() {
+ GST_DEBUG_CATEGORY_INIT(gst_projectm_base_debug, "projectm_base", 0,
+ "projectM visualizer plugin base");
+}
+
+static gboolean projectm_init(GObject *plugin,
+ GstBaseProjectMSettings *settings,
+ GstVideoInfo *vinfo, projectm_handle *ret_handle,
+ projectm_playlist_handle *ret_playlist) {
+ projectm_handle handle = NULL;
+ projectm_playlist_handle playlist = NULL;
+
+ // Create ProjectM instance
+ GST_DEBUG_OBJECT(plugin, "Creating projectM instance..");
+ handle = projectm_create();
+
+ if (!handle) {
+ GST_DEBUG_OBJECT(
+ plugin,
+ "project_create() returned NULL, projectM instance was not created!");
+ return FALSE;
+ } else {
+ GST_DEBUG_OBJECT(plugin, "Created projectM instance!");
+ }
+ *ret_handle = handle;
+
+ if (settings->enable_playlist) {
+ GST_DEBUG_OBJECT(plugin, "Playlist enabled");
+
+ // initialize preset playlist
+ playlist = projectm_playlist_create(handle);
+ *ret_playlist = playlist;
+ projectm_playlist_set_shuffle(playlist, settings->shuffle_presets);
+ // projectm_playlist_set_preset_switched_event_callback(_playlist,
+ // &ProjectMWrapper::PresetSwitchedEvent, static_cast<void *>(this));
+ } else {
+ GST_DEBUG_OBJECT(plugin, "Playlist disabled");
+ }
+ // Log properties
+ GST_INFO_OBJECT(plugin,
+ "Using Properties: "
+ "preset=%s, "
+ "texture-dir=%s, "
+ "beat-sensitivity=%f, "
+ "hard-cut-duration=%f, "
+ "hard-cut-enabled=%d, "
+ "hard-cut-sensitivity=%f, "
+ "soft-cut-duration=%f, "
+ "preset-duration=%f, "
+ "mesh-size=(%lu, %lu), "
+ "aspect-correction=%d, "
+ "easter-egg=%f, "
+ "preset-locked=%d, "
+ "enable-playlist=%d, "
+ "shuffle-presets=%d",
+ settings->preset_path, settings->texture_dir_path,
+ settings->beat_sensitivity, settings->hard_cut_duration,
+ settings->hard_cut_enabled, settings->hard_cut_sensitivity,
+ settings->soft_cut_duration, settings->preset_duration,
+ settings->mesh_width, settings->mesh_height,
+ settings->aspect_correction, settings->easter_egg,
+ settings->preset_locked, settings->enable_playlist,
+ settings->shuffle_presets);
+
+ // Load preset file if path is provided
+ if (settings->preset_path != NULL) {
+ unsigned int added_count = projectm_playlist_add_path(
+ playlist, settings->preset_path, true, false);
+ GST_INFO_OBJECT(plugin, "Loaded preset path: %s, presets found: %d",
+ settings->preset_path, added_count);
+ }
+
+ // Set texture search path if directory path is provided
+ if (settings->texture_dir_path != NULL) {
+ const gchar *texturePaths[1] = {settings->texture_dir_path};
+ projectm_set_texture_search_paths(handle, texturePaths, 1);
+ }
+
+ // Set properties
+ projectm_set_beat_sensitivity(handle, settings->beat_sensitivity);
+ projectm_set_hard_cut_duration(handle, settings->hard_cut_duration);
+ projectm_set_hard_cut_enabled(handle, settings->hard_cut_enabled);
+ projectm_set_hard_cut_sensitivity(handle, settings->hard_cut_sensitivity);
+ projectm_set_soft_cut_duration(handle, settings->soft_cut_duration);
+
+ // Set preset duration, or set to in infinite duration if zero
+ if (settings->preset_duration > 0.0) {
+ projectm_set_preset_duration(handle, settings->preset_duration);
+ // kick off the first preset
+ if (projectm_playlist_size(playlist) > 1 && !settings->preset_locked) {
+ projectm_playlist_play_next(playlist, true);
+ }
+ } else {
+ projectm_set_preset_duration(handle, 999999.0);
+ }
+
+ projectm_set_mesh_size(handle, settings->mesh_width, settings->mesh_height);
+ projectm_set_aspect_correction(handle, settings->aspect_correction);
+ projectm_set_easter_egg(handle, settings->easter_egg);
+ projectm_set_preset_locked(handle, settings->preset_locked);
+
+ gdouble fps;
+ gst_util_fraction_to_double(GST_VIDEO_INFO_FPS_N(vinfo),
+ GST_VIDEO_INFO_FPS_D(vinfo), &fps);
+
+ projectm_set_fps(handle, gst_util_gdouble_to_guint64(fps));
+ projectm_set_window_size(handle, GST_VIDEO_INFO_WIDTH(vinfo),
+ GST_VIDEO_INFO_HEIGHT(vinfo));
+
+ return TRUE;
+}
+
+void gst_projectm_base_set_property(GObject *object,
+ GstBaseProjectMSettings *settings,
+ guint property_id, const GValue *value,
+ GParamSpec *pspec) {
+
+ const gchar *property_name = g_param_spec_get_name(pspec);
+ GST_DEBUG_OBJECT(object, "set-property <%s>", property_name);
+
+ switch (property_id) {
+ case PROP_PRESET_PATH:
+ settings->preset_path = g_strdup(g_value_get_string(value));
+ break;
+ case PROP_TEXTURE_DIR_PATH:
+ settings->texture_dir_path = g_strdup(g_value_get_string(value));
+ break;
+ case PROP_BEAT_SENSITIVITY:
+ settings->beat_sensitivity = g_value_get_float(value);
+ break;
+ case PROP_HARD_CUT_DURATION:
+ settings->hard_cut_duration = g_value_get_double(value);
+ break;
+ case PROP_HARD_CUT_ENABLED:
+ settings->hard_cut_enabled = g_value_get_boolean(value);
+ break;
+ case PROP_HARD_CUT_SENSITIVITY:
+ settings->hard_cut_sensitivity = g_value_get_float(value);
+ break;
+ case PROP_SOFT_CUT_DURATION:
+ settings->soft_cut_duration = g_value_get_double(value);
+ break;
+ case PROP_PRESET_DURATION:
+ settings->preset_duration = g_value_get_double(value);
+ break;
+ case PROP_MESH_SIZE: {
+ const gchar *meshSizeStr = g_value_get_string(value);
+ gint width, height;
+
+ gchar **parts = g_strsplit(meshSizeStr, ",", 2);
+
+ if (parts && g_strv_length(parts) == 2) {
+ width = atoi(parts[0]);
+ height = atoi(parts[1]);
+
+ settings->mesh_width = width;
+ settings->mesh_height = height;
+
+ g_strfreev(parts);
+ }
+ } break;
+ case PROP_ASPECT_CORRECTION:
+ settings->aspect_correction = g_value_get_boolean(value);
+ break;
+ case PROP_EASTER_EGG:
+ settings->easter_egg = g_value_get_float(value);
+ break;
+ case PROP_PRESET_LOCKED:
+ settings->preset_locked = g_value_get_boolean(value);
+ break;
+ case PROP_ENABLE_PLAYLIST:
+ settings->enable_playlist = g_value_get_boolean(value);
+ break;
+ case PROP_SHUFFLE_PRESETS:
+ settings->shuffle_presets = g_value_get_boolean(value);
+ break;
+ default:
+ G_OBJECT_WARN_INVALID_PROPERTY_ID(object, property_id, pspec);
+ break;
+ }
+}
+
+void gst_projectm_base_get_property(GObject *object,
+ GstBaseProjectMSettings *settings,
+ guint property_id, GValue *value,
+ GParamSpec *pspec) {
+
+ const gchar *property_name = g_param_spec_get_name(pspec);
+ GST_DEBUG_OBJECT(object, "get-property <%s>", property_name);
+
+ switch (property_id) {
+ case PROP_PRESET_PATH:
+ g_value_set_string(value, settings->preset_path);
+ break;
+ case PROP_TEXTURE_DIR_PATH:
+ g_value_set_string(value, settings->texture_dir_path);
+ break;
+ case PROP_BEAT_SENSITIVITY:
+ g_value_set_float(value, settings->beat_sensitivity);
+ break;
+ case PROP_HARD_CUT_DURATION:
+ g_value_set_double(value, settings->hard_cut_duration);
+ break;
+ case PROP_HARD_CUT_ENABLED:
+ g_value_set_boolean(value, settings->hard_cut_enabled);
+ break;
+ case PROP_HARD_CUT_SENSITIVITY:
+ g_value_set_float(value, settings->hard_cut_sensitivity);
+ break;
+ case PROP_SOFT_CUT_DURATION:
+ g_value_set_double(value, settings->soft_cut_duration);
+ break;
+ case PROP_PRESET_DURATION:
+ g_value_set_double(value, settings->preset_duration);
+ break;
+ case PROP_MESH_SIZE: {
+ gchar *meshSizeStr =
+ g_strdup_printf("%lu,%lu", settings->mesh_width, settings->mesh_height);
+ g_value_set_string(value, meshSizeStr);
+ g_free(meshSizeStr);
+ break;
+ }
+ case PROP_ASPECT_CORRECTION:
+ g_value_set_boolean(value, settings->aspect_correction);
+ break;
+ case PROP_EASTER_EGG:
+ g_value_set_float(value, settings->easter_egg);
+ break;
+ case PROP_PRESET_LOCKED:
+ g_value_set_boolean(value, settings->preset_locked);
+ break;
+ case PROP_ENABLE_PLAYLIST:
+ g_value_set_boolean(value, settings->enable_playlist);
+ break;
+ case PROP_SHUFFLE_PRESETS:
+ g_value_set_boolean(value, settings->shuffle_presets);
+ break;
+ default:
+ G_OBJECT_WARN_INVALID_PROPERTY_ID(object, property_id, pspec);
+ break;
+ }
+}
+
+void gst_projectm_base_init(GstBaseProjectMSettings *settings,
+ GstBaseProjectMPrivate *priv) {
+
+ // Set default values for properties
+ settings->preset_path = DEFAULT_PRESET_PATH;
+ settings->texture_dir_path = DEFAULT_TEXTURE_DIR_PATH;
+ settings->beat_sensitivity = DEFAULT_BEAT_SENSITIVITY;
+ settings->hard_cut_duration = DEFAULT_HARD_CUT_DURATION;
+ settings->hard_cut_enabled = DEFAULT_HARD_CUT_ENABLED;
+ settings->hard_cut_sensitivity = DEFAULT_HARD_CUT_SENSITIVITY;
+ settings->soft_cut_duration = DEFAULT_SOFT_CUT_DURATION;
+ settings->preset_duration = DEFAULT_PRESET_DURATION;
+ settings->enable_playlist = DEFAULT_ENABLE_PLAYLIST;
+ settings->shuffle_presets = DEFAULT_SHUFFLE_PRESETS;
+
+ const gchar *meshSizeStr = DEFAULT_MESH_SIZE;
+ gint width, height;
+
+ gchar **parts = g_strsplit(meshSizeStr, ",", 2);
+
+ if (parts && g_strv_length(parts) == 2) {
+ width = atoi(parts[0]);
+ height = atoi(parts[1]);
+
+ settings->mesh_width = width;
+ settings->mesh_height = height;
+
+ g_strfreev(parts);
+ }
+
+ settings->aspect_correction = DEFAULT_ASPECT_CORRECTION;
+ settings->easter_egg = DEFAULT_EASTER_EGG;
+ settings->preset_locked = DEFAULT_PRESET_LOCKED;
+
+ priv->first_frame_time = 0;
+ priv->first_frame_received = FALSE;
+
+ g_mutex_init(&priv->projectm_lock);
+}
+
+void gst_projectm_base_finalize(GstBaseProjectMSettings *settings,
+ GstBaseProjectMPrivate *priv) {
+ g_free(settings->preset_path);
+ g_free(settings->texture_dir_path);
+ g_mutex_clear(&priv->projectm_lock);
+}
+
+gboolean gst_projectm_base_gl_start(GObject *plugin,
+ GstBaseProjectMPrivate *priv,
+ GstBaseProjectMSettings *settings,
+ GstGLContext *context,
+ GstVideoInfo *vinfo) {
+
+#ifdef USE_GLEW
+ GST_DEBUG_OBJECT(plugin, "Initializing GLEW");
+ GLenum err = glewInit();
+ if (GLEW_OK != err) {
+ GST_ERROR_OBJECT(plugin, "GLEW initialization failed");
+ return FALSE;
+ }
+#endif
+
+ // initialize render texture
+ priv->fbo = gst_gl_framebuffer_new_with_default_depth(
+ context, GST_VIDEO_INFO_WIDTH(vinfo), GST_VIDEO_INFO_HEIGHT(vinfo));
+
+ // Check if ProjectM instance exists, and create if not
+ if (!priv->handle) {
+ // Create ProjectM instance
+ priv->first_frame_received = FALSE;
+ if (!projectm_init(plugin, settings, vinfo, &priv->handle,
+ &priv->playlist)) {
+ GST_ERROR_OBJECT(plugin, "projectM could not be initialized");
+ return FALSE;
+ }
+ gl_error_handler(context);
+ }
+
+ GST_INFO_OBJECT(plugin, "projectM GL start complete");
+ return TRUE;
+}
+
+void gst_projectm_base_gl_stop(GObject *plugin, GstBaseProjectMPrivate *priv) {
+
+ if (priv->handle) {
+ GST_DEBUG_OBJECT(plugin, "Destroying ProjectM instance");
+ projectm_destroy(priv->handle);
+ priv->handle = NULL;
+ }
+ if (priv->fbo) {
+ gst_object_unref(priv->fbo);
+ priv->fbo = NULL;
+ }
+}
+
+gdouble get_seconds_since_first_frame(GstBaseProjectMPrivate *priv,
+ GstClockTime pts) {
+ // timestamp to sync to
+ GstClockTime current_time = pts;
+
+ if (!priv->first_frame_received) {
+ // Store the timestamp of the first frame
+ priv->first_frame_time = current_time;
+ priv->first_frame_received = TRUE;
+ return 0.0;
+ }
+
+ // Calculate elapsed time
+ GstClockTime elapsed_time = current_time - priv->first_frame_time;
+
+ // Convert to fractional seconds
+ gdouble elapsed_seconds = (gdouble)elapsed_time / GST_SECOND;
+
+ return elapsed_seconds;
+}
+
+void gst_projectm_base_fill_audio_buffer(GstBaseProjectMPrivate *priv,
+ GstBuffer *in_audio) {
+
+ if (in_audio != NULL) {
+
+ GstMapInfo audioMap;
+
+ gst_buffer_map(in_audio, &audioMap, GST_MAP_READ);
+
+ projectm_pcm_add_int16(priv->handle, (gint16 *)audioMap.data,
+ audioMap.size / 4, PROJECTM_STEREO);
+
+ gst_buffer_unmap(in_audio, &audioMap);
+ }
+}
+
+void gst_projectm_base_fill_gl_memory_callback(
+ GstObject *plugin, GstBaseProjectMPrivate *priv,
+ GstBaseProjectMSettings *settings, GstGLContext *context, GstClockTime pts,
+ GstBuffer *in_audio) {
+
+ // get current gst sync time (pts) and set projectM time
+ gdouble seconds_since_first_frame = get_seconds_since_first_frame(priv, pts);
+
+ gdouble time = (gdouble)pts / GST_SECOND;
+ if (fabs(time - seconds_since_first_frame) > 0.00001) {
+ GST_DEBUG_OBJECT(plugin, "Injecting projectM timestamp %f s, pts %f s",
+ seconds_since_first_frame, time);
+ }
+
+ projectm_set_frame_time(priv->handle, seconds_since_first_frame);
+
+ // process audio buffer
+ gst_projectm_base_fill_audio_buffer(priv, in_audio);
+
+ // render the frame
+ projectm_opengl_render_frame_fbo(priv->handle, priv->fbo->fbo_id);
+
+ gl_error_handler(context);
+}
+
+void gst_projectm_base_install_properties(GObjectClass *gobject_class) {
+
+ // Setup properties
+ g_object_class_install_property(
+ gobject_class, PROP_PRESET_PATH,
+ g_param_spec_string(
+ "preset", "Preset",
+ "Specifies the path to the preset file. The preset file determines "
+ "the visual style and behavior of the audio visualizer.",
+ DEFAULT_PRESET_PATH, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+
+ g_object_class_install_property(
+ gobject_class, PROP_TEXTURE_DIR_PATH,
+ g_param_spec_string("texture-dir", "Texture Directory",
+ "Sets the path to the directory containing textures "
+ "used in the visualizer.",
+ DEFAULT_TEXTURE_DIR_PATH,
+ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+
+ g_object_class_install_property(
+ gobject_class, PROP_BEAT_SENSITIVITY,
+ g_param_spec_float(
+ "beat-sensitivity", "Beat Sensitivity",
+ "Controls the sensitivity to audio beats. Higher values make the "
+ "visualizer respond more strongly to beats.",
+ 0.0, 5.0, DEFAULT_BEAT_SENSITIVITY,
+ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+
+ g_object_class_install_property(
+ gobject_class, PROP_HARD_CUT_DURATION,
+ g_param_spec_double("hard-cut-duration", "Hard Cut Duration",
+ "Sets the duration, in seconds, for hard cuts. Hard "
+ "cuts are abrupt transitions in the visualizer.",
+ 0.0, 999999.0, DEFAULT_HARD_CUT_DURATION,
+ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+
+ g_object_class_install_property(
+ gobject_class, PROP_HARD_CUT_ENABLED,
+ g_param_spec_boolean(
+ "hard-cut-enabled", "Hard Cut Enabled",
+ "Enables or disables hard cuts. When enabled, the visualizer may "
+ "exhibit sudden transitions based on the audio input.",
+ DEFAULT_HARD_CUT_ENABLED,
+ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+
+ g_object_class_install_property(
+ gobject_class, PROP_HARD_CUT_SENSITIVITY,
+ g_param_spec_float(
+ "hard-cut-sensitivity", "Hard Cut Sensitivity",
+ "Adjusts the sensitivity of the visualizer to hard cuts. Higher "
+ "values increase the responsiveness to abrupt changes in audio.",
+ 0.0, 1.0, DEFAULT_HARD_CUT_SENSITIVITY,
+ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+
+ g_object_class_install_property(
+ gobject_class, PROP_SOFT_CUT_DURATION,
+ g_param_spec_double(
+ "soft-cut-duration", "Soft Cut Duration",
+ "Sets the duration, in seconds, for soft cuts. Soft cuts are "
+ "smoother transitions between visualizer states.",
+ 0.0, 999999.0, DEFAULT_SOFT_CUT_DURATION,
+ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+
+ g_object_class_install_property(
+ gobject_class, PROP_PRESET_DURATION,
+ g_param_spec_double("preset-duration", "Preset Duration",
+ "Sets the duration, in seconds, for each preset. A "
+ "zero value causes the preset to play indefinitely.",
+ 0.0, 999999.0, DEFAULT_PRESET_DURATION,
+ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+
+ g_object_class_install_property(
+ gobject_class, PROP_MESH_SIZE,
+ g_param_spec_string("mesh-size", "Mesh Size",
+ "Sets the size of the mesh used in rendering. The "
+ "format is 'width,height'.",
+ DEFAULT_MESH_SIZE,
+ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+
+ g_object_class_install_property(
+ gobject_class, PROP_ASPECT_CORRECTION,
+ g_param_spec_boolean(
+ "aspect-correction", "Aspect Correction",
+ "Enables or disables aspect ratio correction. When enabled, the "
+ "visualizer adjusts for aspect ratio differences in rendering.",
+ DEFAULT_ASPECT_CORRECTION,
+ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+
+ g_object_class_install_property(
+ gobject_class, PROP_EASTER_EGG,
+ g_param_spec_float(
+ "easter-egg", "Easter Egg",
+ "Controls the activation of an Easter Egg feature. The value "
+ "determines the likelihood of triggering the Easter Egg.",
+ 0.0, 1.0, DEFAULT_EASTER_EGG,
+ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+
+ g_object_class_install_property(
+ gobject_class, PROP_PRESET_LOCKED,
+ g_param_spec_boolean(
+ "preset-locked", "Preset Locked",
+ "Locks or unlocks the current preset. When locked, the visualizer "
+ "remains on the current preset without automatic changes.",
+ DEFAULT_PRESET_LOCKED, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+
+ g_object_class_install_property(
+ gobject_class, PROP_ENABLE_PLAYLIST,
+ g_param_spec_boolean(
+ "enable-playlist", "Enable Playlist",
+ "Enables or disables the playlist feature. When enabled, the "
+ "visualizer can switch between presets based on a provided playlist.",
+ DEFAULT_ENABLE_PLAYLIST, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+
+ g_object_class_install_property(
+ gobject_class, PROP_SHUFFLE_PRESETS,
+ g_param_spec_boolean(
+ "shuffle-presets", "Shuffle Presets",
+ "Enables or disables preset shuffling. When enabled, the visualizer "
+ "randomly selects presets from the playlist if presets are provided "
+ "and not locked. Playlist must be enabled for this to take effect.",
+ DEFAULT_SHUFFLE_PRESETS, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+}
\ No newline at end of file
diff --git a/src/pluginbase.h b/src/pluginbase.h
new file mode 100644
index 0000000..3a48821
--- /dev/null
+++ b/src/pluginbase.h
@@ -0,0 +1,171 @@
+
+#ifndef PLUGINBASE_H
+#define PLUGINBASE_H
+
+#include <gst/gl/gl.h>
+#include <gst/video/video.h>
+#include <projectM-4/playlist.h>
+#include <projectM-4/projectM.h>
+
+/*
+ * Basic gst/projectM integration structs and functions that can be re-used for
+ * alternative plugin implementations.
+ */
+G_BEGIN_DECLS
+
+/**
+ * projectM config properties.
+ */
+struct _GstBaseProjectMSettings {
+
+ gchar *preset_path;
+ gchar *texture_dir_path;
+
+ gfloat beat_sensitivity;
+ gdouble hard_cut_duration;
+ gboolean hard_cut_enabled;
+ gfloat hard_cut_sensitivity;
+ gdouble soft_cut_duration;
+ gdouble preset_duration;
+ gulong mesh_width;
+ gulong mesh_height;
+ gboolean aspect_correction;
+ gfloat easter_egg;
+ gboolean preset_locked;
+ gboolean enable_playlist;
+ gboolean shuffle_presets;
+};
+
+/**
+ * Variables needed for projectM (fbo) rendering.
+ */
+struct _GstBaseProjectMPrivate {
+ projectm_handle handle;
+ projectm_playlist_handle playlist;
+ GMutex projectm_lock;
+
+ GstClockTime first_frame_time;
+ gboolean first_frame_received;
+
+ GstGLFramebuffer *fbo;
+};
+
+typedef struct _GstBaseProjectMPrivate GstBaseProjectMPrivate;
+typedef struct _GstBaseProjectMSettings GstBaseProjectMSettings;
+
+/**
+ * One time initialization. Should be called once before any other function in
+ * this unit.
+ */
+void projectm_base_init_once();
+
+/**
+ * set_property delegate for projectM setting structs.
+ *
+ * @param object Plugin gst object.
+ * @param settings Settings struct to update.
+ * @param property_id Property id to update.
+ * @param value Property value.
+ * @param pspec Gst param type spec.
+ */
+void gst_projectm_base_set_property(GObject *object,
+ GstBaseProjectMSettings *settings,
+ guint property_id, const GValue *value,
+ GParamSpec *pspec);
+
+/**
+ * get_property delegate for projectM setting structs.
+ *
+ * @param object Plugin gst object.
+ * @param settings Settings struct to read from.
+ * @param property_id Property id to read.
+ * @param value Output value to populate.
+ * @param pspec Gst param type spec.
+ */
+void gst_projectm_base_get_property(GObject *object,
+ GstBaseProjectMSettings *settings,
+ guint property_id, GValue *value,
+ GParamSpec *pspec);
+
+/**
+ * Plugin init() delegate for projectM settings and priv.
+ *
+ * @param settings Settings to init.
+ * @param priv Private obj to init.
+ */
+void gst_projectm_base_init(GstBaseProjectMSettings *settings,
+ GstBaseProjectMPrivate *priv);
+
+/**
+ * Plugin finalize() delegate for projectM settings and priv.
+ *
+ * @param settings Settings to finalize.
+ * @param priv Private obj to finalize.
+ */
+void gst_projectm_base_finalize(GstBaseProjectMSettings *settings,
+ GstBaseProjectMPrivate *priv);
+
+/**
+ * GL start delegate to setup projectM fbo rendering.
+ *
+ * @param plugin Plugin gst object.
+ * @param priv Plugin priv data.
+ * @param settings Plugin settings.
+ * @param context The gl context to use for projectM rendering.
+ * @param vinfo Video rendering details.
+ *
+ * @return TRUE on success.
+ */
+gboolean gst_projectm_base_gl_start(GObject *plugin,
+ GstBaseProjectMPrivate *priv,
+ GstBaseProjectMSettings *settings,
+ GstGLContext *context, GstVideoInfo *vinfo);
+
+/**
+ * GL stop delegate to clean up projectM rendering resources.
+ *
+ * @param plugin Plugin gst object.
+ * @param priv Plugin priv data.
+ */
+void gst_projectm_base_gl_stop(GObject *plugin, GstBaseProjectMPrivate *priv);
+
+/**
+ * Just pushes audio data to projectM without rendering.
+ *
+ * @param priv Plugin priv data.
+ * @param in_audio Audio data buffer to push to projectM.
+ */
+void gst_projectm_base_fill_audio_buffer(GstBaseProjectMPrivate *priv,
+ GstBuffer *in_audio);
+
+/**
+ * Render one frame with projectM.
+ *
+ * @param plugin Plugin gst object.
+ * @param priv Plugin priv data.
+ * @param settings Plugin settings.
+ * @param context ProjectM GL context.
+ * @param pts Current pts timestamp.
+ * @param in_audio Input audio buffer to push to projectM before rendering, may
+ * be NULL.
+ */
+void gst_projectm_base_fill_gl_memory_callback(
+ GstObject *plugin, GstBaseProjectMPrivate *priv,
+ GstBaseProjectMSettings *settings, GstGLContext *context, GstClockTime pts,
+ GstBuffer *in_audio);
+
+/**
+ * Install properties from projectM settings to given plugin class.
+ *
+ * @param gobject_class Plugin class to install properties to.
+ */
+void gst_projectm_base_install_properties(GObjectClass *gobject_class);
+
+#define GST_PROJECTM_BASE_LOCK(plugin) \
+ (g_mutex_lock(&plugin->priv->base.projectm_lock))
+#define GST_PROJECTM_BASE_UNLOCK(plugin) \
+ (g_mutex_unlock(&plugin->priv->base.projectm_lock))
+
+G_END_DECLS
+
+#endif // PLUGINBASE_H
diff --git a/src/projectm.c b/src/projectm.c
deleted file mode 100644
index 1bac137..0000000
--- a/src/projectm.c
+++ /dev/null
@@ -1,127 +0,0 @@
-#ifdef HAVE_CONFIG_H
-#include "config.h"
-#endif
-
-#include <gst/gl/gl.h>
-
-#include <projectM-4/playlist.h>
-#include <projectM-4/projectM.h>
-
-#include "plugin.h"
-#include "projectm.h"
-
-GST_DEBUG_CATEGORY_STATIC(projectm_debug);
-#define GST_CAT_DEFAULT projectm_debug
-
-projectm_handle projectm_init(GstProjectM *plugin) {
- projectm_handle handle = NULL;
- projectm_playlist_handle playlist = NULL;
-
- GST_DEBUG_CATEGORY_INIT(projectm_debug, "projectm", 0, "ProjectM");
-
- GstAudioVisualizer *bscope = GST_AUDIO_VISUALIZER(plugin);
-
- // Create ProjectM instance
- GST_DEBUG_OBJECT(plugin, "Creating projectM instance..");
- handle = projectm_create();
-
- if (!handle) {
- GST_DEBUG_OBJECT(
- plugin,
- "project_create() returned NULL, projectM instance was not created!");
- return NULL;
- } else {
- GST_DEBUG_OBJECT(plugin, "Created projectM instance!");
- }
-
- if (plugin->enable_playlist) {
- GST_DEBUG_OBJECT(plugin, "Playlist enabled");
-
- // initialize preset playlist
- playlist = projectm_playlist_create(handle);
- projectm_playlist_set_shuffle(playlist, plugin->shuffle_presets);
- // projectm_playlist_set_preset_switched_event_callback(_playlist,
-    // &ProjectMWrapper::PresetSwitchedEvent, static_cast<void *>(this));
- } else {
- GST_DEBUG_OBJECT(plugin, "Playlist disabled");
- }
-
- // Log properties
- GST_INFO_OBJECT(
- plugin,
- "Using Properties: "
- "preset=%s, "
- "texture-dir=%s, "
- "beat-sensitivity=%f, "
- "hard-cut-duration=%f, "
- "hard-cut-enabled=%d, "
- "hard-cut-sensitivity=%f, "
- "soft-cut-duration=%f, "
- "preset-duration=%f, "
- "mesh-size=(%lu, %lu)"
- "aspect-correction=%d, "
- "easter-egg=%f, "
- "preset-locked=%d, "
- "enable-playlist=%d, "
- "shuffle-presets=%d",
- plugin->preset_path, plugin->texture_dir_path, plugin->beat_sensitivity,
- plugin->hard_cut_duration, plugin->hard_cut_enabled,
- plugin->hard_cut_sensitivity, plugin->soft_cut_duration,
- plugin->preset_duration, plugin->mesh_width, plugin->mesh_height,
- plugin->aspect_correction, plugin->easter_egg, plugin->preset_locked,
- plugin->enable_playlist, plugin->shuffle_presets);
-
- // Load preset file if path is provided
- if (plugin->preset_path != NULL) {
- int added_count =
- projectm_playlist_add_path(playlist, plugin->preset_path, true, false);
- GST_INFO("Loaded preset path: %s, presets found: %d", plugin->preset_path,
- added_count);
- }
-
- // Set texture search path if directory path is provided
- if (plugin->texture_dir_path != NULL) {
- const gchar *texturePaths[1] = {plugin->texture_dir_path};
- projectm_set_texture_search_paths(handle, texturePaths, 1);
- }
-
- // Set properties
- projectm_set_beat_sensitivity(handle, plugin->beat_sensitivity);
- projectm_set_hard_cut_duration(handle, plugin->hard_cut_duration);
- projectm_set_hard_cut_enabled(handle, plugin->hard_cut_enabled);
- projectm_set_hard_cut_sensitivity(handle, plugin->hard_cut_sensitivity);
- projectm_set_soft_cut_duration(handle, plugin->soft_cut_duration);
-
- // Set preset duration, or set to in infinite duration if zero
- if (plugin->preset_duration > 0.0) {
- projectm_set_preset_duration(handle, plugin->preset_duration);
-
- // kick off the first preset
- if (projectm_playlist_size(playlist) > 1 && !plugin->preset_locked) {
- projectm_playlist_play_next(playlist, true);
- }
- } else {
- projectm_set_preset_duration(handle, 999999.0);
- }
-
- projectm_set_mesh_size(handle, plugin->mesh_width, plugin->mesh_height);
- projectm_set_aspect_correction(handle, plugin->aspect_correction);
- projectm_set_easter_egg(handle, plugin->easter_egg);
- projectm_set_preset_locked(handle, plugin->preset_locked);
-
- projectm_set_fps(handle, GST_VIDEO_INFO_FPS_N(&bscope->vinfo));
- projectm_set_window_size(handle, GST_VIDEO_INFO_WIDTH(&bscope->vinfo),
- GST_VIDEO_INFO_HEIGHT(&bscope->vinfo));
-
- return handle;
-}
-
-// void projectm_render(GstProjectM *plugin, gint16 *samples, gint sample_count)
-// {
-// GST_DEBUG_OBJECT(plugin, "Rendering %d samples", sample_count);
-
-// projectm_pcm_add_int16(plugin->handle, samples, sample_count,
-// PROJECTM_STEREO);
-
-// projectm_opengl_render_frame(plugin->handle);
-// }
diff --git a/src/projectm.h b/src/projectm.h
deleted file mode 100644
index 1ba6a37..0000000
--- a/src/projectm.h
+++ /dev/null
@@ -1,24 +0,0 @@
-#ifndef __PROJECTM_H__
-#define __PROJECTM_H__
-
-#include <gst/gst.h>
-
-#include "plugin.h"
-#include <projectM-4/projectM.h>
-
-G_BEGIN_DECLS
-
-/**
- * @brief Initialize ProjectM
- */
-projectm_handle projectm_init(GstProjectM *plugin);
-
-/**
- * @brief Render ProjectM
- */
-// void projectm_render(GstProjectM *plugin, gint16 *samples, gint
-// sample_count);
-
-G_END_DECLS
-
-#endif /* __PROJECTM_H__ */
\ No newline at end of file
diff --git a/src/register.c b/src/register.c
new file mode 100644
index 0000000..800586e
--- /dev/null
+++ b/src/register.c
@@ -0,0 +1,34 @@
+
+#include "config.h"
+#include "plugin.h"
+
+#include <gst/gst.h>
+
+/*
+ * This unit registers all gst elements from this plugin library to make them
+ * available to GStreamer.
+ */
+
+GST_DEBUG_CATEGORY(gst_projectm_debug);
+#define GST_CAT_DEFAULT gst_projectm_debug
+
+static gboolean plugin_init(GstPlugin *plugin) {
+
+ projectm_base_init_once();
+
+ GST_DEBUG_CATEGORY_INIT(gst_projectm_debug, "projectm", 0,
+ "projectM visualizer plugin");
+
+ // register main plugin projectM element
+ gboolean p1 = gst_element_register(plugin, "projectm", GST_RANK_NONE,
+ GST_TYPE_PROJECTM);
+
+ // add additional elements here..
+
+ return p1;
+}
+
+GST_PLUGIN_DEFINE(GST_VERSION_MAJOR, GST_VERSION_MINOR, projectm,
+ "plugin to visualize audio using the ProjectM library",
+ plugin_init, PACKAGE_VERSION, PACKAGE_LICENSE, PACKAGE_NAME,
+ PACKAGE_ORIGIN)