Commit 6bc8437e authored by Leander Schulten's avatar Leander Schulten

Merge branch 'feature/rtAudio'

parents 02045d0e 40a5ff45
Pipeline #190895 passed in 6 minutes and 30 seconds
[submodule "src/lib/aubio/aubio.git"]
path = src/lib/aubio/aubio.git
url = https://github.com/aubio/aubio
[submodule "src/lib/RtAudio/rtaudio"]
path = src/lib/RtAudio/rtaudio
url = https://github.com/thestk/rtaudio.git
......@@ -32,8 +32,8 @@ The Lichtsteuerung depends on a few other projects.
#### [Boost](https://www.boost.org/)
Boost is required to enable non-linear code execution in modules (coroutines) and is used to print stack traces.
#### [Capture_Windows_SoundOutput](https://git.rwth-aachen.de/leander.schulten/Capture_Windows_SoundOutput)
This project makes it possible to capture the Windows audio output. There are no implementations for other platforms, but contributions are very welcome. The compiled version is included here in the git repository.
#### [RtAudio](https://github.com/thestk/rtaudio)
RtAudio (Realtime Audio) is a library that can record from any audio input on every operating system. On Windows it is also possible to record the audio output. Before RtAudio, this task was handled by the [Capture_Windows_SoundOutput](https://git.rwth-aachen.de/leander.schulten/Capture_Windows_SoundOutput) lib (which could only capture the default output on Windows).
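A minimal sketch of how an RtAudio capture stream is usually opened (the channel count, sample rate, and buffer size below are placeholder assumptions, not the values this project uses):
```cpp
#include <RtAudio.h>
#include <iostream>

// Called by RtAudio whenever a buffer of input samples is available.
static int record(void * /*outputBuffer*/, void *inputBuffer, unsigned int nFrames,
                  double /*streamTime*/, RtAudioStreamStatus status, void * /*userData*/) {
    if (status) std::cerr << "Stream over/underflow detected\n";
    const float *samples = static_cast<const float *>(inputBuffer);
    (void)samples; // ... process nFrames * nChannels interleaved samples here ...
    return 0;      // 0 keeps the stream running, a non-zero value stops it
}

int main() {
    RtAudio audio;
    if (audio.getDeviceCount() == 0) return 1;
    RtAudio::StreamParameters params;
    params.deviceId = audio.getDefaultInputDevice();
    params.nChannels = 2;        // assumption: stereo input
    unsigned bufferFrames = 512; // assumption: frames per callback
    try {
        audio.openStream(nullptr, &params, RTAUDIO_FLOAT32, 44100, &bufferFrames, &record);
        audio.startStream();
        // ... capture for as long as needed, then:
        audio.stopStream();
        audio.closeStream();
    } catch (const RtAudioError &e) {
        std::cerr << e.getMessage() << '\n';
        return 1;
    }
    return 0;
}
```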
#### [AudioFFT](https://git.rwth-aachen.de/leander.schulten/FFT_for_Audio)
This project makes it possible to split the captured sound into frequency bands, where each band is assigned an "energy"/intensity. It in turn depends on [fftw](http://www.fftw.org/). These dependencies are also included here as compiled libs in the git repository.
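Conceptually, the energy of a band is the sum of the squared FFT magnitudes over the bins that fall into that band. The sketch below only illustrates the idea; it is not the actual interface of the AudioFFT project:
```cpp
#include <complex>
#include <utility>
#include <vector>

// Hypothetical helper: sum |X_k|^2 over the bin ranges that make up each band.
std::vector<float> bandEnergies(const std::vector<std::complex<float>> &spectrum,
                                const std::vector<std::pair<std::size_t, std::size_t>> &bands) {
    std::vector<float> energies;
    energies.reserve(bands.size());
    for (const auto &[first, last] : bands) {
        float e = 0.0f;
        for (std::size_t k = first; k <= last && k < spectrum.size(); ++k) {
            e += std::norm(spectrum[k]); // |X_k|^2
        }
        energies.push_back(e);
    }
    return energies;
}
```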
......
......@@ -276,3 +276,8 @@ win32-msvc{
LIBS += -L$$PWD/'lib/AudioFFT/dll/AudioFFT.dll'
INCLUDEPATH += $$PWD/'lib/AudioFFT/include'
}
# RTAudio
INCLUDEPATH += $$PWD/lib/RtAudio/include
LIBS += -L$$PWD/lib/RtAudio/lib -lrtaudio
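# additional Windows system libraries required by RtAudio's WASAPI backend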
win32: LIBS += -lole32 -lwinmm -lksuser -lmfplat -lmfuuid -lwmcodecdspuuid
......@@ -5,10 +5,27 @@
#include "gui/oscillogram.h"
#include <algorithm>
#ifdef AUDIO_IF_WIN
#error AUDIO_IF_WIN is already defined
#endif
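// AUDIO_IF_WIN(x) expands to its argument only on Windows, where output devices can also be captured, and to nothing on other platforms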
#ifdef Q_OS_WIN
#define AUDIO_IF_WIN(x) x
#else
#define AUDIO_IF_WIN(x)
#endif
namespace Audio {
AudioCaptureManager::AudioCaptureManager():audiofft(sample.size())
{
rtAudio.showWarnings();
updateCaptureDeviceList();
}
int AudioCaptureManager::rtAudioCallback(void * /*outputBuffer*/, void *inputBuffer, unsigned int nFrames, double /*streamTime*/, RtAudioStreamStatus /*status*/, void * /*userData*/) {
bool end = false;
get().dataCallback(static_cast<float *>(inputBuffer), nFrames, &end);
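// a non-zero return value tells RtAudio to stop the stream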
return end;
}
void AudioCaptureManager::initCallback(int channels, int samplesPerSecond) {
......@@ -20,8 +37,11 @@ void AudioCaptureManager::initCallback(int channels, int samplesPerSecond) {
}
}
void AudioCaptureManager::dataCallback(float* data, unsigned int frames, bool*done){
*done = !run;
if (!run) {
return;
}
if(!data)
return;
if(channels<0)
......@@ -41,7 +61,7 @@ void AudioCaptureManager::dataCallback(float* data, unsigned int frames, bool*do
audiofft.analyse(sample.data(),1,fftoutput.data());
{
// feed the *analysis classes with new samples
unsigned restFrames = frames;
int restFrames = static_cast<int>(frames);
if (restFrames % samplesPerFrame != 0) {
static bool once = false;
if (!once) {
......@@ -75,36 +95,176 @@ void AudioCaptureManager::dataCallback(float* data, unsigned int frames, bool*do
}
}
// db scale
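// (the +1 offset keeps log10 defined for bins with zero energy)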
std::transform(fftoutput.begin(),fftoutput.end(),fftoutput.begin(),[](auto i){return 10*std::log10(1+i);});
std::transform(fftoutput.begin(), fftoutput.end(), fftoutput.begin(), [](auto i) { return 10 * std::log10(1 + i); });
if(GUI::Graph::getLast())
GUI::Graph::getLast()->showData(fftoutput.data(),fftoutput.size());
if(GUI::Colorplot::getLast()){
if (GUI::Graph::getLast() && run) {
GUI::Graph::getLast()->showData(fftoutput.data(), fftoutput.size());
}
if (GUI::Colorplot::getLast() && run) {
GUI::Colorplot::getLast()->startBlock();
for (int i = 0; i < 512; ++i) {
GUI::Colorplot::getLast()->pushDataToBlock(fftoutput.at(i));
}
GUI::Colorplot::getLast()->endBlock();
}
if(GUI::Oscillogram::getLast())
GUI::Oscillogram::getLast()->showData(sample.data(),sample.size());
if (GUI::Oscillogram::getLast() && run) {
GUI::Oscillogram::getLast()->showData(sample.data(), sample.size());
}
}
bool AudioCaptureManager::startCapturingFromInput(unsigned input) {
if (input >= rtAudio.getDeviceCount()) {
return false;
}
const auto di = rtAudio.getDeviceInfo(input);
if (di.inputChannels == 0 AUDIO_IF_WIN(&&di.outputChannels == 0)) {
return false;
}
// a device was opened before, so samplesPerSecond is already set; the new device must support that rate
int sampleRate = this->samplesPerSecond;
if (this->samplesPerSecond > 0) {
bool supported = std::any_of(di.sampleRates.cbegin(), di.sampleRates.cend(), [this](int s) { return s == this->samplesPerSecond; });
if (!supported) {
return false;
}
} else {
sampleRate = static_cast<int>(di.preferredSampleRate);
}
initCallback(static_cast<int>(di.inputChannels + di.outputChannels), sampleRate);
RtAudio::StreamParameters isp;
isp.deviceId = input;
isp.nChannels = di.inputChannels AUDIO_IF_WIN(+di.outputChannels);
isp.firstChannel = 0;
unsigned samplesPerFrame = static_cast<unsigned>(this->samplesPerFrame);
try {
rtAudio.openStream(nullptr, &isp, RTAUDIO_FLOAT32, static_cast<unsigned>(this->samplesPerSecond), &samplesPerFrame, rtAudioCallback, nullptr, nullptr, nullptr);
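// RtAudio may have adjusted the buffer size; give up if it no longer matches the configured samplesPerFrame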
if (static_cast<int>(samplesPerFrame) != this->samplesPerFrame) {
rtAudio.closeStream();
return false;
}
rtAudio.startStream();
run = true;
emit this->capturingStatusChanged();
} catch (const RtAudioError &error) {
ErrorNotifier::showError(QString::fromStdString(error.getMessage()));
run = false;
emit capturingStatusChanged();
return false;
}
return true;
}
bool AudioCaptureManager::startCapturingFromDevice(const QString &name) {
for (unsigned i = 0; i < rtAudio.getDeviceCount(); ++i) {
if (auto di = rtAudio.getDeviceInfo(i); di.name.c_str() == name) {
return startCapturingFromInput(i);
}
}
return false;
}
bool AudioCaptureManager::startCapturing(QString filePathToCaptureLibrary){
bool AudioCaptureManager::startCapturingFromDefaultInput() {
stopCapturingAndWait();
typedef int (*capture)(void (*)(int, int), void (*)(float *, unsigned int, bool *));
auto func = reinterpret_cast<capture>(QLibrary::resolve(filePathToCaptureLibrary,"captureAudio"));
if(func){
captureAudioThread = std::thread([this,func](){
run = true;
emit this->capturingStatusChanged();
func(&AudioCaptureManager::staticInitCallback,&AudioCaptureManager::staticDataCallback);
run = false;
emit this->capturingStatusChanged();
});
}
return func;
#ifdef Q_OS_WIN
// check if the default output is available
const auto output = rtAudio.getDefaultOutputDevice();
if (output < rtAudio.getDeviceCount()) {
const auto di = rtAudio.getDeviceInfo(output);
if (di.isDefaultOutput) {
if (startCapturingFromInput(output)) {
currentCaptureDevice = getIndexForDeviceName(di.name.c_str());
emit currentCaptureDeviceChanged();
return true;
}
}
}
#endif
// check if the default input is available
const auto input = rtAudio.getDefaultInputDevice();
if (input >= rtAudio.getDeviceCount()) {
return false;
}
const auto di = rtAudio.getDeviceInfo(input);
if (!di.isDefaultInput) {
return false;
}
if (startCapturingFromInput(input)) {
currentCaptureDevice = getIndexForDeviceName(di.name.c_str());
emit currentCaptureDeviceChanged();
return true;
}
return false;
}
void AudioCaptureManager::stopCapturing() {
try {
if (rtAudio.isStreamRunning()) {
rtAudio.abortStream();
}
if (rtAudio.isStreamOpen()) {
rtAudio.closeStream();
}
} catch (const RtAudioError &e) {
ErrorNotifier::showError("Error while stopping audio stream: " + QString(e.what()));
}
run = false;
emit capturingStatusChanged();
}
void AudioCaptureManager::stopCapturingAndWait() {
try {
if (rtAudio.isStreamOpen()) {
rtAudio.closeStream();
std::this_thread::yield();
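// wait until RtAudio no longer reports the stream as running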
while (rtAudio.isStreamRunning()) {
std::this_thread::sleep_for(std::chrono::microseconds(500));
}
}
run = false;
} catch (const RtAudioError &e) {
ErrorNotifier::showError("Error while stopping audio stream: " + QString(e.what()));
}
emit capturingStatusChanged();
}
bool AudioCaptureManager::isCapturing() const {
return run || rtAudio.isStreamRunning();
}
void AudioCaptureManager::updateCaptureDeviceList() {
QString name;
if (currentCaptureDevice >= 0 && currentCaptureDevice < captureDeviceNames.ssize()) {
name = captureDeviceNames[currentCaptureDevice];
}
captureDeviceNames.clear();
for (unsigned i = 0; i < rtAudio.getDeviceCount(); ++i) {
if (auto di = rtAudio.getDeviceInfo(i); di.inputChannels > 0 AUDIO_IF_WIN(|| di.outputChannels > 0)) {
captureDeviceNames.emplace_back(QString::fromStdString(di.name.c_str()));
}
}
auto newIndex = getIndexForDeviceName(name);
if (newIndex != currentCaptureDevice) {
currentCaptureDevice = newIndex;
emit currentCaptureDeviceChanged();
}
}
void AudioCaptureManager::setCurrentCaptureDevice(int index) {
if (index != currentCaptureDevice) {
if (index < 0 || index >= captureDeviceNames.ssize()) {
stopCapturing();
currentCaptureDevice = -1;
emit currentCaptureDeviceChanged();
return;
}
stopCapturingAndWait();
currentCaptureDevice = index;
if (!startCapturingFromDevice(captureDeviceNames[index])) {
ErrorNotifier::showError("Error while starting stream " + captureDeviceNames[index]);
}
emit currentCaptureDeviceChanged();
}
}
const EventSeries *AudioCaptureManager::requestTempoAnalysis(Aubio::OnsetDetectionFunction f) {
......
#ifndef AUDIOCAPTUREMANAGER_H
#define AUDIOCAPTUREMANAGER_H
#include "audioeventdata.h"
#include "modelvector.h"
#include "sample.h"
#include "aubio/onsetanalysis.h"
#include "aubio/tempoanalysis.h"
#include "audio_fft.h"
#include "audioeventdata.h"
#include "sample.h"
#include <RtAudio.h>
#include <map>
#include <thread>
......@@ -30,11 +32,16 @@ class AudioCaptureManager : public QObject
{
Q_OBJECT
Q_PROPERTY(bool capturing READ isCapturing NOTIFY capturingStatusChanged)
Q_PROPERTY(int currentCaptureDevice READ getCurrentCaptureDevice WRITE setCurrentCaptureDevice NOTIFY currentCaptureDeviceChanged)
Q_PROPERTY(QAbstractItemModel *captureDeviceNames READ getCaptureDeviceNamesModel CONSTANT)
Sample<float,4096> sample;
std::array<float,2048> fftoutput;
std::thread captureAudioThread;
std::array<float, 2048> fftoutput;
std::atomic_bool run;
int currentCaptureDevice = -1;
RtAudio rtAudio;
AudioFFT audiofft;
ModelVector<QString> captureDeviceNames;
int channels = -1;
int samplesPerSecond = -1;
int samplesPerFrame = -1;
......@@ -49,22 +56,55 @@ class AudioCaptureManager : public QObject
private:
AudioCaptureManager();
~AudioCaptureManager(){
if(captureAudioThread.joinable()){
run.store(false);
captureAudioThread.join();
}
}
private:
static void staticInitCallback(int channels, int samplesPerSecond) { get().initCallback(channels, samplesPerSecond); }
static void staticDataCallback(float* data, unsigned int frames, bool*done){get().dataCallback(data,frames,done);}
static int rtAudioCallback(void *outputBuffer, void *inputBuffer, unsigned int nFrames, double streamTime, RtAudioStreamStatus status, void *userData);
void initCallback(int channels, int samplesPerSecond);
void dataCallback(float* data, unsigned int frames, bool*done);
void dataCallback(float *data, unsigned int frames, bool *done);
/**
* @brief startCapturingFromInput starts capturing from an input device
* @param inputIndex The index of the input device from rtAudio.getDeviceInfo(...)
* @return true if the starting of the capturing was successful, false otherwise
*/
bool startCapturingFromInput(unsigned inputIndex);
/**
* @brief getIndexForDeviceName returns the index in the captureDeviceNames for the device with the given name
* @param name the device name
* @return the index of the device in the captureDeviceNames, or -1, if there is no device with the given name
*/
template <typename String>
int getIndexForDeviceName(const String &name);
public:
bool startCapturing(QString filePathToCaptureLibrary);
void stopCapturing(){run=false;}
void stopCapturingAndWait(){run=false;if(captureAudioThread.joinable())captureAudioThread.join();}
bool isCapturing(){return run;}
/**
* @brief startCapturingFromDevice starts capturing from the capture device with the given name. For the names, see getCaptureDeviceNames()
* @param name the name of the capture device
* @return true if the capturing starts successfully, false otherwise
*/
Q_INVOKABLE bool startCapturingFromDevice(const QString &name);
/**
* @brief startCapturingFromDefaultInput starts capturing from the default input device; on Windows it captures from the default output device instead
* @return true if the capturing starts successfully, false otherwise
*/
bool startCapturingFromDefaultInput();
void stopCapturing();
void stopCapturingAndWait();
bool isCapturing() const;
/**
* @brief updateCaptureDeviceList updates the list of devices from which capturing can be started. See getCaptureDeviceNames()
*/
Q_INVOKABLE void updateCaptureDeviceList();
int getCurrentCaptureDevice() const { return currentCaptureDevice; }
void setCurrentCaptureDevice(int index);
QAbstractItemModel *getCaptureDeviceNamesModel() { return &captureDeviceNames; }
/**
* @brief getCaptureDeviceNames returns the names of all devices from which capturing can be started. The list can be updated with updateCaptureDeviceList()
* @return a list of the names of all devices from which capturing can be started
*/
const std::vector<QString> &getCaptureDeviceNames() const { return captureDeviceNames.getVector(); }
const std::array<float, 2048> &getFFTOutput() { return fftoutput; }
/**
* @brief requestTempoAnalysis requests the data series from a tempo analysis that uses a specific onset detection function
......@@ -87,8 +127,21 @@ public:
static AudioCaptureManager & get(){static AudioCaptureManager m;return m;}
signals:
void capturingStatusChanged();
void currentCaptureDeviceChanged();
};
template <typename String>
int AudioCaptureManager::getIndexForDeviceName(const String &name) {
int index = 0;
for (const auto &n : captureDeviceNames) {
if (n == name) {
return index;
}
++index;
}
return -1;
}
} // namespace Audio
#endif // AUDIOCAPTUREMANAGER_H
*/*
\ No newline at end of file
#!/bin/bash
# see https://github.com/thestk/rtaudio/blob/master/install.txt
# add and update git submodule
git submodule init
git submodule update
# cd into git repo
GIT_DIR=rtaudio
cd $GIT_DIR
# build
if [[ "$OSTYPE" == "msys" ]] || ! [[ -z "$GITLAB_CI" ]]; then
# we are on windows or on the gitlab ci
./autogen.sh --no-configure
./configure --with-wasapi --host=mingw32
FILES_TO_COPY="librtaudio.a"
elif [[ "$OSTYPE" == "darwin"* ]]; then
# macOS
./autogen.sh --with-core
FILES_TO_COPY="librtaudio.dylib librtaudio.6.dylib"
else
#linux
./autogen.sh --with-alsa
FILES_TO_COPY="librtaudio.so librtaudio.6.so"
fi
make
cd ..
#copy headers and lib
mkdir -p lib
cd $GIT_DIR/.libs/
cp $FILES_TO_COPY ../../lib
cd ../../
mkdir -p include
cp $GIT_DIR/RtAudio.h include
Subproject commit 57c2c9d7598a783a5167422cd744f4d3797141bb
#ifndef CAPTURE_WINDOWS_SOUNDOUTPUT_GLOBAL_H
#define CAPTURE_WINDOWS_SOUNDOUTPUT_GLOBAL_H
#include <QtCore/qglobal.h>
#if defined(CAPTURE_WINDOWS_SOUNDOUTPUT_LIBRARY)
# define CAPTURE_WINDOWS_SOUNDOUTPUTSHARED_EXPORT Q_DECL_EXPORT
#else
# define CAPTURE_WINDOWS_SOUNDOUTPUTSHARED_EXPORT Q_DECL_IMPORT
#endif
extern "C" CAPTURE_WINDOWS_SOUNDOUTPUTSHARED_EXPORT int captureAudio(void(*init)(int channels),void(*callback)(float * data, unsigned int numFramesAvailable, bool * done));
#endif // CAPTURE_WINDOWS_SOUNDOUTPUT_GLOBAL_H
......@@ -20,4 +20,9 @@ cd aubio
./build_aubio.sh
cd ..
echo $'\n\nBuild RtAudio'
cd RtAudio
./build_rtAudio.sh
cd ..
echo "Lib installation complete"
......@@ -259,11 +259,6 @@ int main(int argc, char *argv[]) {
Settings::connect(&settings,&Settings::driverFilePathChanged,[&](){
Driver::loadAndStartDriver(settings.getDriverFilePath());
});
Settings::connect(&settings,&Settings::audioCaptureFilePathChanged,[&](){
if(!Audio::AudioCaptureManager::get().startCapturing(settings.getAudioCaptureFilePath())){
ErrorNotifier::get()->newError(QStringLiteral("Failed to load Audio Capture Library"));
}
});
Settings::connect(&settings,&Settings::updatePauseInMsChanged,[&](){
if(Driver::getCurrentDriver()){
Driver::getCurrentDriver()->setWaitTime(std::chrono::milliseconds(settings.getUpdatePauseInMs()));
......@@ -296,6 +291,7 @@ int main(int argc, char *argv[]) {
engine.rootContext()->setContextProperty(QStringLiteral("ledConsumer"),&Modules::LedConsumer::allLedConsumer);
QQmlEngine::setObjectOwnership(&Driver::dmxValueModel,QQmlEngine::CppOwnership);
engine.rootContext()->setContextProperty(QStringLiteral("dmxOutputValues"),&Driver::dmxValueModel);
engine.rootContext()->setContextProperty(QStringLiteral("AudioManager"), &Audio::AudioCaptureManager::get());
engine.load(QUrl(QStringLiteral("qrc:/qml/main.qml")));
......@@ -329,7 +325,10 @@ int main(int argc, char *argv[]) {
driver.start();
#endif
qDebug() << "start capturing : " << Audio::AudioCaptureManager::get().startCapturing(settings.getAudioCaptureFilePath());
auto &audioManager = Audio::AudioCaptureManager::get();
if (!audioManager.startCapturingFromDefaultInput()) {
ErrorNotifier::showError("Audio capturing not possible");
}
Modules::ModuleManager::singletone()->controller().start();
//ControlPanel::getLastCreated()->addDimmerGroupControl();
......
......@@ -99,6 +99,8 @@ public:
}else{
if constexpr(std::is_base_of_v<QObject, Type>)
return model[index.row()].property("name");
else if constexpr (std::is_same_v<QString, Type>)
return model[index.row()];
else
return "No Display Property available! See ModelVector";
}
......@@ -162,6 +164,14 @@ public:
* @brief endPushBack Call this function when you are finished adding items after a call to beginPushBack
*/
void endPushBack(){endInsertRows();}
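/**
* @brief beginInsert Call this function before inserting length items starting at index firstIndex; call endInsert when the insertion is done
*/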
void beginInsert(int firstIndex, int length) { beginInsertRows(QModelIndex(), firstIndex, firstIndex + length - 1); }
void endInsert() { endInsertRows(); }
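/**
* @brief insert inserts a value at the given index, wrapped in beginInsert/endInsert so that attached views are notified
*/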
auto insert(int index, const Type &value) {
beginInsert(index, 1);
auto res = model.insert(cbegin() + index, value);
endInsert();
return res;
}
typename std::vector<Type>::const_iterator cbegin()const{
return model.cbegin();
}
......
......@@ -8,7 +8,7 @@ Pane{
GridLayout{
anchors.left: parent.left
anchors.right: parent.right
rowSpacing: 20
rowSpacing: 16
columns: 2
Label{
text: "Settings file path:"
......@@ -103,14 +103,14 @@ Pane{
Label{
Layout.fillWidth: true
text: "AudioCaptureLib:"
text: "Audio Capture Device:"
}
TextFieldFileChooser{
ComboBox{
model: AudioManager.captureDeviceNames
Layout.fillWidth: true
folder: false
path: Settings.audioCaptureFilePath
onPathChanged: {Settings.audioCaptureFilePath = path;path = Settings.audioCaptureFilePath;}
fileChooser: fileDialog
onActivated: AudioManager.currentCaptureDevice = index
currentIndex: AudioManager.currentCaptureDevice
onDownChanged: if(down)AudioManager.updateCaptureDeviceList()
}
}
FileDialog{
......
......@@ -20,7 +20,6 @@ class Settings : public QObject
Q_PROPERTY(QString compilerFlags READ getCompilerFlags WRITE setCompilerFlags NOTIFY compilerFlagsChanged)
Q_PROPERTY(QString compilerLibraryFlags READ getCompilerLibraryFlags WRITE setCompilerLibraryFlags NOTIFY compilerLibraryFlagsChanged)
Q_PROPERTY(QString includePath READ getIncludePath WRITE setIncludePath NOTIFY includePathChanged)
Q_PROPERTY(QString audioCaptureFilePath READ getAudioCaptureFilePath WRITE setAudioCaptureFilePath NOTIFY audioCaptureFilePathChanged)
Q_PROPERTY(unsigned int updatePauseInMs READ getUpdatePauseInMs WRITE setUpdatePauseInMs NOTIFY updatePauseInMsChanged)
static inline QFileInfo localSettingsFile;
public:
......@@ -58,17 +57,6 @@ public:
}
QString getDriverFilePath()const{return value(QStringLiteral("driverFilePath")).toString();}
void setAudioCaptureFilePath(const QString& file){
if(file == getAudioCaptureFilePath()){
return;
}
if(QFile::exists(file)){
setValue(QStringLiteral("audioCaptureFilePath"),file);
emit audioCaptureFilePathChanged();
}
}
QString getAudioCaptureFilePath()const{return value(QStringLiteral("audioCaptureFilePath")).toString();}
void setUpdatePauseInMs(unsigned int pause){setValue(QStringLiteral("updatePauseInMs"),pause);emit updatePauseInMsChanged();}
unsigned int getUpdatePauseInMs()const{return value(QStringLiteral("updatePauseInMs")).toUInt();}
void setModuleDirPath( const QString &_moduleDirPath){
......@@ -136,7 +124,6 @@ signals:
void compilerFlagsChanged();
void compilerLibraryFlagsChanged();
void includePathChanged();
void audioCaptureFilePathChanged();
public slots:
};
......