Commit dd2beb97 authored by Leander Schulten


AudioManager: You can now capture audio inputs (microphones and loopback inputs). You can select the capture device in the settings view.
Modelmanager: the DisplayRole now returns the right value if the ModelVector consists of QStrings
parent 316393b9
Pipeline #190668 failed in 4 minutes and 45 seconds
......@@ -8,7 +8,14 @@
namespace Audio {
AudioCaptureManager::AudioCaptureManager():audiofft(sample.size())
{
rtAudio.showWarnings();
updateCaptureDeviceList();
}
int AudioCaptureManager::rtAudioCallback(void * /*outputBuffer*/, void *inputBuffer, unsigned int nFrames, double /*streamTime*/, RtAudioStreamStatus /*status*/, void * /*userData*/) {
bool end = false;
get().dataCallback(static_cast<float *>(inputBuffer), nFrames, &end);
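// a non-zero return value tells RtAudio to stop the stream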
return end;
}
void AudioCaptureManager::initCallback(int channels, int samplesPerSecond) {
......@@ -20,8 +27,11 @@ void AudioCaptureManager::initCallback(int channels, int samplesPerSecond) {
}
}
void AudioCaptureManager::dataCallback(float* data, unsigned int frames, bool*done){
*done = !run;
if (!run) {
return;
}
if(!data)
return;
if(channels<0)
......@@ -41,7 +51,7 @@ void AudioCaptureManager::dataCallback(float* data, unsigned int frames, bool*do
audiofft.analyse(sample.data(),1,fftoutput.data());
{
// feed the *analysis classes with new samples
unsigned restFrames = frames;
int restFrames = static_cast<int>(frames);
if (restFrames % samplesPerFrame != 0) {
static bool once = false;
if (!once) {
......@@ -75,38 +85,208 @@ void AudioCaptureManager::dataCallback(float* data, unsigned int frames, bool*do
}
}
// db scale
std::transform(fftoutput.begin(),fftoutput.end(),fftoutput.begin(),[](auto i){return 10*std::log10(1+i);});
std::transform(fftoutput.begin(), fftoutput.end(), fftoutput.begin(), [](auto i) { return 10 * std::log10(1 + i); });
if(GUI::Graph::getLast())
GUI::Graph::getLast()->showData(fftoutput.data(),fftoutput.size());
if(GUI::Colorplot::getLast()){
if (GUI::Graph::getLast() && run) {
GUI::Graph::getLast()->showData(fftoutput.data(), fftoutput.size());
}
if (GUI::Colorplot::getLast() && run) {
GUI::Colorplot::getLast()->startBlock();
for (int i = 0; i < 512; ++i) {
GUI::Colorplot::getLast()->pushDataToBlock(fftoutput.at(i));
}
GUI::Colorplot::getLast()->endBlock();
}
if(GUI::Oscillogram::getLast())
GUI::Oscillogram::getLast()->showData(sample.data(),sample.size());
if (GUI::Oscillogram::getLast() && run) {
GUI::Oscillogram::getLast()->showData(sample.data(), sample.size());
}
}
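As a quick illustration of the dB mapping used above (10 * log10(1 + i)): the +1 keeps the logarithm defined for silent bins and anchors them at 0 dB. A minimal, self-contained check, with purely illustrative input values:

#include <cmath>
#include <cstdio>

int main() {
    // same mapping as in dataCallback: i == 0 -> 0 dB, i == 9 -> 10 dB, i == 99 -> 20 dB
    const float values[] = {0.0f, 9.0f, 99.0f, 999.0f};
    for (float i : values) {
        std::printf("%g -> %g dB\n", i, 10 * std::log10(1 + i));
    }
    return 0;
}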
void AudioCaptureManager::startCapturingFromCaptureLibrary(AudioCaptureManager::CaptureLibEntry func) {
captureAudioThread = std::thread([this, func]() {
run = true;
emit this->capturingStatusChanged();
func(&AudioCaptureManager::staticInitCallback, &AudioCaptureManager::staticDataCallback);
run = false;
emit this->capturingStatusChanged();
});
}
bool AudioCaptureManager::startCapturing(QString filePathToCaptureLibrary){
stopCapturingAndWait();
typedef int (*capture)(void (*)(int, int), void (*)(float *, unsigned int, bool *));
auto func = reinterpret_cast<capture>(QLibrary::resolve(filePathToCaptureLibrary,"captureAudio"));
if(func){
captureAudioThread = std::thread([this,func](){
run = true;
emit this->capturingStatusChanged();
func(&AudioCaptureManager::staticInitCallback,&AudioCaptureManager::staticDataCallback);
run = false;
emit this->capturingStatusChanged();
});
bool AudioCaptureManager::startCapturingFromInput(unsigned input) {
if (input >= rtAudio.getDeviceCount()) {
return false;
}
const auto di = rtAudio.getDeviceInfo(input);
if (di.inputChannels == 0) {
return false;
}
// if a device was open before, keep using its sample rate (as long as the new device supports it)
int sampleRate = this->samplesPerSecond;
if (this->samplesPerSecond > 0) {
bool supported = std::any_of(di.sampleRates.cbegin(), di.sampleRates.cend(), [this](int s) { return s == this->samplesPerSecond; });
if (!supported) {
return false;
}
} else {
sampleRate = static_cast<int>(di.preferredSampleRate);
}
initCallback(static_cast<int>(di.inputChannels), sampleRate);
RtAudio::StreamParameters isp;
isp.deviceId = input;
isp.nChannels = di.inputChannels;
isp.firstChannel = 0;
unsigned samplesPerFrame = static_cast<unsigned>(this->samplesPerFrame);
try {
rtAudio.openStream(nullptr, &isp, RTAUDIO_FLOAT32, static_cast<unsigned>(this->samplesPerSecond), &samplesPerFrame, rtAudioCallback, nullptr, nullptr, nullptr);
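// openStream may adjust the requested buffer size; give up if it no longer matches samplesPerFrame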
if (static_cast<int>(samplesPerFrame) != this->samplesPerFrame) {
rtAudio.closeStream();
return false;
}
rtAudio.startStream();
run = true;
emit this->capturingStatusChanged();
} catch (const RtAudioError &error) {
ErrorNotifier::showError(QString::fromStdString(error.getMessage()));
run = false;
emit capturingStatusChanged();
return false;
}
return true;
}
bool AudioCaptureManager::loadCaptureLibrary(QString name, QString filePathToCaptureLibrary) {
auto func = reinterpret_cast<CaptureLibEntry>(QLibrary::resolve(filePathToCaptureLibrary, "captureAudio"));
if (func) {
// replace the entry if the name is already registered (std::map::emplace would keep the old one)
captureLibraries.insert_or_assign(name, func);
}
return func;
}
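For context, the "captureAudio" symbol resolved above has to match the CaptureLibEntry signature from the header. A hedged sketch of what such a capture library could export; the channel count, sample rate and buffer size are illustrative only, and the OS-specific capture code is omitted:

#include <array>

// hypothetical capture library source, built into the shared library that gets loaded
extern "C" int captureAudio(void (*init)(int, int),
                            void (*data)(float *, unsigned int, bool *)) {
    constexpr int channels = 2;           // illustrative values
    constexpr int sampleRate = 44100;
    constexpr unsigned framesPerBlock = 441;
    std::array<float, framesPerBlock * channels> buffer{}; // interleaved samples
    init(channels, sampleRate);           // announce the stream format once
    bool done = false;
    while (!done) {
        // ... fill `buffer` from the platform's capture/loopback API ...
        data(buffer.data(), framesPerBlock, &done); // the manager sets *done when capturing should stop
    }
    return 0;
}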
bool AudioCaptureManager::startCapturingFromDevice(const QString &name) {
for (const auto &c : captureLibraries) {
if (c.first == name) {
startCapturingFromCaptureLibrary(c.second);
return true;
}
}
for (unsigned i = 0; i < rtAudio.getDeviceCount(); ++i) {
if (auto di = rtAudio.getDeviceInfo(i); di.name.c_str() == name) {
return startCapturingFromInput(i);
}
}
return false;
}
bool AudioCaptureManager::startCapturingFromCaptureLibrary() {
if (captureLibraries.empty()) {
return false;
}
stopCapturingAndWait();
startCapturingFromCaptureLibrary(captureLibraries.begin()->second);
currentCaptureDevice = 0;
emit currentCaptureDeviceChanged();
return true;
}
bool AudioCaptureManager::startCapturingFromDefaultInput() {
stopCapturingAndWait();
// check if the default input is available
const auto input = rtAudio.getDefaultInputDevice();
if (input >= rtAudio.getDeviceCount()) {
return false;
}
const auto di = rtAudio.getDeviceInfo(input);
if (!di.isDefaultInput) {
return false;
}
if (startCapturingFromInput(input)) {
currentCaptureDevice = getIndexForDeviceName(di.name.c_str());
emit currentCaptureDeviceChanged();
return true;
}
return false;
}
void AudioCaptureManager::stopCapturing() {
try {
if (rtAudio.isStreamRunning()) {
rtAudio.abortStream();
}
if (rtAudio.isStreamOpen()) {
rtAudio.closeStream();
}
} catch (const RtAudioError &e) {
ErrorNotifier::showError("Error while stopping audio stream: " + QString(e.what()));
}
run = false;
emit capturingStatusChanged();
}
void AudioCaptureManager::stopCapturingAndWait() {
try {
if (captureAudioThread.joinable()) {
captureAudioThread.join();
} else {
if (rtAudio.isStreamOpen()) {
rtAudio.closeStream();
std::this_thread::yield();
while (rtAudio.isStreamRunning()) {
std::this_thread::sleep_for(std::chrono::microseconds(500));
}
}
}
} catch (const RtAudioError &e) {
ErrorNotifier::showError("Error while stopping audio stream: " + QString(e.what()));
}
run = false;
emit capturingStatusChanged();
}
bool AudioCaptureManager::isCapturing() const {
return run || rtAudio.isStreamRunning();
}
void AudioCaptureManager::updateCaptureDeviceList() {
QString name;
if (currentCaptureDevice >= 0 && currentCaptureDevice < captureDeviceNames.ssize()) {
name = captureDeviceNames[currentCaptureDevice];
}
captureDeviceNames.clear();
for (const auto &i : captureLibraries) {
captureDeviceNames.push_back(i.first);
}
for (unsigned i = 0; i < rtAudio.getDeviceCount(); ++i) {
if (auto di = rtAudio.getDeviceInfo(i); di.inputChannels > 0) {
captureDeviceNames.emplace_back(QString::fromStdString(di.name.c_str()));
}
}
auto newIndex = getIndexForDeviceName(name);
if (newIndex != currentCaptureDevice) {
currentCaptureDevice = newIndex;
emit currentCaptureDeviceChanged();
}
}
void AudioCaptureManager::setCurrentCaptureDevice(int index) {
if (index != currentCaptureDevice) {
if (index < 0 || index >= captureDeviceNames.ssize()) {
stopCapturing();
currentCaptureDevice = -1;
emit currentCaptureDeviceChanged();
return;
}
stopCapturingAndWait();
currentCaptureDevice = index;
if (!startCapturingFromDevice(captureDeviceNames[index])) {
ErrorNotifier::showError("Error while starting stream " + captureDeviceNames[index]);
}
emit currentCaptureDeviceChanged();
}
}
const EventSeries *AudioCaptureManager::requestTempoAnalysis(Aubio::OnsetDetectionFunction f) {
if (samplesPerSecond < 0) {
return nullptr;
......
#ifndef AUDIOCAPTUREMANAGER_H
#define AUDIOCAPTUREMANAGER_H
#include "audioeventdata.h"
#include "modelvector.h"
#include "sample.h"
#include "aubio/onsetanalysis.h"
#include "aubio/tempoanalysis.h"
#include "audio_fft.h"
#include "audioeventdata.h"
#include "sample.h"
#include "lib/RtAudio/rtaudio/RtAudio.h"
#include <map>
#include <thread>
......@@ -30,11 +32,22 @@ class AudioCaptureManager : public QObject
{
Q_OBJECT
Q_PROPERTY(bool capturing READ isCapturing NOTIFY capturingStatusChanged)
Q_PROPERTY(int currentCaptureDevice READ getCurrentCaptureDevice WRITE setCurrentCaptureDevice NOTIFY currentCaptureDeviceChanged)
Q_PROPERTY(QAbstractItemModel *captureDeviceNames READ getCaptureDeviceNamesModel CONSTANT)
Sample<float,4096> sample;
std::array<float,2048> fftoutput;
std::thread captureAudioThread;
std::atomic_bool run;
int currentCaptureDevice = -1;
RtAudio rtAudio;
AudioFFT audiofft;
ModelVector<QString> captureDeviceNames;
using CaptureLibEntry = int (*)(void(int, int), void(float *, unsigned int, bool *));
/**
* @brief captureLibraries all libraries loaded with loadCaptureLibrary(); the QString key is the device name and the CaptureLibEntry is the library's entry function
*/
std::map<QString, CaptureLibEntry> captureLibraries;
int channels = -1;
int samplesPerSecond = -1;
int samplesPerFrame = -1;
......@@ -56,15 +69,75 @@ private:
}
}
private:
static int rtAudioCallback(void *outputBuffer, void *inputBuffer, unsigned int nFrames, double streamTime, RtAudioStreamStatus status, void *userData);
static void staticInitCallback(int channels, int samplesPerSecond) { get().initCallback(channels, samplesPerSecond); }
static void staticDataCallback(float* data, unsigned int frames, bool*done){get().dataCallback(data,frames,done);}
void initCallback(int channels, int samplesPerSecond);
void dataCallback(float* data, unsigned int frames, bool*done);
void dataCallback(float *data, unsigned int frames, bool *done);
/**
* @brief startCapturingFromCaptureLibrary starts the audio capturing with the given function
* @param func The entry function of the audio capture lib
*/
void startCapturingFromCaptureLibrary(CaptureLibEntry func);
/**
* @brief startCapturingFromInput starts the capturing from an input device
* @param inputIndex The index of the input device from rtAudio.getDeviceInfo(...)
* @return true if the starting of the capturing was successful, false otherwise
*/
bool startCapturingFromInput(unsigned inputIndex);
/**
* @brief getIndexForDeviceName returns the index in the captureDeviceNames for the device with the given name
* @param name the device name
* @return the index of the device in the captureDeviceNames, or -1, if there is no device with the given name
*/
template <typename String>
int getIndexForDeviceName(const String &name);
public:
bool startCapturing(QString filePathToCaptureLibrary);
void stopCapturing(){run=false;}
void stopCapturingAndWait(){run=false;if(captureAudioThread.joinable())captureAudioThread.join();}
bool isCapturing(){return run;}
/**
* @brief loadCaptureLibrary loads the library at the given path under the given name; capturing can then be started under that name. The device list can be updated with updateCaptureDeviceList()
* @param name the name the capture device should get; if a device with this name already exists, it is replaced
* @param filePathToCaptureLibrary The path to the capture lib
* @return true, if the loading was successful, false otherwise
*/
bool loadCaptureLibrary(QString name, QString filePathToCaptureLibrary);
/**
* @brief startCapturingFromDevice starts capturing from the capture device with the given name. For the available names, see getCaptureDeviceNames()
* @param name the name of the capture device
* @return true, if the capturing starts successfully, false otherwise
*/
Q_INVOKABLE bool startCapturingFromDevice(const QString &name);
/**
* @brief startCapturingFromCaptureLibrary starts capturing from one of the capture libraries loaded with loadCaptureLibrary(...); which one is unspecified
* @return true, if the capturing starts successfully, false otherwise
*/
bool startCapturingFromCaptureLibrary();
/**
* @brief startCapturingFromDefaultInput starts the capturing from the default input device
* @return true, if the capturing starts successfully, false otherwise
*/
bool startCapturingFromDefaultInput();
void stopCapturing();
void stopCapturingAndWait();
bool isCapturing() const;
/**
* @brief updateCaptureDeviceList updates the list of devices from which capturing can be started. See getCaptureDeviceNames()
*/
Q_INVOKABLE void updateCaptureDeviceList();
int getCurrentCaptureDevice() const { return currentCaptureDevice; }
void setCurrentCaptureDevice(int index);
QAbstractItemModel *getCaptureDeviceNamesModel() { return &captureDeviceNames; }
/**
* @brief getCaptureDeviceNames returns the names of all devices from which capturing can be started. The list can be updated with updateCaptureDeviceList()
* @return a name list of all devices from which capturing can be started
*/
const std::vector<QString> &getCaptureDeviceNames() const { return captureDeviceNames.getVector(); }
const std::array<float, 2048> &getFFTOutput() { return fftoutput; }
/**
* @brief requestTempoAnalysis requests the data series from a tempo analysis that uses a specific onset detection function
......@@ -87,8 +160,21 @@ public:
static AudioCaptureManager & get(){static AudioCaptureManager m;return m;}
signals:
void capturingStatusChanged();
void currentCaptureDeviceChanged();
};
template <typename String>
int AudioCaptureManager::getIndexForDeviceName(const String &name) {
int index = 0;
for (const auto &n : captureDeviceNames) {
if (n == name) {
return index;
}
++index;
}
return -1;
}
} // namespace Audio
#endif // AUDIOCAPTUREMANAGER_H
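To show how the public interface above is meant to be used, a minimal usage sketch (the include path and the call site are assumptions; the main.cpp changes below show the real wiring):

#include "audiocapturemanager.h" // assumed include path

void exampleStartAudio() {
    auto &manager = Audio::AudioCaptureManager::get();
    manager.updateCaptureDeviceList();                // fills the captureDeviceNames model
    if (!manager.startCapturingFromDefaultInput()) {
        manager.startCapturingFromCaptureLibrary();   // fall back to a loaded capture library, if any
    }
    // ... later, e.g. on shutdown:
    manager.stopCapturingAndWait();
}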
......@@ -259,8 +259,12 @@ int main(int argc, char *argv[]) {
Settings::connect(&settings,&Settings::driverFilePathChanged,[&](){
Driver::loadAndStartDriver(settings.getDriverFilePath());
});
Settings::connect(&settings,&Settings::audioCaptureFilePathChanged,[&](){
if(!Audio::AudioCaptureManager::get().startCapturing(settings.getAudioCaptureFilePath())){
Settings::connect(&settings, &Settings::audioCaptureFilePathChanged, [&]() {
if (Audio::AudioCaptureManager::get().loadCaptureLibrary("Windows Output", settings.getAudioCaptureFilePath())) {
if (!Audio::AudioCaptureManager::get().startCapturingFromDevice("Windows Output")) {
ErrorNotifier::get()->newError(QStringLiteral("Failed to start capturing with Audio Capture Library"));
}
} else {
ErrorNotifier::get()->newError(QStringLiteral("Failed to load Audio Capture Library"));
}
});
......@@ -296,6 +300,7 @@ int main(int argc, char *argv[]) {
engine.rootContext()->setContextProperty(QStringLiteral("ledConsumer"),&Modules::LedConsumer::allLedConsumer);
QQmlEngine::setObjectOwnership(&Driver::dmxValueModel,QQmlEngine::CppOwnership);
engine.rootContext()->setContextProperty(QStringLiteral("dmxOutputValues"),&Driver::dmxValueModel);
engine.rootContext()->setContextProperty(QStringLiteral("AudioManager"), &Audio::AudioCaptureManager::get());
engine.load(QUrl(QStringLiteral("qrc:/qml/main.qml")));
......@@ -329,7 +334,13 @@ int main(int argc, char *argv[]) {
driver.start();
#endif
qDebug() << "start capturing : " << Audio::AudioCaptureManager::get().startCapturing(settings.getAudioCaptureFilePath());
auto &audioManager = Audio::AudioCaptureManager::get();
audioManager.loadCaptureLibrary("Windows Output", settings.getAudioCaptureFilePath());
if (!audioManager.startCapturingFromCaptureLibrary()) {
if (!audioManager.startCapturingFromDefaultInput()) {
ErrorNotifier::showError("Audio capturing not possible");
}
}
Modules::ModuleManager::singletone()->controller().start();
//ControlPanel::getLastCreated()->addDimmerGroupControl();
......
......@@ -99,6 +99,8 @@ public:
}else{
if constexpr(std::is_base_of_v<QObject, Type>)
return model[index.row()].property("name");
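// for a ModelVector of plain QStrings, the string itself is the display value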
else if constexpr (std::is_same_v<QString, Type>)
return model[index.row()];
else
return "No Display Property available! See ModelVector";
}
......
......@@ -8,7 +8,7 @@ Pane{
GridLayout{
anchors.left: parent.left
anchors.right: parent.right
rowSpacing: 20
rowSpacing: 16
columns: 2
Label{
text: "Settings file path:"
......@@ -112,6 +112,17 @@ Pane{
onPathChanged: {Settings.audioCaptureFilePath = path;path = Settings.audioCaptureFilePath;}
fileChooser: fileDialog
}
Label{
Layout.fillWidth: true
text: "Audio Capture Device:"
}
ComboBox{
model: AudioManager.captureDeviceNames
Layout.fillWidth: true
onActivated: AudioManager.currentCaptureDevice = index
currentIndex: AudioManager.currentCaptureDevice
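// refresh the device list every time the dropdown is opened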
onDownChanged: if(down)AudioManager.updateCaptureDeviceList()
}
}
FileDialog{
property var callback;
......