Commit a16b2bff authored by Leander Schulten's avatar Leander Schulten
Browse files

Use new Windows Capture Lib from...

Use new Windows Capture Lib from leander.schulten/Capture_Windows_SoundOutput@a82eb3c3494de53ea03b1b86dde0ba04b256c92e
The capture API now returns the sample rate.
parent c713ab2d
Pipeline #190110 passed with stage
in 4 minutes and 24 seconds
......@@ -11,10 +11,13 @@ AudioCaptureManager::AudioCaptureManager():audiofft(sample.size())
}
void AudioCaptureManager::initCallback(int channels){
void AudioCaptureManager::initCallback(int channels, int samplesPerSecond) {
this->channels = channels;
if(GUI::Colorplot::getLast())
this->samplesPerSecond = samplesPerSecond;
this->samplesPerFrame = samplesPerSecond / 100;
if (GUI::Colorplot::getLast()) {
GUI::Colorplot::getLast()->setBlockSize(512);
}
}
void AudioCaptureManager::dataCallback(float* data, unsigned int frames, bool*done){
......@@ -90,7 +93,7 @@ void AudioCaptureManager::dataCallback(float* data, unsigned int frames, bool*do
bool AudioCaptureManager::startCapturing(QString filePathToCaptureLibrary){
stopCapturingAndWait();
typedef int (*capture)(void(*)(int),void(*)(float*,unsigned int, bool*)) ;
typedef int (*capture)(void (*)(int, int), void (*)(float *, unsigned int, bool *));
auto func = reinterpret_cast<capture>(QLibrary::resolve(filePathToCaptureLibrary,"captureAudio"));
if(func){
captureAudioThread = std::thread([this,func](){
......@@ -104,23 +107,29 @@ bool AudioCaptureManager::startCapturing(QString filePathToCaptureLibrary){
return func;
}
const EventSeries *AudioCaptureManager::requestTempoAnalysis(Aubio::OnsetDetectionFunction f) {
    // The sample rate is only known after initCallback ran; without it no
    // analysis object can be constructed.
    if (samplesPerSecond < 0) {
        return nullptr;
    }
    // Check if an analysis for this onset detection function already exists.
    if (const auto i = tempoAnalyzes.find(f); i != tempoAnalyzes.end()) {
        return &i->second.second;
    }
    // We need this ugly syntax, because we can not copy or move an EventRange object. See https://stackoverflow.com/a/25767752/10162645
    return &tempoAnalyzes.emplace(std::piecewise_construct, std::make_tuple(f), std::forward_as_tuple(std::piecewise_construct, std::forward_as_tuple(f, 1024, samplesPerFrame, samplesPerSecond), std::forward_as_tuple(samplesPerSecond))).first->second.second;
    // short: tempoAnalyzes.emplace(f, {Aubio::TempoAnalysis(f, 1024, samplesPerFrame, samplesPerSecond), EventSeries(samplesPerSecond)});
}
const OnsetDataSeries *AudioCaptureManager::requestOnsetAnalysis(Aubio::OnsetDetectionFunction f) {
    // The sample rate is only known after initCallback ran; without it no
    // analysis object can be constructed.
    if (samplesPerSecond < 0) {
        return nullptr;
    }
    // Check if an analysis for this onset detection function already exists.
    if (const auto i = onsetAnalyzes.find(f); i != onsetAnalyzes.end()) {
        return &i->second.second;
    }
    // We need this ugly syntax, because we can not copy or move an EventRange object. See https://stackoverflow.com/a/25767752/10162645
    return &onsetAnalyzes.emplace(std::piecewise_construct, std::make_tuple(f), std::forward_as_tuple(std::piecewise_construct, std::forward_as_tuple(f, 1024, samplesPerFrame, samplesPerSecond), std::forward_as_tuple(samplesPerSecond))).first->second.second;
    // short: onsetAnalyzes.emplace(f, {Aubio::OnsetAnalysis(f, 1024, samplesPerFrame, samplesPerSecond), OnsetDataSeries(samplesPerSecond)});
}
......
......@@ -36,6 +36,8 @@ class AudioCaptureManager : public QObject
std::atomic_bool run;
AudioFFT audiofft;
int channels = -1;
int samplesPerSecond = -1;
int samplesPerFrame = -1;
/**
* @brief tempoAnalyzes all tempo analyses that were requested by requestTempoAnalysis
*/
......@@ -54,9 +56,9 @@ private:
}
}
private:
// Trampolines handed to the capture library as plain function pointers;
// they forward the C-style callbacks to the AudioCaptureManager singleton.
static void staticInitCallback(int channels, int samplesPerSecond) { get().initCallback(channels, samplesPerSecond); }
static void staticDataCallback(float* data, unsigned int frames, bool*done){get().dataCallback(data,frames,done);}
// Invoked once with the stream layout (channel count, sample rate).
void initCallback(int channels, int samplesPerSecond);
// Invoked repeatedly with captured sample data; `done` is presumably a
// stop flag for the capture library — confirm against the Capture lib API.
void dataCallback(float* data, unsigned int frames, bool*done);
public:
bool startCapturing(QString filePathToCaptureLibrary);
......@@ -70,14 +72,14 @@ public:
* @param f the onset function that should be used
* @return the Event Series produced by the analysis object using the specific onset detection function
*/
// Returns nullptr if capturing was not initialized yet (sample rate unknown).
const EventSeries *requestTempoAnalysis(Aubio::OnsetDetectionFunction f);
/**
* @brief requestOnsetAnalysis requests the data series from an onset analysis that uses a specific onset detection function
* You can call the function with the same parameters multiple times, the result will be the same
* @param f the onset function that should be used
* @return the Onset Data Series produced by the analysis object using the specific onset detection function
*/
// Returns nullptr if capturing was not initialized yet (sample rate unknown).
const OnsetDataSeries *requestOnsetAnalysis(Aubio::OnsetDetectionFunction f);
public:
AudioCaptureManager(AudioCaptureManager const&) = delete;
......
......@@ -8,7 +8,9 @@ using namespace Audio::Aubio;
namespace GUI {
// Maps a sample index of the given event series to an x pixel position:
// the newest sample sits at the right edge (width()); older samples are
// shifted left by their age scaled with pixelPerSecond.
float AudioEventDataView::getX(const Audio::EventSeries *e, int sample) {
    return static_cast<float>(width()) - (static_cast<float>(e->getNewestSample()) - sample) / (e->getSamplesPerSecond() / pixelPerSecond);
}
AudioEventDataView::AudioEventDataView(QQuickItem *parent) : QQuickItem(parent) {
setFlag(ItemHasContents);
......@@ -23,18 +25,26 @@ AudioEventDataView::AudioEventDataView(QQuickItem *parent) : QQuickItem(parent)
}
void AudioEventDataView::enableDetectionFor(OnsetDetectionFunction f, AudioEventDataView::DataType type, bool enabled) {
    // When enabling, make sure an analysis data series exists for f. The
    // request may fail (returns nullptr) while no capture is running; in
    // that case bail out WITHOUT touching the enabled flag.
    if (enabled) {
        if (type == BeatEvent) {
            if (beatData.find(f) == beatData.end()) {
                auto p = Audio::AudioCaptureManager::get().requestTempoAnalysis(f);
                if (!p) {
                    return;
                }
                beatData.emplace(f, p);
            }
        } else {
            if (onsetData.find(f) == onsetData.end()) {
                auto p = Audio::AudioCaptureManager::get().requestOnsetAnalysis(f);
                if (!p) {
                    return;
                }
                onsetData.emplace(f, p);
            }
        }
    }
    colors[to_integral(f)][type].first = enabled;
}
bool AudioEventDataView::isDetectionEnabledFor(OnsetDetectionFunction onsetDetectionFunction, AudioEventDataView::DataType type) { return colors[to_integral(onsetDetectionFunction)][type].first; }
......@@ -90,7 +100,7 @@ QSGNode *AudioEventDataView::updatePaintNode(QSGNode *node, QQuickItem::UpdatePa
return gNode->geometry();
};
const auto fillEvents = [this](auto geometry, const auto &data) {
auto events = data.getEvents();
auto events = data->getEvents();
geometry->allocate(events->size() * 2);
auto vertexData = geometry->vertexDataAsPoint2D();
for (const auto &e : *events) {
......@@ -107,24 +117,24 @@ QSGNode *AudioEventDataView::updatePaintNode(QSGNode *node, QQuickItem::UpdatePa
for (auto &[f, data] : onsetData) {
if (isDetectionEnabledFor(f, OnsetValue)) {
QSGGeometry *geometry = getGeometry(getColor(f, OnsetValue));
const auto lockedData = data.getOnsetData();
const auto lockedData = data->getOnsetData();
geometry->allocate(lockedData->size());
auto vertexData = geometry->vertexDataAsPoint2D();
for (const auto &o : *lockedData) {
vertexData->x = getX(data, o.sample);
vertexData->y = height() - ((o.onsetValue / data.getMaxOnsetValue()) * (height() - 50));
vertexData->y = height() - ((o.onsetValue / data->getMaxOnsetValue()) * (height() - 50));
++vertexData;
}
geometry->setDrawingMode(QSGGeometry::DrawLineStrip);
}
if (isDetectionEnabledFor(f, ThresholdValue)) {
QSGGeometry *geometry = getGeometry(getColor(f, ThresholdValue));
const auto lockedData = data.getOnsetData();
const auto lockedData = data->getOnsetData();
geometry->allocate(lockedData->size());
auto vertexData = geometry->vertexDataAsPoint2D();
for (const auto &o : *lockedData) {
vertexData->x = getX(data, o.currentThreshold);
vertexData->y = height() - ((o.onsetValue / data.getMaxThreshold()) * (height() - 50));
vertexData->y = height() - ((o.onsetValue / data->getMaxThreshold()) * (height() - 50));
++vertexData;
}
geometry->setDrawingMode(QSGGeometry::DrawLineStrip);
......
......@@ -10,13 +10,13 @@ namespace GUI {
class AudioEventDataView : public QQuickItem {
Q_OBJECT
// Non-owning pointers into the analysis objects held by AudioCaptureManager.
std::map<enum Audio::Aubio::OnsetDetectionFunction, const Audio::OnsetDataSeries *> onsetData;
std::map<enum Audio::Aubio::OnsetDetectionFunction, const Audio::EventSeries *> beatData;
Q_PROPERTY(bool visibleForUser MEMBER visibleForUser NOTIFY visibleForUserChanged)
Q_PROPERTY(int pixelPerSecond MEMBER pixelPerSecond NOTIFY pixelPerSecondChanged)
int pixelPerSecond = 100;
bool visibleForUser = true;
// Maps a sample index of the series to an x pixel position on this item.
float getX(const Audio::EventSeries *e, int sample);
public:
enum DataType { BeatEvent, OnsetEvent, OnsetValue, ThresholdValue, Last = ThresholdValue };
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment