Commit c161c861 authored by Leander Schulten

Audio + GUI: Add support for tempo confidence. Don't show the threshold values (they are const). Fix bugs with the number of elements in the enum. Show all data in its own row instead of overlapping.
parent d23a767c
Pipeline #271509 failed in 6 minutes and 8 seconds
@@ -21,6 +21,8 @@ bool TempoAnalysis::processNewSamples(float *newSamples) {
unsigned TempoAnalysis::getLastBeat() { return aubio_tempo_get_last(tempo.get()); }
float TempoAnalysis::getCurrentTempoConfidence() { return aubio_tempo_get_confidence(tempo.get()); }
float TempoAnalysis::getCurrentBPM() { return aubio_tempo_get_bpm(tempo.get()); }
unsigned TempoAnalysis::getLastTatum() { return static_cast<unsigned>(aubio_tempo_get_last_tatum(tempo.get())); }
......
@@ -37,6 +37,12 @@ public:
*/
unsigned getLastBeat();
/**
* @brief getCurrentTempoConfidence returns the current confidence in the detected tempo
* @return the confidence, from 0.0 (none) to 1.0 (high)
*/
float getCurrentTempoConfidence();
/**
* @brief getCurrentBPM returns the currently determined beats per minute
* @return the currently determined beats per minute
......
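For context, a minimal usage sketch (not part of this commit) of the new confidence getter next to the existing beat query; the caller function, namespace qualification and sample buffer are illustrative assumptions, only the TempoAnalysis member functions shown above are taken from the diff:

// Hypothetical caller; assumes only the TempoAnalysis interface from this diff.
void onNewBlock(Audio::Aubio::TempoAnalysis &analysis, float *block) {
    if (analysis.processNewSamples(block)) {                        // true when a beat was detected
        unsigned beatSample = analysis.getLastBeat();                // sample position of that beat
        float bpm = analysis.getCurrentBPM();                        // current tempo estimate
        float confidence = analysis.getCurrentTempoConfidence();     // 0.0 (none) .. 1.0 (high)
        if (confidence > 0.5f) {                                     // threshold chosen arbitrarily
            // react to the beat only while the tempo estimate is trustworthy,
            // e.g. forward beatSample and bpm to downstream consumers
        }
        (void)beatSample; (void)bpm;
    }
}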
@@ -86,11 +86,12 @@ void AudioCaptureManager::dataCallback(float* data, unsigned int frames, bool*do
pair.second.increaseNewestSampleBy(samplesPerFrame);
}
for (auto &[onsetFunction, pair] : tempoAnalyzes) {
bool wasBeat = pair.first.processNewSamples(sample.data() + sample.size() - restFrames);
bool wasBeat = pair.tempoAnalysis.processNewSamples(sample.data() + sample.size() - restFrames);
if (wasBeat) {
pair.second.addEvent(pair.first.getLastBeat());
pair.events.addEvent(pair.tempoAnalysis.getLastBeat());
}
pair.second.increaseNewestSampleBy(samplesPerFrame);
pair.events.increaseNewestSampleBy(samplesPerFrame);
pair.currentConfidence = pair.tempoAnalysis.getCurrentTempoConfidence();
}
restFrames -= samplesPerFrame;
}
@@ -278,16 +279,16 @@ void AudioCaptureManager::setCurrentCaptureDevice(int index) {
}
}
const EventSeries *AudioCaptureManager::requestTempoAnalysis(Aubio::OnsetDetectionFunction f) {
TempoAnalysisData AudioCaptureManager::requestTempoAnalysis(Aubio::OnsetDetectionFunction f) {
if (samplesPerSecond < 0) {
return nullptr;
return {nullptr, nullptr};
}
// check if already there
if (const auto i = tempoAnalyzes.find(f); i != tempoAnalyzes.end()) {
return &i->second.second;
return i->second;
}
// We need this ugly syntax because we cannot copy or move an EventRange object. See https://stackoverflow.com/a/25767752/10162645
return &tempoAnalyzes.emplace(std::piecewise_construct, std::make_tuple(f), std::forward_as_tuple(std::piecewise_construct, std::forward_as_tuple(f, 1024, samplesPerFrame, samplesPerSecond), std::forward_as_tuple(samplesPerSecond))).first->second.second;
return tempoAnalyzes.emplace(std::piecewise_construct, std::make_tuple(f), std::forward_as_tuple(f, 1024, samplesPerFrame, samplesPerSecond)).first->second;
// short: tempoAnalyzes.emplace(f, {Aubio::TempoAnalysis(f, 1024, 441, 44100), OnsetDataSeries(44100)});
}
......
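The piecewise_construct syntax is needed because the mapped type cannot be copied or moved, so it has to be constructed directly inside the map node; the new nested struct removes one level of nesting because the mapped type now has a single constructor instead of being a std::pair. A self-contained sketch of the underlying pattern (the types here are made up for illustration):

#include <map>
#include <tuple>

struct Immovable {
    const int sampleRate;                        // a const member also disables assignment
    explicit Immovable(int rate) : sampleRate(rate) {}
    Immovable(const Immovable &) = delete;       // neither copyable ...
    Immovable(Immovable &&) = delete;            // ... nor movable
};

int main() {
    std::map<int, Immovable> analyzes;
    // emplace forwards the argument tuples to the key and value constructors,
    // so the Immovable value is built in place and never copied or moved.
    analyzes.emplace(std::piecewise_construct,
                     std::forward_as_tuple(1),           // key constructor arguments
                     std::forward_as_tuple(44100));      // value constructor arguments
}

Returning i->second and emplace(...).first->second as a TempoAnalysisData then relies on the implicit conversion operator declared in the new nested TempoAnalysis struct below.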
@@ -27,6 +27,12 @@ namespace Audio {
*
*/
struct TempoAnalysisData {
const EventSeries *events;
const float *confidence;
operator bool() { return events != nullptr; }
};
/**
* @brief The AudioCaptureManager class gets the data from the captureWindowsSountoutput project, analyses it and passes it on to the other components
*/
@@ -49,10 +55,19 @@ class AudioCaptureManager : public QObject
int channels = -1;
int samplesPerSecond = -1;
int samplesPerFrame = -1;
struct TempoAnalysis {
Aubio::TempoAnalysis tempoAnalysis;
EventSeries events;
float currentConfidence;
TempoAnalysis(Aubio::OnsetDetectionFunction onsetDetectionFunction, int fftSize, int stepSize, int sampleRate)
: tempoAnalysis(onsetDetectionFunction, fftSize, stepSize, sampleRate), events(sampleRate), currentConfidence(0) {}
operator TempoAnalysisData() { return {&events, &currentConfidence}; }
};
/**
* @brief tempoAnalyzes all tempo analyses that were requested by requestTempoAnalysis
*/
std::map<Aubio::OnsetDetectionFunction, std::pair<Aubio::TempoAnalysis, EventSeries>> tempoAnalyzes;
std::map<Aubio::OnsetDetectionFunction, TempoAnalysis> tempoAnalyzes;
/**
* @brief onsetAnalyzes all onset analyses that were requested by requestOnsetAnalysis
*/
@@ -115,9 +130,9 @@ public:
* @brief requestTempoAnalysis requests the data series from a tempo analysis that uses a specific onset detection function
* You can call the function with the same parameters multiple times; the result will be the same
* @param f the onset function that should be used
* @return the Event Series produced by the analysis object using the specific onset detection function
* @return the Event Series produced by the analysis object using the specific onset detection function, together with the current confidence level between 0 (none) and 1 (high)
*/
const EventSeries *requestTempoAnalysis(Aubio::OnsetDetectionFunction f);
TempoAnalysisData requestTempoAnalysis(Aubio::OnsetDetectionFunction f);
/**
* @brief requestOnsetAnalysis requests the data series from an onset analysis that uses a specific onset detection function
* You can call the function with the same parameters multiple times; the result will be the same
......
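A minimal sketch (illustrative, not from the commit) of how a consumer might use the new return type; the surrounding function is an assumption, only requestTempoAnalysis and the TempoAnalysisData members from this diff are taken as given:

// Hypothetical consumer of the new requestTempoAnalysis return type.
void attachTempoConsumer(Audio::AudioCaptureManager &manager, Audio::Aubio::OnsetDetectionFunction f) {
    Audio::TempoAnalysisData data = manager.requestTempoAnalysis(f);
    if (!data) {
        return;                                       // no capture running yet: both pointers are nullptr
    }
    const Audio::EventSeries *beats = data.events;    // owned by the manager's analysis entry
    float confidence = *data.confidence;              // refreshed by dataCallback for every processed block
    (void)beats; (void)confidence;
}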
@@ -39,7 +39,7 @@ class EventSeries {
* @brief newestSample is the count of the newest sample; there is no newer sample
*/
unsigned newestSample = 0;
const unsigned samplesPerSecond;
unsigned samplesPerSecond;
/**
* @brief events that are older than newestSample - neededRange get deleted
*/
......
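Dropping const from samplesPerSecond is presumably what makes EventSeries assignable again: a const data member leaves the copy constructor intact but implicitly deletes copy and move assignment, the same restriction the StackOverflow link above works around. A minimal illustration (types made up):

struct WithConstMember {
    const unsigned samplesPerSecond = 44100;
};

struct WithPlainMember {
    unsigned samplesPerSecond = 44100;
};

int main() {
    WithConstMember a, b;
    // a = b;                     // error: copy assignment is implicitly deleted
    WithPlainMember c, d;
    c = d;                        // fine: the compiler-generated assignment works again
}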
@@ -45,6 +45,17 @@ void AudioEventDataView::enableDetectionFor(OnsetDetectionFunction f, AudioEvent
}
}
colors[to_integral(f)][type].first = enabled;
names.clear();
for (auto &[f, data] : beatData) {
if (isDetectionEnabledFor(f, BeatEvent)) {
names.push_back("Beat: " + Audio::Aubio::toQString(f));
}
}
for (auto &[f, data] : onsetData) {
if (isDetectionEnabledFor(f, OnsetEvent) || isDetectionEnabledFor(f, OnsetValue)) {
names.push_back("Onset: " + Audio::Aubio::toQString(f));
}
}
}
bool AudioEventDataView::isDetectionEnabledFor(OnsetDetectionFunction onsetDetectionFunction, AudioEventDataView::DataType type) { return colors[to_integral(onsetDetectionFunction)][type].first; }
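The two loops that rebuild names here mirror the loops that count the enabled rows in updatePaintNode below; a small private helper could keep the list and the row count from drifting apart. This is a sketch only: the helper does not exist in this commit, it would also need a matching declaration in the header plus <functional>, and it assumes only beatData, onsetData and isDetectionEnabledFor from this diff:

// Hypothetical helper: visit every enabled series once, in drawing order.
void AudioEventDataView::forEachEnabledSeries(const std::function<void(const QString &, Audio::Aubio::OnsetDetectionFunction)> &visit) {
    for (auto &[f, data] : beatData) {
        if (isDetectionEnabledFor(f, BeatEvent)) {
            visit(QStringLiteral("Beat: "), f);
        }
    }
    for (auto &[f, data] : onsetData) {
        if (isDetectionEnabledFor(f, OnsetEvent) || isDetectionEnabledFor(f, OnsetValue)) {
            visit(QStringLiteral("Onset: "), f);
        }
    }
}

enableDetectionFor could then fill the name list through one callback while updatePaintNode counts rows through another.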
@@ -99,22 +110,46 @@ QSGNode *AudioEventDataView::updatePaintNode(QSGNode *node, QQuickItem::UpdatePa
}
return gNode->geometry();
};
const auto fillEvents = [this](auto geometry, const auto &data) {
int eventsEnabledCount = 0;
for (auto &[f, data] : beatData) {
if (isDetectionEnabledFor(f, BeatEvent)) {
++eventsEnabledCount;
}
}
for (auto &[f, data] : onsetData) {
if (isDetectionEnabledFor(f, OnsetEvent) || isDetectionEnabledFor(f, OnsetValue)) {
++eventsEnabledCount;
}
}
const auto sectionHeight = height() / eventsEnabledCount;
auto sectionOffset = 0;
const auto fillEvents = [this, &sectionOffset, sectionHeight](auto geometry, const auto &data, float confidence = 1) {
auto events = data->getEvents();
geometry->allocate(events->size() * 2);
auto vertexData = geometry->vertexDataAsPoint2D();
for (const auto &e : *events) {
const auto x = getX(data, e);
vertexData->x = x;
vertexData->y = 0;
vertexData->y = sectionOffset + sectionHeight * (1.f - confidence);
++vertexData;
vertexData->x = x;
vertexData->y = height();
vertexData->y = sectionOffset + sectionHeight * confidence;
++vertexData;
}
sectionOffset += sectionHeight;
geometry->setDrawingMode(QSGGeometry::DrawLines);
};
for (auto &[f, data] : beatData) {
if (isDetectionEnabledFor(f, BeatEvent)) {
fillEvents(getGeometry(getColor(f, BeatEvent)), data.events, *data.confidence);
}
}
for (auto &[f, data] : onsetData) {
if (isDetectionEnabledFor(f, OnsetEvent)) {
fillEvents(getGeometry(getColor(f, OnsetEvent)), data);
} else if (isDetectionEnabledFor(f, OnsetValue)) {
sectionOffset += sectionHeight;
}
if (isDetectionEnabledFor(f, OnsetValue)) {
QSGGeometry *geometry = getGeometry(getColor(f, OnsetValue));
const auto lockedData = data->getOnsetData();
@@ -122,19 +157,7 @@ QSGNode *AudioEventDataView::updatePaintNode(QSGNode *node, QQuickItem::UpdatePa
auto vertexData = geometry->vertexDataAsPoint2D();
for (const auto &o : *lockedData) {
vertexData->x = getX(data, o.sample);
vertexData->y = height() - ((o.onsetValue / data->getMaxOnsetValue()) * (height() - 50));
++vertexData;
}
geometry->setDrawingMode(QSGGeometry::DrawLineStrip);
}
if (isDetectionEnabledFor(f, ThresholdValue)) {
QSGGeometry *geometry = getGeometry(getColor(f, ThresholdValue));
const auto lockedData = data->getOnsetData();
geometry->allocate(lockedData->size());
auto vertexData = geometry->vertexDataAsPoint2D();
for (const auto &o : *lockedData) {
vertexData->x = getX(data, o.currentThreshold);
vertexData->y = height() - ((o.onsetValue / data->getMaxThreshold()) * (height() - 50));
vertexData->y = sectionOffset - ((o.onsetValue / data->getMaxOnsetValue()) * (sectionHeight));
++vertexData;
}
geometry->setDrawingMode(QSGGeometry::DrawLineStrip);
......
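To make the new layout concrete: with eventsEnabledCount enabled series, each series gets its own horizontal band of sectionHeight = height() / eventsEnabledCount, and a beat is drawn as a vertical line centred in its band whose endpoints move together as the confidence drops. A small sketch of that geometry with worked numbers (Band and eventLine are illustrative only, not part of the commit):

#include <utility>

struct Band { float offset; float height; };     // one row of the view

// Mirrors the y computation in the fillEvents lambda above.
std::pair<float, float> eventLine(Band band, float confidence) {
    return {band.offset + band.height * (1.0f - confidence),    // one end of the line
            band.offset + band.height * confidence};            // the other end
}

// Example: 3 enabled rows on a 300 px high view -> sectionHeight = 100.
// Second band (offset 100): confidence 1.0 draws y = 100..200 (full band),
// confidence 0.75 draws y = 125..175, confidence 0.5 collapses to a point at y = 150.

Two details of the committed code may be worth double-checking: below a confidence of 0.5 the endpoints swap, so the drawn line appears to grow again and a very uncertain tempo looks like a very certain one; and sectionOffset is deduced as int from auto sectionOffset = 0, so fractional band heights seem to accumulate rounding even though sectionHeight itself is floating point.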
@@ -11,24 +11,26 @@ namespace GUI {
class AudioEventDataView : public QQuickItem {
Q_OBJECT
std::map<enum Audio::Aubio::OnsetDetectionFunction, const Audio::OnsetDataSeries *> onsetData;
std::map<enum Audio::Aubio::OnsetDetectionFunction, const Audio::EventSeries *> beatData;
std::map<enum Audio::Aubio::OnsetDetectionFunction, Audio::TempoAnalysisData> beatData;
Q_PROPERTY(bool visibleForUser MEMBER visibleForUser NOTIFY visibleForUserChanged)
Q_PROPERTY(int pixelPerSecond MEMBER pixelPerSecond NOTIFY pixelPerSecondChanged)
Q_PROPERTY(QAbstractItemModel *names READ getNames CONSTANT)
int pixelPerSecond = 100;
bool visibleForUser = true;
float getX(const Audio::EventSeries *e, int sample);
ModelVector<QString> names;
public:
enum DataType { BeatEvent, OnsetEvent, OnsetValue, ThresholdValue, Last = ThresholdValue };
enum DataType { BeatEvent, OnsetEvent, OnsetValue, Last = OnsetValue };
Q_ENUM(DataType)
private:
std::array<std::array<std::pair<bool, QColor>, DataType::Last>, static_cast<int>(Audio::Aubio::OnsetDetectionFunction::Last)> colors;
std::array<std::array<std::pair<bool, QColor>, DataType::Last + 1>, static_cast<int>(Audio::Aubio::OnsetDetectionFunction::Last) + 1> colors;
public:
explicit AudioEventDataView(QQuickItem *parent = nullptr);
Q_INVOKABLE int getNumberOfOnsetDetectionFunctions() const { return static_cast<int>(Audio::Aubio::OnsetDetectionFunction::Last); }
Q_INVOKABLE int getNumberOfOnsetDetectionFunctions() const { return static_cast<int>(Audio::Aubio::OnsetDetectionFunction::Last) + 1; }
Q_INVOKABLE QString getNameOfOnsetDetectionFunctions(int f) const { return Audio::Aubio::toQString(Audio::Aubio::toOnsetDetectionFunction(f)); }
Q_INVOKABLE void enableDetectionFor(int onsetDetectionFunction, DataType type, bool enabled = true) { enableDetectionFor(Audio::Aubio::toOnsetDetectionFunction(onsetDetectionFunction), type, enabled); }
@@ -43,6 +45,8 @@ public:
Q_INVOKABLE QColor getColor(int onsetDetectionFunction, DataType usage) const { return getColor(Audio::Aubio::toOnsetDetectionFunction(onsetDetectionFunction), usage); }
[[nodiscard]] QColor getColor(Audio::Aubio::OnsetDetectionFunction onsetDetectionFunction, DataType usage) const;
QAbstractItemModel *getNames() { return &names; }
signals:
void visibleForUserChanged();
void pixelPerSecondChanged();
......
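The "+ 1" changes fix an off-by-one: with Last aliased to the final enumerator, the number of enum values is Last + 1, so sizing the colors array and getNumberOfOnsetDetectionFunctions with Last alone dropped the last entry of each dimension. A minimal illustration of the counting rule (standalone, mirroring the DataType enum above):

#include <array>

enum DataType { BeatEvent, OnsetEvent, OnsetValue, Last = OnsetValue };

// Enumeration starts at 0, so the element count is Last + 1, not Last.
constexpr int dataTypeCount = static_cast<int>(DataType::Last) + 1;
static_assert(dataTypeCount == 3);

// Sized with Last alone, OnsetValue would have no slot in this array.
std::array<bool, dataTypeCount> enabledPerType{};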
@@ -23,6 +23,24 @@ Item {
stepSize: 1;
onValueChanged: parent.pixelPerSecond = value;
}
Column{
anchors.top: parent.top
anchors.left: parent.left
anchors.bottom: parent.bottom
width: 200
Repeater{
model: dataView.names;
id: nameRepeater
delegate: Label{
width: 200
height: dataView.height / nameRepeater.count
verticalAlignment: "AlignTop"
topPadding: Math.min(15, Math.max(0, height / 2 - 20))
leftPadding: 5
text: modelData;
} // delegate
} // Repeater
} // Column
}
Item{
property bool show: true
@@ -60,7 +78,7 @@ Item {
Column{
Repeater{
id: rootRepeater
property var names: ["Beat Events", "Onset Events", "Onset Values", "Onset Threshold"]
property var names: ["Beat Events", "Onset Events", "Onset Values"]
model: dataView.getNumberOfOnsetDetectionFunctions();
delegate: ColumnLayout{
width: 195
@@ -71,7 +89,7 @@ Item {
text: dataView.getNameOfOnsetDetectionFunctions(index);
}
Repeater{
model: 4
model: 3
delegate: CheckBox{
Layout.preferredHeight: implicitHeight-16
Layout.fillWidth: true
......