#include "audiocapturemanager.h"
#include "errornotifier.h"
#include "gui/colorplot.h"
#include "gui/graph.h"
#include "gui/oscillogram.h"
#include <algorithm>

#ifdef AUDIO_IF_WIN
#error AUDIO_IF_WIN is already defined
#endif

#ifdef Q_OS_WIN
#define AUDIO_IF_WIN(x) x
#else
#define AUDIO_IF_WIN(x)
#endif

namespace Audio {
AudioCaptureManager::AudioCaptureManager() : audiofft(sample.size()) {
    rtAudio.showWarnings();
    updateCaptureDeviceList();
}

int AudioCaptureManager::rtAudioCallback(void * /*outputBuffer*/, void *inputBuffer, unsigned int nFrames, double /*streamTime*/, RtAudioStreamStatus /*status*/, void * /*userData*/) {
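    // Static callback handed to RtAudio: forwards the interleaved input buffer to the
    // singleton's dataCallback() and returns non-zero once capturing should stop, which
    // tells RtAudio to end the stream.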
    bool end = false;
    get().dataCallback(static_cast<float *>(inputBuffer), nFrames, &end);
    return end;
}

void AudioCaptureManager::initCallback(int channels, int samplesPerSecond) {
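    // Called once the capture format is known, either from startCapturingFromInput() or
    // by a loaded capture library through staticInitCallback.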
    this->channels = channels;
    this->samplesPerSecond = samplesPerSecond;
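    // the analysis works on blocks of 1/100 s (e.g. 441 samples at 44.1 kHz)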
    this->samplesPerFrame = samplesPerSecond / 100;
    if (GUI::Colorplot::getLast()) {
        GUI::Colorplot::getLast()->setBlockSize(512);
    }
}

void AudioCaptureManager::dataCallback(float *data, unsigned int frames, bool *done) {
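    // *done tells the capture backend (rtAudioCallback or a capture library) to stop
    // delivering data once capturing has been stopped.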
    *done = !run;
    if (!run) {
        return;
    }
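    // Ignore buffers that are null, that arrive before the channel count is known, or in
    // which every channel is silent; otherwise hand the interleaved buffer to
    // sample.addData() (using the first non-silent channel as the starting offset) and
    // update the FFT.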
    if (!data) {
        return;
    }
    if (channels < 0) {
        return;
    }
    int firstIndex = -1;
    for (int i = 0; i < channels; ++i) {
        if (data[i] != 0) {
            firstIndex = i;
            break;
        }
    }
    if (firstIndex == -1) {
        return;
    }

    sample.addData(data, data + frames * static_cast<unsigned>(channels), channels - 1, firstIndex);

    audiofft.analyse(sample.data(), 1, fftoutput.data());
    {
        // feed the onset/tempo analysis objects with the new samples
        int restFrames = static_cast<int>(frames);
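        // restFrames counts the samples that have not been handed to the analysis
        // objects yet; every pass consumes samplesPerFrame samples, read from the end of
        // the sample buffer.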
        if (restFrames % samplesPerFrame != 0) {
            static bool once = false;
            if (!once) {
                once = true;
                ErrorNotifier::showError(QStringLiteral("The sample block from the audio capture service does not have a length of %1 or a multiple of %1. The length is %2. Cannot analyse the audio data.").arg(samplesPerFrame).arg(frames));
            }
        } else {
            while (restFrames != 0) {
                if (restFrames >= sample.size()) {
                    // more pending frames than fit into the sample buffer; skip the oldest block
                    restFrames -= samplesPerFrame;
                    continue;
                }
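                // pair.first is the Aubio analysis object, pair.second the recorded
                // event/data series exposed to the rest of the application.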
                for (auto &[onsetFunction, pair] : onsetAnalyzes) {
                    bool wasOnset = pair.first.processNewSamples(sample.data() + sample.size() - restFrames);
                    pair.second.addOnsetData(pair.second.getNewestSample(), pair.first.getOnsetValue(), 0);
                    if (wasOnset) {
                        pair.second.addEvent(pair.first.getLastOnset());
                    }
                    pair.second.increaseNewestSampleBy(samplesPerFrame);
                }
                for (auto &[onsetFunction, pair] : tempoAnalyzes) {
                    bool wasBeat = pair.first.processNewSamples(sample.data() + sample.size() - restFrames);
                    if (wasBeat) {
                        pair.second.addEvent(pair.first.getLastBeat());
                    }
                    pair.second.increaseNewestSampleBy(samplesPerFrame);
                }
                restFrames -= samplesPerFrame;
            }
        }
    }
    // convert the FFT output to a dB scale
    std::transform(fftoutput.begin(), fftoutput.end(), fftoutput.begin(), [](auto i) { return 10 * std::log10(1 + i); });

    if (GUI::Graph::getLast() && run) {
        GUI::Graph::getLast()->showData(fftoutput.data(), fftoutput.size());
    }
    if (GUI::Colorplot::getLast() && run) {
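        // the colorplot consumes blocks of 512 values (see setBlockSize() in
        // initCallback), so push the first 512 FFT values as one block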
        GUI::Colorplot::getLast()->startBlock();
        for (int i = 0; i < 512; ++i) {
            GUI::Colorplot::getLast()->pushDataToBlock(fftoutput.at(i));
        }
        GUI::Colorplot::getLast()->endBlock();
    }
    if (GUI::Oscillogram::getLast() && run) {
        GUI::Oscillogram::getLast()->showData(sample.data(), sample.size());
    }
}

void AudioCaptureManager::startCapturingFromCaptureLibrary(AudioCaptureManager::CaptureLibEntry func) {
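    // The capture library entry point is expected to block while it captures and to feed
    // the provided init/data callbacks, so it runs on its own thread.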
    captureAudioThread = std::thread([this, func]() {
        run = true;
        emit this->capturingStatusChanged();
        func(&AudioCaptureManager::staticInitCallback, &AudioCaptureManager::staticDataCallback);
        run = false;
        emit this->capturingStatusChanged();
    });
}

bool AudioCaptureManager::startCapturingFromInput(unsigned input) {
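    // Open an RtAudio stream on the given device index; fails if the device has no
    // usable channels or does not support the already configured sample rate.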
    if (input >= rtAudio.getDeviceCount()) {
        return false;
    }
    const auto di = rtAudio.getDeviceInfo(input);
    if (di.inputChannels == 0 AUDIO_IF_WIN(&&di.outputChannels == 0)) {
        return false;
    }
    // if a capture device was open before, its sample rate is already in use by the analysis; the new device must support the same rate
    int sampleRate = this->samplesPerSecond;
    if (this->samplesPerSecond > 0) {
        bool supported = std::any_of(di.sampleRates.cbegin(), di.sampleRates.cend(), [this](int s) { return s == this->samplesPerSecond; });
        if (!supported) {
            return false;
        }
    } else {
        sampleRate = static_cast<int>(di.preferredSampleRate);
    }
    initCallback(static_cast<int>(di.inputChannels AUDIO_IF_WIN(+di.outputChannels)), sampleRate);

    RtAudio::StreamParameters isp;
    isp.deviceId = input;
    isp.nChannels = di.inputChannels AUDIO_IF_WIN(+di.outputChannels);
    isp.firstChannel = 0;
    unsigned samplesPerFrame = static_cast<unsigned>(this->samplesPerFrame);
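    // RtAudio may adjust the requested buffer size; the analysis code relies on blocks
    // of exactly samplesPerFrame samples, so a stream with a different buffer size is
    // closed again below.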
    try {
        rtAudio.openStream(nullptr, &isp, RTAUDIO_FLOAT32, static_cast<unsigned>(this->samplesPerSecond), &samplesPerFrame, rtAudioCallback, nullptr, nullptr, nullptr);
        if (static_cast<int>(samplesPerFrame) != this->samplesPerFrame) {
            rtAudio.closeStream();
            return false;
        }
        rtAudio.startStream();
        run = true;
        emit this->capturingStatusChanged();
    } catch (const RtAudioError &error) {
        ErrorNotifier::showError(QString::fromStdString(error.getMessage()));
        run = false;
        emit capturingStatusChanged();
        return false;
    }
    return true;
}

bool AudioCaptureManager::loadCaptureLibrary(const QString &name, const QString &filePathToCaptureLibrary) {
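    // Resolve the captureAudio entry point of the library and register it under the
    // given name. A name is only registered once; if the new entry is inserted in front
    // of the currently selected device, that index is shifted accordingly.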
    auto func = reinterpret_cast<CaptureLibEntry>(QLibrary::resolve(filePathToCaptureLibrary, "captureAudio"));
    if (func) {
        // the name is only registered once; emplace keeps an existing entry with the same name
        auto res = captureLibraries.emplace(name, func);
        if (res.second) {
            auto pos = std::distance(captureLibraries.begin(), res.first);
            captureDeviceNames.insert(static_cast<int>(pos), name);
            if (currentCaptureDevice >= pos) {
                currentCaptureDevice++;
                emit currentCaptureDeviceChanged();
            }
        }
    }
    return func;
}

bool AudioCaptureManager::startCapturingFromDevice(const QString &name) {
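    // prefer a registered capture library with a matching name, otherwise look for an
    // RtAudio device with that name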
    for (const auto &c : captureLibraries) {
        if (c.first == name) {
            startCapturingFromCaptureLibrary(c.second);
            return true;
        }
    }
    for (unsigned i = 0; i < rtAudio.getDeviceCount(); ++i) {
        if (auto di = rtAudio.getDeviceInfo(i); di.name.c_str() == name) {
            return startCapturingFromInput(i);
        }
    }
    return false;
}

bool AudioCaptureManager::startCapturingFromCaptureLibrary() {
    if (captureLibraries.empty()) {
        return false;
    }
    stopCapturingAndWait();
    startCapturingFromCaptureLibrary(captureLibraries.begin()->second);
    currentCaptureDevice = 0;
    emit currentCaptureDeviceChanged();
    return true;
}

bool AudioCaptureManager::startCapturingFromDefaultInput() {
    stopCapturingAndWait();
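    // On Windows the default output device is tried first (presumably to capture what is
    // currently being played back); otherwise fall back to the default input device.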
#ifdef Q_OS_WIN
    // check if the default output device is available
    const auto output = rtAudio.getDefaultOutputDevice();
    if (output < rtAudio.getDeviceCount()) {
        const auto di = rtAudio.getDeviceInfo(output);
        if (di.isDefaultOutput) {
            if (startCapturingFromInput(output)) {
                currentCaptureDevice = getIndexForDeviceName(di.name.c_str());
                emit currentCaptureDeviceChanged();
                return true;
            }
        }
    }
#endif
    // check if the default input device is available
    const auto input = rtAudio.getDefaultInputDevice();
    if (input >= rtAudio.getDeviceCount()) {
        return false;
    }
    const auto di = rtAudio.getDeviceInfo(input);
    if (!di.isDefaultInput) {
        return false;
    }
    if (startCapturingFromInput(input)) {
        currentCaptureDevice = getIndexForDeviceName(di.name.c_str());
        emit currentCaptureDeviceChanged();
        return true;
    }
    return false;
}

void AudioCaptureManager::stopCapturing() {
    try {
        if (rtAudio.isStreamRunning()) {
            rtAudio.abortStream();
        }
        if (rtAudio.isStreamOpen()) {
            rtAudio.closeStream();
        }
    } catch (const RtAudioError &e) {
        ErrorNotifier::showError("Error while stopping audio stream: " + QString(e.what()));
    }
    run = false;
    emit capturingStatusChanged();
}

void AudioCaptureManager::stopCapturingAndWait() {
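    // Two cases: a capture library running on captureAudioThread is stopped by clearing
    // `run` and joining the thread; an RtAudio stream is closed and then polled until it
    // has actually stopped running.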
    try {
        if (captureAudioThread.joinable()) {
            run = false;
            captureAudioThread.join();
        } else {
            if (rtAudio.isStreamOpen()) {
                rtAudio.closeStream();
                std::this_thread::yield();
                while (rtAudio.isStreamRunning()) {
                    std::this_thread::sleep_for(std::chrono::microseconds(500));
                }
            }
            run = false;
        }
    } catch (const RtAudioError &e) {
        ErrorNotifier::showError("Error while stopping audio stream: " + QString(e.what()));
    }
    emit capturingStatusChanged();
}

bool AudioCaptureManager::isCapturing() const {
    return run || rtAudio.isStreamRunning();
}

void AudioCaptureManager::updateCaptureDeviceList() {
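    // Rebuild captureDeviceNames from the registered capture libraries and the RtAudio
    // devices, then try to re-select the previously selected device by name.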
    QString name;
    if (currentCaptureDevice >= 0 && currentCaptureDevice < captureDeviceNames.ssize()) {
        name = captureDeviceNames[currentCaptureDevice];
    }
    captureDeviceNames.clear();
    for (const auto &i : captureLibraries) {
        captureDeviceNames.push_back(i.first);
    }

    for (unsigned i = 0; i < rtAudio.getDeviceCount(); ++i) {
        if (auto di = rtAudio.getDeviceInfo(i); di.inputChannels > 0 AUDIO_IF_WIN(|| di.outputChannels > 0)) {
            captureDeviceNames.emplace_back(QString::fromStdString(di.name));
        }
    }
    auto newIndex = getIndexForDeviceName(name);
    if (newIndex != currentCaptureDevice) {
        currentCaptureDevice = newIndex;
        emit currentCaptureDeviceChanged();
    }
}

void AudioCaptureManager::setCurrentCaptureDevice(int index) {
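    // An out-of-range index (e.g. -1) only stops capturing; a valid index restarts
    // capturing on the newly selected device.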
    if (index != currentCaptureDevice) {
        if (index < 0 || index >= captureDeviceNames.ssize()) {
            stopCapturing();
            currentCaptureDevice = -1;
            emit currentCaptureDeviceChanged();
            return;
        }
        stopCapturingAndWait();
        currentCaptureDevice = index;
        if (!startCapturingFromDevice(captureDeviceNames[index])) {
            ErrorNotifier::showError("Error while starting stream " + captureDeviceNames[index]);
        }
        emit currentCaptureDeviceChanged();
    }
}

const EventSeries *AudioCaptureManager::requestTempoAnalysis(Aubio::OnsetDetectionFunction f) {
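    // Returns the event series for the given onset detection function, lazily creating
    // the underlying tempo analysis; returns nullptr as long as no capture has been
    // initialised (samplesPerSecond is still negative).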
    if (samplesPerSecond < 0) {
        return nullptr;
    }
    // check if already there
    if (const auto i = tempoAnalyzes.find(f); i != tempoAnalyzes.end()) {
        return &i->second.second;
    }
    // We need this ugly syntax because we cannot copy or move an EventRange object. See https://stackoverflow.com/a/25767752/10162645
    return &tempoAnalyzes.emplace(std::piecewise_construct, std::make_tuple(f), std::forward_as_tuple(std::piecewise_construct, std::forward_as_tuple(f, 1024, samplesPerFrame, samplesPerSecond), std::forward_as_tuple(samplesPerSecond))).first->second.second;
    // short:  tempoAnalyzes.emplace(f, {Aubio::TempoAnalysis(f, 1024, 441, 44100), OnsetDataSeries(44100)});
}

const OnsetDataSeries *AudioCaptureManager::requestOnsetAnalysis(Aubio::OnsetDetectionFunction f) {
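    // same lazy-creation pattern as requestTempoAnalysis(), but for onset data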
    if (samplesPerSecond < 0) {
        return nullptr;
    }
    // check if already there
    if (const auto i = onsetAnalyzes.find(f); i != onsetAnalyzes.end()) {
        return &i->second.second;
    }
    // We need this ugly syntax because we cannot copy or move an EventRange object. See https://stackoverflow.com/a/25767752/10162645
    return &onsetAnalyzes.emplace(std::piecewise_construct, std::make_tuple(f), std::forward_as_tuple(std::piecewise_construct, std::forward_as_tuple(f, 1024, samplesPerFrame, samplesPerSecond), std::forward_as_tuple(samplesPerSecond))).first->second.second;
    // short:  onsetAnalyzes.emplace(f, {Aubio::OnsetAnalysis(f, 1024, 441, 44100), OnsetDataSeries(44100)});
}

} // namespace Audio