diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index cd1c1883306e1472253b0ff264ca762a6851f79a..1b13827b09fcceb4f47d45dec3b7c1482cb1343e 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -1,5 +1,5 @@
 # we use the oldest compatible version
-image: python:3.9-slim
+image: python:3.9
 
 variables:
   PIP_CACHE_DIR: "$CI_PROJECT_DIR/.cache/pip"
@@ -15,6 +15,8 @@ cache:
     - .cache/pip
 
 before_script:
+  # Install build dependency for pyaudio
+  - apt-get update -yq && apt-get install -yq portaudio19-dev
   - python -m pip install --upgrade pip
   - python -m pip install hatch
diff --git a/CITATION.cff b/CITATION.cff
index f7f01c639797e4db22875a63165cda95bbf2b923..43ca0619e8ca5a05fabfeb5a58efa484ca952c8a 100644
--- a/CITATION.cff
+++ b/CITATION.cff
@@ -23,6 +23,11 @@ authors:
     email: max.beer@rwth-aachen.de
     affiliation: RWTH Aachen University
     orcid: 'https://orcid.org/0009-0007-9002-256X'
+  - given-names: Paul
+    family-names: Surrey
+    email: paul.surrey@rwth-aachen.de
+    affiliation: RWTH Aachen University
+    orcid: 'https://orcid.org/0009-0002-9033-0670'
   - given-names: René
     family-names: Otten
     email: rene.otten@rwth-aachen.de
diff --git a/pyproject.toml b/pyproject.toml
index 9c058e51abc265d05dd1ae70122f9fb2e62a5989..55ae1cf651c3fca12af4c227eb304ded7cafcd65 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -35,7 +35,7 @@ dependencies = [
     "scipy",
     "matplotlib >= 3.7",
     "dill",
-    "typing_extensions >= 4.5.0"
+    "typing_extensions >= 4.5.0",
 ]
 
 [project.optional-dependencies]
@@ -49,6 +49,9 @@ qcodes = [
     "qcodes",
     "qcodes_contrib_drivers",
 ]
+audio_playback = [
+    "pyaudio >= 0.2.14",
+]
 doc = [
     "sphinx",
     "pydata-sphinx-theme",
@@ -62,6 +65,7 @@ complete = [
     "python-spectrometer[qcodes]",
     "python-spectrometer[simulator]",
     "python-spectrometer[zurich_instruments]",
+    "python-spectrometer[audio_playback]",
     "python-spectrometer[doc]",
     "python-spectrometer[tests]",
 ]
diff --git a/src/python_spectrometer/_audio_manager.py b/src/python_spectrometer/_audio_manager.py
new file mode 100644
index 0000000000000000000000000000000000000000..f515b2cad19f139df17174368297491736810573
--- /dev/null
+++ b/src/python_spectrometer/_audio_manager.py
@@ -0,0 +1,143 @@
+"""This module contains methods for using auditory channels to interface with humans."""
+
+import queue
+import threading
+from typing import Union, Literal
+
+import numpy as np
+from scipy import signal
+
+try:
+    import pyaudio
+except ImportError:
+    pyaudio = None
+
+
+def _waveform_playback_target(waveform_queue: queue.Queue, stop_flag: threading.Event,
+                              max_playbacks: Union[int, float], bitrate: int):
+    """This function is started in a separate thread to feed the audio output with new data."""
+    import time
+    import pyaudio
+
+    pyaudio_instance = pyaudio.PyAudio()
+
+    stream = pyaudio_instance.open(format=pyaudio.paFloat32,
+                                   channels=1,
+                                   rate=bitrate,
+                                   output=True)
+
+    last_waveform = None
+    repeats = 0
+
+    # run the playback loop until the stop flag is set
+    try:
+        while not stop_flag.is_set():
+
+            # wait for a sample
+            while last_waveform is None and waveform_queue.empty() and not stop_flag.is_set():
+                time.sleep(0.01)
+
+            # get the latest sample from the queue and reset the playback counter
+            while not waveform_queue.empty() and not stop_flag.is_set():
+                last_waveform = waveform_queue.get()
+                repeats = 0
+
+            # exit the playback loop when the stop flag is set
+            if stop_flag.is_set():
+                break
+
+            # play back the last sample and increase the counter;
+            # this repeats the last sample up to a set number of repetitions
+            if last_waveform is not None:
+                stream.write(last_waveform)
+                repeats += 1
+
+            # if the counter surpasses max_playbacks, discard the sample
+            if repeats >= max_playbacks:
+                last_waveform = None
+    finally:
+        # the stop flag has been set, so the stream and the PyAudio instance are closed
+        stream.close()
+        pyaudio_instance.terminate()
+
+
+class WaveformPlaybackManager:
+    """Manages a thread used to play back the recorded noise samples.
+
+    This class has been written with the help of ChatGPT 4o.
+
+    Parameters
+    ----------
+    max_playbacks : Union[int, float]
+        How often one sample is replayed. If 1 is given, the sample is
+        played back only once. If 10 is given, the sample is played
+        back 10 times unless a new waveform is acquired in the
+        meantime. If np.inf is given, the sample is played back until
+        :meth:`WaveformPlaybackManager.stop` is called. (default: 10)
+    amplitude_normalization : Union[Literal["single_max"], float], default "single_max"
+        The factor by which the waveform is divided to normalize it.
+        This can be used to set the volume. The default "single_max"
+        normalizes each sample based on that sample alone, so the
+        volume might not carry significant information.
+    """
+
+    def __init__(self, max_playbacks: int = 10,
+                 amplitude_normalization: Union[Literal["single_max"], float] = "single_max"):
+        if pyaudio is None:
+            raise ValueError("Please install PyAudio to listen to noise.")
+
+        self.max_playbacks = max_playbacks
+        self.amplitude_normalization = amplitude_normalization
+
+        self.waveform_queue = queue.Queue()
+        self.stop_flag = threading.Event()
+        self.playback_thread = None
+        self._BITRATE = 44100
+
+    def start(self):
+        """Starts the thread, which then waits until a sample is passed via :meth:`notify`."""
+        # drop any stale waveforms from previous runs
+        while not self.waveform_queue.empty():
+            self.waveform_queue.get()
+
+        self.stop_flag.clear()
+
+        self.playback_thread = threading.Thread(
+            target=_waveform_playback_target,
+            args=(self.waveform_queue, self.stop_flag, self.max_playbacks, self._BITRATE))
+        self.playback_thread.start()
+
+    def notify(self, waveform: np.ndarray, bitrate: int):
+        """Sends the waveform of a noise sample to the playback thread, starting it if necessary."""
+        # calculate the number of samples the waveform needs to match the playback rate
+        num = int(np.floor(self._BITRATE/bitrate*len(waveform)))
+
+        # normalize the waveform
+        if self.amplitude_normalization == "single_max":
+            waveform /= np.max(np.abs(waveform))
+        elif isinstance(self.amplitude_normalization, float):
+            waveform /= np.abs(self.amplitude_normalization)
+
+        waveform -= np.mean(waveform)
+
+        # resample the data to match the playback rate
+        waveform = signal.resample(waveform, num)
+
+        if self.playback_thread is None or not self.playback_thread.is_alive():
+            self.start()
+
+        self.waveform_queue.put(waveform.flatten().astype("float32"))
+
+    def stop(self):
+        """Stops the playback and the thread."""
+ """ + + # notify the thread + self.stop_flag.set() + + # wait until the thread has terminated + if self.playback_thread is not None and self.playback_thread.is_alive(): + self.playback_thread.join() + + def __del__(self): + self.stop() + diff --git a/src/python_spectrometer/core.py b/src/python_spectrometer/core.py index b0fe1d1df6a808b7d85d4fdf7f93d88e3e447aef..23a4ad957f8e4b2498e2cc24e9cffea668d7cb88 100644 --- a/src/python_spectrometer/core.py +++ b/src/python_spectrometer/core.py @@ -22,6 +22,7 @@ from qutil.signal_processing.real_space import Id, welch from qutil.typecheck import check_literals from qutil.ui import progressbar +from ._audio_manager import WaveformPlaybackManager from ._plot_manager import PlotManager from .daq import settings as daq_settings from .daq.base import DAQ @@ -177,6 +178,16 @@ class Spectrometer: responsive while acquisition is running. prop_cycle : cycler.Cycler A property cycler for styling the plotted lines. + play_sound : bool, default False + Play the recorded noise sample out loud. + audio_amplitude_normalization : Union[Literal["single_max"], float], default "single_max" + The factor with with which the waveform is divided by to + normalize the waveform. This can be used to set the volume. + The default "single_max" normalized each sample depending on + only that one sample, thus the volume might not carry significant + information. Alternatively a factor like 1e-9 can be given to + specify that 1nA of signal corresponds to the full audio output + amplitude. savepath : str or Path Directory where the data is saved. All relative paths, for example those given to :meth:`serialize_to_disk`, will be @@ -242,6 +253,12 @@ class Spectrometer: 'noverlap': 2000, 'nperseg': 4000} + Use the audio interface to listen to the noise: + + >>> spect_with_audio = Spectrometer(daq, savepath=mkdtemp(), play_sound=True) + >>> spect_with_audio.take('a comment', f_max=20000, A=2e-4) + >>> spect_with_audio.audio_stream.stop() + """ _OLD_PARAMETER_NAMES = { 'plot_cumulative_power': 'plot_cumulative', @@ -260,7 +277,9 @@ class Spectrometer: plot_density: bool = True, plot_cumulative_normalized: bool = False, plot_style: _styleT = 'fast', plot_update_mode: Optional[Literal['fast', 'always', 'never']] = None, - plot_dB_scale: bool = False, threaded_acquisition: bool = True, + plot_dB_scale: bool = False, play_sound: bool = False, + audio_amplitude_normalization: Union[Literal["single_max"], float] = "single_max", + threaded_acquisition: bool = True, purge_raw_data: bool = False, prop_cycle=None, savepath: _pathT = None, relative_paths: bool = True, compress: bool = True, raw_unit: str = 'V', processed_unit: str = 'V', figure_kw: Optional[Mapping] = None, @@ -304,6 +323,9 @@ class Spectrometer: uses_windowed_estimator, figure_kw, subplot_kw, gridspec_kw, legend_kw) + self._audio_amplitude_normalization = audio_amplitude_normalization + self._play_sound = play_sound + # Expose plot properties from plot manager _to_expose = ('fig', 'ax', 'ax_raw', 'leg', 'plot_raw', 'plot_timetrace', 'plot_cumulative', 'plot_negative_frequencies', 'plot_absolute_frequencies', 'plot_amplitude', @@ -362,6 +384,36 @@ class Spectrometer: def savepath(self, path): self._savepath = io.to_global_path(path) + @cached_property + def audio_stream(self) -> WaveformPlaybackManager: + """Manages audio waveform playback.""" + return WaveformPlaybackManager(amplitude_normalization=self.audio_amplitude_normalization) + + @property + def play_sound(self): + """Play the recorded noise sample out loud.""" + return 
self._play_sound + + @play_sound.setter + def play_sound(self, flag:bool): + if self._play_sound != flag: + self._play_sound = flag + # as the play back was deactivate, the stream might need to be stopped. + # this will be done now: + if not flag and 'audio_stream' in self.__dict__: + del self.audio_stream + + @property + def audio_amplitude_normalization(self): + """The factor the waveform is divided by to normalize the waveform.""" + return self._audio_amplitude_normalization + + @audio_amplitude_normalization.setter + def audio_amplitude_normalization(self, val): + self._audio_amplitude_normalization = val + if 'audio_stream' in self.__dict__: + self.audio_stream.amplitude_normalization = val + def _resolve_path(self, file: _pathT) -> Path: """Resolve file to a fully qualified path.""" if not (file := Path(file)).is_absolute(): @@ -599,6 +651,9 @@ class Spectrometer: else: measurement_metadata = self._take_sequential(iterator, progress, key, **settings) + if self.play_sound: + self.play(key) + self._data[key].update(measurement_metadata=measurement_metadata) if self.purge_raw_data: del self._data[key]['timetrace_raw'] @@ -790,6 +845,43 @@ class Spectrometer: with self._plot_manager.plot_context: self._plot_manager.update_figure() + def play(self, comment_or_index: _keyT, use_processed_timetrace: bool = False, min_duration: Union[None, float] = None): + """Plays the noise out loud to allow the scientist to use their auditory input. + + Parameters + ---------- + use_processed_timetrace : bool + If true, then the 'timetrace_processed' data is used for the playback. If False is given, then 'timetrace_raw' is used. (default=False) + min_duration : Union[None, float] + The minimum duration that the noise is to be played. The sample will be repeated until the overall duration is equal to or larger than the min_duration. + + """ + + key = self._parse_keys(comment_or_index)[0] + + fs = self._data[key]['settings'].fs + dt = 1/fs + + if use_processed_timetrace: + data = self._data[key]['timetrace_processed'][-1] + else: + data = self._data[key]['timetrace_raw'][-1] + + original_duration = dt*len(data) # in s + + # taking the real component of the signal if a complex numpy array is given + if np.iscomplexobj(data): + data = np.abs(data) + + # repeat the wave to go up to the min_duration + if min_duration is not None: + repetitions = np.ceil(min_duration/original_duration) + if repetitions > 1: + data = np.repeat(data[None, :], repetitions, axis=0).flatten() + + if self.audio_stream is not None: + self.audio_stream.notify(data.flatten().astype("float32"), fs) + def reprocess_data(self, *comment_or_index: _keyT, save: Literal[False, True, 'overwrite'] = False, @@ -950,10 +1042,8 @@ class Spectrometer: file = file.with_stem(file.stem + '_files').with_suffix('.txt') else: file = self._runfile - file = io.check_path_length(file) file.write_text('\n'.join(self.files)) - if verbose: print(f'Wrote filenames to {file}.')
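
For reference, a minimal doctest-style sketch of how the playback feature introduced by this patch is used, following the example added to the Spectrometer docstring above. The daq instance and mkdtemp savepath are assumed to be set up as in the existing examples; the comment and min_duration values are illustrative only.

>>> spect = Spectrometer(daq, savepath=mkdtemp(), play_sound=True)
>>> spect.take('a comment', f_max=20000, A=2e-4)  # sample is played back right after acquisition
>>> spect.play('a comment', min_duration=5)       # replay a stored spectrum for at least 5 s
>>> spect.audio_stream.stop()                     # stop the playback thread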