#
#  -------------------------------------------------------------------------------------------
#
#    VVV        VVV A
#     VVV      VVV AAA         Virtual Acoustics (VA)
#      VVV    VVV   AAA        Real-time auralisation for virtual reality
#       VVV  VVV     AAA
#        VVVVVV       AAA      (c) Copyright Institute of Technical Acoustics (ITA)
#         VVVV         AAA         RWTH Aachen University (http://www.akustik.rwth-aachen.de)
#
#  -------------------------------------------------------------------------------------------
#
# WARNING: This configuration file for the VACore has been automatically created by CMake.
#          You can move this file and change the argument path accordingly, otherwise
#          your changes will be overwritten. Alternatively, you can directly modify
#          the prototype file 'VACore.ini.proto' and re-run CMake.
#

[Paths]

# Any entry value will be added to the search path list, but only if it exists! The paths are also made available as macros (see below).
# If a file cannot be found by its local name during runtime, the core will try to locate it using the search paths in ascending name order.
# Macros are not substituted here. However, it is recommended to use AddSearchPath during runtime if you have individual directories
# you want to add.

# Relative configuration path (with some more hardware setup files)
conf_dir = conf
conf_dir_dev = @CMAKE_CURRENT_SOURCE_DIR@/conf

# Relative data path (with some common files)
data_dir = data
data_dir_dev = @CMAKE_CURRENT_SOURCE_DIR@/data

raven_data = @ITA_RAVEN_DATA_PATH@

# Absolute data path with further files (big data, not shipped with VA)
big_data_dir = @ITA_VACORE_BIG_DATA_DIR@

# Inside scene data directory
InsideSceneData = @INSIDE_SCENE_DATA@

[Files]

# List further config files to be appended to this configuration (also resolved via the search paths)
#VRLabSetup = VASetup.VRLab.ini

[Macros]

# Macros can be defined and will be replaced with the given value by the core. Usage: "$(MyMacroName)/file.abc" -> "MyValue/file.abc"
# Macros are substituted in forward order of key names (use with care); otherwise they stay untouched: A = B; C = $(A) -> $(C) is B

DefaultHRIR = HRIR/ITA-Kunstkopf_HRIR_AP11_Pressure_Equalized_3x3_256.v17.ir.daff
HumanDir = Directivity/Singer.v17.ms.daff
Trumpet = Directivity/Trumpet1.v17.ms.daff

# Legacy support, please use the $(DefaultHRIR) macro
VADefaultHRIRDataset = $(DefaultHRIR)

# Define some other macros (examples)
ProjectName = MyVirtualAcousticsProject

[Debug]

# Dump the audio device input streams into a file
#DumpDeviceInput = $(ProjectName)_in.wav

# Dump the final output audio streams into a file
#DumpFinalOutput = $(ProjectName)_out.wav

# Set log level: 0 = quiet; 1 = errors; 2 = warnings (default); 3 = info; 4 = verbose; 5 = trace
LogLevel = @ITA_VACORE_DEFAULT_DEBUG_LEVEL@

[Audio driver]

# MANDATORY: Audio driver backend (ASIO|Portaudio)
#Driver = ASIO
Driver = Portaudio

# MANDATORY: Audio device (e.g. ASIO4ALL v2, ASIO Hammerfall DSP, Portaudio 'default', 0,1,2,3,...)
#Device = ASIO4ALL v2
#Device = ASIO Hammerfall DSP
#Device = ASIO Fireface USB
#Device = ASIO MADIface USB
#Device = Focusrite USB 2.0 Audio Driver
#Device = M-Audio Fast Track Ultra ASIO
#Device = Yamaha Steinberg USB ASIO
Device = default

# MANDATORY: Sampling rate [Hz]
Samplerate = 44100

# OPTIONAL: Buffer size used for audio streaming [samples]
# AUTO will determine the buffer size from the audio device automatically
# Note: Adjust this to your latency requirements
Buffersize = AUTO


# --= Audio renderers =--

# Syntax: [Renderer:<Name>], where <Name> can be freely chosen but must be unique
#
# Fields:
#
#   Enabled = (true|false)   Create the renderer or skip it (default: true)
#   Class = <Class>          Renderer type/class to create [MANDATORY]
#   Outputs = <Targets>      Modules the renderer sends its output samples to (hardware output or reproduction module) [MANDATORY]
#

[Renderer:MyBinauralFreeField]
Class = BinauralFreeField
Enabled = true
Outputs = MyTalkthroughHeadphones
HRIRFilterLength = 256
MotionModelNumHistoryKeys = 10000
MotionModelWindowSize = 0.1
MotionModelWindowDelay = 0.1
MotionModelLogInputSources = false
MotionModelLogEstimatedOutputSources = false
MotionModelLogInputListener = false
MotionModelLogEstimatedOutputListener = false
SwitchingAlgorithm = linear

[Renderer:MyAmbientMixer]
Class = AmbientMixer
Description = Low-cost renderer to make sound audible without spatialization
Enabled = false
OutputGroup = MyDesktopHP
Outputs = MyTalkthroughHeadphones

[Renderer:MyBinauralArtificialReverb]
Class = BinauralArtificialReverb
Enabled = false
Outputs = MyTalkthroughHeadphones
ReverberationTime = 0.71
RoomVolume = 200
RoomSurfaceArea = 88
MaxReverbFilterLengthSamples = 88200
PositionThreshold = 1.0
AngleThresholdDegree = 30
SoundPowerCorrectionFactor = 0.05
TimeSlotResolution = 0.005
MaxReflectionDensity = 12000.0
ScatteringCoefficient = 0.1
SpeedOfSound = 344.0

[Renderer:MyBinauralRoomAcoustics]
Class = BinauralRoomAcoustics
Enabled = false
Outputs = MyTalkthroughHeadphones
# Setup options: Local, Remote, Hybrid
Setup = Local
ServerIP = PC-SEACEN
HybridLocalTasks = DS
HybridRemoteTasks = ER_IS, DD_RT
RavenDataBasePath = $(raven_data)
# Task processing: Timeout = process at the desired update rate (resource-efficient); EventSync = process on request (for sporadic updates); Continuous = update as often as possible (for a standalone server)
TaskProcessing = Timeout
# Desired update rates in Hz, may lead to resource problems
UpdateRateDS = 12.0
UpdateRateER = 4.2
UpdateRateDD = 0.08
DirectSoundPowerCorrectionFactor = 0.05

[Renderer:MyMonauralFreeField]
Class = MonauralFreeField
Enabled = false
Outputs = MyTalkthroughHeadphones
MotionModelNumHistoryKeys = 10000
MotionModelWindowSize = 0.2
MotionModelWindowDelay = 0.1
MotionModelLogInputSources = false
MotionModelLogEstimatedOutputSources = false
DumpListeners = false
DumpListenersGain = 1.0
SpeedOfSound = 344.0
SwitchingAlgorithm = linear

[Renderer:MyHearingAidRenderer]
Class = PrototypeHearingAid
Enabled = false
Outputs = HearingAidTalkthrough
#Outputs = MyTalkthroughHeadphones
UpdateRateDS = 120.0
UpdateRateIS = 20.0
UpdateRateRT = 1.0
RenderingGain = 1.0
RenderingDelayInMs = 10.0
HRIRFilterLength = 256
# Dump individual listener signals
DumpListeners = false
# Gain for listener dumping [factor]
DumpListenersGain = 10

[Renderer:MyGenericRenderer]
Class = PrototypeGenericPath
Enabled = false
Outputs = MyTalkthroughHeadphones
NumChannels = 2
IRFilterLengthSamples = 88200
IRFilterDelaySamples = 0
OutputMonitoring = true
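
# A hypothetical second instance of the generic path renderer (all names and values
# below are illustrative and not part of the generated file): the same
# PrototypeGenericPath class, but with four channels routed to a hypothetical
# quadraphonic output group that would have to be defined in the [Setup] section.
#[Renderer:MyGenericQuadRenderer]
#Class = PrototypeGenericPath
#Enabled = false
#Outputs = MyQuadLS
#NumChannels = 4
#IRFilterLengthSamples = 44100
#IRFilterDelaySamples = 0
#OutputMonitoring = false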

[Renderer:MyAirTrafficNoiseRenderer]
Class = BinauralAirTrafficNoise
Enabled = false
Outputs = MyTalkthroughHeadphones

[Renderer:MyVBAPFreefield]
Class = VBAPFreeField
Enabled = false
Output = VRLab_Horizontal_LS
Outputs = MixdownHeadphones

[Renderer:MyDummyRenderer]
Class = PrototypeDummy
Description = Dummy renderer for testing and benchmarking
Enabled = false
OutputGroup = MyDesktopHP
Outputs = MyTalkthroughHeadphones


# --= Audio reproduction =--

# Here you list the audio reproduction modules that are created when VA starts.
# Each section has the format [Reproduction:<Name>] (<Name> must be unique and can be freely chosen).
#
# Why this layer with inputs and outputs?
#  - Sensible grouping for level meters
#  - Abstraction of renderers and reproductions from physical channels
#  - Additional functionality independent of renderers and reproductions: equalization
#
#  - These inputs must appear as signal sources in the core

[Reproduction:MyTalkthroughHeadphones]
Class = Talkthrough
Enabled = true
Name = Generic talkthrough to output group
Outputs = MyDesktopHP

[Reproduction:MySubwooferMixer]
Class = LowFrequencyMixer
Enabled = false
Name = Generic low frequency (subwoofer) loudspeaker mixer
Outputs = Cave_SW
MixingChannels = ALL

[Reproduction:MyHD600]
Class = Headphones
Enabled = false
# Headphone impulse response inverse file path (normalized)
HpIRInvFile = HPEQ/HD600_all_eq_128_stereo.wav
#HpIRInvFilterLength = 22050
# Headphone impulse response inverse gain for calibration ( HpIR * HpIRInv == 0 dB )
HpIRInvCalibrationGainDecibel = 0.1
Name = Equalized Sennheiser HD600 headphones
Outputs = MyDesktopHP

[Reproduction:MyNCTC]
Class = NCTC
Enabled = false
Name = Crosstalk cancellation for N loudspeakers
Outputs = MyDesktopLS
TrackedListenerID = 1
# Algorithm: reg|...
Algorithm = reg
RegularizationBeta = 0.01
DelaySamples = 2048
UseTrackedListenerHRIR = false
CTCDefaultHRIR = $(DefaultHRIR)
Optimization = OPTIMIZATION_NONE

[Reproduction:MyAmbisonics]
Class = HOA
Enabled = false
Name = Higher-Order Ambisonics
Algorithm = HOA
Outputs = VRLab_Horizontal_LS

[Reproduction:MyMixdownHeadphones]
Class = BinauralMixdown
Enabled = false
Name = Binaural mixdown of virtual loudspeaker setup using HRIR techniques
Outputs = MyDesktopHP
VirtualOutput = MyDesktopLS
TrackedListenerID = 1
HRIRFilterLength = 128

[Reproduction:HearingAidTalkthrough]
Class = Talkthrough
Enabled = false
Name = Hearing aid talkthrough
Outputs = HearingAid


[Setup]

# Hardware setup example for a desktop environment; usually more sophisticated
# settings are outsourced to a VASetup.ini that is included via the 'Files' section.

# -- outputs

[Output:MyDesktopHP]
Description = Desktop user with headphones
Devices = MyHP

[Output:MyDesktopLS]
Description = Desktop user with loudspeaker monitors
Devices = MyLSLeft, MyLSRight

# -- inputs

[Input:MyDesktopMike]
Description = Generic desktop microphone, e.g. from a webcam or line-in
Devices = MyLineIn

# -- hardware devices

[OutputDevice:MyHP]
Type = HP
Description = Headphone hardware device (two channels)
Channels = 1,2

[OutputDevice:MyLSLeft]
Type = LS
Description = Loudspeaker hardware device
Channels = 1
Position = -0.5, 0.2, -0.4

[OutputDevice:MyLSRight]
Type = LS
Description = Loudspeaker hardware device
Channels = 2
Position = 0.5, 0.2, -0.4

[InputDevice:MyLineIn]
Type = MIC
Description = Table microphone in front of user using line-in
Position = 0, -0.3, 0.2
Channels = 1
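
# Example (hypothetical): for larger installations the whole [Setup] block is usually
# moved to a separate file, e.g. VASetup.MyLab.ini (file name illustrative), which is
# pulled in via the [Files] section at the top of this configuration:
#
#   [Files]
#   MyLabSetup = VASetup.MyLab.ini
#
# The included file then contains further [Output:...], [Input:...], [OutputDevice:...]
# and [InputDevice:...] sections in the same format as shown above.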