Adding diffraction auralization scripts and VA core configuration file

[Paths]
# add as many as you like, they don't have to exist.
data_dir = data
data_dir_dev_jst = D:/Users/stienen/dev/VA/VACore/data
data_dir_dev_filbert =
[Macros]
ProjectName = BA_Filbert
DefaultHRIR = ITA_Artificial_Head_5x5_44kHz_128.v17.ir.daff
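# Macros can be referenced as $(MacroName) in values below; e.g. the record
# output path $(ProjectName)_Maekawa.wav should expand to BA_Filbert_Maekawa.wav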
[Audio driver]
Driver = Virtual
Device = Trigger
Samplerate = 44100
BufferSize = 128
OutputChannels = 2
[HomogeneousMedium]
DefaultSoundSpeed = 344.0 # m/s
DefaultStaticPressure = 101125.0 # [Pa]
DefaultTemperature = 20.0 # [Degree centigrade]
DefaultRelativeHumidity = 20.0 # [Percent]
DefaultShiftSpeed = 0.0, 0.0, 0.0 # 3D vector in m/s
[Renderer:MyBinauralFreeField]
Enabled = false
Class = BinauralFreeField
Reproductions = VirtualTalkthrough
RecordOutputEnabled = true
RecordOutputFilePath = $(ProjectName)_BinauralFreeField.wav
HRIRFilterLength = 256
MotionModelNumHistoryKeys = 10000
MotionModelWindowSize = 0.1
MotionModelWindowDelay = 0.1
MotionModelLogInputSources = false
MotionModelLogEstimatedOutputSources = false
MotionModelLogInputListener = false
MotionModelLogEstimatedOutputListener = false
SwitchingAlgorithm = cubicspline
[Renderer:GenericMaekawa]
Class = PrototypeGenericPath
Reproductions = VirtualTalkthrough
NumChannels = 2
IRFilterLengthSamples = 4096
RecordOutputEnabled = true
RecordOutputFilePath = $(ProjectName)_Maekawa.wav
[Renderer:GenericMaekawaApprox]
Class = PrototypeGenericPath
Reproductions = VirtualTalkthrough
NumChannels = 2
IRFilterLengthSamples = 4096
RecordOutputEnabled = true
RecordOutputFilePath = $(ProjectName)_Maekawa_Approx.wav
[Renderer:GenericUTD]
Class = PrototypeGenericPath
Reproductions = VirtualTalkthrough
NumChannels = 2
IRFilterLengthSamples = 4096
RecordOutputEnabled = true
RecordOutputFilePath = $(ProjectName)_UTD.wav
[Renderer:GenericUTDApprox]
Class = PrototypeGenericPath
Reproductions = VirtualTalkthrough
NumChannels = 2
IRFilterLengthSamples = 4096
RecordOutputEnabled = true
RecordOutputFilePath = $(ProjectName)_UTD_Approx.wav
[Renderer:GenericBTMS]
Class = PrototypeGenericPath
Reproductions = VirtualTalkthrough
NumChannels = 2
IRFilterLengthSamples = 4096
RecordOutputEnabled = true
RecordOutputFilePath = $(ProjectName)_BTMS.wav
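# The Generic* renderers above are fed with per-time-step impulse responses
# from the Matlab auralization script via set_rendering_module_parameters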
[Reproduction:VirtualTalkthrough]
Class = Talkthrough
Enabled = true
Name = Virtual talkthrough to output group
Outputs = MyDesktopHP
[Setup]
[Output:MyDesktopHP]
Description = Desktop user with headphones
Devices = MyHP
[OutputDevice:MyHP]
Type = HP
Description = Headphone hardware device (two channels)
Channels = 1,2
function [ ir ] = itaVA_convert_thirds( mags, ir_length, fs, freqs )
%ITAVA_CONVERT_THIRDS Converts a third octave magnitude spectrum to a minimum phase impulse response
%
% Defaults to third octave band resolution (31 magnitudes) and returns an
% impulse response of approximately 1024 samples at 44.1 kHz.
%
%   mags        magnitude spectrum (linear factors)
%   ir_length   length of the impulse response (samples)
%   fs          sampling rate (Hertz)
%   freqs       supporting / base vector of frequencies (Hertz)
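%
% Example (a minimal usage sketch, assuming the defaults above: a flat,
% all-ones third octave spectrum should give an approximately impulse-like
% minimum phase response):
%
%   mags_flat = ones( 1, 31 );
%   ir_flat = itaVA_convert_thirds( mags_flat ); % 1024 samples at 44.1 kHz
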
    if nargin < 4
        % Fallback: nominal ANSI third octave band center frequencies
        % (ita_ANSI_center_frequencies)
        freqs = [ 20 25 31.5 40 50 63 80 100 125 160 200 250 315 400 500 630 800 1000 1250 1600 2000 2500 3150 4000 5000 6300 8000 10000 12500 16000 20000 ];
    end
    if nargin < 3
        fs = 44100;
    end
    if nargin < 2
        ir_length = 1024;
    end

    assert( numel( freqs ) >= 2 )

    % Interpolate the band magnitudes onto a linearly spaced frequency axis
    freq_vec = linspace( freqs( 1 ), freqs( end ), ir_length / 2 );
    mags_p = interp1( freqs, mags, freq_vec, 'pchip' )';

    % Interpret the interpolated magnitudes as an energy spectrum and derive
    % a minimum phase impulse response from it
    mags_P = itaAudio( mags_p, fs, 'freq' );
    mags_P.signalType = 'energy';
    ir_mp = ita_minimumphase( mags_P );
    ir = ir_mp.timeData;

end
%% Paths
addpath( genpath( 'win32-x64.vc12' ) ) % VA binaries etc.
addpath( '../matlab' ) % diffraction simulation scripts
% addpath( 'sciebo/Bachelor Arbeit' )
%% Diffraction simulation setup
r = 5; % radius [m] of the source / receiver arc around the wedge edge
w = itaFiniteWedge( [ 1 1 0 ] ./ sqrt( 2 ), [ -1 1 0 ] ./ sqrt( 2 ), [ 0 0 5 ], 10 ); % right-angled (90 deg) wedge with its edge on the z-axis
source_pos = [ -r 0 0 ];
f = ita_ANSI_center_frequencies;
ir_length = 4096; % samples
fs = 44100; % Hz
c = 344; % speed of sound in m/s (matches DefaultSoundSpeed in the ini)
%% VA server start
vaserver_binary_path = which( 'VAServer.exe' );
if( isempty( vaserver_binary_path ) )
warning( 'Could not find VAServer executable, please add VA bin folder to Matlab path' )
itaVA_setup
vaserver_binary_path = which( 'VAServer.exe' );
assert( ~isempty( vaserver_binary_path ) )
end
ini_file_name = 'VACore.diffraction_auralization.ini';
% Launch the VA server in the background (trailing '&') so Matlab is not blocked
vaserver_call = [ '"' fullfile( vaserver_binary_path ) '" localhost:12340 "' fullfile( pwd, ini_file_name ) '" &' ];
system( vaserver_call );
%% VA connection
va = itaVA;
while( true )
try
va.connect
va.add_search_path( pwd )
break % connected and search path added, leave the retry loop
catch
if ~va.get_connected
disp( 'Waiting for server to come up' )
pause( 0.2 )
disp( 'Retrying' )
else
break
end
end
end
params = struct();
params.RecordOutputBaseFolder = fullfile( pwd, 'recording', datestr( now, 'yyyy-mm-dd_HH-MM-SS' ) );
renderer_id = 'GenericMaekawa';
params.RecordOutputFileName = [ renderer_id '.wav' ];
va.set_rendering_module_parameters( renderer_id, params );
renderer_id = 'GenericMaekawaApprox';
params.RecordOutputFileName = [ renderer_id '.wav' ];
va.set_rendering_module_parameters( renderer_id, params );
renderer_id = 'GenericUTD';
params.RecordOutputFileName = [ renderer_id '.wav' ];
va.set_rendering_module_parameters( renderer_id, params );
renderer_id = 'GenericUTDApprox';
params.RecordOutputFileName = [ renderer_id '.wav' ];
va.set_rendering_module_parameters( renderer_id, params );
renderer_id = 'GenericBTMS'; % must match the [Renderer:GenericBTMS] section of the ini; BTMS path updates are still a @todo below
params.RecordOutputFileName = [ renderer_id '.wav' ];
%va.set_rendering_module_parameters( renderer_id, params );
%% --------------- Auralization -----------------
L = va.create_sound_receiver( 'itaVA_Receiver' );
S = va.create_sound_source( 'itaVA_Source' );
va.set_sound_receiver_position( L, source_pos ); % note: the VA receiver rests at the simulation source position, the VA sound source is moved along the receiver arc below
X = va.create_signal_source_buffer_from_file( 'chirp.wav' );
%X = va.create_signal_source_buffer_from_file( 'gershwin-mono.wav' );
va.set_signal_source_buffer_playback_action( X, 'play' )
va.set_signal_source_buffer_looping( X, true );
va.set_sound_source_signal_source( S, X )
timestep = 128 / 44100; % block size / sampling rate, must match BufferSize and Samplerate of the VACore ini
manual_clock = 0;
va.set_core_clock( 0 );
N = 3400;
disp( [ 'Auralization result length: ' num2str( N * timestep ) ' s' ] )
alpha_d_rad = linspace( pi, 3 * pi / 2, N );
receiver_pos = zeros( N, 3 );
receiver_pos( :, 1 ) = r * sin( alpha_d_rad - 3 * pi / 4 ); % x absolute position
receiver_pos( :, 2 ) = r * cos( alpha_d_rad - 3 * pi / 4 ); % y absolute position
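% Optional sanity check (illustrative only, not required for the auralization):
% the trajectory should trace a quarter circle of radius r around the wedge edge
%   figure; plot( receiver_pos( :, 1 ), receiver_pos( :, 2 ) ); axis equal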
h = waitbar( 0, 'Hold on, running auralization' );
for n = 1:N
r_pos = receiver_pos( n, : );
va.set_sound_source_position( S, r_pos );
%% Detour
in_shadow_zone = ita_diffraction_shadow_zone( w, source_pos, r_pos );
if in_shadow_zone
% Detour over aperture point
apex = w.get_aperture_point( source_pos, r_pos );
distance = norm( source_pos - apex ) + ...
norm( r_pos - apex );
else
% Direct line-of-sight
distance = norm( source_pos - r_pos );
end
%% Direct sound
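% If the receiver is outside the shadow zone, the direct sound is modelled as a
% free-field monopole, i.e. a pure propagation delay with 1/d distance decay,
% exp( -1i * k * d ) / d; inside the shadow zone the direct sound is omitted and
% only the diffracted field contributes.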
ir_direct_sound = itaAudio( 1 );
ir_direct_sound.samplingRate = fs;
ir_direct_sound.timeData = zeros( ir_length, 1 );
if ~in_shadow_zone
k = 2 * pi * f' ./ c;
tf_direct_sound = exp( -1i * k * distance ) ./ distance;
k = 2 * pi * ir_direct_sound.freqVector ./ c;
ir_direct_sound.freqData = exp( -1i * k * distance ) ./ distance;
else
tf_direct_sound = zeros( numel( f ), 1 );
end
%% Maekawa
% Get Maekawa diffraction simulation TF
tf_maekawa = ita_diffraction_maekawa( w, source_pos, r_pos, f, c );
ir_maekawa = itaAudio( 1 );
ir_maekawa.samplingRate = fs;
ir_maekawa.timeData = itaVA_convert_thirds( tf_maekawa + tf_direct_sound, ir_length + 2, fs, f );
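% Pass the combined (direct + diffracted) impulse response to the generic path
% renderer: ch1/ch2 hold the filters for the two output channels (identical here,
% i.e. no binaural cues) and delay carries the propagation delay of the current
% direct or detour path.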
path_update = struct();
path_update.source = S;
path_update.receiver = L;
path_update.ch1 = ir_maekawa.timeData( :, 1 );
path_update.ch2 = ir_maekawa.timeData( :, 1 );
path_update.delay = distance / va.get_homogeneous_medium_sound_speed();
va.set_rendering_module_parameters( 'GenericMaekawa', path_update );
%% Maekawa shadow zone approximation
% Get Maekawa diffraction simulation TF
tf_maekawa_a = ita_diffraction_maekawa_approx( w, source_pos, r_pos, f, c );
ir_maekawa_a = itaAudio( 1 );
ir_maekawa_a.samplingRate = fs;
ir_maekawa_a.timeData = itaVA_convert_thirds( tf_maekawa_a + tf_direct_sound, ir_length + 2, fs, f );
path_update = struct();
path_update.source = S;
path_update.receiver = L;
path_update.ch1 = ir_maekawa_a.timeData( :, 1 );
path_update.ch2 = ir_maekawa_a.timeData( :, 1 );
path_update.delay = distance / va.get_homogeneous_medium_sound_speed();
va.set_rendering_module_parameters( 'GenericMaekawaApprox', path_update );
%% UTD
% Get UTD diffraction simulation TF
tf_utd = ita_diffraction_utd( w, source_pos, r_pos, f, c );
ir_utd = itaAudio( 1 );
ir_utd.samplingRate = fs;
ir_utd.timeData = itaVA_convert_thirds( tf_utd + tf_direct_sound, ir_length + 2, fs, f );
path_update = struct();
path_update.source = S;
path_update.receiver = L;
path_update.ch1 = ir_utd.timeData( :, 1 );
path_update.ch2 = ir_utd.timeData( :, 1 );
path_update.delay = distance / va.get_homogeneous_medium_sound_speed();
va.set_rendering_module_parameters( 'GenericUTD', path_update );
%% UTD shadow zone approximation
% Get UTD approximation diffraction simulation TF
tf_utd_a = ita_diffraction_utd_approximated( w, source_pos, r_pos, f, c );
ir_utd_a = itaAudio( 1 );
ir_utd_a.samplingRate = fs;
ir_utd_a.timeData = itaVA_convert_thirds( tf_utd_a + tf_direct_sound, ir_length + 2, fs, f );
path_update = struct();
path_update.source = S;
path_update.receiver = L;
path_update.ch1 = ir_utd_a.timeData( :, 1 );
path_update.ch2 = ir_utd_a.timeData( :, 1 );
path_update.delay = distance / va.get_homogeneous_medium_sound_speed();
va.set_rendering_module_parameters( 'GenericUTDApprox', path_update );
%% BTM(S)?
% @todo
%% Continue auralization processing
% Increment core clock
manual_clock = manual_clock + timestep;
va.call_module( 'manualclock', struct( 'time', manual_clock ) );
% Process audio chain by incrementing one block
va.call_module( 'virtualaudiodevice', struct( 'trigger', true ) );
waitbar( n / N )
end
close( h )
va.disconnect
disp( 'Stop VA to export simulation results from rendering module(s)' )