diff --git a/.gitignore b/.gitignore
index 70ad142a0382995a29ea82f6f6c0e0c91ad73548..451eb7151151a13f11264f4290b6756f70bcc2e8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,3 +6,8 @@ dist
 examples/jupyter/*.ipynb_checkpoints*
 *.wav
 *.daff
+src/vapython/vanet/_va_interface.py
+src/vapython/vanet/_vanet_grpc.py
+__pycache__
+.coverage
+cov-report-html
\ No newline at end of file
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
new file mode 100644
index 0000000000000000000000000000000000000000..bee7396f641cd8ecff82210b4c53ee06fa02925c
--- /dev/null
+++ b/.gitlab-ci.yml
@@ -0,0 +1,63 @@
+workflow:
+  rules:
+    - if: $CI_PIPELINE_SOURCE == 'merge_request_event'
+    - if: $CI_PIPELINE_SOURCE == 'pipeline'
+    - if: $CI_COMMIT_REF_PROTECTED == "true" && $CI_OPEN_MERGE_REQUESTS
+      when: never
+    - if: $CI_COMMIT_REF_PROTECTED == "true"
+
+stages:
+   - pretest
+   - build
+   - test
+
+format:
+  stage: pretest
+  script:
+    - hatch fmt --check --formatter
+    - if(!$?) { Exit $LASTEXITCODE }
+
+lint:
+  stage: pretest
+  script:
+    - hatch fmt --check --linter
+    - if(!$?) { Exit $LASTEXITCODE }
+
+typecheck:
+  stage: pretest
+  script:
+    - hatch run types:check
+    - if(!$?) { Exit $LASTEXITCODE }
+
+build:
+  stage: build
+  script:
+    - hatch build
+
+docs:
+  stage: build
+  script:
+    - hatch run docs:build
+  artifacts:
+    paths:
+      - "site/"
+
+test:
+  stage: test
+  script:
+    - hatch run test:test
+    - if(!$?) { Exit $LASTEXITCODE }
+
+coverage:
+  stage: test
+  script:
+    - hatch run cov-ci
+    - if(!$?) { Exit $LASTEXITCODE }
+  coverage: '/TOTAL.*? (100(?:\.0+)?\%|[1-9]?\d(?:\.\d+)?\%)$/'
+  artifacts:
+    reports:
+      coverage_report:
+        coverage_format: cobertura
+        path: coverage.xml
+
+
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 0000000000000000000000000000000000000000..4f4e903624d19f75375c93f97b1904c0c9c755aa
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,54 @@
+# Changelog
+
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
+and this project adheres to a versioning scheme similar to Matlab, with versions based on the release year.
+
+## [Unreleased]
+
+## [2024a] - 2024-09-26
+
+### Added
+
+- Add `CHANGELOG.md`, `CONTRIBUTING.md`, and `CODE_OF_CONDUCT.md` files (!6)
+- Add CI configuration (!4)
+- Unit testing for most used functions in binding (!4)
+- Full documentation for binding (!5)
+- Add NatNetTracking support (!8) -> Feature parity with VAMatlab
+- Add the option to add default values to the generated Python class (!10)
+- Add event handling (!11)
+- New examples (!7)
+
+### Changed
+
+- Complete rework of binding using `betterproto` for a pure Python implementation (!4)
+- Set the minimum Python version to 3.9 (!9)
+- CMakeLists.txt adapted to new, pure Python binding (!12)
+- Improved README.md (!15)
+
+## [2023b] - 2023-11-16
+
+### Changed
+
+- Improved README.md
+
+## [2023a] - 2023-06-21
+
+### Added
+
+- Better wheel generation with multiple Python versions supported
+
+### Fixed
+
+- many minor bugs in binding
+
+## [2022a] - 2022-06-15
+
+### Changed
+
+- Modernize build system
+
+## Previous versions
+
+- No changelog available
\ No newline at end of file
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 585979b01c52c06dbd5db3a65e08411f5198d20f..4f955df671196385a0533c2321850c372bc32a52 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -3,98 +3,20 @@ cmake_minimum_required (VERSION 3.26 FATAL_ERROR)
 project (
 	VAPython
 	LANGUAGES CXX C
-	VERSION 2023.1
+	VERSION 2024.0
 )
 
-set (CMAKE_DEBUG_POSTFIX "-d")
+find_program(HATCH_EXECUTABLE "hatch")
 
-if (NOT DEFINED CMAKE_RUNTIME_OUTPUT_DIRECTORY AND WIN32)
-	set (CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/$<CONFIG>/bin)
-	set (CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/$<CONFIG>/lib)
-	set (CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/$<CONFIG>/lib)
+if (HATCH_EXECUTABLE)
+	set(VAPYTHON_BUILD_COMMAND hatch build -t wheel)
+else ()
+	set (VAPYTHON_BUILD_COMMAND echo "hatch not found, skipping build for VAPython")
 endif ()
 
-if (CMAKE_GENERATOR MATCHES "Visual Studio")
-	set_property (GLOBAL PROPERTY USE_FOLDERS ON)
-endif ()
-
-CPMAddPackage (
-	NAME python-cmake-wheel
-	GITHUB_REPOSITORY Klebert-Engineering/python-cmake-wheel
-	GIT_TAG main
-	DOWNLOAD_ONLY TRUE
-)
-
-list (INSERT CMAKE_MODULE_PATH 0 CMAKE_MODULE_PATH ${python-cmake-wheel_SOURCE_DIR})
-
-include (python-wheel)
-
-find_package (Python REQUIRED COMPONENTS Interpreter Development.SABIModule)
-
-# Add target 'va', the project name is *not* used here, so in python the module is called 'va'.
-python_add_library (
-	${PROJECT_NAME}
-	MODULE
-	USE_SABI
-	3.8
-	WITH_SOABI
-	src/vasingleton.cpp
-	src/vasingletondoc.hpp
-	src/vasingletonmethods.hpp
-)
-add_library (VA::${PROJECT_NAME} ALIAS ${PROJECT_NAME})
-
-# Linking
-target_link_libraries (${PROJECT_NAME} PRIVATE VA::VANet)
-
-# Required compiler features
-target_compile_features (${PROJECT_NAME} PUBLIC cxx_std_17)
-
-# If build outside of VA project, change IDE folder
-if (NOT CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR)
-	set_property (TARGET ${PROJECT_NAME} PROPERTY FOLDER "Bindings/VA")
-endif ()
-
-# Remove the debug postfix, so the module always has the same name. Also set the output dir to bin, so if shared build
-# is enabled, the module can find all the dlls.
-set_target_properties (
-	${PROJECT_NAME} PROPERTIES DEBUG_POSTFIX "" LIBRARY_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/$<CONFIG>/bin"
-)
-
-set (WHEEL_DEPLOY_DIRECTORY "${CMAKE_BINARY_DIR}/python_wheels")
+add_custom_target(${PROJECT_NAME} COMMAND ${VAPYTHON_BUILD_COMMAND} WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
 
-add_wheel (
-	${PROJECT_NAME}
-	VERSION
-	${PROJECT_VERSION}
-	AUTHOR
-	"Institute for Hearing Technology and Acoustics (IHTA), RWTH Aachen University"
-	URL
-	"https://www.virtualacoustics.org"
-	LICENSE_PATH
-	${CMAKE_CURRENT_SOURCE_DIR}/LICENSE.md
-	PYTHON_REQUIRES
-	">=3.8"
-	DESCRIPTION
-	"Virtual Acoustics (VA) singleton interface"
-	DEPLOY_FILES
-	""
-	TARGET_DEPENDENCIES
-	VA::VABase # VANet is now static, not needed here.
-	MODULE_DEPENDENCIES
-	""
-)
-
-file (WRITE ${CMAKE_CURRENT_BINARY_DIR}/setup.cfg "[bdist_wheel]\npy_limited_api = cp38")
-
-file (READ ${CMAKE_CURRENT_BINARY_DIR}/setup.py setup_CONTENT)
-string (REGEX REPLACE "(sources=\\[\\])" "\\1, py_limited_api=True" setup_CONTENT "${setup_CONTENT}") #
-file (WRITE ${CMAKE_CURRENT_BINARY_DIR}/setup.py ${setup_CONTENT})
-
-set_property (TARGET ${PROJECT_NAME}-copy-files ${PROJECT_NAME}-setup-py PROPERTY FOLDER "Bindings/VA")
-set_property (TARGET wheel PROPERTY FOLDER "Bindings")
-
-# ---Install---
+set_property (TARGET ${PROJECT_NAME} PROPERTY FOLDER "Bindings/VA")
 
 # prepare the install folder name
 math (EXPR RELEASE_LETTER "${PROJECT_VERSION_MINOR}+97")
@@ -103,30 +25,44 @@ set (VA_PYTHON_OUTPUT_FOLDER "VAPython_v${PROJECT_VERSION_MAJOR}${RELEASE_LETTER
 
 # Readme
 install (
-	FILES "README.md"
+	FILES "README.md" "LICENSE.md" "CHANGELOG.md"
 	DESTINATION ${VA_PYTHON_OUTPUT_FOLDER}
 	COMPONENT ${PROJECT_NAME}
 )
 
-# Wheel files for installation on user machine
-install (
-	DIRECTORY ${WHEEL_DEPLOY_DIRECTORY}/
-	DESTINATION ${VA_PYTHON_OUTPUT_FOLDER}
-	COMPONENT ${PROJECT_NAME}
-	FILES_MATCHING
-	PATTERN "*.whl"
-)
+# Transfer variables into the install script
+install (CODE "set(VA_PYTHON_OUTPUT_FOLDER \"${VA_PYTHON_OUTPUT_FOLDER}\")" COMPONENT ${PROJECT_NAME})
+install (CODE "set(VA_PYTHON_PACKAGE_PATH \"${CMAKE_CURRENT_SOURCE_DIR}\")" COMPONENT ${PROJECT_NAME})
+install (CODE "set(VA_PYTHON_PACKAGE_WHEEL \"vapython-${PROJECT_VERSION}-py3-none-any.whl\")" COMPONENT ${PROJECT_NAME})
+install (CODE "set(VA_PYTHON_PACKAGE_WHEEL_FINAL \"vapython-v${PROJECT_VERSION_MAJOR}${RELEASE_LETTER}-py3-none-any.whl\")" COMPONENT ${PROJECT_NAME})
 
-# Tests
+# during install create a wheel from the content of the python folder and include it in the install
 install (
-	DIRECTORY "tests"
-	DESTINATION ${VA_PYTHON_OUTPUT_FOLDER}
-	COMPONENT ${PROJECT_NAME}
-)
-
-# Examples
-install (
-	DIRECTORY "examples"
-	DESTINATION ${VA_PYTHON_OUTPUT_FOLDER}
+	CODE [[
+		find_package(Python3 COMPONENTS Interpreter)
+
+		if (Python3_Interpreter_FOUND)
+			execute_process(
+				COMMAND ${Python3_EXECUTABLE} -m build --wheel ${VA_PYTHON_PACKAGE_PATH}
+				RESULT_VARIABLE _result
+			)
+			if (NOT _result EQUAL 0)
+				message(FATAL_ERROR "Failed to create wheel")
+			endif ()
+		else ()
+			message(FATAL_ERROR "Python3 not found")
+		endif ()
+
+		set(_wheel "${VA_PYTHON_PACKAGE_PATH}/dist/${VA_PYTHON_PACKAGE_WHEEL}")
+		set(_wheel_final "${VA_PYTHON_PACKAGE_PATH}/dist/${VA_PYTHON_PACKAGE_WHEEL_FINAL}")
+
+		if (NOT EXISTS "${_wheel}")
+			message(FATAL_ERROR "Wheel not found")
+		endif ()
+
+		file(COPY_FILE ${_wheel} ${_wheel_final})
+
+		file(INSTALL ${_wheel_final} DESTINATION ${CMAKE_INSTALL_PREFIX}/${VA_PYTHON_OUTPUT_FOLDER}/)
+	]]
 	COMPONENT ${PROJECT_NAME}
 )
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000000000000000000000000000000000..53532e1d8b28729486d4ba0225751f04a9dd369e
--- /dev/null
+++ b/CODE_OF_CONDUCT.md
@@ -0,0 +1,132 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+We as members, contributors, and leaders pledge to make participation in our
+community a harassment-free experience for everyone, regardless of age, body
+size, visible or invisible disability, ethnicity, sex characteristics, gender
+identity and expression, level of experience, education, socio-economic status,
+nationality, personal appearance, race, caste, color, religion, or sexual
+identity and orientation.
+
+We pledge to act and interact in ways that contribute to an open, welcoming,
+diverse, inclusive, and healthy community.
+
+## Our Standards
+
+Examples of behavior that contributes to a positive environment for our
+community include:
+
+* Demonstrating empathy and kindness toward other people
+* Being respectful of differing opinions, viewpoints, and experiences
+* Giving and gracefully accepting constructive feedback
+* Accepting responsibility and apologizing to those affected by our mistakes,
+  and learning from the experience
+* Focusing on what is best not just for us as individuals, but for the overall
+  community
+
+Examples of unacceptable behavior include:
+
+* The use of sexualized language or imagery, and sexual attention or advances of
+  any kind
+* Trolling, insulting or derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or email address,
+  without their explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+  professional setting
+
+## Enforcement Responsibilities
+
+Community leaders are responsible for clarifying and enforcing our standards of
+acceptable behavior and will take appropriate and fair corrective action in
+response to any behavior that they deem inappropriate, threatening, offensive,
+or harmful.
+
+Community leaders have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions that are
+not aligned to this Code of Conduct, and will communicate reasons for moderation
+decisions when appropriate.
+
+## Scope
+
+This Code of Conduct applies within all community spaces, and also applies when
+an individual is officially representing the community in public spaces.
+Examples of representing our community include using an official email address,
+posting via an official social media account, or acting as an appointed
+representative at an online or offline event.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported to the community leaders responsible for enforcement at
+<pascal.palenda@akustik.rwth-aachen.de>.
+All complaints will be reviewed and investigated promptly and fairly.
+
+All community leaders are obligated to respect the privacy and security of the
+reporter of any incident.
+
+## Enforcement Guidelines
+
+Community leaders will follow these Community Impact Guidelines in determining
+the consequences for any action they deem in violation of this Code of Conduct:
+
+### 1. Correction
+
+**Community Impact**: Use of inappropriate language or other behavior deemed
+unprofessional or unwelcome in the community.
+
+**Consequence**: A private, written warning from community leaders, providing
+clarity around the nature of the violation and an explanation of why the
+behavior was inappropriate. A public apology may be requested.
+
+### 2. Warning
+
+**Community Impact**: A violation through a single incident or series of
+actions.
+
+**Consequence**: A warning with consequences for continued behavior. No
+interaction with the people involved, including unsolicited interaction with
+those enforcing the Code of Conduct, for a specified period of time. This
+includes avoiding interactions in community spaces as well as external channels
+like social media. Violating these terms may lead to a temporary or permanent
+ban.
+
+### 3. Temporary Ban
+
+**Community Impact**: A serious violation of community standards, including
+sustained inappropriate behavior.
+
+**Consequence**: A temporary ban from any sort of interaction or public
+communication with the community for a specified period of time. No public or
+private interaction with the people involved, including unsolicited interaction
+with those enforcing the Code of Conduct, is allowed during this period.
+Violating these terms may lead to a permanent ban.
+
+### 4. Permanent Ban
+
+**Community Impact**: Demonstrating a pattern of violation of community
+standards, including sustained inappropriate behavior, harassment of an
+individual, or aggression toward or disparagement of classes of individuals.
+
+**Consequence**: A permanent ban from any sort of public interaction within the
+community.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage],
+version 2.1, available at
+[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
+
+Community Impact Guidelines were inspired by
+[Mozilla's code of conduct enforcement ladder][Mozilla CoC].
+
+For answers to common questions about this code of conduct, see the FAQ at
+[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
+[https://www.contributor-covenant.org/translations][translations].
+
+[homepage]: https://www.contributor-covenant.org
+[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
+[Mozilla CoC]: https://github.com/mozilla/diversity
+[FAQ]: https://www.contributor-covenant.org/faq
+[translations]: https://www.contributor-covenant.org/translations
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000000000000000000000000000000000000..dca0490d4659ca54c0f1f7cdda795fa793d74916
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,43 @@
+# Contributing Guidelines
+
+Thank you for considering contributing to our project!
+We welcome contributions from the community to help improve and grow our project.
+Before contributing, please take a moment to review the following guidelines:
+
+## How to Contribute
+
+1. Fork the repository and clone it to your local machine.
+2. Create a new branch for your contribution: `git checkout -b feature/my-feature`.
+   The branch name should describe the changes you are making.
+3. Make your changes and ensure they align with our project's goals and coding standards.
+4. Test your changes thoroughly to ensure they work as expected.
+5. Commit your changes with clear and descriptive commit messages following the [EU Commit Guidelines](https://ec.europa.eu/component-library/v1.15.0/eu/docs/conventions/git/).
+6. Add your changes to the `CHANGELOG.md` file under the `[Unreleased]` section and follow the [Keep a Changelog](https://keepachangelog.com/en/1.1.0/) format.
+7. Push your changes to your fork.
+8. Open a merge request (MR) against the main branch of our repository.
+9. Provide a detailed description of your changes in the MR, including any relevant context or references.
+10. Ensure your MR passes all automated checks and tests.
+11. Be responsive to feedback and be willing to make further changes if requested.
+
+## Code Style and Standards
+
+- Follow our project's coding style and conventions.
+- Write clear, concise, and maintainable code.
+- Use meaningful variable names and document your code to improve code readability and usability.
+
+## Reporting Issues
+
+If you encounter a bug or have a feature request, please open an issue on our [repository](https://git.rwth-aachen.de/ita/VAPython).
+Provide detailed information about the issue, including steps to reproduce it if applicable.
+
+## Code of Conduct
+
+Please adhere to our [project's Code of Conduct](CODE_OF_CONDUCT.md) in all interactions and contributions.
+
+## Licensing
+
+By contributing to our project, you agree to license your contributions under the same license as the project.
+Ensure that you have the right to contribute the code under the project's license.
+
+We appreciate your interest in contributing to our project and look forward to your contributions!
+If you have any questions or need assistance, feel free to contact us.
diff --git a/LICENSE.md b/LICENSE.md
index fa64b037c0d2d630d0a0bb95332d5e634c083aed..298a59f6bc3dcad64fad8acaefc112562a51afda 100644
--- a/LICENSE.md
+++ b/LICENSE.md
@@ -1,4 +1,4 @@
-Copyright 2015-2023 Institute of Technical Acoustics (ITA), RWTH Aachen University
+Copyright 2015-2024 Institute of Technical Acoustics (ITA), RWTH Aachen University
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use files of this project except in compliance with the License.
diff --git a/README.md b/README.md
index 0ef06e83fef50b12b50ae785eb6f012e97c57320..4ea7f57e67944aa7e95b7452406f973be853d01c 100644
--- a/README.md
+++ b/README.md
@@ -1,53 +1,97 @@
 # VAPython
 
-VAPython is a C++ extension for the Python3 interpreter that provides a (networked) interface to VA.
+VAPython is a pure Python binding for controlling a [Virtual Acoustics (VA)](https://www.virtualacoustics.org/) server.
 
-## License
+It utilizes the gRPC protocol to communicate with the VA server and provides a Pythonic interface to the VA API.
+This is achieved through the use of [betterproto](https://github.com/danielgtaylor/python-betterproto).
 
-Copyright 2015-2023 Institute of Technical Acoustics (ITA), RWTH Aachen University
+## Requirements
 
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use files of this project except in compliance with the License.
-You may obtain a copy of the License at
+Python 3.9 or higher.
 
-<http://www.apache.org/licenses/LICENSE-2.0>
+## Install
 
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
+The downloaded package contains a wheel file that can be installed using pip:
 
-## Requirements
+```bash
+pip install VAPython-<version>-py3-none-any.whl
+```
 
-Python 3.8 or higher.
+where `<version>` is the version of the package, e.g. `v2024a`.
 
-## Install
+Once installed, the package can be imported and used like this:
+
+```python
+from vapython import VA
+
+va = VA()    # create a VA object
+va.connect() # connect to the VA server
+```
+
+This way, the Python commands will be comparable to the commands of VAMatlab.
+
+## Examples
+
+This binding includes examples that demonstrate how to use the API.
+These can be found in the `examples` directory of the package.
+
+When VAPython is installed, a list of examples can be shown using the example runner:
+```bash
+python -m vapython.examples
+```
+By entering the respective number, one of the examples can be selected and opened.
+Note that this will use the default application for `.py` files that is registered with your operating system.
+This might not be a text editor.
+
+Alternatively, a specific example can directly be executed using the respective name:
+```bash
+python -m vapython.examples.<example_name>
+```
 
-The VA binary packages come with a wheel installer.
-The wheel supports all versions above 3.8.
-It can be installed using pip from the respective main folder with:
+## Development guide
+
+VAPython uses [hatch](https://hatch.pypa.io/dev/) for packaging and building.
+
+Building in this case means generating the Python files from the proto files and rendering the template for the wrapper.
+In case changes are made to the proto files, the build scripts or the template, the build process has to be triggered manually.
+This can be done by running the following command in the root directory of the project:
 
 ```bash
-pip install VAPython-2022.0-cp38-abi3-win_amd64.whl
+hatch build
 ```
 
-Remember to adapt the release version (here `2022.0`) in the command above.
-Afterwards, you can start a VAServer and try the `va_example_simple.py` script in the *example* folder.
+Optionally,
 
-## Quick build guide
+```bash
+hatch build -t wheel
+```
 
-VAPython can be built from the [VA](https://git.rwth-aachen.de/ita/VA) base project.
-Further build instructions can be found [here](https://git.rwth-aachen.de/ita/ITACoreLibs/-/wikis/home#build-guide).
-During configuration, the variable `ITA_VA_WITH_BINDING_PYTHON` must be set to enable the build for VAPython.
-Afterwards, the CMake target for VAPython can be built or a wheel generated with the `wheel` target.
+can be used to only build a wheel package (and save some time).
 
-## Using
+When only changes are made to the Python files, the build process is not necessary.
+In this case, it is recommended to use the default environment of hatch, which includes an editable install of the package.
+This is also the environment in which the tests are run.
 
-Within Python, the module is called `VAPython`. It is generally recommended to import using the *va* alias:
+### Running the examples
 
-```python
-import VAPython as va
+When developing and using hatch, the examples runner can also be called via hatch:
+
+```bash
+hatch run examples
 ```
 
-This way, the Python commands will be mostly comparable to the commands of VAMatlab.
+## License
+
+Copyright 2015-2024 Institute of Technical Acoustics (ITA), RWTH Aachen University
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use files of this project except in compliance with the License.
+You may obtain a copy of the License at
+
+<http://www.apache.org/licenses/LICENSE-2.0>
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/docs/css/custom.css b/docs/css/custom.css
new file mode 100644
index 0000000000000000000000000000000000000000..b837b8fd38bb3ba71deecac9bc2899b05911a774
--- /dev/null
+++ b/docs/css/custom.css
@@ -0,0 +1,5 @@
+/* Indentation. */
+div.doc-contents:not(.first) {
+    padding-left: 25px;
+    border-left: .05rem solid var(--md-typeset-table-color);
+  }
\ No newline at end of file
diff --git a/docs/docstr_coverage_badge.svg b/docs/docstr_coverage_badge.svg
new file mode 100644
index 0000000000000000000000000000000000000000..56ebe18bd7caccd4eb1f7eb72f42abd353bad76c
--- /dev/null
+++ b/docs/docstr_coverage_badge.svg
@@ -0,0 +1,20 @@
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="142" height="20">
+    <linearGradient id="s" x2="0" y2="100%">
+        <stop offset="0" stop-color="#bbb" stop-opacity=".1"/>
+        <stop offset="1" stop-opacity=".1"/>
+    </linearGradient>
+    <clipPath id="r">
+        <rect width="142" height="20" rx="3" fill="#fff"/>
+    </clipPath>
+    <g clip-path="url(#r)">
+        <rect width="99" height="20" fill="#555"/>
+        <rect x="99" width="43" height="20" fill="#97CA00"/>
+        <rect width="142" height="20" fill="url(#s)"/>
+    </g>
+    <g fill="#fff" text-anchor="middle" font-family="Verdana,Geneva,DejaVu Sans,sans-serif" font-size="110">
+        <text x="505" y="150" fill="#010101" fill-opacity=".3" transform="scale(.1)" textLength="890">docstr-coverage</text>
+        <text x="505" y="140" transform="scale(.1)" textLength="890">docstr-coverage</text>
+        <text x="1195" y="150" fill="#010101" fill-opacity=".3" transform="scale(.1)">93%</text>
+        <text x="1195" y="140" transform="scale(.1)">93%</text>
+    </g>
+</svg>
\ No newline at end of file
diff --git a/docs/index.md b/docs/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..d1baf50e03a9507b1f13ea6304091915ec19cc0e
--- /dev/null
+++ b/docs/index.md
@@ -0,0 +1,29 @@
+# VAPython
+
+![doccoverage](docstr_coverage_badge.svg)
+
+This is the API documentation for the VAPython package.
+VAPython is a Python package that provides a Pythonic interface to [Virtual Acoustics (VA)](https://www.virtualacoustics.org/VA/).
+VA is a real-time auralization framework for scientific research providing modules and interfaces for experiments and demonstrations. It is open source and fully controllable, enabling reproducible research.
+
+## Prototyping
+
+You will find, that certain functionality seems to be implemented in multiple methods with different input parameters.
+This is because VA interfaces make use of prototyping methods, which usually have names similar to
+`va.set_..._parameters()` or `va.get_..._parameters()`. More information is given
+[here](https://www.virtualacoustics.org/VA/documentation/control/#prototyping).
+
+
+::: vapython.va.VA
+
+::: vapython._helper.possible_auralization_modes
+    handler: python
+    options:
+        show_if_no_docstring: true
+        line_length: 60
+
+::: vapython._helper.default_auralization_mode
+    handler: python
+    options:
+        show_if_no_docstring: true
+        line_length: 60
diff --git a/examples/jupyter/va_core_controller.ipynb b/examples/jupyter/va_core_controller.ipynb
deleted file mode 100644
index e13f7aaab74d991d58f431b0af9ffca95dd5434d..0000000000000000000000000000000000000000
--- a/examples/jupyter/va_core_controller.ipynb
+++ /dev/null
@@ -1,531 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# VA core controller\n",
-    "This is an example how to control global VA core functionality"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Prerequisites"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 1,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Successfully connected, server core version is: VACore v2017.d (debug)\n"
-     ]
-    }
-   ],
-   "source": [
-    "import sys\n",
-    "sys.path.append( '../../Lib/site-packages' )\n",
-    "sys.path.append( '../../dist/Lib/site-packages' )\n",
-    "import ipywidgets as widgets\n",
-    "import VAPython as va\n",
-    "if not va.connect() :\n",
-    "    raise 'Could not connect to local VA server'\n",
-    "else :\n",
-    "    print( 'Successfully connected, server core version is: ' + va.get_version() )"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Control"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Output"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### Mute / unmute\n",
-    "You can mute and unmute the entire audio output by using `set_output_muted` and receive the current setting by `is_output_muted`. The setter uses the optional argument `True` or `False` and will mute the output if no argument is passed."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 2,
-   "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "application/vnd.jupyter.widget-view+json": {
-       "model_id": "ab9e1c96bb1b4c2aaea9c95d6ee930e8",
-       "version_major": 2,
-       "version_minor": 0
-      },
-      "text/plain": [
-       "A Jupyter Widget"
-      ]
-     },
-     "metadata": {},
-     "output_type": "display_data"
-    }
-   ],
-   "source": [
-    "mute_output_button = widgets.ToggleButton( description = 'Output muted', value = va.get_output_muted() )\n",
-    "\n",
-    "def on_mute_button_clicked( b ) :\n",
-    "    if b.name == 'value' :\n",
-    "        va.set_output_muted( b.new ) # True if toggle button appears 'active'\n",
-    "\n",
-    "mute_output_button.observe( on_mute_button_clicked )\n",
-    "display( mute_output_button )"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### Gain\n",
-    "The output gain or output volume of VA can be controlled by `set_output_gain` and received by `get_output_gain`. Gains or volumes are defined as a factore between 0 and 1. The same functions also exist for the audio inputs of the sound device."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 1,
-   "metadata": {},
-   "outputs": [
-    {
-     "ename": "NameError",
-     "evalue": "name 'widgets' is not defined",
-     "output_type": "error",
-     "traceback": [
-      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
-      "\u001b[1;31mNameError\u001b[0m                                 Traceback (most recent call last)",
-      "\u001b[1;32m<ipython-input-1-d87da51b8000>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m()\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m output_gain_slider = widgets.FloatSlider( \n\u001b[0m\u001b[0;32m      2\u001b[0m     \u001b[0mvalue\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mva\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mget_output_gain\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      3\u001b[0m     \u001b[0mdescription\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;34m'Output gain:'\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      4\u001b[0m     \u001b[0mmin\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;36m0.0\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      5\u001b[0m     \u001b[0mmax\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;36m1.0\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
-      "\u001b[1;31mNameError\u001b[0m: name 'widgets' is not defined"
-     ]
-    }
-   ],
-   "source": [
-    "output_gain_slider = widgets.FloatSlider( \n",
-    "    value = va.get_output_gain(),\n",
-    "    description = 'Output gain:',\n",
-    "    min = 0.0,\n",
-    "    max = 1.0,\n",
-    "    step = 0.1,\n",
-    "    readout = True,\n",
-    "    readout_format = '.1f' )\n",
-    "\n",
-    "def on_output_gain_changed( s ) :\n",
-    "    if s.name == 'value' :\n",
-    "        va.set_output_gain( s.new )\n",
-    "\n",
-    "output_gain_slider.observe( on_output_gain_changed )\n",
-    "display( output_gain_slider )"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### Global auralization mode\n",
-    "\n",
-    "The auralization mode is a bundle of flags to control the acoustic phenomena that should be considered during audio rendering. It's purpose is to demonstrate the audibility of certain aspects, like sound source directivity.\n",
-    "Auralization mode can be set globally, but also individually for rendering modules, sources and receivers."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 4,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "DS, ER, DD, DIR, AA, TV, SC, DIF, NF, DP, SL, TR, AB\n",
-      "Direct sound, early reflections, diffuse decay, source directivity, air absorption, atmospheric temporal variations, scattering, diffraction, near-field effects, doppler shifts, spherical spreading loss, transmission, absorption\n"
-     ]
-    }
-   ],
-   "source": [
-    "global_am = va.get_global_auralization_mode()\n",
-    "print( global_am )\n",
-    "global_am_long = va.get_global_auralization_mode( False )\n",
-    "print( global_am_long )"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Macros\n",
-    "\n",
-    "Macros can be defined to make your life easier. Don't mess around with file pathes too much, use macros. Don't rename output file names for recording and other exported information, use macros. You can test your macros using th method `substitute_macro` (see below), but you don't have to do it yourself. VA will always substitute macros where possible."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 5,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "$(DefaultHRIR): HRIR\\\\ITA-Kunstkopf_HRIR_AP11_Pressure_Equalized_3x3_256.v17.ir.daff\n",
-      "$(ProjectName): MyVirtualAcousticsProject\n",
-      "$(data): $(data)\n",
-      "$(big_data_dir): C:\\\\data\n",
-      "$(conf): $(conf)\n"
-     ]
-    }
-   ],
-   "source": [
-    "print( '$(DefaultHRIR): ' + va.substitute_macro( '$(DefaultHRIR)' ) )\n",
-    "print( '$(ProjectName): ' + va.substitute_macro( '$(ProjectName)' ) )\n",
-    "print( '$(data): ' + va.substitute_macro( '$(data)' ) )\n",
-    "print( '$(big_data_dir): ' + va.substitute_macro( '$(big_data_dir)' ) )\n",
-    "print( '$(conf): ' + va.substitute_macro( '$(conf)' ) )"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Paths\n",
-    "\n",
-    "Using search path is encouraged, as it makes it easier to move from one PC to another. You can get the available search paths and also add new search paths. Paths are always bound to the server PC."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 6,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "{'path_0': 'C:/data', 'path_1': 'C:/dev/VA/VACore/conf', 'path_2': 'C:/dev/VA/VACore/data', 'path_3': 'C:/data/InsideSceneData'}\n",
-      "client working directory: C:\\dev\\VA\\VAPython\\examples\\jupyter\n"
-     ]
-    }
-   ],
-   "source": [
-    "print( va.get_search_paths() )\n",
-    "\n",
-    "import os\n",
-    "current_working_dir = os.getcwd()\n",
-    "print( \"client working directory: \" + current_working_dir )\n",
-    "va.add_search_path( current_working_dir ); # only makes sense if client and server are running on same PC"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Reset\n",
-    "Use `reset` to reset the entire scene."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 7,
-   "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "application/vnd.jupyter.widget-view+json": {
-       "model_id": "f7649c3b43d646d88712e9fea5ad8094",
-       "version_major": 2,
-       "version_minor": 0
-      },
-      "text/plain": [
-       "A Jupyter Widget"
-      ]
-     },
-     "metadata": {},
-     "output_type": "display_data"
-    }
-   ],
-   "source": [
-    "reset_button = widgets.Button( description = 'Reset VA server' )\n",
-    "\n",
-    "def on_reset_button_clicked( b ) :\n",
-    "    va.reset()\n",
-    "    \n",
-    "reset_button.on_click( on_reset_button_clicked )\n",
-    "display( reset_button )"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Modules\n",
-    "You can interact with any registered VA module using a magic struct. In Python, this struct is represented by a (nested) dictionary, that is translated to a VAStruct. This struct can be used to call modules and receive information in return. It is used to change any setting in a registered module. This way, no new interface methods for prototyping have to be added during development and testing."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 8,
-   "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "application/vnd.jupyter.widget-view+json": {
-       "model_id": "fb3c42c0da5f463c98a2f51e91095964",
-       "version_major": 2,
-       "version_minor": 0
-      },
-      "text/plain": [
-       "A Jupyter Widget"
-      ]
-     },
-     "metadata": {},
-     "output_type": "display_data"
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Calling BinauralFreeField:MyBinauralFreeField\n",
-      "Module returned an exception, could not get help\n",
-      "Calling VACore\n"
-     ]
-    }
-   ],
-   "source": [
-    "allmods = va.get_modules()\n",
-    "\n",
-    "modnames = list()\n",
-    "for mod in allmods[:] :\n",
-    "    modnames.append( mod[\"name\"] )\n",
-    "mods_dropdown_menu = widgets.Dropdown( options=modnames )\n",
-    "\n",
-    "mod_call_button = widgets.Button( description = 'Call' )\n",
-    "mod_widget_box = widgets.HBox( [ mods_dropdown_menu, mod_call_button ] )\n",
-    "\n",
-    "def on_mod_call( b ) :\n",
-    "    mod = allmods[ mods_dropdown_menu.index ]\n",
-    "    print( 'Calling ' + mod[\"name\"] )\n",
-    "    modnames.append( mod[\"name\"] )\n",
-    "    try :\n",
-    "        va.call_module( mod[\"name\"], { 'help': True } )\n",
-    "    except :\n",
-    "        print( 'Module returned an exception, could not get help' )\n",
-    "        \n",
-    "mod_call_button.on_click( on_mod_call )\n",
-    "\n",
-    "display( mod_widget_box )"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Rendering modules\n",
-    "Rendering modules are special modules that can also be listed using `get_rendering_modules`. They can be muted/unmuted individually and also have an own output gain control. Additionally all rendering modules have specialized parameter setter and getter."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 9,
-   "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "application/vnd.jupyter.widget-view+json": {
-       "model_id": "5a0ed7d50a8146ab8e54740b6f2c44c4",
-       "version_major": 2,
-       "version_minor": 0
-      },
-      "text/plain": [
-       "A Jupyter Widget"
-      ]
-     },
-     "metadata": {},
-     "output_type": "display_data"
-    }
-   ],
-   "source": [
-    "allrendmods = va.get_rendering_modules();\n",
-    "\n",
-    "\n",
-    "# Dropdown menu\n",
-    "rendmodnames = list()\n",
-    "for mod in allrendmods[:] :\n",
-    "    rendmodnames.append( mod[\"id\"] )\n",
-    "rendmods_dropdown_menu = widgets.Dropdown( options=rendmodnames )\n",
-    "\n",
-    "\n",
-    "# Mute toggle button\n",
-    "rendmod_mute_button = widgets.ToggleButton( description = 'Mute' )\n",
-    "\n",
-    "def on_rendmod_mute_button_clicked( b ) :\n",
-    "    rendmod = allrendmods[ rendmods_dropdown_menu.index ]\n",
-    "    if b.name == 'value' :\n",
-    "        va.set_rendering_module_muted( mod[\"id\"], b.new )\n",
-    "        \n",
-    "rendmod_mute_button.observe( on_rendmod_mute_button_clicked )\n",
-    "\n",
-    "\n",
-    "# Gain slider\n",
-    "rendmod_gain_slider = widgets.FloatSlider( \n",
-    "    value = 1.0,\n",
-    "    description = 'Gain:',\n",
-    "    min = 0.0,\n",
-    "    max = 1.0,\n",
-    "    step = 0.1,\n",
-    "    readout = True,\n",
-    "    readout_format = '.1f' )\n",
-    "\n",
-    "def on_rendmod_gain_changed( s ) :\n",
-    "    rendmod = allrendmods[ rendmods_dropdown_menu.index ]\n",
-    "    if s.name == 'value' :\n",
-    "        va.set_rendering_module_gain( rendmod[\"id\"], s.new )\n",
-    "\n",
-    "rendmod_gain_slider.observe( on_rendmod_gain_changed )\n",
-    "\n",
-    "\n",
-    "# Parameter getter\n",
-    "rendmod_button_params = widgets.Button( description = 'Parameters' )\n",
-    "\n",
-    "def on_rendmod_parameter( b ) :\n",
-    "    rendmod = allrendmods[ rendmods_dropdown_menu.index ]\n",
-    "    print( va.get_rendering_module_parameters( rendmod[\"id\"] ) )\n",
-    "    \n",
-    "rendmod_button_params.on_click( on_rendmod_parameter )\n",
-    "\n",
-    "\n",
-    "# Horizontal box with widgets\n",
-    "rendmod_widget_box = widgets.HBox( [ rendmods_dropdown_menu, rendmod_mute_button, rendmod_gain_slider, rendmod_button_params ] )\n",
-    "display( rendmod_widget_box )"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Reproduction modules\n",
-    "Reproduction modules are special modules that can also be listed using `get_reproduction_modules`. They can be muted/unmuted individually and also have an own output gain control. Additionally all reproduction modules have specialized parameter setter and getter."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 11,
-   "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "application/vnd.jupyter.widget-view+json": {
-       "model_id": "217211a8a5db48f3808a95f22bcdc056",
-       "version_major": 2,
-       "version_minor": 0
-      },
-      "text/plain": [
-       "A Jupyter Widget"
-      ]
-     },
-     "metadata": {},
-     "output_type": "display_data"
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "{}\n"
-     ]
-    }
-   ],
-   "source": [
-    "allrepmods = va.get_reproduction_modules();\n",
-    "\n",
-    "\n",
-    "# Dropdown menu\n",
-    "repmodnames = list()\n",
-    "for mod in allrepmods[:] :\n",
-    "    repmodnames.append( mod[\"id\"] )\n",
-    "repmods_dropdown_menu = widgets.Dropdown( options=repmodnames )\n",
-    "\n",
-    "\n",
-    "# Mute toggle button\n",
-    "repmod_mute_button = widgets.ToggleButton( description = 'Mute' )\n",
-    "\n",
-    "def on_repmod_mute_button_clicked( b ) :\n",
-    "    repmod = allrepmods[ repmods_dropdown_menu.index ]\n",
-    "    if b.name == 'value' :\n",
-    "        va.set_reproduction_module_muted( mod[\"id\"], b.new )\n",
-    "        \n",
-    "repmod_mute_button.observe( on_repmod_mute_button_clicked )\n",
-    "\n",
-    "\n",
-    "# Gain slider\n",
-    "repmod_gain_slider = widgets.FloatSlider( \n",
-    "    value = 1.0,\n",
-    "    description = 'Gain:',\n",
-    "    min = 0.0,\n",
-    "    max = 1.0,\n",
-    "    step = 0.1,\n",
-    "    readout = True,\n",
-    "    readout_format = '.1f' )\n",
-    "\n",
-    "def on_repmod_gain_changed( s ) :\n",
-    "    repmod = allrepmods[ repmods_dropdown_menu.index ]\n",
-    "    if s.name == 'value' :\n",
-    "        va.set_reproduction_module_gain( repmod[\"id\"], s.new )\n",
-    "\n",
-    "repmod_gain_slider.observe( on_repmod_gain_changed )\n",
-    "\n",
-    "\n",
-    "# Parameter getter\n",
-    "repmod_button_params = widgets.Button( description = 'Parameters' )\n",
-    "\n",
-    "def on_repmod_parameter( b ) :\n",
-    "    repmod = allrepmods[ repmods_dropdown_menu.index ]\n",
-    "    print( va.get_reproduction_module_parameters( repmod[\"id\"] ) )\n",
-    "    \n",
-    "repmod_button_params.on_click( on_repmod_parameter )\n",
-    "\n",
-    "\n",
-    "# Horizontal box with widgets\n",
-    "repmod_widget_box = widgets.HBox( [ repmods_dropdown_menu, repmod_mute_button, repmod_gain_slider, repmod_button_params ] )\n",
-    "display( repmod_widget_box )"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.6.6"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
diff --git a/examples/jupyter/va_experimental_renderer_example.ipynb b/examples/jupyter/va_experimental_renderer_example.ipynb
deleted file mode 100644
index 8042b565de9aca10d1d3c993f06d7a77870384f3..0000000000000000000000000000000000000000
--- a/examples/jupyter/va_experimental_renderer_example.ipynb
+++ /dev/null
@@ -1,395 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# VA experimental renderer example\n",
-    "VA has prototype renderers, that can be used for quick auralization of any given situation. A renderer of the **PrototypeGenericPath** class uses a uniform block convolution of required number of channels and filter length. For each source-listener-pair, a new convolution instance is provided. It can be updated using the parameter setter, i.e. the FIR filter can be updated in real-time with an impulse resonse (IR) in time domain directly out of Python (or Matlab). On server side, no files (except for the signal sources, if an audio file is used) have to be provided.\n",
-    "\n",
-    "The filter length, the number of channels and the number of source-listener-pairs are only limited by the computational power you can provide.\n",
-    "\n",
-    "This combination of a VA server with an experimental rendering module using the **PrototypeGenericPath** renderer can effectively be used for teaching purposes, i.e. to implement a real-time binaural auralization in Python or Matlab."
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Prerequisites"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 1,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Successfully connected, server core version is: VACore v2017.d (debug)\n"
-     ]
-    }
-   ],
-   "source": [
-    "import sys\n",
-    "#sys.path.append( '../../Lib/site-packages' )\n",
-    "sys.path.append( '../../dist/Lib/site-packages' )\n",
-    "import ipywidgets as widgets\n",
-    "import VAPython as va\n",
-    "if not va.connect() :\n",
-    "    raise 'Could not connect to local VA server'\n",
-    "else :\n",
-    "    print( 'Successfully connected, server core version is: ' + va.get_version() )"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Auralize room acoustics with a measured binaural room impulse response (BRIR)\n",
-    "### Configuration\n",
-    "You have to start a VA server, that instatiates a **PrototypeGenericPath** renderer with **two channels** and a filter length that is long enough to fit the reverberation time of the room you want to make audible, for example **2 seconds** (or **88200 samples** at a sampling frequency of 44.1 kHz). If you start the experimental server provided with a VA binary package, this configuration is already in place. See also `conf/VACore.experimental.ini` for further details.\n",
-    "### Rendering module name\n",
-    "Check out the rendering module name to know which rendering module to call. Use `get_rendering_modules` like this:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 2,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Experimental\n"
-     ]
-    }
-   ],
-   "source": [
-    "for rmod in va.get_rendering_modules() :\n",
-    "    if rmod['class'] == 'PrototypeGenericPath' :\n",
-    "        print( rmod['id'] )\n",
-    "rmod_name = 'Experimental' # alter this if you are using a different name"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Get renderer information\n",
-    "To receive useful information, renderer usually return available configurations if `get_renderer_parameters` is called without arguments. Try this:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 3,
-   "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "text/markdown": [
-       "\n",
-       " --- GenericPath renderer instance 'Experimental' ---\n",
-       "\n",
-       "[help]\n",
-       "If the call module struct contains a key with the name 'help', this help text will be shown and the return struct will be returned with the key name 'help'.\n",
-       "\n",
-       "[info]\n",
-       "If the call module struct contains a key with the name 'info', information on the static configuration of the renderer will be returned.\n",
-       "\n",
-       "[update]\n",
-       "For every successful path update, the VA source and sound receiver ID has to be passed like this:\n",
-       " receiver: <int>, the number of the sound receiver identifier\n",
-       " source: <int>, the number of the source identifier\n",
-       "\n",
-       "Updating the path filter (impulse response in time domain) for a sound receiver and a source can be performed in two ways:\n",
-       " a) using a path to a multi-channel WAV file:\n",
-       "    Provide a key with the name 'filepath' and the path to the WAV file (absolute or containing the macro '$(VADataDir)' or relative to the executable) [priority given to 'filepath' if b) also applies]\n",
-       " b) sending floating-point data for each channel\n",
-       "    Provide a key for each channel with the generic name 'ch#', where the hash is substituted by the actual number of channel (starting at 1), and the value to this key will contain floating point data (or a sample buffer). The call parameter struct does not necessarily have to contain all channels, also single channels will be updated if key is given.\n",
-       "\n",
-       "Note: the existence of the key 'verbose' will print update information at server console and will provide the update info as an 'info' key in the returned struct.\n"
-      ],
-      "text/plain": [
-       "<IPython.core.display.Markdown object>"
-      ]
-     },
-     "metadata": {},
-     "output_type": "display_data"
-    }
-   ],
-   "source": [
-    "rmod_generic_info = va.get_rendering_module_parameters( rmod_name )\n",
-    "\n",
-    "from IPython.display import Markdown, display\n",
-    "display( Markdown( rmod_generic_info[ 'help' ] ) )"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Or, now that we know we should use the `info` key, type"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 4,
-   "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "{'filterdelaysamples': 0,\n",
-       " 'irfilterlengthsamples': 88200,\n",
-       " 'numchannels': 2,\n",
-       " 'numpaths': 0}"
-      ]
-     },
-     "execution_count": 4,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "va.get_rendering_module_parameters( rmod_name, { 'info' : True } )"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Input data preparation\n",
-    "Let us quickly set up a virtual scene using input data from the Internet.\n",
-    "Download for example anechoic recordings directly from [here](http://www.openairlib.net/sites/default/files/anechoic/data/judebrereton/modern-clarinet-bb/mono/cl-mod-bb-piece-32.wav) and a binaural impulse response from [here](http://www.openairlib.net/sites/default/files/auralization/data/audiolab/lady-chapel-st-albans-cathedral/stereo/stalbans_a_binaural.wav). Either add the download folder as search path, or put the files where VA can find it (e.g. in the `data` folder)."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 5,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "va.add_search_path( '../../../VACore/data' )\n",
-    "va.add_search_path( 'C:\\dev\\VA\\VACore\\data' )\n",
-    "\n",
-    "signal_source_id = va.create_signal_source_buffer_from_file( 'cl-mod-bb-piece-32.wav' )\n",
-    "va.set_signal_source_buffer_playback_action_str( signal_source_id, 'play' )\n",
-    "va.set_signal_source_buffer_looping( signal_source_id, True )"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Creating the scene\n",
-    "To update a source-listener-pair, a scene should be set up."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 6,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Experimental sound source id: 1\n",
-      "Experimental listener id: 1\n"
-     ]
-    }
-   ],
-   "source": [
-    "sound_source_id = va.create_sound_source( 'PyExperimentalSoundSource' )\n",
-    "print( 'Experimental sound source id: ' + str( sound_source_id ) )\n",
-    "va.set_sound_source_signal_source( sound_source_id, signal_source_id )\n",
-    "receiver_id = va.create_sound_receiver( 'PyExperimentalListener' )\n",
-    "print( 'Experimental listener id: ' + str( sound_source_id ) )"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Updating paths\n",
-    "Now that we have a source-listener-pair established, we can update the impulse response of that path. To do so, we have to assembly a `dict` variable that provides the required information. This `dict` will be transmitted to the renderer and the update will be performed.\n",
-    "\n",
-    "### Setting a simple unequal two-channel dirac"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 7,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "{'receiver': 1, 'source': 1, 'ch1': [0.9, 0.0, 0.0, 0.0], 'ch2': [0.0, 0.0, -0.4, 0.0], 'verbose': True}\n"
-     ]
-    }
-   ],
-   "source": [
-    "update_dirac = dict()\n",
-    "update_dirac[ 'receiver' ] = receiver_id\n",
-    "update_dirac[ 'source' ] = sound_source_id\n",
-    "update_dirac[ 'ch1' ] = [ 0.9, 0.0,  0.0, 0.0 ] # Length of samples is arbitrary, here\n",
-    "update_dirac[ 'ch2' ] = [ 0.0, 0.0, -0.4, 0.0 ] # Length of samples is arbitrary, here\n",
-    "update_dirac[ 'verbose' ] = True; # Get information about update as a result\n",
-    "print( update_dirac )"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Now all we have to do is transmit the update task to the renderer"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 20,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "va.set_rendering_module_parameters( rmod_name, update_dirac )"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Update by loading from a file path\n",
-    "\n",
-    "It is not necessary to transmit an entire impulse response for each channel to the path you want to update. You can also use a file path for a single or all channels."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 14,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "{'receiver': 1, 'source': 1, 'filepath': 'stalbans_a_binaural.wav', 'verbose': True}\n"
-     ]
-    }
-   ],
-   "source": [
-    "update_filepath = dict()\n",
-    "update_filepath[ 'receiver' ] = receiver_id\n",
-    "update_filepath[ 'source' ] = sound_source_id\n",
-    "update_filepath[ 'filepath' ] = 'stalbans_a_binaural.wav'\n",
-    "#update_filepath[ 'channel' ] = 2 # ... in case you explicitly want to update a single channel with a mono IR file\n",
-    "update_filepath[ 'verbose' ] = True;\n",
-    "print( update_filepath )"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 16,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "va.set_rendering_module_parameters( rmod_name, update_filepath )"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Update by loading samples into Python and transmit IR\n",
-    "If you want to load an manipulate samples using Python, you can do the following. Make sure that the file is in the same folder of this notebook, or modify path accordingly.\n",
-    "\n",
-    "You can use `scipy` or `wave` to obtain data from a WAVE file, however sample type conversion might be an issue because they usually only provide integer type, and VA requires floating point samples. In this example, the input file is a 24bit signed integer."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 18,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Loaded an impulse response of 2 channel(s) with 264600 filter taps.\n"
-     ]
-    }
-   ],
-   "source": [
-    "import wave, struct\n",
-    "w = wave.open( 'stalbans_a_binaural.wav' )\n",
-    "raw_ir = w.readframes( w.getnframes() )\n",
-    "ir_length = w.getnframes()\n",
-    "ir_channels = w.getnchannels()\n",
-    "assert( w.getsampwidth() == 3 )\n",
-    "\n",
-    "# Deinterleave and convert sample type (slow implementation, but more easy to interpret)\n",
-    "ir = list()\n",
-    "for n in range( ir_channels ) :\n",
-    "    ir.append( [] )\n",
-    "    for i in range( ir_length ) :\n",
-    "        rbegin = 3 * ( n + i * ir_channels + 0 )\n",
-    "        rend   = 3 * ( n + i * ir_channels + 1 )\n",
-    "        sample_sint24 = raw_ir[ rbegin : rend ]\n",
-    "        sample_sint32 = sample_sint24 + ( b'\\0' if sample_sint24[2] < 128 else b'\\xff' )\n",
-    "        sample_float = ( struct.unpack( 'i', sample_sint32 )[0] ) / pow( 2, 24 - 1 )\n",
-    "        ir[ n ].append( sample_float )\n",
-    "\n",
-    "print( 'Loaded an impulse response of %i channel(s) with %i filter taps.' % ( len( ir ), len( ir[ 0 ] ) ) )"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 19,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "update_ir = dict()\n",
-    "update_ir[ 'receiver' ] = receiver_id\n",
-    "update_ir[ 'source' ] = sound_source_id\n",
-    "update_ir[ 'ch1' ] = ir[ 0 ]; # Requires ir samples to be floating point, so sample type conversion might be required\n",
-    "update_ir[ 'ch2' ] = ir[ 1 ]; # Requires ir samples to be floating point, so sample type conversion might be required\n",
-    "update_ir[ 'verbose' ] = True;"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 21,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "va.set_rendering_module_parameters( rmod_name, update_ir )"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.6.2"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
diff --git a/examples/jupyter/va_notebook_example.ipynb b/examples/jupyter/va_notebook_example.ipynb
deleted file mode 100644
index 44d826fb2cf09c08c285193d20a54a4fb3e7f25e..0000000000000000000000000000000000000000
--- a/examples/jupyter/va_notebook_example.ipynb
+++ /dev/null
@@ -1,185 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# VA notebook test\n",
-    "\n",
-    "This is a simple test program that demonstrates the use of the VA Python binding within a jupyter notebook environment.\n",
-    "\n",
-    "## Before we start\n",
-    "Before we start scripting, let's make VA available for us. If it is not installed and available from everywhere, this is how you can add the `va` module folder:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "scrolled": false
-   },
-   "outputs": [],
-   "source": [
-    "import sys\n",
-    "sys.path.append( \"../../dist/Lib/site-packages\" ) # build\n",
-    "sys.path.append( \"../Lib/site-packages\" ) # deploy"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Start\n",
-    "\n",
-    "We start by making va available for our script"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import VAPython as va"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Ok. Now let's try to connect to the VA server that should be running on the same computer where this jupyter notebook is running. "
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "We start by finding out where we are currently working and list the files available, i.e. to identify files that can be used as HRIR, audio file or directivity."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "connection_status = va.connect()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "We can check the connection by the following line"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "connected = va.is_connected()\n",
-    "if connected :\n",
-    "    print( \"VA connection ready!\" )\n",
-    "else :\n",
-    "    print( \"Something went wrong.\" )"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "... and also use different server names and ports"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "if not connected :\n",
-    "    va.connect( \"localhost\", 12340 ) # these are the default arguments"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import os\n",
-    "current_working_dir = os.getcwd()\n",
-    "print( \"working directory: \" + current_working_dir )"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Now lets add this folder to VA. This means that VA can find files that reside in this location. All you have to do is use a file name or a relative path from this base path. You can add as much folders as you like."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "va.add_search_path( current_working_dir )"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "vamods = va.get_modules()\n",
-    "print( vamods )"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "hw = va.get_hardware_configuration()\n",
-    "print( hw )"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "core_conf = va.get_core_configuration()\n",
-    "print( core_conf )"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.6.2"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
diff --git a/examples/jupyter/va_scene_controller.ipynb b/examples/jupyter/va_scene_controller.ipynb
deleted file mode 100644
index 868c016579c2040791cedaa249270c9a4bf99b97..0000000000000000000000000000000000000000
--- a/examples/jupyter/va_scene_controller.ipynb
+++ /dev/null
@@ -1,189 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# VA scene controller\n",
-    "This is a simple example and utility notebook that demonstrates the use of scene control mechanisms."
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Prerequisites\n",
-    "You can ignore this part, it is for preparation purposes only."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 1,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import sys\n",
-    "sys.path.append( '../../Lib/site-packages' )\n",
-    "sys.path.append( '../../dist/Lib/site-packages' )\n",
-    "import ipywidgets as widgets\n",
-    "import VAPython as va\n",
-    "if not va.connect() :\n",
-    "    raise 'Could not connect to VA server on localhost'"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {
-    "collapsed": true
-   },
-   "source": [
-    "### Sound receiver"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 2,
-   "metadata": {},
-   "outputs": [
-    {
-     "ename": "Exception",
-     "evalue": "Not implemented (error code 8)",
-     "output_type": "error",
-     "traceback": [
-      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
-      "\u001b[1;31mException\u001b[0m                                 Traceback (most recent call last)",
-      "\u001b[1;32m<ipython-input-2-28e67d0dde0f>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m()\u001b[0m\n\u001b[0;32m      8\u001b[0m     description = 'Sound receivers' )\n\u001b[0;32m      9\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0msound_receiver_ids\u001b[0m \u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 10\u001b[1;33m     \u001b[0mfirst_sound_receiver_pos\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mva\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mget_sound_receiver_position\u001b[0m\u001b[1;33m(\u001b[0m \u001b[0msound_receiver_ids\u001b[0m\u001b[1;33m[\u001b[0m \u001b[1;36m0\u001b[0m \u001b[1;33m]\u001b[0m \u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m     11\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     12\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0mon_sound_receiver_update\u001b[0m\u001b[1;33m(\u001b[0m \u001b[0mw\u001b[0m \u001b[1;33m)\u001b[0m \u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
-      "\u001b[1;31mException\u001b[0m: Not implemented (error code 8)"
-     ]
-    }
-   ],
-   "source": [
-    "sound_receiver_ids = va.get_sound_receiver_ids()\n",
-    "sound_receivers_dict = {}\n",
-    "for sound_receiver_id in sound_receiver_ids :\n",
-    "    sound_receivers_dict.update( { va.get_sound_receiver_name( sound_receiver_id ) : sound_receiver_id } )\n",
-    "\n",
-    "sound_receivers_dropdown_menu = widgets.Dropdown( \n",
-    "    options = sound_receivers_dict,\n",
-    "    description = 'Sound receivers' )\n",
-    "if sound_receiver_ids :\n",
-    "    first_sound_receiver_pos = va.get_sound_receiver_position( sound_receiver_ids[ 0 ] )\n",
-    "\n",
-    "def on_sound_receiver_update( w ) :\n",
-    "    sound_receiver_id = sound_receivers_dropdown_menu.value\n",
-    "    if sound_receiver_id :\n",
-    "        sound_receiver_pos = [ sound_receivers_input_pos_x.value,  sound_receivers_input_pos_y.value,  sound_receivers_input_pos_z.value ]\n",
-    "        va.set_sound_receiver_position( sound_receiver_id, sound_receiver_pos )\n",
-    "\n",
-    "sound_receivers_input_pos_x = widgets.FloatText( description = 'X' )\n",
-    "sound_receivers_input_pos_y = widgets.FloatText( description = 'Y' )\n",
-    "sound_receivers_input_pos_z = widgets.FloatText( description = 'Z' )\n",
-    "\n",
-    "sound_receiver_update_button = widgets.Button( description = 'Update' )\n",
-    "sound_receiver_update_button.on_click( on_sound_receiver_update )\n",
-    "\n",
-    "def on_sound_receiver_select( d ) :\n",
-    "    if d.type == 'change' and type( d.new ) is str :\n",
-    "        sound_receiver_id = sound_receivers_dropdown_menu.options[ d.new ]        \n",
-    "        sound_receiver_pos = va.get_sound_receiver_position( sound_receiver_id )\n",
-    "        sound_receivers_input_pos_x.value = sound_receiver_pos[ 0 ]\n",
-    "        sound_receivers_input_pos_y.value = sound_receiver_pos[ 1 ]\n",
-    "        sound_receivers_input_pos_z.value = sound_receiver_pos[ 2 ]\n",
-    "        \n",
-    "sound_receivers_dropdown_menu.observe( on_sound_receiver_select )\n",
-    "\n",
-    "sound_receivers_input_widget_box = widgets.HBox( [ sound_receivers_dropdown_menu, sound_receivers_input_pos_x, sound_receivers_input_pos_y, sound_receivers_input_pos_z, sound_receiver_update_button ] )\n",
-    "display( sound_receivers_input_widget_box )"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {
-    "collapsed": true
-   },
-   "source": [
-    "### Sources"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 3,
-   "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "application/vnd.jupyter.widget-view+json": {
-       "model_id": "87f1cda6f53e44bbb8eb4e0af40b3b5b",
-       "version_major": 2,
-       "version_minor": 0
-      },
-      "text/plain": [
-       "A Jupyter Widget"
-      ]
-     },
-     "metadata": {},
-     "output_type": "display_data"
-    }
-   ],
-   "source": [
-    "sound_source_ids = va.get_sound_source_ids()\n",
-    "sound_sources_dict = {}\n",
-    "for source_id in sound_source_ids :\n",
-    "    sound_sources_dict.update( { va.get_sound_source_name( source_id ) : source_id } )\n",
-    "\n",
-    "sound_sources_dropdown_menu = widgets.Dropdown( \n",
-    "    options = sound_sources_dict,\n",
-    "    description = 'Sound sources' )\n",
-    "if sound_source_ids :\n",
-    "    first_sound_source_pos = va.get_sound_source_position( sound_source_ids[ 0 ] )\n",
-    "\n",
-    "def on_sound_source_update( w ) :\n",
-    "    sound_source_id = sound_sources_dropdown_menu.value\n",
-    "    if sound_source_id :\n",
-    "        sound_source_pos = [ sound_source_input_pos_x.value,  sound_source_input_pos_y.value,  sound_source_input_pos_z.value ]\n",
-    "        va.set_sound_source_position( sound_source_id, sound_source_pos )\n",
-    "\n",
-    "sound_source_input_pos_x = widgets.FloatText( description = 'X' )\n",
-    "sound_source_input_pos_y = widgets.FloatText( description = 'Y' )\n",
-    "sound_source_input_pos_z = widgets.FloatText( description = 'Z' )\n",
-    "\n",
-    "sound_source_update_button = widgets.Button( description = 'Update' )\n",
-    "sound_source_update_button.on_click( on_sound_source_update )\n",
-    "\n",
-    "def on_sound_source_select( d ) :\n",
-    "    if d.type == 'change' and type( d.new ) is str :\n",
-    "        sound_source_id = sound_sources_dropdown_menu.options[ d.new ]        \n",
-    "        sound_source_pos = va.get_sound_source_position( sound_source_id )\n",
-    "        sound_source_input_pos_x.value = sound_source_pos[ 0 ]\n",
-    "        sound_source_input_pos_y.value = sound_source_pos[ 1 ]\n",
-    "        sound_source_input_pos_z.value = sound_source_pos[ 2 ]\n",
-    "        \n",
-    "sound_sources_dropdown_menu.observe( on_sound_source_select )\n",
-    "\n",
-    "sound_source_input_widget_box = widgets.HBox( [ sound_sources_dropdown_menu, sound_source_input_pos_x, sound_source_input_pos_y, sound_source_input_pos_z, sound_source_update_button ] )\n",
-    "display( sound_source_input_widget_box )"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.6.2"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
diff --git a/examples/jupyter/va_signal_sources_controller.ipynb b/examples/jupyter/va_signal_sources_controller.ipynb
deleted file mode 100644
index 9757729b2dfa83d514a62168c177792f5c2483b0..0000000000000000000000000000000000000000
--- a/examples/jupyter/va_signal_sources_controller.ipynb
+++ /dev/null
@@ -1,117 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# VA core controller\n",
-    "This is a simple example how to manage signal source.\n",
-    "> This script is intended to be used during a VA session with a scene already set up."
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Prerequisites"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 7,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "c:\\users\\jonas\\appdata\\local\\programs\\python\\python36\\lib\\site-packages\\ipykernel_launcher.py:5: RuntimeWarning: Was still connected, forced disconnect.\n",
-      "  \"\"\"\n"
-     ]
-    }
-   ],
-   "source": [
-    "import sys\n",
-    "sys.path.append( '../../Lib/site-packages' )\n",
-    "import ipywidgets as widgets\n",
-    "import VAPython as va\n",
-    "if not va.connect() :\n",
-    "    raise Exception( 'Could not connect to local VA server' )"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Signal sources\n",
-    "In VA, signal sources represent the emitted sound that is released from a virtual sound source. There are a few different types available, and most of them can be created and controlled dynamically."
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### List available signal sources"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 25,
-   "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "application/vnd.jupyter.widget-view+json": {
-       "model_id": "d433a63f9d0b482cb84ff48dd0045d6e",
-       "version_major": 2,
-       "version_minor": 0
-      },
-      "text/plain": [
-       "A Jupyter Widget"
-      ]
-     },
-     "metadata": {},
-     "output_type": "display_data"
-    }
-   ],
-   "source": [
-    "signal_sources = va.get_signal_source_infos()\n",
-    "if not signal_sources :\n",
-    "    raise Exception( 'There are no signal sources available, stopping here.' )\n",
-    "    \n",
-    "signal_sources_type = list()\n",
-    "for signal_source in signal_sources :\n",
-    "    signal_sources_type.append( signal_source['type'] )\n",
-    "\n",
-    "children = [ widgets.Text( description = name ) for name in signal_sources_type ]\n",
-    "tab = widgets.Tab()\n",
-    "tab.children = children\n",
-    "for i in range( len( children ) ):\n",
-    "    tab.set_title( i, signal_sources_type[ i ] )\n",
-    "\n",
-    "display( tab )"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.6.2"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
diff --git a/examples/jupyter/va_simple_acoustic_scene.ipynb b/examples/jupyter/va_simple_acoustic_scene.ipynb
deleted file mode 100644
index 18fd9bb31f9b7c846a4e81caea2e21c663f24a9f..0000000000000000000000000000000000000000
--- a/examples/jupyter/va_simple_acoustic_scene.ipynb
+++ /dev/null
@@ -1,248 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# VA simple acoustic scene\n",
-    "This is an example notebook how to create a simple acoustic scene in VA using Python."
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### Prerequisites\n",
-    "If VA Python extension is not installed, add the folder manually."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import sys\n",
-    "sys.path.append( '../../Lib/site-packages' ) # deploy\n",
-    "sys.path.append( '../../dist/Lib/site-packages' ) # dev\n",
-    "import os\n",
-    "print( 'Current working directory:', os.getcwd() )\n",
-    "import VAPython as va\n",
-    "print( 'Successfully loaded VA Python extension')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Connect"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "if not va.connect( 'localhost' ) :\n",
-    "    raise 'Could not connect to server on localhost, not running?'"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Reset VA to clear the scene"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "va.reset()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Control output gain"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "va.set_output_gain( 0.25 )"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Add the current working directory and any further relative or absolute directories where you want to put resource files. VA provides search paths where to look for any file. From now on, only use relative paths or macros to paths.\n",
-    "> Pathes are relevant on server side, not on a remote client. The files must be available on the computer **where the VA application is running**!"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "va.add_search_path( os.getcwd() );\n",
-    "va.add_search_path( 'C:\\dev\\VA\\VACore\\data' );"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Signal source\n",
-    "Create a signal source from a file and start playback with looping mode"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "signal_source_id = va.create_signal_source_buffer_from_file( '$(DemoSound)' )\n",
-    "va.set_signal_source_buffer_playback_action_str( signal_source_id, 'play' )\n",
-    "va.set_signal_source_buffer_looping( signal_source_id, True )"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Virtual sound source\n",
-    "Create a virtual sound source with any name and set a position"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "sound_source_id = va.create_sound_source( 'PySoundSource' )\n",
-    "va.set_sound_source_position( sound_source_id, ( 1.5, 1.7, -1.1 ) )"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Connect signal and source\n",
-    "Connect the signal source to the virtual sound source"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "va.set_sound_source_signal_source( sound_source_id, signal_source_id )"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Head-related transfer function / Head-related impulse response\n",
-    "Load an HRIR (time domain representation of an HRTF) as a directivity that will be assigned to a sound receiver (aka listener). See [OpenDAFF](http://www.opendaff.org) for more information.\n",
-    "> We use a macro `DefaultHRIR` here, that is usually available for a VA core."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "hrir_id = va.create_directivity_from_file( '$(DefaultHRIR)' )"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Virtual listener\n",
-    "Create a sound receiver with arbitrary name, assign the HRTF/HRIR and set a position."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "listener_id = va.create_sound_receiver( 'PyListener' )\n",
-    "va.set_sound_receiver_position( listener_id, ( 0, 1.7, 0 ) )\n",
-    "va.set_sound_receiver_orientation_vu( listener_id, ( 0, 0, -1, ), ( 0, 1, 0 ) ) #  Default view is to -Z (OpenGL)\n",
-    "va.set_sound_receiver_directivity( listener_id, hrir_id )"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Active listener\n",
-    "Set an active listener. This concept is deprecated, but should be used for compatibility until it is removed."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "va.set_active_sound_receiver( listener_id )"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Disconnect"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "va.disconnect();"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.6.2"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
diff --git a/examples/va_example_simple.py b/examples/va_example_simple.py
deleted file mode 100644
index 5fbd34970f7a9b8d56ce3db8562a883201ce2316..0000000000000000000000000000000000000000
--- a/examples/va_example_simple.py
+++ /dev/null
@@ -1,27 +0,0 @@
-import os
-current_exec_dir = os.getcwd()
-
-# Make sure you installed the va module
-import VAPython as va
-
-va.connect() # localhost
-va.reset()
-va.set_output_gain( 0.25 )
-va.add_search_path( current_exec_dir ) # add current working path to find any file lying around here
-
-signal_source_id = va.create_signal_source_buffer_from_file( '$(DemoSound)' ) # Provide this file or modify file name and use your own
-
-sound_source_id = va.create_sound_source( 'PySoundSource' )
-va.set_sound_source_signal_source( sound_source_id, signal_source_id )
-va.set_sound_source_position( sound_source_id, ( 1, 1.2, -1 ) ) # OpenGL axes convention, direction is lower front-right from listener pos (s.b.)
-
-hrir_id = va.create_directivity_from_file( '$(DefaultHRIR)' )
-
-sound_receiver_id = va.create_sound_receiver( 'PyListener' )
-va.set_sound_receiver_directivity( sound_receiver_id, hrir_id )
-va.set_sound_receiver_position( sound_receiver_id, ( 0, 1.7, 0 ) ) # Ear height at 1.7m 
-
-va.set_signal_source_buffer_looping( signal_source_id, True )
-va.set_signal_source_buffer_playback_action_str( signal_source_id, 'play' )
-
-va.disconnect()
diff --git a/manual_tests/__init__.py b/manual_tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/manual_tests/test_connection.py b/manual_tests/test_connection.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c7485bb2f964ea6b3374130fb4ac9b1f3324d72
--- /dev/null
+++ b/manual_tests/test_connection.py
@@ -0,0 +1,58 @@
+from vapython import VA
+
+
+def callback(data):
+    print(data)
+
+
+va = VA()
+
+print("Connecting to VA...")
+
+va.connect()
+
+print("Check if the server lists a connected client by pressing 'c' in the server window.")
+
+input("Press Enter to continue with creating a sound receiver...")
+
+va.create_sound_receiver("receiver")
+
+print("Did the running server print the message 'Created sound receiver 'receiver' and assigned ID 1'?")
+
+input("Press Enter to continue with attaching an event handler, expect event output.\n to stop press Enter again...")
+
+va.attach_event_handler(callback)
+
+input("Press Enter to continue with detaching an event handler...")
+
+va.detach_event_handler(callback)
+
+print("Did the printing stop?")
+
+input("Press Enter to continue with creating a sound source...")
+
+va.create_sound_source("source")
+
+print("Did the running server print the message 'Created sound source 'source' and assigned ID 1'?")
+
+input("Press Enter to continue with disconnecting from the server...")
+
+va.disconnect()
+
+print("Check if the server does not list a connected client by pressing 'c' in the server window.")
+
+input("Press Enter to continue with reconnecting to the server...")
+
+va.connect()
+
+print("Check if the server lists a connected client by pressing 'c' in the server window.")
+
+input("Press Enter to continue and disconnect again...")
+
+va.disconnect()
+
+print("Check if the server does not list a connected client by pressing 'c' in the server window.")
+
+input("Press Enter to finish...")
+
+print("Finished.")
diff --git a/mkdocs.yml b/mkdocs.yml
new file mode 100644
index 0000000000000000000000000000000000000000..8f4ba25ceaa9110e3ca062880caeddcce0bc898c
--- /dev/null
+++ b/mkdocs.yml
@@ -0,0 +1,33 @@
+site_name: VAPython
+
+theme:
+  name: material
+
+extra_css:
+- css/custom.css
+
+nav:
+  - Home: index.md
+
+plugins:
+  - mkdocstrings:
+      handlers: 
+        python:
+          paths: [src]
+          options:
+            docstring_options:
+              ignore_init_summary: true
+            docstring_section_style: list
+            filters: ["!^_"]
+            inherited_members: true
+            merge_init_into_class: true
+            separate_signature: true
+            show_root_heading: true
+            show_root_full_path: false
+            show_signature_annotations: true
+            show_source: false
+            show_symbol_type_heading: true
+            show_symbol_type_toc: true
+            signature_crossrefs: true
+            summary: true
+            unwrap_annotated: true
\ No newline at end of file
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000000000000000000000000000000000000..a505076d0cc1f3bcf63a0fca859c69faeeefd028
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,171 @@
+[build-system]
+requires = [
+  "hatchling",
+  "betterproto[compiler]>=2.0.0b6",
+  "grpcio-tools",
+  "black",
+  "jinja2",
+  "isort",
+  "toml; python_version<'3.11'",
+]
+build-backend = "hatchling.build"
+
+[project]
+name = "VAPython"
+dynamic = ["version"]
+description = ''
+readme = "README.md"
+requires-python = ">=3.9"
+license = "Apache-2.0"
+keywords = []
+authors = [
+  { name = "Pascal Palenda", email = "pascal.palenda@akustik.rwth-aachen.de" },
+]
+classifiers = [
+  "Development Status :: 1 - Planning",
+  "Programming Language :: Python",
+  "Programming Language :: Python :: 3.9",
+  "Programming Language :: Python :: 3.10",
+  "Programming Language :: Python :: 3.11",
+  "Programming Language :: Python :: 3.12",
+  "Programming Language :: Python :: Implementation :: CPython",
+  "Programming Language :: Python :: Implementation :: PyPy",
+]
+dependencies = [
+  "betterproto>=2.0.0b6",
+  "nest-asyncio",
+  "scipy",
+  "platformdirs",
+]
+
+[project.urls]
+Documentation = "https://git.rwth-aachen.de/ita/VAPython#readme"
+Issues = "https://git.rwth-aachen.de/ita/VAPython/issues"
+Source = "https://git.rwth-aachen.de/ita/VAPython"
+
+[tool.hatch.version]
+path = "CMakelists.txt"
+pattern = "project\\s+\\(\\s*\\w+[\\w\\W]+VERSION\\s+(?P<version>\\d{1,4}\\.\\d+\\.?\\d*)"
+
+[tool.hatch.build]
+artifacts = [
+  "**/vanet/_vanet_grpc.py",
+  "**/vanet/_va_interface.py",
+]
+
+[tool.hatch.build.hooks.custom]
+path="scripts/build_hook.py"
+
+[tool.hatch.envs.default]
+path = ".hatch"
+dependencies = [
+  "coverage[toml]>=6.5",
+  "pytest",
+  "pytest-asyncio",
+  "pytest-mock",
+  "betterproto[compiler]>=2.0.0b6",
+  "grpcio-tools",
+  "ruff",
+  "jinja2",
+]
+[tool.hatch.envs.default.scripts]
+examples = "py -m vapython.examples.__main__ {args}"
+test = "pytest {args:tests}"
+test-cov = "coverage run -m pytest {args:tests}"
+cov-report = [
+  "- coverage combine",
+  "coverage report",
+]
+cov-report-html = [
+  "- coverage combine",
+  "coverage html -d {args:cov-report-html}",
+]
+cov = [
+  "test-cov",
+  "cov-report",
+]
+cov-html = [
+  "test-cov",
+  "cov-report-html",
+]
+cov-ci = [
+  "test-cov",
+  "cov-report",
+  "coverage xml",
+]
+
+[[tool.hatch.envs.test.matrix]]
+python = ["3.9", "3.10", "3.11", "3.12"]
+
+[tool.hatch.envs.types]
+extra-dependencies = [
+  "mypy>=1.0.0",
+]
+[tool.hatch.envs.types.scripts]
+check = "mypy --install-types --non-interactive {args:src/vapython tests}"
+
+[tool.hatch.envs.docs]
+dependencies = [
+  "mkdocs",
+  "mkdocs-material",
+  "mkdocstrings[python]>=0.18",
+  "black",
+  "docstr-coverage"
+]
+[tool.hatch.envs.docs.scripts]
+build = "mkdocs build"
+serve = "mkdocs serve"
+doc-cov = "docstr-coverage {args:src/vapython} --exclude=\".*(?:_grpc|NatNetClient).*\" -b docs --skip-file-doc"
+
+[tool.ruff]
+target-version = "py39"
+line-length = 120
+ignore = [
+  # Allow non-abstract empty methods in abstract base classes
+  "B027",
+  # Allow boolean positional values in function calls, like `dict.get(... True)`
+  "FBT003",
+  # Ignore checks for possible passwords
+  "S105", "S106", "S107",
+  # Ignore complexity
+  "C901", "PLR0911", "PLR0912", "PLR0913", "PLR0915",
+  # Okay to use random
+  "S311",
+]
+unfixable = [
+  # Don't touch unused imports
+  "F401",
+]
+exclude = [
+  "*/vanet/_vanet_grpc.py",
+  "*/tracking/NatNetClient.py"
+]
+
+[tool.ruff.lint.per-file-ignores]
+# Tests can use magic values, assertions, and relative imports
+"tests/**/*" = ["PLR2004", "S101", "TID252", "SLF001"]
+"scripts/**/*" = ["INP001"]
+"src/vapython/examples/**/*" = ["T201"]
+"manual_tests/**/*" = ["T201"]
+
+[tool.ruff.lint.pyupgrade]
+keep-runtime-typing = true
+
+[tool.coverage.run]
+source_pkgs = ["vapython", "tests"]
+branch = true
+parallel = true
+omit = [
+  "src/vapython/__about__.py",
+]
+
+[tool.coverage.paths]
+vapython = ["src/vapython", "*/vapython/src/vapython"]
+tests = ["tests", "*/vapython/tests"]
+
+[tool.coverage.report]
+exclude_lines = [
+  "no cov",
+  "if __name__ == .__main__.:",
+  "if TYPE_CHECKING:",
+]
diff --git a/scripts/build_hook.py b/scripts/build_hook.py
new file mode 100644
index 0000000000000000000000000000000000000000..4ae9b848e4ed2bf9de42a53536a3e296414f8ace
--- /dev/null
+++ b/scripts/build_hook.py
@@ -0,0 +1,45 @@
+import shutil
+import sys
+from pathlib import Path
+
+from hatchling.builders.hooks.plugin.interface import BuildHookInterface
+
+parent_path = Path(__file__).parent
+sys.path.append(str(parent_path))
+from build_vapython import build_vapython  # noqa: E402
+
+
class CustomBuildHook(BuildHookInterface):
    """Hatch build hook that generates the VA interface sources at build time."""

    def initialize(self, version, build_data):
        """Generate the grpc stub and wrapper modules and register them as artifacts.

        Args:
            version: The version being built (used to namespace the build directory).
            build_data: Hatchling build data; the generated files are appended to
                its ``artifacts`` list so they ship with the package.
        """
        root = Path(self.root)
        build_dir = root / "build" / f"vapython-{version}"
        src_dir = root / "src" / "vapython"

        build_dir.mkdir(parents=True, exist_ok=True)

        # Mirror the checkout's branch when fetching the proto files:
        # master/develop are used as-is, anything else falls back to develop.
        branch = "master"
        installed_git = shutil.which("git")
        if installed_git:
            from subprocess import check_output

            branch = (
                check_output([installed_git, "rev-parse", "--abbrev-ref", "HEAD"], cwd=src_dir).decode("utf-8").strip()
            )
            if branch not in ["master", "develop"]:
                branch = "develop"

        build_vapython(build_dir, build_dir / "va_python.py", branch)

        vanet_dir = src_dir / "vanet"
        vanet_dir.mkdir(parents=True, exist_ok=True)

        shutil.copy(build_dir / "stubs" / "vanet" / "__init__.py", vanet_dir / "_vanet_grpc.py")
        shutil.copy(build_dir / "va_python.py", vanet_dir / "_va_interface.py")

        for generated in ("_vanet_grpc.py", "_va_interface.py"):
            build_data["artifacts"].append(str((vanet_dir / generated).relative_to(self.root)))
diff --git a/scripts/build_vapython.py b/scripts/build_vapython.py
new file mode 100644
index 0000000000000000000000000000000000000000..e39b24056da47ffbcba4637f7d086ef595fc0078
--- /dev/null
+++ b/scripts/build_vapython.py
@@ -0,0 +1,513 @@
+import ast
+import re
+import subprocess
+import sys
+from importlib import resources
+from pathlib import Path
+from urllib.request import urlretrieve
+
+import black
+import isort
+import jinja2
+
+parent_path = Path(__file__).parent
+
+
+if sys.version_info >= (3, 11, 0):
+    import tomllib as toml
+else:
+    import toml
+
+
def download_proto_files(branch: str, base_path: Path) -> dict[str, Path]:
    """Download the proto files from the VANet repository into ``base_path``.

    Files are stored under a subdirectory named after the branch, with
    characters that are invalid in file names replaced by underscores.

    Args:
        branch: The branch to download the files from.
        base_path: The base path to save the files to.

    Returns:
        Mapping of downloaded file name (e.g. ``va.proto``) to its local path.
    """
    target_dir = base_path / re.sub(r'[<>:"/\\|?*]', "_", branch)
    target_dir.mkdir(parents=True, exist_ok=True)

    downloaded: dict[str, Path] = {}
    for stem in ("va_event", "va_exception", "va_struct", "va_messages", "va"):
        url = f"https://git.rwth-aachen.de/ita/VANet/-/raw/{branch}/protos/{stem}.proto"
        destination = target_dir / f"{stem}.proto"

        urlretrieve(url, destination)  # noqa: S310

        downloaded[destination.name] = destination

    return downloaded
+
+
def generate_unified_proto_file(out_path: Path, proto_data_base: dict[str, Path]):
    """Generate a unified proto file from the given proto files.

    This function merges the given proto files into a single ``VA.proto``
    under a common ``vanet`` package; a single file is easier to work with
    in the python betterproto library. Per-file ``syntax``/``import``/
    ``package`` header lines are stripped and replaced with one shared header.

    Args:
        out_path: The directory to save the unified proto file to.
        proto_data_base: The proto files to merge, keyed by file name.

    Returns:
        The path to the generated ``VA.proto``.
    """
    # Anchor applies to the whole alternation. The previous pattern
    # `^(?:syntax)|(?:import)|(?:package)` only anchored `syntax`, so any
    # line merely CONTAINING "import" or "package" (e.g. a field named
    # `import_path`) was silently dropped from the merged file.
    header_line = re.compile(r"^(?:syntax|import|package)\b")

    output_file = out_path / "VA.proto"
    with open(output_file, "w") as out_file:
        out_file.write(
            r"""syntax = "proto3";
package vanet;

import "google/protobuf/empty.proto";
import "google/protobuf/wrappers.proto";
"""
        )
        for file in proto_data_base.values():
            with open(file) as in_file:
                for line in in_file:
                    if not header_line.match(line):
                        out_file.write(line)
            out_file.write("\n")

    return output_file
+
+
def generate_python_files(out_path: Path, proto_file_path: Path, proto_include_path: Path):
    """Generate the python files from the given proto file.

    This function uses the grpc_tools library (protoc) with the python
    betterproto plugin to generate the python files.

    Args:
        out_path: The path to save the generated python files to.
        proto_file_path: The path to the proto file to generate the python files from.
        proto_include_path: The path to the proto include files.

    Returns:
        The path to the generated ``vanet/__init__.py``.

    Raises:
        RuntimeError: If protoc exits with a non-zero return code.
    """

    def _get_resource_file_name(package_or_requirement: str, resource_name: str) -> str:
        """Obtain the filename for a resource on the file system."""
        return str((resources.files(package_or_requirement) / resource_name).resolve())

    out_path.mkdir(parents=True, exist_ok=True)

    proto_include = _get_resource_file_name("grpc_tools", "_proto")

    # Invoke protoc through the *current* interpreter and without a shell.
    # The previous `python -m ...` string with shell=True picked up whatever
    # `python` was first on PATH (possibly a different environment without
    # grpc_tools) and broke on paths containing spaces.
    command = [
        sys.executable,
        "-m",
        "grpc_tools.protoc",
        f"--python_betterproto_out={out_path}",
        f"-I{proto_include}",
        f"-I{proto_include_path}",
        str(proto_file_path),
    ]
    result = subprocess.call(command)

    if result != 0:
        error_message = f"protoc exited with return code {result}"
        raise RuntimeError(error_message)

    return out_path / "vanet" / "__init__.py"
+
+
def fix_generated_file(file: Path):
    """Post-process the generated python file in place.

    Fixes minor issues stemming from the generation process and the way the
    proto files are written:
        - since the proto files have both the package and the service name as
          VA, the package name is changed to vanet in
          `generate_unified_proto_file`; however, to work with the grpc
          library, the message route has to be changed from /vanet.VA/ back
          to /VA.VA/
        - a ``# mypy: ignore-errors`` header is prepended, as the generated
          code is not meant to be type-checked

    Args:
        file: The file to fix.
    """
    # fix empty class, see: https://github.com/danielgtaylor/python-betterproto/issues/9
    original_lines = file.read_text().splitlines(keepends=True)
    patched_lines = [line.replace("/vanet.VA/", "/VA.VA/") for line in original_lines]
    with open(file, "w") as f:
        f.writelines(["# mypy: ignore-errors\n", *patched_lines])
+
+
def _format_docstring_section(title: str, entries) -> str:
    """Format one google-style docstring section (Args/Returns/Raises).

    Args:
        title: The section heading, e.g. "Args".
        entries: A single entry string or a list of entry strings.

    Returns:
        The formatted section, terminated by a blank line.
    """
    if isinstance(entries, str):
        entries = [entries]
    body = "".join(f"    {entry}\n" for entry in entries)
    return f"{title}:\n{body}\n"


def get_documentation():
    """Get the documentation for the methods in the VAInterface class.

    This method reads the documentation from the documentation.toml file and
    assembles the final docstrings, formatted following the google docstring
    style.

    Returns:
        A dictionary with the method names as keys and the docstrings as values.
    """
    documentation_file = parent_path / "documentation.toml"
    # tomllib (>=3.11) requires binary mode; the legacy toml package uses text mode.
    if sys.version_info >= (3, 11, 0):
        with open(documentation_file, "rb") as f:
            data = toml.load(f)
    else:
        with open(documentation_file) as f:
            data = toml.load(f)

    final_doc = {}
    for key, value in data["VA"].items():
        docstring = ""

        if value.get("brief"):
            docstring += value["brief"] + "\n\n"

        if value.get("detail"):
            docstring += value["detail"] + "\n\n"

        # Args/Returns/Raises share the same layout; the previous
        # implementation repeated the formatting loop three times.
        for title, toml_key in (("Args", "args"), ("Returns", "returns"), ("Raises", "raises")):
            if value.get(toml_key):
                docstring += _format_docstring_section(title, value[toml_key])

        if value.get("post_doc"):
            docstring += value["post_doc"]

        # Collapse the double newline left behind by the last section.
        if docstring.endswith("\n\n"):
            docstring = docstring[:-1]

        final_doc[key] = docstring

    return final_doc
+
+
def get_default_values():
    """Get the default values for the methods in the VAInterface class.

    Reads ``default_values.toml`` next to this script. On Python >= 3.11 the
    stdlib ``tomllib`` is used (binary mode), otherwise the ``toml`` package
    (text mode).

    Returns:
        A dictionary with the method names as keys and the parameters with
        their default values as values.
    """
    default_values_file = parent_path / "default_values.toml"
    open_mode = "rb" if sys.version_info >= (3, 11, 0) else "r"
    with open(default_values_file, open_mode) as f:
        data = toml.load(f)

    return data["VA"]
+
+
def parse_python_file(file_path: Path):
    """Parse the python file and extract the methods from the VaStub class.

    This function is where most of the magic happens. It reads the generated
    python file, extracts the methods from the VaStub class, and produces the
    data used to render the wrapper template.

    The steps are as follows:
        - get the documentation and default values for the methods
        - read the generated grpc file
        - find the VaStub class and iterate over its methods
            - extract the method name
            - given the method name, extract the argument type and get the message type
            - transform the message type to pure python types or custom VA types
            - handle special cases: bool parameters (keyword-only), default
              values, name fixes, private methods
            - get the return type of the method
        - build the output list with the method data

    Args:
        file_path: The path to the generated python file.

    Returns:
        A list of dictionaries with the method data.

    Raises:
        RuntimeError: If a stub method has more than one argument or uses an
            unknown/unsupported message type.
    """
    documentation = get_documentation()
    default_values = get_default_values()

    with open(file_path) as file:
        all_code = ast.parse(file.read())

    # Generated names where betterproto's case conversion mangles "IDs".
    name_fix = {
        "get_geometry_mesh_i_ds": "get_geometry_mesh_ids",
        "get_sound_portal_i_ds": "get_sound_portal_ids",
        "get_scene_i_ds": "get_scene_ids",
        "get_sound_receiver_i_ds": "get_sound_receiver_ids",
        "get_sound_source_i_ds": "get_sound_source_ids",
    }

    # Methods exposed with a leading underscore (wrapped by hand elsewhere).
    private_methods = [
        "get_state",
        "attach_event_handler",
    ]

    output_data = []

    for node in ast.walk(all_code):
        if not (isinstance(node, ast.ClassDef) and node.name == "VaStub"):
            continue
        for n in node.body:
            if not isinstance(n, (ast.AsyncFunctionDef, ast.FunctionDef)):
                continue

            # Handle method arguments (grpc stub methods take one message).
            message = [
                {"name": arg.arg, "type": arg.annotation.value} for arg in n.args.args if arg.arg != "self"
            ]

            if len(message) > 1:
                msg = f"Method '{n.name}' had more than one argument, which is not supported."
                raise RuntimeError(msg)

            message_type = message[0]["type"]
            message_def = find_classdef(all_code, message_type)

            members = []
            if message_def is None:
                match = re.search(r"protobuf\.(.*)Value", message_type)
                if "protobuf.Empty" in message_type:
                    pass
                elif match:
                    # Well-known wrapper type: unwrap to the plain python type.
                    value_type = match.group(1)
                    if value_type == "Bool":
                        members = [{"name": "value", "type": "bool"}]
                    elif value_type in {"Int32", "Int64"}:
                        members = [{"name": "value", "type": "int"}]
                    elif value_type in {"Float", "Double"}:
                        members = [{"name": "value", "type": "float"}]
                    elif value_type == "String":
                        members = [{"name": "value", "type": "str"}]
                    if not members:
                        # Previously this fell through to an IndexError on
                        # members[-1]; fail with a clear message instead.
                        msg = f"Unsupported protobuf wrapper type {message_type}"
                        raise RuntimeError(msg)
                    members[-1]["name_org"] = "value"
                else:
                    msg = f"Message type {message_type} not found"
                    raise RuntimeError(msg)
            elif message_type == "Struct":
                members = [{"name": "data", "type": "VAStruct", "name_org": "fields"}]
            else:
                members = get_classdef_assigns(message_def)

            # TODO: handle protobuf types
            # TODO: handle struct, convert dict to struct ... and the other way around?!

            # Booleans become keyword-only arguments. NOTE: the previous
            # implementation removed items from `members` while iterating it,
            # which skipped the second of two consecutive bool members.
            kw_members = [member for member in members if member["type"] == "bool"]
            members = [member for member in members if member["type"] != "bool"]

            # Handle method return type
            return_type = None
            wrapped_return_type = False
            type_overwrites = {
                "Struct": "VAStruct",
                "Vector3": "VAVector",
                "Quaternion": "VAQuaternion",
            }
            if n.returns and isinstance(n.returns, ast.Constant):
                match = re.search(r"protobuf\.(.*)Value", n.returns.value)
                if "protobuf.Empty" in n.returns.value:
                    pass
                elif n.returns.value in type_overwrites:
                    return_type = type_overwrites[n.returns.value]
                elif match:
                    value_type = match.group(1)
                    wrapped_return_type = True
                    if value_type == "Bool":
                        return_type = "bool"
                    elif value_type in {"Int32", "Int64"}:
                        return_type = "int"
                    elif value_type in {"Float", "Double"}:
                        return_type = "float"
                    elif value_type == "String":
                        return_type = "str"
                else:
                    # TODO: handle this and return tuples from the methods
                    return_type = "vanet." + n.returns.value

            # Handle special cases
            fixed_name = name_fix.get(n.name, n.name)

            if fixed_name in private_methods:
                fixed_name = f"_{fixed_name}"

            if fixed_name in default_values:
                for member in members + kw_members:
                    if member["name"] in default_values[fixed_name]:
                        member["default"] = default_values[fixed_name][member["name"]]

            # Arguments with defaults must come after those without.
            members = sorted(members, key=lambda x: "default" in x)
            kw_members = sorted(kw_members, key=lambda x: "default" in x)

            if fixed_name == "_attach_event_handler":
                continue

            # Build output data
            output_data.append(
                {
                    "name": fixed_name,
                    "org_name": n.name,
                    "args": members,
                    "kwargs": kw_members,
                    "message_type": message_type,
                    "returns": return_type,
                    "wrapped_return_type": wrapped_return_type,
                    "docstring": documentation.get(fixed_name, ""),
                }
            )

    return output_data
+
+
def find_classdef(node: ast.AST, class_name):
    """Find a class definition in the given AST node.

    Helper function for `parse_python_file`.

    Args:
        node: The AST node to search in.
        class_name: The name of the class to find.

    Returns:
        The class definition node if found, otherwise None.
    """
    # Annotation fixed: `ast` named the module object, not a type.
    for n in ast.walk(node):
        if isinstance(n, ast.ClassDef) and n.name == class_name:
            return n
    return None
+
+
def get_classdef_assigns(classdef: ast.ClassDef):
    """Get the annotated assignments of a class definition.

    Helper function for `parse_python_file`. Collects each ``name: type``
    field of the message class, mapping generated proto types to the
    wrapper's public VA types and renaming fields that would shadow python
    builtins.

    Args:
        classdef: The class definition to get the assignments from.

    Returns:
        A list of dictionaries with the (possibly renamed) name, type, and
        original name of the assignments.
    """
    # Proto message types exposed through friendlier wrapper types.
    type_overwrites = {
        "Struct": "VAStruct",
        "Vector3": "Union[VAVector, List[float], Tuple[float, float, float]]",
        "Quaternion": "Union[VAQuaternion, List[float], Tuple[float, float, float,float]]",
    }
    # Field names that would shadow builtins are suffixed with an underscore.
    name_overwrites = {
        "id": "id_",
        "type": "type_",
    }

    assigns = []
    for n in classdef.body:
        if not isinstance(n, ast.AnnAssign):
            continue

        if isinstance(n.annotation, ast.Name):
            # Plain python type annotation, e.g. `int`.
            type_name = n.annotation.id
        else:
            # Forward references are string constants. The legacy
            # `ast.Str` branch for Python < 3.9 was dead code (the project
            # requires 3.9+) and has been removed.
            type_name = n.annotation.value if isinstance(n.annotation, ast.Constant) else ""
            type_name = type_overwrites.get(type_name, "vanet." + type_name)

        assigns.append(
            {
                "name": name_overwrites.get(n.target.id, n.target.id),
                "type": type_name,
                "name_org": n.target.id,
            }
        )
    return assigns
+
+
def render_template(data, out_file: Path):
    """Render the wrapper template with the given data and save it to the given file.

    The jinja2 template ``templates/wrapper.py.j2`` (located next to this
    script) is rendered, then the result is formatted with black and isort.

    Args:
        data: The data to render the template with.
        out_file: The file to save the rendered template to.
    """
    out_file = Path(out_file)

    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(searchpath=parent_path / "templates"),
        trim_blocks=True,
        lstrip_blocks=True,
        autoescape=jinja2.select_autoescape(
            disabled_extensions=("py.j2",),
            default_for_string=True,
            default=False,
        ),
    )

    rendered = env.get_template("wrapper.py.j2").render(data)
    out_file.write_text(rendered)

    # Normalize the formatting of the generated code.
    black.format_file_in_place(out_file, write_back=black.WriteBack.YES, fast=True, mode=black.FileMode())
    isort.file(out_file)
+
+
def build_vapython(base_path: Path, output_wrapper_file: Path, branch: str = "master"):
    """Build the VAInterface python file.

    Main entry point for the script: downloads the proto files, merges them
    into one, generates and patches the grpc stubs, parses the stub methods,
    and finally renders the wrapper template.

    Args:
        base_path: The base path to save the generated files to.
        output_wrapper_file: The file to save the generated VAInterface python file to.
        branch: The branch to download the proto files from.
    """
    download_path = base_path / "download"

    proto_files = download_proto_files(branch, download_path)
    unified_proto = generate_unified_proto_file(download_path, proto_files)

    stub_file = generate_python_files(base_path / "stubs", unified_proto, unified_proto.parent)
    fix_generated_file(stub_file)

    methods = parse_python_file(stub_file)
    render_template({"methods": methods}, output_wrapper_file)
+
+
if __name__ == "__main__":
    # Manual invocation for development: generate the wrapper from the
    # grpc-improvement branch into scripts/vapy.py, using ./generation as
    # the working directory for downloads and stubs.
    build_vapython(Path("generation"), parent_path / "vapy.py", branch="feature/grpc-improvement")
diff --git a/scripts/default_values.toml b/scripts/default_values.toml
new file mode 100644
index 0000000000000000000000000000000000000000..5187b50605ffa13a766e2e3579c246ba847aae56
--- /dev/null
+++ b/scripts/default_values.toml
@@ -0,0 +1,35 @@
+[VA.create_sound_source]
+name = "\"source\""
+
+[VA.create_sound_receiver]
+name = "\"receiver\""
+
+[VA.create_directivity_from_parameters]
+name = "\"directivity\""
+
+[VA.create_signal_source_text_to_speech]
+name = "\"T2S_signal\""
+
+[VA.create_signal_source_buffer_from_parameters]
+name = "\"buffer_signal\""
+
+[VA.create_signal_source_engine]
+name = "\"engine_signal\""
+
+[VA.create_signal_source_sequencer]
+name = "\"sequencer_signal\""
+
+[VA.create_signal_source_prototype_from_parameters]
+name = "\"prototype_signal\""
+
+[VA.create_signal_source_network_stream]
+name = "\"network_stream_signal\""
+
+[VA.create_signal_source_machine]
+name = "\"machine_signal\""
+
+[VA.get_rendering_modules]
+only_enabled = "True"
+
+[VA.get_reproduction_modules]
+only_enabled = "True"
\ No newline at end of file
diff --git a/scripts/documentation.toml b/scripts/documentation.toml
new file mode 100644
index 0000000000000000000000000000000000000000..f7749e09b362f5362202538ee5d294d6de0869fd
--- /dev/null
+++ b/scripts/documentation.toml
@@ -0,0 +1,1419 @@
+# ----------- Globals ------------
+
+[VA.get_version_info]
+brief = "Get the version of the running VACore."
+returns = "Version information structure."
+
+[VA.call_module]
+brief = "Call an internal module of the VAServer."
+detail = """This can be used to access advanced functionality in the VAServer.
+As such, this method is useful for prototyping.
+
+The modules that can be called include among others:
+
+- The Core itself
+- Rendering modules
+- Reproduction modules
+
+As the functionality between the modules can be different,
+there is no general documentation possible. An [example](https://www.virtualacoustics.org/VA/documentation/recording/#controlling-the-virtual-card-audio-processing)
+is given for calling the Virtual audio device used for offline auralization.
+Special calls have to be extracted from C++ code in the VACore.
+Even though it is recommended to implement at least a help text that will be returned
+if the 'help' or 'info' key is set to any value, this is not always the case.
+Similarly, a true-valued 'verbose' key flag can activate verbose return
+or output on debug stream if implemented."""
+args = [
+    "module_name: The name of the module to call",
+    "module_parameters: A dictionary with string keys of parameters to pass to the module."
+]
+returns = "The result of the call."
+
+[VA.get_modules]
+brief = "Get a list of all available, internal modules of the core."
+detail = """Amongst others, a VA module can be
+
+- the Core itself
+- a rendering module
+- a reproduction module
+"""
+returns = "A list of module information dictionaries."
+
+[VA.get_input_gain]
+brief = "Get the global audio interface input gain."
+returns = "The global audio interface input gain."
+
+[VA.set_input_gain]
+brief = "Set the global audio interface input gain."
+detail = """Warning:
+    Setting gains will potentially penetrate a calibrated rendering and reproduction chain"""
+args = [
+    "gain: The global audio interface input gain, in the range of 0 to 1."
+]
+
+[VA.get_input_muted]
+brief = "Get the global audio interface input muted status."
+returns = "True if the global audio interface input is muted, False if not."
+
+[VA.set_input_muted]
+brief = "Mute or unmute the global audio interface input."
+args = [
+    "muted: True if the global audio interface input should be muted, False if not."
+]
+
+[VA.get_output_gain]
+brief = "Get the global audio interface output gain."
+returns = "The global audio interface output gain."
+
+[VA.set_output_gain]
+brief = "Set the global audio interface output gain."
+detail = """Warning:
+    Setting gains will potentially penetrate a calibrated rendering and reproduction chain"""
+args = [
+    "gain: The global audio interface output gain, in the range of 0 to 1."
+]
+
+[VA.get_output_muted]
+brief = "Get the global audio interface output muted status."
+returns = "True if the global audio interface output is muted, False if not."
+
+[VA.set_output_muted]
+brief = "Mute or unmute the global audio interface output."
+args = [
+    "muted: True if the global audio interface output should be muted, False if not."
+]
+
+[VA.reset]
+brief = "Reset the VACore."
+detail = """This method resets the VACore to its initial state.
+Clear all scenes, removes sound entities, resets states and medium values."""
+
+[VA.get_state]
+brief = "Get the state of the VACore."
+detail = "Please use the more appropriately named [`get_server_state`][vapython.va.VA.get_server_state] method."
+returns = "The state of the VACore."
+
+[VA.get_search_paths]
+brief = "Get the search paths of the VACore."
+returns = "A VAStruct with the search paths."
+
+[VA.get_file_list]
+brief = "Returns files that are accessible through search paths."
+detail = """Warning:
+    This call may take a while and can result in a lot of return data."""
+args = [
+    "file_suffix_filter: File suffix / type filtering, i.e. \"wav\" or \"daff\". \"*\" or empty string will list all.",
+    "recursive: Search recursively in paths."
+]
+returns = "A VAStruct with file paths."
+
+[VA.get_core_configuration]
+brief = "Get the configuration of the VACore."
+detail = """This method returns the content of the
+configuration `.ini` file content with which the Server was started.
+This is returned in the form of a VAStruct dictionary."""
+args = [
+    "only_enabled: If True, the configuration is only returned for enabled modules, e.g. rendering / reproduction modules."
+]
+returns = "A VAStruct with the configuration."
+
+[VA.get_hardware_configuration]
+brief = "Get the hardware configuration of the VACore."
+returns = "A VAStruct with the hardware configuration."
+
+[VA.get_core_clock]
+brief = "Get the current core time."
+detail = """This method returns the current clock of the core instance in seconds.
+This Clock is offset-cleaned from Stream Tracker, which uses the default clock"""
+returns = "The current core time in seconds."
+
+[VA.set_core_clock]
+brief = "Set the current core time."
+detail = """This methods (re)sets the core time to the given value in seconds.
+The new value must be a number greater or equal zero."""
+args = [
+    "time: The new core time in seconds."
+]
+
+[VA.get_update_locked]
+brief = "Get the update lock status of the VACore."
+detail = """This determines whether a synchronized scene modification is in progress or not."""
+returns = "True if the core is locked, False if not."
+
+[VA.lock_update]
+brief = "Locks the scene."
+detail = """This method locks the scene for synchronized modification.
+During a locked scene, no changes are directly applied.
+After unlocking, all modifications are synchronously applied."""
+
+[VA.unlock_update]
+brief = "Unlocks the scene."
+detail = """This method unlocks the scene for synchronized modification.
+After unlocking, all modifications are synchronously applied."""
+returns = "The state ID of the scene if successfully proceeded, -1 if not."
+
+# ----------- Directivity ------------
+
+[VA.create_directivity_from_parameters]
+brief = "Create a directivity from a set of parameters."
+detail = """Examples:
+    >>> va.create_directivity_from_parameters(
+    ...    "fig-8",
+    ...    {"filepath": "path/to/file.daff"},
+    ...)
+    0"""
+args = [
+    "name: The name for the directivity",
+    "parameters: A dictionary with string keys of parameters to create the directivity with."
+]
+returns = "The ID of the created directivity."
+
+[VA.delete_directivity]
+brief = "Delete a directivity."
+detail = """This is only possible if the directivity is not in use.
+Otherwise the method will do nothing."""
+args = [
+    "directivity_id: The ID of the directivity to delete."
+]
+returns = "True, if successful (i.e. not in use)."
+
+[VA.get_directivity_info]
+brief = "Returns information on a loaded directivity."
+args = [
+    "directivity_id: The ID of the directivity to get information on."
+]
+returns = "A data structure with information on the directivity."
+
+[VA.get_directivity_infos]
+brief = "Returns information on all loaded directivities."
+returns = "A list of data structures with information on all loaded directivities."
+
+[VA.set_directivity_name]
+brief = "Set the name of a directivity."
+args = [
+    "directivity_id: The ID of the directivity to rename",
+    "name: The new name for the directivity."
+]
+
+[VA.get_directivity_name]
+brief = "Get the name of a directivity."
+args = [
+    "directivity_id: The ID of the directivity to get the name of."
+]
+returns = "The name of the directivity."
+
+[VA.set_directivity_parameters]
+brief = "Set the parameters of a directivity."
+args = [
+    "directivity_id: The ID of the directivity to set the parameters of",
+    "parameters: A dictionary with string keys of parameters to set."
+]
+
+[VA.get_directivity_parameters]
+brief = "Get the parameters of a directivity."
+args = [
+    "directivity_id: The ID of the directivity to get the parameters of",
+    "parameters: A dictionary with string keys of parameters to get."
+]
+returns = "A dictionary with the parameters of the directivity."
+
+# ----------- Acoustic Materials ------------
+
+[VA.create_acoustic_material]
+brief = "Create an acoustic material by passing an AcousticMaterial."
+args = [
+    "acoustic_material_data: A AcousticMaterial object to create the material with",
+    "name: The name for the material."
+]
+returns = "The ID of the created material."
+
+[VA.create_acoustic_material_from_parameters]
+brief = "Create an acoustic material from a set of parameters."
+args = [
+    "name: The name for the material",
+    "parameters: A dictionary with string keys of parameters to create the material with."
+]
+returns = "The ID of the created material."
+
+[VA.delete_acoustic_material]
+brief = "Delete an acoustic material."
+args = [
+    "acoustic_material_id: The ID of the material to delete."
+]
+returns = "True, if successful."
+
+[VA.get_acoustic_material_info]
+brief = "Returns information on an acoustic material."
+args = [
+    "acoustic_material_id: The ID of the material to get information on."
+]
+returns = "A data structure with information on the material."
+
+[VA.get_acoustic_material_infos]
+brief = "Returns information on all loaded acoustic materials."
+returns = "A list of data structures with information on all loaded materials."
+
+[VA.set_acoustic_material_name]
+brief = "Set the name of an acoustic material."
+args = [
+    "acoustic_material_id: The ID of the material to rename",
+    "name: The new name for the material."
+]
+
+[VA.get_acoustic_material_name]
+brief = "Get the name of an acoustic material."
+args = [
+    "acoustic_material_id: The ID of the material to get the name of."
+]
+returns = "The name of the acoustic material."
+
+[VA.set_acoustic_material_parameters]
+brief = "Set the parameters of an acoustic material."
+args = [
+    "acoustic_material_id: The ID of the material to set the parameters of",
+    "parameters: A dictionary with string keys of parameters to set."
+]
+
+[VA.get_acoustic_material_parameters]
+brief = "Get the parameters of an acoustic material."
+args = [
+    "acoustic_material_id: The ID of the acoustic_material to get the parameters of",
+    "parameters: A dictionary with string keys of parameters to get."
+]
+returns = "A dictionary with the parameters of the acoustic material."
+
+# ----------- Geometry ------------
+
+[VA.create_geometry_mesh]
+brief = "Create a geometry mesh by passing a GeometryMesh."
+args = [
+    "geometry_mesh_data: A GeometryMesh object to create the geometry with",
+    "name: The name for the geometry."
+]
+returns = "The ID of the created geometry."
+
+[VA.create_geometry_mesh_from_parameters]
+brief = "Create a geometry mesh from a set of parameters."
+args = [
+    "name: The name for the geometry",
+    "parameters: A dictionary with string keys of parameters to create the geometry with."
+]
+returns = "The ID of the created geometry mesh."
+
+[VA.delete_geometry_mesh]
+brief = "Delete a geometry mesh."
+args = [
+    "geometry_mesh_id: The ID of the geometry to delete."
+]
+returns = "True, if successful."
+
+[VA.get_geometry_mesh]
+brief = "Returns a geometry mesh."
+args = [
+    "geometry_mesh_id: The ID of the geometry to get information on."
+]
+returns = "A data structure with the geometry mesh."
+
+[VA.get_geometry_mesh_ids]
+brief = "Returns the IDs of all geometry meshes."
+returns = "A list of geometry mesh IDs."
+
+[VA.set_geometry_mesh_name]
+brief = "Set the name of a geometry mesh."
+args = [
+    "geometry_mesh_id: The ID of the geometry to rename",
+    "name: The new name for the geometry."
+]
+
+[VA.get_geometry_mesh_name]
+brief = "Get the name of a geometry mesh."
+args = [
+    "geometry_mesh_id: The ID of the geometry to get the name of."
+]
+returns = "The name of the geometry."
+
+[VA.set_geometry_mesh_enabled]
+brief = "Set if a geometry mesh is enabled."
+args = [
+    "geometry_mesh_id: The ID of the geometry to enable or disable",
+    "enabled: True if should be enabled, False if not."
+]
+
+[VA.get_geometry_mesh_enabled]
+brief = "Get if a geometry mesh is enabled."
+args = [
+    "geometry_mesh_id: The ID of the geometry to get the enabled status of."
+]
+returns = "True if enabled, False if not."
+
+[VA.set_geometry_mesh_parameters]
+brief = "Set the parameters of a geometry mesh."
+args = [
+    "geometry_mesh_id: The ID of the geometry to set the parameters of",
+    "parameters: A dictionary with string keys of parameters to set."
+]
+
+[VA.get_geometry_mesh_parameters]
+brief = "Get the parameters of a geometry mesh."
+args = [
+    "geometry_mesh_id: The ID of the geometry to get the parameters of",
+    "parameters: A dictionary with string keys of parameters to get."
+]
+returns = "A dictionary with the parameters of the geometry mesh."
+
+# ----------- Signal Source ------------
+
+[VA.create_signal_source_buffer_from_parameters]
+brief = "Create a buffer signal source from a set of parameters."
+detail = """Creates a buffer signal source by directly passing samples via a VAStruct.
+The struct must contain a 'samples' key with a nested struct for the channels.
+The keys of the nested struct are the channel names and the values are the samples.
+The name must be of the form 'chX' where X is the channel number starting with 1.
+The samples are passed as a list of floating point values between -1 and 1.
+
+For example:
+{
+    'samples': {
+        'ch1': [0.1, 0.2, 0.3],
+        'ch2': [0.4, 0.5, 0.6]
+    }
+}"""
+args = [
+    "name: The name for the signal source",
+    "parameters: A dictionary with string keys of parameters to create the signal source with."
+]
+returns = "The ID of the created signal source."
+
+[VA.create_signal_source_prototype_from_parameters]
+brief = "Create a prototype signal source from a set of parameters."
+detail = """Creates a prototype signal source by passing a VAStruct with the parameters.
+Which signal source is instantiated is depending on the given 'class' parameter and
+the currently available core feature set.
+This prototype function is used to provide a generic interface call so no API changes are required
+during rapid prototyping.
+
+Usually, the 'class' key corresponds to a certain implementation of a prototype, e.g. 'jet_engine'.
+Further parameters may also be required depending on the prototype.
+For this please refer to the specific prototype implementation, e.g.
+[here](https://www.virtualacoustics.de/VA/documentation/scene/#jet-engine)
+
+The parameters may also be controlled by the
+[`set_signal_source_parameters`][vapython.va.VA.set_signal_source_parameters]
+and obtained via [`get_signal_source_parameters`][vapython.va.VA.get_signal_source_parameters]."""
+args = [
+    "name: The name for the signal source",
+    "parameters: A dictionary with string keys of parameters to create the signal source with."
+]
+returns = "The ID of the created signal source."
+
+[VA.create_signal_source_text_to_speech]
+brief = "Create a [text-to-speech](https://www.virtualacoustics.org/VA/documentation/scene/#text-to-speech-tts) signal source which streams generated samples from a text input."
+args = [
+    "name: The name for the signal source."
+]
+returns = "The ID of the created signal source."
+
+[VA.create_signal_source_sequencer]
+brief = "Create a sequencer signal source."
+detail = "For details, please refer to the [source code](https://git.rwth-aachen.de/ita/VACore/-/tree/master/src/Audiosignals)."
+args = [
+    "name: The name for the signal source."
+]
+returns = "The ID of the created signal source."
+
+[VA.create_signal_source_network_stream]
+brief = "Create a network stream signal source."
+detail = """Deprecated:
+    This signal source is deprecated.
+"""
+args = [
+    "interface: The network interface to listen on",
+    "port: The port to listen on",
+    "name: The name for the signal source."
+]
+returns = "The ID of the created signal source."
+
+[VA.create_signal_source_engine]
+brief = "Create a rotating engine signal source."
+detail = "For details, please refer to the [source code](https://git.rwth-aachen.de/ita/VACore/-/tree/master/src/Audiosignals)."
+args = [
+    "name: The name for the signal source",
+    "parameters: A dictionary with string keys of parameters to create the signal source with."
+]
+returns = "The ID of the created signal source."
+
+[VA.create_signal_source_machine]
+brief = "Create a machine signal source."
+detail = "For details, please refer to the [source code](https://git.rwth-aachen.de/ita/VACore/-/tree/master/src/Audiosignals)."
+args = [
+    "name: The name for the signal source",
+    "parameters: A dictionary with string keys of parameters to create the signal source with."
+]
+returns = "The ID of the created signal source."
+
+[VA.delete_signal_source]
+brief = "Delete a signal source."
+detail = """This is only possible if the signal source is not in use."""
+args = [
+    "signal_source_id: The string ID of the signal source to delete."
+]
+returns = "True, if successful (i.e. not in use)."
+
+[VA.get_signal_source_info]
+brief = "Returns information on a signal source."
+args = [
+    "signal_source_id: The string ID of the signal source to get information on."
+]
+returns = "The information on the signal source (id, name, type, etc.)."
+
+[VA.get_signal_source_infos]
+brief = "Returns information on all loaded signal sources."
+returns = "A list with information on all loaded signal sources."
+
+[VA.get_signal_source_buffer_playback_state]
+brief = "Get the playback state of a buffer signal source."
+detail = "###This should be overwritten by a parent class.###."
+args = [
+    "signal_source_id: The string ID of the signal source to get the playback state of."
+]
+returns = "The playback state of the buffer signal source."
+
+[VA.set_signal_source_buffer_playback_action]
+brief = "Set the playback state of a buffer signal source."
+detail = """###This should be overwritten by a parent class.###
+Start, stop or pause the playback of an audiofile/buffer.
+"""
+args = [
+    "signal_source_id: The string ID of the signal source to get the playback state of",
+    "playback_action: The action to perform (start, stop, pause)."
+]
+
+[VA.set_signal_source_buffer_playback_position]
+brief = "Set the playback position of a buffer signal source."
+args = [
+    "signal_source_id: The string ID of the signal source to set the playback position of",
+    "position: The position to set the playback to in seconds."
+]
+
+[VA.set_signal_source_buffer_looping]
+brief = "Set if a buffer signal source should loop."
+args = [
+    "signal_source_id: The string ID of the signal source to set the looping of",
+    "looping: True if it should loop, False if not."
+]
+
+[VA.get_signal_source_buffer_looping]
+brief = "Returns True if a buffer signal source is set to be looping."
+args = [
+    "signal_source_id: The string ID of the signal source to get the looping of."
+]
+returns = "True if looping, False if not."
+
+[VA.set_signal_source_parameters]
+brief = "Set the parameters of a signal source."
+detail = """This method sets parameters of a signal source.
+Behavior depends on type and implementation of the signal source."""
+args = [
+    "signal_source_id: The string ID of the signal source to set the parameters of",
+    "parameters: A dictionary with string keys of parameters to set."
+]
+
+[VA.get_signal_source_parameters]
+brief = "Get the parameters of a signal source."
+detail = """This method returns parameters of a signal source.
+Behavior depends on type and implementation of the signal source."""
+args = [
+    "signal_source_id: The string ID of the signal source to get the parameters of",
+    "parameters: A dictionary with string keys of parameters to get."
+]
+returns = "A dictionary with the parameters of the signal source."
+
+[VA.add_signal_source_sequencer_sample]
+brief = "Add sequencer sample from file path."
+args = [
+    "signal_source_id: The string ID of the signal source to add the sample to",
+    "file_path: The path to the sample file."
+]
+returns = "The ID of the added sample."
+
+[VA.add_signal_source_sequencer_playback]
+brief = "Add sequencer sound playback."
+args = [
+    "signal_source_id: The string ID of the signal source to add the playback to",
+    "sound_id: The ID of the sound to play",
+    "flags: Sampler playback start flags",
+    "time_code: Time code."
+]
+returns = "Sample playback identifier."
+
+[VA.remove_signal_source_sequencer_sample]
+brief = "Remove sound sample (a sample added for playback can not be reverted)."
+args = [
+    "signal_source_id: The string ID of the signal source to remove the sample from",
+    "sample_id: The ID of the sample to remove."
+]
+
+# ----------- Sound Source ------------
+
+[VA.create_sound_source]
+brief = "Create a sound source."
+detail = """This method creates a new sound source and returns its ID.
+A signal source is not created and assigned to the sound source."""
+args = [
+    "name: The name for the sound source."
+]
+returns = "The ID of the created sound source, if the method succeeded, -1, otherwise."
+
+[VA.get_sound_source_ids]
+brief = "Get the IDs of all sound sources in the scene."
+returns = "A list of sound source IDs."
+
+[VA.get_sound_source_info]
+brief = "Get information on a sound source."
+args = [
+    "sound_source_id: The ID of the sound source to get information on."
+]
+returns = "A data structure with information on the sound source."
+
+[VA.create_sound_source_explicit_renderer]
+brief = "Create a sound source for a certain renderer."
+detail = """This method creates a new sound source and returns its ID.
+This sound source will be skipped for rendering from any other but the given renderer.
+See [`get_rendering_modules`][vapython.va.VA.get_rendering_modules]
+to get a list of available renderers and their name identifier.
+
+Examples:
+    >>> va.create_sound_source_explicit_renderer(
+    ...    "MyFreeFieldRenderer",
+    ...    "AwesomeSoundSource",
+    ...)
+    0"""
+args = [
+    "renderer_id: The name of the renderer to assign the sound source to",
+    "name: The name for the sound source."
+]
+returns = "The ID of the created sound source, if the method succeeded, -1, otherwise."
+
+[VA.delete_sound_source]
+brief = "Delete a sound source."
+args = [
+    "sound_source_id: The ID of the sound source to delete."
+]
+returns = "True, if successful."
+
+[VA.set_sound_source_enabled]
+brief = "Enables or disables a sound source."
+detail = "This removes the sound source from processing within renderers."
+args = [
+    "sound_source_id: The ID of the sound source to enable or disable",
+    "enabled: True if the sound source should be enabled, False if not."
+]
+
+[VA.get_sound_source_enabled]
+brief = "Returns sound source enabled/disabled status."
+args = [
+    "sound_source_id: The ID of the sound source to get the enabled/disabled status of."
+]
+returns = "True if enabled, False if not."
+
+[VA.get_sound_source_name]
+brief = "Get the name of a sound source."
+args = [
+    "sound_source_id: The ID of the sound source to get the name of."
+]
+returns = "The name of the sound source."
+
+[VA.set_sound_source_name]
+brief = "Set the name of a sound source."
+args = [
+    "sound_source_id: The ID of the sound source to rename",
+    "name: The new name for the sound source."
+]
+
+[VA.get_sound_source_signal_source]
+brief = "Get the signal source of a sound source."
+detail = "This returns the ID of the signal source assigned to the sound source."
+args = [
+    "sound_source_id: The ID of the sound source to get the signal source of."
+]
+returns = "The ID of the signal source."
+
+[VA.set_sound_source_signal_source]
+brief = "Attach a signal source to sound source."
+detail = """Let a sound source play back samples from the given signal source.
+An empty string removes the signal source from sound source (silence).
+Also see [`remove_sound_source_signal_source`][vapython.va.VA.remove_sound_source_signal_source]"""
+args = [
+    "sound_source_id: The ID of the sound source to attach the signal source to",
+    "signal_source_id: The string ID of the signal source to attach."
+]
+
+[VA.get_sound_source_geometry_mesh]
+brief = "Get the geometry mesh ID of a sound source."
+args = [
+    "sound_source_id: The ID of the sound source to get the geometry mesh ID of."
+]
+returns = "The ID of the geometry mesh assigned to the sound source."
+
+[VA.set_sound_source_geometry_mesh]
+brief = "Assign a geometry mesh to a sound source."
+detail = """
+Sound sources can have geometrical meshes assigned.
+This can be used to identify which geometry is part of a sound source that should not be
+considered for direct path occlusion, or if the moving source geometry is relevant for occlusion of 
+other source-receiver-paths, like cars, trains, etc."""
+args = [
+    "sound_source_id: The ID of the sound source to assign the geometry mesh to",
+    "geometry_mesh_id: The ID of the geometry mesh to assign."
+]
+
+[VA.set_sound_source_parameters]
+brief = "Set the parameters of a sound source."
+detail = """This general parameter setter can be used for quick solutions changing
+sound source parameters without introducing new methods to the main interface.
+
+Examples:
+    >>> va.set_sound_source_parameters(
+    ...    0,
+    ...    {"special_parameter": 3.141},
+    ...)
+    0
+
+Note:
+    This method only works for special implementations and not in general.
+"""
+args = [
+    "sound_source_id: The ID of the sound source to set the parameters of",
+    "parameters: A dictionary with string keys of parameters to set."
+]
+
+[VA.get_sound_source_parameters]
+brief = "Get the parameters of a sound source."
+detail = """This general parameter getter can be used for quick solutions retrieving
+sound source parameters without introducing new methods to the main interface.
+
+Note:
+    This method only works for special implementations and not in general."""
+args = [
+    "sound_source_id: The ID of the sound source to get the parameters of",
+    "parameters: A dictionary with string keys of parameters to get."
+]
+returns = "A dictionary with the parameters of the sound source."
+
+[VA.get_sound_source_directivity]
+brief = "Get the directivity ID of a sound source."
+detail = "If the sound source is not assigned a directivity, the method returns -1."
+args = [
+    "sound_source_id: The ID of the sound source to get the directivity ID of."
+]
+returns = "The ID of the directivity assigned to the sound source."
+
+[VA.set_sound_source_directivity]
+brief = "Assign a directivity to a sound source."
+detail = """In order to remove an assigned directivity, you can pass -1 to the method."""
+args = [
+    "sound_source_id: The ID of the sound source to assign the directivity to",
+    "directivity_id: The ID of the directivity to assign to the sound source."
+]
+
+[VA.get_sound_source_sound_power]
+brief = "Get the sound power of a sound source."
+args = [
+    "sound_source_id: The ID of the sound source to get the sound power of."
+]
+returns = "The sound power of the sound source in Watts [W]."
+
+[VA.set_sound_source_sound_power]
+brief = "Set the sound power of a sound source."
+args = [
+    "sound_source_id: The ID of the sound source to set the sound power of",
+    "sound_power: The sound power of the sound source in Watts [W]."
+]
+
+[VA.get_sound_source_muted]
+brief = "Get if a sound source is muted."
+args = [
+    "sound_source_id: The ID of the sound source to get the muted status of."
+]
+returns = "True if muted, False if not."
+
+[VA.set_sound_source_muted]
+brief = "Mute or unmute a sound source."
+args = [
+    "sound_source_id: The ID of the sound source to mute or unmute",
+    "muted: True if the sound source should be muted, False if not."
+]
+
+[VA.get_sound_source_pose]
+brief = "Get the pose of a sound source."
+detail = "Note: In this context, the pose is the combination of position and orientation."
+args = [
+    "sound_source_id: The ID of the sound source to get the pose of."
+]
+returns = "A data structure with the pose of the sound source."
+
+[VA.set_sound_source_pose]
+brief = "Set the pose of a sound source."
+detail = """Note: In this context, the pose is the combination of position and orientation.
+
+Examples:
+    Using the VAPython types:
+    >>> va.set_sound_source_pose(
+    ...    0,
+    ...    VAVector(0,1,2),
+    ...    VAQuaternion(0,0,0,1),
+    ...)
+
+    or using tuples and lists:
+    >>> va.set_sound_source_pose(
+    ...    0,
+    ...    [0,1,2],
+    ...    (0,0,0,1),
+    ...)"""
+args = [
+    "sound_source_id: The ID of the sound source to set the pose of",
+    "position: The position of the sound source",
+    "orientation: The orientation of the sound source as a quaternion."
+]
+
+[VA.get_sound_source_position]
+brief = "Get the position of a sound source."
+args = [
+    "sound_source_id: The ID of the sound source to get the position of."
+]
+returns = "The position of the sound source."
+
+[VA.set_sound_source_position]
+brief = "Set the position of a sound source."
+detail = """Examples:
+    Using the VAPython types:
+    >>> va.set_sound_source_position(
+    ...    0,
+    ...    VAVector(0,1,2),
+    ...)
+
+    or using tuples and lists:
+    >>> va.set_sound_source_position(
+    ...    0,
+    ...    [0,1,2],
+    ...)"""
+args = [
+    "sound_source_id: The ID of the sound source to set the position of",
+    "position: The position of the sound source."
+]
+
+[VA.get_sound_source_orientation]
+brief = "Get the orientation of a sound source."
+args = [
+    "sound_source_id: The ID of the sound source to get the orientation of."
+]
+returns = "The orientation of the sound source as a quaternion."
+
+[VA.set_sound_source_orientation]
+brief = "Set the orientation of a sound source."
+detail = """Examples:
+    Using the VAPython types:
+    >>> va.set_sound_source_orientation(
+    ...    0,
+    ...    VAQuaternion(0,0,0,1),
+    ...)
+
+    or using tuples and lists:
+    >>> va.set_sound_source_orientation(
+    ...    0,
+    ...    (0,0,0,1),
+    ...)"""
+args = [
+    "sound_source_id: The ID of the sound source to set the orientation of",
+    "orientation: The orientation of the sound source as a quaternion."
+]
+
+[VA.get_sound_source_orientation_vu]
+brief = "Get the orientation of a sound source as a view-up vector pair."
+args = [
+    "sound_source_id: The ID of the sound source to get the orientation of."
+]
+returns = "The orientation of the sound source as a view-up vector pair."
+
+[VA.set_sound_source_orientation_vu]
+brief = "Set the orientation of a sound source as a view-up vector pair."
+detail = """Examples:
+    Using the VAPython types, looking to the right:
+    >>> va.set_sound_source_orientation_vu(
+    ...    0,
+    ...    VAVector(1,0,0),
+    ...    VAVector(0,1,0),
+    ...)
+
+    or using tuples and lists:
+    >>> va.set_sound_source_orientation_vu(
+    ...    0,
+    ...    [1,0,0],
+    ...    (0,1,0),
+    ...)"""
+args = [
+    "sound_source_id: The ID of the sound source to set the orientation of",
+    "view: The view vector of the sound source",
+    "up: The up vector of the sound source."
+]
+
+# ----------- Sound Receiver ------------
+
+[VA.create_sound_receiver]
+brief = "Create a sound receiver."
+detail = "This method creates a new sound receiver and returns its ID."
+args = [
+    "name: The name for the sound receiver."
+]
+returns = "The ID of the created sound receiver, if the method succeeded, -1, otherwise."
+
+[VA.get_sound_receiver_ids]
+brief = "Get the IDs of all sound receivers in the scene."
+returns = "A list of sound receiver IDs."
+
+[VA.get_sound_receiver_info]
+brief = "Get information on a sound receiver."
+args = [
+    "sound_receiver_id: The ID of the sound receiver to get information on."
+]
+returns = "A data structure with information on the sound receiver."
+
+[VA.create_sound_receiver_explicit_renderer]
+brief = "Create a sound receiver for a certain renderer."
+detail = """This method creates a new sound receiver and returns its ID.
+This sound receiver will be skipped for rendering from any other but the given renderer.
+See [`get_rendering_modules`][vapython.va.VA.get_rendering_modules]
+to get a list of available renderers and their name identifier.
+"""
+args = [
+    "renderer_id: The name of the renderer to assign the sound receiver to",
+    "name: The name for the sound receiver."
+]
+returns = "The ID of the created sound receiver, if the method succeeded, -1, otherwise."
+
+[VA.delete_sound_receiver]
+brief = "Delete a sound receiver."
+args = [
+    "sound_receiver_id: The ID of the sound receiver to delete."
+]
+returns = "True, if successful."
+
+[VA.set_sound_receiver_enabled]
+brief = "Enables or disables a sound receiver."
+detail = "This removes the sound receiver from processing within renderers."
+args = [
+    "sound_receiver_id: The ID of the sound receiver to enable or disable",
+    "enabled: True if the sound receiver should be enabled, False if not."
+]
+
+[VA.get_sound_receiver_enabled]
+brief = "Returns sound receiver enabled/disabled status."
+args = [
+    "sound_receiver_id: The ID of the sound receiver to get the enabled/disabled status of."
+]
+returns = "True if enabled, False if not."
+
+[VA.get_sound_receiver_name]
+brief = "Get the name of a sound receiver."
+args = [
+    "sound_receiver_id: The ID of the sound receiver to get the name of."
+]
+returns = "The name of the sound receiver."
+
+[VA.set_sound_receiver_name]
+brief = "Set the name of a sound receiver."
+args = [
+    "sound_receiver_id: The ID of the sound receiver to rename",
+    "name: The new name for the sound receiver."
+]
+
+[VA.set_sound_receiver_parameters]
+brief = "Set the parameters of a sound receiver."
+detail = """This general parameter setter can be used for quick solutions changing
+sound receiver parameters without introducing new methods to the main interface.
+
+Examples:
+    >>> va.set_sound_receiver_parameters(
+    ...    0,
+    ...    {"special_parameter": 3.141},
+    ...)
+    0
+
+Note:
+    This method only works for special implementations and not in general."""
+args = [
+    "sound_receiver_id: The ID of the sound receiver to set the parameters of",
+    "parameters: A dictionary with string keys of parameters to set."
+]
+
+[VA.get_sound_receiver_parameters]
+brief = "Get the parameters of a sound receiver."
+detail = """This general parameter getter can be used for quick solutions retrieving
+sound receiver parameters without introducing new methods to the main interface.
+
+Note:
+    This method only works for special implementations and not in general."""
+args = [
+    "sound_receiver_id: The ID of the sound receiver to get the parameters of",
+    "parameters: A dictionary with string keys of parameters to get."
+]
+returns = "A dictionary with the parameters of the sound receiver."
+
+[VA.get_sound_receiver_directivity]
+brief = "Get the directivity ID of a sound receiver."
+detail = "If the sound receiver is not assigned a directivity, the method returns -1."
+args = [
+    "sound_receiver_id: The ID of the sound receiver to get the directivity ID of."
+]
+returns = "The ID of the directivity assigned to the sound receiver."
+
+[VA.set_sound_receiver_directivity]
+brief = "Assign a directivity to a sound receiver."
+detail = """In order to remove an assigned directivity, you can pass -1 to the method.
+Note:
+    For a binaural synthesis, the directivity should be set to the HRTF of the listener."""
+args = [
+    "sound_receiver_id: The ID of the sound receiver to assign the directivity to",
+    "directivity_id: The ID of the directivity to assign to the sound receiver."
+]
+
+[VA.get_sound_receiver_muted]
+brief = "Get if a sound receiver is muted."
+args = [
+    "sound_receiver_id: The ID of the sound receiver to get the muted status of."
+]
+returns = "True if muted, False if not."
+
+[VA.set_sound_receiver_muted]
+brief = "Mute or unmute a sound receiver."
+args = [
+    "sound_receiver_id: The ID of the sound receiver to mute or unmute",
+    "muted: True if the sound receiver should be muted, False if not."
+]
+
+[VA.get_sound_receiver_geometry_mesh]
+brief = "Get the geometry mesh ID of a sound receiver."
+args = [
+    "sound_receiver_id: The ID of the sound receiver to get the geometry mesh ID of."
+]
+returns = "The ID of the geometry mesh assigned to the sound receiver."
+
+[VA.set_sound_receiver_geometry_mesh]
+brief = "Assign a geometry mesh to a sound receiver."
+detail = """
+Sound receivers can have geometrical meshes assigned.
+This can be used to identify which geometry is part of a sound receiver that should not be
+considered for direct path occlusion, or if the moving receiver geometry is relevant for occlusion of 
+other source-receiver-paths, like cars, trains, etc."""
+args = [
+    "sound_receiver_id: The ID of the sound receiver to assign the geometry mesh to",
+    "geometry_mesh_id: The ID of the geometry mesh to assign."
+]
+
+[VA.get_sound_receiver_pose]
+brief = "Get the pose of a sound receiver."
+detail = "Note: In this context, the pose is the combination of position and orientation."
+args = [
+    "sound_receiver_id: The ID of the sound receiver to get the pose of."
+]
+returns = "A data structure with the pose of the sound receiver."
+
+[VA.set_sound_receiver_pose]
+brief = "Set the pose of a sound receiver."
+detail = """Note: In this context, the pose is the combination of position and orientation.
+
+Examples:
+    Using the VAPython types:
+    >>> va.set_sound_receiver_pose(
+    ...    0,
+    ...    VAVector(0,1,2),
+    ...    VAQuaternion(0,0,0,1),
+    ...)
+
+    or using tuples and lists:
+    >>> va.set_sound_receiver_pose(
+    ...    0,
+    ...    [0,1,2],
+    ...    (0,0,0,1),
+    ...)"""
+args = [
+    "sound_receiver_id: The ID of the sound receiver to set the pose of",
+    "position: The position of the sound receiver",
+    "orientation: The orientation of the sound receiver as a quaternion."
+]
+
+[VA.get_sound_receiver_position]
+brief = "Get the position of a sound receiver."
+args = [
+    "sound_receiver_id: The ID of the sound receiver to get the position of."
+]
+returns = "The position of the sound receiver."
+
+[VA.set_sound_receiver_position]
+brief = "Set the position of a sound receiver."
+detail = """Examples:
+    Using the VAPython types:
+    >>> va.set_sound_receiver_position(
+    ...    0,
+    ...    VAVector(0,1,2),
+    ...)
+
+    or using tuples and lists:
+    >>> va.set_sound_receiver_position(
+    ...    0,
+    ...    [0,1,2],
+    ...)"""
+args = [
+    "sound_receiver_id: The ID of the sound receiver to set the position of",
+    "position: The position of the sound receiver."
+]
+
+[VA.get_sound_receiver_orientation]
+brief = "Get the orientation of a sound receiver."
+args = [
+    "sound_receiver_id: The ID of the sound receiver to get the orientation of."
+]
+returns = "The orientation of the sound receiver as a quaternion."
+
+[VA.set_sound_receiver_orientation]
+brief = "Set the orientation of a sound receiver."
+detail = """Examples:
+    Using the VAPython types:
+    >>> va.set_sound_receiver_orientation(
+    ...    0,
+    ...    VAQuaternion(0,0,0,1),
+    ...)
+
+    or using tuples and lists:
+    >>> va.set_sound_receiver_orientation(
+    ...    0,
+    ...    (0,0,0,1),
+    ...)"""
+args = [
+    "sound_receiver_id: The ID of the sound receiver to set the orientation of",
+    "orientation: The orientation of the sound receiver as a quaternion."
+]
+
+[VA.get_sound_receiver_orientation_vu]
+brief = "Get the orientation of a sound receiver as a view-up vector pair."
+args = [
+    "sound_receiver_id: The ID of the sound receiver to get the orientation of."
+]
+returns = "The orientation of the sound receiver as a view-up vector pair."
+
+[VA.set_sound_receiver_orientation_vu]
+brief = "Set the orientation of a sound receiver as a view-up vector pair."
+detail = """Examples:
+    Using the VAPython types, looking to the right:
+    >>> va.set_sound_receiver_orientation_vu(
+    ...    0,
+    ...    VAVector(1,0,0),
+    ...    VAVector(0,1,0),
+    ...)
+
+    or using tuples and lists:
+    >>> va.set_sound_receiver_orientation_vu(
+    ...    0,
+    ...    [1,0,0],
+    ...    (0,1,0),
+    ...)"""
+args = [
+    "sound_receiver_id: The ID of the sound receiver to set the orientation of",
+    "view: The view vector of the sound receiver",
+    "up: The up vector of the sound receiver."
+]
+
+[VA.get_sound_receiver_head_above_torso_orientation]
+brief = "Get the head-above-torso orientation of a sound receiver."
+args = [
+    "sound_receiver_id: The ID of the sound receiver to get the head-above-torso orientation of."
+]
+returns = "The head-above-torso orientation of the sound receiver as a quaternion."
+
+[VA.set_sound_receiver_head_above_torso_orientation]
+brief = "Set the head-above-torso orientation of a sound receiver."
+detail = """Examples:
+    Using the VAPython types:
+    >>> va.set_sound_receiver_head_above_torso_orientation(
+    ...    0,
+    ...    VAQuaternion(0,0,0,1),
+    ...)
+
+    or using tuples and lists:
+    >>> va.set_sound_receiver_head_above_torso_orientation(
+    ...    0,
+    ...    (0,0,0,1),
+    ...)"""
+args = [
+    "sound_receiver_id: The ID of the sound receiver to set the head-above-torso orientation of",
+    "orientation: The head-above-torso orientation of the sound receiver as a quaternion."
+]
+
+[VA.get_sound_receiver_real_world_position_orientation_vu]
+brief = "Get the real-world position and orientation of a sound receiver."
+detail = """Note:
+    Coordinates refer to the center of the head on the axis which goes through both ears."""
+args = [
+    "sound_receiver_id: The ID of the sound receiver to get the real-world position and orientation of."
+]
+returns = "The real-world position and orientation of the sound receiver."
+
+[VA.set_sound_receiver_real_world_position_orientation_vu]
+brief = "Set the real-world position and orientation of a sound receiver."
+detail = """This function is used to provide the crosstalk-cancellation module with the
+current position of the sound receiver's head in the real-world.
+
+Examples:
+    Using the VAPython types, looking to the left:
+    >>> va.set_sound_receiver_real_world_position_orientation_vu(
+    ...    0,
+    ...    VAVector(0,1,2),
+    ...    VAVector(-1,0,0),
+    ...    VAVector(0,1,0),
+    ...)
+
+    or using tuples and lists:
+    >>> va.set_sound_receiver_real_world_position_orientation_vu(
+    ...    0,
+    ...    [0,1,2],
+    ...    [-1,0,0],
+    ...    (0,1,0),
+    ...)
+
+Note:
+    Coordinates refer to the center of the head on the axis which goes through both ears."""
+args = [
+    "sound_receiver_id: The ID of the sound receiver to set the real-world position and orientation of",
+    "position: The real-world position of the sound receiver",
+    "view: The view vector of the sound receiver",
+    "up: The up vector of the sound receiver."
+]
+
+[VA.get_sound_receiver_real_world_pose]
+brief = "Get the real-world pose of a sound receiver."
+detail = """Note:
+    Coordinates refer to the center of the head on the axis which goes through both ears."""
+args = [
+    "sound_receiver_id: The ID of the sound receiver to get the real-world pose of."
+]
+returns = "The real-world pose of the sound receiver."
+
+[VA.set_sound_receiver_real_world_pose]
+brief = "Set the real-world pose of a sound receiver."
+detail = """This function is used to provide the crosstalk-cancellation module with the
+current position of the sound receiver's head in the real-world.
+
+Examples:
+    Using the VAPython types:
+    >>> va.set_sound_receiver_real_world_pose(
+    ...    0,
+    ...    VAVector(0,1,2),
+    ...    VAQuaternion(0,0,0,1),
+    ...)
+
+    or using tuples and lists:
+    >>> va.set_sound_receiver_real_world_pose(
+    ...    0,
+    ...    [0,1,2],
+    ...    (0,0,0,1),
+    ...)"""
+args = [
+    "sound_receiver_id: The ID of the sound receiver to set the real-world pose of",
+    "position: The real-world position of the sound receiver",
+    "orientation: The real-world orientation of the sound receiver as a quaternion."
+]
+
+[VA.get_sound_receiver_real_world_head_above_torso_orientation]
+brief = "Get the real-world head-above-torso orientation of a sound receiver."
+detail = """A neutral HATO orientation will use the neutral head position looking to the front.
+A diverging orientation will describe how the head is oriented with
+respect to the torso (from the viewpoint of torso)."""
+args = [
+    "sound_receiver_id: The ID of the sound receiver to get the real-world head-above-torso orientation of."
+]
+returns = "The real-world head-above-torso orientation of the sound receiver as a quaternion."
+
+[VA.set_sound_receiver_real_world_head_above_torso_orientation]
+brief = "Set the real-world head-above-torso orientation of a sound receiver."
+detail = """A neutral HATO orientation will use the neutral head position looking to the front.
+A diverging orientation will describe how the head is oriented with
+respect to the torso (from the viewpoint of torso).
+
+Examples:
+    Using the VAPython types:
+    >>> va.set_sound_receiver_real_world_head_above_torso_orientation(
+    ...    0,
+    ...    VAQuaternion(0,0,0,1),
+    ...)
+
+    or using tuples and lists:
+    >>> va.set_sound_receiver_real_world_head_above_torso_orientation(
+    ...    0,
+    ...    (0,0,0,1),
+    ...)"""
+args = [
+    "sound_receiver_id: The ID of the sound receiver to set the real-world head-above-torso orientation of",
+    "orientation: The real-world head-above-torso orientation of the sound receiver as a quaternion."
+]
+
+# ----------- Rendering Modules ------------
+
+[VA.get_rendering_modules]
+brief = "Get the inforation of rendering modules."
+detail = """Returns a list of data structures with information on all available rendering modules.
+If the 'only_enabled' key is set to True, only enabled rendering modules are returned."""
+args = [
+    "only_enabled: If True, only enabled rendering modules are returned."
+]
+returns = "A list of data structures with information on all available rendering modules."
+
+[VA.set_rendering_module_muted]
+brief = "Mute or unmute a rendering module."
+args = [
+    "rendering_module_id: The name of the rendering module to mute or unmute",
+    "muted: True if the rendering module should be muted, False if not."
+]
+
+[VA.get_rendering_module_muted]
+brief = "Get if a rendering module is muted."
+args = [
+    "rendering_module_id: The name of the rendering module to get the muted status of."
+]
+returns = "True if muted, False if not."
+
+[VA.set_rendering_module_gain]
+brief = "Set the gain of a rendering module."
+detail = """Warning:
+    Setting gains will potentially penetrate a calibrated rendering and reproduction chain."""
+args = [
+    "rendering_module_id: The name of the rendering module to set the gain of",
+    "gain: The gain of the rendering module."
+]
+
+[VA.get_rendering_module_gain]
+brief = "Get the gain of a rendering module."
+args = [
+    "rendering_module_id: The name of the rendering module to get the gain of."
+]
+returns = "The gain of the rendering module."
+
+[VA.set_rendering_module_parameters]
+brief = "Set the parameters of a rendering module."
+detail = """Also refer to [the online documentation](https://www.virtualacoustics.de/VA/documentation/control/#query-registered-modules) or
+[`call_module()`][vapython.va.VA.call_module]"""
+args = [
+    "rendering_module_id: The name of the rendering module to set the parameters of",
+    "parameters: A dictionary with string keys of parameters to set."
+]
+
+[VA.get_rendering_module_parameters]
+brief = "Get the parameters of a rendering module."
+detail = """Also refer to [the online documentation](https://www.virtualacoustics.de/VA/documentation/control/#query-registered-modules) or
+[`call_module()`][vapython.va.VA.call_module]"""
+args = [
+    "rendering_module_id: The name of the rendering module to get the parameters of",
+    "parameters: A dictionary with string keys of parameters to get."
+]
+returns = "A dictionary with the parameters of the rendering module."
+
+# ----------- Reproduction Modules ------------
+
+[VA.get_reproduction_modules]
+brief = "Get the inforation of reproduction modules."
+detail = """Returns a list of data structures with information on all available reproduction modules.
+If the 'only_enabled' key is set to True, only enabled reproduction modules are returned."""
+args = [
+    "only_enabled: If True, only enabled reproduction modules are returned."
+]
+returns = "A list of data structures with information on all available reproduction modules."
+
+[VA.set_reproduction_module_muted]
+brief = "Mute or unmute a reproduction module."
+args = [
+    "reproduction_module_id: The name of the reproduction module to mute or unmute",
+    "muted: True if the reproduction module should be muted, False if not."
+]
+
+[VA.get_reproduction_module_muted]
+brief = "Get if a reproduction module is muted."
+args = [
+    "reproduction_module_id: The name of the reproduction module to get the muted status of."
+]
+returns = "True if muted, False if not."
+
+[VA.set_reproduction_module_gain]
+brief = "Set the gain of a reproduction module."
+detail = """Warning:
+    Setting gains will potentially penetrate a calibrated rendering and reproduction chain."""
+args = [
+    "reproduction_module_id: The name of the reproduction module to set the gain of",
+    "gain: The gain of the reproduction module."
+]
+
+[VA.get_reproduction_module_gain]
+brief = "Get the gain of a reproduction module."
+args = [
+    "reproduction_module_id: The name of the reproduction module to get the gain of."
+]
+returns = "The gain of the reproduction module."
+
+[VA.set_reproduction_module_parameters]
+brief = "Set the parameters of a reproduction module."
+detail = """Also refer to [the online documentation](https://www.virtualacoustics.de/VA/documentation/control/#query-registered-modules) or
+[`call_module()`][vapython.va.VA.call_module]"""
+args = [
+    "reproduction_module_id: The name of the reproduction module to set the parameters of",
+    "parameters: A dictionary with string keys of parameters to set."
+]
+
+[VA.get_reproduction_module_parameters]
+brief = "Get the parameters of a reproduction module."
+detail = """Also refer to [the online documentation](https://www.virtualacoustics.de/VA/documentation/control/#query-registered-modules) or
+[`call_module()`][vapython.va.VA.call_module]"""
+args = [
+    "reproduction_module_id: The name of the reproduction module to get the parameters of",
+    "parameters: A dictionary with string keys of parameters to get."
+]
+returns = "A dictionary with the parameters of the reproduction module."
+
+# ----------- Auralization modes ------------
+
+[VA.get_global_auralization_mode]
+brief = "Get the global auralization mode."
+detail = """Warns:
+    Use the overwritten method."""
+
+[VA.set_global_auralization_mode]
+brief = "Set the global auralization mode."
+detail = """Warns:
+    Use the overwritten method."""
+
+[VA.set_sound_source_auralization_mode]
+brief = "Set the auralization mode of a sound source."
+detail = """Warns:
+    Use the overwritten method."""
+
+[VA.get_sound_source_auralization_mode]
+brief = "Get the auralization mode of a sound source."
+detail = """Warns:
+    Use the overwritten method."""
+
+[VA.set_sound_receiver_auralization_mode]
+brief = "Set the auralization mode of a sound receiver."
+detail = """Warns:
+    Use the overwritten method."""
+
+[VA.get_sound_receiver_auralization_mode]
+brief = "Get the auralization mode of a sound receiver."
+detail = """Warns:
+    Use the overwritten method."""
+
+[VA.set_rendering_module_auralization_mode]
+brief = "Set the auralization mode of a rendering module."
+detail = """Warns:
+    Use the overwritten method."""
+
+[VA.get_rendering_module_auralization_mode]
+brief = "Get the auralization mode of a rendering module."
+detail = """Warns:
+    Use the overwritten method."""
diff --git a/scripts/templates/wrapper.py.j2 b/scripts/templates/wrapper.py.j2
new file mode 100644
index 0000000000000000000000000000000000000000..86cc0805f68d315bd5d9fbdbc77931dd43a4172c
--- /dev/null
+++ b/scripts/templates/wrapper.py.j2
@@ -0,0 +1,243 @@
+import asyncio
+import concurrent
+from threading import Thread
+from typing import Callable, List, Optional, Tuple, Union
+
+from grpclib.client import Channel
+from grpclib.const import Cardinality
+
+import vapython.vanet._helper as vanet_helper
+import vapython.vanet._vanet_grpc as vanet
+from vapython._types import VAStruct, VAVector, VAQuaternion
+
+class VAInterface:
+    """A class for interfacing with the VA server.
+
+    This class is used to connect to the VA server and call its interface methods.
+    Note that this is generated code and should not be modified.
+
+    The overwritten class [VA][vapython.va.VA] should be used instead of this class.
+    """
+
+    def __init__(self) -> None:
+        """Initializes the VAInterface object"""
+        self.__service: Optional[vanet.VaStub] = None
+        self._channel: Optional[Channel] = None
+        self._event_callbacks: List[Callable[[vanet.Event], None]] = []
+        self._event_thread: Optional[Thread] = None
+        self._connected: bool = False
+        self._event_thread_future: Optional[concurrent.futures.Future] = None
+        self.__loop: Optional[asyncio.AbstractEventLoop] = None
+
+    def __del__(self):
+        """Destructor for the VAInterface object.
+
+        Closes the channel if it is open.
+        """
+
+        self.disconnect()
+
+    def connect(self, host: str = "localhost", port: int = 12340, *, add_event_handling: bool = True) -> None:
+        """Connects to the VA server.
+
+        Args:
+            host: The hostname of the VA server.
+            port: The port number of the VA server.
+            add_event_handling: If True, VAPython will receive events from the VAServer.
+                If False, no events will be received and the server will not be able to determine that a client is connected.
+                However, not sending events can reduce network traffic.
+        """
+
+        self._channel = Channel(host=host, port=port)
+
+        self.__loop = self._channel._loop
+
+        self.__service = vanet.VaStub(self._channel)
+
+        def background_event_tread(loop: asyncio.AbstractEventLoop) -> None:
+            asyncio.set_event_loop(loop)
+            loop.run_forever()
+
+        self._event_thread = Thread(target=background_event_tread, args=(self._loop,) , daemon=True)
+        self._event_thread.start()
+
+        try:
+            self._get_state()
+        except ConnectionError as e:
+            print(f"Could not connect to VA server. Error: {e}")
+            print("Please make sure the VA server is running and accessible.")
+            self.disconnect()
+            exit(1)
+
+        self._connected = True
+
+        if add_event_handling:
+            # Event loop in separate thread, based on: https://gist.github.com/dmfigol/3e7d5b84a16d076df02baa9f53271058
+            async def handle_events():
+                async with self._service.channel.request(
+                    "/VA.VA/AttachEventHandler",
+                    Cardinality.UNARY_STREAM,
+                    vanet.betterproto_lib_google_protobuf.Empty,
+                    vanet.Event,
+                ) as stream:
+                    await stream.send_message(
+                        vanet.betterproto_lib_google_protobuf.Empty()
+                    )
+
+                    while self._connected:
+                        event = await stream.recv_message()
+                        if event is None:
+                            await stream.end()
+                            break
+                        else:
+                            for event_callback in self._event_callbacks:
+                                event_callback(event)
+                            await asyncio.sleep(0.1)
+
+                    await stream.cancel()
+
+            self._event_thread_future = asyncio.run_coroutine_threadsafe(handle_events(), self._loop)
+
+    def disconnect(self):
+        """Disconnects from the VA server."""
+        self._connected = False
+        if self._event_thread_future:
+            self._event_thread_future.result()
+
+        if self.__loop and self._loop.is_running():
+            self.__loop.call_soon_threadsafe(self.__loop.stop)
+
+        if self._event_thread:
+            self._event_thread.join()
+            self._event_thread = None
+
+        if self._channel:
+            self._channel.close()
+
+    def get_server_address(self) -> Optional[str]:
+        """Returns the address of the VA server.
+
+        Returns:
+            The address of the VA server. If not connected to a server, returns `None`.
+        """
+
+        if self._channel:
+            host = self._channel._host
+            port = self._channel._port
+            return f"{host}:{port}"
+        return None
+
+    {% for method in methods %}
+    def {{ method.name }}(self,
+    {% for arg in method.args %}
+        {{ arg.name}}: {{ arg.type }} {% if arg.default %} = {{ arg.default }}{% endif %},
+    {% endfor %}
+    {% if method.kwargs %}
+        *,
+    {% for kwarg in method.kwargs %}
+        {{ kwarg.name }}: {{ kwarg.type }} {% if kwarg.default %} = {{ kwarg.default }}{% endif %},
+    {% endfor %}
+    {% endif %}
+    ){% if method.returns %} -> {{ method.returns }}{% endif %}:
+        {% if method.docstring %}
+        """{{ method.docstring -}}
+        """
+
+        {% endif %}
+        return_value = asyncio.run_coroutine_threadsafe(
+            self._service.{{ method.org_name }}(
+                vanet.{{ method.message_type }}(
+                    {% for arg in method.args %}
+                    {% if arg.type == "VAStruct" %}
+                    {{ arg.name_org }}=vanet_helper.convert_struct_to_vanet({{ arg.name }}),
+                    {% elif "VAVector" in arg.type %}
+                    {{ arg.name_org }}=vanet_helper.convert_vector_to_vanet({{ arg.name }}),
+                    {% elif "VAQuaternion" in arg.type %}
+                    {{ arg.name_org }}=vanet_helper.convert_quaternion_to_vanet({{ arg.name }}),
+                    {% else %}
+                    {{ arg.name_org }}={{ arg.name }},
+                    {% endif %}
+                    {% endfor %}
+                    {% for arg in method.kwargs %}
+                    {{ arg.name_org }}={{ arg.name }},
+                    {% endfor %}
+                )
+            ),
+            self._loop
+        ).result()
+        {% if not method.wrapped_return_type %}
+        {% if method.returns == "VAStruct" %}
+        return vanet_helper.convert_struct_from_vanet(return_value)
+        {% elif method.returns == "VAVector" %}
+        return vanet_helper.convert_vector_from_vanet(return_value)
+        {% elif method.returns == "VAQuaternion" %}
+        return vanet_helper.convert_quaternion_from_vanet(return_value)
+        {% else %}
+        return return_value
+        {% endif %}
+        {% else %}
+        return return_value.value
+        {% endif %}
+
+    {% endfor %}
+
+    @property
+    def _service(self) -> vanet.VaStub:
+        """The service object for the VA server.
+
+        Returns:
+            The service object for the VA server.
+        """
+        if not self.__service:
+            msg = "Not connected to a server"
+            raise ValueError(msg)
+
+        return self.__service
+
+    @property
+    def _loop(self) -> asyncio.AbstractEventLoop:
+        """The event loop.
+
+        Returns:
+            The event loop.
+        """
+        if not self.__loop:
+            msg = "Not connected to a server"
+            raise ValueError(msg)
+
+        return self.__loop
+
+    def attach_event_handler(self, callback: Callable[[vanet.Event], None]) -> None:
+        """Attaches a callback function to the event handler (for advanced users).
+
+        Generally, the VAServer sends event messages to connected interfaces whenever something in the VACore changes,
+        e.g. a sound source is created or changes its position. An interface can connect a callback function to react
+        to those events.
+
+        See also [`detach_event_handler()`][vapython.va.VA.detach_event_handler].
+
+        Examples:
+        ```python
+        from vapython import VA
+        def callback(event_data):
+            print(event_data)
+        
+        # This will print the output of every event sent by the VAServer
+        # Careful: This might lead to spamming your terminal output.
+        va.attach_event_handler(callback)
+        ```
+
+        Args:
+            callback: The callback function to attach.
+        """
+        self._event_callbacks.append(callback)
+
+    def detach_event_handler(self, callback: Callable[[vanet.Event], None]) -> None:
+        """Detaches a previously attached callback function from the event handler (for advanced users).
+
+        See also [`attach_event_handler()`][vapython.va.VA.attach_event_handler].
+
+        Args:
+            callback: The callback function to detach.
+        """
+        self._event_callbacks.remove(callback)
\ No newline at end of file
diff --git a/src/common.hpp b/src/common.hpp
deleted file mode 100644
index c0f010344c406102d2434070b4601adf52ca0f56..0000000000000000000000000000000000000000
--- a/src/common.hpp
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef IW_VA_PYTHON_COMMON
-#define IW_VA_PYTHON_COMMON
-
-// Temporarily "disable" debug to include Python.
-// This is done, so that the Python debug libraries are not needed, even when building
-// this module in debug mode.
-#ifdef _DEBUG
-#	define TMP_DEBUG
-#	undef _DEBUG
-// Fix msvc compiler issue error C2039: '_invalid_parameter': is not a member of '`global namespace''
-// See https://github.com/microsoft/STL/issues/2335#issuecomment-967293809 for more detail
-#	define _STL_CRT_SECURE_INVALID_PARAMETER( expr ) _CRT_SECURE_INVALID_PARAMETER( expr )
-#endif
-#include <Python.h>
-#ifdef TMP_DEBUG
-#	define _DEBUG
-#	undef TMP_DEBUG
-#endif
-
-#endif
\ No newline at end of file
diff --git a/src/vapython/__init__.py b/src/vapython/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..275f4341df7109d1dee801f993127e24318034f0
--- /dev/null
+++ b/src/vapython/__init__.py
@@ -0,0 +1,12 @@
+# SPDX-FileCopyrightText: 2024-present Pascal Palenda <pascal.palenda@akustik.rwth-aachen.de>
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import nest_asyncio  # type: ignore
+
+from vapython.va import VA
+
+nest_asyncio.apply()
+
+
+__all__ = ["VA"]
diff --git a/src/vapython/_helper.py b/src/vapython/_helper.py
new file mode 100644
index 0000000000000000000000000000000000000000..83aeaf153dc668270c568e2c065eb3191e7006f3
--- /dev/null
+++ b/src/vapython/_helper.py
@@ -0,0 +1,246 @@
+# SPDX-FileCopyrightText: 2024-present Pascal Palenda <pascal.palenda@akustik.rwth-aachen.de>
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import configparser
+import re
+import shutil
+import subprocess
+import warnings
+from importlib import metadata
+from pathlib import Path
+from typing import Optional
+
+from platformdirs import user_config_dir
+
+possible_auralization_modes = {
+    "Nothing": (0, "Nothing"),
+    "DS": (1 << 0, "Direct Sound"),
+    "ER": (1 << 1, "Early Reflections"),
+    "DD": (1 << 2, "Diffuse Decay"),
+    "SD": (1 << 3, "Source Directivity"),
+    "MA": (1 << 4, "Medium Absorption"),
+    "TV": (1 << 5, "Atmospheric Temporal Variations"),
+    "SC": (1 << 6, "Scattering"),
+    "DF": (1 << 7, "Diffraction"),
+    "NF": (1 << 8, "Near-field Effects"),
+    "DP": (1 << 9, "Doppler Effect"),
+    "SL": (1 << 10, "Spherical Spreading Loss"),
+    "TR": (1 << 11, "Transmission Loss"),
+    "AB": (1 << 12, "Absorption"),
+}
+"""
+Dictionary containing all possible auralization modes.
+
+Auralization modes allow to enable or disable specific effects in the auralization process during runtime.
+A more in-depth explanation of each auralization mode can be found [here](https://www.virtualacoustics.de/VA/documentation/control/#global-auralization-mode).
+"""
+
+default_auralization_mode = sum(
+    [
+        possible_auralization_modes["DS"][0],
+        possible_auralization_modes["SD"][0],
+        possible_auralization_modes["MA"][0],
+        possible_auralization_modes["DP"][0],
+        possible_auralization_modes["SL"][0],
+        possible_auralization_modes["TR"][0],
+        possible_auralization_modes["AB"][0],
+    ]
+)
+"""
+Set of default auralization modes.
+
+For more information see [`possible_auralization_modes`][vapython._helper.possible_auralization_modes] and the
+[VA documentation](https://www.virtualacoustics.de/VA/documentation/control/#global-auralization-mode).
+"""
+
+
+def parse_aura_mode_str(input_string: str, aura_mode: int = default_auralization_mode) -> int:
+    """Parses the input string and returns the corresponding auralization mode as an integer.
+
+    The input string can contain a list of auralization modes separated by commas. Each auralization mode can be
+    prefixed with a "+" or "-" sign to enable or disable the mode, respectively.
+    The aura mode can also be set to "*" to enable all aura modes.
+
+    Examples:
+        >>> parse_aura_mode_str("DS,ER,DD")
+        7
+        >>> parse_aura_mode_str("+DS,-ER,DD")
+        5
+        >>> parse_aura_mode_str("*")
+        8191
+        >>> parse_aura_mode_str("default")
+        7705
+        >>> parse_aura_mode_str("")
+        0
+
+    Args:
+        input_string: The input string representing the auralization mode.
+        aura_mode (optional): The default auralization mode. Defaults to `default_auralization_mode`.
+
+    Returns:
+        The auralization mode as an integer.
+
+    Raises:
+        ValueError: If an invalid auralization mode is encountered in the input string.
+    """
+    if input_string == "":
+        return 0  # No aura mode
+
+    if input_string == "*":
+        return sum(m[0] for m in possible_auralization_modes.values())  # All aura modes
+
+    if input_string.lower() == "default":
+        return default_auralization_mode
+
+    split_input = re.findall(r"(?:(\+|-)([A-Z]{2})(?:,|$))+?", input_string, re.IGNORECASE)
+
+    enable = []
+    disable = []
+
+    for sign, mode in split_input:
+        # check if mode is valid
+        if mode not in possible_auralization_modes:
+            msg = f"Invalid aura mode: {mode}"
+            raise ValueError(msg)
+
+        if sign == "+":
+            enable.append(possible_auralization_modes[mode][0])
+        else:
+            disable.append(possible_auralization_modes[mode][0])
+
+    aura_mode = aura_mode | sum(enable)
+    aura_mode = aura_mode & ~sum(disable)
+
+    return aura_mode  # noqa: RET504
+
+
+def convert_aura_mode_to_str(aura_mode: int, *, short_form: bool = True) -> str:
+    """Converts an auralization mode value to a string representation.
+
+    Auralization modes are represented as a bitfield. This function converts the bitfield to a string representation.
+    Some special values are also supported:
+    - 0: "Nothing"
+    - All modes: "All"
+    - Default modes, see [`default_auralization_mode`][vapython._helper.default_auralization_mode]: "Default"
+
+See also [`possible_auralization_modes`][vapython._helper.possible_auralization_modes] for all possible aura modes.
+
+    Examples:
+        >>> convert_aura_mode_to_str(0)
+        "Nothing"
+        >>> convert_aura_mode_to_str(0b1111111111111)
+        "All"
+        >>> convert_aura_mode_to_str(0b1111111111111, short_form=False)
+        "Direct Sound,Early Reflections,Diffuse Decay,Source Directivity,Medium Absorption,...
+        Atmospheric Temporal Variations,Scattering,Diffraction,Near-field Effects,Doppler Effect,...
+        Spherical Spreading Loss,Transmission Loss,Absorption"
+        >>> convert_aura_mode_to_str(7)
+        "DS,ER,DD"
+
+    Args:
+        aura_mode: The auralization mode value to convert.
+        short_form (optional): Whether to use the short form or the full name for each aura mode. Defaults to True.
+
+    Returns:
+        The string representation of the auralization mode value.
+
+    """
+    if aura_mode == 0 and not short_form:
+        return "Nothing"
+
+    if aura_mode == sum(m[0] for m in possible_auralization_modes.values()) and not short_form:
+        return "All"
+
+    if aura_mode == default_auralization_mode and not short_form:
+        return "Default"
+
+    aura_modes = []
+
+    for mode, (value, name) in possible_auralization_modes.items():
+        if aura_mode & value:
+            if short_form:
+                aura_modes.append(mode)
+            else:
+                aura_modes.append(name)
+
+    return ",".join(aura_modes)
+
+
+def find_server_executable() -> Optional[Path]:
+    """Finds the a VAServer executable.
+
+    This function first checks if an executable path is stored in the user's configuration directory.
+    If not, the function checks if the VAServer executable is available in the system's PATH.
+    If not found, it opens a file dialog to allow the user to manually select the VAServer executable.
+    A valid executable path is stored in the user's configuration directory for future use.
+
+    This function also checks if the version of the VAServer executable matches the version of the VAPython package
+    and issues a warning if a mismatch is detected. This however does not mean, that the VAServer executable is not
+    compatible with the VAPython package.
+
+    Returns:
+        Union[Path, None]: The path to the VAServer executable if found, None otherwise.
+    """
+
+    config_dir = Path(user_config_dir("vapython", version=metadata.version("vapython")))
+    config_file = config_dir / "vapython.cfg"
+
+    executable: Optional[Path] = None
+    if config_file.exists():
+        config = configparser.ConfigParser()
+        config.read(config_file)
+        executable = Path(config["executable"]["path"])
+
+    if executable is None or not executable.is_file() or not executable.exists():
+        executable_raw = shutil.which("VAServer")
+
+        if executable_raw is not None:
+            executable = Path(executable_raw)
+
+        if executable is None:
+            import tkinter as tk
+            from tkinter import filedialog
+
+            root = tk.Tk()
+            root.withdraw()
+            root.attributes("-topmost", 1)
+
+            print(  # noqa: T201
+                "NOTE: A window has opened for specifying the path to the 'VAServer.exe'. This should be located in the `bin` subfolder of the respective VA directory."
+            )
+            executable_raw = filedialog.askopenfilename(
+                title="Select VAServer executable",
+                filetypes=[("VAServer executable", "*.exe")],
+                initialfile="VAServer.exe",
+            )
+
+            if not executable_raw:
+                return None
+
+            executable = Path(executable_raw)
+
+        result = subprocess.run([str(executable), "--version"], capture_output=True, check=False)
+        exe_version_raw = result.stdout.decode().strip()
+        exe_version = re.split(r"(\d{4}).(\D)", exe_version_raw)
+        major_version = exe_version[1]
+        minor_version = exe_version[2]
+        minor_version = ord(minor_version) - 97
+        exe_version_final = f"{major_version}.{minor_version}"
+
+        if metadata.version("vapython") != exe_version_final:
+            warnings.warn(
+                (
+                    "Version mismatch between VAPython package and VAServer executable. "
+                    f"Python package version: {metadata.version('vapython')}, executable version: {exe_version_final}"
+                ),
+                stacklevel=2,
+            )
+
+        config = configparser.ConfigParser()
+        config["executable"] = {"path": str(executable)}
+        config_dir.mkdir(parents=True, exist_ok=True)
+        with open(config_file, "w") as config_file_handle:
+            config.write(config_file_handle)
+
+    return Path(executable)
diff --git a/src/vapython/_types.py b/src/vapython/_types.py
new file mode 100644
index 0000000000000000000000000000000000000000..0ff3c47039cf55d8784125d355dc22eb1dc57f90
--- /dev/null
+++ b/src/vapython/_types.py
@@ -0,0 +1,54 @@
+# SPDX-FileCopyrightText: 2024-present Pascal Palenda <pascal.palenda@akustik.rwth-aachen.de>
+#
+# SPDX-License-Identifier: Apache-2.0
+
+"""Type definitions for VAPython.
+
+This mainly includes the type definitions for the VA data structures in pure Python.
+These include the `VAStruct`, `VAVector`, and `VAQuaternion` types which map to a dict and two named tuples, respectively.
+"""
+
+from typing import NamedTuple, Union
+
+# Union of all value types a single VAStruct entry may hold; the string
+# forward reference "VAStruct" allows structs to be nested arbitrarily deep.
+VAStructValue = Union[bool, int, float, str, bytes, list[float], "VAStruct"]
+
+VAStruct = dict[str, VAStructValue]
+"""Represents a structure in VAPython.
+
+Used to communicate complex data and messages to and from the VAServer.
+"""
+
+
+class VAVector(NamedTuple):
+    """Represents a vector in three-dimensional space.
+
+    Note:
+        In VAPython, vectors follow the OpenGL coordinate system, i.e.,
+        the x-axis points to the right, the y-axis points upwards, and the z-axis points towards the viewer.
+
+    Attributes:
+        x (float): The x-coordinate of the vector.
+        y (float): The y-coordinate of the vector.
+        z (float): The z-coordinate of the vector.
+    """
+
+    x: float
+    y: float
+    z: float
+
+
+class VAQuaternion(NamedTuple):
+    """
+    Represents a quaternion in a 3D space.
+
+    Attributes:
+        x (float): The x-coordinate of the quaternion.
+        y (float): The y-coordinate of the quaternion.
+        z (float): The z-coordinate of the quaternion.
+        w (float): The w-coordinate of the quaternion.
+    """
+
+    x: float
+    y: float
+    z: float
+    w: float
diff --git a/src/vapython/examples/__init__.py b/src/vapython/examples/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/vapython/examples/__main__.py b/src/vapython/examples/__main__.py
new file mode 100644
index 0000000000000000000000000000000000000000..887b6c53cbccac43f33613f89b0c8a536bd3fe54
--- /dev/null
+++ b/src/vapython/examples/__main__.py
@@ -0,0 +1,44 @@
+import os
+import sys
+from pathlib import Path
+
+
+def main():
+    """
+    Main function to list and open example scripts.
+    This function performs the following steps:
+    1. Lists all Python files in the current directory that contain "example" in their name.
+    2. If no command-line arguments are provided, it prints the list of example files and prompts the user to select one by index.
+    3. If a command-line argument is provided, it uses that as the index of the example file to open.
+    4. Validates the selected index and either exits, prints an error message, or opens the file.
+    Returns:
+        None
+    """
+
+    current_path = Path(__file__).parent
+
+    example_files = list(current_path.glob("*example*.py"))
+
+    if len(sys.argv) == 1:
+        for i, example_file in enumerate(example_files):
+            print(f"{i}: {example_file.stem}")
+
+        print(f"{len(example_files)}: Exit")
+
+        example_index = int(input("Enter the index of the example you want to open: "))
+    else:
+        example_index = int(sys.argv[1])
+
+    if example_index == len(example_files):
+        return
+
+    if example_index < 0 or example_index >= len(example_files):
+        print("Invalid index")
+        return
+
+    example_file = example_files[example_index]
+    os.startfile(example_file)  # noqa: S606
+
+
+if __name__ == "__main__":
+    main()
diff --git a/src/vapython/examples/va_example_offline_simulation.py b/src/vapython/examples/va_example_offline_simulation.py
new file mode 100644
index 0000000000000000000000000000000000000000..916de882afd96e4cf4e270f7b7d3e6813506a361
--- /dev/null
+++ b/src/vapython/examples/va_example_offline_simulation.py
@@ -0,0 +1,67 @@
+"""VA offline simulation/auralization example
+
+Requires VA to run with a virtual audio device that can be triggered by
+the user. Also the rendering module(s) have to be set to record the output
+to hard drive. To shutdown the server from Matlab, remote control modus
+must be activated. Finally, the utilized buffer size and sampling rate
+specified in the VACore.ini must match the parameters used here.
+
+You can run the run_VAServer_recording.bat batch script to start a
+VAServer using the required settings. This is located in VA's main folder.
+"""
+
+import numpy as np
+
+from vapython import VA
+
+buffer_size = 64
+sampling_rate = 44100
+
+# Connect and set up simple scene
+va = VA()
+
+va.connect("localhost")
+
+L = va.create_sound_receiver("VA_Listener")
+va.set_sound_receiver_position(L, [0, 1.7, 0])
+H = va.create_directivity_from_file("$(DefaultHRIR)")
+va.set_sound_receiver_directivity(L, H)
+
+S = va.create_sound_source("VA_Source")
+X = va.create_signal_source_buffer_from_file("$(DemoSound)")
+va.set_signal_source_buffer_playback_action(X, "play")
+va.set_signal_source_buffer_looping(X, looping=True)
+va.set_sound_source_signal_source(S, X)
+
+# Example for a synchronized scene update & audio processing simulation/auralization
+
+time_step = buffer_size / sampling_rate  # here: depends on block size and sample rate
+manual_clock = 0.0
+va.set_core_clock(0)
+
+spatial_step = 0.005
+print(f"Resulting sound source speed: {spatial_step / time_step} m/s")
+
+num_steps = 6000
+print(f"Simulation result duration: {num_steps * time_step} s")
+
+x = np.linspace(-100, 100, num=num_steps)  # motion from x = -100m to x = 100m
+
+print("Hold on, running auralization")
+for n in range(len(x)):
+    # Modify scene as you please
+    va.set_sound_source_position(S, [x[n], 1.7, -3])
+
+    # Increment core clock
+    manual_clock += time_step
+    va.call_module("manualclock", {"time": manual_clock})
+
+    # Process audio chain by incrementing one block
+    va.call_module("virtualaudiodevice", {"trigger": True})
+
+    print(f"\rProgress: {(n + 1) / num_steps:.2%}", end="")
+
+print("\nSimulation completed.")
+
+va.shutdown_server()
+va.disconnect()
diff --git a/src/vapython/examples/va_example_simple.py b/src/vapython/examples/va_example_simple.py
new file mode 100644
index 0000000000000000000000000000000000000000..8f6dd4952575d8f5e3d4303888f64548d45a831e
--- /dev/null
+++ b/src/vapython/examples/va_example_simple.py
@@ -0,0 +1,49 @@
+"""VA simple example
+
+Note that the VAServer must be started manually before running this
+example. Check the quick start guide for more information:
+https://www.virtualacoustics.org/VA/documentation/#quick-start-guide
+"""
+
+from pathlib import Path
+
+from vapython import VA
+
+# Create VA
+va = VA()
+
+# Connect to VA application (start the application first)
+va.connect("localhost")
+
+# Reset VA to clear the scene
+va.reset()
+
+# Control output gain
+va.set_output_gain(0.25)
+
+# Add the current absolute folder path to VA application
+current_folder = Path(__file__).parent.absolute()
+va.add_search_path(current_folder)
+
+# Create a signal source and start playback
+X = va.create_signal_source_buffer_from_file("$(DemoSound)")
+va.set_signal_source_buffer_playback_action(X, "play")
+va.set_signal_source_buffer_looping(X, looping=True)
+
+# Create a virtual sound source and set a position
+# (front-right of receiver)
+S = va.create_sound_source("VA_Source")
+va.set_sound_source_position(S, [2, 1.7, -2])
+
+# Create a listener with a HRTF and position him
+L = va.create_sound_receiver("VA_Listener")
+va.set_sound_receiver_position(L, [0, 1.7, 0])
+
+H = va.create_directivity_from_file("$(DefaultHRIR)")
+va.set_sound_receiver_directivity(L, H)
+
+# Connect the signal source to the virtual sound source
+va.set_sound_source_signal_source(S, X)
+
+# More information
+print("Type `help(VA)` for more information or visit 'www.virtualacoustics.org/VA/documentation'.")
diff --git a/src/vapython/py.typed b/src/vapython/py.typed
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/vapython/tracking/NatNetClient.py b/src/vapython/tracking/NatNetClient.py
new file mode 100644
index 0000000000000000000000000000000000000000..f4b0a8e7e908745edbd80ec478f04f73f8eed775
--- /dev/null
+++ b/src/vapython/tracking/NatNetClient.py
@@ -0,0 +1,509 @@
+# SPDX-FileCopyrightText: NaturalPoint, Inc
+#
+# License:
+# This software is provided by the copyright holders and contributors "as is" and
+# any express or implied warranties, including, but not limited to, the implied
+# warranties of merchantability and fitness for a particular purpose are disclaimed.
+# In no event shall NaturalPoint, Inc. or contributors be liable for any direct,
+# indirect, incidental, special, exemplary, or consequential damages
+# (including, but not limited to, procurement of substitute goods or services;
+# loss of use, data, or profits; or business interruption) however caused
+# and on any theory of liability, whether in contract, strict liability,
+# or tort (including negligence or otherwise) arising in any way out of
+# the use of this software, even if advised of the possibility of such damage.
+
+"""
+This module provides a client for the NatNet protocol used by OptiTrack motion capture systems.
+It was taken from the NatNet SDK version 2.10.0 and modified to work within the VAPython environment.
+"""
+
+import select
+import socket
+import struct
+from threading import Thread
+
+
+def trace(*args):
+    # Debug trace hook: intentionally a no-op. Re-enable the commented-out
+    # print to get verbose packet-parsing output.
+    pass  # print( "".join(map(str,args)) )
+
+
+# Create structs for reading various object types to speed up parsing.
+# All values are little-endian ("<"), matching the NatNet wire format used below.
+Vector3 = struct.Struct("<fff")
+Quaternion = struct.Struct("<ffff")
+FloatValue = struct.Struct("<f")
+DoubleValue = struct.Struct("<d")
+
+
+class NatNetClient:
+    def __init__(self):
+        # Change this value to the IP address of the NatNet server.
+        self.serverIPAddress = "169.254.201.120"
+
+        # This should match the multicast address listed in Motive's streaming settings.
+        self.multicastAddress = "239.255.42.99"
+
+        # NatNet Command channel
+        self.commandPort = 1510
+
+        # NatNet Data channel
+        self.dataPort = 1511
+
+        # Set this to a callback method of your choice to receive per-rigid-body data at each frame.
+        self.rigidBodyListener = None
+
+        # NatNet stream version. This will be updated to the actual version the server is using during initialization.
+        self.__natNetStreamVersion = (3, 0, 0, 0)
+
+        self.__stop = False
+        self.__dataThread: Thread | None = None
+        self.__commandThread: Thread | None = None
+
+    # Client/server message ids: the 2-byte packet-type field of the NatNet
+    # protocol, as dispatched on in __processMessage and sendCommand.
+    NAT_PING = 0
+    NAT_PINGRESPONSE = 1
+    NAT_REQUEST = 2
+    NAT_RESPONSE = 3
+    NAT_REQUEST_MODELDEF = 4
+    NAT_MODELDEF = 5
+    NAT_REQUEST_FRAMEOFDATA = 6
+    NAT_FRAMEOFDATA = 7
+    NAT_MESSAGESTRING = 8
+    NAT_DISCONNECT = 9
+    NAT_UNRECOGNIZED_REQUEST = 100
+
+    # Create a data socket to attach to the NatNet stream
+    def __createDataSocket(self, port):
+        """Create a non-blocking UDP socket joined to the NatNet multicast group on `port`."""
+        result = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)  # Internet  # UDP
+        result.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+        result.bind(("", port))
+
+        # Join the multicast group on all interfaces (INADDR_ANY).
+        mreq = struct.pack("4sl", socket.inet_aton(self.multicastAddress), socket.INADDR_ANY)
+        result.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
+
+        result.setblocking(False)
+        return result
+
+    # Create a command socket to attach to the NatNet stream
+    def __createCommandSocket(self):
+        """Create a non-blocking, broadcast-capable UDP socket bound to an ephemeral port."""
+        result = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+        result.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+        result.bind(("", 0))
+        result.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
+        result.setblocking(False)
+
+        return result
+
+    # Unpack a rigid body object from a data packet
+    def __unpackRigidBody(self, data):
+        """Parse one rigid body from `data` and return the number of bytes consumed.
+
+        Invokes self.rigidBodyListener(id, pos, rot) if a listener is set.
+        The parsed fields depend on self.__natNetStreamVersion.
+        """
+        offset = 0
+
+        # ID (4 bytes)
+        id = int.from_bytes(data[offset : offset + 4], byteorder="little")
+        offset += 4
+        trace("ID:", id)
+
+        # Position and orientation
+        pos = Vector3.unpack(data[offset : offset + 12])
+        offset += 12
+        trace("\tPosition:", pos[0], ",", pos[1], ",", pos[2])
+        rot = Quaternion.unpack(data[offset : offset + 16])
+        offset += 16
+        trace("\tOrientation:", rot[0], ",", rot[1], ",", rot[2], ",", rot[3])
+
+        # Marker count (4 bytes)
+        markerCount = int.from_bytes(data[offset : offset + 4], byteorder="little")
+        offset += 4
+        markerCountRange = range(0, markerCount)
+        trace("\tMarker Count:", markerCount)
+
+        # Send information to any listener.
+        if self.rigidBodyListener is not None:
+            self.rigidBodyListener(id, pos, rot)
+
+        # Marker positions
+        for i in markerCountRange:
+            pos = Vector3.unpack(data[offset : offset + 12])
+            offset += 12
+            trace("\tMarker", i, ":", pos[0], ",", pos[1], ",", pos[2])
+
+        # Stream version 2.0+ additionally carries per-marker ids/sizes and a mean error.
+        if self.__natNetStreamVersion[0] >= 2:
+            # Marker ID's
+            for i in markerCountRange:
+                id = int.from_bytes(data[offset : offset + 4], byteorder="little")
+                offset += 4
+                trace("\tMarker ID", i, ":", id)
+
+            # Marker sizes
+            for i in markerCountRange:
+                size = FloatValue.unpack(data[offset : offset + 4])
+                offset += 4
+                trace("\tMarker Size", i, ":", size[0])
+
+            (markerError,) = FloatValue.unpack(data[offset : offset + 4])
+            offset += 4
+            trace("\tMarker Error:", markerError)
+
+        # Version 2.6 and later
+        if (
+            ((self.__natNetStreamVersion[0] == 2) and (self.__natNetStreamVersion[1] >= 6))
+            or self.__natNetStreamVersion[0] > 2
+            or self.__natNetStreamVersion[0] == 0
+        ):
+            # Bit 0 of the params short flags whether tracking was valid this frame.
+            (param,) = struct.unpack("h", data[offset : offset + 2])
+            trackingValid = (param & 0x01) != 0
+            offset += 2
+            trace("\tTracking Valid:", "True" if trackingValid else "False")
+
+        return offset
+
+    # Unpack a skeleton object from a data packet
+    def __unpackSkeleton(self, data):
+        """Parse one skeleton (id + contained rigid bodies); return bytes consumed."""
+        offset = 0
+
+        id = int.from_bytes(data[offset : offset + 4], byteorder="little")
+        offset += 4
+        trace("ID:", id)
+
+        rigidBodyCount = int.from_bytes(data[offset : offset + 4], byteorder="little")
+        offset += 4
+        trace("Rigid Body Count:", rigidBodyCount)
+        for j in range(0, rigidBodyCount):
+            offset += self.__unpackRigidBody(data[offset:])
+
+        return offset
+
+    # Unpack data from a motion capture frame message
+    def __unpackMocapData(self, data):
+        trace("Begin MoCap Frame\n-----------------\n")
+
+        data = memoryview(data)
+        offset = 0
+
+        # Frame number (4 bytes)
+        frameNumber = int.from_bytes(data[offset : offset + 4], byteorder="little")
+        offset += 4
+        trace("Frame #:", frameNumber)
+
+        # Marker set count (4 bytes)
+        markerSetCount = int.from_bytes(data[offset : offset + 4], byteorder="little")
+        offset += 4
+        trace("Marker Set Count:", markerSetCount)
+
+        for i in range(0, markerSetCount):
+            # Model name
+            modelName, separator, remainder = bytes(data[offset:]).partition(b"\0")
+            offset += len(modelName) + 1
+            trace("Model Name:", modelName.decode("utf-8"))
+
+            # Marker count (4 bytes)
+            markerCount = int.from_bytes(data[offset : offset + 4], byteorder="little")
+            offset += 4
+            trace("Marker Count:", markerCount)
+
+            for j in range(0, markerCount):
+                pos = Vector3.unpack(data[offset : offset + 12])
+                offset += 12
+                # trace( "\tMarker", j, ":", pos[0],",", pos[1],",", pos[2] )
+
+        # Unlabeled markers count (4 bytes)
+        unlabeledMarkersCount = int.from_bytes(data[offset : offset + 4], byteorder="little")
+        offset += 4
+        trace("Unlabeled Markers Count:", unlabeledMarkersCount)
+
+        for i in range(0, unlabeledMarkersCount):
+            pos = Vector3.unpack(data[offset : offset + 12])
+            offset += 12
+            trace("\tMarker", i, ":", pos[0], ",", pos[1], ",", pos[2])
+
+        # Rigid body count (4 bytes)
+        rigidBodyCount = int.from_bytes(data[offset : offset + 4], byteorder="little")
+        offset += 4
+        trace("Rigid Body Count:", rigidBodyCount)
+
+        for i in range(0, rigidBodyCount):
+            offset += self.__unpackRigidBody(data[offset:])
+
+        # Version 2.1 and later
+        skeletonCount = 0
+        if (self.__natNetStreamVersion[0] == 2 and self.__natNetStreamVersion[1] > 0) or self.__natNetStreamVersion[
+            0
+        ] > 2:
+            skeletonCount = int.from_bytes(data[offset : offset + 4], byteorder="little")
+            offset += 4
+            trace("Skeleton Count:", skeletonCount)
+            for i in range(0, skeletonCount):
+                offset += self.__unpackSkeleton(data[offset:])
+
+        # Labeled markers (Version 2.3 and later)
+        labeledMarkerCount = 0
+        if (self.__natNetStreamVersion[0] == 2 and self.__natNetStreamVersion[1] > 3) or self.__natNetStreamVersion[
+            0
+        ] > 2:
+            labeledMarkerCount = int.from_bytes(data[offset : offset + 4], byteorder="little")
+            offset += 4
+            trace("Labeled Marker Count:", labeledMarkerCount)
+            for i in range(0, labeledMarkerCount):
+                id = int.from_bytes(data[offset : offset + 4], byteorder="little")
+                offset += 4
+                pos = Vector3.unpack(data[offset : offset + 12])
+                offset += 12
+                size = FloatValue.unpack(data[offset : offset + 4])
+                offset += 4
+
+                # Version 2.6 and later
+                if (
+                    (self.__natNetStreamVersion[0] == 2 and self.__natNetStreamVersion[1] >= 6)
+                    or self.__natNetStreamVersion[0] > 2
+                    or major == 0
+                ):
+                    (param,) = struct.unpack("h", data[offset : offset + 2])
+                    offset += 2
+                    occluded = (param & 0x01) != 0
+                    pointCloudSolved = (param & 0x02) != 0
+                    modelSolved = (param & 0x04) != 0
+
+        # Force Plate data (version 2.9 and later)
+        if (self.__natNetStreamVersion[0] == 2 and self.__natNetStreamVersion[1] >= 9) or self.__natNetStreamVersion[
+            0
+        ] > 2:
+            forcePlateCount = int.from_bytes(data[offset : offset + 4], byteorder="little")
+            offset += 4
+            trace("Force Plate Count:", forcePlateCount)
+            for i in range(0, forcePlateCount):
+                # ID
+                forcePlateID = int.from_bytes(data[offset : offset + 4], byteorder="little")
+                offset += 4
+                trace("Force Plate", i, ":", forcePlateID)
+
+                # Channel Count
+                forcePlateChannelCount = int.from_bytes(data[offset : offset + 4], byteorder="little")
+                offset += 4
+
+                # Channel Data
+                for j in range(0, forcePlateChannelCount):
+                    trace("\tChannel", j, ":", forcePlateID)
+                    forcePlateChannelFrameCount = int.from_bytes(data[offset : offset + 4], byteorder="little")
+                    offset += 4
+                    for k in range(0, forcePlateChannelFrameCount):
+                        forcePlateChannelVal = int.from_bytes(data[offset : offset + 4], byteorder="little")
+                        offset += 4
+                        trace("\t\t", forcePlateChannelVal)
+
+        # Latency
+        (latency,) = FloatValue.unpack(data[offset : offset + 4])
+        offset += 4
+
+        # Timecode
+        timecode = int.from_bytes(data[offset : offset + 4], byteorder="little")
+        offset += 4
+        timecodeSub = int.from_bytes(data[offset : offset + 4], byteorder="little")
+        offset += 4
+
+        # Timestamp (increased to double precision in 2.7 and later)
+        if (self.__natNetStreamVersion[0] == 2 and self.__natNetStreamVersion[1] >= 7) or self.__natNetStreamVersion[
+            0
+        ] > 2:
+            (timestamp,) = DoubleValue.unpack(data[offset : offset + 8])
+            offset += 8
+        else:
+            (timestamp,) = FloatValue.unpack(data[offset : offset + 4])
+            offset += 4
+
+        # Frame parameters
+        (param,) = struct.unpack("h", data[offset : offset + 2])
+        isRecording = (param & 0x01) != 0
+        trackedModelsChanged = (param & 0x02) != 0
+        offset += 2
+
+        # Send information to any listener.
+        if self.newFrameListener is not None:
+            self.newFrameListener(
+                frameNumber,
+                markerSetCount,
+                unlabeledMarkersCount,
+                rigidBodyCount,
+                skeletonCount,
+                labeledMarkerCount,
+                latency,
+                timecode,
+                timecodeSub,
+                timestamp,
+                isRecording,
+                trackedModelsChanged,
+            )
+
+    # Unpack a marker set description packet
+    def __unpackMarkerSetDescription(self, data):
+        """Parse a marker-set description (set name + marker names); return bytes consumed."""
+        offset = 0
+
+        name, separator, remainder = bytes(data[offset:]).partition(b"\0")
+        offset += len(name) + 1
+        trace("Markerset Name:", name.decode("utf-8"))
+
+        markerCount = int.from_bytes(data[offset : offset + 4], byteorder="little")
+        offset += 4
+
+        for i in range(0, markerCount):
+            name, separator, remainder = bytes(data[offset:]).partition(b"\0")
+            offset += len(name) + 1
+            trace("\tMarker Name:", name.decode("utf-8"))
+
+        return offset
+
+    # Unpack a rigid body description packet
+    def __unpackRigidBodyDescription(self, data):
+        """Parse a rigid-body description (optional name, id, parent id, offset vector); return bytes consumed."""
+        offset = 0
+
+        # Version 2.0 or higher
+        if self.__natNetStreamVersion[0] >= 2:
+            name, separator, remainder = bytes(data[offset:]).partition(b"\0")
+            offset += len(name) + 1
+            trace("\tMarker Name:", name.decode("utf-8"))
+
+        id = int.from_bytes(data[offset : offset + 4], byteorder="little")
+        offset += 4
+
+        parentID = int.from_bytes(data[offset : offset + 4], byteorder="little")
+        offset += 4
+
+        # NOTE(review): this unpacks three floats; the local name `timestamp`
+        # looks like a misnomer (presumably the body's offset vector) — verify
+        # against the NatNet SDK before renaming.
+        timestamp = Vector3.unpack(data[offset : offset + 12])
+        offset += 12
+
+        return offset
+
+    # Unpack a skeleton description packet
+    def __unpackSkeletonDescription(self, data):
+        """Parse a skeleton description (name, id, contained rigid-body descriptions); return bytes consumed."""
+        offset = 0
+
+        name, separator, remainder = bytes(data[offset:]).partition(b"\0")
+        offset += len(name) + 1
+        trace("\tMarker Name:", name.decode("utf-8"))
+
+        id = int.from_bytes(data[offset : offset + 4], byteorder="little")
+        offset += 4
+
+        rigidBodyCount = int.from_bytes(data[offset : offset + 4], byteorder="little")
+        offset += 4
+
+        for i in range(0, rigidBodyCount):
+            offset += self.__unpackRigidBodyDescription(data[offset:])
+
+        return offset
+
+    # Unpack a data description packet
+    def __unpackDataDescriptions(self, data):
+        offset = 0
+        datasetCount = int.from_bytes(data[offset : offset + 4], byteorder="little")
+        offset += 4
+
+        for i in range(0, datasetCount):
+            type = int.from_bytes(data[offset : offset + 4], byteorder="little")
+            offset += 4
+            if type == 0:
+                offset += self.__unpackMarkerSetDescription(data[offset:])
+            elif type == 1:
+                offset += self.__unpackRigidBodyDescription(data[offset:])
+            elif type == 2:
+                offset += self.__unpackSkeletonDescription(data[offset:])
+
+    def __dataThreadFunction(self, socket, stop):
+        """Receive loop: poll `socket` (1 s select timeout) until `stop()` returns True.
+
+        Args:
+            socket: The UDP socket to read from (note: this parameter shadows
+                the `socket` module inside this method).
+            stop: Zero-argument callable; the loop exits once it returns True.
+        """
+        while not stop():
+            # Block for input
+            ready = select.select([socket], [], [], 1.0)
+            if ready[0]:
+                data, addr = socket.recvfrom(32768)  # 32k byte buffer size
+                if len(data) > 0:
+                    self.__processMessage(data)
+
+    def __processMessage(self, data):
+        """Dispatch one received NatNet packet to the matching unpack handler.
+
+        Packet layout: 2-byte message id, 2-byte payload size, then the payload.
+        """
+        trace("Begin Packet\n------------\n")
+
+        messageID = int.from_bytes(data[0:2], byteorder="little")
+        trace("Message ID:", messageID)
+
+        packetSize = int.from_bytes(data[2:4], byteorder="little")
+        trace("Packet Size:", packetSize)
+
+        offset = 4
+        if messageID == self.NAT_FRAMEOFDATA:
+            self.__unpackMocapData(data[offset:])
+        elif messageID == self.NAT_MODELDEF:
+            self.__unpackDataDescriptions(data[offset:])
+        elif messageID == self.NAT_PINGRESPONSE:
+            offset += 256  # Skip the sending app's Name field
+            offset += 4  # Skip the sending app's Version info
+            # The server's stream version governs all version gates during parsing.
+            self.__natNetStreamVersion = struct.unpack("BBBB", data[offset : offset + 4])
+            offset += 4
+        elif messageID == self.NAT_RESPONSE:
+            if packetSize == 4:
+                commandResponse = int.from_bytes(data[offset : offset + 4], byteorder="little")
+                offset += 4
+            else:
+                message, separator, remainder = bytes(data[offset:]).partition(b"\0")
+                offset += len(message) + 1
+                trace("Command response:", message.decode("utf-8"))
+        elif messageID == self.NAT_UNRECOGNIZED_REQUEST:
+            trace("Received 'Unrecognized request' from server")
+        elif messageID == self.NAT_MESSAGESTRING:
+            message, separator, remainder = bytes(data[offset:]).partition(b"\0")
+            offset += len(message) + 1
+            trace("Received message from server:", message.decode("utf-8"))
+        else:
+            trace("ERROR: Unrecognized packet type")
+
+        trace("End Packet\n----------\n")
+
+    def sendCommand(self, command, commandStr, socket, address):
+        # Compose the message in our known message format
+        if command == self.NAT_REQUEST_MODELDEF or command == self.NAT_REQUEST_FRAMEOFDATA:
+            packetSize = 0
+            commandStr = ""
+        elif command == self.NAT_REQUEST:
+            packetSize = len(commandStr) + 1
+        elif command == self.NAT_PING:
+            commandStr = "Ping"
+            packetSize = len(commandStr) + 1
+
+        data = command.to_bytes(2, byteorder="little")
+        data += packetSize.to_bytes(2, byteorder="little")
+
+        data += commandStr.encode("utf-8")
+        data += b"\0"
+
+        socket.sendto(data, address)
+
+    def run(self):
+        # Create the data socket
+        self.dataSocket = self.__createDataSocket(self.dataPort)
+        if self.dataSocket is None:
+            print("Could not open data channel")
+            exit
+
+        # Create the command socket
+        self.commandSocket = self.__createCommandSocket()
+        if self.commandSocket is None:
+            print("Could not open command channel")
+            exit
+
+        # Create a separate thread for receiving data packets
+        self.__dataThread = Thread(target=self.__dataThreadFunction, args=(self.dataSocket, lambda: self.__stop))
+        self.__dataThread.daemon = True
+        self.__dataThread.start()
+
+        # Create a separate thread for receiving command packets
+        self.__commandThread = Thread(target=self.__dataThreadFunction, args=(self.commandSocket, lambda: self.__stop))
+        self.__commandThread.daemon = True
+        self.__commandThread.start()
+
+        self.sendCommand(self.NAT_REQUEST_MODELDEF, "", self.commandSocket, (self.serverIPAddress, self.commandPort))
+
+    def stop(self):
+        """Signal the receiver threads to stop, wait for them, then close both sockets.
+
+        Note:
+            Assumes run() was called first; otherwise dataSocket/commandSocket
+            do not exist — TODO confirm intended usage.
+        """
+        self.__stop = True
+        if self.__dataThread is not None:
+            self.__dataThread.join()
+
+        if self.__commandThread is not None:
+            self.__commandThread.join()
+
+        self.dataSocket.close()
+        self.commandSocket.close()
diff --git a/src/vapython/tracking/__init__.py b/src/vapython/tracking/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..83b5963c9c4c3f8619ed6ffa69e2289557bde8ea
--- /dev/null
+++ b/src/vapython/tracking/__init__.py
@@ -0,0 +1,113 @@
+# SPDX-FileCopyrightText: 2024-present Pascal Palenda <pascal.palenda@akustik.rwth-aachen.de>
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import annotations
+
+import warnings
+from enum import Enum
+from typing import TYPE_CHECKING
+
+from vapython.tracking.data_types import *  # noqa F403
+
+ihta_tracking_available = True
+try:
+    import IHTATrackingPython as IHTATracking  # type: ignore
+except ImportError:
+    ihta_tracking_available = False
+    warnings.warn(
+        "IHTATrackingPython not found. Falling back to NatNet only.",
+        ImportWarning,
+        stacklevel=2,
+    )
+
+if not ihta_tracking_available:
+    import vapython.tracking.NatNetClient as NatNetTracking
+
+
+if TYPE_CHECKING:
+    from vapython import VA
+
+
+class TrackingType(Enum):
+    """Enumeration of the supported tracking back ends."""
+
+    NatNet = 1
+    ART = 2
+
+
+class Tracker:
+    """Wrapper to interface with different tracking systems.
+
+    Without the IHTATrackingPython library, only NatNet tracking is available.
+    """
+
+    def __init__(self, va_instance: VA, server_ip: str, tracker: TrackingType):
+        """Initialize the tracker.
+
+        Args:
+            va_instance: The VA instance.
+            server_ip: The IP address of the tracking server.
+            tracker: The tracking type.
+
+        Raises:
+            RuntimeError: If ART tracking is requested but IHTATrackingPython is unavailable.
+        """
+        if tracker == TrackingType.ART and not ihta_tracking_available:
+            msg = "IHTATrackingPython not found. Cannot use ART tracking."
+            raise RuntimeError(msg)
+
+        if ihta_tracking_available:
+            ihta_tracking_type = (
+                IHTATracking.Tracker.Type.NATNET if tracker == TrackingType.NatNet else IHTATracking.Tracker.Type.ART
+            )
+            self._tracker = IHTATracking.Tracker(ihta_tracking_type, server_ip)
+
+            self._tracker.registerCallback(self.ihta_callback)
+        else:
+            # Fallback: bundled NatNet client; wire up both callbacks and start
+            # its receiver threads immediately.
+            self._tracker = NatNetTracking.NatNetClient()
+
+            self._tracker.serverIPAddress = server_ip
+            self._tracker.newFrameListener = self.natnet_frame_callback  # type: ignore
+            self._tracker.rigidBodyListener = self.natnet_callback  # type: ignore
+
+            self._tracker.run()
+
+        self._va_instance = va_instance
+
+    def __del__(self):
+        """Destructor; stops the NatNet client (IHTA tracker needs no explicit stop here)."""
+        if ihta_tracking_available:
+            pass
+        else:
+            self._tracker.stop()
+            del self._tracker
+
+    def disconnect(self):
+        """Disconnect from the tracking."""
+        if ihta_tracking_available:
+            pass
+        else:
+            self._tracker.stop()
+
+    def ihta_callback(self, data: list[IHTATracking.TrackingDataPoint]):
+        """Callback for the IHTA tracking.
+
+        TODO: Implement this method.
+        """
+
+    def natnet_callback(self, new_id, position, rotation):
+        """Callback for the NatNet tracking.
+
+        The callback is called for each rigid body, each frame.
+        The position is given as a tuple of 3 floats (x, y, z).
+        The rotation is given as a tuple of 4 floats (x, y, z, w) aka quaternion.
+        Args:
+            new_id: The new ID.
+            position: The position.
+            rotation: The rotation.
+        """
+        self._va_instance._apply_tracking(new_id, position, rotation)  # noqa: SLF001
+
+    def natnet_frame_callback(self, *_):
+        """Callback for the NatNet tracking for each frame.
+
+        This is not used in the current implementation.
+        """
diff --git a/src/vapython/tracking/data_types.py b/src/vapython/tracking/data_types.py
new file mode 100644
index 0000000000000000000000000000000000000000..dc0becefd7d3de3b303ecd86aa1832bd0ee7f271
--- /dev/null
+++ b/src/vapython/tracking/data_types.py
@@ -0,0 +1,46 @@
from dataclasses import dataclass
from typing import Optional, Union

import vapython._types as va_types


@dataclass
class BaseTrackingData:
    """Base class for tracking data.

    Carries the optional offsets that are applied to raw tracking samples
    before they are forwarded to the VA server.
    """

    # Positional offset; a VAVector or any sequence of 3 floats (x, y, z).
    position_offset: Optional[Union[va_types.VAVector, list[float], tuple[float, float, float]]] = None
    # Orientation offset; a VAQuaternion or any sequence of 4 floats (x, y, z, w).
    rotation_offset: Optional[Union[va_types.VAQuaternion, list[float], tuple[float, float, float, float]]] = None


@dataclass
class BaseReceiverTrackingData(BaseTrackingData):
    """Base class for receiver tracking data."""

    # ID of the VA sound receiver driven by this tracker; defaults to -1 (unset).
    receiver_id: int = -1


@dataclass
class ReceiverTrackingData(BaseReceiverTrackingData):
    """Class for receiver tracking data."""


@dataclass
class ReceiverTorsoTrackingData(BaseReceiverTrackingData):
    """Class for receiver torso tracking data."""


@dataclass
class ReceiverRealWorldTrackingData(BaseReceiverTrackingData):
    """Class for receiver real-world tracking data."""


@dataclass
class ReceiverRealWorldTorsoTrackingData(BaseReceiverTrackingData):
    """Class for receiver real-world torso tracking data."""


@dataclass
class SourceTrackingData(BaseTrackingData):
    """Class for source tracking data."""

    # ID of the VA sound source driven by this tracker; defaults to -1 (unset).
    source_id: int = -1
diff --git a/src/vapython/va.py b/src/vapython/va.py
new file mode 100644
index 0000000000000000000000000000000000000000..df73fa44cff067cdcd9330b65109efebf78c9b35
--- /dev/null
+++ b/src/vapython/va.py
@@ -0,0 +1,939 @@
+# SPDX-FileCopyrightText: 2024-present Pascal Palenda <pascal.palenda@akustik.rwth-aachen.de>
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import annotations
+
+import time
+import warnings
+from pathlib import Path
+from typing import Any, Optional, Union
+
+from scipy.spatial.transform import Rotation  # type: ignore
+
+import vapython._helper as helper
+import vapython._types as va_types
+import vapython.vanet._vanet_grpc as va_grpc
+from vapython import tracking
+from vapython.vanet import VAInterface
+
+
+class VA(VAInterface):
+    """
+    Python interface to connect to and control a VAServer instance.
+
+    Examples:
+        >>> va = VA()
+        >>> # This will connect to a VAServer running on the same computer (localhost with port 12340)
+        >>> # NOTE: Generally, a VAServer can run on another computer using the respective IP address
+        >>> va.connect()
+        >>> va.create_sound_source("source1")
+
+    Todo:
+        - get_server is called get_server_state in matlab. Should we rename it?
+        - what should get_server return? A enum?
+        - `attach_event_handler` raise warning or smth.
+        - how is a non-connected call handled?
+        - what about logging? in comb with event handling? integrate the VAServer output?
+    """
+
+    @staticmethod
+    def start_server(
+        config_ini_file: Union[str, Path, None] = None,
+        va_server_path: Union[str, Path, None] = None,
+        extra_args: Union[list[str], None] = None,
+        *,
+        dedicated_window: bool = True,
+    ) -> None:
+        """
+        Starts a VAServer on this computer (localhost:12340) using remote control mode.
+        Use shutdown_server() method to stop the server again.
+
+        Args:
+            config_ini_file: Path to the VAServer configuration file.
+            va_server_path: Path to the VAServer executable.
+            extra_args: Extra arguments to pass to the VAServer executable.
+        """
+        import subprocess
+
+        if va_server_path is None:
+            va_server_path = helper.find_server_executable()
+
+        if isinstance(va_server_path, str):
+            va_server_path = Path(va_server_path)
+
+        if va_server_path is None or not Path(va_server_path).is_file():
+            msg = "Could not find valid VAServer executable."
+            raise FileNotFoundError(msg)
+
+        if config_ini_file is None:
+            va_base_dir = va_server_path.parent.parent
+            config_ini_file = va_base_dir / "conf" / "VACore.ini"
+
+        if isinstance(config_ini_file, str):
+            config_ini_file = Path(config_ini_file)
+
+        if not config_ini_file.is_file():
+            msg = "Could not find valid VACore.ini file."
+            raise FileNotFoundError(msg)
+
+        if extra_args is None:
+            extra_args = []
+
+        server_call: list[Union[str, Path]] = [
+            va_server_path,
+            "--config",
+            config_ini_file,
+            "--server-address",
+            "localhost:12340",
+            "--remote",
+            *extra_args,
+        ]
+
+        popen_kwargs: dict[str, Any] = {}
+        if dedicated_window:
+            popen_kwargs["creationflags"] = subprocess.CREATE_NEW_CONSOLE
+
+        # TODO: if no dedicated window is used, reroute the server output to logging ?!
+
+        subprocess.Popen(
+            server_call,
+            **popen_kwargs,
+        )
+
+    def __init__(self) -> None:
+        """Constructor for the VA class."""
+        super().__init__()
+        self._timer_interval: Optional[float] = None
+        self._timer_last_call: Optional[int] = None
+        self._tracker: Optional[tracking.Tracker] = None
+
+        self._tracker_data: dict[
+            int,
+            Union[
+                tracking.ReceiverTrackingData,
+                tracking.ReceiverTorsoTrackingData,
+                tracking.ReceiverRealWorldTrackingData,
+                tracking.ReceiverRealWorldTorsoTrackingData,
+                tracking.SourceTrackingData,
+            ],
+        ] = {}
+
+    def get_global_auralization_mode(self, *, short_form: bool = True) -> str:  # type: ignore[override]
+        """Get the [global auralization mode](https://www.virtualacoustics.org/VA/documentation/control/#global-auralization-mode).
+
+        Args:
+            short_form (optional): Whether to return the short form of the auralization mode. Defaults to True.
+
+        Returns:
+            A string representation of the auralization mode.
+
+        Examples:
+            >>> va.get_global_auralization_mode()
+            "DS,ER"
+        """
+        return helper.convert_aura_mode_to_str(
+            super().get_global_auralization_mode(),
+            short_form=short_form,
+        )
+
+    def set_global_auralization_mode(self, mode: str) -> None:  # type: ignore[override]
+        """Set the [global auralization mode](https://www.virtualacoustics.org/VA/documentation/control/#global-auralization-mode).
+
+        Args:
+            mode: The auralization mode to set.
+
+        Examples:
+            >>> va.set_global_auralization_mode("+DS,-ER")
+
+            This will enable the Direct Sound and disable the Early Reflections.
+        """
+        super().set_global_auralization_mode(
+            helper.parse_aura_mode_str(
+                mode,
+                super().get_global_auralization_mode(),
+            )
+        )
+
+    def get_sound_receiver_auralization_mode(self, receiver_id: int, *, short_form: bool = True) -> str:  # type: ignore[override]
+        """Get the auralization mode for a specific sound receiver.
+
+        See also [get_global_auralization_mode()][vapython.va.VA.get_global_auralization_mode] for a quick example.
+
+        Args:
+            receiver_id: The ID of the sound receiver.
+            short_form (optional): Whether to return the short form of the auralization mode. Defaults to True.
+
+        Returns:
+            A string representation of the auralization mode.
+        """
+        return helper.convert_aura_mode_to_str(
+            super().get_sound_receiver_auralization_mode(receiver_id),
+            short_form=short_form,
+        )
+
+    def set_sound_receiver_auralization_mode(self, receiver_id: int, mode: str) -> None:  # type: ignore[override]
+        """Set the auralization mode for a specific sound receiver.
+
+        See also [set_global_auralization_mode()][vapython.va.VA.set_global_auralization_mode] for a quick example.
+
+        Args:
+            receiver_id: The ID of the sound receiver.
+            mode: The auralization mode to set.
+        """
+        super().set_sound_receiver_auralization_mode(
+            receiver_id,
+            helper.parse_aura_mode_str(
+                mode,
+                super().get_sound_receiver_auralization_mode(receiver_id),
+            ),
+        )
+
+    def get_sound_source_auralization_mode(self, source_id: int, *, short_form: bool = True) -> str:  # type: ignore[override]
+        """Get the auralization mode for a specific sound source.
+
+        See also [get_global_auralization_mode()][vapython.va.VA.get_global_auralization_mode] for a quick example.
+
+        Args:
+            source_id: The ID of the sound source.
+            short_form (optional): Whether to return the short form of the auralization mode. Defaults to True.
+
+        Returns:
+            A string representation of the auralization mode.
+        """
+        return helper.convert_aura_mode_to_str(
+            super().get_sound_source_auralization_mode(source_id),
+            short_form=short_form,
+        )
+
+    def set_sound_source_auralization_mode(self, source_id: int, mode: str) -> None:  # type: ignore[override]
+        """Set the auralization mode for a specific sound source.
+
+        See also [set_global_auralization_mode()][vapython.va.VA.set_global_auralization_mode] for a quick example.
+
+        Args:
+            source_id (int): The ID of the sound source.
+            mode (str): The auralization mode to set.
+        """
+        super().set_sound_source_auralization_mode(
+            source_id,
+            helper.parse_aura_mode_str(
+                mode,
+                super().get_sound_source_auralization_mode(source_id),
+            ),
+        )
+
+    def get_rendering_module_auralization_mode(self, renderer_id: str, *, short_form: bool = True) -> str:  # type: ignore[override]
+        """Get the auralization mode for a specific rendering module.
+
+        See also [get_global_auralization_mode()][vapython.va.VA.get_global_auralization_mode] for a quick example.
+
+        Args:
+            renderer_id: The ID of the sound source.
+            short_form (optional): Whether to return the short form of the auralization mode. Defaults to True.
+
+        Returns:
+            A string representation of the auralization mode.
+        """
+        return helper.convert_aura_mode_to_str(
+            super().get_rendering_module_auralization_mode(renderer_id),
+            short_form=short_form,
+        )
+
+    def set_rendering_module_auralization_mode(self, renderer_id: str, mode: str) -> None:  # type: ignore[override]
+        """Set the auralization mode for a specific rendering module.
+
+        See also [set_global_auralization_mode()][vapython.va.VA.set_global_auralization_mode] for a quick example.
+
+        Args:
+            renderer_id (int): The ID of the sound source.
+            mode (str): The auralization mode to set.
+        """
+        super().set_rendering_module_auralization_mode(
+            renderer_id,
+            helper.parse_aura_mode_str(
+                mode,
+                super().get_rendering_module_auralization_mode(renderer_id),
+            ),
+        )
+
+    def get_signal_source_buffer_playback_state(self, signal_source_id: str) -> va_grpc.PlaybackStateState:  # type: ignore[override]
+        """Get the playback state of a signal source buffer.
+
+        Args:
+            signal_source_id: The ID of the signal source to get the playback state for.
+
+        Returns:
+            The playback state of the signal source buffer.
+        """
+
+        state = super().get_signal_source_buffer_playback_state(signal_source_id).state
+        return va_grpc.PlaybackStateState(state)
+
+    def set_signal_source_buffer_playback_action(
+        self,
+        signal_source_id: str,
+        playback_action: Union[str, va_grpc.PlaybackActionAction],  # type: ignore[override]
+    ):  # type: ignore[override]
+        """Set the playback action of a signal source buffer.
+
+        Args:
+            signal_source_id: The ID of the signal source to set the playback action for.
+            playback_action: The playback action to set.
+
+        Examples:
+            >>> va.set_signal_source_buffer_playback_action("source1", "PLAY")
+            >>> va.set_signal_source_buffer_playback_action(
+            ...     "source1", va_grpc.PlaybackActionAction.PLAY
+            ... )
+        """
+
+        if isinstance(playback_action, str):
+            playback_action = va_grpc.PlaybackActionAction[playback_action.upper()]  # type: ignore[misc]
+
+        super().set_signal_source_buffer_playback_action(
+            signal_source_id,
+            va_grpc.PlaybackAction(action=playback_action),  # type: ignore[arg-type]
+        )
+
+    def add_search_path(self, path: Union[str, Path]) -> bool:
+        """Add a search path to the server.
+
+        Args:
+            path: The path to add.
+
+        Returns:
+            True if the path is valid, False otherwise.
+
+        Examples:
+            >>> str_path = "C:/path/to/folder"
+            >>> va.add_search_path(str_path)
+            True
+            >>> path = Path("C:/path/to/folder")
+            >>> va.add_search_path(path)
+            True
+        """
+
+        arguments: va_types.VAStruct = {"addsearchpath": str(path)}
+
+        result = super().call_module("VACore", arguments)
+        return bool(result["pathvalid"])
+
+    def create_signal_source_buffer_from_file(self, file_path: Union[str, Path], name: str = "") -> str:
+        """Create a signal source buffer from a file.
+
+        Note:
+            The audiofile must be mono and its sampling rate must match that of the server.
+
+        Args:
+            file_path: The path to the file.
+            name: The name of the signal source buffer.
+        """
+
+        parameters: va_types.VAStruct = {"filepath": str(file_path)}
+
+        return super().create_signal_source_buffer_from_parameters(name=name, parameters=parameters)
+
+    def create_directivity_from_file(self, file_path: Union[str, Path], name: str = "") -> int:
+        """Create a source or receiver directivity from a file.
+
+        Args:
+            file_path: The path to the file.
+            name: The name of the directivity.
+        """
+
+        parameters: va_types.VAStruct = {"filepath": str(file_path)}
+
+        return super().create_directivity_from_parameters(name=name, parameters=parameters)
+
+    def get_server_state(self) -> va_grpc.CoreState:
+        """Get the state of the server.
+
+        This indicates the state of the server, which can be one of the following:
+        - `CREATED`: server started but not yet initialized
+        - `READY`: successfully initialized and ready for use
+        - `FAIL`: corrupted and can not be recovered
+
+        Returns:
+            The state of the server.
+        """
+
+        return super()._get_state()
+
+    def remove_sound_source_signal_source(self, sound_source_id: int) -> None:
+        """Remove the signal source from the sound source.
+
+        Note: this will not delete the signal source, i.e. it can still be used by other sound sources.
+
+        Args:
+            sound_source_id: The ID of the sound source.
+        """
+
+        super().set_sound_source_signal_source(sound_source_id, "")
+
+    def shutdown_server(self) -> None:
+        """Shutdown the connected server"""
+        arguments: va_types.VAStruct = {"shutdown": True}
+
+        super().call_module("VACore", arguments)
+
+    def get_sound_receiver_orientation_view_up(
+        self, sound_receiver_id: int
+    ) -> va_grpc.GetSoundReceiverOrientationVuReply:
+        """Get the orientation of a sound receiver as a view-up vector pair.
+
+        Alias for / same as [`get_sound_receiver_orientation_vu`][vapython.va.VA.get_sound_receiver_orientation_vu].
+
+        Args:
+            sound_receiver_id: The ID of the sound receiver to get the orientation of.
+
+        Returns:
+            The orientation of the sound receiver as a view-up vector pair.
+        """
+        return super().get_sound_receiver_orientation_vu(sound_receiver_id)
+
+    def get_sound_receiver_real_world_head_position_orientation_view_up(
+        self, sound_receiver_id: int
+    ) -> va_grpc.GetSoundReceiverRealWorldPositionOrientationVuReply:
+        """Get the real-world position and orientation of a sound receiver.
+
+        Alias for / same as
+        [`get_sound_receiver_real_world_position_orientation_vu`][vapython.va.VA.get_sound_receiver_real_world_position_orientation_vu].
+
+        Note:
+            Coordinates refer the to center of the head on the axis which goes through both ears.
+
+        Args:
+            sound_receiver_id: The ID of the sound receiver to get the real-world position and orientation of.
+
+        Returns:
+            The real-world position and orientation of the sound receiver.
+        """
+        return super().get_sound_receiver_real_world_position_orientation_vu(sound_receiver_id)
+
+    def get_sound_source_orientation_view_up(self, sound_source_id: int) -> va_grpc.GetSoundSourceOrientationVuReply:
+        """Get the orientation of a sound source as a view-up vector pair.
+
+        Alias for / same as [`get_sound_source_orientation_vu`][vapython.va.VA.get_sound_source_orientation_vu].
+
+        Args:
+            sound_source_id: The ID of the sound source to get the orientation of.
+
+        Returns:
+            The orientation of the sound source as a view-up vector pair.
+        """
+        return super().get_sound_source_orientation_vu(sound_source_id)
+
+    def set_sound_receiver_orientation_view_up(
+        self,
+        sound_receiver_id: int,
+        view: Union[va_types.VAVector, list[float], tuple[float, float, float]],
+        up: Union[va_types.VAVector, list[float], tuple[float, float, float]],
+    ):
+        """Set the orientation of a sound receiver as a view-up vector pair.
+
+        Alias for / same as [`set_sound_receiver_orientation_vu`][vapython.va.VA.set_sound_receiver_orientation_vu].
+
+        Args:
+            sound_receiver_id: The ID of the sound receiver to set the orientation of.
+            view: The view vector of the sound receiver.
+            up: The up vector of the sound receiver.
+
+        Examples:
+            >>> va.set_sound_receiver_orientation_view_up(
+            ...     sound_receiver_id=1, view=[0, 0, -1], up=[0, 1, 0]
+            ... )
+
+            Here, the view vector in OpenGL coordinates is [0, 0, -1] which means the
+            sound receiver is looking straight ahead.
+            The up vector is pointing upwards in the OpenGL coordinate system, this serves as the reference frame.
+        """
+        super().set_sound_receiver_orientation_vu(sound_receiver_id, view, up)
+
+    def set_sound_receiver_real_world_position_orientation_view_up(
+        self,
+        sound_receiver_id: int,
+        position: Union[va_types.VAVector, list[float], tuple[float, float, float]],
+        view: Union[va_types.VAVector, list[float], tuple[float, float, float]],
+        up: Union[va_types.VAVector, list[float], tuple[float, float, float]],
+    ):
+        """Set the real-world position and orientation of a sound receiver.
+
+        Alias for / same as
+        [`set_sound_receiver_real_world_position_orientation_vu`][vapython.va.VA.set_sound_receiver_real_world_position_orientation_vu].
+
+        This function is used to provide the crosstalk-cancellation module with the current position of the
+        sound receivers head in the real-world.
+
+        Note:
+            Coordinates refer the to center of the head on the axis which goes through both ears.
+
+        Args:
+            sound_receiver_id: The ID of the sound receiver to set the real-world position and orientation of
+            position: The real-world position of the sound receiver.
+            view: The view vector of the sound receiver.
+            up: The up vector of the sound receiver.
+
+        Examples:
+            >>> va.set_sound_receiver_real_world_position_orientation_view_up(
+            ...     sound_receiver_id=1, position=[0, 1.7, 0], view=[0, 0, -1], up=[0, 1, 0]
+            ... )
+
+            Here, the real world origin is defined at the floor.
+            Thus, the position of the sound receiver is 1.7 m above the floor.
+            The view vector in OpenGL coordinates is [0, 0, -1] which means the
+            sound receiver is looking straight ahead.
+            The up vector is pointing upwards in the OpenGL coordinate system, this serves as the reference frame.
+        """
+        super().set_sound_receiver_real_world_position_orientation_vu(sound_receiver_id, position, view, up)
+
+    def set_sound_source_orientation_view_up(
+        self,
+        sound_source_id: int,
+        view: Union[va_types.VAVector, list[float], tuple[float, float, float]],
+        up: Union[va_types.VAVector, list[float], tuple[float, float, float]],
+    ):
+        """Set the orientation of a sound source as a view-up vector pair.
+
+        Alias for / same as [`set_sound_source_orientation_vu`][vapython.va.VA.set_sound_source_orientation_vu].
+
+        Args:
+            sound_source_id: The ID of the sound source to set the orientation of.
+            view: The view vector of the sound source.
+            up: The up vector of the sound source.
+
+        Examples:
+            >>> va.set_sound_source_orientation_view_up(
+            ...     sound_source_id=1, view=[0, 0, -1], up=[0, 1, 0]
+            ... )
+
+            Here, the view vector in OpenGL coordinates is [0, 0, -1] which means the
+            sound source main axis of radiation is straight ahead.
+            The up vector is pointing upwards in the OpenGL coordinate system, this serves as the reference frame.
+        """
+        super().set_sound_source_orientation_vu(sound_source_id, view, up)
+
    def get_homogeneous_medium_shift_parameters(self, parameters: va_types.VAStruct):
        """Get the homogeneous medium parameters.

        Alias for / same as `get_homogeneous_medium_parameters`.

        Args:
            parameters: Struct passed through to the server query.
        """
        return super().get_homogeneous_medium_parameters(parameters)
+
    def set_homogeneous_medium_shift_parameters(self, parameters: va_types.VAStruct):
        """Set the homogeneous medium parameters.

        Alias for / same as `set_homogeneous_medium_parameters`.

        Args:
            parameters: Struct passed through to the server.
        """
        super().set_homogeneous_medium_parameters(parameters)
+
+    def close_timer(self) -> None:
+        """Close the timer.
+
+        Primarily used for compatibility with Matlab.
+        """
+        self._timer_interval = -1
+        self._timer_last_call = None
+
+    def set_timer(self, interval: float) -> None:
+        """Set the timer interval in seconds."""
+        if interval < 0:
+            msg = "Interval must be greater or equal to 0."
+            raise ValueError(msg)
+
+        self._timer_interval = interval
+        self._timer_last_call = time.perf_counter_ns()
+
+    def wait_for_timer(self) -> None:
+        """Wait for the timer interval.
+
+        This function will wait until the timer interval is reached and return.
+        The timer is reset and a new interval is started.
+        This can be used to adapt the VA scene in a time-controlled manner for example for constant speed movements.
+
+        The interval is set by [`set_timer()`][vapython.va.VA.set_timer] in seconds.
+
+        The accuracy of the timer depends on the system but should be not lower than 1 ms.
+
+        As this is implemented with a busy loop, use with caution.
+
+        Examples:
+            We want to move a sound source with a constant speed. We can use the timer to update the position.
+            Here we move the sound source with 1 m/s and update the position every 0.1 seconds.
+
+            >>> source_id = va.create_sound_source("source1")
+            >>> timer_interval = 0.1
+            >>> va.set_timer(timer_interval)
+            >>> delta_distance = 1 * timer_interval
+            >>> va.set_sound_source_position(source_id, [0, 0, 0])
+            >>> while True:
+            ...     current_position = va.get_sound_source_position(source_id)
+            ...     va.set_sound_source_position(
+            ...         source_id[current_position[0] + delta_distance, 0, 0]
+            ...     )
+            ...     va.wait_for_timer()
+        """
+        if not self._timer_interval:
+            warnings.warn(
+                "Timer interval not set. Use `set_timer` to set the interval.",
+                stacklevel=2,
+            )
+            return
+
+        if not self._timer_last_call:
+            return
+
+        while time.perf_counter_ns() - self._timer_last_call < self._timer_interval * 1e9:
+            pass
+
+        self._timer_last_call = time.perf_counter_ns()
+
+    def connect_tracker(
+        self,
+        server_ip: str,
+        tracker: tracking.TrackingType = tracking.TrackingType.NatNet,
+    ) -> None:
+        """Connect to a tracking system.
+
+        This function connects to a tracking system to track sound sources or receivers.
+        The tracking system can be used to track the position and orientation of sound sources and receivers.
+
+        Sources and receivers can be set as "tracked" via the respective `set_tracked_*` functions.
+        These methods can be used before or after connecting to the tracking system.
+
+        Via the `tracker` argument, the tracking system can be selected. Without the IHTATrackingPython package,
+        only the NatNet tracking system is available.
+
+        Args:
+            server_ip: The IP address of the tracking server.
+            tracker: The tracking system to use. Defaults to `NatNet`.
+
+        Examples:
+            This will connect to a NatNet tracking system running on the same computer and track a
+            sound source with the second rigid body.
+
+            >>> va.connect_tracker("127.0.0.1")
+            >>> va.set_tracked_sound_source(sound_source, 1)
+        """
+        from vapython.tracking import Tracker
+
+        self._tracker = Tracker(self, server_ip, tracker)
+
+    def disconnect_tracker(self) -> None:
+        """Disconnect from the tracking system."""
+        if self._tracker is None:
+            return
+        self._tracker.disconnect()
+        del self._tracker
+        self._tracker = None
+
+    def get_tracker_connected(self) -> bool:
+        """Check if the tracker is connected."""
+        return bool(self._tracker)
+
    def get_tracker_info(self) -> va_types.VAStruct:
        """Get information about the tracked objects.

        This includes what objects are tracked and their offsets.
        As well as if the tracking is connected.

        Sources are reported under per-ID keys (``TrackedSource<id>``);
        each receiver kind uses one fixed key, so only the last entry of a
        kind appears if several trackers drive the same kind.

        Returns:
            A dictionary with information about the tracked objects.
        """
        tracker_info: va_types.VAStruct = {"IsConnected": bool(self._tracker)}

        for tracker_id, tracker_data in self._tracker_data.items():
            concrete_tracker_data: va_types.VAStruct = {"TrackerID": tracker_id}

            # Offsets are serialized as strings; falsy offsets are omitted.
            if tracker_data.rotation_offset:
                concrete_tracker_data["RotationOffset"] = str(tracker_data.rotation_offset)

            # The tracking data classes are siblings, so the isinstance
            # dispatch below selects exactly one branch per entry.
            if isinstance(tracker_data, tracking.SourceTrackingData):
                concrete_tracker_data["SourceID"] = tracker_data.source_id

                if tracker_data.position_offset:
                    concrete_tracker_data["PositionOffset"] = str(tracker_data.position_offset)

                tracker_info[f"TrackedSource{tracker_data.source_id}"] = concrete_tracker_data

            elif isinstance(tracker_data, tracking.ReceiverTrackingData):
                concrete_tracker_data["ReceiverID"] = tracker_data.receiver_id

                if tracker_data.position_offset:
                    concrete_tracker_data["PositionOffset"] = str(tracker_data.position_offset)

                tracker_info["TrackedReceiver"] = concrete_tracker_data

            elif isinstance(tracker_data, tracking.ReceiverRealWorldTrackingData):
                concrete_tracker_data["ReceiverID"] = tracker_data.receiver_id

                if tracker_data.position_offset:
                    concrete_tracker_data["PositionOffset"] = str(tracker_data.position_offset)

                tracker_info["TrackedRealWorldReceiver"] = concrete_tracker_data

            elif isinstance(tracker_data, tracking.ReceiverTorsoTrackingData):
                # Torso entries carry no position offset, only rotation.
                concrete_tracker_data["ReceiverID"] = tracker_data.receiver_id

                tracker_info["TrackedReceiverTorso"] = concrete_tracker_data

            elif isinstance(tracker_data, tracking.ReceiverRealWorldTorsoTrackingData):
                concrete_tracker_data["ReceiverID"] = tracker_data.receiver_id

                tracker_info["TrackedRealWorldReceiverTorso"] = concrete_tracker_data

        return tracker_info
+
+    def set_tracked_sound_source(self, sound_source_id: int, tracker_id: int) -> None:
+        """Set a sound source to be tracked by the tracking system.
+
+        This function sets a sound source to be tracked by the tracking system.
+        The sound source will be tracked with the given `tracker_id`.
+
+        This method can be called before or after connecting to the tracking system.
+
+        Args:
+            sound_source_id: The ID of the sound source to track.
+            tracker_id: The ID of the tracker to use.
+        """
+        self._tracker_data[tracker_id] = tracking.SourceTrackingData(source_id=sound_source_id)
+
+    def set_tracked_sound_receiver(self, sound_receiver_id: int, tracker_id: int) -> None:
+        """Set a sound receiver to be tracked by the tracking system.
+
+        This function sets a sound receiver to be tracked by the tracking system.
+        The sound receiver will be tracked with the given `tracker_id`.
+
+        This method can be called before or after connecting to the tracking system.
+
+        Args:
+            sound_receiver_id: The ID of the sound receiver to track.
+            tracker_id: The ID of the tracker to use.
+        """
+        self._tracker_data[tracker_id] = tracking.ReceiverTrackingData(receiver_id=sound_receiver_id)
+
+    def set_tracked_sound_receiver_torso(self, sound_receiver_id: int, tracker_id: int) -> None:
+        """Set a sound receiver torso to be tracked by the tracking system.
+
+        This function sets a sound receiver torso to be tracked by the tracking system.
+        The sound receiver torso will be tracked with the given `tracker_id`.
+
+        The rotation of the torso will influence the HRIR selection.
+
+        This method can be called before or after connecting to the tracking system.
+
+        Args:
+            sound_receiver_id: The ID of the sound receiver torso to track.
+            tracker_id: The ID of the tracker to use.
+        """
+        self._tracker_data[tracker_id] = tracking.ReceiverTorsoTrackingData(receiver_id=sound_receiver_id)
+
+    def set_tracked_real_world_sound_receiver(self, sound_receiver_id: int, tracker_id: int) -> None:
+        """Set a real-world sound receiver to be tracked by the tracking system.
+
+        This function sets a real-world sound receiver to be tracked by the tracking system.
+        The real-world sound receiver will be tracked with the given `tracker_id`.
+
+        This method can be called before or after connecting to the tracking system.
+
+        Args:
+            sound_receiver_id: The ID of the real-world sound receiver to track.
+            tracker_id: The ID of the tracker to use.
+        """
+        self._tracker_data[tracker_id] = tracking.ReceiverRealWorldTrackingData(receiver_id=sound_receiver_id)
+
+    def set_tracked_real_world_sound_receiver_torso(self, sound_receiver_id: int, tracker_id: int) -> None:
+        """Set a real-world sound receiver torso to be tracked by the tracking system.
+
+        This function sets a real-world sound receiver torso to be tracked by the tracking system.
+        The real-world sound receiver torso will be tracked with the given `tracker_id`.
+
+        The rotation of the torso will influence the HRIR selection.
+
+        This method can be called before or after connecting to the tracking system.
+
+        Args:
+            sound_receiver_id: The ID of the real-world sound receiver torso to track.
+            tracker_id: The ID of the tracker to use.
+        """
+        self._tracker_data[tracker_id] = tracking.ReceiverRealWorldTorsoTrackingData(receiver_id=sound_receiver_id)
+
+    def set_tracked_sound_source_offset(
+        self,
+        sound_source_id: int,
+        *,
+        position_offset: Optional[Union[va_types.VAVector, list[float], tuple[float, float, float]]] = None,
+        orientation_offset: Optional[
+            Union[va_types.VAQuaternion, list[float], tuple[float, float, float, float]]
+        ] = None,
+    ) -> None:
+        """Set the offset for a tracked sound source.
+
+        The orientation offset is applied directly to the orientation of the sound source.
+        The position offset is first rotated by the orientation of the sound source and then applied.
+
+        Args:
+            sound_source_id: The ID of the sound source to set the offset for.
+            position_offset: The position offset to set.
+            orientation_offset: The orientation offset to set.
+        """
+        for data in self._tracker_data.values():
+            if isinstance(data, tracking.SourceTrackingData) and data.source_id == sound_source_id:
+                if position_offset is not None:
+                    data.position_offset = position_offset
+                if orientation_offset is not None:
+                    data.rotation_offset = orientation_offset
+                break
+
+    def set_tracked_sound_receiver_offset(
+        self,
+        sound_receiver_id: int,
+        *,
+        position_offset: Optional[Union[va_types.VAVector, list[float], tuple[float, float, float]]] = None,
+        orientation_offset: Optional[
+            Union[va_types.VAQuaternion, list[float], tuple[float, float, float, float]]
+        ] = None,
+    ) -> None:
+        """Set the offset for a tracked sound receiver.
+
+        This can be useful if the origin of the rigid body does not align with the acoustic center.
+        For example, a tracker mounted on top of a head should still have the acoustic center at ear height.
+
+        The orientation offset is applied directly to the orientation of the sound receiver.
+        The position offset is first rotated by the orientation of the sound receiver and then applied.
+
+        Args:
+            sound_receiver_id: The ID of the sound receiver to set the offset for.
+            position_offset: The position offset to set.
+            orientation_offset: The orientation offset to set.
+        """
+        for data in self._tracker_data.values():
+            if isinstance(data, tracking.ReceiverTrackingData) and data.receiver_id == sound_receiver_id:
+                if position_offset is not None:
+                    data.position_offset = position_offset
+                if orientation_offset is not None:
+                    data.rotation_offset = orientation_offset
+                break
+
+    def set_tracked_sound_receiver_torso_offset(
+        self,
+        sound_receiver_id: int,
+        *,
+        orientation_offset: Optional[
+            Union[va_types.VAQuaternion, list[float], tuple[float, float, float, float]]
+        ] = None,
+    ) -> None:
+        """Set the offset for a tracked sound receiver torso.
+
+        This can be useful if the origin of the rigid body does not align with the acoustic center.
+
+        Args:
+            sound_receiver_id: The ID of the sound receiver torso to set the offset for.
+
+            orientation_offset: The orientation offset to set.
+        """
+        for data in self._tracker_data.values():
+            if isinstance(data, tracking.ReceiverTorsoTrackingData) and data.receiver_id == sound_receiver_id:
+                if orientation_offset is not None:
+                    data.rotation_offset = orientation_offset
+                break
+
+    def set_tracked_real_world_sound_receiver_offset(
+        self,
+        sound_receiver_id: int,
+        *,
+        position_offset: Optional[Union[va_types.VAVector, list[float], tuple[float, float, float]]] = None,
+        orientation_offset: Optional[
+            Union[va_types.VAQuaternion, list[float], tuple[float, float, float, float]]
+        ] = None,
+    ) -> None:
+        """Set the offset for a tracked real-world sound receiver.
+
+        This can be useful if the origin of the rigid body does not align with the acoustic center.
+        For example, a tracker mounted on top of a head should still have the acoustic center at ear height.
+
+        The orientation offset is applied directly to the orientation of the sound receiver.
+        The position offset is first rotated by the orientation of the sound receiver and then applied.
+
+        Args:
+            sound_receiver_id: The ID of the real-world sound receiver to set the offset for.
+            position_offset: The position offset to set.
+            orientation_offset: The orientation offset to set.
+        """
+        for data in self._tracker_data.values():
+            if isinstance(data, tracking.ReceiverRealWorldTrackingData) and data.receiver_id == sound_receiver_id:
+                if position_offset is not None:
+                    data.position_offset = position_offset
+                if orientation_offset is not None:
+                    data.rotation_offset = orientation_offset
+                break
+
+    def set_tracked_real_world_sound_receiver_torso_offset(
+        self,
+        sound_receiver_id: int,
+        *,
+        orientation_offset: Optional[
+            Union[va_types.VAQuaternion, list[float], tuple[float, float, float, float]]
+        ] = None,
+    ) -> None:
+        """Set the offset for a tracked real-world sound receiver torso.
+
+        This can be useful if the origin of the rigid body does not align with the acoustic center.
+
+        Args:
+            sound_receiver_id: The ID of the real-world sound receiver torso to set the offset for.
+            orientation_offset: The orientation offset to set.
+        """
+        for data in self._tracker_data.values():
+            if isinstance(data, tracking.ReceiverRealWorldTorsoTrackingData) and data.receiver_id == sound_receiver_id:
+                if orientation_offset is not None:
+                    data.rotation_offset = orientation_offset
+                break
+
+    def _apply_tracking(
+        self,
+        tracker_id: int,
+        position: tuple[float, float, float],
+        orientation: tuple[float, float, float, float],
+    ):
+        """Internal function to apply the tracking data to the VA server.
+
+        Looks up the tracking registration for `tracker_id`, applies the
+        configured rotation/position offsets, and forwards the resulting pose
+        to the VA server call matching the registration type.
+
+        Args:
+            tracker_id: The ID of the tracker to apply the data for.
+            position: The position to apply.
+            orientation: The orientation to apply, in (x, y, z, w) component
+                order (quaternions are parsed with `scalar_first=False`).
+        """
+        # Trackers without a registration are silently ignored.
+        if tracker_id not in self._tracker_data:
+            return
+
+        data = self._tracker_data[tracker_id]
+
+        # Truthiness check: a falsy offset (e.g. None or an empty sequence)
+        # means "no offset configured".
+        if data.rotation_offset:
+            # NOTE(review): the `scalar_first` keyword requires a recent SciPy
+            # release — confirm the project's minimum SciPy version supports it.
+            orientation_offset = Rotation.from_quat(data.rotation_offset, scalar_first=False)
+            orientation_quat = Rotation.from_quat(orientation, scalar_first=False)
+
+            # Compose with the offset on the left: (offset * tracked).apply(v)
+            # applies the tracked rotation first, then the offset. Note that
+            # as_quat() yields numpy scalars inside the resulting tuple.
+            orientation = tuple((orientation_offset * orientation_quat).as_quat())
+
+        if data.position_offset:
+            # Rotate the configured position offset by the (already
+            # offset-corrected) orientation, then translate the position.
+            orientation_quat = Rotation.from_quat(orientation, scalar_first=False)
+
+            position_offset = orientation_quat.apply(data.position_offset)
+
+            position = tuple([position[i] + position_offset[i] for i in range(3)])
+
+        def head_above_torso_orientation(
+            orientation: tuple[float, float, float, float], receiver_id
+        ) -> tuple[float, float, float, float]:
+            """Calculate the orientation of the head above the torso.
+
+            Computes torso⁻¹ * head, i.e. the current head orientation
+            (queried from the server) expressed relative to the tracked
+            torso orientation.
+            """
+            orientation_quat = Rotation.from_quat(orientation, scalar_first=False)
+
+            head_quat = Rotation.from_quat(self.get_sound_receiver_orientation(receiver_id), scalar_first=False)
+            orientation_quat = orientation_quat.inv() * head_quat
+            return tuple(orientation_quat.as_quat())
+
+        # Dispatch to the VA call that matches the registration type.
+        # NOTE(review): if the torso/real-world data classes subclass
+        # ReceiverTrackingData, the earlier isinstance branches would shadow
+        # the later ones — confirm the tracking class hierarchy.
+        if isinstance(data, tracking.SourceTrackingData):
+            self.set_sound_source_pose(data.source_id, position, orientation)
+        elif isinstance(data, tracking.ReceiverTrackingData):
+            self.set_sound_receiver_pose(data.receiver_id, position, orientation)
+        elif isinstance(data, tracking.ReceiverRealWorldTrackingData):
+            self.set_sound_receiver_real_world_pose(data.receiver_id, position, orientation)
+        elif isinstance(data, tracking.ReceiverTorsoTrackingData):
+            # TODO: check if this is correct
+            orientation = head_above_torso_orientation(orientation, data.receiver_id)
+            self.set_sound_receiver_head_above_torso_orientation(data.receiver_id, orientation)
+        elif isinstance(data, tracking.ReceiverRealWorldTorsoTrackingData):
+            # TODO: check if this is correct
+            orientation = head_above_torso_orientation(orientation, data.receiver_id)
+            self.set_sound_receiver_real_world_head_above_torso_orientation(data.receiver_id, orientation)
diff --git a/src/vapython/vanet/__init__.py b/src/vapython/vanet/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..7fdf97c90fc60903103d2918454256033909b662
--- /dev/null
+++ b/src/vapython/vanet/__init__.py
@@ -0,0 +1,7 @@
+# SPDX-FileCopyrightText: 2024-present Pascal Palenda <pascal.palenda@akustik.rwth-aachen.de>
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from vapython.vanet._va_interface import VAInterface
+
+__all__ = ["VAInterface"]
diff --git a/src/vapython/vanet/_helper.py b/src/vapython/vanet/_helper.py
new file mode 100644
index 0000000000000000000000000000000000000000..aba34effa731bd6ba7b439edb05ac02b7ea9f7a7
--- /dev/null
+++ b/src/vapython/vanet/_helper.py
@@ -0,0 +1,186 @@
+# SPDX-FileCopyrightText: 2024-present Pascal Palenda <pascal.palenda@akustik.rwth-aachen.de>
+#
+# SPDX-License-Identifier: Apache-2.0
+
+"""Helper functions for converting between VAPython and Vanet gRPC types.
+
+This is used as a compatibility layer between native Python and the gRPC world.
+"""
+
+from typing import Union
+
+import betterproto
+
+from vapython._types import VAQuaternion, VAStruct, VAVector
+from vapython.vanet._vanet_grpc import Quaternion as VanetQuaternion
+from vapython.vanet._vanet_grpc import SampleBuffer as VanetSampleBuffer
+from vapython.vanet._vanet_grpc import Struct as VanetStruct
+from vapython.vanet._vanet_grpc import Value as VanetStructValue
+from vapython.vanet._vanet_grpc import Vector3 as VanetVector
+
+
+def convert_struct_to_vanet(input_struct: VAStruct) -> VanetStruct:
+    """Converts a VAStruct or dictionary to a VanetStruct.
+
+    Args:
+        input_struct: The input structure to be converted.
+
+    Returns:
+        The converted VanetStruct.
+
+    Raises:
+        TypeError: If the input_struct is not of type VAStruct or dict.
+        ValueError: If an unknown type of value is encountered in the struct.
+    """
+    if not isinstance(input_struct, dict):
+        msg = "input_struct must be of type VAStruct or dict"
+        raise TypeError(msg)
+
+    output_struct = VanetStruct()
+    for key, value in input_struct.items():
+        if isinstance(value, bool):
+            output_struct.fields[key] = VanetStructValue(bool_value=value)
+        elif isinstance(value, int):
+            output_struct.fields[key] = VanetStructValue(integer_value=value)
+        elif isinstance(value, float):
+            output_struct.fields[key] = VanetStructValue(double_value=value)
+        elif isinstance(value, str):
+            output_struct.fields[key] = VanetStructValue(string_value=value)
+        elif isinstance(value, dict):
+            output_struct.fields[key] = VanetStructValue(struct_value=convert_struct_to_vanet(value))
+        elif isinstance(value, bytes):
+            output_struct.fields[key] = VanetStructValue(data_value=value)
+        elif isinstance(value, list) and all(isinstance(x, float) for x in value):
+            output_struct.fields[key] = VanetStructValue(buffer_value=VanetSampleBuffer(value))
+        else:
+            msg = "Unknown type of value in struct!"
+            raise ValueError(msg)
+
+    return output_struct
+
+
+def convert_struct_from_vanet(input_struct: VanetStruct) -> VAStruct:
+    """
+    Converts a VanetStruct object to a VAStruct object.
+
+    Args:
+        input_struct: The input VanetStruct object to be converted.
+
+    Returns:
+        The converted VAStruct object.
+
+    Raises:
+        TypeError: If the input_struct is not of type VanetStruct.
+        ValueError: If the value in the struct is not set or if the kind of value is unknown.
+    """
+    if not isinstance(input_struct, VanetStruct):
+        msg = "input_struct must be of type VanetStruct"
+        raise TypeError(msg)
+
+    output_struct: VAStruct = {}
+    for key, value in input_struct.fields.items():
+        if betterproto.which_one_of(value, "kind")[0] == "bool_value":
+            output_struct[key] = value.bool_value
+        elif betterproto.which_one_of(value, "kind")[0] == "integer_value":
+            output_struct[key] = value.integer_value
+        elif betterproto.which_one_of(value, "kind")[0] == "double_value":
+            output_struct[key] = value.double_value
+        elif betterproto.which_one_of(value, "kind")[0] == "string_value":
+            output_struct[key] = value.string_value
+        elif betterproto.which_one_of(value, "kind")[0] == "struct_value":
+            output_struct[key] = convert_struct_from_vanet(value.struct_value)
+        elif betterproto.which_one_of(value, "kind")[0] == "data_value":
+            output_struct[key] = value.data_value
+        elif betterproto.which_one_of(value, "kind")[0] == "buffer_value":
+            output_struct[key] = value.buffer_value.samples
+        elif betterproto.which_one_of(value, "kind")[0] == "":
+            msg = "Value in struct not set!"
+            raise ValueError(msg)
+        else:
+            msg = "Unknown kind of value in struct!"
+            raise ValueError(msg)
+
+    return output_struct
+
+
+def convert_vector_to_vanet(input_vector: Union[VAVector, list[float], tuple[float, float, float]]) -> VanetVector:
+    """
+    Converts a vector to a VanetVector object.
+
+    Args:
+        input_vector: The input vector to be converted.
+
+    Returns:
+        The converted VanetVector object.
+
+    Raises:
+        ValueError: If the input vector is not of type VAVector, list, or tuple,
+            or if the length of the input vector is not 3.
+    """
+    if isinstance(input_vector, VAVector):
+        return VanetVector(x=input_vector.x, y=input_vector.y, z=input_vector.z)
+
+    if isinstance(input_vector, (list, tuple)):
+        if len(input_vector) != 3:  # noqa: PLR2004
+            msg = "Vector must be of length 3"
+            raise ValueError(msg)
+        return VanetVector(x=input_vector[0], y=input_vector[1], z=input_vector[2])
+
+    msg = "Vector must be of type VAVector, list or tuple"
+    raise ValueError(msg)
+
+
+def convert_vector_from_vanet(input_vector: VanetVector) -> VAVector:
+    """
+    Converts a VanetVector object to a VAVector object.
+
+    Args:
+        input_vector: The VanetVector object to be converted.
+
+    Returns:
+        The converted VAVector object.
+    """
+    return VAVector(x=input_vector.x, y=input_vector.y, z=input_vector.z)
+
+
+def convert_quaternion_to_vanet(
+    input_quaternion: Union[VAQuaternion, list[float], tuple[float, float, float, float]],
+) -> VanetQuaternion:
+    """
+    Converts a quaternion to a VanetQuaternion object.
+
+    Args:
+        input_quaternion: The input quaternion to be converted.
+
+    Returns:
+        The converted VanetQuaternion object.
+
+    Raises:
+        ValueError: If the input quaternion is not of type VAQuaternion, list, or tuple, or if its length is not 4.
+    """
+    if isinstance(input_quaternion, VAQuaternion):
+        return VanetQuaternion(x=input_quaternion.x, y=input_quaternion.y, z=input_quaternion.z, w=input_quaternion.w)
+
+    if isinstance(input_quaternion, (list, tuple)):
+        if len(input_quaternion) != 4:  # noqa: PLR2004
+            msg = "Quaternion must be of length 4"
+            raise ValueError(msg)
+        return VanetQuaternion(
+            x=input_quaternion[0], y=input_quaternion[1], z=input_quaternion[2], w=input_quaternion[3]
+        )
+
+    msg = "Quaternion must be of type VAQuaternion, list or tuple"
+    raise ValueError(msg)
+
+
+def convert_quaternion_from_vanet(input_quaternion: VanetQuaternion) -> VAQuaternion:
+    """
+    Converts a VanetQuaternion object to a VAQuaternion object.
+
+    Args:
+        input_quaternion (VanetQuaternion): The input VanetQuaternion object to be converted.
+
+    Returns:
+        VAQuaternion: The converted VAQuaternion object.
+    """
+    return VAQuaternion(x=input_quaternion.x, y=input_quaternion.y, z=input_quaternion.z, w=input_quaternion.w)
diff --git a/src/vasingleton.cpp b/src/vasingleton.cpp
deleted file mode 100644
index b3914c981eac6b20d45260bc27cd0a20320874f7..0000000000000000000000000000000000000000
--- a/src/vasingleton.cpp
+++ /dev/null
@@ -1,216 +0,0 @@
-/*
- *  --------------------------------------------------------------------------------------------
- *
- *    VVV        VVV A           Virtual Acoustics (VA) | http://www.virtualacoustics.org
- *     VVV      VVV AAA          Licensed under the Apache License, Version 2.0
- *      VVV    VVV   AAA
- *       VVV  VVV     AAA        Copyright 2015-2023
- *        VVVVVV       AAA       Institute of Technical Acoustics (ITA)
- *         VVVV         AAA      RWTH Aachen University
- *
- *  --------------------------------------------------------------------------------------------
- */
-
-#include "common.hpp"
-#include "vasingletondoc.hpp"
-
-// All Python to VA methods. Also pulls in g_pVAError (Python error trace instance)
-#include "vasingletonmethods.hpp"
-
-// VA methods that will appear in Python if they are added to the following table
-// It's corresponding C++ functions are implemented here: vasingletonmethods.hpp
-
-static struct PyMethodDef va_methods[] = {
-	{ "connect", (PyCFunction)connect, METH_VARARGS | METH_KEYWORDS, connect_doc },
-	{ "disconnect", (PyCFunction)disconnect, METH_NOARGS, no_doc },
-	{ "is_connected", (PyCFunction)is_connected, METH_NOARGS, no_doc },
-	{ "reset", (PyCFunction)reset, METH_NOARGS, no_doc },
-
-	{ "get_version", (PyCFunction)get_version, METH_NOARGS, no_doc },
-	{ "get_modules", (PyCFunction)get_modules, METH_NOARGS, no_doc },
-	{ "call_module", (PyCFunction)call_module, METH_VARARGS | METH_KEYWORDS, no_doc },
-
-	{ "get_search_paths", (PyCFunction)get_search_paths, METH_NOARGS, no_doc },
-	{ "add_search_path", (PyCFunction)add_search_path, METH_VARARGS | METH_KEYWORDS, no_doc },
-
-	{ "create_directivity_from_file", (PyCFunction)create_directivity_from_file, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "delete_directivity", (PyCFunction)delete_directivity, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_directivity_info", (PyCFunction)get_directivity_info, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_directivity_infos", (PyCFunction)get_directivity_infos, METH_NOARGS, no_doc },
-
-	{ "create_signal_source_buffer_from_file", (PyCFunction)create_signal_source_buffer_from_file, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "create_signal_source_prototype_from_parameters", (PyCFunction)create_signal_source_prototype_from_parameters, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "create_signal_source_text_to_speech", (PyCFunction)create_signal_source_text_to_speech, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "create_signal_source_sequencer", (PyCFunction)create_signal_source_sequencer, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "create_signal_source_network_stream", (PyCFunction)create_signal_source_network_stream, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "create_signal_source_engine", (PyCFunction)create_signal_source_engine, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "create_signal_source_machine", (PyCFunction)create_signal_source_machine, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "delete_signal_source", (PyCFunction)delete_signal_source, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_signal_source_info", (PyCFunction)get_signal_source_info, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_signal_source_infos", (PyCFunction)get_signal_source_infos, METH_NOARGS, no_doc },
-	{ "get_signal_source_buffer_playback_state", (PyCFunction)get_signal_source_buffer_playback_state, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_signal_source_buffer_playback_state_str", (PyCFunction)get_signal_source_buffer_playback_state_str, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "set_signal_source_buffer_playback_action", (PyCFunction)set_signal_source_buffer_playback_action, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "set_signal_source_buffer_playback_action_str", (PyCFunction)set_signal_source_buffer_playback_action_str, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "set_signal_source_buffer_playback_position", (PyCFunction)set_signal_source_buffer_playback_position, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_signal_source_buffer_looping", (PyCFunction)get_signal_source_buffer_looping, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "set_signal_source_buffer_looping", (PyCFunction)set_signal_source_buffer_looping, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "set_signal_source_machine_start_machine", (PyCFunction)set_signal_source_machine_start_machine, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "set_signal_source_machine_halt_machine", (PyCFunction)set_signal_source_machine_halt_machine, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_signal_source_machine_state_str", (PyCFunction)get_signal_source_machine_state_str, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "set_signal_source_machine_speed", (PyCFunction)set_signal_source_machine_speed, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_signal_source_machine_speed", (PyCFunction)get_signal_source_machine_speed, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "set_signal_source_machine_start_file", (PyCFunction)set_signal_source_machine_start_file, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "set_signal_source_machine_idle_file", (PyCFunction)set_signal_source_machine_idle_file, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "set_signal_source_machine_stop_file", (PyCFunction)set_signal_source_machine_stop_file, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "set_signal_source_parameters", (PyCFunction)set_signal_source_parameters, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_signal_source_parameters", (PyCFunction)get_signal_source_parameters, METH_VARARGS | METH_KEYWORDS, no_doc },
-
-	{ "get_sound_source_ids", (PyCFunction)get_sound_source_ids, METH_NOARGS, no_doc },
-	{ "create_sound_source", (PyCFunction)create_sound_source, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "create_sound_source_explicit_renderer", (PyCFunction)create_sound_source_explicit_renderer, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "delete_sound_source", (PyCFunction)delete_sound_source, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "set_sound_source_enabled", (PyCFunction)set_sound_source_enabled, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_sound_source_enabled", (PyCFunction)get_sound_source_enabled, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_sound_source_name", (PyCFunction)get_sound_source_name, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "set_sound_source_enabled", (PyCFunction)set_sound_source_enabled, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_sound_source_signal_source", (PyCFunction)get_sound_source_signal_source, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "set_sound_source_signal_source", (PyCFunction)set_sound_source_signal_source, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "remove_sound_source_signal_source", (PyCFunction)remove_sound_source_signal_source, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_sound_source_auralization_mode", (PyCFunction)get_sound_source_auralization_mode, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "set_sound_source_auralization_mode", (PyCFunction)set_sound_source_auralization_mode, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "set_sound_source_parameters", (PyCFunction)set_sound_source_parameters, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_sound_source_parameters", (PyCFunction)get_sound_source_parameters, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_sound_source_directivity", (PyCFunction)get_sound_source_directivity, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "set_sound_source_directivity", (PyCFunction)set_sound_source_directivity, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_sound_source_sound_power", (PyCFunction)get_sound_source_sound_power, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "set_sound_source_sound_power", (PyCFunction)set_sound_source_sound_power, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_sound_source_muted", (PyCFunction)get_sound_source_muted, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "set_sound_source_muted", (PyCFunction)set_sound_source_muted, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_sound_source_position", (PyCFunction)get_sound_source_position, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "set_sound_source_position", (PyCFunction)set_sound_source_position, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_sound_source_orientation_vu", (PyCFunction)get_sound_source_orientation_vu, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "set_sound_source_orientation_vu", (PyCFunction)set_sound_source_orientation_vu, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_sound_source_orientation_q", (PyCFunction)not_implemented, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "set_sound_source_orientation_q", (PyCFunction)not_implemented, METH_VARARGS | METH_KEYWORDS, no_doc },
-
-	{ "get_sound_receiver_ids", (PyCFunction)get_sound_receiver_ids, METH_NOARGS, no_doc },
-	{ "create_sound_receiver", (PyCFunction)create_sound_receiver, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "create_sound_receiver_explicit_renderer", (PyCFunction)create_sound_receiver_explicit_renderer, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "delete_sound_receiver", (PyCFunction)delete_sound_receiver, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "set_sound_receiver_enabled", (PyCFunction)set_sound_receiver_enabled, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_sound_receiver_enabled", (PyCFunction)get_sound_receiver_enabled, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_sound_receiver_name", (PyCFunction)get_sound_receiver_name, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "set_sound_receiver_enabled", (PyCFunction)set_sound_receiver_enabled, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_sound_receiver_auralization_mode", (PyCFunction)get_sound_receiver_auralization_mode, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "set_sound_receiver_auralization_mode", (PyCFunction)set_sound_receiver_auralization_mode, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "set_sound_receiver_parameters", (PyCFunction)set_sound_receiver_parameters, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_sound_receiver_parameters", (PyCFunction)get_sound_receiver_parameters, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_sound_receiver_directivity", (PyCFunction)get_sound_receiver_directivity, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "set_sound_receiver_directivity", (PyCFunction)set_sound_receiver_directivity, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_sound_receiver_muted", (PyCFunction)get_sound_receiver_muted, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "set_sound_receiver_muted", (PyCFunction)set_sound_receiver_muted, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_sound_receiver_position", (PyCFunction)get_sound_receiver_position, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "set_sound_receiver_position", (PyCFunction)set_sound_receiver_position, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_sound_receiver_orientation_vu", (PyCFunction)get_sound_receiver_orientation_vu, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "set_sound_receiver_orientation_vu", (PyCFunction)set_sound_receiver_orientation_vu, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_sound_receiver_orientation_q", (PyCFunction)not_implemented, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "set_sound_receiver_orientation_q", (PyCFunction)not_implemented, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_sound_receiver_real_world_position", (PyCFunction)get_sound_receiver_real_world_position, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "set_sound_receiver_real_world_position", (PyCFunction)set_sound_receiver_real_world_position, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_sound_receiver_real_world_orientation_vu", (PyCFunction)get_sound_receiver_real_world_orientation_vu, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "set_sound_receiver_real_world_orientation_vu", (PyCFunction)set_sound_receiver_real_world_orientation_vu, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_sound_receiver_real_world_orientation_q", (PyCFunction)not_implemented, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "set_sound_receiver_real_world_orientation_q", (PyCFunction)not_implemented, METH_VARARGS | METH_KEYWORDS, no_doc },
-
-	{ "get_sound_portal_ids", (PyCFunction)get_sound_portal_ids, METH_NOARGS, no_doc },
-	{ "get_sound_portal_name", (PyCFunction)get_sound_portal_name, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "set_sound_portal_name", (PyCFunction)set_sound_portal_name, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_sound_portal_enabled", (PyCFunction)get_sound_portal_enabled, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "set_sound_portal_enabled", (PyCFunction)set_sound_portal_enabled, METH_VARARGS | METH_KEYWORDS, no_doc },
-
-	{ "get_homogeneous_medium_sound_speed", (PyCFunction)get_homogeneous_medium_sound_speed, METH_NOARGS, no_doc },
-	{ "set_homogeneous_medium_sound_speed", (PyCFunction)set_homogeneous_medium_sound_speed, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_homogeneous_medium_temperature", (PyCFunction)get_homogeneous_medium_temperature, METH_NOARGS, no_doc },
-	{ "set_homogeneous_medium_temperature", (PyCFunction)set_homogeneous_medium_temperature, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_homogeneous_medium_static_pressure", (PyCFunction)get_homogeneous_medium_static_pressure, METH_NOARGS, no_doc },
-	{ "set_homogeneous_medium_static_pressure", (PyCFunction)set_homogeneous_medium_static_pressure, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_homogeneous_medium_relative_humidity", (PyCFunction)get_homogeneous_medium_relative_humidity, METH_NOARGS, no_doc },
-	{ "set_homogeneous_medium_relative_humidity", (PyCFunction)set_homogeneous_medium_relative_humidity, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_homogeneous_medium_shift_speed", (PyCFunction)get_homogeneous_medium_shift_speed, METH_NOARGS, no_doc },
-	{ "set_homogeneous_medium_shift_speed", (PyCFunction)set_homogeneous_medium_shift_speed, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_homogeneous_medium_parameters", (PyCFunction)get_homogeneous_medium_parameters, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "set_homogeneous_medium_parameters", (PyCFunction)set_homogeneous_medium_parameters, METH_VARARGS | METH_KEYWORDS, no_doc },
-
-
-	{ "create_acoustic_material", (PyCFunction)create_acoustic_material, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "create_acoustic_material_from_file", (PyCFunction)create_acoustic_material_from_file, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "create_acoustic_material_from_parameters", (PyCFunction)create_acoustic_material_from_parameters, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_acoustic_material_infos", (PyCFunction)get_acoustic_material_infos, METH_NOARGS, no_doc },
-
-
-	{ "get_rendering_modules", (PyCFunction)get_rendering_modules, METH_NOARGS, no_doc },
-	{ "set_rendering_module_muted", (PyCFunction)set_rendering_module_muted, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_rendering_module_muted", (PyCFunction)get_rendering_module_muted, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "set_rendering_module_gain", (PyCFunction)set_rendering_module_gain, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_rendering_module_gain", (PyCFunction)get_rendering_module_gain, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_rendering_module_auralization_mode", (PyCFunction)get_rendering_module_auralization_mode, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "set_rendering_module_auralization_mode", (PyCFunction)set_rendering_module_auralization_mode, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_rendering_module_parameters", (PyCFunction)get_rendering_module_parameters, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "set_rendering_module_parameters", (PyCFunction)set_rendering_module_parameters, METH_VARARGS | METH_KEYWORDS, no_doc },
-
-	{ "get_reproduction_modules", (PyCFunction)get_reproduction_modules, METH_NOARGS, no_doc },
-	{ "set_reproduction_module_muted", (PyCFunction)set_reproduction_module_muted, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_reproduction_module_muted", (PyCFunction)get_reproduction_module_muted, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "set_reproduction_module_gain", (PyCFunction)set_reproduction_module_gain, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_reproduction_module_gain", (PyCFunction)get_reproduction_module_gain, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_reproduction_module_parameters", (PyCFunction)get_reproduction_module_parameters, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "set_reproduction_module_parameters", (PyCFunction)set_reproduction_module_parameters, METH_VARARGS | METH_KEYWORDS, no_doc },
-
-
-	{ "lock_update", (PyCFunction)lock_update, METH_NOARGS, no_doc },
-	{ "unlock_update", (PyCFunction)unlock_update, METH_NOARGS, no_doc },
-	{ "get_update_locked", (PyCFunction)get_update_locked, METH_NOARGS, no_doc },
-
-	{ "get_input_gain", (PyCFunction)get_input_gain, METH_NOARGS, no_doc },
-	{ "set_input_gain", (PyCFunction)set_input_gain, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_input_muted", (PyCFunction)get_input_muted, METH_NOARGS, no_doc },
-	{ "set_input_muted", (PyCFunction)set_input_muted, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_output_gain", (PyCFunction)get_output_gain, METH_NOARGS, no_doc },
-	{ "set_output_gain", (PyCFunction)set_output_gain, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_output_muted", (PyCFunction)get_output_muted, METH_NOARGS, no_doc },
-	{ "set_output_muted", (PyCFunction)set_output_muted, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_global_auralization_mode", (PyCFunction)get_global_auralization_mode, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "set_global_auralization_mode", (PyCFunction)set_global_auralization_mode, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_core_clock", (PyCFunction)get_core_clock, METH_NOARGS, no_doc },
-	{ "set_core_clock", (PyCFunction)set_core_clock, METH_VARARGS | METH_KEYWORDS, no_doc },
-
-	{ "substitute_macro", (PyCFunction)substitute_macro, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "find_file_path", (PyCFunction)find_file_path, METH_VARARGS | METH_KEYWORDS, no_doc },
-
-	{ "get_core_configuration", (PyCFunction)get_core_configuration, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_hardware_configuration", (PyCFunction)get_hardware_configuration, METH_NOARGS, no_doc },
-	{ "get_file_list", (PyCFunction)get_file_list, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_log_level_str", (PyCFunction)get_log_level_str, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "parse_auralization_mode_str", (PyCFunction)parse_auralization_mode_str, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_auralization_mode_str", (PyCFunction)get_auralization_mode_str, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_volume_str_decibel", (PyCFunction)get_volume_str_decibel, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "parse_playback_state_str", (PyCFunction)parse_playback_state_str, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_playback_state_str", (PyCFunction)get_playback_state_str, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "parse_playback_action_str", (PyCFunction)parse_playback_action_str, METH_VARARGS | METH_KEYWORDS, no_doc },
-	{ "get_playback_action_str", (PyCFunction)get_playback_action_str, METH_VARARGS | METH_KEYWORDS, no_doc },
-
-	{ NULL, NULL }
-};
-
-static struct PyModuleDef vamoduledef = { PyModuleDef_HEAD_INIT, "va", module_doc, -1, va_methods, NULL, NULL, NULL, NULL };
-
-PyMODINIT_FUNC PyInit_VAPython( void )
-{
-	PyObject* pModule = PyModule_Create( &vamoduledef );
-	g_pVAError        = PyErr_NewException( "va.error", NULL, NULL );
-	Py_INCREF( g_pVAError );
-
-	// PyAdd
-	return pModule;
-}
diff --git a/src/vasingletondoc.hpp b/src/vasingletondoc.hpp
deleted file mode 100644
index f9aafeddbeb00c6ead24bfbbc445f8349962724b..0000000000000000000000000000000000000000
--- a/src/vasingletondoc.hpp
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- *  --------------------------------------------------------------------------------------------
- *
- *    VVV        VVV A           Virtual Acoustics (VA) | http://www.virtualacoustics.org
- *     VVV      VVV AAA          Licensed under the Apache License, Version 2.0
- *      VVV    VVV   AAA
- *       VVV  VVV     AAA        Copyright 2015-2021
- *        VVVVVV       AAA       Institute of Technical Acoustics (ITA)
- *         VVVV         AAA      RWTH Aachen University
- *
- *  --------------------------------------------------------------------------------------------
- */
-#ifndef IW_VA_PYTHON_DOC
-#define IW_VA_PYTHON_DOC
-
-#include "common.hpp"
-
-PyDoc_STRVAR( module_doc,
-              "connect(server, port) - connect to a VA server at given server and listening port\n"
-              "disconnect() - disconnect from VA server." );
-
-PyDoc_STRVAR( no_doc,
-              "For this method no dedicated documentation is available. Please read the C++ API documentation\n"
-              "of this method for further information." );
-
-PyDoc_STRVAR( connect_doc,
-              "Connect($module, /, server, port)\n"
-              "--\n"
-              "\n"
-              "Connect to a VA server.\n"
-              "\n"
-              "  server\n"
-              "    Remote server IP.\n"
-              "  port\n"
-              "    TCP/IP listening port, usually 12340." );
-
-#endif
\ No newline at end of file
diff --git a/src/vasingletonmethods.hpp b/src/vasingletonmethods.hpp
deleted file mode 100644
index e681ce3b380c7fc658b4931681592759d0197699..0000000000000000000000000000000000000000
--- a/src/vasingletonmethods.hpp
+++ /dev/null
@@ -1,2772 +0,0 @@
-/*
- *  --------------------------------------------------------------------------------------------
- *
- *    VVV        VVV A           Virtual Acoustics (VA) | http://www.virtualacoustics.org
- *     VVV      VVV AAA          Licensed under the Apache License, Version 2.0
- *      VVV    VVV   AAA
- *       VVV  VVV     AAA        Copyright 2015-2021
- *        VVVVVV       AAA       Institute of Technical Acoustics (ITA)
- *         VVVV         AAA      RWTH Aachen University
- *
- *  --------------------------------------------------------------------------------------------
- */
-#ifndef IW_VA_PYTHON_METHODS
-#define IW_VA_PYTHON_METHODS
-
-#include "common.hpp"
-
-#include <VA.h>
-#include <VANet.h>
-#include <string>
-
-// If you want to extend the va Python pSelf interface, also add
-// the function to the va_methods table in vasingleton.cpp - otherwise they will not show up.
-// Documentation goes into vasingletondoc.hpp
-
-static std::unique_ptr<IVANetClient> g_pVANetClient = nullptr; //!< Static pointer to VANetClient instance
-// static IVANetClient* g_pVANetClient = nullptr; //!< Static pointer to VANetClient instance
-static PyObject* g_pVAError = nullptr; //!< Static pointer to error instance
-
-// Ugly definitions to ease try-catching VA exceptions
-#define VAPY_REQUIRE_CONN_TRY \
-	try                       \
-	{                         \
-		RequireCoreAvailable( );
-#define VAPY_CATCH_RETURN                                                \
-	}                                                                    \
-	catch( const CVAException& oError )                                  \
-	{                                                                    \
-		PyErr_SetString( PyExc_Exception, oError.ToString( ).c_str( ) ); \
-		return NULL;                                                     \
-	}
-
-//! Helper for API dev
-static PyObject* not_implemented( PyObject*, PyObject* )
-{
-	VA_EXCEPT_NOT_IMPLEMENTED;
-};
-
-//! Raises an exception if core is not available
-static void RequireCoreAvailable( )
-{
-	if( !g_pVANetClient )
-		VA_EXCEPT2( NETWORK_ERROR, "VA client not available, please connect first" );
-
-	if( !g_pVANetClient->GetCoreInstance( ) )
-		VA_EXCEPT2( NETWORK_ERROR, "VA client available, but access to VA interface failed. Please reconnect." );
-};
-
-std::string SaveStringToUnicodeConversion( const std::string& sInputString )
-{
-	std::string sOutputString = sInputString;
-	const Py_ssize_t iLength  = sInputString.length( );
-	char* pcBuffer( &sOutputString[0] );
-	for( Py_ssize_t i = 0; i < iLength; i++ )
-		if( pcBuffer[i] < 0 )
-			pcBuffer[i] = '_';
-	return &sOutputString[0];
-};
-
-PyObject* ConvertFloatVectorToPythonList( const std::vector<float> vfValues )
-{
-	PyObject* pList = PyList_New( vfValues.size( ) );
-
-	for( Py_ssize_t i = 0; i < PyList_Size( pList ); i++ )
-		PyList_SetItem( pList, i, PyLong_FromDouble( vfValues[i] ) );
-
-	return pList;
-};
-
-PyObject* ConvertDoubleVectorToPythonList( const std::vector<double> vdValues )
-{
-	PyObject* pList = PyList_New( vdValues.size( ) );
-
-	for( Py_ssize_t i = 0; i < PyList_Size( pList ); i++ )
-		PyList_SetItem( pList, i, PyLong_FromDouble( vdValues[i] ) );
-
-	return pList;
-};
-
-PyObject* ConvertIntVectorToPythonList( const std::vector<int> viValues )
-{
-	PyObject* pList = PyList_New( viValues.size( ) );
-
-	for( Py_ssize_t i = 0; i < PyList_Size( pList ); i++ )
-		PyList_SetItem( pList, i, PyLong_FromLong( viValues[i] ) );
-
-	return pList;
-};
-
-//! Helper to convert recursively from VAStruct to Python dict
-PyObject* ConvertVAStructToPythonDict( const CVAStruct& oInStruct )
-{
-	PyObject* pOutDict = PyDict_New( );
-
-	CVAStruct::const_iterator cit = oInStruct.Begin( );
-	while( cit != oInStruct.End( ) )
-	{
-		const std::string sKey( ( *cit++ ).first );
-		const CVAStructValue& oValue( oInStruct[sKey] );
-
-		PyObject* pNewValue = nullptr;
-		if( oValue.IsBool( ) )
-		{
-			pNewValue = PyBool_FromLong( bool( oValue ) );
-		}
-		else if( oValue.IsInt( ) )
-		{
-			pNewValue = PyLong_FromLong( int( oValue ) );
-		}
-		else if( oValue.IsDouble( ) )
-		{
-			pNewValue = PyFloat_FromDouble( double( oValue ) );
-		}
-		else if( oValue.IsString( ) )
-		{
-			pNewValue = PyUnicode_FromString( SaveStringToUnicodeConversion( std::string( oValue ) ).c_str( ) );
-		}
-		else if( oValue.IsStruct( ) )
-		{
-			pNewValue = ConvertVAStructToPythonDict( oValue );
-		}
-		else if( oValue.IsData( ) )
-		{
-			pNewValue = PyByteArray_FromStringAndSize( (char*)oValue.GetData( ), oValue.GetDataSize( ) );
-			Py_INCREF( pNewValue );
-		}
-		else if( oValue.IsSampleBuffer( ) )
-		{
-			const CVASampleBuffer& oSampleBuffer( oValue );
-			pNewValue = PyList_New( oSampleBuffer.GetNumSamples( ) );
-			Py_INCREF( pNewValue );
-			for( int i = 0; i < oSampleBuffer.GetNumSamples( ); i++ )
-				PyList_SetItem( pNewValue, i, PyFloat_FromDouble( oSampleBuffer.GetDataReadOnly( )[i] ) );
-		}
-		else
-		{
-			VA_EXCEPT2( INVALID_PARAMETER, "Could not interpret value of key '" + sKey + "' as a supported python dict type. Value was" + oValue.ToString( ) );
-		}
-
-		if( !pNewValue )
-			VA_EXCEPT2( INVALID_PARAMETER, "Could not create python object from value of key '" + sKey + "'. Value was" + oValue.ToString( ) );
-
-		if( PyDict_SetItemString( pOutDict, sKey.c_str( ), pNewValue ) == -1 )
-			VA_EXCEPT2( INVALID_PARAMETER, "Could not create python object from value of key '" + sKey + "'. Value was" + oValue.ToString( ) );
-	}
-
-	return pOutDict;
-};
-
-//! Helper to convert recursively from Python dict to VAStruct
-CVAStruct ConvertPythonDictToVAStruct( PyObject* pInDict )
-{
-	CVAStruct oReturn;
-
-	if( pInDict == nullptr )
-		return oReturn;
-
-	PyObject* pKeyList   = PyDict_Keys( pInDict );
-	PyObject* pValueList = PyDict_Values( pInDict );
-
-	for( Py_ssize_t i = 0; i < PyList_Size( pKeyList ); i++ )
-	{
-		PyObject* pKey   = PyList_GetItem( pKeyList, i );
-		PyObject* pValue = PyList_GetItem( pValueList, i );
-		char* pcKeyName  = nullptr;
-		if( !PyArg_Parse( pKey, "s", &pcKeyName ) )
-			VA_EXCEPT2( INVALID_PARAMETER, "Invalid key '" + std::string( pcKeyName ) + "'" );
-
-		if( Py_None == pValue )
-		{
-			oReturn[pcKeyName] = false;
-		}
-		else if( PyBool_Check( pValue ) )
-		{
-			oReturn[pcKeyName] = ( PyLong_AsLong( pValue ) != 0 );
-		}
-		else if( PyLong_Check( pValue ) )
-		{
-			oReturn[pcKeyName] = PyLong_AsLong( pValue );
-		}
-		else if( PyFloat_Check( pValue ) )
-		{
-			oReturn[pcKeyName] = PyFloat_AsDouble( pValue );
-		}
-		else if( PyUnicode_Check( pValue ) )
-		{
-			char* pcStringValue = nullptr;
-			if( !PyArg_Parse( pValue, "s", &pcStringValue ) )
-				VA_EXCEPT2( INVALID_PARAMETER, "Invalid string value at key '" + std::string( pcKeyName ) + "': " + std::string( pcStringValue ) );
-			oReturn[pcKeyName] = std::string( pcStringValue );
-		}
-		else if( PyDict_Check( pValue ) )
-		{
-			oReturn[pcKeyName] = ConvertPythonDictToVAStruct( pValue );
-		}
-		else if( PyList_Check( pValue ) )
-		{
-			// Sample buffer
-			CVASampleBuffer oBuffer( int( PyList_Size( pValue ) ) );
-			for( int n = 0; n < oBuffer.GetNumSamples( ); n++ )
-			{
-				PyObject* pSample = PyList_GetItem( pValue, n );
-				if( !PyFloat_Check( pSample ) )
-				{
-					VA_EXCEPT2( INVALID_PARAMETER, "Samples must be floating point values" );
-				}
-				else
-				{
-					oBuffer.vfSamples[n] = float( PyFloat_AsDouble( pSample ) );
-				}
-			}
-			oReturn[pcKeyName] = oBuffer;
-		}
-		else if( PyBytes_Check( pValue ) )
-		{
-			// Data blob
-			size_t iBytes      = PyBytes_Size( pValue );
-			char* pcData       = PyBytes_AsString( pValue );
-			oReturn[pcKeyName] = CVAStructValue( pcData, int( iBytes ) );
-		}
-		else
-		{
-			VA_EXCEPT2( INVALID_PARAMETER, "Could not interpret value of key '" + std::string( pcKeyName ) + "' as a supported VAStruct type." )
-		}
-	}
-
-	return oReturn;
-};
-
-CVAAcousticMaterial ConvertPythonDictToAcousticMaterial( PyObject* pMaterial )
-{
-	CVAAcousticMaterial oMaterial;
-	VA_EXCEPT_NOT_IMPLEMENTED;
-	return oMaterial;
-}
-
-
-// ------------------------------- Python module extension methods
-
-static PyObject* connect( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	if( !g_pVANetClient )
-		g_pVANetClient = IVANetClient::Create( );
-
-	if( g_pVANetClient->IsConnected( ) )
-	{
-		PyErr_WarnEx( NULL, "Was still connected, forced disconnect.", 1 );
-		g_pVANetClient->Disconnect( );
-	}
-
-	static char* pKeyWordList[] = { "name", "port", NULL };
-	const char* pcFormat        = "|si:connect";
-	char* pcServerIP            = nullptr;
-	int iServerPort             = 12340;
-
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcServerIP, &iServerPort ) )
-		return NULL;
-
-	std::string sServerIP = pcServerIP ? std::string( pcServerIP ) : "localhost";
-
-	if( g_pVANetClient->Initialize( sServerIP, iServerPort ) )
-		return PyBool_FromLong( 1 );
-
-	PyErr_SetString( PyExc_ConnectionError, std::string( "Could not connect to " + sServerIP + " on " + std::to_string( (long)iServerPort ) ).c_str( ) );
-	return NULL;
-};
-
-static PyObject* disconnect( PyObject*, PyObject* )
-{
-	if( !g_pVANetClient )
-		return PyBool_FromLong( 0 );
-
-	return PyBool_FromLong( g_pVANetClient->Disconnect( ) );
-};
-
-static PyObject* is_connected( PyObject*, PyObject* )
-{
-	if( !g_pVANetClient )
-		return PyBool_FromLong( 0 );
-	else
-		return PyBool_FromLong( g_pVANetClient->IsConnected( ) );
-};
-
-static PyObject* reset( PyObject*, PyObject* )
-{
-	VAPY_REQUIRE_CONN_TRY;
-	g_pVANetClient->GetCoreInstance( )->Reset( );
-	Py_RETURN_NONE;
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_version( PyObject*, PyObject* )
-{
-	VAPY_REQUIRE_CONN_TRY;
-	CVAVersionInfo oInfo;
-	g_pVANetClient->GetCoreInstance( )->GetVersionInfo( &oInfo );
-	return PyUnicode_FromString( SaveStringToUnicodeConversion( oInfo.ToString( ) ).c_str( ) );
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_modules( PyObject*, PyObject* )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	std::vector<CVAModuleInfo> voModuleInfos;
-	g_pVANetClient->GetCoreInstance( )->GetModules( voModuleInfos );
-
-	PyObject* pModuleList = PyList_New( voModuleInfos.size( ) );
-
-	for( size_t i = 0; i < voModuleInfos.size( ); i++ )
-	{
-		CVAModuleInfo& oModule( voModuleInfos[i] );
-		PyObject* pModuleInfo = Py_BuildValue( "{s:i,s:s,s:s}", "index", i, "name", SaveStringToUnicodeConversion( oModule.sName ).c_str( ), "description",
-		                                       SaveStringToUnicodeConversion( oModule.sDesc ).c_str( ) );
-		PyList_SetItem( pModuleList, i, pModuleInfo ); // steals reference
-	}
-
-	return pModuleList;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* call_module( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "module_name", "arguments_dict", NULL };
-	const char* pcFormat        = "sO:call_module";
-	char* pcModuleName          = nullptr;
-	PyObject* pArgumentsDict    = nullptr;
-
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcModuleName, &pArgumentsDict ) )
-		return NULL;
-
-	std::string sModuleName = pcModuleName ? std::string( pcModuleName ) : "";
-	CVAStruct oInArgs       = ConvertPythonDictToVAStruct( pArgumentsDict );
-	CVAStruct oOutArgs      = g_pVANetClient->GetCoreInstance( )->CallModule( sModuleName, oInArgs );
-
-	return ConvertVAStructToPythonDict( oOutArgs );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* add_search_path( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "directory_path", NULL };
-	const char* pcFormat        = "s:add_search_path";
-	char* pcPath                = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcPath ) )
-		return NULL;
-
-	return PyBool_FromLong( g_pVANetClient->GetCoreInstance( )->AddSearchPath( std::string( pcPath ) ) );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_search_paths( PyObject*, PyObject* )
-{
-	VAPY_REQUIRE_CONN_TRY;
-	CVAStruct oPaths = g_pVANetClient->GetCoreInstance( )->GetSearchPaths( );
-	return ConvertVAStructToPythonDict( oPaths );
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_update_locked( PyObject*, PyObject* )
-{
-	VAPY_REQUIRE_CONN_TRY;
-	return PyLong_FromLong( g_pVANetClient->GetCoreInstance( )->GetUpdateLocked( ) );
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* lock_update( PyObject*, PyObject* )
-{
-	VAPY_REQUIRE_CONN_TRY;
-	g_pVANetClient->GetCoreInstance( )->LockUpdate( );
-	Py_RETURN_NONE;
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* unlock_update( PyObject*, PyObject* )
-{
-	VAPY_REQUIRE_CONN_TRY;
-	return PyLong_FromLong( g_pVANetClient->GetCoreInstance( )->UnlockUpdate( ) );
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* create_directivity_from_file( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "path", "name", NULL };
-	const char* pcFormat        = "s|s:create_directivity_from_file";
-	char* pcPath                = nullptr;
-	char* pcName                = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcPath, &pcName ) )
-		return NULL;
-
-	std::string sName = pcName ? std::string( pcName ) : "";
-	return PyLong_FromLong( g_pVANetClient->GetCoreInstance( )->CreateDirectivityFromFile( std::string( pcPath ), sName ) );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* delete_directivity( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", NULL };
-	const char* pcFormat        = "i:delete_directivity";
-	long iID                    = -1;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iID ) )
-		return NULL;
-	return PyBool_FromLong( g_pVANetClient->GetCoreInstance( )->DeleteDirectivity( iID ) );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_directivity_info( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", NULL };
-	const char* pcFormat        = "i:get_directivity_info";
-	long iID                    = -1;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iID ) )
-		return NULL;
-
-	CVADirectivityInfo oInfo = g_pVANetClient->GetCoreInstance( )->GetDirectivityInfo( iID );
-
-	PyObject* pInfo = Py_BuildValue( "{s:i,s:s,s:i,s:i,s:s}", "id", oInfo.iID, "name", SaveStringToUnicodeConversion( oInfo.sName ).c_str( ), "class", oInfo.iClass,
-	                                 "references", oInfo.iReferences, "description", SaveStringToUnicodeConversion( oInfo.sDesc ).c_str( ) );
-
-	return pInfo;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_directivity_infos( PyObject*, PyObject* )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	std::vector<CVADirectivityInfo> voInfos;
-	g_pVANetClient->GetCoreInstance( )->GetDirectivityInfos( voInfos );
-
-	PyObject* pInfoList = PyList_New( voInfos.size( ) );
-
-	for( size_t i = 0; i < voInfos.size( ); i++ )
-	{
-		CVADirectivityInfo& oInfo( voInfos[i] );
-		PyObject* pInfo = Py_BuildValue( "{s:i,s:s,s:i,s:i,s:s}", "id", oInfo.iID, "name", SaveStringToUnicodeConversion( oInfo.sName ).c_str( ), "class", oInfo.iClass,
-		                                 "references", oInfo.iReferences, "description", SaveStringToUnicodeConversion( oInfo.sDesc ).c_str( ) );
-		PyList_SetItem( pInfoList, i, pInfo ); // steals reference
-	}
-
-	return pInfoList;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_sound_source_ids( PyObject*, PyObject* )
-{
-	VAPY_REQUIRE_CONN_TRY;
-	std::vector<int> viIDs;
-	g_pVANetClient->GetCoreInstance( )->GetSoundSourceIDs( viIDs );
-	return ConvertIntVectorToPythonList( viIDs );
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* create_sound_source( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "name", NULL };
-	const char* pcFormat        = "s:create_sound_source";
-	char* pcName                = nullptr;
-
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcName ) )
-		return NULL;
-
-	std::string sName = pcName ? std::string( pcName ) : "PySoundSource";
-	return PyLong_FromLong( g_pVANetClient->GetCoreInstance( )->CreateSoundSource( sName ) );
-	VAPY_CATCH_RETURN;
-};
-
-
-static PyObject* create_sound_source_explicit_renderer( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "renderer", "name", NULL };
-	const char* pcFormat        = "ss:create_sound_source_explicit_renderer";
-	char* pcRenderer            = nullptr;
-	char* pcName                = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcRenderer, &pcName ) )
-		return NULL;
-
-	std::string sRenderer = pcRenderer ? std::string( pcRenderer ) : "Unspecified";
-	std::string sName     = pcName ? std::string( pcName ) : "PySoundSource_" + sRenderer;
-	return PyLong_FromLong( g_pVANetClient->GetCoreInstance( )->CreateSoundSourceExplicitRenderer( sRenderer, sName ) );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* delete_sound_source( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", NULL };
-	const char* pcFormat        = "i:delete_sound_source";
-	long iID                    = -1;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iID ) )
-		return NULL;
-
-	return PyLong_FromLong( g_pVANetClient->GetCoreInstance( )->DeleteSoundSource( iID ) );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* set_sound_source_enabled( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", "enabled", NULL };
-	const char* pcFormat        = "i|b:set_sound_source_enabled";
-	long iID                    = -1;
-	bool bEnabled               = true;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iID, &bEnabled ) )
-		return NULL;
-
-	g_pVANetClient->GetCoreInstance( )->SetSoundSourceEnabled( iID, bEnabled );
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_sound_source_enabled( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", NULL };
-	const char* pcFormat        = "i:get_sound_source_enabled";
-	long iID                    = -1;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iID ) )
-		return NULL;
-
-	return PyBool_FromLong( g_pVANetClient->GetCoreInstance( )->GetSoundSourceEnabled( iID ) );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_sound_source_name( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", NULL };
-	const char* pcFormat        = "i:get_sound_source_name";
-	long iID                    = -1;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iID ) )
-		return NULL;
-
-	return PyUnicode_FromString( SaveStringToUnicodeConversion( g_pVANetClient->GetCoreInstance( )->GetSoundSourceName( iID ) ).c_str( ) );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_sound_source_signal_source( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", NULL };
-	const char* pcFormat        = "i:get_sound_source_signal_source";
-	long iID                    = -1;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iID ) )
-		return NULL;
-
-	return PyUnicode_FromString( SaveStringToUnicodeConversion( g_pVANetClient->GetCoreInstance( )->GetSoundSourceSignalSource( iID ) ).c_str( ) );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* set_sound_source_signal_source( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", "signalsource", NULL };
-	const char* pcFormat        = "is:set_sound_source_signal_source";
-	long iID                    = -1;
-	char* pcSignalSourceID;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iID, &pcSignalSourceID ) )
-		return NULL;
-
-	std::string sSIgnalSourceID = pcSignalSourceID ? std::string( pcSignalSourceID ) : "";
-	g_pVANetClient->GetCoreInstance( )->SetSoundSourceSignalSource( iID, sSIgnalSourceID );
-
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* remove_sound_source_signal_source( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", NULL };
-	const char* pcFormat        = "i:remove_sound_source_signal_source";
-	long iID                    = -1;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iID ) )
-		return NULL;
-
-	g_pVANetClient->GetCoreInstance( )->RemoveSoundSourceSignalSource( iID );
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_sound_source_auralization_mode( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", "short_mode", NULL };
-	const char* pcFormat        = "i|b:get_sound_source_auralization_mode";
-	long iID                    = -1;
-	bool bShortMode             = true;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iID ) )
-		return NULL;
-
-	const int iAM         = g_pVANetClient->GetCoreInstance( )->GetSoundSourceAuralizationMode( iID );
-	const std::string sAM = SaveStringToUnicodeConversion( IVAInterface::GetAuralizationModeStr( iAM, bShortMode ) );
-
-	return PyUnicode_FromString( sAM.c_str( ) );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* set_sound_source_auralization_mode( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", "auralizationmode", NULL };
-	const char* pcFormat        = "is:set_sound_source_auralization_mode";
-	long iID                    = -1;
-	char* pcAM                  = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iID, &pcAM ) )
-		return NULL;
-
-	std::string sAM      = pcAM ? std::string( pcAM ) : "";
-	const int iCurrentAM = g_pVANetClient->GetCoreInstance( )->GetSoundSourceAuralizationMode( iID );
-	const int iAM        = IVAInterface::ParseAuralizationModeStr( sAM, iCurrentAM );
-	g_pVANetClient->GetCoreInstance( )->SetSoundSourceAuralizationMode( iID, iAM );
-
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* set_sound_source_parameters( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", "parameters", NULL };
-	const char* pcFormat        = "iO:set_sound_source_parameters";
-	long iID                    = -1;
-	PyObject* pParameters       = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iID, &pParameters ) )
-		return NULL;
-
-	CVAStruct oParameters = ConvertPythonDictToVAStruct( pParameters );
-	g_pVANetClient->GetCoreInstance( )->SetSoundSourceParameters( iID, oParameters );
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-
-
-static PyObject* get_sound_source_parameters( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", NULL };
-	const char* pcFormat        = "iO:get_sound_source_parameters";
-	long iID                    = -1;
-	PyObject* pParameters       = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iID, &pParameters ) )
-		return NULL;
-
-	CVAStruct oParameters = ConvertPythonDictToVAStruct( pParameters );
-	CVAStruct oReturn     = g_pVANetClient->GetCoreInstance( )->GetSoundSourceParameters( iID, oParameters );
-	return ConvertVAStructToPythonDict( oReturn );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_sound_source_directivity( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", NULL };
-	const char* pcFormat        = "i:get_sound_source_directivity";
-	long iID                    = -1;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iID ) )
-		return NULL;
-
-	return PyLong_FromLong( g_pVANetClient->GetCoreInstance( )->GetSoundSourceDirectivity( iID ) );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* set_sound_source_directivity( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", "directivity", NULL };
-	const char* pcFormat        = "ii:set_sound_source_directivity";
-	long iID                    = -1;
-	long iDirectivityID         = -1;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iID, &iDirectivityID ) )
-		return NULL;
-
-	g_pVANetClient->GetCoreInstance( )->SetSoundSourceDirectivity( iID, iDirectivityID );
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_sound_source_sound_power( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", NULL };
-	const char* pcFormat        = "i:get_sound_source_sound_power";
-	long iID                    = -1;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iID ) )
-		return NULL;
-
-	return PyFloat_FromDouble( g_pVANetClient->GetCoreInstance( )->GetSoundSourceSoundPower( iID ) );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* set_sound_source_sound_power( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", "gain", NULL };
-	const char* pcFormat        = "id:set_sound_source_sound_power";
-	long iID                    = -1;
-	double dPower               = -1;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iID, &dPower ) )
-		return NULL;
-
-	g_pVANetClient->GetCoreInstance( )->SetSoundSourceSoundPower( iID, dPower );
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* set_sound_source_muted( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", "muted", NULL };
-	const char* pcFormat        = "i|b:set_sound_source_muted";
-	long iID                    = -1;
-	bool bMuted                 = true;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iID, &bMuted ) )
-		return NULL;
-
-	g_pVANetClient->GetCoreInstance( )->SetSoundSourceMuted( iID, bMuted );
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_sound_source_muted( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", NULL };
-	const char* pcFormat        = "i:get_sound_source_muted";
-	long iID                    = -1;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iID ) )
-		return NULL;
-
-	return PyBool_FromLong( g_pVANetClient->GetCoreInstance( )->GetSoundSourceMuted( iID ) );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_sound_source_position( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", NULL };
-	const char* pcFormat        = "i:get_sound_source_position";
-	long iID                    = -1;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iID ) )
-		return NULL;
-
-	VAVec3 v3Pos = g_pVANetClient->GetCoreInstance( )->GetSoundSourcePosition( iID );
-
-	PyObject* pPosList = PyList_New( 3 );
-	PyList_SetItem( pPosList, 0, PyFloat_FromDouble( v3Pos.x ) );
-	PyList_SetItem( pPosList, 1, PyFloat_FromDouble( v3Pos.y ) );
-	PyList_SetItem( pPosList, 2, PyFloat_FromDouble( v3Pos.z ) );
-
-	return pPosList;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* set_sound_source_position( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", "pos", NULL };
-	const char* pcFormat        = "i(ddd):set_sound_source_position";
-	long iID                    = -1;
-	VAVec3 v3Pos;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iID, &v3Pos.x, &v3Pos.y, &v3Pos.z ) )
-		return NULL;
-
-	g_pVANetClient->GetCoreInstance( )->SetSoundSourcePosition( iID, v3Pos );
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-
-
-static PyObject* get_sound_source_orientation_vu( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", NULL };
-	const char* pcFormat        = "i:get_sound_source_orientation_vu";
-	long iID                    = -1;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iID ) )
-		return NULL;
-
-	VAVec3 v3View, v3Up;
-	g_pVANetClient->GetCoreInstance( )->GetSoundSourceOrientationVU( iID, v3View, v3Up );
-
-	return Py_BuildValue( "(ddd)(ddd)", v3View.x, v3View.y, v3View.z, v3Up.x, v3Up.y, v3Up.z );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* set_sound_source_orientation_vu( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", "view", "up", NULL };
-	const char* pcFormat        = "i(ddd)(ddd):set_sound_source_orientation_vu";
-	long iID                    = -1;
-	VAVec3 v3View, v3Up;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iID, &v3View.x, &v3View.y, &v3View.z, &v3Up.x, &v3Up.y, &v3Up.z ) )
-		return NULL;
-
-	g_pVANetClient->GetCoreInstance( )->SetSoundSourceOrientationVU( iID, v3View, v3Up );
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_sound_receiver_ids( PyObject*, PyObject* )
-{
-	VAPY_REQUIRE_CONN_TRY;
-	std::vector<int> viIDs;
-	g_pVANetClient->GetCoreInstance( )->GetSoundReceiverIDs( viIDs );
-
-	PyObject* pIDList = PyList_New( viIDs.size( ) );
-	for( Py_ssize_t i = 0; i < PyList_Size( pIDList ); i++ )
-		PyList_SetItem( pIDList, i, PyLong_FromLong( viIDs[i] ) );
-
-	return pIDList;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* create_sound_receiver( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "name", NULL };
-	const char* pcFormat        = "s:create_sound_receiver";
-	char* pcName                = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcName ) )
-		return NULL;
-
-	std::string sName = pcName ? std::string( pcName ) : "PySoundReceiver";
-	return PyLong_FromLong( g_pVANetClient->GetCoreInstance( )->CreateSoundReceiver( sName ) );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* create_sound_receiver_explicit_renderer( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "renderer", "name", NULL };
-	const char* pcFormat        = "ss:create_sound_receiver_explicit_renderer";
-	char* pcRenderer            = nullptr;
-	char* pcName                = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcRenderer, &pcName ) )
-		return NULL;
-
-	std::string sRenderer = pcRenderer ? std::string( pcRenderer ) : "Unspecified";
-	std::string sName     = pcName ? std::string( pcName ) : "PySoundReceiver_" + sRenderer;
-	return PyLong_FromLong( g_pVANetClient->GetCoreInstance( )->CreateSoundReceiverExplicitRenderer( sRenderer, sName ) );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* delete_sound_receiver( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", NULL };
-	const char* pcFormat        = "i:delete_sound_receiver";
-	long iID                    = -1;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iID ) )
-		return NULL;
-
-	return PyLong_FromLong( g_pVANetClient->GetCoreInstance( )->DeleteSoundReceiver( iID ) );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* set_sound_receiver_enabled( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", "enabled", NULL };
-	const char* pcFormat        = "i|b:set_sound_receiver_enabled";
-	long iID                    = -1;
-	bool bEnabled               = true;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iID, &bEnabled ) )
-		return NULL;
-
-	g_pVANetClient->GetCoreInstance( )->SetSoundReceiverEnabled( iID, bEnabled );
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_sound_receiver_enabled( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", NULL };
-	const char* pcFormat        = "i:get_sound_receiver_enabled";
-	long iID                    = -1;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iID ) )
-		return NULL;
-
-	return PyBool_FromLong( g_pVANetClient->GetCoreInstance( )->GetSoundReceiverEnabled( iID ) );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_sound_receiver_name( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", NULL };
-	const char* pcFormat        = "i:get_sound_receiver_name";
-	long iID                    = -1;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iID ) )
-		return NULL;
-
-	return PyUnicode_FromString( SaveStringToUnicodeConversion( g_pVANetClient->GetCoreInstance( )->GetSoundReceiverName( iID ) ).c_str( ) );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_sound_receiver_auralization_mode( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", "short_mode", NULL };
-	const char* pcFormat        = "i|b:get_sound_receiver_auralization_mode";
-	long iID                    = -1;
-	bool bShortMode             = true;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iID, &bShortMode ) )
-		return NULL;
-
-
-	const int iAM         = g_pVANetClient->GetCoreInstance( )->GetSoundReceiverAuralizationMode( iID );
-	const std::string sAM = SaveStringToUnicodeConversion( IVAInterface::GetAuralizationModeStr( iAM, bShortMode ) );
-	return PyUnicode_FromString( sAM.c_str( ) );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* set_sound_receiver_auralization_mode( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", "auralization_mode", NULL };
-	const char* pcFormat        = "is:set_sound_receiver_auralization_mode";
-	long iID                    = -1;
-	char* pcAM                  = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iID, &pcAM ) )
-		return NULL;
-
-	const std::string sAM = pcAM ? std::string( pcAM ) : "";
-	const int iCurrentAM  = g_pVANetClient->GetCoreInstance( )->GetSoundReceiverAuralizationMode( iID );
-	const int iAM         = IVAInterface::ParseAuralizationModeStr( sAM, iCurrentAM );
-	g_pVANetClient->GetCoreInstance( )->SetSoundReceiverAuralizationMode( iID, iAM );
-
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* set_sound_receiver_parameters( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", "parameters", NULL };
-	const char* pcFormat        = "iO:set_sound_receiver_parameters";
-	long iID                    = -1;
-	PyObject* pParameters       = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iID, &pParameters ) )
-		return NULL;
-
-	CVAStruct oParameters = ConvertPythonDictToVAStruct( pParameters );
-	g_pVANetClient->GetCoreInstance( )->SetSoundReceiverParameters( iID, oParameters );
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-
-
-static PyObject* get_sound_receiver_parameters( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", NULL };
-	const char* pcFormat        = "iO:get_sound_receiver_parameters";
-	long iID                    = -1;
-	PyObject* pParameters       = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iID, &pParameters ) )
-		return NULL;
-
-	CVAStruct oParameters = ConvertPythonDictToVAStruct( pParameters );
-	CVAStruct oReturn     = g_pVANetClient->GetCoreInstance( )->GetSoundReceiverParameters( iID, oParameters );
-	return ConvertVAStructToPythonDict( oReturn );
-
-	VAPY_CATCH_RETURN;
-};
-
-
-static PyObject* get_sound_receiver_directivity( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", NULL };
-	const char* pcFormat        = "i:get_sound_receiver_directivity";
-	long iID                    = -1;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iID ) )
-		return NULL;
-
-	return PyLong_FromLong( g_pVANetClient->GetCoreInstance( )->GetSoundReceiverDirectivity( iID ) );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* set_sound_receiver_directivity( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", "directivity", NULL };
-	const char* pcFormat        = "ii:set_sound_receiver_directivity";
-	long iID                    = -1;
-	long iDirectivityID         = -1;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iID, &iDirectivityID ) )
-		return NULL;
-
-	g_pVANetClient->GetCoreInstance( )->SetSoundReceiverDirectivity( iID, iDirectivityID );
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* set_sound_receiver_muted( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", "muted", NULL };
-	const char* pcFormat        = "i|b:set_sound_receiver_muted";
-	long iID                    = -1;
-	bool bMuted                 = true;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iID, &bMuted ) )
-		return NULL;
-
-	g_pVANetClient->GetCoreInstance( )->SetSoundReceiverMuted( iID, bMuted );
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_sound_receiver_muted( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", NULL };
-	const char* pcFormat        = "i:get_sound_receiver_muted";
-	long iID                    = -1;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iID ) )
-		return NULL;
-
-	return PyBool_FromLong( g_pVANetClient->GetCoreInstance( )->GetSoundReceiverMuted( iID ) );
-
-	VAPY_CATCH_RETURN;
-};
-
-
-static PyObject* get_sound_receiver_position( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", NULL };
-	const char* pcFormat        = "i:get_sound_receiver_position";
-	long iID                    = -1;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iID ) )
-		return NULL;
-
-	VAVec3 v3Pos = g_pVANetClient->GetCoreInstance( )->GetSoundReceiverPosition( iID );
-
-	PyObject* pPosList = PyList_New( 3 );
-	PyList_SetItem( pPosList, 0, PyFloat_FromDouble( v3Pos.x ) );
-	PyList_SetItem( pPosList, 1, PyFloat_FromDouble( v3Pos.y ) );
-	PyList_SetItem( pPosList, 2, PyFloat_FromDouble( v3Pos.z ) );
-
-	return pPosList;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* set_sound_receiver_position( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", "pos", NULL };
-	const char* pcFormat        = "i(ddd):set_sound_receiver_position";
-	long iID                    = -1;
-	VAVec3 v3Pos;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iID, &v3Pos.x, &v3Pos.y, &v3Pos.z ) )
-		return NULL;
-
-	g_pVANetClient->GetCoreInstance( )->SetSoundReceiverPosition( iID, v3Pos );
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_sound_receiver_orientation_vu( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", NULL };
-	const char* pcFormat        = "i:get_sound_receiver_orientation_vu";
-	long iID                    = -1;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iID ) )
-		return NULL;
-
-	VAVec3 v3View, v3Up;
-	g_pVANetClient->GetCoreInstance( )->GetSoundReceiverOrientationVU( iID, v3View, v3Up );
-
-	return Py_BuildValue( "(ddd)(ddd)", v3View.x, v3View.y, v3View.z, v3Up.x, v3Up.y, v3Up.z );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* set_sound_receiver_orientation_vu( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", "view", "up", NULL };
-	const char* pcFormat        = "i(ddd)(ddd):set_sound_receiver_orientation_vu";
-	long iID                    = -1;
-	VAVec3 v3View, v3Up;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iID, &v3View.x, &v3View.y, &v3View.z, &v3Up.x, &v3Up.y, &v3Up.z ) )
-		return NULL;
-
-	g_pVANetClient->GetCoreInstance( )->SetSoundReceiverOrientationVU( iID, v3View, v3Up );
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_sound_receiver_real_world_position( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", NULL };
-	const char* pcFormat        = "i:get_sound_receiver_real_world_position";
-	long iID                    = -1;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iID ) )
-		return NULL;
-
-	VAVec3 v3Pos, vView, vUp;
-	g_pVANetClient->GetCoreInstance( )->GetSoundReceiverRealWorldPositionOrientationVU( iID, v3Pos, vView, vUp );
-
-	PyObject* pPosList = PyList_New( 3 );
-	PyList_SetItem( pPosList, 0, PyFloat_FromDouble( v3Pos.x ) );
-	PyList_SetItem( pPosList, 1, PyFloat_FromDouble( v3Pos.y ) );
-	PyList_SetItem( pPosList, 2, PyFloat_FromDouble( v3Pos.z ) );
-
-	return pPosList;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* set_sound_receiver_real_world_position( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", "pos", NULL };
-	const char* pcFormat        = "i(ddd):set_sound_receiver_real_world_position";
-	long iID                    = -1;
-	VAVec3 v3Pos;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iID, &v3Pos.x, &v3Pos.y, &v3Pos.z ) )
-		return NULL;
-
-	VAVec3 v3PosDummy, v3View, v3Up;
-	g_pVANetClient->GetCoreInstance( )->GetSoundReceiverRealWorldPositionOrientationVU( iID, v3PosDummy, v3View, v3Up );
-	g_pVANetClient->GetCoreInstance( )->SetSoundReceiverRealWorldPositionOrientationVU( iID, v3Pos, v3View, v3Up );
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_sound_receiver_real_world_orientation_vu( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", NULL };
-	const char* pcFormat        = "i:get_sound_receiver_real_world_orientation_vu";
-	long iID                    = -1;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iID ) )
-		return NULL;
-
-	VAVec3 v3Pos, v3View, v3Up;
-	g_pVANetClient->GetCoreInstance( )->GetSoundReceiverRealWorldPositionOrientationVU( iID, v3Pos, v3View, v3Up );
-
-	return Py_BuildValue( "(ddd)(ddd)", v3View.x, v3View.y, v3View.z, v3Up.x, v3Up.y, v3Up.z );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* set_sound_receiver_real_world_orientation_vu( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", "view", "up", NULL };
-	const char* pcFormat        = "i(ddd)(ddd):set_sound_receiver_real_world_orientation_vu";
-	long iID                    = -1;
-	VAVec3 v3View, v3Up;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iID, &v3View.x, &v3View.y, &v3View.z, &v3Up.x, &v3Up.y, &v3Up.z ) )
-		return NULL;
-
-	VAVec3 v3Pos, v3View_dummy, v3Up_dummy;
-	g_pVANetClient->GetCoreInstance( )->GetSoundReceiverRealWorldPositionOrientationVU( iID, v3Pos, v3View_dummy, v3Up_dummy );
-	g_pVANetClient->GetCoreInstance( )->SetSoundReceiverRealWorldPositionOrientationVU( iID, v3Pos, v3View, v3Up );
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* create_signal_source_buffer_from_file( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "path", "name", NULL };
-	const char* pcFormat        = "s|s:create_signal_source_buffer_from_file";
-	char* pcPath                = nullptr;
-	char* pcName                = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcPath, &pcName ) )
-		return NULL;
-
-	std::string sName         = pcName ? std::string( pcName ) : "";
-	std::string sSignalSource = g_pVANetClient->GetCoreInstance( )->CreateSignalSourceBufferFromFile( std::string( pcPath ), sName );
-	return PyUnicode_FromString( SaveStringToUnicodeConversion( sSignalSource ).c_str( ) );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* create_signal_source_prototype_from_parameters( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "parameters", "name", NULL };
-	const char* pcFormat        = "O|s:create_signal_source_prototype_from_parameters";
-	PyObject* pParameters       = nullptr;
-	char* pcName                = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pParameters, &pcName ) )
-		return NULL;
-
-	CVAStruct oParameters = ConvertPythonDictToVAStruct( pParameters );
-	std::string sName     = pcName ? std::string( pcName ) : "";
-	auto sID              = g_pVANetClient->GetCoreInstance( )->CreateSignalSourcePrototypeFromParameters( oParameters, sName );
-	return PyUnicode_FromString( SaveStringToUnicodeConversion( sID ).c_str( ) );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* create_signal_source_text_to_speech( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "name", NULL };
-	const char* pcFormat        = "|s:create_signal_source_text_to_speech";
-	char* pcName                = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcName ) )
-		return NULL;
-
-	std::string sName         = pcName ? std::string( pcName ) : "";
-	std::string sSignalSource = g_pVANetClient->GetCoreInstance( )->CreateSignalSourceTextToSpeech( sName );
-	return PyUnicode_FromString( SaveStringToUnicodeConversion( sSignalSource ).c_str( ) );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* create_signal_source_sequencer( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "name", NULL };
-	const char* pcFormat        = "|s:create_signal_source_sequencer";
-	char* pcName                = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcName ) )
-		return NULL;
-
-	std::string sName         = pcName ? std::string( pcName ) : "";
-	std::string sSignalSource = g_pVANetClient->GetCoreInstance( )->CreateSignalSourceSequencer( sName );
-	return PyUnicode_FromString( SaveStringToUnicodeConversion( sSignalSource ).c_str( ) );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* create_signal_source_network_stream( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "interface", "port", "name", NULL };
-	const char* pcFormat        = "si|s:create_signal_source_network_stream";
-	char* pcInterface           = nullptr;
-	int iPort                   = -1;
-	char* pcName                = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcInterface, &iPort, &pcName ) )
-		return NULL;
-
-	std::string sName         = pcName ? std::string( pcName ) : "";
-	std::string sInterface    = pcInterface ? std::string( pcInterface ) : "";
-	std::string sSignalSource = g_pVANetClient->GetCoreInstance( )->CreateSignalSourceNetworkStream( sInterface, iPort, sName );
-	return PyUnicode_FromString( SaveStringToUnicodeConversion( sSignalSource ).c_str( ) );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* create_signal_source_engine( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "name", NULL };
-	const char* pcFormat        = "O|s:create_signal_source_engine";
-	PyObject* pParameters       = nullptr;
-	char* pcName                = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pParameters, &pcName ) )
-		return NULL;
-
-	std::string sName         = pcName ? std::string( pcName ) : "";
-	CVAStruct oParameters     = ConvertPythonDictToVAStruct( pParameters );
-	std::string sSignalSource = g_pVANetClient->GetCoreInstance( )->CreateSignalSourceEngine( oParameters, sName );
-	return PyUnicode_FromString( SaveStringToUnicodeConversion( sSignalSource ).c_str( ) );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* create_signal_source_machine( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "name", NULL };
-	const char* pcFormat        = "O|s:create_signal_source_machine";
-	PyObject* pParameters       = nullptr;
-	char* pcName                = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pParameters, &pcName ) )
-		return NULL;
-
-	CVAStruct oParameters     = ConvertPythonDictToVAStruct( pParameters );
-	std::string sName         = pcName ? std::string( pcName ) : "";
-	std::string sSignalSource = g_pVANetClient->GetCoreInstance( )->CreateSignalSourceMachine( oParameters, sName );
-	return PyUnicode_FromString( SaveStringToUnicodeConversion( sSignalSource ).c_str( ) );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* delete_signal_source( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "signal_source", NULL };
-	const char* pcFormat        = "s:delete_signal_source";
-	char* pcSignalSource        = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcSignalSource ) )
-		return NULL;
-
-	std::string sSignalSource = pcSignalSource ? std::string( pcSignalSource ) : "";
-	bool bRet                 = g_pVANetClient->GetCoreInstance( )->DeleteSignalSource( sSignalSource );
-	return PyBool_FromLong( bRet );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_signal_source_info( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "signalsource", NULL };
-	const char* pcFormat        = "s:get_signal_source_info";
-	char* pcSignalSource        = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcSignalSource ) )
-		return NULL;
-
-	std::string sSignalSource = pcSignalSource ? std::string( pcSignalSource ) : "";
-	CVASignalSourceInfo oInfo = g_pVANetClient->GetCoreInstance( )->GetSignalSourceInfo( sSignalSource );
-
-	PyObject* pInfo = Py_BuildValue( "{s:s,s:s,s:s,s:i,s:i,s:s}", "id", SaveStringToUnicodeConversion( oInfo.sID ).c_str( ), "name",
-	                                 SaveStringToUnicodeConversion( oInfo.sName ).c_str( ), "state", SaveStringToUnicodeConversion( oInfo.sState ).c_str( ), "type",
-	                                 oInfo.iType, "references", oInfo.iReferences, "description", SaveStringToUnicodeConversion( oInfo.sDesc ).c_str( ) );
-
-	return pInfo;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_signal_source_infos( PyObject*, PyObject* )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	std::vector<CVASignalSourceInfo> voInfos;
-	g_pVANetClient->GetCoreInstance( )->GetSignalSourceInfos( voInfos );
-
-	PyObject* pInfoList = PyList_New( voInfos.size( ) );
-
-	for( size_t i = 0; i < voInfos.size( ); i++ )
-	{
-		CVASignalSourceInfo& oInfo( voInfos[i] );
-		PyObject* pInfo = Py_BuildValue( "{s:s,s:s,s:s,s:i,s:i,s:s}", "id", SaveStringToUnicodeConversion( oInfo.sID ).c_str( ), "name",
-		                                 SaveStringToUnicodeConversion( oInfo.sName ).c_str( ), "state", SaveStringToUnicodeConversion( oInfo.sState ).c_str( ), "type",
-		                                 oInfo.iType, "references", oInfo.iReferences, "description", SaveStringToUnicodeConversion( oInfo.sDesc ).c_str( ) );
-		PyList_SetItem( pInfoList, i, pInfo ); // steals reference
-	}
-
-	return pInfoList;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_signal_source_buffer_playback_state( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "signalsource", NULL };
-	const char* pcFormat        = "s:get_signal_source_buffer_playback_state";
-	char* pcSignalSource        = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcSignalSource ) )
-		return NULL;
-
-	std::string sSignalSource = pcSignalSource ? std::string( pcSignalSource ) : "";
-	int iState                = g_pVANetClient->GetCoreInstance( )->GetSignalSourceBufferPlaybackState( sSignalSource );
-	return PyLong_FromLong( iState );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_signal_source_buffer_playback_state_str( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "signalsource", NULL };
-	const char* pcFormat        = "s:get_signal_source_buffer_playback_state_str";
-	char* pcSignalSource        = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcSignalSource ) )
-		return NULL;
-
-	std::string sSignalSource = pcSignalSource ? std::string( pcSignalSource ) : "";
-	int iState                = g_pVANetClient->GetCoreInstance( )->GetSignalSourceBufferPlaybackState( sSignalSource );
-	std::string sState        = g_pVANetClient->GetCoreInstance( )->GetPlaybackStateStr( iState );
-	return PyUnicode_FromString( SaveStringToUnicodeConversion( sState ).c_str( ) );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* set_signal_source_buffer_playback_action( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "signalsource", "action", NULL };
-	const char* pcFormat        = "si:va_set_signal_source_buffer_playback_action";
-	char* pcSignalSource        = nullptr;
-	int iAction                 = -1;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcSignalSource, &iAction ) )
-		return NULL;
-
-	std::string sSignalSource = pcSignalSource ? std::string( pcSignalSource ) : "";
-	g_pVANetClient->GetCoreInstance( )->SetSignalSourceBufferPlaybackAction( sSignalSource, iAction );
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* set_signal_source_buffer_playback_action_str( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "signalsource", "action_string", NULL };
-	const char* pcFormat        = "ss:set_signal_source_buffer_playback_action_str";
-	char* pcSignalSource        = nullptr;
-	char* pcAction              = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcSignalSource, &pcAction ) )
-		return NULL;
-
-	std::string sAction = pcAction ? std::string( pcAction ) : "";
-	int iAction         = g_pVANetClient->GetCoreInstance( )->ParsePlaybackAction( sAction );
-
-	std::string sSignalSource = pcSignalSource ? std::string( pcSignalSource ) : "";
-	g_pVANetClient->GetCoreInstance( )->SetSignalSourceBufferPlaybackAction( sSignalSource, iAction );
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* set_signal_source_buffer_playback_position( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "signalsource", "playback_position", NULL };
-	const char* pcFormat        = "si:set_signal_source_buffer_playback_position";
-	char* pcSignalSource        = nullptr;
-	int iPosition               = -1;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcSignalSource, &iPosition ) )
-		return NULL;
-
-	std::string sSignalSource = pcSignalSource ? std::string( pcSignalSource ) : "";
-	g_pVANetClient->GetCoreInstance( )->SetSignalSourceBufferPlaybackPosition( sSignalSource, iPosition );
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_signal_source_buffer_looping( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "signalsource", NULL };
-	const char* pcFormat        = "s:get_signal_source_buffer_looping";
-	char* pcSignalSource        = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcSignalSource ) )
-		return NULL;
-
-	std::string sSignalSource = pcSignalSource ? std::string( pcSignalSource ) : "";
-	return PyBool_FromLong( g_pVANetClient->GetCoreInstance( )->GetSignalSourceBufferLooping( sSignalSource ) );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* set_signal_source_buffer_looping( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "signalsource", "enabled", NULL };
-	const char* pcFormat        = "s|b:set_signal_source_buffer_looping";
-	char* pcSignalSource        = nullptr;
-	bool bEnabled               = true;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcSignalSource, &bEnabled ) )
-		return NULL;
-
-	std::string sSignalSource = pcSignalSource ? std::string( pcSignalSource ) : "";
-	g_pVANetClient->GetCoreInstance( )->SetSignalSourceBufferLooping( sSignalSource, bEnabled );
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* set_signal_source_machine_start_machine( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "signalsource", NULL };
-	const char* pcFormat        = "s:set_signal_source_machine_start_machine";
-	char* pcSignalSource        = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcSignalSource ) )
-		return NULL;
-
-	std::string sSignalSource = pcSignalSource ? std::string( pcSignalSource ) : "";
-	g_pVANetClient->GetCoreInstance( )->SetSignalSourceMachineStartMachine( sSignalSource );
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* set_signal_source_machine_halt_machine( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "signalsource", NULL };
-	const char* pcFormat        = "s:set_signal_source_machine_halt_machine";
-	char* pcSignalSource        = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcSignalSource ) )
-		return NULL;
-
-	std::string sSignalSource = pcSignalSource ? std::string( pcSignalSource ) : "";
-	g_pVANetClient->GetCoreInstance( )->SetSignalSourceMachineHaltMachine( sSignalSource );
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_signal_source_machine_state_str( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "signalsource", NULL };
-	const char* pcFormat        = "s:get_signal_source_machine_state_str";
-	char* pcSignalSource        = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcSignalSource ) )
-		return NULL;
-
-	std::string sSignalSource = pcSignalSource ? std::string( pcSignalSource ) : "";
-	std::string sState        = g_pVANetClient->GetCoreInstance( )->GetSignalSourceMachineStateStr( sSignalSource );
-
-	return PyUnicode_FromString( SaveStringToUnicodeConversion( sState ).c_str( ) );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* set_signal_source_machine_speed( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "signalsource", "speed", NULL };
-	const char* pcFormat        = "sd:set_signal_source_machine_speed";
-	char* pcSignalSource        = nullptr;
-	double dSpeed;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcSignalSource, &dSpeed ) )
-		return NULL;
-
-	std::string sSignalSource = pcSignalSource ? std::string( pcSignalSource ) : "";
-	g_pVANetClient->GetCoreInstance( )->SetSignalSourceMachineSpeed( sSignalSource, dSpeed );
-
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_signal_source_machine_speed( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "signalsource", NULL };
-	const char* pcFormat        = "s:get_signal_source_machine_speed";
-	char* pcSignalSource        = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcSignalSource ) )
-		return NULL;
-
-	std::string sSignalSource = pcSignalSource ? std::string( pcSignalSource ) : "";
-	double dSpeed             = g_pVANetClient->GetCoreInstance( )->GetSignalSourceMachineSpeed( sSignalSource );
-
-	return PyFloat_FromDouble( dSpeed );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* set_signal_source_machine_start_file( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "signalsource", "filepath", NULL };
-	const char* pcFormat        = "ss:set_signal_source_machine_start_file";
-	char* pcSignalSource        = nullptr;
-	char* pcPath                = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcSignalSource, &pcPath ) )
-		return NULL;
-
-	std::string sSignalSource = pcSignalSource ? std::string( pcSignalSource ) : "";
-	std::string sPath         = pcPath ? std::string( pcPath ) : "";
-	g_pVANetClient->GetCoreInstance( )->SetSignalSourceMachineStartFile( sSignalSource, sPath );
-
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* set_signal_source_machine_idle_file( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "signalsource", "filepath", NULL };
-	const char* pcFormat        = "ss:set_signal_source_machine_idle_file";
-	char* pcSignalSource        = nullptr;
-	char* pcPath                = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcSignalSource, &pcPath ) )
-		return NULL;
-
-	std::string sSignalSource = pcSignalSource ? std::string( pcSignalSource ) : "";
-	std::string sPath         = pcPath ? std::string( pcPath ) : "";
-	g_pVANetClient->GetCoreInstance( )->SetSignalSourceMachineIdleFile( sSignalSource, sPath );
-
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* set_signal_source_machine_stop_file( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "signalsource", "filepath", NULL };
-	const char* pcFormat        = "ss:vset_signal_source_machine_stop_file";
-	char* pcSignalSource        = nullptr;
-	char* pcPath                = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcSignalSource, &pcPath ) )
-		return NULL;
-
-	std::string sSignalSource = pcSignalSource ? std::string( pcSignalSource ) : "";
-	std::string sPath         = pcPath ? std::string( pcPath ) : "";
-	g_pVANetClient->GetCoreInstance( )->SetSignalSourceMachineStopFile( sSignalSource, sPath );
-
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_signal_source_parameters( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "signalsource", "parameters", NULL };
-	const char* pcFormat        = "iO:get_signal_source_parameters";
-	char* pcSignalSource        = nullptr;
-	PyObject* pParameters       = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcSignalSource, &pParameters ) )
-		return NULL;
-
-	std::string sSignalSource = pcSignalSource ? std::string( pcSignalSource ) : "";
-	CVAStruct oParameters     = ConvertPythonDictToVAStruct( pParameters );
-	CVAStruct oReturn         = g_pVANetClient->GetCoreInstance( )->GetSignalSourceParameters( sSignalSource, oParameters );
-	return ConvertVAStructToPythonDict( oReturn );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* set_signal_source_parameters( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", NULL };
-	const char* pcFormat        = "iO:set_signal_source_parameters";
-	char* pcSignalSource        = nullptr;
-	PyObject* pParameters       = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcSignalSource, &pParameters ) )
-		return NULL;
-
-	std::string sSignalSource = pcSignalSource ? std::string( pcSignalSource ) : "";
-	CVAStruct oParameters     = ConvertPythonDictToVAStruct( pParameters );
-	g_pVANetClient->GetCoreInstance( )->SetSignalSourceParameters( sSignalSource, oParameters );
-
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_sound_portal_ids( PyObject*, PyObject* )
-{
-	VAPY_REQUIRE_CONN_TRY;
-	std::vector<int> viIDs;
-	g_pVANetClient->GetCoreInstance( )->GetSoundPortalIDs( viIDs );
-
-	PyObject* pIDList = PyList_New( viIDs.size( ) );
-	for( Py_ssize_t i = 0; i < PyList_Size( pIDList ); i++ )
-		PyList_SetItem( pIDList, i, PyLong_FromLong( viIDs[i] ) );
-
-	return pIDList;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_sound_portal_name( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", NULL };
-	const char* pcFormat        = "i:get_sound_portal_name";
-	long iID                    = -1;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iID ) )
-		return NULL;
-
-	return PyUnicode_FromString( SaveStringToUnicodeConversion( g_pVANetClient->GetCoreInstance( )->GetSoundPortalName( iID ) ).c_str( ) );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* set_sound_portal_name( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", "name", NULL };
-	const char* pcFormat        = "is:set_sound_portal_name";
-	long iID                    = -1;
-	char* pcName                = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iID, &pcName ) )
-		return NULL;
-
-	std::string sName = pcName ? std::string( pcName ) : "";
-	g_pVANetClient->GetCoreInstance( )->SetSoundPortalName( iID, sName );
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_sound_portal_enabled( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", NULL };
-	const char* pcFormat        = "i:get_sound_portal_enabled";
-	long iID                    = -1;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iID ) )
-		return NULL;
-
-	return PyFloat_FromDouble( g_pVANetClient->GetCoreInstance( )->GetSoundPortalEnabled( iID ) );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* set_sound_portal_enabled( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", "name", NULL };
-	const char* pcFormat        = "i|b:set_sound_portal_enabled";
-	long iID                    = -1;
-	bool bEnabled               = true;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iID, &bEnabled ) )
-		return NULL;
-
-	g_pVANetClient->GetCoreInstance( )->SetSoundPortalEnabled( iID, bEnabled );
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-
-
-static PyObject* get_homogeneous_medium_sound_speed( PyObject*, PyObject* )
-{
-	VAPY_REQUIRE_CONN_TRY;
-	return PyFloat_FromDouble( g_pVANetClient->GetCoreInstance( )->GetHomogeneousMediumSoundSpeed( ) );
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* set_homogeneous_medium_sound_speed( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "sound_speed", NULL };
-	const char* pcFormat        = "d:set_homogeneous_medium_sound_speed";
-	double dSoundSpeed          = -1;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &dSoundSpeed ) )
-		return NULL;
-
-	g_pVANetClient->GetCoreInstance( )->SetHomogeneousMediumSoundSpeed( dSoundSpeed );
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-static PyObject* get_homogeneous_medium_temperature( PyObject*, PyObject* )
-{
-	VAPY_REQUIRE_CONN_TRY;
-	return PyFloat_FromDouble( g_pVANetClient->GetCoreInstance( )->GetHomogeneousMediumTemperature( ) );
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* set_homogeneous_medium_temperature( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "temperature", NULL };
-	const char* pcFormat        = "d:set_homogeneous_medium_temperature";
-	double dTemperature         = -1;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &dTemperature ) )
-		return NULL;
-
-	g_pVANetClient->GetCoreInstance( )->SetHomogeneousMediumTemperature( dTemperature );
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_homogeneous_medium_static_pressure( PyObject*, PyObject* )
-{
-	VAPY_REQUIRE_CONN_TRY;
-	return PyFloat_FromDouble( g_pVANetClient->GetCoreInstance( )->GetHomogeneousMediumStaticPressure( ) );
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* set_homogeneous_medium_static_pressure( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "static_pressure", NULL };
-	const char* pcFormat        = "d:set_homogeneous_medium_static_pressure";
-	double dStaticPressure      = -1;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &dStaticPressure ) )
-		return NULL;
-
-	g_pVANetClient->GetCoreInstance( )->SetHomogeneousMediumStaticPressure( dStaticPressure );
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_homogeneous_medium_relative_humidity( PyObject*, PyObject* )
-{
-	VAPY_REQUIRE_CONN_TRY;
-	return PyFloat_FromDouble( g_pVANetClient->GetCoreInstance( )->GetHomogeneousMediumRelativeHumidity( ) );
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* set_homogeneous_medium_relative_humidity( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "relative_humidity", NULL };
-	const char* pcFormat        = "d:set_homogeneous_medium_relative_humidity";
-	double dRelativeHumidity    = -1;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &dRelativeHumidity ) )
-		return NULL;
-
-	g_pVANetClient->GetCoreInstance( )->SetHomogeneousMediumRelativeHumidity( dRelativeHumidity );
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_homogeneous_medium_shift_speed( PyObject*, PyObject* )
-{
-	VAPY_REQUIRE_CONN_TRY;
-	VAVec3 v3ShiftSpeed = g_pVANetClient->GetCoreInstance( )->GetHomogeneousMediumShiftSpeed( );
-
-	PyObject* pList = PyList_New( 3 );
-	PyList_SetItem( pList, 0, PyFloat_FromDouble( v3ShiftSpeed.x ) );
-	PyList_SetItem( pList, 1, PyFloat_FromDouble( v3ShiftSpeed.y ) );
-	PyList_SetItem( pList, 2, PyFloat_FromDouble( v3ShiftSpeed.z ) );
-
-	return pList;
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* set_homogeneous_medium_shift_speed( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "shift_speed", NULL };
-	const char* pcFormat        = "(ddd):set_homogeneous_medium_shift_speed";
-	VAVec3 v3ShiftSpeed;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &v3ShiftSpeed.x, &v3ShiftSpeed.y, &v3ShiftSpeed.z ) )
-		return NULL;
-
-	g_pVANetClient->GetCoreInstance( )->SetHomogeneousMediumShiftSpeed( v3ShiftSpeed );
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_homogeneous_medium_parameters( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "arguments", NULL };
-	const char* pcFormat        = "|O:get_homogeneous_medium_parameters";
-	PyObject* pParamArgs        = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pParamArgs ) )
-		return NULL;
-
-	CVAStruct oArgs       = ConvertPythonDictToVAStruct( pParamArgs );
-	CVAStruct oParameters = g_pVANetClient->GetCoreInstance( )->GetHomogeneousMediumParameters( oArgs );
-
-	return ConvertVAStructToPythonDict( oParameters );
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* set_homogeneous_medium_parameters( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "parameters", NULL };
-	const char* pcFormat        = "O:set_homogeneous_medium_parameters";
-	PyObject* pParameters       = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pParameters ) )
-		return NULL;
-
-	CVAStruct oParameters = ConvertPythonDictToVAStruct( pParameters );
-
-	g_pVANetClient->GetCoreInstance( )->SetHomogeneousMediumParameters( oParameters );
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-
-
-static PyObject* get_acoustic_material_infos( PyObject*, PyObject* )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	std::vector<CVAAcousticMaterial> voInfos;
-	g_pVANetClient->GetCoreInstance( )->GetAcousticMaterialInfos( voInfos );
-
-	PyObject* pList = PyList_New( voInfos.size( ) );
-
-	for( size_t i = 0; i < voInfos.size( ); i++ )
-	{
-		CVAAcousticMaterial& oInfo( voInfos[i] );
-		PyObject* pInfo = Py_BuildValue(
-		    "{s:i,s:i,s:O!,s:s,s:O!,s:O!,s:O!,s:O!}", "id", oInfo.iID, "class", oInfo.iType, "parameters", ConvertVAStructToPythonDict( oInfo.oParams ), "name",
-		    SaveStringToUnicodeConversion( oInfo.sName ).c_str( ), "absorption_values", ConvertFloatVectorToPythonList( oInfo.vfAbsorptionValues ), "scattering_values",
-		    ConvertFloatVectorToPythonList( oInfo.vfScatteringValues ), "transmission_values", ConvertFloatVectorToPythonList( oInfo.vfTransmissionValues ) );
-		PyList_SetItem( pList, i, pInfo ); // steals reference
-	}
-
-	return pList;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* create_acoustic_material_from_file( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "parameters", "name", NULL };
-	const char* pcFormat        = "s|s:create_acoustic_material_from_file";
-	char* pcFilePath            = nullptr;
-	char* pcName                = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcFilePath, &pcName ) )
-		return NULL;
-
-	std::string sFilePath = pcFilePath ? std::string( pcFilePath ) : "";
-	std::string sName     = pcName ? std::string( pcName ) : "";
-	const int iID         = g_pVANetClient->GetCoreInstance( )->CreateAcousticMaterialFromFile( sFilePath, sName );
-	return PyLong_FromLong( iID );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* create_acoustic_material_from_parameters( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "parameters", "name", NULL };
-	const char* pcFormat        = "O|s:create_acoustic_material_from_parameters";
-	PyObject* pParams           = nullptr;
-	char* pcName                = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pParams, &pcName ) )
-		return NULL;
-
-	const CVAStruct oParams = ConvertPythonDictToVAStruct( pParams );
-	std::string sName       = pcName ? std::string( pcName ) : "";
-	const int iID           = g_pVANetClient->GetCoreInstance( )->CreateAcousticMaterialFromParameters( oParams, sName );
-	return PyLong_FromLong( iID );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* create_acoustic_material( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "material", "name", NULL };
-	const char* pcFormat        = "O|s:create_acoustic_material";
-	PyObject* pMaterial         = nullptr;
-	char* pcName                = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pMaterial, &pcName ) )
-		return NULL;
-
-	const CVAAcousticMaterial oMaterial = ConvertPythonDictToAcousticMaterial( pMaterial );
-	std::string sName                   = pcName ? std::string( pcName ) : "";
-	const int iID                       = g_pVANetClient->GetCoreInstance( )->CreateAcousticMaterial( oMaterial, sName );
-	return PyLong_FromLong( iID );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_rendering_modules( PyObject*, PyObject* )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	std::vector<CVAAudioRendererInfo> voInfos;
-	g_pVANetClient->GetCoreInstance( )->GetRenderingModules( voInfos );
-
-	PyObject* pList = PyList_New( voInfos.size( ) );
-
-	for( size_t i = 0; i < voInfos.size( ); i++ )
-	{
-		CVAAudioRendererInfo& oInfo( voInfos[i] );
-		PyObject* pInfo = Py_BuildValue(
-		    "{s:b,s:s,s:s,s:s,s:s,s:b,s:b,s:O}", "enabled", oInfo.bEnabled, "class", SaveStringToUnicodeConversion( oInfo.sClass ).c_str( ), "description",
-		    SaveStringToUnicodeConversion( oInfo.sDescription ).c_str( ), "id", SaveStringToUnicodeConversion( oInfo.sID ).c_str( ), "output_recording_file_path",
-		    SaveStringToUnicodeConversion( oInfo.sOutputRecordingFilePath ).c_str( ), "output_detector_enabled", oInfo.bOutputDetectorEnabled, "output_recording_enabled",
-		    oInfo.bOutputRecordingEnabled, "parameters", ConvertVAStructToPythonDict( oInfo.oParams ) );
-		PyList_SetItem( pList, i, pInfo ); // steals reference
-	}
-
-	return pList;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_rendering_module_gain( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", NULL };
-	const char* pcFormat        = "s:get_rendering_module_gain";
-	char* pcID                  = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcID ) )
-		return NULL;
-
-	std::string sID = pcID ? std::string( pcID ) : "";
-	return PyFloat_FromDouble( g_pVANetClient->GetCoreInstance( )->GetRenderingModuleGain( sID ) );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* set_rendering_module_gain( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", "gain", NULL };
-	const char* pcFormat        = "sd:set_rendering_module_gain";
-	char* pcID                  = nullptr;
-	double dGain                = -1;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcID, &dGain ) )
-		return NULL;
-
-	std::string sID = pcID ? std::string( pcID ) : "";
-	g_pVANetClient->GetCoreInstance( )->SetRenderingModuleGain( sID, dGain );
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_rendering_module_auralization_mode( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", "short_mode", NULL };
-	const char* pcFormat        = "s|b:get_rendering_module_auralization_mode";
-	char* pcID                  = nullptr;
-	bool bShortMode             = true;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcID, &bShortMode ) )
-		return NULL;
-
-	std::string sID       = pcID ? std::string( pcID ) : "";
-	const int iAM         = g_pVANetClient->GetCoreInstance( )->GetRenderingModuleAuralizationMode( sID );
-	const std::string sAM = IVAInterface::GetAuralizationModeStr( iAM, bShortMode );
-
-	return PyUnicode_FromString( SaveStringToUnicodeConversion( sAM ).c_str( ) );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* set_rendering_module_auralization_mode( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", "auralization_mode", NULL };
-	const char* pcFormat        = "ss:set_rendering_module_auralization_mode";
-	char* pcID                  = nullptr;
-	char* pcAM                  = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcID, &pcAM ) )
-		return NULL;
-
-	std::string sID       = pcID ? std::string( pcID ) : "";
-	const std::string sAM = pcAM ? std::string( pcAM ) : "";
-
-	const int iCurrentAM = g_pVANetClient->GetCoreInstance( )->GetRenderingModuleAuralizationMode( sID );
-	const int iAM        = IVAInterface::ParseAuralizationModeStr( sAM, iCurrentAM );
-	g_pVANetClient->GetCoreInstance( )->SetRenderingModuleAuralizationMode( sID, iAM );
-
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_rendering_module_parameters( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "renderer_id", "arguments", NULL };
-	const char* pcFormat        = "s|O:get_rendering_module_parameters";
-	char* pcID                  = nullptr;
-	PyObject* pArguments        = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcID, &PyDict_Type, &pArguments ) )
-		return NULL;
-
-	std::string sID       = pcID ? std::string( pcID ) : "";
-	const CVAStruct oArgs = ConvertPythonDictToVAStruct( pArguments );
-	const CVAStruct oRet  = g_pVANetClient->GetCoreInstance( )->GetRenderingModuleParameters( sID, oArgs );
-
-	return ConvertVAStructToPythonDict( oRet );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* set_rendering_module_parameters( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", "parameters", NULL };
-	const char* pcFormat        = "sO:set_rendering_module_parameters";
-	char* pcID                  = nullptr;
-	PyObject* pParams           = NULL;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcID, &PyDict_Type, &pParams ) )
-		return NULL;
-
-	std::string sID             = pcID ? std::string( pcID ) : "";
-	const CVAStruct oParameters = ConvertPythonDictToVAStruct( pParams );
-	g_pVANetClient->GetCoreInstance( )->SetRenderingModuleParameters( sID, oParameters );
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-
-
-static PyObject* set_rendering_module_muted( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", "muted", NULL };
-	const char* pcFormat        = "s|b:set_rendering_module_muted";
-	char* pcID                  = nullptr;
-	bool bMuted                 = true;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcID, &bMuted ) )
-		return NULL;
-
-	std::string sID = pcID ? std::string( pcID ) : "";
-	g_pVANetClient->GetCoreInstance( )->SetRenderingModuleMuted( sID, bMuted );
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_rendering_module_muted( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", NULL };
-	const char* pcFormat        = "s:get_rendering_module_muted";
-	char* pcID                  = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcID ) )
-		return NULL;
-
-	std::string sID = pcID ? std::string( pcID ) : "";
-	return PyBool_FromLong( g_pVANetClient->GetCoreInstance( )->GetRenderingModuleMuted( sID ) );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_reproduction_modules( PyObject*, PyObject* )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	std::vector<CVAAudioReproductionInfo> voInfos;
-	g_pVANetClient->GetCoreInstance( )->GetReproductionModules( voInfos );
-
-	PyObject* pList = PyList_New( voInfos.size( ) );
-
-	for( size_t i = 0; i < voInfos.size( ); i++ )
-	{
-		CVAAudioReproductionInfo& oInfo( voInfos[i] );
-		PyObject* pInfo = Py_BuildValue( "{s:b,s:s,s:s,s:s}", "enabled", oInfo.bEnabled, "class", SaveStringToUnicodeConversion( oInfo.sClass ).c_str( ), "description",
-		                                 SaveStringToUnicodeConversion( oInfo.sDescription ).c_str( ), "id", SaveStringToUnicodeConversion( oInfo.sID ).c_str( ) );
-		PyList_SetItem( pList, i, pInfo ); // steals reference
-	}
-
-	return pList;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_reproduction_module_gain( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", NULL };
-	const char* pcFormat        = "s:get_reproduction_module_gain";
-	char* pcID                  = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcID ) )
-		return NULL;
-
-	std::string sID = pcID ? std::string( pcID ) : "";
-	return PyFloat_FromDouble( g_pVANetClient->GetCoreInstance( )->GetReproductionModuleGain( sID ) );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* set_reproduction_module_gain( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", "gain", NULL };
-	const char* pcFormat        = "sd:set_reproduction_module_gain";
-	char* pcID                  = nullptr;
-	double dGain                = -1;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcID, &dGain ) )
-		return NULL;
-
-	std::string sID = pcID ? std::string( pcID ) : "";
-	g_pVANetClient->GetCoreInstance( )->SetReproductionModuleGain( sID, dGain );
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_reproduction_module_parameters( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", "arguments", NULL };
-	const char* pcFormat        = "s|O:get_reproduction_module_parameters";
-	char* pcID                  = nullptr;
-	PyObject* pArguments        = NULL;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcID, &PyDict_Type, &pArguments ) )
-		return NULL;
-
-	std::string sID       = pcID ? std::string( pcID ) : "";
-	const CVAStruct oArgs = ConvertPythonDictToVAStruct( pArguments );
-	const CVAStruct oRet  = g_pVANetClient->GetCoreInstance( )->GetReproductionModuleParameters( sID, oArgs );
-
-	return ConvertVAStructToPythonDict( oRet );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* set_reproduction_module_parameters( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", "parameters", NULL };
-	const char* pcFormat        = "sO:set_reproduction_module_parameters";
-	char* pcID                  = nullptr;
-	PyObject* pParams           = NULL;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcID, &PyDict_Type, &pParams ) )
-		return NULL;
-
-	std::string sID             = pcID ? std::string( pcID ) : "";
-	const CVAStruct oParameters = ConvertPythonDictToVAStruct( pParams );
-	g_pVANetClient->GetCoreInstance( )->SetReproductionModuleParameters( sID, oParameters );
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* set_reproduction_module_muted( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", "muted", NULL };
-	const char* pcFormat        = "s|b:set_reproduction_module_muted";
-	char* pcID                  = nullptr;
-	bool bMuted                 = true;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcID, &bMuted ) )
-		return NULL;
-
-	std::string sID = pcID ? std::string( pcID ) : "";
-	g_pVANetClient->GetCoreInstance( )->SetReproductionModuleMuted( sID, bMuted );
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_reproduction_module_muted( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "id", NULL };
-	const char* pcFormat        = "s:get_reproduction_module_muted";
-	char* pcID                  = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcID ) )
-		return NULL;
-
-	std::string sID = pcID ? std::string( pcID ) : "";
-	return PyBool_FromLong( g_pVANetClient->GetCoreInstance( )->GetReproductionModuleMuted( sID ) );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_input_gain( PyObject*, PyObject* )
-{
-	VAPY_REQUIRE_CONN_TRY;
-	return PyFloat_FromDouble( g_pVANetClient->GetCoreInstance( )->GetInputGain( ) );
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* set_input_gain( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "gain", NULL };
-	const char* pcFormat        = "d:set_input_gain";
-	double dGain                = -1;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &dGain ) )
-		return NULL;
-
-	g_pVANetClient->GetCoreInstance( )->SetInputGain( dGain );
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* set_input_muted( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "muted", NULL };
-	const char* pcFormat        = "|b:set_input_muted";
-	bool bMuted                 = true;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &bMuted ) )
-		return NULL;
-
-	g_pVANetClient->GetCoreInstance( )->SetInputMuted( bMuted );
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_input_muted( PyObject*, PyObject* )
-{
-	VAPY_REQUIRE_CONN_TRY;
-	return PyBool_FromLong( g_pVANetClient->GetCoreInstance( )->GetInputMuted( ) );
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_output_gain( PyObject*, PyObject* )
-{
-	VAPY_REQUIRE_CONN_TRY;
-	return PyFloat_FromDouble( g_pVANetClient->GetCoreInstance( )->GetInputGain( ) );
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* set_output_gain( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "gain", NULL };
-	const char* pcFormat        = "d:set_output_gain";
-	double dGain                = -1;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &dGain ) )
-		return NULL;
-
-	g_pVANetClient->GetCoreInstance( )->SetOutputGain( dGain );
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* set_output_muted( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "muted", NULL };
-	const char* pcFormat        = "|b:set_output_muted";
-	bool bMuted                 = true;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &bMuted ) )
-		return NULL;
-
-	g_pVANetClient->GetCoreInstance( )->SetOutputMuted( bMuted );
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_output_muted( PyObject*, PyObject* )
-{
-	VAPY_REQUIRE_CONN_TRY;
-	return PyBool_FromLong( g_pVANetClient->GetCoreInstance( )->GetOutputMuted( ) );
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_global_auralization_mode( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "short_mode", NULL };
-	const char* pcFormat        = "|b:get_global_auralization_mode";
-	bool bShortMode             = true;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &bShortMode ) )
-		return NULL;
-
-	const int iAM         = g_pVANetClient->GetCoreInstance( )->GetGlobalAuralizationMode( );
-	const std::string sAM = SaveStringToUnicodeConversion( IVAInterface::GetAuralizationModeStr( iAM, bShortMode ) );
-
-	return PyUnicode_FromString( sAM.c_str( ) );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* set_global_auralization_mode( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "auralization_mode", NULL };
-	const char* pcFormat        = "s:set_global_auralization_mode";
-	char* pcAM                  = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcAM ) )
-		return NULL;
-
-	std::string sAM = pcAM ? std::string( pcAM ) : "";
-
-	const int iCurrentAM = g_pVANetClient->GetCoreInstance( )->GetGlobalAuralizationMode( );
-	const int iAM        = IVAInterface::ParseAuralizationModeStr( sAM, iCurrentAM );
-	g_pVANetClient->GetCoreInstance( )->SetGlobalAuralizationMode( iAM );
-
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_core_clock( PyObject*, PyObject* )
-{
-	VAPY_REQUIRE_CONN_TRY;
-	return PyFloat_FromDouble( g_pVANetClient->GetCoreInstance( )->GetCoreClock( ) );
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* set_core_clock( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "time", NULL };
-	const char* pcFormat        = "d:set_core_clock";
-	double dTime                = -1;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &dTime ) )
-		return NULL;
-
-	g_pVANetClient->GetCoreInstance( )->SetCoreClock( dTime );
-	Py_RETURN_NONE;
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* substitute_macro( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "rawstring", NULL };
-	const char* pcFormat        = "s:substitute_macro";
-	char* pcRawString           = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcRawString ) )
-		return NULL;
-
-	std::string sSubstitutedString = g_pVANetClient->GetCoreInstance( )->SubstituteMacros( std::string( pcRawString ) );
-	return PyUnicode_FromString( SaveStringToUnicodeConversion( sSubstitutedString ).c_str( ) );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* find_file_path( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "rawpath", NULL };
-	const char* pcFormat        = "s:find_file_path";
-	char* pcRawFilePath         = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcRawFilePath ) )
-		return NULL;
-
-	std::string sSubstitutedPath = g_pVANetClient->GetCoreInstance( )->FindFilePath( std::string( pcRawFilePath ) );
-	return PyUnicode_FromString( SaveStringToUnicodeConversion( sSubstitutedPath ).c_str( ) );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_core_configuration( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "filter_enabled", NULL };
-	const char* pcFormat        = "|b:get_core_configuration";
-	bool bFilterEnabled         = true;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &bFilterEnabled ) )
-		return NULL;
-
-	CVAStruct oCoreConfig = g_pVANetClient->GetCoreInstance( )->GetCoreConfiguration( bFilterEnabled );
-	return ConvertVAStructToPythonDict( oCoreConfig );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_hardware_configuration( PyObject*, PyObject* )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	CVAStruct oHWConfig = g_pVANetClient->GetCoreInstance( )->GetHardwareConfiguration( );
-	return ConvertVAStructToPythonDict( oHWConfig );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_file_list( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "recursive", "filter_suffix_mask", NULL };
-	const char* pcFormat        = "|bs:get_file_list";
-	bool bFilterEnabled         = true;
-	char* pcFilterSuffixMask    = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &bFilterEnabled, &pcFilterSuffixMask ) )
-		return NULL;
-
-	std::string sFilterSuffixMask = pcFilterSuffixMask ? std::string( pcFilterSuffixMask ) : "*";
-	CVAStruct oFileList           = g_pVANetClient->GetCoreInstance( )->GetFileList( bFilterEnabled, sFilterSuffixMask );
-	return ConvertVAStructToPythonDict( oFileList );
-
-	VAPY_CATCH_RETURN;
-};
-
-
-static PyObject* get_log_level_str( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "loglevel", NULL };
-	const char* pcFormat        = "i:get_log_level_str";
-	int iLogLevel               = -1;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iLogLevel ) )
-		return NULL;
-
-	std::string sLogLevel = g_pVANetClient->GetCoreInstance( )->GetLogLevelStr( iLogLevel );
-	return PyUnicode_FromString( SaveStringToUnicodeConversion( sLogLevel ).c_str( ) );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* parse_auralization_mode_str( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "auralization_mode_string", NULL };
-	const char* pcFormat        = "s:parse_auralization_mode_str";
-	char* pcAM                  = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcAM ) )
-		return NULL;
-
-	int iAM = g_pVANetClient->GetCoreInstance( )->ParseAuralizationModeStr( std::string( pcAM ) );
-	return PyLong_FromLong( iAM );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_auralization_mode_str( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "auralization_mode", "short_name", NULL };
-	const char* pcFormat        = "i|b:get_auralization_mode_str";
-	int iAM                     = -1;
-	bool bShort                 = false;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iAM, &bShort ) )
-		return NULL;
-
-	std::string sAM = g_pVANetClient->GetCoreInstance( )->GetAuralizationModeStr( iAM, bShort );
-	return PyUnicode_FromString( SaveStringToUnicodeConversion( sAM ).c_str( ) );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_volume_str_decibel( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "gain", NULL };
-	const char* pcFormat        = "d:get_volume_str_decibel";
-	double dGain                = -1.0f;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &dGain ) )
-		return NULL;
-
-	std::string sGainDB = g_pVANetClient->GetCoreInstance( )->GetVolumeStrDecibel( dGain );
-	return PyUnicode_FromString( SaveStringToUnicodeConversion( sGainDB ).c_str( ) );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* parse_playback_state_str( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "state_string", NULL };
-	const char* pcFormat        = "s:parse_playback_state_str";
-	char* pcState               = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcState ) )
-		return NULL;
-
-	int iState = g_pVANetClient->GetCoreInstance( )->ParsePlaybackState( std::string( pcState ) );
-	return PyLong_FromLong( iState );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* parse_playback_action_str( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "action_string", NULL };
-	const char* pcFormat        = "s:parse_playback_action_str";
-	char* pcAction              = nullptr;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &pcAction ) )
-		return NULL;
-
-	int iState = g_pVANetClient->GetCoreInstance( )->ParsePlaybackState( std::string( pcAction ) );
-	return PyLong_FromLong( iState );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_playback_state_str( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "state", NULL };
-	const char* pcFormat        = "i:get_playback_state_str";
-	int iState                  = -1;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iState ) )
-		return NULL;
-
-	std::string sState = g_pVANetClient->GetCoreInstance( )->GetPlaybackStateStr( iState );
-	return PyUnicode_FromString( SaveStringToUnicodeConversion( sState ).c_str( ) );
-
-	VAPY_CATCH_RETURN;
-};
-
-static PyObject* get_playback_action_str( PyObject*, PyObject* pArgs, PyObject* pKeywordTuple )
-{
-	VAPY_REQUIRE_CONN_TRY;
-
-	static char* pKeyWordList[] = { "state", NULL };
-	const char* pcFormat        = "i:get_playback_action_str";
-	int iAction                 = -1;
-	if( !PyArg_ParseTupleAndKeywords( pArgs, pKeywordTuple, pcFormat, pKeyWordList, &iAction ) )
-		return NULL;
-
-	std::string sAction = g_pVANetClient->GetCoreInstance( )->GetPlaybackActionStr( iAction );
-	return PyUnicode_FromString( SaveStringToUnicodeConversion( sAction ).c_str( ) );
-
-	VAPY_CATCH_RETURN;
-};
-
-#endif
\ No newline at end of file
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/tests/conftest.py b/tests/conftest.py
new file mode 100644
index 0000000000000000000000000000000000000000..de967f1de753b1543965b88b1a238416dbdb76c1
--- /dev/null
+++ b/tests/conftest.py
@@ -0,0 +1,27 @@
+# SPDX-FileCopyrightText: 2024-present Pascal Palenda <pascal.palenda@akustik.rwth-aachen.de>
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import asyncio
+
+import pytest_asyncio
+from grpclib.testing import ChannelFor
+
+import vapython as vapy
+import vapython.vanet._vanet_grpc as va_grpc
+
+
+@pytest_asyncio.fixture(scope="session")
+async def mocked_connection(session_mocker):
+    service = va_grpc.VaBase()
+
+    async with ChannelFor([service]) as channel:
+        session_mocker.patch("vapython.vanet._va_interface.Channel.__new__", return_value=channel)
+        va = vapy.VA()
+        session_mocker.patch.object(va, "_get_state", autospec=True)
+        va.connect(add_event_handling=False)
+        yield va, service
+
+    # return the "ownership" of the event loop back to the pytest-asyncio thread
+    asyncio.set_event_loop(va._loop)
+    va.disconnect()
diff --git a/tests/directivity_test.py b/tests/directivity_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..239a80e7bb4bdda1f6ed500599c8915056c63ddd
--- /dev/null
+++ b/tests/directivity_test.py
@@ -0,0 +1,249 @@
+# SPDX-FileCopyrightText: 2024-present Pascal Palenda <pascal.palenda@akustik.rwth-aachen.de>
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import random
+from pathlib import Path
+
+import pytest
+from betterproto.lib.google import protobuf
+
+import vapython.vanet._helper as va_grpc_helper
+import vapython.vanet._vanet_grpc as va_grpc
+
+from .utils import random_grpc_struct, random_string, random_struct
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    [(random_string(5), random_struct(), random.randint(0, 100)) for _ in range(5)],
+)
+async def test_create_directivity_from_parameters(mocked_connection, mocker, test_input):
+    va, service = mocked_connection
+
+    method_name = "create_directivity_from_parameters"
+    message_name = "CreateDirectivityFromParametersRequest"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.Int32Value(test_input[2]), autospec=True)
+
+    function = getattr(va, method_name)
+
+    ret_val = function(name=test_input[0], parameters=test_input[1])
+
+    getattr(service, method_name).assert_called_once_with(
+        getattr(va_grpc, message_name)(test_input[0], va_grpc_helper.convert_struct_to_vanet(test_input[1]))
+    )
+    assert ret_val == test_input[2]
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    range(5),
+)
+@pytest.mark.parametrize(
+    "path_type",
+    [str, Path],
+)
+async def test_create_directivity_from_file(mocked_connection, mocker, test_input, path_type):  # noqa: ARG001
+    va, service = mocked_connection
+
+    public_method_name = "create_directivity_from_file"
+    method_name = "create_directivity_from_parameters"
+    message_name = "CreateDirectivityFromParametersRequest"
+
+    identifier = random.randint(0, 100)
+
+    mocker.patch.object(service, method_name, return_value=protobuf.Int32Value(identifier), autospec=True)
+
+    test_path = path_type(random_string(5))
+
+    function = getattr(va, public_method_name)
+
+    ret_val = function(test_path)
+
+    getattr(service, method_name).assert_called_once_with(
+        getattr(va_grpc, message_name)("", va_grpc_helper.convert_struct_to_vanet({"filepath": str(test_path)}))
+    )
+    assert ret_val == identifier
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    [(random.randint(0, 100), random.choice([True, False])) for _ in range(5)],
+)
+async def test_delete_directivity(mocked_connection, mocker, test_input):
+    va, service = mocked_connection
+
+    method_name = "delete_directivity"
+    message_name = "DeleteDirectivityRequest"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.BoolValue(test_input[1]), autospec=True)
+
+    function = getattr(va, method_name)
+
+    ret_val = function(test_input[0])
+
+    getattr(service, method_name).assert_called_once_with(getattr(va_grpc, message_name)(test_input[0]))
+    assert ret_val == test_input[1]
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    range(5),
+)
+async def test_get_directivity_info(mocked_connection, mocker, test_input):  # noqa: ARG001
+    va, service = mocked_connection
+
+    method_name = "get_directivity_info"
+    message_name = "GetDirectivityInfoRequest"
+    reply_name = "DirectivityInfo"
+
+    reply = getattr(va_grpc, reply_name)(
+        id=random.randint(0, 100),
+        name=random_string(5),
+        parameters=random_grpc_struct(),
+    )
+
+    mocker.patch.object(service, method_name, return_value=reply, autospec=True)
+
+    function = getattr(va, method_name)
+
+    ret_val = function(reply.id)
+
+    getattr(service, method_name).assert_called_once_with(getattr(va_grpc, message_name)(reply.id))
+    assert ret_val == reply
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    range(5),
+)
+async def test_get_directivity_infos(mocked_connection, mocker, test_input):
+    va, service = mocked_connection
+
+    method_name = "get_directivity_infos"
+
+    info = va_grpc.DirectivityInfo(
+        id=random.randint(0, 100),
+        name=random_string(5),
+        parameters=random_grpc_struct(),
+    )
+    reply = va_grpc.DirectivityInfosReply(
+        directivity_infos=[info for _ in range(test_input)],
+    )
+
+    mocker.patch.object(service, method_name, return_value=reply, autospec=True)
+
+    function = getattr(va, method_name)
+
+    ret_val = function()
+
+    assert getattr(service, method_name).called
+    assert ret_val == reply
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    [(random.randint(0, 100), random_string(5)) for _ in range(5)],
+)
+async def test_set_directivity_name(mocked_connection, mocker, test_input):
+    va, service = mocked_connection
+
+    method_name = "set_directivity_name"
+    message_name = "SetDirectivityNameRequest"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.Empty(), autospec=True)
+
+    function = getattr(va, method_name)
+
+    function(test_input[0], test_input[1])
+
+    getattr(service, method_name).assert_called_once_with(getattr(va_grpc, message_name)(test_input[0], test_input[1]))
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    [(random.randint(0, 100), random_string(5)) for _ in range(5)],
+)
+async def test_get_directivity_name(mocked_connection, mocker, test_input):
+    va, service = mocked_connection
+
+    method_name = "get_directivity_name"
+    message_name = "GetDirectivityNameRequest"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.StringValue(test_input[1]), autospec=True)
+
+    function = getattr(va, method_name)
+
+    ret_val = function(test_input[0])
+
+    getattr(service, method_name).assert_called_once_with(getattr(va_grpc, message_name)(test_input[0]))
+    assert ret_val == test_input[1]
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    [
+        (
+            random.randint(0, 100),
+            random_struct(),
+        )
+        for _ in range(5)
+    ],
+)
+async def test_set_directivity_parameters(mocked_connection, mocker, test_input):
+    va, service = mocked_connection
+
+    method_name = "set_directivity_parameters"
+    message_name = "SetDirectivityParametersRequest"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.Empty(), autospec=True)
+
+    function = getattr(va, method_name)
+
+    function(test_input[0], test_input[1])
+
+    getattr(service, method_name).assert_called_once_with(
+        getattr(va_grpc, message_name)(
+            test_input[0],
+            va_grpc_helper.convert_struct_to_vanet(test_input[1]),
+        )
+    )
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    [
+        (
+            random.randint(0, 100),
+            random_struct(),
+            random_grpc_struct(),
+        )
+        for _ in range(5)
+    ],
+)
+async def test_get_directivity_parameters(mocked_connection, mocker, test_input):
+    va, service = mocked_connection
+
+    method_name = "get_directivity_parameters"
+    message_name = "GetDirectivityParametersRequest"
+
+    mocker.patch.object(service, method_name, return_value=test_input[2], autospec=True)
+
+    function = getattr(va, method_name)
+
+    ret_val = function(test_input[0], test_input[1])
+
+    getattr(service, method_name).assert_called_once_with(
+        getattr(va_grpc, message_name)(test_input[0], va_grpc_helper.convert_struct_to_vanet(test_input[1]))
+    )
+    assert ret_val == va_grpc_helper.convert_struct_from_vanet(test_input[2])
diff --git a/tests/global_methods_test.py b/tests/global_methods_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..e65b3c08872bc7e5715539da37e40b4d3a5d4237
--- /dev/null
+++ b/tests/global_methods_test.py
@@ -0,0 +1,501 @@
+# SPDX-FileCopyrightText: 2024-present Pascal Palenda <pascal.palenda@akustik.rwth-aachen.de>
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import random
+from pathlib import Path
+
+import pytest
+from betterproto.lib.google import protobuf
+
+import vapython._helper as va_helper
+import vapython.vanet._helper as helper
+import vapython.vanet._vanet_grpc as va_grpc
+from vapython import VA
+
+from .utils import random_grpc_struct, random_string, random_struct
+
+random_floats = [-1.0, 0.0, 1.0, 3.141]
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    [
+        (
+            random_string(5),
+            random_string(5),
+            random_string(5),
+            random_string(5),
+        )
+        for _ in range(5)
+    ]
+    + [("", "", "", "")],
+)
+async def test_get_version_info(mocked_connection, mocker, test_input):
+    va, service = mocked_connection
+
+    mocker.patch.object(service, "get_version_info", return_value=va_grpc.VersionInfo(*test_input), autospec=True)
+
+    info = va.get_version_info()
+
+    assert service.get_version_info.called
+    assert info.version == test_input[0]
+    assert info.release_date == test_input[1]
+    assert info.property_flags == test_input[2]
+    assert info.comments == test_input[3]
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("test_input", [True, False])
+async def test_set_output_muted(mocked_connection, mocker, test_input):
+    va, service = mocked_connection
+
+    mocker.patch.object(service, "set_output_muted", return_value=protobuf.Empty(), autospec=True)
+
+    va.set_output_muted(muted=test_input)
+
+    service.set_output_muted.assert_called_once_with(va_grpc.SetOutputMutedRequest(muted=test_input))
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("test_input", [True, False])
+async def test_get_output_muted(mocked_connection, mocker, test_input):
+    va, service = mocked_connection
+
+    mocker.patch.object(service, "get_output_muted", return_value=protobuf.BoolValue(value=test_input), autospec=True)
+
+    ret_val = va.get_output_muted()
+
+    assert service.get_output_muted.called
+    assert ret_val == test_input
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("test_input", [True, False])
+async def test_set_input_muted(mocked_connection, mocker, test_input):
+    va, service = mocked_connection
+
+    mocker.patch.object(service, "set_input_muted", return_value=protobuf.Empty(), autospec=True)
+
+    va.set_input_muted(muted=test_input)
+
+    service.set_input_muted.assert_called_once_with(va_grpc.SetInputMutedRequest(muted=test_input))
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("test_input", [True, False])
+async def test_get_input_muted(mocked_connection, mocker, test_input):
+    va, service = mocked_connection
+
+    mocker.patch.object(service, "get_input_muted", return_value=protobuf.BoolValue(value=test_input), autospec=True)
+
+    ret_val = va.get_input_muted()
+
+    assert service.get_input_muted.called
+    assert ret_val == test_input
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    [
+        (
+            random_string(5),
+            random_struct(),
+        )
+        for _ in range(5)
+    ],
+)
+async def test_call_module(mocked_connection, mocker, test_input):
+    va, service = mocked_connection
+
+    mocker.patch.object(
+        service, "call_module", return_value=helper.convert_struct_to_vanet(test_input[1]), autospec=True
+    )
+
+    ret_val = va.call_module(module_name=test_input[0], module_parameters=test_input[1])
+
+    service.call_module.assert_called_once_with(
+        va_grpc.CallModuleRequest(
+            module_name=test_input[0], module_parameters=helper.convert_struct_to_vanet(test_input[1])
+        )
+    )
+    assert test_input[1] == ret_val
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    [
+        va_grpc.VaModuleInfos(
+            module_infos=[
+                va_grpc.VaModuleInfosModuleInfo(
+                    name=random_string(5), description=random_string(10), id=random.randint(0, 5)
+                )
+                for _ in range(random.randint(0, 5))
+            ]
+        )
+        for _ in range(5)
+    ],
+)
+async def test_get_modules(mocked_connection, mocker, test_input):
+    va, service = mocked_connection
+
+    mocker.patch.object(service, "get_modules", return_value=test_input, autospec=True)
+
+    ret_val = va.get_modules()
+
+    assert service.get_modules.called
+    assert test_input == ret_val
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("test_input", random_floats)
+async def test_set_output_gain(mocked_connection, mocker, test_input):
+    va, service = mocked_connection
+
+    mocker.patch.object(service, "set_output_gain", return_value=protobuf.Empty(), autospec=True)
+
+    va.set_output_gain(gain=test_input)
+
+    service.set_output_gain.assert_called_once_with(va_grpc.SetOutputGainRequest(gain=test_input))
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("test_input", random_floats)
+async def test_get_output_gain(mocked_connection, mocker, test_input):
+    va, service = mocked_connection
+
+    mocker.patch.object(service, "get_output_gain", return_value=protobuf.DoubleValue(value=test_input), autospec=True)
+
+    ret_val = va.get_output_gain()
+
+    assert service.get_output_gain.called
+    assert ret_val == test_input
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("test_input", random_floats)
+async def test_set_input_gain(mocked_connection, mocker, test_input):
+    va, service = mocked_connection
+
+    mocker.patch.object(service, "set_input_gain", return_value=protobuf.Empty(), autospec=True)
+
+    va.set_input_gain(gain=test_input)
+
+    service.set_input_gain.assert_called_once_with(va_grpc.SetInputGainRequest(gain=test_input))
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("test_input", random_floats)
+async def test_get_input_gain(mocked_connection, mocker, test_input):
+    va, service = mocked_connection
+
+    mocker.patch.object(service, "get_input_gain", return_value=protobuf.DoubleValue(value=test_input), autospec=True)
+
+    ret_val = va.get_input_gain()
+
+    assert service.get_input_gain.called
+    assert ret_val == test_input
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    [
+        ("+DS", 0),
+        ("+DS,-ER", 0),
+        ("+DS,-ER,-SD", 5),
+        ("+DS,-ER,-SD", 8191),
+    ],
+)
+async def test_set_global_auralization_mode(mocked_connection, mocker, test_input):
+    va, service = mocked_connection
+
+    mocker.patch.object(service, "set_global_auralization_mode", return_value=protobuf.Empty(), autospec=True)
+    mocker.patch.object(
+        service, "get_global_auralization_mode", return_value=protobuf.Int32Value(value=test_input[1]), autospec=True
+    )
+
+    va.set_global_auralization_mode(test_input[0])
+
+    service.set_global_auralization_mode.assert_called_once_with(
+        va_grpc.SetGlobalAuralizationModeRequest(mode=va_helper.parse_aura_mode_str(*test_input))
+    )
+    assert service.get_global_auralization_mode.called
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("aura_mode", [0, 1, 4096, 8191, 5, 7, 10])
+async def test_get_global_auralization_mode(mocked_connection, mocker, aura_mode):
+    va, service = mocked_connection
+
+    mocker.patch.object(
+        service, "get_global_auralization_mode", return_value=protobuf.Int32Value(value=aura_mode), autospec=True
+    )
+
+    ret_val = va.get_global_auralization_mode()
+
+    assert service.get_global_auralization_mode.called
+    assert ret_val == va_helper.convert_aura_mode_to_str(aura_mode)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "core_state", [va_grpc.CoreStateState.CREATED, va_grpc.CoreStateState.READY, va_grpc.CoreStateState.FAIL]
+)
+async def test_get_server_state(mocked_connection, mocker, core_state):
+    va, service = mocked_connection
+
+    mocker.patch.object(service, "get_state", return_value=va_grpc.CoreState(core_state), autospec=True)
+
+    ret_val = va.get_server_state()
+
+    assert service.get_state.called
+    assert ret_val == va_grpc.CoreState(core_state)
+
+
+@pytest.mark.asyncio
+async def test_reset(mocked_connection, mocker):
+    va, service = mocked_connection
+
+    mocker.patch.object(service, "reset", return_value=protobuf.Empty(), autospec=True)
+
+    va.reset()
+
+    assert service.reset.called
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "filter_conf",
+    [True, False],
+)
+@pytest.mark.parametrize(
+    "test_input",
+    [random_grpc_struct() for _ in range(5)],
+)
+async def test_get_core_configuration(mocked_connection, mocker, filter_conf, test_input):
+    va, service = mocked_connection
+
+    mocker.patch.object(service, "get_core_configuration", return_value=test_input, autospec=True)
+
+    ret_val = va.get_core_configuration(only_enabled=filter_conf)
+
+    service.get_core_configuration.assert_called_once_with(va_grpc.GetCoreConfigurationRequest(filter_conf))
+    assert ret_val == helper.convert_struct_from_vanet(test_input)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    [random_grpc_struct() for _ in range(5)],
+)
+async def test_get_hardware_configuration(mocked_connection, mocker, test_input):
+    va, service = mocked_connection
+
+    mocker.patch.object(service, "get_hardware_configuration", return_value=test_input, autospec=True)
+
+    ret_val = va.get_hardware_configuration()
+
+    assert service.get_hardware_configuration.called
+    assert ret_val == helper.convert_struct_from_vanet(test_input)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    range(5),
+)
+async def test_get_core_clock(mocked_connection, mocker, test_input):  # noqa: ARG001
+    va, service = mocked_connection
+
+    clock = random.uniform(0, 10e3)
+
+    mocker.patch.object(service, "get_core_clock", return_value=protobuf.DoubleValue(clock), autospec=True)
+
+    ret_val = va.get_core_clock()
+
+    assert service.get_core_clock.called
+    assert ret_val == clock
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    range(5),
+)
+async def test_set_core_clock(mocked_connection, mocker, test_input):  # noqa: ARG001
+    va, service = mocked_connection
+
+    clock = random.uniform(0, 10e3)
+
+    mocker.patch.object(service, "set_core_clock", return_value=protobuf.Empty(), autospec=True)
+
+    va.set_core_clock(clock)
+
+    service.set_core_clock.assert_called_once_with(va_grpc.SetCoreClockRequest(clock))
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    [True, False],
+)
+async def test_get_update_locked(mocked_connection, mocker, test_input):
+    va, service = mocked_connection
+
+    mocker.patch.object(service, "get_update_locked", return_value=protobuf.BoolValue(test_input), autospec=True)
+
+    ret_val = va.get_update_locked()
+
+    assert service.get_update_locked.called
+    assert ret_val == test_input
+
+
+@pytest.mark.asyncio
+async def test_lock_update(mocked_connection, mocker):
+    va, service = mocked_connection
+
+    mocker.patch.object(service, "lock_update", return_value=protobuf.Empty(), autospec=True)
+
+    va.lock_update()
+
+    assert service.lock_update.called
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    range(5),
+)
+async def test_unlock_update(mocked_connection, mocker, test_input):  # noqa: ARG001
+    va, service = mocked_connection
+
+    scene_state = random.randint(-1, 100)
+
+    mocker.patch.object(service, "unlock_update", return_value=protobuf.Int32Value(scene_state), autospec=True)
+
+    ret_val = va.unlock_update()
+
+    assert service.unlock_update.called
+    assert ret_val == scene_state
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    range(5),
+)
+@pytest.mark.parametrize(
+    "input_type",
+    [Path, str],
+)
+@pytest.mark.parametrize(
+    "return_value",
+    [True, False],
+)
+async def test_add_search_path(mocked_connection, mocker, test_input, input_type, return_value):
+    va, service = mocked_connection
+
+    mocker.patch.object(
+        service, "call_module", return_value=helper.convert_struct_to_vanet({"pathvalid": return_value}), autospec=True
+    )
+
+    test_path = input_type("/".join([random_string(5) for _ in range(test_input)]))
+
+    ret_val = va.add_search_path(test_path)
+
+    service.call_module.assert_called_once_with(
+        va_grpc.CallModuleRequest(
+            module_name="VACore", module_parameters=helper.convert_struct_to_vanet({"addsearchpath": str(test_path)})
+        )
+    )
+    assert ret_val == return_value
+
+
+@pytest.mark.asyncio
+async def test_shutdown_server(mocked_connection, mocker):
+    va, service = mocked_connection
+
+    mocker.patch.object(service, "call_module", return_value=helper.convert_struct_to_vanet({}), autospec=True)
+
+    va.shutdown_server()
+
+    service.call_module.assert_called_once_with(
+        va_grpc.CallModuleRequest(
+            module_name="VACore", module_parameters=helper.convert_struct_to_vanet({"shutdown": True})
+        )
+    )
+
+
+@pytest.mark.asyncio
+async def test_get_server_address(mocked_connection):
+    va = VA()
+
+    addr = va.get_server_address()
+
+    assert addr is None
+
+    va, _ = mocked_connection
+
+    addr = va.get_server_address()
+
+    assert addr == "localhost:12340"
+
+
+def test_start_server(mocker):
+    with pytest.raises(FileNotFoundError, match="Could not find valid VACore.ini file."):
+        VA.start_server(config_ini_file="should_not_exist.ini", va_server_path=__file__)
+
+    with pytest.raises(FileNotFoundError, match="Could not find valid VAServer executable."):
+        VA.start_server(config_ini_file="should_not_exist.ini", va_server_path="should_not_exist.exe")
+
+    mocker.patch("vapython._helper.find_server_executable", return_value=Path(__file__), autospec=True)
+
+    with pytest.raises(FileNotFoundError, match="Could not find valid VACore.ini file."):
+        VA.start_server(config_ini_file="should_not_exist.ini")
+
+    with pytest.raises(FileNotFoundError, match="Could not find valid VACore.ini file."):
+        VA.start_server()
+
+    mocker.patch("vapython._helper.find_server_executable", return_value=Path())
+
+    with pytest.raises(FileNotFoundError, match="Could not find valid VAServer executable."):
+        VA.start_server()
+
+    import subprocess
+
+    mocker.patch("subprocess.Popen", return_value=None, autospec=True)
+
+    VA.start_server(config_ini_file=Path(__file__), va_server_path=Path(__file__))
+
+    subprocess.Popen.assert_called_once_with(
+        [
+            Path(__file__),
+            "--config",
+            Path(__file__),
+            "--server-address",
+            "localhost:12340",
+            "--remote",
+        ],
+        creationflags=subprocess.CREATE_NEW_CONSOLE,
+    )
+
+    VA.start_server(
+        config_ini_file=Path(__file__), va_server_path=Path(__file__), extra_args=["foo", "bar"], dedicated_window=False
+    )
+
+    subprocess.Popen.assert_called_with(
+        [
+            Path(__file__),
+            "--config",
+            Path(__file__),
+            "--server-address",
+            "localhost:12340",
+            "--remote",
+            "foo",
+            "bar",
+        ],
+    )
diff --git a/tests/helper_test.py b/tests/helper_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..0a02728d3db2a20792dba96113925c0563154555
--- /dev/null
+++ b/tests/helper_test.py
@@ -0,0 +1,128 @@
+# SPDX-FileCopyrightText: 2024-present Pascal Palenda <pascal.palenda@akustik.rwth-aachen.de>
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from pathlib import Path
+
+import pytest
+
+from vapython._helper import (
+    convert_aura_mode_to_str,
+    default_auralization_mode,
+    find_server_executable,
+    parse_aura_mode_str,
+    possible_auralization_modes,
+)
+
+
+def test_parse_aura_mode_str():
+    # Test case 1: Empty input string should return 0
+    assert parse_aura_mode_str("", 0) == 0
+
+    # Test case 2: Input string "*" should return the sum of all possible aura modes
+    assert parse_aura_mode_str("*", 0) == sum(m[0] for m in possible_auralization_modes.values())
+
+    # Test case 3: Input string "default" should return the default aura mode
+    assert parse_aura_mode_str("default", 0) == default_auralization_mode
+
+    # Test case 4: Input string "+AB,-SD" should enable AB and disable SD
+    assert parse_aura_mode_str("+AB,-SD", 0) == 4096
+
+    # Test case 5: Input string "+DS,-ER,SD" should enable DS, disable ER
+    assert parse_aura_mode_str("+DS,-ER,SD", 0) == 1
+
+    # Test case 6: Input string "-DS,-ER,SD" with aura_mode = 5 should disable DS and ER (unsigned "SD" is ignored)
+    assert parse_aura_mode_str("-DS,-ER,SD", 5) == 4
+
+    # Test case 7: Input string "-DS,+ER,SD" with aura_mode = default_aura_mode should disable DS, enable ER
+    assert (
+        parse_aura_mode_str("-DS,+ER,SD", default_auralization_mode)
+        == default_auralization_mode - possible_auralization_modes["DS"][0] + possible_auralization_modes["ER"][0]
+    )
+
+
+def test_parse_aura_mode_str_invalid_input():
+    # Test case 1: Invalid aura mode
+    with pytest.raises(ValueError, match="Invalid aura mode: XX"):
+        parse_aura_mode_str("+XX", 0)
+
+    # Test case 2: Invalid aura mode
+    with pytest.raises(ValueError, match="Invalid aura mode: XX"):
+        parse_aura_mode_str("-XX", 0)
+
+
+def test_convert_aura_mode_to_str():
+    # Test case 1: aura_mode = 0
+    assert convert_aura_mode_to_str(0, short_form=False) == "Nothing"
+    assert convert_aura_mode_to_str(0, short_form=True) == ""
+
+    # Test case 2: all modes enabled — sum of all possible aura mode values, i.e. 8191 (binary: 1111111111111)
+    assert convert_aura_mode_to_str(sum(m[0] for m in possible_auralization_modes.values()), short_form=False) == "All"
+    assert convert_aura_mode_to_str(8191) == "DS,ER,DD,SD,MA,TV,SC,DF,NF,DP,SL,TR,AB"
+
+    # Test case 3: aura_mode = default_aura_mode
+    assert convert_aura_mode_to_str(default_auralization_mode, short_form=False) == "Default"
+
+    # Test case 4: aura_mode = 5 (binary: 101)
+    assert convert_aura_mode_to_str(5) == "DS,DD"
+
+    # Test case 5: aura_mode = 7 (binary: 111)
+    assert convert_aura_mode_to_str(7) == "DS,ER,DD"
+
+    # Test case 6: aura_mode = 10 (binary: 1010)
+    assert convert_aura_mode_to_str(10) == "ER,SD"
+
+    # Test case 7: aura_mode = 15 (binary: 1111)
+    assert convert_aura_mode_to_str(15) == "DS,ER,DD,SD"
+
+    # Test case 8: aura_mode = 4096 (binary: 1000000000000)
+    assert convert_aura_mode_to_str(4096) == "AB"
+
+    # Test case 9: aura_mode = 1 (binary: 1)
+    assert convert_aura_mode_to_str(1, short_form=False) == "Direct Sound"
+
+
+def test_find_server_executable(mocker, tmp_path):
+    import shutil
+    import subprocess
+    import tkinter as tk
+
+    mocker.patch(
+        "subprocess.run", autospec=True, return_value=subprocess.CompletedProcess("", 0, b"VACore 2023.b (release)")
+    )
+    mocker.patch("vapython._helper.user_config_dir", return_value=tmp_path, autospec=True)
+    mocker.patch("vapython._helper.metadata.version", return_value="2023.1", autospec=True)
+
+    config_file = tmp_path / "vapython.cfg"
+
+    mocker.patch("shutil.which", return_value="string", autospec=True)
+    result = find_server_executable()
+
+    assert result == Path("string")
+
+    shutil.which.assert_called_once_with("VAServer")
+
+    assert config_file.exists()
+
+    config_file.unlink()
+
+    mocker.patch("shutil.which", return_value=None)
+    mocker.patch("tkinter.filedialog.askopenfilename", return_value="tkstring", autospec=True)
+
+    result = find_server_executable()
+
+    assert result == Path("tkstring")
+    shutil.which.assert_called_with("VAServer")
+    tk.filedialog.askopenfilename.assert_called_once()
+
+    assert config_file.exists()
+
+    config_file.unlink()
+
+    mocker.patch("tkinter.filedialog.askopenfilename", return_value="")
+
+    result = find_server_executable()
+
+    assert result is None
+    shutil.which.assert_called_with("VAServer")
+    tk.filedialog.askopenfilename.assert_called()
diff --git a/tests/receiver_test.py b/tests/receiver_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..0307ab62b90cde1f10687e2fb9700b82be4cb51b
--- /dev/null
+++ b/tests/receiver_test.py
@@ -0,0 +1,282 @@
+# SPDX-FileCopyrightText: 2024-present Pascal Palenda <pascal.palenda@akustik.rwth-aachen.de>
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import random
+
+import pytest
+from betterproto.lib.google import protobuf
+
+import vapython.vanet._vanet_grpc as va_grpc
+from vapython._types import VAQuaternion
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    [
+        (
+            random.randint(0, 100),
+            [random.uniform(-100, 100) for _ in range(4)],
+        )
+        for _ in range(5)
+    ],
+)
+async def test_set_sound_receiver_head_above_torso_orientation(mocked_connection, mocker, test_input):
+    va, service = mocked_connection
+
+    method_name = "set_sound_receiver_head_above_torso_orientation"
+    message_name = "SetSoundReceiverHeadAboveTorsoOrientationRequest"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.Empty(), autospec=True)
+
+    function = getattr(va, method_name)
+
+    function(test_input[0], test_input[1])
+
+    getattr(service, method_name).assert_called_once_with(
+        getattr(va_grpc, message_name)(
+            test_input[0],
+            va_grpc.Quaternion(*test_input[1]),
+        )
+    )
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    [
+        (
+            random.randint(0, 100),
+            [random.uniform(-100, 100) for _ in range(4)],
+        )
+        for _ in range(5)
+    ],
+)
+async def test_get_sound_receiver_head_above_torso_orientation(mocked_connection, mocker, test_input):
+    va, service = mocked_connection
+
+    method_name = "get_sound_receiver_head_above_torso_orientation"
+    message_name = "GetSoundReceiverHeadAboveTorsoOrientationRequest"
+
+    reply = va_grpc.Quaternion(*test_input[1])
+
+    mocker.patch.object(service, method_name, return_value=reply, autospec=True)
+
+    function = getattr(va, method_name)
+
+    ret_val = function(test_input[0])
+
+    getattr(service, method_name).assert_called_once_with(getattr(va_grpc, message_name)(test_input[0]))
+    assert ret_val == VAQuaternion(*test_input[1])
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "public_method_name",
+    [
+        "set_sound_receiver_real_world_position_orientation_vu",
+        "set_sound_receiver_real_world_position_orientation_view_up",
+    ],
+)
+@pytest.mark.parametrize(
+    "test_input",
+    [
+        (
+            random.randint(0, 100),
+            [random.uniform(-100, 100) for _ in range(3)],
+            [random.uniform(-100, 100) for _ in range(3)],
+            [random.uniform(-100, 100) for _ in range(3)],
+        )
+        for _ in range(5)
+    ],
+)
+async def test_set_sound_receiver_real_world_position_orientation_vu(
+    mocked_connection, mocker, public_method_name, test_input
+):
+    va, service = mocked_connection
+
+    method_name = "set_sound_receiver_real_world_position_orientation_vu"
+    message_name = "SetSoundReceiverRealWorldPositionOrientationVuRequest"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.Empty(), autospec=True)
+
+    function = getattr(va, public_method_name)
+
+    function(test_input[0], test_input[1], test_input[2], test_input[3])
+
+    getattr(service, method_name).assert_called_once_with(
+        getattr(va_grpc, message_name)(
+            test_input[0],
+            va_grpc.Vector3(*test_input[1]),
+            va_grpc.Vector3(*test_input[2]),
+            va_grpc.Vector3(*test_input[3]),
+        )
+    )
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "public_method_name",
+    [
+        "get_sound_receiver_real_world_position_orientation_vu",
+        "get_sound_receiver_real_world_head_position_orientation_view_up",
+    ],
+)
+@pytest.mark.parametrize(
+    "test_input",
+    [
+        (
+            random.randint(0, 100),
+            [random.uniform(-100, 100) for _ in range(3)],
+            [random.uniform(-100, 100) for _ in range(3)],
+            [random.uniform(-100, 100) for _ in range(3)],
+        )
+        for _ in range(5)
+    ],
+)
+async def test_get_sound_receiver_real_world_position_orientation_vu(
+    mocked_connection, mocker, public_method_name, test_input
+):
+    va, service = mocked_connection
+
+    method_name = "get_sound_receiver_real_world_position_orientation_vu"
+    message_name = "GetSoundReceiverRealWorldPositionOrientationVuRequest"
+    reply_name = "GetSoundReceiverRealWorldPositionOrientationVuReply"
+
+    reply = getattr(va_grpc, reply_name)(
+        va_grpc.Vector3(*test_input[1]), va_grpc.Vector3(*test_input[2]), va_grpc.Vector3(*test_input[3])
+    )
+
+    mocker.patch.object(service, method_name, return_value=reply, autospec=True)
+
+    function = getattr(va, public_method_name)
+
+    ret_val = function(test_input[0])
+
+    getattr(service, method_name).assert_called_once_with(getattr(va_grpc, message_name)(test_input[0]))
+    assert ret_val == reply
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    [
+        (
+            random.randint(0, 100),
+            [random.uniform(-100, 100) for _ in range(3)],
+            [random.uniform(-100, 100) for _ in range(4)],
+        )
+        for _ in range(5)
+    ],
+)
+async def test_set_sound_receiver_real_world_pose(mocked_connection, mocker, test_input):
+    va, service = mocked_connection
+
+    method_name = "set_sound_receiver_real_world_pose"
+    message_name = "SetSoundReceiverRealWorldPoseRequest"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.Empty(), autospec=True)
+
+    function = getattr(va, method_name)
+
+    function(test_input[0], test_input[1], test_input[2])
+
+    getattr(service, method_name).assert_called_once_with(
+        getattr(va_grpc, message_name)(
+            test_input[0],
+            va_grpc.Vector3(*test_input[1]),
+            va_grpc.Quaternion(*test_input[2]),
+        )
+    )
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    [
+        (
+            random.randint(0, 100),
+            [random.uniform(-100, 100) for _ in range(3)],
+            [random.uniform(-100, 100) for _ in range(4)],
+        )
+        for _ in range(5)
+    ],
+)
+async def test_get_sound_receiver_real_world_pose(mocked_connection, mocker, test_input):
+    va, service = mocked_connection
+
+    method_name = "get_sound_receiver_real_world_pose"
+    message_name = "GetSoundReceiverRealWorldPoseRequest"
+    reply_name = "GetSoundReceiverRealWorldPoseReply"
+
+    reply = getattr(va_grpc, reply_name)(va_grpc.Vector3(*test_input[1]), va_grpc.Quaternion(*test_input[2]))
+
+    mocker.patch.object(service, method_name, return_value=reply, autospec=True)
+
+    function = getattr(va, method_name)
+
+    ret_val = function(test_input[0])
+
+    getattr(service, method_name).assert_called_once_with(getattr(va_grpc, message_name)(test_input[0]))
+    assert ret_val == reply
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    [
+        (
+            random.randint(0, 100),
+            [random.uniform(-100, 100) for _ in range(4)],
+        )
+        for _ in range(5)
+    ],
+)
+async def test_set_sound_receiver_real_world_head_above_torso_orientation(mocked_connection, mocker, test_input):
+    va, service = mocked_connection
+
+    method_name = "set_sound_receiver_real_world_head_above_torso_orientation"
+    message_name = "SetSoundReceiverRealWorldHeadAboveTorsoOrientationRequest"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.Empty(), autospec=True)
+
+    function = getattr(va, method_name)
+
+    function(test_input[0], test_input[1])
+
+    getattr(service, method_name).assert_called_once_with(
+        getattr(va_grpc, message_name)(
+            test_input[0],
+            va_grpc.Quaternion(*test_input[1]),
+        )
+    )
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    [
+        (
+            random.randint(0, 100),
+            [random.uniform(-100, 100) for _ in range(4)],
+        )
+        for _ in range(5)
+    ],
+)
+async def test_get_sound_receiver_real_world_head_above_torso_orientation(mocked_connection, mocker, test_input):
+    va, service = mocked_connection
+
+    method_name = "get_sound_receiver_real_world_head_above_torso_orientation"
+    message_name = "GetSoundReceiverRealWorldHeadAboveTorsoOrientationRequest"
+
+    reply = va_grpc.Quaternion(*test_input[1])
+
+    mocker.patch.object(service, method_name, return_value=reply, autospec=True)
+
+    function = getattr(va, method_name)
+
+    ret_val = function(test_input[0])
+
+    getattr(service, method_name).assert_called_once_with(getattr(va_grpc, message_name)(test_input[0]))
+    assert ret_val == VAQuaternion(*test_input[1])
diff --git a/tests/rendering_module_test.py b/tests/rendering_module_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..e550ede10234ab6025fac04ff79b00feff4584eb
--- /dev/null
+++ b/tests/rendering_module_test.py
@@ -0,0 +1,253 @@
+# SPDX-FileCopyrightText: 2024-present Pascal Palenda <pascal.palenda@akustik.rwth-aachen.de>
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import random
+
+import pytest
+from betterproto.lib.google import protobuf
+
+import vapython._helper as va_helper
+import vapython.vanet._helper as va_grpc_helper
+import vapython.vanet._vanet_grpc as va_grpc
+
+from .utils import random_grpc_struct, random_string, random_struct
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    range(5),
+)
+@pytest.mark.parametrize(
+    "filter_modules",
+    [True, False],
+)
+async def test_get_rendering_modules(mocked_connection, mocker, test_input, filter_modules):
+    va, service = mocked_connection
+
+    method_name = "get_rendering_modules"
+
+    info = va_grpc.AudioRendererInfo(
+        name=random_string(5),
+        description=random_string(5),
+        enabled=random.choice([True, False]),
+        output_detector_enabled=random.choice([True, False]),
+        output_recording_enabled=random.choice([True, False]),
+        output_recording_file_path=random_string(5),
+        parameters=random_grpc_struct(),
+    )
+    reply = va_grpc.AudioRendererInfos(
+        audio_renderer_infos=[info for _ in range(test_input)],
+    )
+
+    mocker.patch.object(service, method_name, return_value=reply, autospec=True)
+
+    function = getattr(va, method_name)
+
+    ret_val = function(only_enabled=filter_modules)
+
+    getattr(service, method_name).assert_called_once_with(
+        va_grpc.GetRenderingModulesRequest(only_enabled=filter_modules)
+    )
+    assert ret_val == reply
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "muted",
+    [True, False],
+)
+async def test_get_rendering_module_muted(mocked_connection, mocker, muted):
+    va, service = mocked_connection
+
+    method_name = "get_rendering_module_muted"
+    message_name = "GetRenderingModuleMutedRequest"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.BoolValue(muted), autospec=True)
+
+    identifier = random_string(5)
+
+    function = getattr(va, method_name)
+
+    ret_val = function(identifier)
+
+    getattr(service, method_name).assert_called_once_with(getattr(va_grpc, message_name)(identifier))
+    assert ret_val == muted
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "muted",
+    [True, False],
+)
+async def test_set_rendering_module_muted(mocked_connection, mocker, muted):
+    va, service = mocked_connection
+
+    method_name = "set_rendering_module_muted"
+    message_name = "SetRenderingModuleMutedRequest"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.Empty(), autospec=True)
+
+    string = random_string(5)
+
+    function = getattr(va, method_name)
+
+    function(string, muted=muted)
+
+    getattr(service, method_name).assert_called_once_with(getattr(va_grpc, message_name)(string, muted))
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    [(random_string(5), random.uniform(-1, 1)) for _ in range(5)],
+)
+async def test_set_rendering_module_gain(mocked_connection, mocker, test_input):
+    va, service = mocked_connection
+
+    method_name = "set_rendering_module_gain"
+    message_name = "SetRenderingModuleGainRequest"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.Empty(), autospec=True)
+
+    function = getattr(va, method_name)
+
+    function(test_input[0], test_input[1])
+
+    getattr(service, method_name).assert_called_once_with(getattr(va_grpc, message_name)(test_input[0], test_input[1]))
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    [(random_string(5), random.uniform(-1, 1)) for _ in range(5)],
+)
+async def test_get_rendering_module_gain(mocked_connection, mocker, test_input):
+    va, service = mocked_connection
+
+    method_name = "get_rendering_module_gain"
+    message_name = "GetRenderingModuleGainRequest"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.DoubleValue(test_input[1]), autospec=True)
+
+    function = getattr(va, method_name)
+
+    ret_val = function(test_input[0])
+
+    getattr(service, method_name).assert_called_once_with(getattr(va_grpc, message_name)(test_input[0]))
+    assert ret_val == test_input[1]
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    [
+        (
+            random_string(5),
+            random_struct(),
+        )
+        for _ in range(5)
+    ],
+)
+async def test_set_rendering_module_parameters(mocked_connection, mocker, test_input):
+    va, service = mocked_connection
+
+    method_name = "set_rendering_module_parameters"
+    message_name = "SetRenderingModuleParametersRequest"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.Empty(), autospec=True)
+
+    function = getattr(va, method_name)
+
+    function(test_input[0], test_input[1])
+
+    getattr(service, method_name).assert_called_once_with(
+        getattr(va_grpc, message_name)(
+            test_input[0],
+            va_grpc_helper.convert_struct_to_vanet(test_input[1]),
+        )
+    )
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    [
+        (
+            random_string(5),
+            random_struct(),
+            random_grpc_struct(),
+        )
+        for _ in range(5)
+    ],
+)
+async def test_get_rendering_module_parameters(mocked_connection, mocker, test_input):
+    va, service = mocked_connection
+
+    method_name = "get_rendering_module_parameters"
+    message_name = "GetRenderingModuleParametersRequest"
+
+    mocker.patch.object(service, method_name, return_value=test_input[2], autospec=True)
+
+    function = getattr(va, method_name)
+
+    ret_val = function(test_input[0], test_input[1])
+
+    getattr(service, method_name).assert_called_once_with(
+        getattr(va_grpc, message_name)(test_input[0], va_grpc_helper.convert_struct_to_vanet(test_input[1]))
+    )
+    assert ret_val == va_grpc_helper.convert_struct_from_vanet(test_input[2])
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    [
+        ("+DS", 0),
+        ("+DS,-ER", 0),
+        ("+DS,-ER,-SD", 5),
+        ("+DS,-ER,-SD", 8191),
+    ],
+)
+async def test_set_rendering_module_auralization_mode(mocked_connection, mocker, test_input):
+    va, service = mocked_connection
+
+    mocker.patch.object(service, "set_rendering_module_auralization_mode", return_value=protobuf.Empty(), autospec=True)
+    mocker.patch.object(
+        service,
+        "get_rendering_module_auralization_mode",
+        return_value=protobuf.Int32Value(value=test_input[1]),
+        autospec=True,
+    )
+
+    identifier = random_string(5)
+
+    va.set_rendering_module_auralization_mode(identifier, test_input[0])
+
+    service.set_rendering_module_auralization_mode.assert_called_once_with(
+        va_grpc.SetRenderingModuleAuralizationModeRequest(identifier, va_helper.parse_aura_mode_str(*test_input))
+    )
+    assert service.get_rendering_module_auralization_mode.called
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("aura_mode", [0, 1, 4096, 8191, 5, 7, 10])
+async def test_get_rendering_module_auralization_mode(mocked_connection, mocker, aura_mode):
+    va, service = mocked_connection
+
+    mocker.patch.object(
+        service,
+        "get_rendering_module_auralization_mode",
+        return_value=protobuf.Int32Value(value=aura_mode),
+        autospec=True,
+    )
+
+    identifier = random_string(5)
+
+    ret_val = va.get_rendering_module_auralization_mode(identifier)
+
+    service.get_rendering_module_auralization_mode.assert_called_once_with(
+        va_grpc.GetRenderingModuleAuralizationModeRequest(identifier)
+    )
+    assert ret_val == va_helper.convert_aura_mode_to_str(aura_mode)
diff --git a/tests/reproduction_module_test.py b/tests/reproduction_module_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..f5c7b2e567bd54b9965d321aed4fe141a60ca60c
--- /dev/null
+++ b/tests/reproduction_module_test.py
@@ -0,0 +1,202 @@
+# SPDX-FileCopyrightText: 2024-present Pascal Palenda <pascal.palenda@akustik.rwth-aachen.de>
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import random
+
+import pytest
+from betterproto.lib.google import protobuf
+
+import vapython.vanet._helper as va_grpc_helper
+import vapython.vanet._vanet_grpc as va_grpc
+
+from .utils import random_grpc_struct, random_string, random_struct
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    range(5),
+)
+@pytest.mark.parametrize(
+    "filter_modules",
+    [True, False],
+)
+async def test_get_reproduction_modules(mocked_connection, mocker, test_input, filter_modules):
+    va, service = mocked_connection
+
+    method_name = "get_reproduction_modules"
+
+    info = va_grpc.AudioReproductionInfo(
+        id=random_string(5),
+        description=random_string(5),
+        enabled=random.choice([True, False]),
+        input_detector_enabled=random.choice([True, False]),
+        input_recording_enabled=random.choice([True, False]),
+        input_recording_file_path=random_string(5),
+        bool_detector_enabled=random.choice([True, False]),
+        bool_recording_enabled=random.choice([True, False]),
+        bool_recording_file_path=random_string(5),
+        parameters=random_grpc_struct(),
+    )
+    reply = va_grpc.AudioReproductionInfos(
+        audio_reproduction_infos=[info for _ in range(test_input)],
+    )
+
+    mocker.patch.object(service, method_name, return_value=reply, autospec=True)
+
+    function = getattr(va, method_name)
+
+    ret_val = function(only_enabled=filter_modules)
+
+    getattr(service, method_name).assert_called_once_with(
+        va_grpc.GetReproductionModulesRequest(only_enabled=filter_modules)
+    )
+    assert ret_val == reply
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "muted",
+    [True, False],
+)
+async def test_get_reproduction_module_muted(mocked_connection, mocker, muted):
+    va, service = mocked_connection
+
+    method_name = "get_reproduction_module_muted"
+    message_name = "GetReproductionModuleMutedRequest"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.BoolValue(muted), autospec=True)
+
+    identifier = random_string(5)
+
+    function = getattr(va, method_name)
+
+    ret_val = function(identifier)
+
+    getattr(service, method_name).assert_called_once_with(getattr(va_grpc, message_name)(identifier))
+    assert ret_val == muted
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "muted",
+    [True, False],
+)
+async def test_set_reproduction_module_muted(mocked_connection, mocker, muted):
+    va, service = mocked_connection
+
+    method_name = "set_reproduction_module_muted"
+    message_name = "SetReproductionModuleMutedRequest"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.Empty(), autospec=True)
+
+    string = random_string(5)
+
+    function = getattr(va, method_name)
+
+    function(string, muted=muted)
+
+    getattr(service, method_name).assert_called_once_with(getattr(va_grpc, message_name)(string, muted))
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    [(random_string(5), random.uniform(-1, 1)) for _ in range(5)],
+)
+async def test_set_reproduction_module_gain(mocked_connection, mocker, test_input):
+    va, service = mocked_connection
+
+    method_name = "set_reproduction_module_gain"
+    message_name = "SetReproductionModuleGainRequest"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.Empty(), autospec=True)
+
+    function = getattr(va, method_name)
+
+    function(test_input[0], test_input[1])
+
+    getattr(service, method_name).assert_called_once_with(getattr(va_grpc, message_name)(test_input[0], test_input[1]))
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    [(random_string(5), random.uniform(-1, 1)) for _ in range(5)],
+)
+async def test_get_reproduction_module_gain(mocked_connection, mocker, test_input):
+    va, service = mocked_connection
+
+    method_name = "get_reproduction_module_gain"
+    message_name = "GetReproductionModuleGainRequest"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.DoubleValue(test_input[1]), autospec=True)
+
+    function = getattr(va, method_name)
+
+    ret_val = function(test_input[0])
+
+    getattr(service, method_name).assert_called_once_with(getattr(va_grpc, message_name)(test_input[0]))
+    assert ret_val == test_input[1]
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    [
+        (
+            random_string(5),
+            random_struct(),
+        )
+        for _ in range(5)
+    ],
+)
+async def test_set_reproduction_module_parameters(mocked_connection, mocker, test_input):
+    va, service = mocked_connection
+
+    method_name = "set_reproduction_module_parameters"
+    message_name = "SetReproductionModuleParametersRequest"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.Empty(), autospec=True)
+
+    function = getattr(va, method_name)
+
+    function(test_input[0], test_input[1])
+
+    getattr(service, method_name).assert_called_once_with(
+        getattr(va_grpc, message_name)(
+            test_input[0],
+            va_grpc_helper.convert_struct_to_vanet(test_input[1]),
+        )
+    )
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    [
+        (
+            random_string(5),
+            random_struct(),
+            random_grpc_struct(),
+        )
+        for _ in range(5)
+    ],
+)
+async def test_get_reproduction_module_parameters(mocked_connection, mocker, test_input):
+    va, service = mocked_connection
+
+    method_name = "get_reproduction_module_parameters"
+    message_name = "GetReproductionModuleParametersRequest"
+
+    mocker.patch.object(service, method_name, return_value=test_input[2], autospec=True)
+
+    function = getattr(va, method_name)
+
+    ret_val = function(test_input[0], test_input[1])
+
+    getattr(service, method_name).assert_called_once_with(
+        getattr(va_grpc, message_name)(test_input[0], va_grpc_helper.convert_struct_to_vanet(test_input[1]))
+    )
+    assert ret_val == va_grpc_helper.convert_struct_from_vanet(test_input[2])
diff --git a/tests/signal_source_test.py b/tests/signal_source_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..f4e585cf74dc0da9286b4010607dd2584e6bd88f
--- /dev/null
+++ b/tests/signal_source_test.py
@@ -0,0 +1,470 @@
+# SPDX-FileCopyrightText: 2024-present Pascal Palenda <pascal.palenda@akustik.rwth-aachen.de>
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import random
+from pathlib import Path
+
+import pytest
+from betterproto.lib.google import protobuf
+
+import vapython.vanet._helper as va_grpc_helper
+import vapython.vanet._vanet_grpc as va_grpc
+
+from .utils import random_grpc_struct, random_string, random_struct
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    [(random_string(5), random_struct(), random_string(5)) for _ in range(5)],
+)
+async def test_create_signal_source_prototype_from_parameters(mocked_connection, mocker, test_input):
+    """Verify the prototype creation call forwards name+converted struct and returns the reply's string id."""
+    va, service = mocked_connection
+
+    method_name = "create_signal_source_prototype_from_parameters"
+    message_name = "CreateSignalSourcePrototypeFromParametersRequest"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.StringValue(test_input[2]), autospec=True)
+
+    function = getattr(va, method_name)
+
+    ret_val = function(name=test_input[0], parameters=test_input[1])
+
+    getattr(service, method_name).assert_called_once_with(
+        getattr(va_grpc, message_name)(test_input[0], va_grpc_helper.convert_struct_to_vanet(test_input[1]))
+    )
+    assert ret_val == test_input[2]
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    [(random_string(5), random_struct(), random_string(5)) for _ in range(5)],
+)
+async def test_create_signal_source_buffer_from_parameters(mocked_connection, mocker, test_input):
+    """Verify buffer creation forwards name+converted struct (keyword form) and returns the string id."""
+    va, service = mocked_connection
+
+    method_name = "create_signal_source_buffer_from_parameters"
+    message_name = "CreateSignalSourceBufferFromParametersRequest"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.StringValue(test_input[2]), autospec=True)
+
+    function = getattr(va, method_name)
+
+    ret_val = function(name=test_input[0], parameters=test_input[1])
+
+    getattr(service, method_name).assert_called_once_with(
+        getattr(va_grpc, message_name)(
+            name=test_input[0], parameters=va_grpc_helper.convert_struct_to_vanet(test_input[1])
+        )
+    )
+    assert ret_val == test_input[2]
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    range(5),
+)
+@pytest.mark.parametrize(
+    "path_type",
+    [str, Path],
+)
+async def test_create_signal_source_buffer_from_file(mocked_connection, mocker, test_input, path_type):  # noqa: ARG001
+    """Verify the file convenience wrapper maps str/Path paths onto a {"filepath": ...} parameters struct."""
+    va, service = mocked_connection
+
+    public_method_name = "create_signal_source_buffer_from_file"
+    method_name = "create_signal_source_buffer_from_parameters"
+    message_name = "CreateSignalSourceBufferFromParametersRequest"
+
+    identifier = random_string(5)
+
+    mocker.patch.object(service, method_name, return_value=protobuf.StringValue(identifier), autospec=True)
+
+    test_path = path_type(random_string(5))
+
+    function = getattr(va, public_method_name)
+
+    ret_val = function(file_path=test_path)
+
+    getattr(service, method_name).assert_called_once_with(
+        getattr(va_grpc, message_name)(
+            name="", parameters=va_grpc_helper.convert_struct_to_vanet({"filepath": str(test_path)})
+        )
+    )
+    assert ret_val == identifier
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    [(random_string(5), random_string(5)) for _ in range(5)],
+)
+async def test_create_signal_source_text_to_speech(mocked_connection, mocker, test_input):
+    """Verify TTS source creation forwards the name and returns the reply's string id."""
+    va, service = mocked_connection
+
+    method_name = "create_signal_source_text_to_speech"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.StringValue(test_input[1]), autospec=True)
+
+    function = getattr(va, method_name)
+
+    ret_val = function(test_input[0])
+
+    getattr(service, method_name).assert_called_once_with(va_grpc.CreateSignalSourceTextToSpeechRequest(test_input[0]))
+    assert ret_val == test_input[1]
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    [(random_string(5), random_string(5)) for _ in range(5)],
+)
+async def test_create_signal_source_sequencer(mocked_connection, mocker, test_input):
+    """Verify sequencer source creation forwards the name and returns the reply's string id."""
+    va, service = mocked_connection
+
+    method_name = "create_signal_source_sequencer"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.StringValue(test_input[1]), autospec=True)
+
+    function = getattr(va, method_name)
+
+    ret_val = function(test_input[0])
+
+    getattr(service, method_name).assert_called_once_with(va_grpc.CreateSignalSourceSequencerRequest(test_input[0]))
+    assert ret_val == test_input[1]
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    [(random_string(5), random_struct(), random_string(5)) for _ in range(5)],
+)
+async def test_create_signal_source_engine(mocked_connection, mocker, test_input):
+    """Verify engine source creation forwards name+converted struct and returns the string id."""
+    va, service = mocked_connection
+
+    method_name = "create_signal_source_engine"
+    message_name = "CreateSignalSourceEngineRequest"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.StringValue(test_input[2]), autospec=True)
+
+    function = getattr(va, method_name)
+
+    ret_val = function(name=test_input[0], parameters=test_input[1])
+
+    getattr(service, method_name).assert_called_once_with(
+        getattr(va_grpc, message_name)(test_input[0], va_grpc_helper.convert_struct_to_vanet(test_input[1]))
+    )
+    assert ret_val == test_input[2]
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    [(random_string(5), random_struct(), random_string(5)) for _ in range(5)],
+)
+async def test_create_signal_source_machine(mocked_connection, mocker, test_input):
+    """Verify machine source creation forwards name+converted struct and returns the string id."""
+    va, service = mocked_connection
+
+    method_name = "create_signal_source_machine"
+    message_name = "CreateSignalSourceMachineRequest"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.StringValue(test_input[2]), autospec=True)
+
+    function = getattr(va, method_name)
+
+    ret_val = function(name=test_input[0], parameters=test_input[1])
+
+    getattr(service, method_name).assert_called_once_with(
+        getattr(va_grpc, message_name)(test_input[0], va_grpc_helper.convert_struct_to_vanet(test_input[1]))
+    )
+    assert ret_val == test_input[2]
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    [(random_string(5), random.choice([True, False])) for _ in range(5)],
+)
+async def test_delete_acoustic_material(mocked_connection, mocker, test_input):
+    va, service = mocked_connection
+
+    method_name = "delete_signal_source"
+    message_name = "DeleteSignalSourceRequest"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.BoolValue(test_input[1]), autospec=True)
+
+    function = getattr(va, method_name)
+
+    ret_val = function(test_input[0])
+
+    getattr(service, method_name).assert_called_once_with(getattr(va_grpc, message_name)(test_input[0]))
+    assert ret_val == test_input[1]
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    range(5),
+)
+async def test_get_signal_source_info(mocked_connection, mocker, test_input):  # noqa: ARG001
+    """Verify get_signal_source_info requests by id and returns the SignalSourceInfo reply unchanged."""
+    va, service = mocked_connection
+
+    method_name = "get_signal_source_info"
+    message_name = "GetSignalSourceInfoRequest"
+    reply_name = "SignalSourceInfo"
+
+    reply = getattr(va_grpc, reply_name)(
+        id=random_string(5),
+        name=random_string(5),
+        description=random_string(5),
+    )
+
+    mocker.patch.object(service, method_name, return_value=reply, autospec=True)
+
+    function = getattr(va, method_name)
+
+    ret_val = function(reply.id)
+
+    getattr(service, method_name).assert_called_once_with(getattr(va_grpc, message_name)(reply.id))
+    assert ret_val == reply
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    range(5),
+)
+async def test_get_signal_source_infos(mocked_connection, mocker, test_input):
+    """Verify get_signal_source_infos returns the reply holding test_input copies of the same info."""
+    va, service = mocked_connection
+
+    method_name = "get_signal_source_infos"
+
+    info = va_grpc.SignalSourceInfo(
+        id=random_string(5),
+        name=random_string(5),
+        description=random_string(5),
+    )
+    reply = va_grpc.SignalSourceInfos(
+        signal_source_infos=[info for _ in range(test_input)],
+    )
+
+    mocker.patch.object(service, method_name, return_value=reply, autospec=True)
+
+    function = getattr(va, method_name)
+
+    ret_val = function()
+
+    assert getattr(service, method_name).called
+    assert ret_val == reply
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    range(5),
+)
+@pytest.mark.parametrize(
+    "looping",
+    [True, False],
+)
+async def test_set_signal_source_buffer_looping(mocked_connection, mocker, test_input, looping):  # noqa: ARG001
+    """Verify the looping flag and source id are forwarded positionally in the request message."""
+    va, service = mocked_connection
+
+    method_name = "set_signal_source_buffer_looping"
+    message_name = "SetSignalSourceBufferLoopingRequest"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.Empty(), autospec=True)
+
+    string = random_string(5)
+
+    function = getattr(va, method_name)
+
+    function(string, looping=looping)
+
+    getattr(service, method_name).assert_called_once_with(getattr(va_grpc, message_name)(string, looping))
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    range(5),
+)
+@pytest.mark.parametrize(
+    "looping",
+    [True, False],
+)
+async def test_get_signal_source_buffer_looping(mocked_connection, mocker, test_input, looping):  # noqa: ARG001
+    """Verify the looping getter requests by id and unwraps the BoolValue reply."""
+    va, service = mocked_connection
+
+    method_name = "get_signal_source_buffer_looping"
+    message_name = "GetSignalSourceBufferLoopingRequest"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.BoolValue(looping), autospec=True)
+
+    string = random_string(5)
+
+    function = getattr(va, method_name)
+
+    ret_val = function(string)
+
+    getattr(service, method_name).assert_called_once_with(getattr(va_grpc, message_name)(string))
+    assert ret_val == looping
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    [
+        (
+            random_string(5),
+            random_struct(),
+        )
+        for _ in range(5)
+    ],
+)
+async def test_set_signal_source_parameters(mocked_connection, mocker, test_input):
+    """Verify set_signal_source_parameters sends the source id and the struct converted to vanet format."""
+    va, service = mocked_connection
+
+    method_name = "set_signal_source_parameters"
+    message_name = "SetSignalSourceParametersRequest"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.Empty(), autospec=True)
+
+    function = getattr(va, method_name)
+
+    function(test_input[0], test_input[1])
+
+    getattr(service, method_name).assert_called_once_with(
+        getattr(va_grpc, message_name)(
+            test_input[0],
+            va_grpc_helper.convert_struct_to_vanet(test_input[1]),
+        )
+    )
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    [
+        (
+            random_string(5),
+            random_struct(),
+            random_grpc_struct(),
+        )
+        for _ in range(5)
+    ],
+)
+async def test_get_signal_source_parameters(mocked_connection, mocker, test_input):
+    """Verify get_signal_source_parameters forwards id+args and converts the reply struct back from vanet."""
+    va, service = mocked_connection
+
+    method_name = "get_signal_source_parameters"
+    message_name = "GetSignalSourceParametersRequest"
+
+    mocker.patch.object(service, method_name, return_value=test_input[2], autospec=True)
+
+    function = getattr(va, method_name)
+
+    ret_val = function(test_input[0], test_input[1])
+
+    getattr(service, method_name).assert_called_once_with(
+        getattr(va_grpc, message_name)(test_input[0], va_grpc_helper.convert_struct_to_vanet(test_input[1]))
+    )
+    assert ret_val == va_grpc_helper.convert_struct_from_vanet(test_input[2])
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    [
+        (
+            random_string(5),
+            random.uniform(0, 10),
+        )
+        for _ in range(5)
+    ],
+)
+async def test_set_signal_source_buffer_playback_position(mocked_connection, mocker, test_input):
+    """Verify the playback position setter forwards id and position positionally in the request."""
+    va, service = mocked_connection
+
+    method_name = "set_signal_source_buffer_playback_position"
+    message_name = "SetSignalSourceBufferPlaybackPositionRequest"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.Empty(), autospec=True)
+
+    function = getattr(va, method_name)
+
+    function(test_input[0], test_input[1])
+
+    getattr(service, method_name).assert_called_once_with(
+        getattr(va_grpc, message_name)(
+            test_input[0],
+            test_input[1],
+        )
+    )
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "state",
+    [
+        va_grpc.PlaybackStateState.INVALID,
+        va_grpc.PlaybackStateState.STOPPED,
+        va_grpc.PlaybackStateState.PAUSED,
+        va_grpc.PlaybackStateState.PLAYING,
+    ],
+)
+async def test_get_signal_source_buffer_playback_state(mocked_connection, mocker, state):
+    va, service = mocked_connection
+
+    method_name = "get_signal_source_buffer_playback_state"
+    message_name = "GetSignalSourceBufferPlaybackStateRequest"
+
+    mocker.patch.object(service, method_name, return_value=va_grpc.PlaybackState(state=state), autospec=True)
+
+    identifier = random_string(5)
+
+    function = getattr(va, method_name)
+
+    ret_val = function(identifier)
+
+    getattr(service, method_name).assert_called_once_with(getattr(va_grpc, message_name)(identifier))
+    assert ret_val == state
+    assert ret_val.__str__() == state.name
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    [
+        ("none", va_grpc.PlaybackActionAction.NONE),
+        ("play", va_grpc.PlaybackActionAction.PLAY),
+        ("pause", va_grpc.PlaybackActionAction.PAUSE),
+        ("stop", va_grpc.PlaybackActionAction.STOP),
+        (va_grpc.PlaybackActionAction.NONE, va_grpc.PlaybackActionAction.NONE),
+        (va_grpc.PlaybackActionAction.PLAY, va_grpc.PlaybackActionAction.PLAY),
+        (va_grpc.PlaybackActionAction.PAUSE, va_grpc.PlaybackActionAction.PAUSE),
+        (va_grpc.PlaybackActionAction.STOP, va_grpc.PlaybackActionAction.STOP),
+    ],
+)
+async def test_set_signal_source_buffer_playback_action(mocked_connection, mocker, test_input):
+    """Verify both string and enum playback actions map onto the expected PlaybackAction in the request."""
+    va, service = mocked_connection
+
+    method_name = "set_signal_source_buffer_playback_action"
+    message_name = "SetSignalSourceBufferPlaybackActionRequest"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.Empty(), autospec=True)
+
+    identifier = random_string(5)
+
+    function = getattr(va, method_name)
+
+    function(identifier, test_input[0])
+
+    getattr(service, method_name).assert_called_once_with(
+        getattr(va_grpc, message_name)(
+            identifier,
+            va_grpc.PlaybackAction(test_input[1]),
+        )
+    )
diff --git a/tests/source_receiver_test.py b/tests/source_receiver_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..cbda6588b7f59951d9dd0019100773c06caa6da9
--- /dev/null
+++ b/tests/source_receiver_test.py
@@ -0,0 +1,848 @@
+# SPDX-FileCopyrightText: 2024-present Pascal Palenda <pascal.palenda@akustik.rwth-aachen.de>
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import random
+
+import pytest
+from betterproto.lib.google import protobuf
+
+import vapython.vanet._helper as va_grpc_helper
+import vapython.vanet._vanet_grpc as va_grpc
+from vapython._helper import convert_aura_mode_to_str
+from vapython._types import VAQuaternion, VAVector
+
+from .utils import random_grpc_struct, random_string, random_struct
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "entity",
+    [
+        "source",
+        "receiver",
+    ],
+)
+@pytest.mark.parametrize(
+    "test_input",
+    [(random_string(5), random.randint(0, 100)) for _ in range(5)],
+)
+async def test_create_(mocked_connection, mocker, entity, test_input):
+    """Verify create_sound_{source,receiver} sends the name and returns the Int32Value id."""
+    va, service = mocked_connection
+
+    method_name = f"create_sound_{entity}"
+    message_name = f"CreateSound{entity.capitalize()}Request"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.Int32Value(value=test_input[1]), autospec=True)
+
+    function = getattr(va, method_name)
+
+    ret_val = function(test_input[0])
+
+    getattr(service, method_name).assert_called_once_with(getattr(va_grpc, message_name)(test_input[0]))
+    assert ret_val == test_input[1]
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "entity",
+    [
+        "source",
+        "receiver",
+    ],
+)
+@pytest.mark.parametrize(
+    "test_input",
+    [(random_string(5), random_string(5), random.randint(0, 100)) for _ in range(5)],
+)
+async def test_create_explicit_renderer_(mocked_connection, mocker, entity, test_input):
+    """Verify explicit-renderer creation forwards name and renderer id, returning the Int32Value id."""
+    va, service = mocked_connection
+
+    method_name = f"create_sound_{entity}_explicit_renderer"
+    message_name = f"CreateSound{entity.capitalize()}ExplicitRendererRequest"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.Int32Value(value=test_input[2]), autospec=True)
+
+    function = getattr(va, method_name)
+
+    ret_val = function(test_input[0], test_input[1])
+
+    getattr(service, method_name).assert_called_once_with(getattr(va_grpc, message_name)(test_input[0], test_input[1]))
+    assert ret_val == test_input[2]
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "entity",
+    [
+        "source",
+        "receiver",
+    ],
+)
+@pytest.mark.parametrize(
+    "test_input",
+    [(random.randint(0, 100), random.choice([True, False])) for _ in range(5)],
+)
+async def test_delete_sound_(mocked_connection, mocker, entity, test_input):
+    """Verify delete_sound_{source,receiver} forwards the id and returns the BoolValue reply."""
+    va, service = mocked_connection
+
+    method_name = f"delete_sound_{entity}"
+    message_name = f"DeleteSound{entity.capitalize()}Request"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.BoolValue(value=test_input[1]), autospec=True)
+
+    function = getattr(va, method_name)
+
+    ret_val = function(test_input[0])
+
+    getattr(service, method_name).assert_called_once_with(getattr(va_grpc, message_name)(test_input[0]))
+    assert ret_val == test_input[1]
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "entity",
+    [
+        "source",
+        "receiver",
+    ],
+)
+@pytest.mark.parametrize(
+    "test_input",
+    [(random.randint(0, 100), random.choice([True, False])) for _ in range(5)],
+)
+async def test_set_sound_enabled_(mocked_connection, mocker, entity, test_input):
+    """Verify the enabled setter forwards id and flag positionally in the request message."""
+    va, service = mocked_connection
+
+    method_name = f"set_sound_{entity}_enabled"
+    message_name = f"SetSound{entity.capitalize()}EnabledRequest"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.Empty(), autospec=True)
+
+    function = getattr(va, method_name)
+
+    function(test_input[0], enabled=test_input[1])
+
+    getattr(service, method_name).assert_called_once_with(getattr(va_grpc, message_name)(test_input[0], test_input[1]))
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "entity",
+    [
+        "source",
+        "receiver",
+    ],
+)
+@pytest.mark.parametrize(
+    "test_input",
+    [(random.randint(0, 100), random.choice([True, False])) for _ in range(5)],
+)
+async def test_get_sound_enabled_(mocked_connection, mocker, entity, test_input):
+    """Verify the enabled getter requests by id and unwraps the BoolValue reply."""
+    va, service = mocked_connection
+
+    method_name = f"get_sound_{entity}_enabled"
+    message_name = f"GetSound{entity.capitalize()}EnabledRequest"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.BoolValue(test_input[1]), autospec=True)
+
+    function = getattr(va, method_name)
+
+    ret_val = function(test_input[0])
+
+    getattr(service, method_name).assert_called_once_with(getattr(va_grpc, message_name)(test_input[0]))
+    assert ret_val == test_input[1]
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "entity",
+    [
+        "source",
+        "receiver",
+    ],
+)
+@pytest.mark.parametrize(
+    "test_input",
+    [(random.randint(0, 100), random_string(5)) for _ in range(5)],
+)
+async def test_set_sound_name_(mocked_connection, mocker, entity, test_input):
+    """Verify the name setter forwards id and name positionally in the request message."""
+    va, service = mocked_connection
+
+    method_name = f"set_sound_{entity}_name"
+    message_name = f"SetSound{entity.capitalize()}NameRequest"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.Empty(), autospec=True)
+
+    function = getattr(va, method_name)
+
+    function(test_input[0], test_input[1])
+
+    getattr(service, method_name).assert_called_once_with(getattr(va_grpc, message_name)(test_input[0], test_input[1]))
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "entity",
+    [
+        "source",
+        "receiver",
+    ],
+)
+@pytest.mark.parametrize(
+    "test_input",
+    [(random.randint(0, 100), random_string(5)) for _ in range(5)],
+)
+async def test_get_sound_name_(mocked_connection, mocker, entity, test_input):
+    """Verify the name getter requests by id and unwraps the StringValue reply."""
+    va, service = mocked_connection
+
+    method_name = f"get_sound_{entity}_name"
+    message_name = f"GetSound{entity.capitalize()}NameRequest"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.StringValue(test_input[1]), autospec=True)
+
+    function = getattr(va, method_name)
+
+    ret_val = function(test_input[0])
+
+    getattr(service, method_name).assert_called_once_with(getattr(va_grpc, message_name)(test_input[0]))
+    assert ret_val == test_input[1]
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "entity",
+    [
+        "source",
+        "receiver",
+    ],
+)
+@pytest.mark.parametrize(
+    "test_input",
+    [(random.randint(0, 100), random.randint(0, 100)) for _ in range(5)],
+)
+async def test_set_sound_directivity_(mocked_connection, mocker, entity, test_input):
+    """Verify the directivity setter forwards entity id and directivity id positionally."""
+    va, service = mocked_connection
+
+    method_name = f"set_sound_{entity}_directivity"
+    message_name = f"SetSound{entity.capitalize()}DirectivityRequest"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.Empty(), autospec=True)
+
+    function = getattr(va, method_name)
+
+    function(test_input[0], test_input[1])
+
+    getattr(service, method_name).assert_called_once_with(getattr(va_grpc, message_name)(test_input[0], test_input[1]))
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "entity",
+    [
+        "source",
+        "receiver",
+    ],
+)
+@pytest.mark.parametrize(
+    "test_input",
+    [(random.randint(0, 100), random.randint(0, 100)) for _ in range(5)],
+)
+async def test_get_sound_directivity_(mocked_connection, mocker, entity, test_input):
+    """Verify the directivity getter requests by id and unwraps the Int32Value reply."""
+    va, service = mocked_connection
+
+    method_name = f"get_sound_{entity}_directivity"
+    message_name = f"GetSound{entity.capitalize()}DirectivityRequest"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.Int32Value(test_input[1]), autospec=True)
+
+    function = getattr(va, method_name)
+
+    ret_val = function(test_input[0])
+
+    getattr(service, method_name).assert_called_once_with(getattr(va_grpc, message_name)(test_input[0]))
+    assert ret_val == test_input[1]
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "entity",
+    [
+        "source",
+        "receiver",
+    ],
+)
+@pytest.mark.parametrize(
+    "test_input",
+    [(random.randint(0, 100), random.choice([True, False])) for _ in range(5)],
+)
+async def test_set_sound_muted_(mocked_connection, mocker, entity, test_input):
+    """Verify the muted setter forwards id and flag positionally in the request message."""
+    va, service = mocked_connection
+
+    method_name = f"set_sound_{entity}_muted"
+    message_name = f"SetSound{entity.capitalize()}MutedRequest"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.Empty(), autospec=True)
+
+    function = getattr(va, method_name)
+
+    function(test_input[0], muted=test_input[1])
+
+    getattr(service, method_name).assert_called_once_with(getattr(va_grpc, message_name)(test_input[0], test_input[1]))
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "entity",
+    [
+        "source",
+        "receiver",
+    ],
+)
+@pytest.mark.parametrize(
+    "test_input",
+    [(random.randint(0, 100), random.choice([True, False])) for _ in range(5)],
+)
+async def test_get_sound_muted_(mocked_connection, mocker, entity, test_input):
+    """Verify the muted getter requests by id and unwraps the BoolValue reply."""
+    va, service = mocked_connection
+
+    method_name = f"get_sound_{entity}_muted"
+    message_name = f"GetSound{entity.capitalize()}MutedRequest"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.BoolValue(test_input[1]), autospec=True)
+
+    function = getattr(va, method_name)
+
+    ret_val = function(test_input[0])
+
+    getattr(service, method_name).assert_called_once_with(getattr(va_grpc, message_name)(test_input[0]))
+    assert ret_val == test_input[1]
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "entity",
+    [
+        "source",
+        "receiver",
+    ],
+)
+@pytest.mark.parametrize(
+    "test_input",
+    [
+        (
+            random.randint(0, 100),
+            [random.uniform(-100, 100) for _ in range(3)],
+            [random.uniform(-100, 100) for _ in range(4)],
+        )
+        for _ in range(5)
+    ],
+)
+async def test_set_sound_pose_(mocked_connection, mocker, entity, test_input):
+    """Verify the pose setter wraps position as Vector3 and orientation as Quaternion in the request."""
+    va, service = mocked_connection
+
+    method_name = f"set_sound_{entity}_pose"
+    message_name = f"SetSound{entity.capitalize()}PoseRequest"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.Empty(), autospec=True)
+
+    function = getattr(va, method_name)
+
+    function(test_input[0], test_input[1], test_input[2])
+
+    getattr(service, method_name).assert_called_once_with(
+        getattr(va_grpc, message_name)(
+            test_input[0],
+            va_grpc.Vector3(*test_input[1]),
+            va_grpc.Quaternion(*test_input[2]),
+        )
+    )
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "entity",
+    [
+        "source",
+        "receiver",
+    ],
+)
+@pytest.mark.parametrize(
+    "test_input",
+    [
+        (
+            random.randint(0, 100),
+            [random.uniform(-100, 100) for _ in range(3)],
+            [random.uniform(-100, 100) for _ in range(4)],
+        )
+        for _ in range(5)
+    ],
+)
+async def test_get_sound_pose_(mocked_connection, mocker, entity, test_input):
+    """Verify the pose getter requests by id and returns the Vector3/Quaternion reply unchanged."""
+    va, service = mocked_connection
+
+    method_name = f"get_sound_{entity}_pose"
+    message_name = f"GetSound{entity.capitalize()}PoseRequest"
+    reply_name = f"GetSound{entity.capitalize()}PoseReply"
+
+    reply = getattr(va_grpc, reply_name)(va_grpc.Vector3(*test_input[1]), va_grpc.Quaternion(*test_input[2]))
+
+    mocker.patch.object(service, method_name, return_value=reply, autospec=True)
+
+    function = getattr(va, method_name)
+
+    ret_val = function(test_input[0])
+
+    getattr(service, method_name).assert_called_once_with(getattr(va_grpc, message_name)(test_input[0]))
+    assert ret_val == reply
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "entity",
+    [
+        "source",
+        "receiver",
+    ],
+)
+@pytest.mark.parametrize(
+    "test_input",
+    [
+        (
+            random.randint(0, 100),
+            [random.uniform(-100, 100) for _ in range(3)],
+        )
+        for _ in range(5)
+    ],
+)
+async def test_set_sound_position_(mocked_connection, mocker, entity, test_input):
+    """Verify the position setter wraps the coordinate list as a Vector3 in the request."""
+    va, service = mocked_connection
+
+    method_name = f"set_sound_{entity}_position"
+    message_name = f"SetSound{entity.capitalize()}PositionRequest"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.Empty(), autospec=True)
+
+    function = getattr(va, method_name)
+
+    function(test_input[0], test_input[1])
+
+    getattr(service, method_name).assert_called_once_with(
+        getattr(va_grpc, message_name)(
+            test_input[0],
+            va_grpc.Vector3(*test_input[1]),
+        )
+    )
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "entity",
+    [
+        "source",
+        "receiver",
+    ],
+)
+@pytest.mark.parametrize(
+    "test_input",
+    [
+        (
+            random.randint(0, 100),
+            [random.uniform(-100, 100) for _ in range(3)],
+        )
+        for _ in range(5)
+    ],
+)
+async def test_get_sound_position_(mocked_connection, mocker, entity, test_input):
+    """Verify the position getter converts the Vector3 reply into a VAVector."""
+    va, service = mocked_connection
+
+    method_name = f"get_sound_{entity}_position"
+    message_name = f"GetSound{entity.capitalize()}PositionRequest"
+
+    reply = va_grpc.Vector3(*test_input[1])
+
+    mocker.patch.object(service, method_name, return_value=reply, autospec=True)
+
+    function = getattr(va, method_name)
+
+    ret_val = function(test_input[0])
+
+    getattr(service, method_name).assert_called_once_with(getattr(va_grpc, message_name)(test_input[0]))
+    assert ret_val == VAVector(*test_input[1])
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "entity",
+    [
+        "source",
+        "receiver",
+    ],
+)
+@pytest.mark.parametrize(
+    "test_input",
+    [
+        (
+            random.randint(0, 100),
+            [random.uniform(-100, 100) for _ in range(4)],
+        )
+        for _ in range(5)
+    ],
+)
+async def test_set_sound_orientation_(mocked_connection, mocker, entity, test_input):
+    """Verify the orientation setter wraps the 4-component list as a Quaternion in the request."""
+    va, service = mocked_connection
+
+    method_name = f"set_sound_{entity}_orientation"
+    message_name = f"SetSound{entity.capitalize()}OrientationRequest"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.Empty(), autospec=True)
+
+    function = getattr(va, method_name)
+
+    function(test_input[0], test_input[1])
+
+    getattr(service, method_name).assert_called_once_with(
+        getattr(va_grpc, message_name)(
+            test_input[0],
+            va_grpc.Quaternion(*test_input[1]),
+        )
+    )
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "entity",
+    [
+        "source",
+        "receiver",
+    ],
+)
+@pytest.mark.parametrize(
+    "test_input",
+    [
+        (
+            random.randint(0, 100),
+            [random.uniform(-100, 100) for _ in range(4)],
+        )
+        for _ in range(5)
+    ],
+)
+async def test_get_sound_orientation_(mocked_connection, mocker, entity, test_input):
+    """Verify the orientation getter converts the Quaternion reply into a VAQuaternion."""
+    va, service = mocked_connection
+
+    method_name = f"get_sound_{entity}_orientation"
+    message_name = f"GetSound{entity.capitalize()}OrientationRequest"
+
+    reply = va_grpc.Quaternion(*test_input[1])
+
+    mocker.patch.object(service, method_name, return_value=reply, autospec=True)
+
+    function = getattr(va, method_name)
+
+    ret_val = function(test_input[0])
+
+    getattr(service, method_name).assert_called_once_with(getattr(va_grpc, message_name)(test_input[0]))
+    assert ret_val == VAQuaternion(*test_input[1])
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "entity",
+    [
+        "source",
+        "receiver",
+    ],
+)
+@pytest.mark.parametrize(
+    "version",
+    [
+        "_vu",
+        "_view_up",
+    ],
+)
+@pytest.mark.parametrize(
+    "test_input",
+    [
+        (
+            random.randint(0, 100),
+            [random.uniform(-100, 100) for _ in range(3)],
+            [random.uniform(-100, 100) for _ in range(3)],
+        )
+        for _ in range(5)
+    ],
+)
+async def test_set_sound_orientation_vu_(mocked_connection, mocker, entity, version, test_input):
+    """Verify both the _vu and _view_up public aliases hit the same _vu service method with two Vector3s."""
+    va, service = mocked_connection
+
+    public_method_name = f"set_sound_{entity}_orientation{version}"
+    method_name = f"set_sound_{entity}_orientation_vu"
+    message_name = f"SetSound{entity.capitalize()}OrientationVuRequest"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.Empty(), autospec=True)
+
+    function = getattr(va, public_method_name)
+
+    function(test_input[0], test_input[1], test_input[2])
+
+    getattr(service, method_name).assert_called_once_with(
+        getattr(va_grpc, message_name)(
+            test_input[0],
+            va_grpc.Vector3(*test_input[1]),
+            va_grpc.Vector3(*test_input[2]),
+        )
+    )
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "entity",
+    [
+        "source",
+        "receiver",
+    ],
+)
+@pytest.mark.parametrize(
+    "version",
+    [
+        "_vu",
+        "_view_up",
+    ],
+)
+@pytest.mark.parametrize(
+    "test_input",
+    [
+        (
+            random.randint(0, 100),
+            [random.uniform(-100, 100) for _ in range(3)],
+            [random.uniform(-100, 100) for _ in range(3)],
+        )
+        for _ in range(5)
+    ],
+)
+async def test_get_sound_orientation_vu_(mocked_connection, mocker, entity, version, test_input):
+    """Verify both the _vu and _view_up public aliases call the _vu getter and return the reply unchanged."""
+    va, service = mocked_connection
+
+    public_method_name = f"get_sound_{entity}_orientation{version}"
+    method_name = f"get_sound_{entity}_orientation_vu"
+    message_name = f"GetSound{entity.capitalize()}OrientationVuRequest"
+    reply_name = f"GetSound{entity.capitalize()}OrientationVuReply"
+
+    reply = getattr(va_grpc, reply_name)(va_grpc.Vector3(*test_input[1]), va_grpc.Vector3(*test_input[2]))
+
+    mocker.patch.object(service, method_name, return_value=reply, autospec=True)
+
+    function = getattr(va, public_method_name)
+
+    ret_val = function(test_input[0])
+
+    getattr(service, method_name).assert_called_once_with(getattr(va_grpc, message_name)(test_input[0]))
+    assert ret_val == reply
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "entity",
+    [
+        "source",
+        "receiver",
+    ],
+)
+@pytest.mark.parametrize(
+    "test_input",
+    [
+        (
+            random.randint(0, 100),
+            random_struct(),
+        )
+        for _ in range(5)
+    ],
+)
+async def test_set_sound_parameters_(mocked_connection, mocker, entity, test_input):
+    va, service = mocked_connection
+
+    method_name = f"set_sound_{entity}_parameters"
+    message_name = f"SetSound{entity.capitalize()}ParametersRequest"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.Empty(), autospec=True)
+
+    function = getattr(va, method_name)
+
+    function(test_input[0], test_input[1])
+
+    getattr(service, method_name).assert_called_once_with(
+        getattr(va_grpc, message_name)(
+            test_input[0],
+            va_grpc_helper.convert_struct_to_vanet(test_input[1]),
+        )
+    )
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "entity",
+    [
+        "source",
+        "receiver",
+    ],
+)
+@pytest.mark.parametrize(
+    "test_input",
+    [
+        (
+            random.randint(0, 100),
+            random_struct(),
+            random_grpc_struct(),
+        )
+        for _ in range(5)
+    ],
+)
+async def test_get_sound_parameters_(mocked_connection, mocker, entity, test_input):
+    va, service = mocked_connection
+
+    method_name = f"get_sound_{entity}_parameters"
+    message_name = f"GetSound{entity.capitalize()}ParametersRequest"
+
+    mocker.patch.object(service, method_name, return_value=test_input[2], autospec=True)
+
+    function = getattr(va, method_name)
+
+    ret_val = function(test_input[0], test_input[1])
+
+    getattr(service, method_name).assert_called_once_with(
+        getattr(va_grpc, message_name)(test_input[0], va_grpc_helper.convert_struct_to_vanet(test_input[1]))
+    )
+    assert ret_val == va_grpc_helper.convert_struct_from_vanet(test_input[2])
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "entity",
+    [
+        "source",
+        "receiver",
+    ],
+)
+@pytest.mark.parametrize(
+    "test_input",
+    [(random.randint(0, 100),) for _ in range(2)],
+)
+@pytest.mark.parametrize(
+    "aura_mode",
+    [
+        ("+DS", 0, 1),
+        ("+DS,-ER", 0, 1),
+        ("+DS,-ER,-SD", 5, 5),
+        ("+DS,-ER,-SD", 8191, 8181),
+    ],
+)
+async def test_set_sound_auralization_mode_(mocked_connection, mocker, entity, test_input, aura_mode):
+    va, service = mocked_connection
+
+    method_name = f"set_sound_{entity}_auralization_mode"
+    method_name_getter = f"get_sound_{entity}_auralization_mode"
+    message_name = f"SetSound{entity.capitalize()}AuralizationModeRequest"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.Empty(), autospec=True)
+    mocker.patch.object(
+        service, method_name_getter, return_value=protobuf.Int32Value(value=aura_mode[1]), autospec=True
+    )
+
+    function = getattr(va, method_name)
+
+    function(test_input[0], aura_mode[0])
+
+    getattr(service, method_name).assert_called_once_with(getattr(va_grpc, message_name)(test_input[0], aura_mode[2]))
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "entity",
+    [
+        "source",
+        "receiver",
+    ],
+)
+@pytest.mark.parametrize(
+    "test_input",
+    [(random.randint(0, 100),) for _ in range(2)],
+)
+@pytest.mark.parametrize(
+    "aura_mode",
+    [0, 1, 4096, 8191, 5, 7, 10],
+)
+async def test_get_sound_auralization_mode_(mocked_connection, mocker, entity, test_input, aura_mode):
+    va, service = mocked_connection
+
+    method_name = f"get_sound_{entity}_auralization_mode"
+    message_name = f"GetSound{entity.capitalize()}AuralizationModeRequest"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.Int32Value(aura_mode), autospec=True)
+
+    function = getattr(va, method_name)
+
+    ret_val = function(test_input[0])
+
+    getattr(service, method_name).assert_called_once_with(getattr(va_grpc, message_name)(test_input[0]))
+    assert ret_val == convert_aura_mode_to_str(aura_mode)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "entity",
+    [
+        "source",
+        "receiver",
+    ],
+)
+@pytest.mark.parametrize(
+    "test_input",
+    [([random.randint(0, 100) for _ in range(3)],) for _ in range(5)],
+)
+async def test_get_sound_ids_(mocked_connection, mocker, entity, test_input):
+    va, service = mocked_connection
+
+    method_name = f"get_sound_{entity}_ids"
+    method_name_server = f"get_sound_{entity}_i_ds"
+
+    mocker.patch.object(service, method_name_server, return_value=va_grpc.IntIdVector(test_input[0]), autospec=True)
+
+    function = getattr(va, method_name)
+
+    ret_val = function()
+
+    assert getattr(service, method_name_server).called
+    assert ret_val == va_grpc.IntIdVector(test_input[0])
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "entity",
+    [
+        "source",
+        "receiver",
+    ],
+)
+@pytest.mark.parametrize(
+    "test_input",
+    [(random.randint(0, 100),) for _ in range(5)],
+)
+async def test_get_sound_info_(mocked_connection, mocker, entity, test_input):
+    va, service = mocked_connection
+
+    method_name = f"get_sound_{entity}_info"
+    reply_name = f"Sound{entity.capitalize()}Info"
+
+    reply = getattr(va_grpc, reply_name)(
+        id=test_input[0],
+        name=random_string(5),
+        explicit_renderer_id=random_string(5),
+    )
+
+    mocker.patch.object(service, method_name, return_value=reply, autospec=True)
+
+    function = getattr(va, method_name)
+
+    ret_val = function(test_input[0])
+
+    assert getattr(service, method_name).called
+    assert ret_val == reply
diff --git a/tests/source_test.py b/tests/source_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..5a80287d39d85f10e2dfe1a061f7036eb2553af7
--- /dev/null
+++ b/tests/source_test.py
@@ -0,0 +1,117 @@
+# SPDX-FileCopyrightText: 2024-present Pascal Palenda <pascal.palenda@akustik.rwth-aachen.de>
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import random
+
+import pytest
+from betterproto.lib.google import protobuf
+
+import vapython.vanet._vanet_grpc as va_grpc
+
+from .utils import random_string
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    [(random.randint(0, 100), random_string(5)) for _ in range(5)],
+)
+async def test_set_sound_signal_source(mocked_connection, mocker, test_input):
+    va, service = mocked_connection
+
+    method_name = "set_sound_source_signal_source"
+    message_name = "SetSoundSourceSignalSourceRequest"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.Empty(), autospec=True)
+
+    function = getattr(va, method_name)
+
+    function(test_input[0], test_input[1])
+
+    getattr(service, method_name).assert_called_once_with(getattr(va_grpc, message_name)(test_input[0], test_input[1]))
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    [(random.randint(0, 100), random_string(5)) for _ in range(5)],
+)
+async def test_get_sound_signal_source(mocked_connection, mocker, test_input):
+    va, service = mocked_connection
+
+    method_name = "get_sound_source_signal_source"
+    message_name = "GetSoundSourceSignalSourceRequest"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.StringValue(test_input[1]), autospec=True)
+
+    function = getattr(va, method_name)
+
+    ret_val = function(test_input[0])
+
+    getattr(service, method_name).assert_called_once_with(getattr(va_grpc, message_name)(test_input[0]))
+    assert ret_val == test_input[1]
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    range(5),
+)
+async def test_remove_sound_source_signal_source(mocked_connection, mocker, test_input):  # noqa: ARG001
+    va, service = mocked_connection
+
+    public_method_name = "remove_sound_source_signal_source"
+    method_name = "set_sound_source_signal_source"
+    message_name = "SetSoundSourceSignalSourceRequest"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.Empty(), autospec=True)
+
+    function = getattr(va, public_method_name)
+
+    source_id = random.randint(0, 100)
+
+    function(source_id)
+
+    getattr(service, method_name).assert_called_once_with(getattr(va_grpc, message_name)(source_id, ""))
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    [(random.randint(0, 100), random.uniform(-1, 1)) for _ in range(5)],
+)
+async def test_set_sound_sound_power(mocked_connection, mocker, test_input):
+    va, service = mocked_connection
+
+    method_name = "set_sound_source_sound_power"
+    message_name = "SetSoundSourceSoundPowerRequest"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.Empty(), autospec=True)
+
+    function = getattr(va, method_name)
+
+    function(test_input[0], test_input[1])
+
+    getattr(service, method_name).assert_called_once_with(getattr(va_grpc, message_name)(test_input[0], test_input[1]))
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "test_input",
+    [(random.randint(0, 100), random.uniform(-1, 1)) for _ in range(5)],
+)
+async def test_get_sound_sound_power(mocked_connection, mocker, test_input):
+    va, service = mocked_connection
+
+    method_name = "get_sound_source_sound_power"
+    message_name = "GetSoundSourceSoundPowerRequest"
+
+    mocker.patch.object(service, method_name, return_value=protobuf.DoubleValue(test_input[1]), autospec=True)
+
+    function = getattr(va, method_name)
+
+    ret_val = function(test_input[0])
+
+    getattr(service, method_name).assert_called_once_with(getattr(va_grpc, message_name)(test_input[0]))
+    assert ret_val == test_input[1]
diff --git a/tests/timer_test.py b/tests/timer_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..26d95b072ff85b20904ed3a988fdf4f6bbe04395
--- /dev/null
+++ b/tests/timer_test.py
@@ -0,0 +1,78 @@
+# SPDX-FileCopyrightText: 2024-present Pascal Palenda <pascal.palenda@akustik.rwth-aachen.de>
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import random
+import time
+
+import pytest
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("test_input", range(5))
+async def test_close_timer(mocked_connection, test_input):  # noqa: ARG001
+    va, _ = mocked_connection
+
+    va._timer_interval = random.uniform(0, 10)
+
+    va.close_timer()
+
+    assert va._timer_interval == -1
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("test_input", range(5))
+async def test_set_timer(mocked_connection, test_input):  # noqa: ARG001
+    va, _ = mocked_connection
+
+    interval = random.uniform(0, 10)
+
+    va.set_timer(interval)
+
+    assert va._timer_interval == interval
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("test_input", range(5))
+async def test_set_timer_wrong_input(mocked_connection, test_input):  # noqa: ARG001
+    va, _ = mocked_connection
+
+    interval = random.uniform(-10, -1)
+
+    with pytest.raises(ValueError, match="Interval must be greater"):
+        va.set_timer(interval)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("test_input", range(5))
+@pytest.mark.xfail(reason="This test is not reliable")
+async def test_wait_for_timer(mocked_connection, test_input):  # noqa: ARG001
+    va, _ = mocked_connection
+
+    interval = random.uniform(1e-3, 10e-3)
+    interval_ns = interval * 1e9
+    dummy_execution_time_ratio = random.uniform(0, 0.8)
+
+    va.set_timer(interval)
+
+    runs = 100
+    diffs = []
+    start = time.perf_counter_ns()
+    for _ in range(runs):
+        time.sleep(interval * dummy_execution_time_ratio)  # noqa: ASYNC101
+
+        va.wait_for_timer()
+
+        now = time.perf_counter_ns()
+        diffs.append(now - start)
+        start = now
+
+    # remove the first 5 values as they may not be accurate
+    diffs = diffs[5:]
+
+    avg = sum(diffs) / len(diffs)
+    assert avg >= interval_ns
+    assert pytest.approx(interval_ns, abs=1e6) == avg
+
+    for i, diff in enumerate(diffs):
+        assert pytest.approx(interval_ns, abs=1e6) == diff, f"Failed at index {i} with diff {diff}"
diff --git a/tests/utils.py b/tests/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..8ca10d8e7713999fde54f8ba7e330f1d2c02ba8c
--- /dev/null
+++ b/tests/utils.py
@@ -0,0 +1,35 @@
+# SPDX-FileCopyrightText: 2024-present Pascal Palenda <pascal.palenda@akustik.rwth-aachen.de>
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import random
+import string
+
+import vapython.vanet._helper as helper
+from vapython._types import VAStruct
+from vapython.vanet._vanet_grpc import Struct as VanetStruct
+
+
+def random_string(length: int) -> str:
+    return "".join(random.choice(string.ascii_letters) for _ in range(length))
+
+
+def random_struct() -> VAStruct:
+    struct: VAStruct = {}
+
+    struct["bool"] = random.choice([True, False])
+    struct["int"] = random.randint(-100, 100)
+    struct["double"] = random.uniform(-100, 100)
+    struct["string"] = random_string(5)
+
+    nested_struct: VAStruct = {}
+    nested_struct["bytes"] = b"\x00\x01\x02"
+    nested_struct["buffer"] = [random.uniform(-100, 100) for _ in range(random.randint(0, 5))]
+
+    struct["struct"] = nested_struct
+
+    return struct
+
+
+def random_grpc_struct() -> VanetStruct:
+    return helper.convert_struct_to_vanet(random_struct())
diff --git a/tests/va_test_connection.py b/tests/va_test_connection.py
deleted file mode 100644
index 7f968995297de7f6042927aa0f55a0715842fbb8..0000000000000000000000000000000000000000
--- a/tests/va_test_connection.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# VA is used as a singleton.
-# You can access va in every script, function and method.
-
-# Add va module if it was not installed
-import sys
-sys.path.append( '../Lib/site-packages' ) # deploy structure
-
-import VAPython as va
-
-print( "Testing va extension connection methods." )
-
-if va.connect() :
-	print( "Successfully connected to local server without arguments" )
-	va.disconnect() # direct disconnect
-else :
-	print( "Connection failed" )
-
-if va.connect( "localhost" ) :
-	print( "Successfully connected to local server with localhost argument" )
-else :
-	print( "Connection failed" )
-
-# sensitive disconnect
-if va.is_connected() :
-	va.disconnect()
-
-if va.connect( "localhost", 12340 ) :
-	print( "Successfully connected to local server with localhost and port 12340 argument" )
-else :
-	print( "Connection failed" )
-
-print( "Disconnect." )
-va.disconnect()
-
-import time
-
-import warnings
-
-with warnings.catch_warnings() :
-	warnings.simplefilter( "always" )
-	
-	time.sleep( 1 )
-	print( "Double disconnect:" )
-	va.disconnect() # double disconnect should raise warning
-
-	va.connect()
-
-	time.sleep( 1 )
-	print( "Double connect:" )
-	va.connect() # double connect should raise forced disconnection warning
-
-va.disconnect()
-print( "Test done." )
diff --git a/tests/vanet/__init__.py b/tests/vanet/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/tests/vanet/helper_test.py b/tests/vanet/helper_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..ee39b29ed20d2e65d906cfd6c9e10eb281bc6ec4
--- /dev/null
+++ b/tests/vanet/helper_test.py
@@ -0,0 +1,172 @@
+# SPDX-FileCopyrightText: 2024-present Pascal Palenda <pascal.palenda@akustik.rwth-aachen.de>
+#
+# SPDX-License-Identifier: Apache-2.0
+
+
+import pytest
+
+from vapython._types import VAQuaternion, VAStruct, VAVector
+from vapython.vanet import _helper
+from vapython.vanet._vanet_grpc import Quaternion as VanetQuaternion
+from vapython.vanet._vanet_grpc import SampleBuffer as VanetSampleBuffer
+from vapython.vanet._vanet_grpc import Struct as VanetStruct
+from vapython.vanet._vanet_grpc import Value as VanetStructValue
+from vapython.vanet._vanet_grpc import Vector3 as VanetVector
+
+
+def test_convert_struct_to_vanet():
+    input_struct = {
+        "bool_field": True,
+        "int_field": 10,
+        "float_field": 3.14,
+        "str_field": "Hello",
+        "nested_struct_field": {"nested_bool_field": False, "nested_int_field": 20},
+        "bytes_field": b"\x00\x01\x02",
+        "buffer_field": [1.0, 2.0, 3.0],
+    }
+
+    expected_output = VanetStruct()
+    expected_output.fields["bool_field"] = VanetStructValue(bool_value=True)
+    expected_output.fields["int_field"] = VanetStructValue(integer_value=10)
+    expected_output.fields["float_field"] = VanetStructValue(double_value=3.14)
+    expected_output.fields["str_field"] = VanetStructValue(string_value="Hello")
+    expected_output.fields["nested_struct_field"] = VanetStructValue(struct_value=VanetStruct())
+    expected_output.fields["nested_struct_field"].struct_value.fields["nested_bool_field"] = VanetStructValue(
+        bool_value=False
+    )
+    expected_output.fields["nested_struct_field"].struct_value.fields["nested_int_field"] = VanetStructValue(
+        integer_value=20
+    )
+    expected_output.fields["bytes_field"] = VanetStructValue(data_value=b"\x00\x01\x02")
+    expected_output.fields["buffer_field"] = VanetStructValue(buffer_value=VanetSampleBuffer([1.0, 2.0, 3.0]))
+
+    assert _helper.convert_struct_to_vanet(input_struct) == expected_output
+
+
+def test_convert_struct_to_vanet_invalid_input():
+    with pytest.raises(TypeError, match="input_struct must be of type VAStruct or dict"):
+        _helper.convert_struct_to_vanet("invalid_input")
+
+    with pytest.raises(TypeError, match="input_struct must be of type VAStruct or dict"):
+        _helper.convert_struct_to_vanet(1)
+
+    with pytest.raises(ValueError, match="Unknown type of value in struct!"):
+        _helper.convert_struct_to_vanet({"invalid_field": [1, 2, 3]})
+
+
+def test_convert_struct_to_vanet_correct_input():
+    data: VAStruct = {}
+    _helper.convert_struct_to_vanet(data)  # should not raise an exception
+    _helper.convert_struct_to_vanet({})  # should not raise an exception
+
+
+def test_convert_struct_from_vanet():
+    input_struct = VanetStruct()
+    input_struct.fields["key1"] = VanetStructValue(bool_value=True)
+    input_struct.fields["key2"] = VanetStructValue(integer_value=10)
+    input_struct.fields["key3"] = VanetStructValue(double_value=3.14)
+    input_struct.fields["key4"] = VanetStructValue(string_value="hello")
+
+    nested_struct = VanetStruct()
+    nested_struct.fields["nested_key"] = VanetStructValue(integer_value=20)
+    input_struct.fields["key5"] = VanetStructValue(struct_value=nested_struct)
+
+    input_struct.fields["key6"] = VanetStructValue(data_value=b"\x00\x01\x02")
+    input_struct.fields["key7"] = VanetStructValue(buffer_value=VanetSampleBuffer([1.0, 2.0, 3.0]))
+
+    output_struct = _helper.convert_struct_from_vanet(input_struct)
+
+    assert output_struct == {
+        "key1": True,
+        "key2": 10,
+        "key3": 3.14,
+        "key4": "hello",
+        "key5": {"nested_key": 20},
+        "key6": b"\x00\x01\x02",
+        "key7": [1.0, 2.0, 3.0],
+    }
+
+
+def test_convert_struct_from_vanet_invalid_inputs():
+    with pytest.raises(TypeError, match="input_struct must be of type VanetStruct"):
+        _helper.convert_struct_from_vanet("invalid_input")
+
+    with pytest.raises(TypeError, match="input_struct must be of type VanetStruct"):
+        _helper.convert_struct_from_vanet(1)
+
+    data = VanetStruct()
+    data.fields["key"] = VanetStructValue()
+    with pytest.raises(ValueError, match="Value in struct not set!"):
+        _helper.convert_struct_from_vanet(data)
+
+
+def test_convert_vector_to_vanet_with_va_vector():
+    input_vector = VAVector(x=1.0, y=2.0, z=3.0)
+    expected_output = VanetVector(x=1.0, y=2.0, z=3.0)
+    assert _helper.convert_vector_to_vanet(input_vector) == expected_output
+
+
+def test_convert_vector_to_vanet_with_list():
+    input_vector = [4.0, 5.0, 6.0]
+    expected_output = VanetVector(x=4.0, y=5.0, z=6.0)
+    assert _helper.convert_vector_to_vanet(input_vector) == expected_output
+
+
+def test_convert_vector_to_vanet_with_tuple():
+    input_vector = (7.0, 8.0, 9.0)
+    expected_output = VanetVector(x=7.0, y=8.0, z=9.0)
+    assert _helper.convert_vector_to_vanet(input_vector) == expected_output
+
+
+def test_convert_vector_to_vanet_with_invalid_input():
+    input_vector = "invalid"
+    with pytest.raises(ValueError, match="Vector must be of type VAVector, list or tuple"):
+        _helper.convert_vector_to_vanet(input_vector)
+
+
+def test_convert_vector_to_vanet_with_invalid_length():
+    input_vector = [1.0, 2.0]
+    with pytest.raises(ValueError, match="Vector must be of length 3"):
+        _helper.convert_vector_to_vanet(input_vector)
+
+
+def test_convert_vector_from_vanet():
+    input_vector = VanetVector(x=1, y=2, z=3)
+    expected_output = VAVector(x=1, y=2, z=3)
+    assert _helper.convert_vector_from_vanet(input_vector) == expected_output
+
+
+def test_convert_quaternion_to_vanet_with_va_quaternion():
+    input_quaternion = VAQuaternion(x=1.0, y=2.0, z=3.0, w=4.0)
+    expected_output = VanetQuaternion(x=1.0, y=2.0, z=3.0, w=4.0)
+    assert _helper.convert_quaternion_to_vanet(input_quaternion) == expected_output
+
+
+def test_convert_quaternion_to_vanet_with_list():
+    input_quaternion = [1.0, 2.0, 3.0, 4.0]
+    expected_output = VanetQuaternion(x=1.0, y=2.0, z=3.0, w=4.0)
+    assert _helper.convert_quaternion_to_vanet(input_quaternion) == expected_output
+
+
+def test_convert_quaternion_to_vanet_with_tuple():
+    input_quaternion = (1.0, 2.0, 3.0, 4.0)
+    expected_output = VanetQuaternion(x=1.0, y=2.0, z=3.0, w=4.0)
+    assert _helper.convert_quaternion_to_vanet(input_quaternion) == expected_output
+
+
+def test_convert_quaternion_to_vanet_with_invalid_input():
+    input_quaternion = "invalid"
+    with pytest.raises(ValueError, match="Quaternion must be of type VAQuaternion, list or tuple"):
+        _helper.convert_quaternion_to_vanet(input_quaternion)
+
+
+def test_convert_quaternion_to_vanet_with_invalid_length():
+    input_quaternion = [1.0, 2.0]
+    with pytest.raises(ValueError, match="Quaternion must be of length 4"):
+        _helper.convert_quaternion_to_vanet(input_quaternion)
+
+
+def test_convert_quaternion_from_vanet():
+    input_quaternion = VanetQuaternion(x=1, y=2, z=3, w=4)
+    expected_output = VAQuaternion(x=1, y=2, z=3, w=4)
+    assert _helper.convert_quaternion_from_vanet(input_quaternion) == expected_output