Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found

Target

Select target project
  • florian.weiss/calorimetry_laboratory
  • philipp.schmidt2/calorimetry_laboratory
  • paul.bobrinskoy/calorimetry_laboratory
  • vincent.jestaedt/calorimetry_laboratory
  • elias.rausch/calorimetry_laboratory
  • dennis.dibbern/calorimetry_laboratory
  • bianca.beer/calorimetry_laboratory
  • luca.sommer/calorimetry_laboratory
  • jannik.hoffmann/calorimetry_laboratory
  • adrian.gabel1/calorimetry_laboratory
  • erwin.durasow/calorimetry_laboratory
  • ole.quiring/calorimetry_laboratory
  • david.buening/calorimetry_laboratory
  • daniel.burgos/calorimetry_laboratory
  • malte.kramp/calorimetry_laboratory
  • vladimir.pascari/calorimetry_laboratory
  • fst-tuda/public/lehre/calorimetry_laboratory
  • nilay.kuslugil/calorimetry_laboratory
  • christoph.froehlich/calorimetry_laboratory
  • lucas.gomiero/calorimetry_laboratory
  • patrick.schell/calorimetry_laboratory
  • noel.schwibus/calorimetry_laboratory
  • gregor.komora/calorimetry-laboratory-komora
  • thomas.gruber/calorimetry_laboratory
  • leo.mensler/calorimetry_laboratory
  • paul_konrad.braun/calorimetry-laboratory-paul
  • jakob.maurer/calorimetry_laboratory
  • jakob.knoblach/calorimetry_laboratory
  • marius.stumpe/calorimetry_laboratory
  • diogo.fernandes_costa/calorimetry_laboratory
  • hiab.berhane/calorimetry_laboratory
  • zidane.buermann/calorimetry_laboratory
  • maximilian.gross1/calorimetry_laboratory
  • tahsin.ahmad/calorimetry_laboratory
  • santiago.ramirez_saldana/calorimetry_laboratory
  • moritz.roth/calorimetry_laboratory
  • noah.waltmann/calorimetry_laboratory
  • keanu.engel/calorimetry_laboratory
  • noah.michel1/calorimetry_laboratory
  • julie.kickstein/calorimetry_laboratory
  • maurizio.fell/calorimetry_laboratory
  • junghyun.seo/calorimetry-laboratory
  • paul.zuendel/calorimetry_laboratory
  • alexander.neubauer/calorimetry_laboratory
  • constantin.messingfeld/calorimetry_laboratory
  • malte.lesche/calorimetry_laboratory
  • felix.meyners/calorimetry_laboratory
  • henri.sprickmann/calorimetry_laboratory
  • zineb.karri/calorimetry_laboratory
  • ilhan_mert.dumlupinar/calorimetry_laboratory
  • tim.ostendorf/calorimetry_laboratory
  • lukas.ripp/calorimetry_laboratory
  • amen.bouzouraa/calorimetry_laboratory
  • ammon.wuendisch/calorimetry_laboratory
  • louis.randa/calorimetry_laboratory
  • mattheo.beyer/calorimetry_laboratory
  • pascal.grym/calorimetry_laboratory
  • bryan.lauren/calorimetry_laboratory
  • hani.husseini/calorimetry_laboratory
  • jonas.dissinger/calorimetry_laboratory
  • zhibo.zhao/calorimetry_laboratory
  • buesra.akkaya/calorimetry_laboratory
  • philipp.bojunga/calorimetry_laboratory
  • soner.elbudak/calorimetry_laboratory
  • pascal.schraut/calorimetry_laboratory
  • alicia.sachs/calorimetry_laboratory
  • tarish.kalra/calorimetry_laboratory
  • lilly.laubenheimer/calorimetry_laboratory
  • simon.peters/calorimetry_laboratory
  • tobias.erdmann/calorimetry_laboratory
  • philip.mahouttchi-hosseini/calorimetry_laboratory
  • yixing.tao/calorimetry_laboratory
  • konstantinos.boiadjiev/calorimetry_laboratory
  • ibrahim.alsaleh/calorimetry_laboratory
  • adonay.johannes/calorimetry_laboratory
  • sotiris.giovannis/calorimetry-laboratory-giovanns
  • manuel.kaster/calorimetry_laboratory
  • samuel.neidert/calorimetry_laboratory
  • rafi.noordin/calorimetry-laboratory-noordin-rafi
  • robert.schaefer1/calorimetry_laboratory
  • artashes.manukyan/calorimetry_laboratory
  • tolga.guelec/calorimetry_laboratory
  • bob.putz/calorimetry_laboratory
  • konrad.winkhaus/calorimetry-laboratory-kw
  • calvin.weide/calorimetry_laboratory
  • julius.damm/calorimetry_laboratory
  • louis.senff/calorimetry_laboratory
  • boris.dasgupta/calorimetry_laboratory
  • leon.herz/calorimetry_laboratory
  • marc.dobner/calorimetry_laboratory
  • benjamin.gross/calorimetry_laboratory
  • leon.dawkins/calorimetry_laboratory
  • nico.sebastian/calorimetry_laboratory
  • francisco.flores/calorimetry_laboratory
  • luca_tobias.nuecker/calorimetry_laboratory
95 results
Show changes
{
"JSON": {
"ID": "1ee5ec04-30cd-678f-a64b-0ce7544ef5e8",
"label": "",
"comment": "UUID6 is used"
},
"sensor": {
"name": "Temperature_Sensor",
"type": "DS18B20",
"manufacturer": "keyestudio",
"serial": "3c01f095b066",
"comment": "",
"range": {
"min": -55,
"max": 125,
"units": "degree_celsius"
},
"accuracy": {
"value": 0.5,
"type": "absolute",
"unit": "degree_celsius"
}
}
}
{
"JSON": {
"ID": "1ee5ec04-c845-69e2-853a-25c11543466f",
"label": "",
"comment": "UUID6 is used"
},
"sensor": {
"name": "Temperature_Sensor",
"type": "DS18B20",
"manufacturer": "keyestudio",
"serial": "3c01f095d465",
"comment": "",
"range": {
"min": -55,
"max": 125,
"units": "degree_celsius"
},
"accuracy": {
"value": 0.5,
"type": "absolute",
"unit": "degree_celsius"
}
}
}
{
"JSON": {
"ID": "1ee5ec05-4aea-68f6-ad82-53b16fffae49",
"label": "",
"comment": "UUID6 is used"
},
"sensor": {
"name": "Temperature_Sensor",
"type": "DS18B20",
"manufacturer": "keyestudio",
"serial": "3c25f6490937",
"comment": "",
"range": {
"min": -55,
"max": 125,
"units": "degree_celsius"
},
"accuracy": {
"value": 0.5,
"type": "absolute",
"unit": "degree_celsius"
}
}
}
{
"JSON": {
"ID": "1ee5ec2a-3557-61fd-88c5-45f35110e2ad",
"label": "",
"comment": "UUID6 is used"
},
"comment": "this setup is used to measure the specific heat capacity of a probe",
"setup": {
"1ee5ec0a-1830-63f5-ac3e-6f8ce4468546": {
"type": "group_info",
"name": "group_info",
"comment": ""
},
"1ee5ec0d-e77c-68b7-90f7-2e33485ff91c": {
"type": "instrument",
"name": "raspberry_pi",
"comment": ""
},
"1ee5ec0c-0b57-68cd-9d39-c9b7e9b18753": {
"type": "calorimeter",
"name": "calorimeter",
"comment": ""
},
"1ee21750-5282-63bc-86e9-b4de622ee43e": {
"type": "actor",
"name": "sous_vide heater",
"comment": "this heater is used to heat the water in the hot water bath for the samples"
},
"1ee5ec00-4a00-68a1-bb1e-873c2dd4dbde": {
"type": "sensor",
"name": "temperature_calorimeter_1",
"comment": ""
},
"1ee5ec03-7e64-6071-8ca3-98dbab0a7719": {
"type": "sensor",
"name": "temperature_calorimeter_2",
"comment": ""
},
"1ee5ec04-30cd-678f-a64b-0ce7544ef5e8": {
"type": "sensor",
"name": "temperature_calorimeter_3",
"comment": ""
},
"1ee5ec04-c845-69e2-853a-25c11543466f": {
"type": "sensor",
"name": "temperature_environment",
"comment": ""
},
"1ee5ec05-4aea-68f6-ad82-53b16fffae49": {
"type": "sensor",
"name": "temperature_hot_water_bath",
"comment": ""
},
"1ee87a91-8f66-67a2-9cf8-36ce3b859240": {
"type": "probe",
"name": "probe_PDC003",
"comment": ""
}
}
}
\ No newline at end of file
{
"JSON": {
"ID": "1ee5ec2a-9a12-6199-af26-20cb6ca9e9ca",
"label": "",
"comment": "UUID6 is used"
},
"comment": "this setup is used to measure the specific heat capacity of a probe",
"setup": {
"1ee5ec0a-1830-63f5-ac3e-6f8ce4468546": {
"type": "group_info",
"name": "group_info",
"comment": ""
},
"1ee5ec0d-e77c-68b7-90f7-2e33485ff91c": {
"type": "instrument",
"name": "raspberry_pi",
"comment": ""
},
"1ee5ec0c-0b57-68cd-9d39-c9b7e9b18753": {
"type": "calorimeter",
"name": "calorimeter",
"comment": ""
},
"1ee21750-5282-63bc-86e9-b4de622ee43e": {
"type": "actor",
"name": "sous_vide heater",
"comment": "this heater is used to heat the water in the hot water bath for the samples"
},
"1ee5ec00-4a00-68a1-bb1e-873c2dd4dbde": {
"type": "sensor",
"name": "temperature_calorimeter_1",
"comment": ""
},
"1ee5ec03-7e64-6071-8ca3-98dbab0a7719": {
"type": "sensor",
"name": "temperature_calorimeter_2",
"comment": ""
},
"1ee5ec04-30cd-678f-a64b-0ce7544ef5e8": {
"type": "sensor",
"name": "temperature_calorimeter_3",
"comment": ""
},
"1ee5ec04-c845-69e2-853a-25c11543466f": {
"type": "sensor",
"name": "temperature_environment",
"comment": ""
},
"1ee5ec05-4aea-68f6-ad82-53b16fffae49": {
"type": "sensor",
"name": "temperature_hot_water_bath",
"comment": ""
},
"1ee87a9d-9978-6070-b6c2-f287dc7964ab": {
"type": "probe",
"name": "probe_PDC012",
"comment": ""
}
}
}
\ No newline at end of file
{
"JSON": {
"ID": "1ee5ec28-ea53-67a4-ac3a-a2b7aab8e64a",
"label": "",
"comment": "UUID6 is used"
},
"comment": "this setup is used to measure the specific heat capacity of a probe",
"setup": {
"1ee5ec0a-1830-63f5-ac3e-6f8ce4468546": {
"type": "group_info",
"name": "group_info",
"comment": ""
},
"1ee5ec0d-e77c-68b7-90f7-2e33485ff91c": {
"type": "instrument",
"name": "raspberry_pi",
"comment": ""
},
"1ee5ec0c-0b57-68cd-9d39-c9b7e9b18753": {
"type": "calorimeter",
"name": "calorimeter",
"comment": ""
},
"1ee21750-5282-63bc-86e9-b4de622ee43e": {
"type": "actor",
"name": "sous_vide heater",
"comment": "this heater is used to heat the water in the hot water bath for the samples"
},
"1ee5ec00-4a00-68a1-bb1e-873c2dd4dbde": {
"type": "sensor",
"name": "temperature_calorimeter_1",
"comment": ""
},
"1ee5ec03-7e64-6071-8ca3-98dbab0a7719": {
"type": "sensor",
"name": "temperature_calorimeter_2",
"comment": ""
},
"1ee5ec04-30cd-678f-a64b-0ce7544ef5e8": {
"type": "sensor",
"name": "temperature_calorimeter_3",
"comment": ""
},
"1ee5ec04-c845-69e2-853a-25c11543466f": {
"type": "sensor",
"name": "temperature_environment",
"comment": ""
},
"1ee5ec05-4aea-68f6-ad82-53b16fffae49": {
"type": "sensor",
"name": "temperature_hot_water_bath",
"comment": ""
},
"1ee87a9e-b060-62d2-8fe3-41db0cd25253": {
"type": "probe",
"name": "probe_PDC028",
"comment": ""
}
}
}
\ No newline at end of file
{
"JSON": {
"ID": "1ee5ec1a-e3f7-66be-b5a8-ad82b1e18627",
"label": "",
"comment": "UUID6 is used"
},
"comment": "this setup is used to measure the calorimeter constant by bringing in a known amount of energy, while measuring the temperature",
"setup": {
"1ee5ec0a-1830-63f5-ac3e-6f8ce4468546": {
"type": "group_info",
"name": "group_info",
"comment": ""
},
"1ee5ec0d-e77c-68b7-90f7-2e33485ff91c": {
"type": "instrument",
"name": "raspberry_pi",
"comment": ""
},
"1ee5ec0c-0b57-68cd-9d39-c9b7e9b18753": {
"type": "calorimeter",
"name": "calorimeter",
"comment": ""
},
"1ee21744-0355-6023-94b4-d5c041dd32cd": {
"type": "actor",
"name": "immersion_heater",
"comment": "this heater is used to warm up the water in the calorimeter for measuring the calorimeter constant"
},
"1ee5ec00-4a00-68a1-bb1e-873c2dd4dbde": {
"type": "sensor",
"name": "temperature_calorimeter_1",
"comment": ""
},
"1ee5ec03-7e64-6071-8ca3-98dbab0a7719": {
"type": "sensor",
"name": "temperature_calorimeter_2",
"comment": ""
},
"1ee5ec04-30cd-678f-a64b-0ce7544ef5e8": {
"type": "sensor",
"name": "temperature_calorimeter_3",
"comment": ""
},
"1ee5ec04-c845-69e2-853a-25c11543466f": {
"type": "sensor",
"name": "temperature_environment",
"comment": ""
}
}
}
\ No newline at end of file
figures/kalorimetrie_pruefstand.jpg

2.75 MiB

import os
import json
import shutil
from typing import List, Any
def get_metadata_from_setup(path: str) -> dict:
    """Extracts component UUIDs grouped by type from a setup JSON file.

    Reads the setup datasheet at *path* and builds a dictionary keyed by
    component type (e.g. 'sensor', 'actor', 'group_info'); each entry holds
    a 'values' list with the UUIDs of all components of that type.  The
    type keys are sorted alphabetically.  Refer to README.md section
    "Runtime metadata" for a detailed description of the output data
    structure.

    Args:
        path (str): The file path to the setup JSON.

    Returns:
        dict: Mapping of component type -> {'values': [uuid, ...]}.
            Empty if the file contains no 'setup' key.
    """
    metadata: dict = {}
    # Read the setup datasheet describing the experimental setup.
    with open(path) as f:
        data_json = json.load(f)
    # Collect the UUID of every component, grouped by its declared type.
    if 'setup' in data_json:
        for component_uuid, component in data_json['setup'].items():
            component_type = component.get('type')
            metadata.setdefault(component_type, {'values': []})
            metadata[component_type]['values'].append(component_uuid)
    # Sort the types alphabetically for a deterministic, readable result.
    return dict(sorted(metadata.items()))
def add_temperature_sensor_serials(folder_path: str, metadata: dict) -> dict:
    """Adds temperature sensor serial numbers to the metadata dictionary.

    For every sensor UUID in ``metadata['sensor']['values']`` the matching
    datasheet below *folder_path* is searched (via :func:`get_json_entry`)
    and the sensor's serial number is appended to a new
    ``metadata['sensor']['serial']`` list, in the same order as the UUIDs.
    Refer to README.md section "Runtime metadata" for a detailed description
    of the location the serials are added.

    Args:
        folder_path (str): The path to the folder containing datasheets.
        metadata (dict): The metadata dictionary to be updated in place.
            Expected to have a 'sensor' key with a nested 'values' key
            containing UUIDs.

    Returns:
        dict: The same metadata dictionary (updated in place), for
        call-chaining convenience.
    """
    # Create the 'serial' list that mirrors the order of the sensor UUIDs.
    metadata["sensor"]["serial"] = []
    for sensor_uuid in metadata["sensor"]["values"]:
        serial = get_json_entry(folder_path, sensor_uuid, ["sensor", "serial"])
        metadata["sensor"]["serial"].append(serial)
    return metadata
def get_json_entry(folder_path: str, uuid: str, json_path: List[str]) -> Any:
    """Recursively searches a directory tree for a JSON entry by UUID.

    Every ``*.json`` file below *folder_path* is checked for a matching
    ``["JSON"]["ID"]``.  When a file with the requested UUID is found, the
    entry addressed by *json_path* is returned.  Files without the expected
    'JSON'/'ID' structure produce a warning and are skipped.  Subfolders are
    searched before the JSON files of the current folder (matching the
    original traversal order).

    Args:
        folder_path (str): The path to the folder containing JSON files and subfolders.
        uuid (str): The UUID to be searched for within the JSON files.
        json_path (List[str]): A list of keys representing the path to the
            desired entry within a JSON file.

    Returns:
        Any: The desired JSON entry based on the provided UUID and JSON path,
        or None if no matching file (or no such entry) exists.

    Example:
        Given a folder structure:
        ├── folder_path/
        │   ├── file1.json
        │   └── subfolder/
        │       └── file2.json
        And the content of 'file1.json' being:
        {
            "JSON": {
                "ID": "1ee7d8b5-12ad-6faa-a891-1cee8d14824f"
            },
            "sensor": {
                "serial": "3ce104578aa8"
            }
        }
        >>> get_json_entry('folder_path',
        ...                '1ee7d8b5-12ad-6faa-a891-1cee8d14824f',
        ...                ['sensor', 'serial'])
        '3ce104578aa8'
    """
    # Partition the folder contents into JSON files and subfolders.
    json_files = []
    subfolders = []
    for item in os.listdir(folder_path):
        path = os.path.join(folder_path, item)
        if os.path.isfile(path):
            if item.endswith(".json"):
                json_files.append(path)
        else:
            subfolders.append(path)
    # Recursively search subfolders first.
    for folder in subfolders:
        entry = get_json_entry(folder, uuid, json_path)
        if entry is not None:
            return entry
    # Then check each JSON file in the current folder.
    for json_file in json_files:
        with open(json_file) as file:
            json_dict = json.load(file)
        try:
            json_uuid = json_dict["JSON"]["ID"]
        except KeyError:
            # File does not follow the 'JSON'/'ID' datasheet convention.
            print("Invalid json file.")
            continue
        if json_uuid == uuid:
            # Drill down one key at a time along json_path.
            entry = json_dict
            try:
                for key in json_path:
                    entry = entry[key]
            except (KeyError, TypeError):
                # The matching file lacks the requested path; keep searching
                # instead of raising (the documented contract is to return
                # None when the entry is not found).
                continue
            return entry
    # No matching entry anywhere below folder_path.
    return None
def archiv_json(folder_path: str, setup_path: str, archiv_path: str) -> None:
    """Archives matching datasheets from a given folder based on a setup datasheet.

    Walks the directory tree under *folder_path* looking for JSON files whose
    ``["JSON"]["ID"]`` matches one of the UUIDs listed under 'setup' in the
    setup file at *setup_path*.  Matching files are copied to *archiv_path*
    (created if necessary) with their UUID appended to the filename; the
    setup file itself is copied under its original name.  Files without the
    expected 'JSON'/'ID' structure produce a warning and are skipped.

    Args:
        folder_path (str): The root folder containing datasheets and subfolders to search.
        setup_path (str): The setup datasheet file that contains the UUIDs to match against.
        archiv_path (str): The folder where matching JSON files should be archived.
            Created if it does not exist.

    Example:
        Given 'file1.json' with ID "uuid1" and a setup file listing "uuid1"
        and "uuid2" under 'setup':
        >>> archiv_json('folder_path', 'setup_path', 'archiv_path')
        the `archiv_path` folder will contain: file1_uuid1.json
    """
    # Load the setup datasheet and take its UUIDs as a set for O(1) lookups.
    with open(setup_path, "r") as f:
        setup_data = json.load(f)
    setup_uuids = set(setup_data["setup"].keys())
    # Collected (source path, archive filename) pairs.
    matching_files = []
    copy_names = []
    for root, _, files in os.walk(folder_path):
        for file_name in files:
            # Only JSON datasheets are candidates.
            if not file_name.endswith(".json"):
                continue
            file_path = os.path.join(root, file_name)
            with open(file_path, "r") as f:
                json_dict = json.load(f)
            try:
                json_uuid = json_dict["JSON"]["ID"]
            except KeyError:
                # Not a datasheet with the expected structure; skip it.
                print("Invalid json file.")
                continue
            if json_uuid in setup_uuids:
                matching_files.append(file_path)
                # "<original name without .json>_<uuid>.json"
                copy_names.append("{}_{}.json".format(file_name[:-5], json_uuid))
    # The setup datasheet itself is archived under its original name.
    matching_files.append(setup_path)
    copy_names.append(os.path.basename(setup_path))
    # Ensure the archive directory exists.
    if not os.path.exists(archiv_path):
        os.makedirs(archiv_path)
    # Copy every matching file into the archive under its new name.
    for src, name in zip(matching_files, copy_names):
        shutil.copyfile(src, os.path.join(archiv_path, name))
# Manual smoke test: only executed when this module is run directly.
if __name__ == "__main__":
    # Debug and Test
    # NOTE(review): hardcoded Raspberry Pi paths — this only runs on the
    # lab device; confirm the datasheet locations before reuse.
    metadata = get_metadata_from_setup('/home/pi/calorimetry_home/datasheets/setup_newton.json')
    print(metadata)
    # Enrich the setup metadata with the sensors' serial numbers and
    # print the updated dictionary for visual inspection.
    add_temperature_sensor_serials('/home/pi/calorimetry_home/datasheets/',metadata)
    print(metadata)
\ No newline at end of file
import h5py as h5
def logging_heater(path: str, uuid: str) -> dict[str, dict[str, float]]:
    """Prompt the user to enter heating data and log it to an HDF5 file.

    Creates a new group in the HDF5 file for the heater's UUID and prompts
    the user for current, voltage, and heating time.  The input is validated
    and, once confirmed, written to the file: one dataset per quantity, each
    carrying a 'unit' attribute.

    Args:
        path (str): The file path to the HDF5 file where data should be logged.
        uuid (str): The UUID to create a group for.

    Returns:
        dict[str, dict[str, float]]: The entered data, organized by UUID and
        measurement type.

    Example:
        After executing this function the target HDF5 file contains:
        RawData
            ...
            UUID_Heater
                current   (+ Attribute: unit)
                heat_time (+ Attribute: unit)
                voltage   (+ Attribute: unit)
    """
    # HDF5-internal path of the heater group.
    h5_path = "RawData/{}".format(uuid)
    # Create the heater group up front so the file structure exists even if
    # the user aborts during data entry.
    with h5.File(path, "a") as f:
        f.create_group(h5_path)
    # Entered values, keyed by a human-readable prompt label.
    data_dict = {
        uuid: {
            "Current in Ampere": None,
            "Voltage in Volt": None,
            "Heat time in Seconds": None,
        }
    }
    # is_wrong: loop until the user confirms a complete, correct data set.
    # enter_data: whether the next iteration should (re-)prompt for values.
    is_wrong = True
    enter_data = True
    while is_wrong:
        if enter_data:
            for label in data_dict[uuid]:
                try:
                    data_dict[uuid][label] = float(input("{} = ".format(label)))
                except ValueError:
                    # Keep going so the user can fix it on the next round.
                    print("Invalid input, try again after entering the remaining data.")
                    data_dict[uuid][label] = None
        # Show the current values for review.
        print(data_dict)
        user_input = input(
            "Are the input data correct? [y] to store, [n] to re-enter: "
        )
        if user_input in ("y", "Y"):
            if None in data_dict[uuid].values():
                # Refuse to store incomplete data — writing None to a
                # dataset would fail; force a re-entry instead.
                print("Some values are missing, please re-enter the data.")
                enter_data = True
            else:
                is_wrong = False
        elif user_input in ("n", "N"):
            enter_data = True
        else:
            # Invalid confirmation input: re-show the values and ask again.
            print("Invalid input")
            enter_data = False
    # Write the confirmed values: one dataset per quantity, each annotated
    # with its physical unit (matching the documented file structure).
    with h5.File(path, "r+") as f:
        heater_group = f[h5_path]
        for dataset_name, label, unit in (
            ("current", "Current in Ampere", "ampere"),
            ("voltage", "Voltage in Volt", "volt"),
            ("heat_time", "Heat time in Seconds", "second"),
        ):
            dataset = heater_group.create_dataset(
                dataset_name, data=data_dict[uuid][label]
            )
            dataset.attrs["unit"] = unit
    return data_dict
import os
import sys
import time
import json
from w1thermsensor import Sensor, W1ThermSensor
import h5py
import numpy as np
import uuid6
# This if statement enables you to write and run programs that test functions directly at the end of this file.
if __name__ == "__main__":
import pathlib
file_path = os.path.abspath(__file__)
file_path = pathlib.Path(file_path)
root = file_path.parent.parent
sys.path.append(str(root))
from functions import m_json
def check_sensors() -> None:
    """Retrieves and prints the serial number and current temperature of all DS18B20 Temperature Sensors.

    This function utilizes the `w1thermsensor` library to interface with the
    DS18B20 temperature sensors.  For each available sensor, it prints its
    serial number (or ID) and the current temperature reading.  Finally it
    prints a freshly generated UUIDv6, which is handy when creating a new
    datasheet for a sensor.

    Examples:
        Assuming two DS18B20 sensors are connected:
        >>> check_sensors()
        Sensor 000005888445 has temperature 25.12
        Sensor 000005888446 has temperature 24.89
    """
    # Print every connected sensor together with its current temperature.
    for sensor in W1ThermSensor.get_available_sensors():
        print("Sensor %s has temperature %.2f" % (sensor.id, sensor.get_temperature()))
    # Generate and print a UUIDv6 (uuid6 is already imported at module
    # level; the redundant in-function import was removed).
    my_uuid = uuid6.uuid6()
    print(my_uuid)
def get_meas_data_calorimetry(metadata: dict) -> dict:
    """Collects and returns temperature measurement data from DS18B20 sensors based on the provided metadata.

    This function initializes one sensor object per serial number in the
    metadata, prompts the user to start the measurement, and then
    continually reads temperature values until interrupted (e.g., via
    Ctrl-C).  It logs the temperatures and the corresponding elapsed-time
    timestamps.  Refer to README.md section "Runtime measurement data" for a
    detailed description of the output data structure.

    Args:
        metadata (dict): Contains sensor UUIDs ('values') and serial numbers
            ('serial').  Refer to README.md section "Runtime metadata" for a
            detailed description of the input data structure.

    Returns:
        dict: A dictionary with sensor UUIDs as keys, and corresponding lists
        of temperatures and timestamps.

    Example:
        Input metadata:
        {
            "sensor": {
                "values": ["sensor_1", "sensor_2"],
                "serial": ["000005888445", "000005888446"]
            }
        }
        Output (example data after interruption):
        {
            "sensor_1": [[25.12, 25.15], [0, 2]],
            "sensor_2": [[24.89, 24.92], [0, 2]]
        }
    """
    # One [temperatures, timestamps] pair per sensor UUID.
    data = {sensor_uuid: [[], []] for sensor_uuid in metadata["sensor"]["values"]}
    # NOTE(review): the clock starts before the user confirms, so the first
    # timestamp includes the waiting time at the prompt — confirm intended.
    start = time.time()
    sensor_list = [
        W1ThermSensor(Sensor.DS18B20, serial) for serial in metadata["sensor"]["serial"]
    ]
    input("Press any key to start measurement... <Ctrl+C> to stop measurement")
    try:
        while True:
            # Read every sensor once per round.
            for i, sensor in enumerate(sensor_list):
                sensor_uuid = metadata["sensor"]["values"][i]
                temperature = sensor.get_temperature()
                data[sensor_uuid][0].append(temperature)
                # Timestamp = elapsed seconds since the measurement started.
                data[sensor_uuid][1].append(time.time() - start)
                print(f"Sensor: {sensor_uuid}, Temperature: {temperature:.2f}°C")
            # Blank line between rounds for console readability.
            print("")
    # Ctrl-C ends the measurement loop.
    except KeyboardInterrupt:
        # Show the collected data as formatted JSON.
        print(json.dumps(data, indent=4))
    finally:
        # The interrupt can hit between the temperature and timestamp
        # append; truncate both lists to a common length per sensor.
        for sensor_uuid in data:
            n = min(len(data[sensor_uuid][0]), len(data[sensor_uuid][1]))
            data[sensor_uuid][0] = data[sensor_uuid][0][:n]
            data[sensor_uuid][1] = data[sensor_uuid][1][:n]
    return data
def logging_calorimetry(
    data: dict,
    metadata: dict,
    data_folder: str,
    json_folder: str,
) -> None:
    """Logs the calorimetry measurement data into an H5 file.

    This function creates a folder (if not already present) and an H5 file
    named after the folder.  File-level attributes (creation date,
    experiment, group number, authors) are taken from the group-info
    datasheet; one group per sensor UUID is created under RawData/Sensors,
    holding 'Temperature' and 'Timestamp' datasets.

    Args:
        data (dict): Contains sensor data including temperature and timestamp.
            Refer to README.md section "Runtime measurement data" for a
            detailed description of the data structure.
        metadata (dict): Contains metadata. Refer to README.md section
            "Runtime metadata" for a detailed description of the structure.
        data_folder (str): Path to the folder where the H5 file should be created.
        json_folder (str): Path to the folder containing the datasheets.
    """
    # The H5 file is named after the last component of the data folder path.
    log_name = data_folder.split("/")[-1]
    dataset_path = "{}/{}.h5".format(data_folder, log_name)
    # Create the logging folder if it doesn't exist.
    if not os.path.exists(data_folder):
        os.makedirs(data_folder)
    # UUID of the group-info datasheet, used for all file-level attributes.
    group_uuid = metadata['group_info']['values'][0]
    # `with` guarantees the file is closed even if a datasheet lookup fails
    # (the original left the file open on exceptions).
    with h5py.File(dataset_path, "w") as f:
        grp_raw = f.create_group("RawData")
        # File-level attributes sourced from the group-info datasheet.
        f.attrs["created"] = time.strftime("%Y%m%d", time.localtime())
        f.attrs["experiment"] = m_json.get_json_entry(json_folder, group_uuid, ['group', 'experiment'])
        f.attrs["group_number"] = m_json.get_json_entry(json_folder, group_uuid, ['group', 'number'])
        f.attrs["authors"] = m_json.get_json_entry(json_folder, group_uuid, ['group', 'author'])
        # One subgroup per sensor, each holding its measurement series.
        sensors = grp_raw.create_group('Sensors')
        # Iterate over every sensor in the setup instead of assuming a fixed
        # count of four (the original hard-coded [1, 2, 3, 4] and crashed or
        # dropped data for any other sensor count).
        for sensor_uuid in metadata['sensor']['values']:
            sensor_grp = sensors.create_group(sensor_uuid)
            sensor_grp.create_dataset('Temperature', data=data[sensor_uuid][0])
            sensor_grp.create_dataset('Timestamp', data=data[sensor_uuid][1])
"""
Praktikum Digitalisierung
This code is to generate a universally unique identifier (UUID).
https://pypi.org/project/uuid6/
Author: Benjamin Hermann, M.Sc.
Created: 24.04.2023
Last Changes: 11.05.2023
"""
import uuid6
# Generate a new UUID using the uuid6 library
my_uuid = uuid6.uuid6()
# Print the generated UUID to the console
print(my_uuid)
# Check (assert) that the previously generated UUIDv6 is less than a newly generated one.
# This is based on the fact that UUIDv6 has a timestamp component, making newer UUIDs larger in value.
# If the condition is not met, the program will raise an AssertionError.
assert my_uuid < uuid6.uuid6()
from typing import Dict, List, Tuple
import h5py as h5
import matplotlib.pyplot as plt
import numpy as np
def plot_temp_over_time(
    data: List[np.ndarray],
    time: List[np.ndarray],
    legend: List[str],
    x_label: str,
    y_label: str,
) -> None:
    """Plots temperature data over time with error bars for multiple datasets.

    This function creates a plot representing temperature data against time
    for multiple sensors or datasets. Each dataset's standard deviation is
    visualized with error bars.

    Args:
        data (List[np.ndarray]): A list of numpy arrays where each array represents
                                 the temperature data (with standard deviation).
                                 Each array should have a shape of (2, n), with n
                                 representing the number of data points.
        time (List[np.ndarray]): A list of numpy arrays with the time data corresponding
                                 to each dataset in `data`.
        legend (List[str]): A list of strings that label each dataset in the legend.
        x_label (str): The label for the x-axis (time).
        y_label (str): The label for the y-axis (temperature).
    """
    # init the matplotlib.axes.Axes and matplotlib.figure.Figure Object for later plot
    fig, ax = plt.subplots(1, 1)
    markers = ["o", "^", "2", "p", "D"]
    for i in range(len(data)):
        # Row 0 of each dataset is the mean, row 1 the standard deviation
        # (drawn as the error bars). The label/marker are required so the
        # legend cleanup below has labelled handles to work with; markers
        # cycle if there are more datasets than marker styles.
        ax.errorbar(
            time[i],
            data[i][0, :],
            yerr=data[i][1, :],
            marker=markers[i % len(markers)],
            label=legend[i],
        )
    # Errorbars removed from Legend: each handle is an ErrorbarContainer;
    # its first element is the data line, which is what we want shown.
    legend_handles, labels = ax.get_legend_handles_labels()
    legend_handles = [h[0] for h in legend_handles]
    # Set legend (cleaned handles paired with their labels) and axis labels.
    ax.set_ylabel(y_label)
    ax.set_xlabel(x_label)
    ax.legend(legend_handles, labels)
    # Switch to scientific notation outside 10^0..10^3.
    ax.ticklabel_format(scilimits=(0, 3))
def get_plot_data_from_dataset(
    data_path: str, group_path: str
) -> Dict[str, np.ndarray]:
    """Get the necessary data from the dataset to plot.

    This function returns the data in a HDF5 file in all subgroups of a group in
    'group_path' and automatically categorizes and names the data based on the
    name of the dataset as well as the metadata. Series of different lengths are
    truncated to the shortest one so the output arrays are rectangular.

    Args:
        data_path (str): path to HDF5 file.
        group_path (str): path in HDF5 to group.

    Returns:
        dict[str, np.ndarray]: Data for plot in a dict with keys
            "temperature" (2D array, one row per sensor),
            "timestamp" (2D array, one row per sensor) and
            "name" (list of sensor names; note: a plain list, not an ndarray).

    Example:
        Output (example data):
        {
            "temperature": np.array([
                [24.89, 24.92, 24.00, 25.39],
                [24.89, 24.92, 24.00, 25.39],
                [24.89, 24.92, 24.00, 25.39]
            ]) -> temperature from each sensor, the first dimension (row) represents the sensor.
            "timestamp": np.array([
                [0.43, 1.60, 3.05, 4.25],
                [0.81, 2.13, 3.49, 4.62],
                [1.34, 2.60, 3.85, 5.08],
            ]) -> timestamp for each sensor, the first dimension (row) represents the sensor.
            "name": ["sensor_1", "sensor_2", "sensor_3"] -> name of each sensor.
        }
    """
    temperature = []
    time = []
    name = []
    with h5.File(data_path) as data:
        group = data[group_path]
        subgroups = []
        min_len = None
        start_time = None
        for subgroup in group:
            try:
                dataset_start_time = group[subgroup]["timestamp"][0]
                dataset_len = len(group[subgroup]["timestamp"])
                # Find the minimum length of the data set.
                if min_len is None:
                    min_len = dataset_len
                elif dataset_len < min_len:
                    min_len = dataset_len
                subgroups.append(subgroup)
            # Only group with dataset called timestamp will be read.
            except KeyError:
                continue
            # Track the earliest first timestamp over all sensors.
            # NOTE(review): start_time is computed but never applied to the
            # returned timestamps — confirm whether the output was meant to
            # be shifted relative to this measurement start.
            if start_time is None:
                start_time = dataset_start_time
            elif dataset_start_time < start_time:
                start_time = dataset_start_time
        # Save data into temperature, time and name. Each sensor's series is
        # cut to min_len because a rectangular np.ndarray is used for output.
        temperature = np.empty(shape=[len(subgroups), min_len])
        time = np.empty(shape=[len(subgroups), min_len])
        for i,subgroup in enumerate(subgroups):
            # Row i holds sensor i's data, truncated to the common length.
            time[i] = group[subgroup]["timestamp"][:min_len]
            temperature[i] = group[subgroup]["temperature"][:min_len]
            name.append( group[subgroup].attrs["name"])
        # Return the output dict (temperature per sensor over time).
        return {"temperature": temperature, "timestamp": time, "name": name}
def cal_mean_and_standard_deviation(data: np.ndarray) -> np.ndarray:
    """Calculating mean and standard deviation for raw data of multiple sensors.

    Args:
        data (np.ndarray): raw data in a 2 dimensional array (m, n); the first
            dimension holds the m parallel measurements taken at the same time
            (and place), and should not be 1 if several measurements exist.

    Returns:
        np.ndarray: mean of raw data with standard deviation in a 2D ndarray
            with shape (2, n); row 0 is the mean, row 1 the standard deviation.
    """
    data = np.asarray(data)
    # Reduce over axis 0 (the repeated measurements). The standard deviation
    # is kept in full precision: the previous dtype=np.float32 cast only lost
    # accuracy, since stacking with the float64 mean upcasts the result to
    # float64 anyway.
    return np.vstack((data.mean(axis=0), data.std(axis=0)))
def get_start_end_temperature(
    temperature_data: np.ndarray, threshold: float = 0.05
) -> Tuple[float, float]:
    """Calculates the high and low temperatures from a dataset.

    This function computes the average of the highest temperatures and the
    average of the lowest temperatures within a given threshold of the maximum
    and minimum temperatures recorded in the dataset. These are considered as
    the ending and starting temperatures respectively.

    Args:
        temperature_data (np.ndarray): The temperature dataset as a 2D numpy array.
        threshold (float): Absolute temperature band used to identify values
            close to the maximum and minimum as high and low temperatures
            respectively (same unit as the data, not a percentage).
            Defaults to 0.05.

    Returns:
        Tuple[float, float]: A tuple containing the average high temperature
            first and the average low temperature second.
    """
    values = np.asarray(temperature_data, dtype=float)
    # NaN-safe extrema: nanmax/nanmin simply ignore missing samples.
    abs_maximum = np.nanmax(values)
    abs_minimum = np.nanmin(values)
    # Boolean masks select every sample within the threshold band of the
    # extremum. NaN entries fail both comparisons and are therefore excluded
    # from the means, matching the element-wise filtering this replaces.
    high_temperature = float(np.mean(values[(abs_maximum - values) < threshold]))
    low_temperature = float(np.mean(values[(values - abs_minimum) < threshold]))
    return (high_temperature, low_temperature)
"""
Praktikum Digitalisierung
Script for the labor experiment.
Before you start, please read all the documentation carefully,
including the comments in the code and the docstring of the
functions. If you have any questions, please ask in the forum
or join the helpdesk!
Author: Benjamin Hermann, M.Sc.
https://git.rwth-aachen.de/benjamin.hermann
Ning Xia, M.Sc. M.Sc.
https://git.rwth-aachen.de/ning.xia
Created: 24.04.2023
Last Changes: 03.11.2023
"""
from functions import m_json
from functions import m_pck
from functions import m_labor
# Define the path to the datasheets folder.
path_json = "./datasheets"
# Define the path to the test rig setup file.
path_setup = "/home/pi/calorimetry_laboratory/datasheets/setup_constant.json"
# path_setup = ""
# Ask for the type of experiment to be performed until the user gives a valid answer.
is_log_heater = None
while is_log_heater != 0 and is_log_heater != 1:
    if is_log_heater is None:
        # First prompt: just show the two choices.
        print("0 = measuring specific heat capacity of a probe")
        print("1 = measuring calorimeter constant")
        print()
    else:
        # Re-prompt after invalid input.
        print("Invalid input")
        print("0 = measuring specific heat capacity of a probe")
        print("1 = measuring calorimeter constant")
        print()
    try:
        # Attempt to convert the input to int and store it.
        is_log_heater = int(input("measurement process = "))
    except ValueError:
        # If conversion fails, set value to 2, in order to ask user again.
        is_log_heater = 2
# Read the metadata necessary to control test rig from the setup file.
metadata = m_json.get_metadata_from_setup(path_setup)
# Read the sensor's serial number from the sensor's datasheets and add it to the dictionary that holds the metadata.
m_json.add_temperature_sensor_serials(path_json, metadata)
# Reads sensor measurements and saves the data to a variable.
data = m_pck.get_meas_data_calorimetry(metadata)
# List of valid characters.
valued_char = "-_.() abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
# Initialize empty filename.
name_logging = ""
# Loop until the user enters a valid filename.
while name_logging == "":
    # Read the name of the logging file to be created from stdin (standard input/output).
    name_logging = input("logging file name = ")
    # Remove spaces at the beginning and end of the filename.
    name_logging = name_logging.strip()
    # If the filename entered is empty then re-enter it.
    if name_logging == "":
        print("File name can not be empty.")
        continue
    # Iterate over all characters of the filename.
    for c in name_logging:
        # Requires retyping if invalid characters are present.
        if c not in valued_char:
            print("{} is not a valid name. ({} invalid)".format(name_logging, c))
            name_logging = ""
            break
# The folder containing the logging files will be created in the data folder in the root directory of the program.
path_logging = "{}/{}".format("data", name_logging)
# Save the datasets and datasheets used in the experiment to the logging directory.
m_pck.logging_calorimetry(data, metadata, path_logging, path_json)
m_json.archiv_json(path_json, path_setup, path_logging)
# Only the calorimeter-constant experiment (choice 1) involves the heater.
if is_log_heater:
    # Read heater's uuid from metadata.
    heater_uuid = metadata["actor"]["values"][
        metadata["actor"]["name"].index("immersion_heater")
    ]
    # Save heater-related data to an HDF5 file.
    data = m_labor.logging_heater(
        "{}/{}.h5".format(path_logging, name_logging), heater_uuid
    )