Skip to content
Snippets Groups Projects
Commit c8671b7a authored by Jan Habscheid's avatar Jan Habscheid
Browse files

Docstrings for miscellaneous

parent 3bc80b9e
No related branches found
No related tags found
1 merge request!6Documentation
......@@ -2,7 +2,20 @@ import numpy as np
import xml.etree.ElementTree as ET
# Load XML file to numpy array
def load_xml_to_numpy(file_path):
def load_xml_to_numpy(file_path:str) -> np.ndarray:
'''
Loads an XML file containing a matrix and returns it as a NumPy array.
Parameters
----------
file_path : str
Path to the XML file.
Returns
-------
np.ndarray
NumPy array containing the matrix data.
'''
# Parse the XML file
tree = ET.parse(file_path)
root = tree.getroot()
......@@ -13,12 +26,24 @@ def load_xml_to_numpy(file_path):
# Extract the text, split by whitespace, and convert to floats
data = np.array([np.float32(val) for val in matrix.text.split()])
# ? Experimental. Does this even work???
data = np.nan_to_num(data, copy=False, nan=0.0, posinf=0.0, neginf=0.0)
return data
def get_matrix_size(file_path):
def get_matrix_size(file_path:str) -> int:
'''
Calculates the size of the matrix in the XML file.
Parameters
----------
file_path : str
Path to the XML file.
Returns
-------
int
Number of rows in the matrix.
'''
# Parse the XML file
tree = ET.parse(file_path)
root = tree.getroot()
......@@ -31,107 +56,6 @@ def get_matrix_size(file_path):
return data.shape[0]
def remove_rows_abstract(matrix, atol, remove_nan, stage, removed_rows_identifier):
    '''
    Dispatch row removal depending on the pipeline stage.

    Parameters
    ----------
    matrix : np.ndarray
        2D array whose rows are filtered.
    atol : float
        Absolute tolerance for the zero-row test (train stage only).
    remove_nan : bool
        Forwarded to the stage-specific removal routine.
    stage : str
        Either 'train' (detect the rows to drop) or 'test' (drop the rows
        previously detected on the training data).
    removed_rows_identifier : array-like
        Row indices recorded during the train stage (test stage only).

    Returns
    -------
    See remove_rows (train) / remove_rows_iloc (test).

    Raises
    ------
    ValueError
        If stage is neither 'train' nor 'test' (previously this returned
        None silently, hiding typos in the stage name).
    '''
    if stage == 'train':
        return remove_rows(matrix=matrix, atol=atol, remove_nan=remove_nan)
    elif stage == 'test':
        return remove_rows_iloc(matrix=matrix, removed_rows_identifier=removed_rows_identifier, remove_nan=remove_nan)
    # Fail loudly instead of implicitly returning None.
    raise ValueError(f"Unknown stage '{stage}'; expected 'train' or 'test'.")
def remove_rows(matrix, atol=1e-8, remove_nan=True):
    '''
    Remove rows that are (near-)zero or contain NaN/inf from a 2D array.

    Parameters
    ----------
    matrix : np.ndarray
        2D input array; rows are candidates for removal.
    atol : float, optional
        Absolute tolerance used by np.allclose for the all-zero test.
    remove_nan : bool, optional
        Currently unused here — the NaN/inf replacement below is commented
        out; NaN/inf rows are removed entirely instead.

    Returns
    -------
    tuple
        (reduced matrix, np.ndarray of removed-row indices).
        NOTE(review): the recorded indices are positions in the matrix *as
        it shrinks*, not in the original matrix; remove_rows_iloc replays
        them with the same scheme, so the pair stays consistent.
    '''
    i = 0
    removed_rows_identifier = []
    removed_rows_counter = 0
    while i < matrix.shape[0]:
        # if np.allclose(matrix[i,:], 0, atol=atol):
        # Drop the row if it is all-(near-)zero OR contains NaN/inf.
        if np.allclose(matrix[i,:], 0, atol=atol) or np.isnan(matrix[i,:]).any() or np.isinf(matrix[i,:]).any():
            removed_rows_identifier.append(i)
            matrix = np.concatenate((matrix[:i,:], matrix[i+1:,:]), axis=0)
        # NOTE(review): i advances even when a row was just removed, so the
        # row that shifted into position i is never re-checked — consecutive
        # bad rows are skipped. Confirm this is intended before changing it;
        # remove_rows_iloc depends on exactly this traversal.
        i += 1
        removed_rows_counter += 1
    # if remove_nan:
    #     matrix = np.nan_to_num(matrix, copy=False, nan=0.0, posinf=0.0, neginf=0.0)
    return matrix, np.array(removed_rows_identifier)
def remove_rows_iloc(matrix, removed_rows_identifier, remove_nan=True):
    '''
    Remove the rows previously recorded by remove_rows from another matrix.

    Parameters
    ----------
    matrix : np.ndarray
        2D input array (e.g. the test-stage counterpart of the training
        matrix passed to remove_rows).
    removed_rows_identifier : array-like
        Indices recorded by remove_rows. NOTE(review): these are positions
        in the *shrinking* matrix, and this loop replays the removal with
        the same index scheme (i advances after every iteration, including
        removals), so the two functions stay consistent with each other.
    remove_nan : bool, optional
        If True, remaining NaN/inf entries are replaced by 0.0.

    Returns
    -------
    np.ndarray
        Matrix with the recorded rows removed.
    '''
    i = 0
    while i < matrix.shape[0]:
        if i in removed_rows_identifier:
            matrix = np.concatenate((matrix[:i,:], matrix[i+1:,:]), axis=0)
        i += 1
    # Sanitize any leftover non-finite values in the kept rows.
    if remove_nan:
        matrix = np.nan_to_num(matrix, copy=False, nan=0.0, posinf=0.0, neginf=0.0)
    return matrix
def remove_zero_rows(arr):
    """
    Drop every all-zero row from an array.

    Parameters:
        arr (np.ndarray): Input array

    Returns:
        tuple: (reduced_array, zero_indices)
            - reduced_array: arr with the all-zero rows removed
            - zero_indices: positions (in the original arr) of the
              removed rows, kept so the array can be reconstructed later
    """
    # Boolean flag per row: True where every entry is exactly zero
    is_zero_row = (arr == 0).all(axis=1)
    # Remember where the zero rows were for later reconstruction
    zero_indices = np.flatnonzero(is_zero_row)
    return arr[~is_zero_row], zero_indices
def remove_special_rows(arr):
    """
    Drop rows that are all zero or contain any infinite value.

    Parameters:
        arr (np.ndarray): Input array

    Returns:
        tuple: (reduced_array, removed_indices)
            - reduced_array: arr with the offending rows removed
            - removed_indices: positions (in the original arr) of the
              removed rows
    """
    # A row is dropped when it is entirely zero OR holds any +/-inf entry
    drop = (arr == 0).all(axis=1) | np.isinf(arr).any(axis=1)
    removed_indices = np.flatnonzero(drop)
    return arr[~drop], removed_indices
def remove_rows_by_indices(arr, indices):
    """
    Return a copy of the array with the given rows removed.

    Parameters:
        arr (np.ndarray): Input array
        indices (np.ndarray): Row indices to drop

    Returns:
        np.ndarray: arr without the rows listed in indices
    """
    # Start from "keep everything", then switch off the requested rows
    keep = np.full(len(arr), True)
    keep[indices] = False
    return arr[keep]
def reconstruct_array(reduced_arr, zero_indices, original_shape):
"""
Reconstruct the original array by reinserting zero rows at their
......@@ -158,7 +82,25 @@ def reconstruct_array(reduced_arr, zero_indices, original_shape):
return reconstructed
# Function to save a NumPy array back to an XML file with new-line-separated entries
def save_numpy_to_xml(data, file_path, rows, cols):
def save_numpy_to_xml(
data:np.ndarray,
file_path:str,
rows:float, cols:float
) -> None:
'''
Saves a numpy array to a new XML file with the given number of rows and columns.
Parameters
----------
data : np.ndarray
NumPy array to save.
file_path : str
Path to the XML file.
rows : float
Row identifier for XML file.
cols : float
Column identifier for XML file.
'''
# Create the root element
root = ET.Element("xml")
......
......@@ -57,6 +57,7 @@ def integrate_multipatch(multipatch, fields):
return integration_sum
def compute_integral_error(multipatch, fields_original, fields_recreated, norm="l2"):
"""Compute integral error between original and recreated fields"""
fields_error = []
for patch, field_orig, field_recreated in zip(multipatch.patches, fields_original, fields_recreated):
field_error = patch.copy()
......@@ -77,7 +78,8 @@ def compute_integral_error(multipatch, fields_original, fields_recreated, norm="
return error_integral
# Load XML file to numpy array
def get_solution_vectors(file_path, two_dimensional=False):
def get_solution_vectors(file_path:str, two_dimensional=False):
"""Load XML file to numpy array"""
# Parse the XML file
tree = ET.parse(file_path)
root = tree.getroot()
......@@ -117,96 +119,8 @@ def show_multipatch_field(mp, solution_vectors, data_name="solution"):
data_mp = sp.Multipatch(splines=spline_data_list)
mp.spline_data[data_name] = data_mp
mp.show_options(data=data_name, **common_show_options)
# mp.show()
return mp
# def load_geometry(filename, degree_elevations=0):
# """
# Load geometry and for velocity perform one degree elevation (Taylor-Hood elements)
# filename: str
# Filename of xml-file
# """
# microstructure = sp.io.gismo.load(filename)[0]
# if degree_elevations > 0:
# [patch.elevate_degrees([0,1]*(degree_elevations)) for patch in microstructure.patches]
# patches_p_elevated = []
# for patch in microstructure.patches:
# patch_elevated = patch.copy()
# patch_elevated.elevate_degrees([0,1])
# patches_p_elevated.append(patch_elevated)
# microstructure_vel = sp.Multipatch(splines=patches_p_elevated)
# microstructure_vel.determine_interfaces()
# return microstructure, microstructure_vel
# def load_geometry(filename, degree_elevations=0, h_refinements=0):
# """
# Load geometry and for velocity perform one degree elevation (Taylor-Hood elements)
# filename: str
# Filename of xml-file
# """
# assert(degree_elevations >= 0), "Degree elevations must be non-negative"
# assert(h_refinements >= 0), "H-refinements must be non-negative"
# assert(type(degree_elevations) == int), "Degree elevations must be an integer"
# assert(type(h_refinements) == int), "H-refinements must be an integer"
# # Load geometry from xml-file
# microstructure = sp.io.gismo.load(filename)[0]
# # Perform degree elevations if simulations also were run with degree elevations
# if degree_elevations > 0:
# [patch.elevate_degrees([0,1]*degree_elevations) for patch in microstructure.patches]
# # Create new list of patches for h-refinement
# patches_refined = []
# # Go through patches of microstructure
# for patch in microstructure.patches:
# patch_elevated = patch.copy()
# # Convert Bezier to Bspline
# patch_elevated = patch_elevated.bspline
# # Perform h-refinements
# for _ in range(h_refinements):
# patch_elevated.uniform_refine([0,1])
# # Extract Bezier patches for each knot span
# patches_refined.append(patch_elevated)
# # new_refined_patches = patch_elevated.extract_bezier_patches()
# # # Add patches as new refined patches
# # patches_refined += new_refined_patches
# # Group all patches into one Multipatch object
# microstructure_refined = sp.Multipatch(splines=patches_refined)
# # Determine the interfaces for the multipatch objects
# microstructure_refined.determine_interfaces()
# # Elevate the degrees of the velocity patches for Taylor-Hood elements
# patches_p_elevated = []
# for patch in microstructure.patches:
# patch_elevated = patch.copy()
# patch_elevated.elevate_degrees([0,1])
# patches_p_elevated.append(patch_elevated)
# microstructure_refined_vel = sp.Multipatch(splines=patches_p_elevated)
# microstructure_refined_vel.determine_interfaces()
# # Create a new Multipatch object for the velocity field
# # Create new list of patches for h-refinement
# patches_refined = []
# # Go through patches of microstructure
# for patch in microstructure_refined_vel.patches:
# patch_elevated = patch.copy()
# # Convert Bezier to Bspline
# patch_elevated = patch_elevated.bspline
# # Perform h-refinements
# for _ in range(h_refinements):
# patch_elevated.uniform_refine([0,1])
# # Extract Bezier patches for each knot span
# patches_refined.append(patch_elevated)
# microstructure_refined_vel = sp.Multipatch(splines=patches_refined)
# microstructure_refined_vel.determine_interfaces()
# return microstructure_refined, microstructure_refined_vel
def load_geometry(filename, degree_elevations=0, h_refinements=0):
"""
......@@ -252,9 +166,6 @@ def load_geometry(filename, degree_elevations=0, h_refinements=0):
# Extract Bezier patches for each knot span
patches_refined.append(patch_elevated)
patches_refined_vel.append(patch_elevated_vel)
# new_refined_patches = patch_elevated.extract_bezier_patches()
# # Add patches as new refined patches
# patches_refined += new_refined_patches
# Group all patches into one Multipatch object
microstructure_refined = sp.Multipatch(splines=patches_refined)
......
import splinepy as sp
import numpy as np
import xml.etree.ElementTree as ET
from os import path
from splinepy.helpme.integrate import _get_integral_measure, _get_quadrature_information
# Number of degree elevations applied when loading the geometry
DEGREE_ELEVATIONS = 1
# NOTE(review): working directory and geometry file path were redacted in
# this copy — fill in real paths before running.
WDIR = "--------------------------------------"
GEOMETRY_FILE = "-----------------------------"
# XML files with the original per-patch solution vectors ...
PRESSURE_FILE = path.join(WDIR, "pressure_field_patches.xml")
VELOCITY_FILE = path.join(WDIR, "velocity_field_patches.xml")
# ... and the reconstructed ("rec") counterparts used for error computation
PRESSURE_REC_FILE = path.join(WDIR, "pressure_field_rec_patches.xml")
VELOCITY_REC_FILE = path.join(WDIR, "velocity_field_rec_patches.xml")
# Plot styling shared by every spline visualization in this script
common_show_options = {
    "cmap": "jet",
    "scalarbar": True,
    "lighting": "off",
    "control_points": False,
    "knots": False,
}
# Define error norm calculations (PEP 8: named functions instead of
# lambdas assigned to names, so tracebacks and repr show real names)
def l1func(orig, rec):
    """Pointwise absolute error |orig - rec|."""
    return np.abs(orig - rec)

def l2func(orig, rec):
    """Pointwise squared error (orig - rec)**2."""
    return np.power(orig-rec, 2)

def l1relfunc(orig, rec):
    """Pointwise relative absolute error |(orig - rec) / orig|."""
    return np.abs((orig-rec) / orig)

def l2relfunc(orig, rec):
    """Pointwise relative squared error |(orig - rec)**2 / orig|."""
    return np.abs(np.power(orig-rec, 2) / orig)

# Dispatch table: norm name -> pointwise error function
norm_funcs = {
    "l1": l1func,
    "l2": l2func,
    "l1_rel": l1relfunc,
    "l2_rel": l2relfunc
}
def integrate(bezier, field):
    """Integrate a field over a single Bezier patch via quadrature."""
    # Geometric measure (Jacobian determinant) of the patch
    measure = _get_integral_measure(bezier)
    # Default-order quadrature points and weights for this patch
    positions, weights = _get_quadrature_information(bezier, orders=None)
    # Weighted sum of field samples scaled by the geometric measure
    return np.einsum(
        "i...,i,i->...",
        field.evaluate(positions),
        measure(bezier, positions),
        weights,
        optimize=True
    )
def integrate_multipatch(multipatch, fields):
    """Sum the patchwise integrals of a field over a multipatch geometry."""
    # Accumulate starting from 0.0 so an empty multipatch yields a float
    return sum(
        (integrate(patch, field_patch)
         for patch, field_patch in zip(multipatch.patches, fields)),
        0.0
    )
def compute_integral_error(multipatch, fields_original, fields_recreated, norm="l2"):
    """Compute the integral error between original and recreated fields.

    norm selects the pointwise error function from norm_funcs; for the
    L2-type norms the square root of the integrated value is returned.
    """
    error_fields = []
    for patch, orig_field, rec_field in zip(multipatch.patches, fields_original, fields_recreated):
        # Build a spline whose control points carry the pointwise error
        err_patch = patch.copy()
        err_patch.cps = norm_funcs[norm](orig_field, rec_field)
        error_fields.append(err_patch)
    total = integrate_multipatch(multipatch, error_fields)
    # L2-type norms require the square root of the integrated squared error
    return np.sqrt(total) if norm in ("l2", "l2_rel") else total
# Load XML file to numpy array
def get_solution_vectors(file_path, two_dimensional=False):
    """Load per-patch solution vectors from an XML file.

    Each child of the XML root holds newline-separated numbers; every
    child becomes one numpy array, reshaped to (-1, 2) when
    two_dimensional is True and (-1, 1) otherwise.
    """
    root = ET.parse(file_path).getroot()
    n_cols = 2 if two_dimensional else 1
    solution_vectors = []
    for child in root:
        values = np.fromstring(child.text.strip(), sep="\n")
        # Replace NaN/inf entries by zero before reshaping
        values = np.nan_to_num(values, copy=False, nan=0.0, posinf=0.0, neginf=0.0)
        solution_vectors.append(values.reshape(-1, n_cols))
    return solution_vectors
def show_multipatch_field(
    mp,
    solution_vectors,
    data_name="solution",
    output_file=None,
    vmin=None,
    vmax=None
):
    """
    Show a field defined on a multipatch

    Parameters
    ------------
    mp: splinepy Multipatch
        Multipatch geometry object
    solution_vectors: list<np.ndarray>
        List of patch solution vectors
    data_name: str
        Name for field (e.g. velocity)
    output_file: str, optional
        If given, the configured plot is additionally exported to this
        SVG file via splinepy's svg exporter.
    vmin: float, optional
        Lower scalarbar bound; must be given together with output_file.
    vmax: float, optional
        Upper scalarbar bound; must be given together with output_file.
    """
    assert isinstance(solution_vectors, list), "Solution vectors have to be a list"
    assert len(mp.patches) == len(solution_vectors), "Mismatch between number of patches and patch solution vectors"
    spline_data_list = []
    for mp_patch, sv in zip(mp.patches, solution_vectors):
        # Copy the geometry patch and attach the solution vector as its
        # control-point data, then register it on the patch for plotting
        data_patch = mp_patch.copy()
        data_patch.cps = sv
        spline_data_list.append(data_patch)
        mp_patch.spline_data[data_name] = spline_data_list[-1]
        mp_patch.show_options(data=data_name, **common_show_options)
    # Also register the combined field on the multipatch itself
    data_mp = sp.Multipatch(splines=spline_data_list)
    mp.spline_data[data_name] = data_mp
    mp.show_options(data=data_name, **common_show_options)
    if output_file is not None:
        assert isinstance(output_file, str), "Output file must be a string"
        assert vmin is not None and vmax is not None, "vmin and vmax must be given for scalarbar"
        sp.io.svg.export(
            output_file,
            mp.patches,
            scalarbar=True,
            vmin=vmin,
            vmax=vmax
        )
def load_geometry(filename, degree_elevations=0):
    """
    Load geometry and for velocity perform one degree elevation (Taylor-Hood elements)

    Parameters
    ----------
    filename: str
        Filename of xml-file
    degree_elevations: int
        Number of additional degree elevations applied to every patch
        in both parametric directions (0 = use the geometry as loaded).

    Returns
    -------
    tuple
        (microstructure, microstructure_vel): the (pressure) geometry and
        the once-more-elevated velocity geometry.
    """
    microstructure = sp.io.gismo.load(filename)[0]
    if degree_elevations > 0:
        # Plain loop instead of a side-effect list comprehension:
        # elevate each patch degree_elevations times in both directions.
        for patch in microstructure.patches:
            patch.elevate_degrees([0, 1] * degree_elevations)
    # Velocity space: elevate every patch once more than the pressure
    # space (Taylor-Hood element pairing).
    patches_p_elevated = []
    for patch in microstructure.patches:
        patch_elevated = patch.copy()
        patch_elevated.elevate_degrees([0, 1])
        patches_p_elevated.append(patch_elevated)
    microstructure_vel = sp.Multipatch(splines=patches_p_elevated)
    microstructure_vel.determine_interfaces()
    return microstructure, microstructure_vel
if __name__ == "__main__":
    # Get solution vector for pressure and velocity
    pressure_data = get_solution_vectors(file_path=PRESSURE_FILE)
    velocity_data = get_solution_vectors(file_path=VELOCITY_FILE, two_dimensional=True)
    # Reconstructed counterparts used for the error computation below
    pressure_rec_data = get_solution_vectors(file_path=PRESSURE_REC_FILE)
    velocity_rec_data = get_solution_vectors(file_path=VELOCITY_REC_FILE, two_dimensional=True)
    # Load the geometry
    microstructure, ms_vel = load_geometry(GEOMETRY_FILE, degree_elevations=DEGREE_ELEVATIONS)
    # Show pressure and velocity field
    show_multipatch_field(microstructure, pressure_data, data_name="pressure")
    show_multipatch_field(
        ms_vel,
        velocity_data,
        data_name="velocity",
        output_file="velocity_field.svg",
        vmin=0.0, # Caution: vmin and vmax for scalarbar are hardcoded right now
        vmax=0.00119
    )
    # Compute errors in every available norm, printing each value
    norms = list(norm_funcs.keys())
    error_values = []
    print("-"*80)
    print("Pressure")
    for norm in norms:
        error_value = compute_integral_error(
            multipatch=microstructure,
            fields_original=pressure_data,
            fields_recreated=pressure_rec_data,
            norm=norm
        )
        error_values.append(error_value)
        print(norm, error_value)
    print("-"*80)
    print("Velocity")
    # NOTE(review): re-binding error_values discards the pressure errors
    # collected above — confirm they are not needed after printing.
    error_values = []
    for norm in norms:
        error_value = compute_integral_error(
            multipatch=ms_vel,
            fields_original=velocity_data,
            fields_recreated=velocity_rec_data,
            norm=norm
        )
        error_values.append(error_value)
        print(norm, error_value)
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment