
Commit 1b24f753 authored by Lambert Theisen

Add flake8 compliance

parent 6f2fe36a
Pipeline #307447 failed with stages in 1 minute and 19 seconds
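The pipeline change below adds a dedicated test:flake8 job that lints the fenicsR13 package, the examples, and the tests directories. As a rough local equivalent of that job (a minimal sketch, assuming flake8 is installed and the script is run from the repository root), one could use:

import subprocess

# Minimal local stand-in for the new CI job "test:flake8":
# lint the same directories that the pipeline checks.
subprocess.run(
    ["flake8", "fenicsR13", "examples", "tests"],
    check=True,  # raises CalledProcessError if flake8 reports violations
)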
......@@ -69,6 +69,12 @@ build:docs:
tags:
- docker
+ test:flake8:
+ extends: .test
+ script:
+ - cd ${APP_DIRECTORY}
+ - flake8 fenicsR13 examples tests
test:pytest:
extends: .test
script:
......
......@@ -10,7 +10,7 @@ if not gui:
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
- ax.plot(x,y, "-o", label="Channel Flow")
+ ax.plot(x, y, "-o", label="Channel Flow")
ax.set_xscale("log")
plt.xlabel("Knudsen number")
......
......@@ -10,7 +10,7 @@ if not gui:
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
- ax.plot(x,y, "-o", label="Channel Flow")
+ ax.plot(x, y, "-o", label="Channel Flow")
ax.set_xscale("log")
plt.xlabel("Knudsen number")
......
......@@ -7,6 +7,7 @@ This file is executed by ``pytest`` to have good CI.
import subprocess
import pytest
class TestExamples(object):
"""
Class to bundle all examples tests.
......
......@@ -20,6 +20,7 @@ from fenicsR13.input import Input
from fenicsR13.solver import Solver
from fenicsR13.postprocessor import Postprocessor
def print_information():
r"""
Print program name and information.
......@@ -52,6 +53,7 @@ def print_information():
|_| \___|_| |_|_|\___|___/_| \_\_|____/
""")
def main():
"""
Execute the main program.
......@@ -76,7 +78,7 @@ def main():
print_information()
# Dolfin settings
- df.set_log_level(100) # 1: all logs
+ df.set_log_level(100)  # 1: all logs
df.parameters["ghost_mode"] = "shared_vertex"
inputfile = sys.argv[1] if len(sys.argv) == 2 else "input.yml"
......@@ -127,7 +129,7 @@ def main():
**errors
})
- if p == len(mesh_names)-1: # after last mesh
+ if p == len(mesh_names) - 1:  # after last mesh
postp = Postprocessor(data, params["output_folder"])
postp.write_errors()
postp.plot_errors(show_plot)
......@@ -135,5 +137,6 @@ def main():
solver = None
gc.collect()
if __name__ == '__main__':
main()
......@@ -28,6 +28,7 @@ import dolfin as df
GMSH_PATH = "gmsh"
GEO_NAME = "ring"
def geo_to_h5():
"Convert given geo-file to a h5-mesh."
......
......@@ -10,6 +10,7 @@ from json import dumps
import yaml
from cerberus import Validator
class Input:
"""
Class to handle the input file in YAML_ format.
......@@ -454,7 +455,7 @@ class Input:
"""
try:
return reduce(operator.getitem, map_list, self.dict)
- except:
+ except Exception:
raise Exception("Dict has no entry with the key:", map_list)
def set_in_input(self, map_list, value):
......
......@@ -7,6 +7,7 @@ Currently, the only mesh format is h5.
import os
import dolfin as df
class H5Mesh:
"""
Mesh class.
......
......@@ -11,6 +11,7 @@ from math import sqrt, ceil, log, log10
import numpy as np
import matplotlib.pyplot as plt
class Postprocessor:
"""
The class for postprocessing.
......@@ -73,7 +74,7 @@ class Postprocessor:
matplotlib.use('pdf')
import matplotlib.pyplot as plt # pylint: disable=C0413
"""
filename = "convergence_plot_" + self.output_folder + ".pdf"
filename = "convergence_plot_" + self.output_folder + ".pdf"
if not show_popup:
plt.switch_backend('agg')
......@@ -87,12 +88,12 @@ class Postprocessor:
plt_sidelength_x = ceil(sqrt(num_fields))
plt_sidelength_y = plt_sidelength_x
- while plt_sidelength_y * (plt_sidelength_x-1) >= num_fields:
+ while plt_sidelength_y * (plt_sidelength_x - 1) >= num_fields:
plt_sidelength_x -= 1
for i, key in enumerate(raw_data):
field = [df[key] for df in data]
- plot_i = plt.subplot(plt_sidelength_x, plt_sidelength_y, (i+1))
+ plot_i = plt.subplot(plt_sidelength_x, plt_sidelength_y, (i + 1))
for j, etype in enumerate(field[0]):
values = [df[etype] for df in field]
......@@ -102,32 +103,33 @@ class Postprocessor:
# Add slope marker
use_top = j == 1
bot = np.array([
- [h[-1], 0.5*values[-1]],
- [h[-2], 0.5*values[-1]],
- [h[-2], 0.5*values[-2]]
+ [h[-1], 0.5 * values[-1]],
+ [h[-2], 0.5 * values[-1]],
+ [h[-2], 0.5 * values[-2]]
])
top = np.array([
- [h[-1], 2.0*values[-1]],
- [h[-2], 2.0*values[-2]],
- [h[-1], 2.0*values[-2]]
+ [h[-1], 2.0 * values[-1]],
+ [h[-2], 2.0 * values[-2]],
+ [h[-1], 2.0 * values[-2]]
])
tria = top if use_top else bot
slope_marker = plt.Polygon(
tria, color=plot_i.get_lines()[-1].get_color(),
alpha=1.5, fill=False
- )
+ )
plot_i.add_patch(slope_marker)
- conv_rate = (
- (log(values[-2]) - log(values[-1]))
- / (log(h[-2]) - log(h[-1]))
- )
+ delta_values = log(values[-2]) - log(values[-1])
+ delta_h = log(h[-2]) - log(h[-1])
+ conv_rate = delta_values / delta_h
anchor_x = h[-1] if use_top else h[-2]
anchor_y = (
10**(
- (log10(2.0*values[-2])+log10(2.0*values[-1]))/2
+ (log10(2.0 * values[-2]) + log10(2.0 * values[-1])) / 2
) if use_top
- else 10**((log10(0.5*values[-2])+log10(0.5*values[-1]))/2)
+ else 10**(
+ (log10(0.5 * values[-2]) + log10(0.5 * values[-1])
+ ) / 2)
)
h_align = "left" if use_top else "right"
plot_i.text(
......@@ -136,9 +138,9 @@ class Postprocessor:
)
# Add order slopes
- plt.loglog(h, np.array(2*np.power(h, 1)), "--", label="O(h^1)")
- plt.loglog(h, np.array(0.02*np.power(h, 2)), "--", label="O(h^2)")
- plt.loglog(h, np.array(0.02*np.power(h, 3)), "--", label="O(h^3)")
+ plt.loglog(h, np.array(2 * np.power(h, 1)), "--", label="O(h^1)")
+ plt.loglog(h, np.array(0.02 * np.power(h, 2)), "--", label="O(h^2)")
+ plt.loglog(h, np.array(0.02 * np.power(h, 3)), "--", label="O(h^3)")
# Add information
plt.xlabel("max(h)")
......@@ -178,7 +180,10 @@ class Postprocessor:
writer.writerow(
["h"] + list(
chain.from_iterable([
[first+"_"+second for second in data[0].get(first, "")]
[
first + "_" + second
for second in data[0].get(first, "")
]
for first in [
first for first in data[0] if first != "h"
]
......
# pylint: disable=invalid-name,too-many-lines
# pylint: disable=not-callable
# noqa: E226
"""
Solver module, contains the Solver class.
......@@ -61,7 +62,7 @@ class Solver:
def __init__(self, params, mesh, time):
"""Initialize solver and setup variables from input parameters."""
- self.params = params #: Doctest
+ self.params = params  #: Doctest
self.mesh = mesh.mesh
self.regions = mesh.subdomains
self.boundaries = mesh.boundaries
......@@ -212,7 +213,7 @@ class Solver:
cpp_strings = [str(i) for i in cpp_strings]
if len(cpp_strings) == 2:
return df.Expression(
- cpp_strings, # strange that no cast to list is needed
+ cpp_strings,  # strange that no cast to list is needed
degree=2,
phi=phi,
R=R
......@@ -285,7 +286,7 @@ class Solver:
regs_specified = list(self.regs.keys())
for reg_id in region_ids:
- if not reg_id in [0] + regs_specified: # inner zero allowed
+ if reg_id not in [0] + regs_specified:  # inner zero allowed
raise Exception("Mesh region {} has no params!".format(reg_id))
def __check_bcs(self):
......@@ -298,7 +299,7 @@ class Solver:
bcs_specified = list(self.bcs.keys())
for edge_id in boundary_ids:
- if not edge_id in [0] + bcs_specified: # inner zero allowed
+ if edge_id not in [0] + bcs_specified:  # inner zero allowed
raise Exception("Mesh edge {} has no bcs!".format(edge_id))
def assemble(self):
......@@ -515,7 +516,7 @@ class Solver:
# Define mesh measuers
h_msh = df.CellDiameter(mesh)
- h_avg = (h_msh("+") + h_msh("-"))/2.0
+ h_avg = (h_msh("+") + h_msh("-")) / 2.0
# TODO: Study this, is it more precise?
# fa = df.FacetArea(mesh)
......@@ -556,14 +557,19 @@ class Solver:
# => tangential (tx,ty) = (-ny,nx) = perp(n) only for 2D
n_vec = df.FacetNormal(mesh)
t_vec = ufl.perp(n_vec)
def n(rank1):
return df.dot(rank1, n_vec)
def t(rank1):
return df.dot(rank1, t_vec)
def nn(rank2):
return df.dot(rank2 * n_vec, n_vec)
def tt(rank2):
return df.dot(rank2 * t_vec, t_vec)
def nt(rank2):
return df.dot(rank2 * n_vec, t_vec)
......@@ -574,18 +580,19 @@ class Solver:
# 4/5-24/75 = (60-24)/75 = 36/75 = 12/25
return sum([(
# => 24/25*stf(grad)*grad
- + 24/25 * regs[reg]["kn"] * df.inner(
+ + 24 / 25 * regs[reg]["kn"] * df.inner(
df.sym(df.grad(s)), df.sym(df.grad(r))
)
- - 24/75 * regs[reg]["kn"] * df.div(s) * df.div(r)
+ - 24 / 75 * regs[reg]["kn"] * df.div(s) * df.div(r)
# For Delta-term, works for R13 but fails for heat:
- + 4/5 * cpl * regs[reg]["kn"] * df.div(s) * df.div(r)
- + 4/15 * (1/regs[reg]["kn"]) * df.inner(s, r)
+ + 4 / 5 * cpl * regs[reg]["kn"] * df.div(s) * df.div(r)
+ + 4 / 15 * (1 / regs[reg]["kn"]) * df.inner(s, r)
) * df.dx(reg) for reg in regs.keys()]) + sum([(
- + 1/(2*bcs[bc]["chi_tilde"]) * n(s) * n(r)
- + 11/25 * bcs[bc]["chi_tilde"] * t(s) * t(r)
- + cpl * 1/25 * bcs[bc]["chi_tilde"] * t(s) * t(r)
+ + 1 / (2 * bcs[bc]["chi_tilde"]) * n(s) * n(r)
+ + 11 / 25 * bcs[bc]["chi_tilde"] * t(s) * t(r)
+ + cpl * 1 / 25 * bcs[bc]["chi_tilde"] * t(s) * t(r)
) * df.ds(bc) for bc in bcs.keys()])
def d(si, ps):
# Notes:
# 21/20+3/40=45/40=9/8
......@@ -594,58 +601,67 @@ class Solver:
to.stf3d3(to.grad3dOf2(to.gen3dTF2(si))),
to.stf3d3(to.grad3dOf2(to.gen3dTF2(ps)))
)
- + (1/(2*regs[reg]["kn"])) * df.inner(
+ + (1 / (2 * regs[reg]["kn"])) * df.inner(
to.gen3dTF2(si), to.gen3dTF2(ps)
)
) * df.dx(reg) for reg in regs.keys()]) + sum([(
+ bcs[bc]["chi_tilde"] * 21/20 * nn(si) * nn(ps)
+ bcs[bc]["chi_tilde"] * cpl * 3/40 * nn(si) * nn(ps)
+ bcs[bc]["chi_tilde"] * 21 / 20 * nn(si) * nn(ps)
+ bcs[bc]["chi_tilde"] * cpl * 3 / 40 * nn(si) * nn(ps)
+ bcs[bc]["chi_tilde"] * (
- (tt(si) + (1/2) * nn(si)) *
- (tt(ps) + (1/2) * nn(ps))
+ (tt(si) + (1 / 2) * nn(si)) *
+ (tt(ps) + (1 / 2) * nn(ps))
)
- + (1/bcs[bc]["chi_tilde"]) * nt(si) * nt(ps)
+ + (1 / bcs[bc]["chi_tilde"]) * nt(si) * nt(ps)
+ bcs[bc]["epsilon_w"] * bcs[bc]["chi_tilde"] * nn(si) * nn(ps)
) * df.ds(bc) for bc in bcs.keys()])
def h(p, q):
return sum([(
bcs[bc]["epsilon_w"] * bcs[bc]["chi_tilde"] * p * q
) * df.ds(bc) for bc in bcs.keys()])
# 2) Offdiagonals:
def b(th, r):
return sum([(
th * df.div(r)
) * df.dx(reg) for reg in regs.keys()])
def c(r, si):
return cpl * (sum([(
- 2/5 * df.inner(si, df.grad(r))
+ 2 / 5 * df.inner(si, df.grad(r))
) * df.dx(reg) for reg in regs.keys()]) - sum([(
- 3/20 * nn(si) * n(r)
- + 1/5 * nt(si) * t(r)
+ 3 / 20 * nn(si) * n(r)
+ + 1 / 5 * nt(si) * t(r)
) * df.ds(bc) for bc in bcs.keys()]))
def e(u, ps):
return sum([(
df.dot(df.div(ps), u)
) * df.dx(reg) for reg in regs.keys()])
def f(p, ps):
return sum([(
bcs[bc]["epsilon_w"] * bcs[bc]["chi_tilde"] * p * nn(ps)
) * df.ds(bc) for bc in bcs.keys()])
def g(p, v):
return sum([(
df.inner(v, df.grad(p))
) * df.dx(reg) for reg in regs.keys()])
# 3) CIP Stabilization:
def j_theta(theta, kappa):
return (
+ delta_theta * h_avg**3 *
df.jump(df.grad(theta), n_vec) * df.jump(df.grad(kappa), n_vec)
) * df.dS
def j_u(u, v):
return (
+ delta_u * h_avg**3 *
df.dot(df.jump(df.grad(u), n_vec), df.jump(df.grad(v), n_vec))
) * df.dS
def j_p(p, q):
return (
+ delta_p * h_avg *
......@@ -668,7 +684,7 @@ class Solver:
bcs[bc]["theta_w"] * n(r)
) * df.ds(bc) for bc in bcs.keys()])
# Use div(u)=f_mass to remain sym. (density-form doesnt need this):
- L[1] = (f_heat-f_mass) * kappa * df.dx
+ L[1] = (f_heat - f_mass) * kappa * df.dx
L[2] = - sum([(
+ bcs[bc]["u_t_w"] * nt(psi)
+ (
......@@ -779,10 +795,10 @@ class Solver:
raise Exception("Massflow: {} is no boundary.".format(bc_id))
n = df.FacetNormal(self.mesh)
mass_flow_rate = df.assemble(
df.inner(self.sol["u"], n)*df.ds(bc_id)
df.inner(self.sol["u"], n) * df.ds(bc_id)
)
print("mass flow rate of BC", bc_id, ":", mass_flow_rate)
self.write_content_to_file("massflow_"+str(bc_id), mass_flow_rate)
self.write_content_to_file("massflow_" + str(bc_id), mass_flow_rate)
def __load_exact_solution(self):
"""
......@@ -1038,8 +1054,8 @@ class Solver:
)
for i in range(dofs)
]
- errs_f_L2 = [x/y for x, y in zip(errs_f_L2, max_esols)]
- errs_v_linf = [x/y for x, y in zip(errs_v_linf, max_esols)]
+ errs_f_L2 = [x / y for x, y in zip(errs_f_L2, max_esols)]
+ errs_v_linf = [x / y for x, y in zip(errs_v_linf, max_esols)]
print("Error " + str(name_) + " L_2:", errs_f_L2)
print("Error " + str(name_) + " l_inf:", errs_v_linf)
......@@ -1266,7 +1282,7 @@ class Solver:
self.output_folder + name + "_" + str(self.time) + ".xdmf"
)
with df.XDMFFile(self.mesh.mpi_comm(), fname_xdmf) as file:
- for degree in range(5): # test until degree five
+ for degree in range(5):  # test until degree five
# Writing symmetric tensors crashes.
# Therefore project symmetric tensor in nonsymmetric space
# This is only a temporary fix, see:
......@@ -1274,15 +1290,15 @@ class Solver:
# ...writing-symmetric-tensor-function-fails/1136
el_symm = df.TensorElement(
df.FiniteElement(
"Lagrange", df.triangle, degree+1
"Lagrange", df.triangle, degree + 1
), symmetry=True
- ) # symmetric tensor element
+ )  # symmetric tensor element
el_sol = field.ufl_function_space().ufl_element()
if el_sol == el_symm:
# Remove symmetry with projection
field = df.project(
field, df.TensorFunctionSpace(
self.mesh, "Lagrange", degree+1
self.mesh, "Lagrange", degree + 1
)
)
break
......@@ -1293,7 +1309,7 @@ class Solver:
file.write(field, self.time)
if write_pdf:
- import matplotlib.pyplot as plt # pylint: disable=C0415
+ import matplotlib.pyplot as plt  # pylint: disable=C0415
plt.switch_backend('agg')
dimension = len(field.value_shape())
......@@ -1327,7 +1343,7 @@ class Solver:
}
}
for i in range(components):
fieldname = name + "_" + str(indexMap[dimension][i+1])
fieldname = name + "_" + str(indexMap[dimension][i + 1])
fname_pdf = (
self.output_folder + fieldname
+ "_" + str(self.time) + ".pdf"
......
......@@ -7,9 +7,9 @@ Module to gather all additional tensor operations not present in UFL.
This especially includes all 3D operations and operations on tensors with rank
above 2.
- .. [STR2005] Struchtrup, H. (2005). Macroscopic transport equations for rarefied
- gas flows. In Macroscopic Transport Equations for Rarefied Gas Flows.
- Springer, Berlin, Heidelberg.
+ .. [STR2005] Struchtrup, H. (2005). Macroscopic transport equations for
+ rarefied gas flows. In Macroscopic Transport Equations for Rarefied Gas
+ Flows. Springer, Berlin, Heidelberg.
.. [TOR2018] Torrilhon, M. et al. (2018). “Kinetic Theory of Non-Equilibrium
Gas Flows:
......@@ -20,6 +20,7 @@ above 2.
import dolfin as df
import ufl
def stf3d2(rank2_2d):
r"""
Return the synthetic 3D symmetric and trace-free part of a 2D 2-tensor.
......@@ -38,8 +39,9 @@ def stf3d2(rank2_2d):
B &= (A)_\mathrm{dev} = \frac{1}{2} (A)_\mathrm{sym}
- \frac{1}{3} \mathrm{tr}(A) I_{2 \times 2}
"""
- symm = 1/2 * (rank2_2d + ufl.transpose(rank2_2d))
- return symm - (1/3) * ufl.tr(symm) * ufl.Identity(2)
+ symm = 1 / 2 * (rank2_2d + ufl.transpose(rank2_2d))
+ return symm - (1 / 3) * ufl.tr(symm) * ufl.Identity(2)
def sym3d3(rank3_3d):
r"""
......@@ -54,13 +56,14 @@ def sym3d3(rank3_3d):
\right)
"""
i, j, k = ufl.indices(3)
- symm_ijk = 1/6 * (
+ symm_ijk = 1 / 6 * (
# All permutations
+ rank3_3d[i, j, k] + rank3_3d[i, k, j] + rank3_3d[j, i, k]
+ rank3_3d[j, k, i] + rank3_3d[k, i, j] + rank3_3d[k, j, i]
)
return ufl.as_tensor(symm_ijk, (i, j, k))
def stf3d3(rank3_3d):
r"""
Return the symmetric and trace-free part of a 3D 3-tensor.
......@@ -107,18 +110,19 @@ def stf3d3(rank3_3d):
\right) \delta_{i j}
\biggr]
"""
- i, j, k, l = ufl.indices(4)
+ i, j, k, L = ufl.indices(4)
delta = df.Identity(3)
sym_ijk = sym3d3(rank3_3d)[i, j, k]
- traces_ijk = 1/5 * (
- + sym3d3(rank3_3d)[i, l, l] * delta[j, k]
- + sym3d3(rank3_3d)[l, j, l] * delta[i, k]
- + sym3d3(rank3_3d)[l, l, k] * delta[i, j]
+ traces_ijk = 1 / 5 * (
+ + sym3d3(rank3_3d)[i, L, L] * delta[j, k]
+ + sym3d3(rank3_3d)[L, j, L] * delta[i, k]
+ + sym3d3(rank3_3d)[L, L, k] * delta[i, j]
)
tracefree_ijk = sym_ijk - traces_ijk
return ufl.as_tensor(tracefree_ijk, (i, j, k))
def gen3dTF2(rank2_2d):
r"""
Generate a 3D tracefree 2-tensor from a 2D 2-tensor.
......@@ -162,9 +166,10 @@ def gen3dTF2(rank2_2d):
return df.as_tensor([
[rank2_2d[0, 0], rank2_2d[0, 1], 0],
[rank2_2d[1, 0], rank2_2d[1, 1], 0],
- [0, 0, -rank2_2d[0, 0]-rank2_2d[1, 1]]
+ [0, 0, -rank2_2d[0, 0] - rank2_2d[1, 1]]
])
def gen3d2(rank2_2d):
r"""
Generate a 3D 2-tensor from a 2D 2-tensor (add zeros to last dimensions).
......@@ -212,6 +217,7 @@ def gen3d2(rank2_2d):
[0, 0, 0]
])
def grad3dOf2(rank2_3d):
r"""
Return 3D gradient of 3D 2-tensor.
......
[flake8]
ignore = E221
exclude = .git,__init__.py
max-complexity = 25
max-line-length = 80
per-file-ignores =
fenicsR13/solver.py:W503,W504
fenicsR13/tensoroperations.py:W503,W504
......@@ -7,6 +7,7 @@ This file is executed by ``pytest`` to have good CI.
import subprocess
import pytest
class TestHeatConvergence(object):
"""
Class to bundle all stress convergence tests.
......
......@@ -7,6 +7,7 @@ This file is executed by ``pytest`` to have good CI.
import subprocess
import pytest
class TestR13Convergence(object):
"""
Class to bundle all stress convergence tests.
......
......@@ -7,6 +7,7 @@ This file is executed by ``pytest`` to have good CI.
import subprocess
import pytest
class TestStressConvergence(object):
"""
Class to bundle all stress convergence tests.
......