Commit aea7255c authored by Jan Habscheid

Euler in Deepxde

parent aa3b1d4a
1 merge request: !10 Clean up
%% Cell type:code id: tags:
``` python
import deepxde as dde
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d

# Import the physical coefficient functions
from HeatCoefficients import *
```
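%% Cell type:markdown id: tags:

The idea is Rothe-type time stepping: discretize in time first, then solve a stationary PINN problem for each step. Since the diffusion term below is evaluated at the new time level, the scheme is the backward (implicit) Euler method:

$$\frac{u^{n}(x) - u^{n-1}(x)}{\Delta t} - 0.4\,\frac{\partial^2 u^{n}}{\partial x^2}(x) = 0,$$

where $u^{n-1}$ is obtained by interpolating the previous network's prediction on a fixed grid.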
%% Cell type:code id: tags:
``` python
# Define spatial domain
geom = dde.geometry.Interval(0, 1)
# Discretized time steps: 3 points on [0, 0.1], i.e. two Euler steps
time_steps = np.linspace(0, 0.1, 3)
# Dirichlet boundary conditions: u(0) = 0, u(1) = 1
def boundary_l(x, on_boundary):
    return on_boundary and np.isclose(x[0], 0)

def boundary_r(x, on_boundary):
    return on_boundary and np.isclose(x[0], 1)

bc_l = dde.DirichletBC(geom, lambda x: 0, boundary_l)
bc_r = dde.DirichletBC(geom, lambda x: 1, boundary_r)
# Initialize previous solution
x_train = np.linspace(0, 1, 1001)[:, None]
prev_solution_values = np.zeros_like(x_train) # Initial condition T(x,0) = 0
prev_solution = interp1d(x_train.flatten(), prev_solution_values.flatten(), kind="linear", fill_value="extrapolate")
models_trained = []
for n in range(1, len(time_steps)):
    t_n = time_steps[n]
    t_prev = time_steps[n - 1]
    dt = t_n - t_prev

    # PDE residual for this step. The diffusion term is evaluated at the
    # new time level, so this is the backward (implicit) Euler scheme.
    # Loop variables are bound as default arguments so the closure keeps
    # its own dt, n, and previous solution.
    def pde(x, u, dt=dt, n=n, prev_solution=prev_solution):
        u_xx = dde.grad.hessian(u, x, i=0, j=0)  # Second spatial derivative
        if n == 1:
            # First step: the previous solution is the initial condition T(x,0) = 0
            u_prev_tf = tf.zeros_like(x[:, 0:1])
        else:
            # Interpolate the previous solution (x is a tensor -> evaluate in NumPy)
            u_prev_tf = tf.py_function(
                func=lambda xs: prev_solution(xs.numpy()),
                inp=[x[:, 0:1]],
                Tout=tf.float32,
            )
            u_prev_tf = tf.reshape(u_prev_tf, (-1, 1))
        u_t = (u - u_prev_tf) / dt  # Backward-Euler time difference
        return u_t - 0.4 * u_xx  # Heat equation: u_t = 0.4 * u_xx

    # Stationary PDE problem for this time step
    data = dde.data.PDE(
        geom,
        pde,
        [bc_l, bc_r],
        num_domain=1000,
        num_boundary=50,
    )

    # Define and train the model
    net = dde.nn.FNN([1] + [32] * 3 + [1], "tanh", "Glorot uniform")
    # Hard-enforce the boundary conditions u(0) = 0, u(1) = 1
    net.apply_output_transform(lambda x, y: x + x * (1 - x) * y)
    model = dde.Model(data, net)
    model.compile("adam", lr=0.001)
    model.train(iterations=5000)

    # Predict on the grid and rebuild the interpolator for the next step
    prev_solution_values = model.predict(x_train)
    prev_solution = interp1d(
        x_train.flatten(), prev_solution_values.flatten(),
        kind="linear", fill_value="extrapolate",
    )
    models_trained.append(model)
# The final model is the last one trained
```
%% Output
Compiling model...
'compile' took 0.001537 s
Training model...
WARNING:tensorflow:AutoGraph could not transform <function <lambda> at 0x31160cea0> and will run it as-is.
Cause: could not parse the source code of <function <lambda> at 0x31160cea0>: no matching AST found among candidates:
# coding=utf-8
lambda x, on: np.array([on_boundary(x[i], on[i]) for i in range(len(x))])
# coding=utf-8
lambda x, on: np.array([on_boundary1(x[i], on[i]) for i in range(len(x))])
# coding=utf-8
lambda x, on: np.array([on_boundary2(x[i], on[i]) for i in range(len(x))])
To silence this warning, decorate the function with @tf.autograph.experimental.do_not_convert
WARNING:tensorflow:AutoGraph could not transform <function <lambda> at 0x31160d120> and will run it as-is.
Cause: could not parse the source code of <function <lambda> at 0x31160d120>: no matching AST found among candidates:
# coding=utf-8
lambda x, on: np.array([on_boundary(x[i], on[i]) for i in range(len(x))])
# coding=utf-8
lambda x, on: np.array([on_boundary1(x[i], on[i]) for i in range(len(x))])
# coding=utf-8
lambda x, on: np.array([on_boundary2(x[i], on[i]) for i in range(len(x))])
To silence this warning, decorate the function with @tf.autograph.experimental.do_not_convert
Step Train loss Test loss Test metric
0 [1.74e+02, 0.00e+00, 0.00e+00] [1.74e+02, 0.00e+00, 0.00e+00] []
1000 [5.80e-02, 0.00e+00, 0.00e+00] [5.80e-02, 0.00e+00, 0.00e+00] []
2000 [5.58e-03, 0.00e+00, 0.00e+00] [5.58e-03, 0.00e+00, 0.00e+00] []
3000 [2.12e-03, 0.00e+00, 0.00e+00] [2.12e-03, 0.00e+00, 0.00e+00] []
4000 [1.10e-03, 0.00e+00, 0.00e+00] [1.10e-03, 0.00e+00, 0.00e+00] []
5000 [1.54e-02, 0.00e+00, 0.00e+00] [1.54e-02, 0.00e+00, 0.00e+00] []
Best model at step 4000:
train loss: 1.10e-03
test loss: 1.10e-03
test metric: []
'train' took 9.123625 s
Compiling model...
'compile' took 0.000856 s
Training model...
Step Train loss Test loss Test metric
0 [8.90e+01, 0.00e+00, 0.00e+00] [8.90e+01, 0.00e+00, 0.00e+00] []
1000 [9.23e-01, 0.00e+00, 0.00e+00] [9.23e-01, 0.00e+00, 0.00e+00] []
2000 [9.05e-01, 0.00e+00, 0.00e+00] [9.05e-01, 0.00e+00, 0.00e+00] []
3000 [1.46e-03, 0.00e+00, 0.00e+00] [1.46e-03, 0.00e+00, 0.00e+00] []
4000 [9.36e-05, 0.00e+00, 0.00e+00] [9.36e-05, 0.00e+00, 0.00e+00] []
5000 [5.56e-05, 0.00e+00, 0.00e+00] [5.56e-05, 0.00e+00, 0.00e+00] []
Best model at step 5000:
train loss: 5.56e-05
test loss: 5.56e-05
test metric: []
'train' took 9.123659 s
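%% Cell type:markdown id: tags:

A minimal sketch (not part of the original notebook) of the `tf.py_function` bridge used in the residual above: it lets a plain NumPy/SciPy function, such as the `interp1d` interpolant, be evaluated on tensors inside the TensorFlow graph. The names `xg`, `f_np`, and the test points are illustrative only, and the snippet assumes TF2 eager execution.

%% Cell type:code id: tags:

``` python
# Illustrative stand-in for prev_solution: linear interpolant of sin(pi x)
xg = np.linspace(0, 1, 11)
f_np = interp1d(xg, np.sin(np.pi * xg), kind="linear", fill_value="extrapolate")

x_tensor = tf.constant([[0.25], [0.5], [0.75]], dtype=tf.float32)
# Wrap the NumPy interpolant so it can consume and produce tensors
y_tensor = tf.py_function(func=lambda xs: f_np(xs.numpy()), inp=[x_tensor], Tout=tf.float32)
print(y_tensor.numpy())  # ~[[0.698], [1.000], [0.698]]: linear interpolation of sin(pi x)
```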
%% Cell type:code id: tags:
``` python
x_evaluate = np.linspace(0, 1, 101).reshape(-1, 1)
for n, model in enumerate(models_trained, start=1):
    y_evaluate = model.predict(x_evaluate)
    plt.plot(x_evaluate, y_evaluate, label=f"t = {time_steps[n]:.2f}")
plt.xlabel("x")
plt.ylabel("u")
plt.legend()
plt.grid()
plt.show()
```
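%% Cell type:markdown id: tags:

As a sanity check (a sketch added here, not part of the original notebook): with $u(0) = 0$, $u(1) = 1$, $u(x, 0) = 0$ and $u_t = 0.4\,u_{xx}$, separation of variables gives the exact solution

$$u(x, t) = x + \sum_{k=1}^{\infty} \frac{2(-1)^k}{k\pi}\,\sin(k\pi x)\,e^{-0.4\,k^2\pi^2 t}.$$

Two backward-Euler steps on $[0, 0.1]$ carry an $O(\Delta t)$ time-discretization error on top of the training error, so only rough agreement is expected.

%% Cell type:code id: tags:

``` python
def u_exact(x, t, n_terms=200):
    # Truncated series solution of u_t = 0.4 u_xx with u(0)=0, u(1)=1, u(x,0)=0
    k = np.arange(1, n_terms + 1)[None, :]
    series = (2 * (-1.0) ** k / (k * np.pi)) * np.sin(k * np.pi * x) \
        * np.exp(-0.4 * (k * np.pi) ** 2 * t)
    return x.flatten() + series.sum(axis=1)

y_ref = u_exact(x_evaluate, time_steps[-1])
y_pinn = models_trained[-1].predict(x_evaluate).flatten()
print("max abs deviation from the analytical solution:", np.abs(y_pinn - y_ref).max())
```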