diff --git a/.DS_Store b/.DS_Store
index f118b77b97d4584c5b778a0b5e237da7409805e7..40c72f0c8253d186067c2e4acf976c4eac3a713a 100644
Binary files a/.DS_Store and b/.DS_Store differ
diff --git a/Dockerfile b/Dockerfile
index f45a1b7268e837346a137529108d4fa15e586c1c..57caea884c390870d7ef45dd89af4b83f207281a 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -8,7 +8,7 @@ ENV HOME /root
 
 # Create the environment and install packages
 # FEniCSx backend + deepxde (PINN) for tensorflow (ANN)
-RUN conda install -n ${CONDA_DEFAULT_ENV} -c conda-forge fenics-dolfinx=0.8.0 mpich=4.2.1 pyvista=0.43.10 -y
+RUN conda install -n ${CONDA_DEFAULT_ENV} -c conda-forge fenics-dolfinx=0.8.0 mpich=4.2.1 pyvista=0.43.10 sphinx=7.3.7 myst-parser=4.0.0 sphinx-copybutton=0.5.2 sphinx-rtd-theme=3.0.1 -y
 RUN conda install -c conda-forge deepxde==1.12.2 -y
 RUN pip install tensorflow==2.18.0 tensorflow_probability==0.25.0 tf_keras==2.18.0
 
diff --git a/README.md b/README.md
index bc42407a82ba48659d06474ee54315a720b4a21c..8b51ffffa11ec3d56219be9581607bec0192aa37 100644
--- a/README.md
+++ b/README.md
@@ -26,7 +26,7 @@ To rerun the simulation, run the files in the order 01 - 06.
 3. Find the best PINN architecture (use less time steps for tuning, to reduce complexity)
 4. Train the best PINN architecture (now use more time steps)
 5. Evaluate the ANN results
-6. Evaluate the PINN results
+6. Evaluate the PINN results
 
 ## Installation
 
@@ -41,7 +41,7 @@ Run the following script to install all necessary dependencies via conda.
 ``` bash
 conda create --name IceMelting python=3.10 -y
 conda activate IceMelting2
-conda install -c conda-forge fenics-dolfinx=0.8.0 mpich=4.2.1 pyvista=0.43.10 -y
+conda install -c conda-forge fenics-dolfinx=0.8.0 mpich=4.2.1 pyvista=0.43.10 sphinx=7.3.7 myst-parser=4.0.0 sphinx-copybutton=0.5.2 sphinx-rtd-theme=3.0.1 -y
 conda install -c conda-forge deepxde==1.12.2 -y
 pip install tensorflow==2.18.0 tensorflow_probability==0.25.0 tf_keras==2.18.0
 ```
diff --git a/docs/Makefile b/docs/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..d0c3cbf1020d5c292abdedf27627c6abe25e2293
--- /dev/null
+++ b/docs/Makefile
@@ -0,0 +1,20 @@
+# Minimal makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line, and also
+# from the environment for the first two.
+SPHINXOPTS    ?=
+SPHINXBUILD   ?= sphinx-build
+SOURCEDIR     = source
+BUILDDIR      = build
+
+# Put it first so that "make" without argument is like "make help".
+help:
+	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+
+.PHONY: help Makefile
+
+# Catch-all target: route all unknown targets to Sphinx using the new
+# "make mode" option.  $(O) is meant as a shortcut for $(SPHINXOPTS).
+%: Makefile
+	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
diff --git a/docs/make.bat b/docs/make.bat
new file mode 100644
index 0000000000000000000000000000000000000000..747ffb7b3033659bdd2d1e6eae41ecb00358a45e
--- /dev/null
+++ b/docs/make.bat
@@ -0,0 +1,35 @@
+@ECHO OFF
+
+pushd %~dp0
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+	set SPHINXBUILD=sphinx-build
+)
+set SOURCEDIR=source
+set BUILDDIR=build
+
+%SPHINXBUILD% >NUL 2>NUL
+if errorlevel 9009 (
+	echo.
+	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
+	echo.installed, then set the SPHINXBUILD environment variable to point
+	echo.to the full path of the 'sphinx-build' executable. Alternatively you
+	echo.may add the Sphinx directory to PATH.
+	echo.
+	echo.If you don't have Sphinx installed, grab it from
+	echo.https://www.sphinx-doc.org/
+	exit /b 1
+)
+
+if "%1" == "" goto help
+
+%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
+goto end
+
+:help
+%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
+
+:end
+popd
diff --git a/docs/source/conf.py b/docs/source/conf.py
new file mode 100644
index 0000000000000000000000000000000000000000..7654f03f6738871d0411e1b54e7e2efe74ec61b1
--- /dev/null
+++ b/docs/source/conf.py
@@ -0,0 +1,50 @@
+# Configuration file for the Sphinx documentation builder.
+#
+# For the full list of built-in configuration values, see the documentation:
+# https://www.sphinx-doc.org/en/master/usage/configuration.html
+
+# -- Project information -----------------------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
+
+project = 'IceMelting'
+copyright = '2025, Jan Habscheid'
+author = 'Jan Habscheid'
+release = '1.0'
+
+# Add src folder to path
+import os
+import sys
+# conf.py is executed from docs/source, so the project sources are two levels up
+sys.path.insert(0, os.path.abspath(os.path.join("..", "..", "src")))
+
+
+# -- General configuration ---------------------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
+
+extensions = [
+    'sphinx.ext.napoleon',
+    'sphinx.ext.autodoc', 
+    'myst_parser',
+    'sphinx_copybutton',
+    'sphinx.ext.coverage',
+    'sphinx.ext.autosectionlabel',
+]
+
+
+
+templates_path = ['_templates']
+exclude_patterns = []
+
+source_suffix = {
+    '.rst': 'restructuredtext',
+    '.txt': 'markdown',
+    '.md': 'markdown',
+}
+
+
+# -- Options for HTML output -------------------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
+
+# html_theme = 'alabaster'
+html_theme = 'sphinx_rtd_theme'
+html_static_path = ['_static']
diff --git a/docs/source/index.rst b/docs/source/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..704038e468d4a65a616b9af182e47158a50fffd3
--- /dev/null
+++ b/docs/source/index.rst
@@ -0,0 +1,90 @@
+.. IceMelting documentation master file, created by
+   sphinx-quickstart on Mon Feb 10 12:11:16 2025.
+   You can adapt this file completely to your liking, but it should at least
+   contain the root `toctree` directive.
+
+Performance analysis of Surrogates for Non-Linear heat equation
+===============================================================
+
+Abstract
+========
+
+Develop Surrogate Models for the nonlinear heat equation
+
+.. math::
+
+   \rho(T) c_P(T) \frac{\partial T}{\partial t} = \frac{\partial}{\partial x} \left(\lambda(T) \frac{\partial T}{\partial x}\right)
+   \\
+   \text{with: } T(x=0,t) = T_\text{ICE} = T(x=x,t=0), T(x=2,t) = T_\text{MELTING} = 273.15 \text{K}
+   \\
+   T_\text{ICE} \in [100, 200] \text{K}
+
+Find the material parameters in: https://doi.org/10.1007/s11157-006-9108-x
+
+Repository Structure
+====================
+
+Find the final presentation in the folder `Presentation`. 
+
+In `src` there is the source code with different versions.
+The PINN was struggling to yield the correct temperature behavior, therefore it was discretized in time. In the subfolder `IssuesPINN` different attempts for full continuous PINNs can be found, without proper documentation.
+
+To rerun the simulation, run the files in the order 01 - 06.
+
+1. Generate FEniCSx data to train the ANN on
+2. Find the best ANN with a hyperparameter tuner and train it
+3. Find the best PINN architecture (use less time steps for tuning, to reduce complexity)
+4. Train the best PINN architecture (now use more time steps)
+5. Evaluate the ANN results
+6. Evaluate the PINN results
+
+Installation
+============
+
+As a numerical solver, mainly `FEniCSx` was used and installed via conda.
+All the calculations were performed on a Linux machine. FEniCSx offers some beta versions for Windows support, but it is recommended to use WSL2 instead. Some issues arose with a macOS and therefore an additional docker container was created. This was not tested and therefore cannot be guaranteed to work properly.
+
+For the ANN, `tensorflow2` was used and installed via pip. 
+For PINNs, `DeepXDE` with the `tensorflow2` backend was used.
+
+Run the following script to install all necessary dependencies via conda.
+
+.. code-block:: bash
+
+   conda create --name IceMelting python=3.10 -y
+   conda activate IceMelting
+   conda install -c conda-forge fenics-dolfinx=0.8.0 mpich=4.2.1 pyvista=0.43.10 -y
+   conda install -c conda-forge deepxde==1.12.2 -y
+   pip install tensorflow==2.18.0 tensorflow_probability==0.25.0 tf_keras==2.18.0
+
+
+Alternative installation
+------------------------
+
+Use the "environment.yml" file to install all necessary environments
+
+.. code-block:: bash
+
+   conda env create -f environment.yml
+
+
+macOS installation using Docker
+-------------------------------
+
+.. code-block:: bash
+      
+   docker compose build
+   docker compose run solver
+
+
+Contact
+=======
+
+- Jan Habscheid
+- Jan.Habscheid@rwth-aachen.de
+
+.. toctree::
+   :maxdepth: 2
+   :caption: Contents:
+
+   src
diff --git a/docs/source/src.rst b/docs/source/src.rst
new file mode 100644
index 0000000000000000000000000000000000000000..0ed0177cf01d76c0db2acb49f00c3cde7f1709bf
--- /dev/null
+++ b/docs/source/src.rst
@@ -0,0 +1,22 @@
+Source Code
+===========
+
+.. automodule:: HeatCoefficients
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
+.. automodule:: FEniCSx
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
+.. automodule:: ANN
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
+.. automodule:: PINN
+    :members:
+    :undoc-members:
+    :show-inheritance:
diff --git a/src/ANN.py b/src/ANN.py
index 78eb27bbe172f2b49751149498a930c68108b6f1..7b4e1b9af3fec9ebd17cc69f7cd70df8fe24299d 100644
--- a/src/ANN.py
+++ b/src/ANN.py
@@ -41,7 +41,23 @@ CALLBACKS = [
 ]
 
 # Load data
-def load_data(skip_starter=1):
+def load_data(skip_starter=1) -> dict:
+    '''
+    Load the data, generated with FEniCSx before
+
+    Loads the training/testing, validation and extrapolation data.
+    Stores the data in a dictionary
+
+    Parameters
+    ----------
+    skip_starter : int, optional
+        Skip the first skip_starter time step to overcome the discontinuity, by default 1
+
+    Returns
+    -------
+    dict
+        Dictionary containing the data
+    '''
     data_traintest = np.load('Data/FEniCSx_heat_flux_TrainTest.npz')
     # data_test = np.load('Data/FEniCSx_heat_flux_Test.npz')
     data_validate = np.load('Data/FEniCSx_heat_flux_Validate.npz')
@@ -96,7 +112,22 @@ def load_data(skip_starter=1):
     return data
 
 # Prepare data
-def prepare_data(data):
+def prepare_data(data:dict) -> dict:
+    '''
+    Prepare the data for the ANN training
+
+    Adds the X_train, X_test, y_train, y_test, X_validate, y_validate, X_extrapolate, y_extrapolate to the data dictionary
+
+    Parameters
+    ----------
+    data : dict
+        Unprepared data dictionary
+
+    Returns
+    -------
+    dict
+        Prepared data dictionary
+    '''
     # X (t_history, T_ICE_K)
     # Load data in X and y format
     for stage_name in ['_traintest', '_validate', '_extrapolate']:
@@ -125,7 +156,26 @@ def prepare_data(data):
     return data
 
 # Define ANN
-def create_ann(neurons, activation='relu', output_activation='linear'):
+def create_ann(neurons:list, activation:str='relu', output_activation:str='linear') -> Sequential:
+    '''
+    Create a feed-forward neural network
+
+    Builds a Sequential model with the given number of neurons per hidden layer
+
+    Parameters
+    ----------
+    neurons : list
+        Number of neurons for each layer
+    activation : str, optional
+        Activation function for hidden nodes, by default 'relu'
+    output_activation : str, optional
+        Activation function for output, by default 'linear'
+
+    Returns
+    -------
+    Sequential
+        Neural network model
+    '''
     # I/O shape
     input_shape = (2,)
     output_shape = (1)
@@ -145,7 +195,26 @@ def create_ann(neurons, activation='relu', output_activation='linear'):
     return model
 
 # Compile ANN
-def compile_ann(model, optimizer, loss='mse', metrics=['mae', 'mape', 'r2']):
+def compile_ann(model:Sequential, optimizer, loss:str='mse', metrics:list=['mae', 'mape', 'r2']) -> Sequential:
+    '''
+    Compile the ANN to prepare it for training
+
+    Parameters
+    ----------
+    model : Sequential
+        The ANN
+    optimizer : Tensorflow optimizer
+        the optimizer
+    loss : str, optional
+        Loss to use, by default 'mse'
+    metrics : list, optional
+        Additional metrics to use, by default ['mae', 'mape', 'r2']
+
+    Returns
+    -------
+    Sequential
+        The compiled ANN
+    '''
     model.compile(
         optimizer=optimizer, 
         loss=loss, 
@@ -155,7 +224,30 @@ def compile_ann(model, optimizer, loss='mse', metrics=['mae', 'mape', 'r2']):
     return model
 
 # Train ANN
-def train_ann(model, data, batch_size=32, epochs=1000, callbacks=None, verbose=0):
+def train_ann(model:Sequential, data:dict, batch_size:int=32, epochs:int=1000, callbacks:list=None, verbose:int=0) -> tuple:
+    '''
+    Trains the ANN
+
+    Parameters
+    ----------
+    model : Sequential
+        ANN
+    data : dict
+        Data dictionary
+    batch_size : int, optional
+        Batch size, by default 32
+    epochs : int, optional
+        Number of epochs for training, by default 1000
+    callbacks : list, optional
+        Callbacks, by default None
+    verbose : int, optional
+        Whether to print each epoch, by default 0
+
+    Returns
+    -------
+    tuple
+        model, history
+    '''
     history = model.fit(
         data['X_train'], data['y_train'],
         validation_data=(data['X_test'], data['y_test']),
@@ -169,6 +261,18 @@ def train_ann(model, data, batch_size=32, epochs=1000, callbacks=None, verbose=0
 
 # Plot training history
 def plot_training_history(history, savefig=None):
+    '''
+    Plots training loss
+
+    Shows the metric curves over the epochs for the training and validation data
+
+    Parameters
+    ----------
+    history : tf.keras.callbacks.History
+        Tensorflow training history, as returned by model.fit
+    savefig : str, optional
+        If not None, determines the path to store the figure, by default None
+    '''
     fig, axs = plt.subplots(ncols=2, figsize=(10, 5))
     axs[0].semilogy(history.history['mae'], label='train')
     axs[0].semilogy(history.history['val_mae'], label='validation')
@@ -190,7 +294,19 @@ def plot_training_history(history, savefig=None):
     fig.show()
 
 # plot sample prediction
-def plot_sample_prediction(model, data, sample):
+def plot_sample_prediction(model:Sequential, data:dict, sample:int):
+    '''
+    Plots sample prediction
+
+    Parameters
+    ----------
+    model : Sequential
+        ANN
+    data : dict
+        Data dictionary
+    sample : int
+        Sample to plot
+    '''
     timesteps = data['time_validate'].shape[0]
     y_predicted = model.predict(data['X_validate'])