diff --git a/.DS_Store b/.DS_Store
index b03bbad8de2840f659ba62fc2a67f9c64a7b2311..44a5bbbce45dbd57ddae3f986309557f4dc85f51 100644
Binary files a/.DS_Store and b/.DS_Store differ
diff --git a/Project3/LyX/Abstract.lyx b/Project3/LyX/Abstract.lyx
index d2f530960c25fbaecac3bc38adeb04593c37257a..6bab79e95bea7ba2378130d70cd831b072604f17 100644
--- a/Project3/LyX/Abstract.lyx
+++ b/Project3/LyX/Abstract.lyx
@@ -93,11 +93,30 @@
 \begin_body
 
 \begin_layout Abstract
-###
+Roads are an important part of our lives.
+ We use them to get to work,
+ to our hobbies or just to visit our friends by car,
+ bus or by bike.
+ Too crowded roads can cause traffic jams that stop the flow of traffic.
+ These traffic jams restrict our movement and slow us down in our everyday life.
+\end_layout
+
+\begin_layout Abstract
+Different causes of traffic jams and possibilities to dissolve those were discussed in a previous work.
+\end_layout
+
+\begin_layout Abstract
+This work focuses on the uncertainty quantification of mathematical modeling of traffic jams.
+ It assumes that the true density of cars is just known at certain positions,
+ as it can be observed by cameras.
+ With these observations,
+ the uncertainty of the mathematical model will be quantified.
 \end_layout
 
 \begin_layout Keywords
-###
+Scalar Conservation Law,
+ Monte-Carlo Markov-Chain,
+ Uncertainty Quantification
 \end_layout
 
 \end_body
diff --git a/Project3/LyX/DiscussionAndConclusion.lyx b/Project3/LyX/DiscussionAndConclusion.lyx
index 1d625e33aa52dbafa8e7d914f99152465fd24d52..577f1e669f8c305f2dbefa10380ddbb2bc74bb39 100644
--- a/Project3/LyX/DiscussionAndConclusion.lyx
+++ b/Project3/LyX/DiscussionAndConclusion.lyx
@@ -120,7 +120,7 @@ This work presented an accurate algorithm to quantify the uncertainty
 \end_layout
 
 \begin_layout Standard
-The major impact of the resistance function on the solution and the importance to predict this accurately was emphasized.
+The major impact of the resistance function on the solution and the importance to predict this accurately were emphasized.
  An optimal choice for the different hyperparameter was discussed and the influence of those on the solution.
  
 \end_layout
@@ -130,7 +130,7 @@ Increasing the observations from one dataset to two datasets showed a positive e
 \end_layout
 
 \begin_layout Standard
-For future work it remains interesting to find a more efficient method,
+For future work it remains interesting to find a computationally more efficient method,
  as the MCMC algorithm depends on many iterations and is computationally complex.
  Furthermore,
  several efforts can be made to improve the identification of the resistance function.
@@ -144,13 +144,23 @@ Increase the number of MCMC iterations and adapt the control parameter
  when the convergence does not show an improvement about a certain number of steps.
 \end_layout
 
+\begin_deeper
+\begin_layout Enumerate
+Another possible modification is to check the acceptance ratio of changes after each MCMC iteration and adapt the control parameter 
+\begin_inset Formula $\beta$
+\end_inset
+
+ to accept a certain ratio of changes.
+\end_layout
+
+\end_deeper
 \begin_layout Enumerate
 Run several independent MCMC iterations with different random starts and take the one with the smallest loss.
 \end_layout
 
 \begin_layout Enumerate
-Increase the number of true datasets,
- as going from one to two datasets showed promising results.
+Increase the number of true datasets even further,
+ as going from one to two datasets already showed promising results.
 \end_layout
 
 \end_body
diff --git a/Project3/LyX/Introduction.lyx b/Project3/LyX/Introduction.lyx
index aae56aa9090eac7678f5b220cebfc473dd8b8a82..ec830f0c361622b63c463b5ba038dd6a2cf92d5f 100644
--- a/Project3/LyX/Introduction.lyx
+++ b/Project3/LyX/Introduction.lyx
@@ -226,9 +226,10 @@ Traffic jams are part of everyday life.
 \begin_layout Standard
 The question that arises is,
  how to reduce these traffic jams to increase the traffic’s quality.
- This work assumes that the density of cars is measured at certain parts of the road and tries to quantify the uncertainty of the mathematical model,
- describing traffic jams.
- 
+ This work assumes that the density of cars is measured at certain parts of the road and that a mathematical model is known,
+ up to an uncertain function.
+ The objective of this work is to present a general methodology to identify this uncertain function,
+ based on the known mathematical model and observation data.
 \end_layout
 
 \end_body
diff --git a/Project3/LyX/Results.lyx b/Project3/LyX/Results.lyx
index 29b973c08187e378a914463c8247e4889030ae0b..411bf73e01625519ade654c858b7c85e53e619cc 100644
--- a/Project3/LyX/Results.lyx
+++ b/Project3/LyX/Results.lyx
@@ -133,7 +133,15 @@ Two true solutions are available,
 \begin_inset Formula $d^{II}$
 \end_inset
 
- with initial data
+ with the respective initial data 
+\begin_inset Formula $u_{0}^{I}(x)$
+\end_inset
+
+ and 
+\begin_inset Formula $u_{0}^{II}(x)$
+\end_inset
+
+.
 \end_layout
 
 \begin_layout Standard
@@ -156,10 +164,6 @@ u_{0}^{II}(x) & =\begin{cases}
 
 \end_inset
 
-
-\end_layout
-
-\begin_layout Standard
 If not stated otherwise,
  the artificial diffusion parameter 
 \begin_inset Formula $M$
@@ -170,6 +174,16 @@ If not stated otherwise,
 
 \begin_layout Subsection
 Influence of Resistance Function
+\begin_inset Note Note
+status open
+
+\begin_layout Plain Layout
+Maybe move this to the introduction as a motivation?
+\end_layout
+
+\end_inset
+
+
 \end_layout
 
 \begin_layout Standard
@@ -222,10 +236,6 @@ status open
 \begin_inset Caption Standard
 
 \begin_layout Plain Layout
-\begin_inset Formula $k_{1}(x)$
-\end_inset
-
-
 \begin_inset CommandInset label
 LatexCommand label
 name "fig:InfluenceResistanceFunction-1"
@@ -269,10 +279,6 @@ status open
 \begin_inset Caption Standard
 
 \begin_layout Plain Layout
-\begin_inset Formula $k_{2}(x)$
-\end_inset
-
-
 \begin_inset CommandInset label
 LatexCommand label
 name "fig:InfluenceResistanceFunction-2"
@@ -320,10 +326,6 @@ status open
 \begin_inset Caption Standard
 
 \begin_layout Plain Layout
-\begin_inset Formula $k_{3}(x)$
-\end_inset
-
-
 \begin_inset CommandInset label
 LatexCommand label
 name "fig:InfluenceResistanceFunction-3"
@@ -357,10 +359,9 @@ The resistance function
 \end_inset
 
 .
- The left shows randomly generated,
- normal distributed resistance functions on the spatial domain,
- while the right shows the predicted solution,
- based on the resistance function (solid lines) and the true solution (dotted lines) at observation points 
+ Randomly generated,
+ normally distributed resistance functions are shown on the left,
+ while the predicted solution (solid lines) and the true solution (dotted lines) at observation points 
 \begin_inset Formula $x_{i}\in[0.75,1.5,2.25,3.25]$
 \end_inset
 
@@ -368,7 +369,7 @@ The resistance function
 \begin_inset Formula $u_{0}^{I}(x)$
 \end_inset
 
-.
+ are shown on the right.
 \begin_inset CommandInset label
 LatexCommand label
 name "fig:InfluenceResistanceFunction"
@@ -403,19 +404,19 @@ nolink "false"
  shows three different,
  randomly generated,
  resistance functions (left) and the corresponding solution 
-\begin_inset Formula $u(x,t)$
+\begin_inset Formula $u(x_{i},t)$
 \end_inset
 
- (right).
- The resistance function is visualized over the spatial domain while the solution 
-\begin_inset Formula $u$
+ at 
+\begin_inset Formula $x_{i}\in[0.75,1.5,2.25,3.25]$
 \end_inset
 
- is visualized for four different spatial positions 
-\begin_inset Formula $x_{i}\in[0.75,1.5,2.25,3.25]$
+ (right).
+ The resistance function is visualized over the spatial domain while the solution 
+\begin_inset Formula $u(x_{i},t)$
 \end_inset
 
- over time.
+ is visualized for the different positions over time.
  The solid line is the predicted solution,
  while the dotted points show the true observations.
 \end_layout
@@ -457,7 +458,7 @@ nolink "false"
 
 \end_inset
 
- only show shocks for certain observations.
+ only show shocks for certain observation points.
  Additionally,
  the value of 
 \begin_inset Formula $u$
@@ -469,14 +470,19 @@ nolink "false"
 
 \begin_layout Standard
 Therefore,
- the resistance function has a major influence on the solution of the PDE.
- It is not possible to simply choose any random function to be able to calculate the true solution,
- but the function has to be determined correctly.
- This emphasizes the huge challenge of the MCMC algorithm.
+ the solution of the PDE is strongly sensitive to the chosen resistance function,
+ which emphasizes the huge challenge of the MCMC algorithm.
 \end_layout
 
 \begin_layout Subsection
 Finding the Best Hyperparameter
+\begin_inset CommandInset label
+LatexCommand label
+name "subsec:FindingBestHyperparameter"
+
+\end_inset
+
+
 \end_layout
 
 \begin_layout Standard
@@ -541,7 +547,7 @@ The loss function
 \end_inset
 
  over the different optimization steps.
- The best values for both parameter are found after 22 iterations of the optimization.
+ The best values for both parameters are found after 22 iterations.
 \end_layout
 
 \end_inset
@@ -575,7 +581,7 @@ status open
 \begin_inset Caption Standard
 
 \begin_layout Plain Layout
-The objective of the optimization showing which values for 
+The objective of the optimization showing the tested values for 
 \begin_inset Formula $\beta$
 \end_inset
 
@@ -583,7 +589,7 @@ The objective of the optimization showing which values for
 \begin_inset Formula $\sigma^{}$
 \end_inset
 
- were tested.
+.
 \end_layout
 
 \end_inset
@@ -623,16 +629,6 @@ The gaussian process starts with a default value of
 \end_inset
 
 .
-\begin_inset Note Note
-status open
-
-\begin_layout Plain Layout
-Add the optimal values
-\end_layout
-
-\end_inset
-
-
 \begin_inset CommandInset label
 LatexCommand label
 name "fig:HyperparameterOptimization"
@@ -695,12 +691,8 @@ nolink "false"
 \begin_inset Formula $\beta$
 \end_inset
 
- shows better results for a value smaller than the initial guess.
-\end_layout
-
-\begin_layout Standard
-For the remaining results,
- the optimal parameter will be used.
+ shows better results for a reduced value.
+ The optimal parameter will be used for the remaining results.
 \end_layout
 
 \begin_layout Subsection
@@ -715,23 +707,11 @@ name "subsec:MCMC_SingleSample"
 \end_layout
 
 \begin_layout Standard
-Consider only the initial distribution 
+Initial distribution 
 \begin_inset Formula $u_{0}^{I}(x)$
 \end_inset
 
- and try to reconstruct the resistance function from this data,
- visualized in Figure 
-\begin_inset CommandInset ref
-LatexCommand ref
-reference "fig:MCMC_SingleSample"
-plural "false"
-caps "false"
-noprefix "false"
-nolink "false"
-
-\end_inset
-
-.
+ is considered with the objective of identifying the resistance function from this data.
 \end_layout
 
 \begin_layout Standard
@@ -936,7 +916,18 @@ name "fig:MCMC_SingleSample"
 
 \end_inset
 
-It shows the evolution of the loss function (Figure 
+Figure 
+\begin_inset CommandInset ref
+LatexCommand ref
+reference "fig:MCMC_SingleSample"
+plural "false"
+caps "false"
+noprefix "false"
+nolink "false"
+
+\end_inset
+
+ shows the evolution of the loss function (Figure 
 \begin_inset CommandInset ref
 LatexCommand ref
 reference "fig:MCMC_SingleSample_Loss"
@@ -976,8 +967,8 @@ nolink "false"
 
 \begin_layout Standard
 The loss function is displayed in a double logarithmic plot,
- showing consistent improvements for the prediction initially.
- The oscillations on the final plateau emphasize that there are still new predictions accepted,
+ showing consistent improvements for the prediction until it reaches a plateau.
+ New predictions are accepted on this plateau,
  but those new predictions do not improve the accuracy.
  
 \end_layout
@@ -1023,7 +1014,8 @@ nolink "false"
 
  (blue,
  red,
- green purple).
+ green,
+ purple).
  At positions 
 \begin_inset Formula $x\in[0.75,3.25]$
 \end_inset
@@ -1037,7 +1029,7 @@ nolink "false"
  the predicted solution overestimates the true solution,
  partially.
  Generally,
- the solution is very accurate at smooth points struggles with steep gradient and shocks.
+ the solution is very accurate in smooth regions and struggles with steep gradients.
 \end_layout
 
 \begin_layout Subsubsection
@@ -1056,7 +1048,9 @@ The amount of numerical diffusion,
  added to the discretization scheme,
  influences the solution of the PDE.
  The original PDE is no longer solved,
- but a slightly adapted one (compare with project 1 and 2).
+ but a slightly adapted one (compare with project 1 and 2),
+ making the numerical discretization inconsistent.
+ 
 \begin_inset Formula 
 \begin{align}
 u_{t}+\left(k(x)f(u)\right)_{x} & =0\\
@@ -1065,8 +1059,7 @@ u_{t}+\left(k(x)f(u)\right)_{x} & =0\\
 
 \end_inset
 
-This changes make the numerical discretization inconsistent.
- The higher the number of 
+The higher the number of 
 \begin_inset Formula $M$
 \end_inset
 
@@ -1075,8 +1068,8 @@ This changes make the numerical discretization inconsistent.
 \begin_inset Formula $u_{xx}^{\epsilon}$
 \end_inset
 
-.
- This numerical diffusion smoothes the solution,
+,
+ smoothing the solution,
  especially the shocks.
  The shocks are hard to predict with the MCMC algorithm,
  but nevertheless they are in the true dataset.
@@ -1200,12 +1193,8 @@ The parameter
  which results in differences for the resistance function.
  Increasing values of artificial diffusion result in smoothing of the shocks.
  However,
- this smoothing is only in the numerical scheme,
- but not in the true solution,
- which consists of shock.
- Therefore,
- the numerical discretization should be chosen in a way that it can produce the shocks,
- in the best possible way.
+ this smoothing is only present in the numerical scheme,
+ not in the true solution.
 \begin_inset CommandInset label
 LatexCommand label
 name "fig:ArtificialDiffusion"
@@ -1368,10 +1357,7 @@ The loss function
 \end_inset
 
  over the iterations plotted in a log-log plot.
- The algorithm shows a continuous improvement over the first 70-80 steps until it finds an optimum.
- The prediction is still updated at this optimum,
- but it does not improve the loss function significantly,
- anymore.
+ The algorithm shows a continuous improvement over the first 70-80 steps and then oscillates around an optimum.
  
 \begin_inset CommandInset label
 LatexCommand label
@@ -1418,14 +1404,12 @@ Prediction for resistance function
 \begin_inset Formula $x_{2}$
 \end_inset
 
- and the global maximum at 
+ but only one global maximum at 
 \begin_inset Formula $x_{6}$
 \end_inset
 
-.
- Nevertheless,
- the evolution of the resistance function differs over space,
- compared to the solution from section 
+ is present.
+ 
 \begin_inset CommandInset ref
 LatexCommand ref
 reference "subsec:MCMC_SingleSample"
@@ -1459,6 +1443,25 @@ name "fig:MCMC_DoubleSample_k"
 \end_inset
 
 
+\begin_inset Box Frameless
+position "t"
+hor_pos "c"
+has_inner_box 1
+inner_pos "t"
+use_parbox 0
+use_makebox 0
+width "40text%"
+special "none"
+height "1in"
+height_special "totalheight"
+thickness "0.4pt"
+separation "3pt"
+shadowsize "4pt"
+framecolor "foreground"
+backgroundcolor "none"
+status open
+
+\begin_layout Plain Layout
 \begin_inset Float figure
 placement document
 alignment document
@@ -1470,7 +1473,7 @@ status open
 \begin_inset Graphics
 	filename Figures/DoubleSample/predicted_vs_true_both_samples_sample_1.pdf
 	lyxscale 30
-	width 50text%
+	width 100text%
 
 \end_inset
 
@@ -1514,10 +1517,10 @@ nolink "false"
 
  can be identified.
  At positions 
-\begin_inset Formula $x\in1.5,3.25]$
+\begin_inset Formula $x\in[1.5,3.25]$
 \end_inset
 
- no difference between the predicted and the true solution can be seen.
+ almost no difference between the predicted and the true solution can be seen.
  
 \begin_inset CommandInset label
 LatexCommand label
@@ -1536,6 +1539,30 @@ name "fig:MCMC_DoubleSample_u1"
 \end_inset
 
 
+\end_layout
+
+\end_inset
+
+
+\begin_inset Box Frameless
+position "t"
+hor_pos "c"
+has_inner_box 1
+inner_pos "t"
+use_parbox 0
+use_makebox 0
+width "40text%"
+special "none"
+height "1in"
+height_special "totalheight"
+thickness "0.4pt"
+separation "3pt"
+shadowsize "4pt"
+framecolor "foreground"
+backgroundcolor "none"
+status open
+
+\begin_layout Plain Layout
 \begin_inset Float figure
 placement document
 alignment document
@@ -1547,7 +1574,7 @@ status open
 \begin_inset Graphics
 	filename Figures/DoubleSample/predicted_vs_true_both_samples_sample_2.pdf
 	lyxscale 30
-	width 50text%
+	width 100text%
 
 \end_inset
 
@@ -1584,6 +1611,11 @@ name "fig:MCMC_DoubleSample_u2"
 \end_inset
 
 
+\end_layout
+
+\end_inset
+
+
 \begin_inset Caption Standard
 
 \begin_layout Plain Layout
@@ -1597,11 +1629,22 @@ Solution of MCMC for
 
 .
  Considering two datasets,
- instead of only a single datasets,
- rapidly improves the accuracy of the MCMC algorithm.
- It is hard to interpret the absolute value of the loss function,
- compared to the solution with only one solution,
- as it adds the error of both distributions.
+ instead of only a single dataset,
+ improves the accuracy of the MCMC algorithm.
+ It is hard to compare the absolute value of the loss function,
+ compared to the value from section 
+\begin_inset CommandInset ref
+LatexCommand ref
+reference "subsec:MCMC_SingleSample"
+plural "false"
+caps "false"
+noprefix "false"
+nolink "false"
+
+\end_inset
+
+,
+ as it combines the error of both distributions.
  Nevertheless,
  the prediction of 
 \begin_inset Formula $u$
@@ -1702,15 +1745,15 @@ nolink "false"
 \end_inset
 
  shows two critical regions,
- the new found function predicts the true solution in this regions very accurate.
+ the newly identified function predicts the true solution in these regions very accurately.
  Furthermore,
- the observation points for the second datasets are reproduced very accurate,
+ the observation points for the second dataset are reproduced very accurately,
  too.
  The prediction only shows slight problems at sharp gradients,
  which probably occurs because of the remaining numerical diffusion.
  This numerical diffusion changes the numerical solution,
  compared to the true solution,
- even for the perfect resistance function (compare with section 
+ even for a perfectly identified resistance function (compare with section 
 \begin_inset CommandInset ref
 LatexCommand ref
 reference "subsec:ArtificialDiffusion"
diff --git a/Project3/LyX/TheoryAndMethods.lyx b/Project3/LyX/TheoryAndMethods.lyx
index 2cf9c101bbb285e74ac6a9a3ae32cf2c566e6c18..958fc5a143e750428f7ee8a1f2f15df8f6067acb 100644
--- a/Project3/LyX/TheoryAndMethods.lyx
+++ b/Project3/LyX/TheoryAndMethods.lyx
@@ -119,7 +119,8 @@ Consider the general mathematical description of a traffic flow (see project 2 f
 \begin{align}
 u_{t}+f(u)_{x} & =0,\quad x\in[a,b]\\
 u(x,t=0) & =u_{0}(x)\\
-u\big|_{x=x_{\text{in}}} & =u_{\text{in}}
+u_{x}\big|_{x=x_{\text{in}}} & =u_{\text{in}}\\
+u_{x}\big|_{x=x_{\text{out}}} & =u_{\text{out}}
 \end{align}
 
 \end_inset
@@ -158,7 +159,7 @@ resistance
 \begin{align}
 u_{t}+\left(k(x)f(u)\right)_{x} & =0,\quad x\in[a,b]\\
 u(x,t=0) & =u_{0}(x)\\
-u\big|_{x=a}=u\big|_{x=b} & =0
+u_{x}\big|_{x=a}=u_{x}\big|_{x=b} & =0
 \end{align}
 
 \end_inset
@@ -179,7 +180,7 @@ The boundaries are described by Neumann boundary conditions.
 .
  Therefore,
  an inverse problem will be formulated,
- which utilizes Monte-Carlo Markov-Chains to quantify the uncertainty.
+ which utilizes Monte-Carlo Markov-Chains for the uncertainty quantification.
 \end_layout
 
 \begin_layout Subsection
@@ -228,7 +229,7 @@ and the time step size
 \end_inset
 
  adds a certain amount of artificial diffusion.
- 
+ For more reference on the numerical discretization scheme see project 1 and project 2.
 \end_layout
 
 \begin_layout Subsection
@@ -245,7 +246,6 @@ Assume that the resistance function is represented as a piecewise linear functio
 \begin_inset Formula $\{x_{i}\}_{i=0}^{i=10}$
 \end_inset
 
-,
  including the boundary 
 \begin_inset Formula 
 \begin{align}
@@ -284,8 +284,7 @@ Assume that the density of cars is observed by some cameras.
 \end_inset
 
 .
- The density of cars is not observed at every time step,
- but just at some uniformly sampled,
+ The density of cars is observed at some uniformly sampled,
  discrete timeframes
 \begin_inset Formula 
 \begin{equation}
@@ -302,11 +301,12 @@ with the first timeframe observing the initial distribution
 \begin_inset Formula $t_{N}=10=T_{\text{end}}$
 \end_inset
 
- and 
+.
+ 
 \begin_inset Formula $N=40$
 \end_inset
 
- different observations.
+ different observations are made in total.
 \end_layout
 
 \begin_layout Subsection
@@ -325,17 +325,24 @@ The general idea of a Monte-Carlo Marcov-Chain (MCMC) is to identify an unknown
 \begin_inset Formula $k(x)$
 \end_inset
 
-.
+ (see 
+\begin_inset CommandInset citation
+LatexCommand cite
+key "SteinerEvje-Lecture"
+literal "false"
+
+\end_inset
+
+).
  The algorithm iteratively minimizes the loss function 
 \begin_inset Formula $\Phi$
 \end_inset
 
-,
- which described the mismatch between the prediction 
+ describing the mismatch between the prediction 
 \begin_inset Formula $\Theta$
 \end_inset
 
- and the true solution 
+ and true solution 
 \begin_inset Formula $d$
 \end_inset
 
@@ -353,12 +360,11 @@ The general idea of a Monte-Carlo Marcov-Chain (MCMC) is to identify an unknown
 \begin_layout Standard
 The main assumption is,
  that the unknown parameter vector can be described by a normal distribution.
- This assumption can be reformulated in a way,
- that the true solution 
+ This assumption can be reformulated in a way to describe the true solution 
 \begin_inset Formula $d$
 \end_inset
 
- can be described by the current solution 
+ by the current solution 
 \begin_inset Formula $h(\Theta)$
 \end_inset
 
@@ -385,6 +391,10 @@ with the probability for the random variable
 
  given by 
 \begin_inset Formula $P(\eta;0,\gamma^{2}\boldsymbol{I})$
+\end_inset
+
+ and the identity matrix 
+\begin_inset Formula $\boldsymbol{I}$
 \end_inset
 
 .
@@ -423,7 +433,7 @@ p\left(\eta=d-h(\phi);0,\gamma^{2}\boldsymbol{I}\right) & =\frac{1}{\sqrt{2\pi\g
 
 \end_inset
 
-The question that arises is if either 
+The arising question is if either 
 \begin_inset Formula $\Theta$
 \end_inset
 
@@ -431,18 +441,21 @@ The question that arises is if either
 \begin_inset Formula $\phi$
 \end_inset
 
- described the unknown function 
+ describes the unknown function 
 \begin_inset Formula $k(x)$
 \end_inset
 
- better,
- which is identified by the smaller value of 
+ better.
+ The smaller the value of 
 \begin_inset Formula $\Phi(\cdot)\sim\parallel d-h(\cdot)\parallel$
 \end_inset
 
-.
- Rewriting this ends in a rule to choose,
- what has a higher probability
+ the better 
+\begin_inset Formula $h(\cdot)$
+\end_inset
+
+ describes the unknown function.
+ Rewriting this yields a rule to choose the option with the higher probability.
 \begin_inset Formula 
 \begin{equation}
 \max\{P_{\Theta},P_{\phi}\}
@@ -478,12 +491,12 @@ If
 
 .
  To avoid ending in local minima,
- an element of randomness is introduced,
- for this define the decision function 
+ an element of randomness is introduced.
+ The decision function 
 \begin_inset Formula $a(\Theta,\phi)$
 \end_inset
 
-
+ is defined for this.
 \begin_inset Formula 
 \begin{equation}
 a(\Theta,\phi)=\min\{1,\frac{P_{\phi}}{P_{\Theta}}\}\qquad\text{with: }\frac{P_{\phi}}{P_{\Theta}}=\exp\left[\Phi(\Theta)-\Phi(\phi)\right]
@@ -491,15 +504,11 @@ a(\Theta,\phi)=\min\{1,\frac{P_{\phi}}{P_{\Theta}}\}\qquad\text{with: }\frac{P_{
 
 \end_inset
 
-
-\end_layout
-
-\begin_layout Standard
-and update the current prediction 
+The current prediction 
 \begin_inset Formula $\Theta$
 \end_inset
 
- only,
+ will be updated,
  if a randomly,
  uniformly sampled number 
 \begin_inset Formula $i$
@@ -510,15 +519,15 @@ and update the current prediction
 \end_inset
 
  is smaller than a,
- otherwise continue with the current guess.
+ otherwise the algorithm continues with the current guess.
 \end_layout
 
 \begin_layout Subsubsection
-The Algorithm in Pseudo-Code
+The Metropolis-Hastings Algorithm
 \end_layout
 
 \begin_layout Standard
-Start with some notation and parameter definition
+Define
 \begin_inset Formula 
 \begin{align}
 a(\Theta,\phi) & =\min\{1,\exp(\Phi(\Theta)-\Phi(\phi)\}\\
@@ -535,7 +544,7 @@ where the newly introduced parameter
 \begin_inset Formula $\mu$
 \end_inset
 
- described the mean of the assumed normal distribution and 
+ describes the mean of the assumed normal distribution and 
 \begin_inset Formula $\tau$
 \end_inset
 
@@ -551,12 +560,27 @@ where the newly introduced parameter
 
  are not fixed,
  yet.
- Their optimal values will be calculated in a hyperparameter optimization.
- The index 
+ Their optimal values will be calculated in a hyperparameter optimization in section 
+\begin_inset CommandInset ref
+LatexCommand ref
+reference "subsec:FindingBestHyperparameter"
+plural "false"
+caps "false"
+noprefix "false"
+nolink "false"
+
+\end_inset
+
+.
+ Index 
 \begin_inset Formula $k$
 \end_inset
 
- described the current iteration index
+ describes the current iteration index.
+\end_layout
+
+\begin_layout Standard
+The algorithm is as follows:
 \end_layout
 
 \begin_layout Enumerate
@@ -570,7 +594,7 @@ Start with initial guess
 \begin_deeper
 \begin_layout Enumerate
 Avoid to much noise by clipping the boundaries of 
-\begin_inset Formula $\Theta^{(0)}\in[0.9,1.3]$
+\begin_inset Formula $\Theta^{(0)}\in[0.8,1.2]$
 \end_inset
 
 
@@ -629,11 +653,12 @@ Else update
 
 \end_deeper
 \begin_layout Standard
-The steps 2-4 are repeated for 
+For the burn-in period,
+ steps 2-4 are repeated for 
 \begin_inset Formula $k\in[0,\tau]$
 \end_inset
 
- for the burn-in period to precondition the prediction vector 
+ to precondition the prediction vector 
 \begin_inset Formula $\Theta$
 \end_inset
 
@@ -649,7 +674,7 @@ The steps 2-4 are repeated for
 \end_layout
 
 \begin_layout Subsubsection
-MCMC Revisited
+Discussing the Parameter of the MCMC Algorithm
 \end_layout
 
 \begin_layout Standard
@@ -664,10 +689,6 @@ The parameter
 
  and the perturbation 
 \begin_inset Formula $\phi^{(k)}$
-\end_inset
-
- of iteration 
-\begin_inset Formula $k$
 \end_inset
 
 .
@@ -680,21 +701,26 @@ The parameter
 \begin_inset Formula $\xi^{(k)}$
 \end_inset
 
-.
- This may lead to a faster improvement of the loss function but comes with the danger to get stuck in a local minimum,
- instead of finding the global minimum.
- Therefore,
- it is of interest to find the best value of 
+,
+ which may lead to a faster improvement of the loss function but comes with the danger of getting stuck in a local minimum.
+ A good rule of thumb is to start with large steps and a large 
 \begin_inset Formula $\beta$
 \end_inset
 
-.
- A good rule of thumb is to start with a large 
+ in the beginning to then iteratively reduce this parameter for a better convergence.
+ Nevertheless,
+ this work utilizes a constant parameter 
 \begin_inset Formula $\beta$
 \end_inset
 
- to make large steps in the beginning and the iteratively reduce this parameter to improve the convergence.
- In this work,
+,
+ for the sake of simplicity.
+ 
+\begin_inset Note Note
+status collapsed
+
+\begin_layout Plain Layout
+In this work,
  the parameter 
 \begin_inset Formula $\beta$
 \end_inset
@@ -707,7 +733,13 @@ The parameter
 .
 \end_layout
 
-\begin_layout Enumerate
+\end_inset
+
+
+\begin_inset Note Note
+status collapsed
+
+\begin_layout Plain Layout
 The parameter 
 \begin_inset Formula $\beta$
 \end_inset
@@ -719,7 +751,6 @@ The parameter
 
 \end_layout
 
-\begin_deeper
 \begin_layout Enumerate
 If the acceptance rate is below 
 \begin_inset Formula $\omega_{\min}$
@@ -732,6 +763,7 @@ If the acceptance rate is below
 
 \end_layout
 
+\begin_deeper
 \begin_layout Enumerate
 If the acceptance rate is above 
 \begin_inset Formula $\omega_{\max}$
@@ -742,8 +774,14 @@ If the acceptance rate is above
 \end_inset
 
 
+\end_layout
+
+\end_deeper
+\end_inset
+
+
 \begin_inset Note Note
-status open
+status collapsed
 
 \begin_layout Plain Layout
 The different observations are weighted differently to ensure that the resistance function 
@@ -785,19 +823,18 @@ Calculate
 
 \end_layout
 
-\end_deeper
 \begin_layout Standard
 The randomly generated parameter 
 \begin_inset Formula $\Theta^{(0)}$
 \end_inset
 
- has a large impact on the convergence behavior of the solution,
+ influences the initial convergence behavior,
  as it determines the starting point of the optimization problem.
  If 
 \begin_inset Formula $\Theta^{(0)}$
 \end_inset
 
- is chosen very close to the true solution the algorithm will converge very fast and will yield a good solution.
+ is chosen very close to the true solution the algorithm will converge fast.
  If,
  on the other hand,
  
@@ -805,12 +842,12 @@ The randomly generated parameter
 \end_inset
 
  is chosen poorly,
- it will take more iterations for the algorithm to come to a very good solution.
+ it will take more iterations for the algorithm to come to a good solution.
  It may even happen that the algorithm will get stuck in some local minimum,
  instead of finding the global minimum.
  To reduce randomness in the algorithm,
- it is a good choice to either reduce the variation 
-\begin_inset Formula $\sigma^{2,(0)}$
+ it is a good choice to either reduce the standard deviation 
+\begin_inset Formula $\sigma^{(0)}$
 \end_inset
 
  for the initial guess or to clip the range of 
@@ -827,182 +864,5 @@ The randomly generated parameter
  but it will balance the convergence behavior for different initial guesses.
 \end_layout
 
-\begin_layout Standard
-\begin_inset Note Note
-status open
-
-\begin_layout Plain Layout
-\begin_inset listings
-inline false
-status open
-
-\begin_layout Plain Layout
-
-// initial guess
-\end_layout
-
-\begin_layout Plain Layout
-
-Theta_k = NormalDistribution(mu,
- sigma)
-\end_layout
-
-\begin_layout Plain Layout
-
-// clip the initial guess close to the mean
-\end_layout
-
-\begin_layout Plain Layout
-
-Theta_k = Theta_k.clip(0.9,1.3)
-\end_layout
-
-\begin_layout Plain Layout
-
-\end_layout
-
-\begin_layout Plain Layout
-
-// burn-in period
-\end_layout
-
-\begin_layout Plain Layout
-
-...
-\end_layout
-
-\begin_layout Plain Layout
-
-\end_layout
-
-\begin_layout Plain Layout
-
-//MCMC
-\end_layout
-
-\begin_layout Plain Layout
-
-for k in k_steps:
-\end_layout
-
-\begin_layout Plain Layout
-
-	// Each MCMC step consists of 20 updates
-\end_layout
-
-\begin_layout Plain Layout
-
-	for _ in 20:
-\end_layout
-
-\begin_layout Plain Layout
-
-		// Perturbation
-\end_layout
-
-\begin_layout Plain Layout
-
-		xi_k = NormalDistribution(mu,
- sigma)
-\end_layout
-
-\begin_layout Plain Layout
-
-		Phi_k = sqrt(1 - beta**2) * Theta_k + beta * xi_k
-\end_layout
-
-\begin_layout Plain Layout
-
-		
-\end_layout
-
-\begin_layout Plain Layout
-
-		// Loss functions
-\end_layout
-
-\begin_layout Plain Layout
-
-		h_Theta_k = 1/2 * ((Theta_k-d) @ (Theta_k-d).T)/gamma**2
-\end_layout
-
-\begin_layout Plain Layout
-
-		h_Phi_k = 1/2 * ((PHi_k-d) @ (Phi_k-d).T)/gamma**2
-\end_layout
-
-\begin_layout Plain Layout
-
-\end_layout
-
-\begin_layout Plain Layout
-
-		// Decision function
-\end_layout
-
-\begin_layout Plain Layout
-
-		a = min(1,
- exp(h_Theta_k - h_Phi_k))
-\end_layout
-
-\begin_layout Plain Layout
-
-\end_layout
-
-\begin_layout Plain Layout
-
-		// Make decision
-\end_layout
-
-\begin_layout Plain Layout
-
-		i_k = UniformDistribution(0,1)
-\end_layout
-
-\begin_layout Plain Layout
-
-		if i_k <= a:
-\end_layout
-
-\begin_layout Plain Layout
-
-			Theta_k = Phi_k
-\end_layout
-
-\begin_layout Plain Layout
-
-			h_Theta_k = h_Phi_k
-\end_layout
-
-\begin_layout Plain Layout
-
-		else:
-\end_layout
-
-\begin_layout Plain Layout
-
-			Theta_k = Theta_k
-\end_layout
-
-\begin_layout Plain Layout
-
-			h_Theta_k = h_Theta_k
-\end_layout
-
-\begin_layout Plain Layout
-
-\end_layout
-
-\end_inset
-
-
-\end_layout
-
-\end_inset
-
-
-\end_layout
-
 \end_body
 \end_document