diff --git a/Main.tex b/Main.tex
index 80b7cfca832467a3ff5b0527d939ea2a90d157d9..129ed193c4031714e5b771657e4788e29baa4274 100644
--- a/Main.tex
+++ b/Main.tex
@@ -170,6 +170,16 @@
 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Titel   %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 \begin{document}
+\tikzset{
+    mynode/.style={ellipse, draw=black, align=center, fill=yellow!40,very thick, text centered, minimum width={40pt},minimum height={30pt}},
+      mynode2/.style={rectangle, align=center,draw=black, fill=green!40,very thick, text centered, minimum width={75pt},minimum height={15pt}},
+      mynode3/.style={rectangle,align=center, draw=black, fill=green!40,very thick, text centered, minimum width={180pt},minimum height={12pt}},
+       mynodeB/.style={ellipse, draw=red, align=center, fill=yellow!40,very thick, text centered, minimum width={40pt},minimum height={30pt}},
+   edge/.style = {->,> = latex',thick},
+    mynode3B/.style={rectangle,align=center, draw=red, fill=green!40,very thick, text centered, minimum width={200pt},minimum height={15pt}},
+%    myarrow/.style={->, >=latex', shorten >=1pt, thick},
+%    mylabel/.style={text width=7em, text centered}
+}
 	%
 	\setbeamercovered{invisible}
 	%Standardkompiler  txs:///pdflatex | txs:///bibtex | txs:///pdflatex | txs:///pdflatex
@@ -189,20 +199,215 @@
 		%\date{\AdvanceDate[1]\today}
 		\titlepage
 	}
-	
-	
-		\frame{
+
+
+						\frame{
 		\frametitle{Supervised Learning}
-Learning: Pic from Proposal translated in englisch
+		\begin{columns}[T]
+		\begin{column}{0.68\textwidth}
+		\phantom{a}\\\medskip
+	\tikzstyle TestEdge=[-Triangle,line width=3pt,shorten >=3pt,green!40!black,dotted]
+\tikzstyle TrainEdge=[-Triangle,thick,shorten >=3pt,line width=3pt]
+\tikzset{
+  Legend/.style={draw=white, top color=white, bottom color=white, inner sep=0.5em, minimum height=1cm,text width=10mm, align = left}}
+\scalebox{0.7}{
+\begin{tikzpicture}[->, node distance=0.8cm, auto]
+\small
+\node[Legend] (Inst) {};%Trainingsdatenpunkte $i=1 \dots n$
+\node[mynode,below left=-1cm and 0.5cm of Inst] (Feat) {Input $X_1, \dots, X_n$};%Feature-Vektoren
+\node[mynode,below right=-1cm and 0.5cm of Inst] (Out) {Output $Y_1, \dots, Y_n$};
+\node[mynode,below=1.5cm of Inst,align=center] (ML) {ML-Model };
+\node[mynode,below=4cm of Inst,align=center] (Pred) {Trained ML-Model};
+
+\node[mynode,below left =2 cm and 0.5cm of Pred] (TestFeat) {Input $\hat{X}_1, \dots, \hat{X}_m$};
+\node[mynode,below right=2 cm and 0.5cm of Pred] (OutTest) {Output $Y^*_1,\dots, Y^*_m$};
+
+\only<2>{
+\node[mynodeB,below left=-1cm and 0.5cm of Inst] (Feat) {\textbf{Input $X_1, \dots, X_n$}};%Feature-Vektoren
+\node[mynodeB,below right=-1cm and 0.5cm of Inst] (Out) {\textbf{Output $Y_1, \dots, Y_n$}};
+}
+\only<3>{
+\node[mynodeB,below=1.5cm of Inst,align=center] (ML) {\textbf{ML-Model} };
+}
+\only<4>{
+\node[mynodeB,below left =2 cm and 0.5cm of Pred] (TestFeat) {Input $\hat{X}_1, \dots, \hat{X}_m$};
+}
+\only<5>{
+\node[mynodeB,below right=2 cm and 0.5cm of Pred] (OutTest) {Output $Y^*_1,\dots, Y^*_m$};
+}
+\path
+%(TestInst) edge[TestEdge]  (TestFeat)
+(TestFeat) edge[TestEdge] (Pred)
+(Pred) edge[TestEdge] (OutTest)
+;
+
+\path
+%(Inst) edge[TrainEdge] node {} (Out)
+%(Inst) edge[TrainEdge] node {} (Feat)
+(Feat) edge[TrainEdge] node {}(ML)
+(Out) edge[TrainEdge]  node {}(ML)
+(ML) edge[TrainEdge]  node {}(Pred)
+;
+\node[Legend, below left = 1.5cm and 1.5cm of Inst] (E1) {Training};
+\node[Legend, below left= 2.5cm and 1.5cm of Inst] (E2) {Test};
+\node[Legend, left = 1cm of E1] (S1) {};
+\node[Legend, left = 1cm of E2] (S2) {};
+
+\path
+(S1) edge[TrainEdge] (E1)
+(S2) edge[TestEdge] (E2);
+\end{tikzpicture}
+}
+	\end{column}
+				\begin{column}{0.3\textwidth}
+				\begin{overprint}
+				\only<2>{
+		\includegraphics[width=\textwidth]{figure/catsdog2}}
+		
+		\only<3>{
+		\includegraphics[width=\textwidth]{figure/GNN}
+		}
+		\only<4->{
+		\includegraphics[width=\textwidth]{figure/catsdog3}
+		\visible<5->{
+		\begin{center}
+		\Large \textbf{It's a dog!}
+		\end{center}
+		}
+		}
+		\end{overprint}
+		\end{column}
+	\end{columns}\pause\bigskip
+\large	
+\begin{center}
+\visible<6->{
+\green{Works well for many classification and regression tasks!}
+}
+\end{center}
+	}
+	
+\begin{frame}
+\frametitle{Operations Research}\footnotesize
+
+\begin{columns}[T]
+\begin{column}{.4\textwidth}
+\adjustbox{max height=0.9 \textheight}
+{
+ \begin{tikzpicture}
+
+\scriptsize
+\node[mynode] at (2,10.5) (1) {Problem};
+\node[mynode2] at (2,9) (2) {OR-expert};
+
+\node[mynode] at (2,7.5) (3a) {Mathematical \\ model}; 
+
+\node[mynode] at (2,6) (5a) {Algorithm}; 
+
+\visible<4->{
+\node[mynodeB] at (0.5,4) (5b) {\textbf{\normalsize{Data set}}};
+\node[mynodeB] at (3.5,4) (6) {\textbf{\normalsize{Solution}}};
+}
+\draw[edge] (1) ->  (2);
+\draw[edge] (2) ->  (3a);
+\draw[edge] (3a) ->  (5a);
+\visible<4->{
+\draw[edge] (5a) ->  (6);
+\draw[edge] (5b) ->  (5a);
+}
+
+
+\onslide<1| trans:1>{
+\node[mynodeB] at (2,10.5) (a) {\normalsize{ \textbf{Problem}}};
+}
+\visible<2| trans:2>{
+\node[mynodeB] at (2,7.5) (b) { \textbf{\normalsize{Mathematical}} \\ \textbf{\normalsize{model}}};
+}
+\onslide<3| trans:3>{
+\node[mynodeB] at (2,6) (c) {\normalsize{\textbf{Algorithm}} };
+}
+%\onslide<4| trans:4>{
+% \node[mynodeB] at (3.4,5.5) (d) {\normalsize{\textbf{Data set}} \\};
+%}
+%\onslide<5| trans:5>{
+%\node[mynodeB] at (1.5,4.25) (e) {\normalsize{\textbf{Solution}}};
+%}
+
+
+\end{tikzpicture}
+}
+\end{column}
+\begin{column}{.6\textwidth}
+\begin{overprint}
+\onslide<1| trans:1>
+    \begin{bspBlock}{(Shortest path problem)}
+    Problem:\hfill\mbox{}\\[2ex]
+    \includegraphics[width=.99\textwidth]{figure/ZIMPL1}
+    \end{bspBlock}
+%
+\onslide<2| trans:2>
+    \begin{bspBlock}{(Shortest path problem)}
+    General mathematical model:\hfill\mbox{}\\[2ex]
+    \includegraphics[width=.99\textwidth]{figure/ZIMPL2}
+    \end{bspBlock}
+%
+\onslide<3| trans:3>
+    \begin{bspBlock}{(Dijkstra's Algorithm)}
+    \tiny
+    \begin{algorithm}[H]
+  \green{\tcp{Initialization}}
+  SET $d(s):= 0$, $pred(s):=undef$, $L:=\{s\}$\;
+  SET $d(j):=\infty$ for all $j\in V\setminus \{s\}$\;
+  \green{\tcp{Loop}}
+  \While{$L \ne \emptyset$}
+  {
+    SELECT $i\in L$ with $d(i)=\min_{k\in L} d(k)$\;
+    SET $L:=L\setminus\{i\}$~~\green{\tcp{$i$ is now ultimately marked}}
+    \For{$j: (i,j)\in\delta^+(i)$}
+    {
+      \If{$d(j)>d(i)+c_{ij}$}
+      {
+        \green{\tcp{Label}}
+        SET $d(j) := d(i)+c_{ij}$ and $pred(j):=i$\;
+        SET $L:=L \cup \{j\}$\;
+      }
+    }
+  }
+  \green{\tcp{Output: predecessors $pred(\cdot)$ and distances $d(\cdot)$}}
+\end{algorithm}
+    \end{bspBlock}
+    %
+%\onslide<4| trans:4>
+%    \begin{bspBlock}{(Shortest path problem)}
+%    \includegraphics[width=.99\textwidth]{figure/ZIMPL0}\\[3ex]
+%    Data set (in .zpl file):\hfill\mbox{}\\[2ex]
+%    \includegraphics[width=.99\textwidth]{figure/ZIMPL3}
+%    \end{bspBlock}
+%
+%%
+%
+%%
+%
+%%
+\onslide<4| trans:4>
+    \begin{bspBlock}{(Solution)}
+    \includegraphics[width=\textwidth]{figure/ZIMPL9b}
+    \end{bspBlock}
+
+\end{overprint}
+\end{column}
+\end{columns}
+\end{frame}
 
 
-works well for classification and regression tasks
+\frame{
+\frametitle{Machine Learning and Operations Research}
+\Large
+\textbf{Key Question:}\\
+\begin{center}
+\red{How can ML be used for solving optimisation problems?}\\[7ex]\pause
 
-Die Kernfrage
-ist, auf welche Weise ML in OR-Algorithmen eingesetzt werden
-kann, denn kombinatorische Optimierungsprobleme unterscheiden
-sich stark von den meisten Problemen, die aktuell durch ML gelst
-werden\citep{BengioEtAl2021}
+Combinatorial optimisation problems are quite different from most problems currently solved by ML \citep{BengioEtAl2021}
+\end{center}
 	}
 	
 \section{Vehicle Routing Problems}
@@ -383,13 +588,16 @@ Good/optimal solutions are often located in the border region.
 				\frame{
 		\frametitle{Combine ML with OR Algorithms!}
 		\small
-\blue{Apply learning inside OR Algorithms} \citep{BengioEtAl2021}:\bigskip
+		Sometimes expert knowledge is not satisfactory and algorithmic decisions are taken greedily or according to ``best practice''.\\\bigskip\pause
+		{\large
+\blue{$\Rightarrow$ Apply learning inside OR Algorithms} \citep{BengioEtAl2021}:\medskip}\pause
+
 \begin{itemize}
 %\item End to end learning (only for little constraint problems like TSP)
-\item Learning to configure algorithms (expert knowledge is not satisfactory and the researcher wishes to find better ways of making decisions)
-\item Machine learning alongside optimization algorithms (e.g., call ML Model for each recurring decision in an optimization algorithm)
+\item Learning to configure algorithms (one-time decision)
+\item Machine learning alongside optimization algorithms (recurring decisions)
 \end{itemize}
-\bigskip
+\bigskip\pause
 Most ML Applications in OR focus on heuristics!
 
 
@@ -778,70 +986,32 @@ How can we find such a neighborhood?\\\smallskip\pause \blue{Dynamic Neighborhoo
 
 
 %------------------------------------------------
-\newcommand{\RF}{RF}
-\newcommand{\modA}{DCH}
-\newcommand{\modB}{Hom}
-\newcommand{\modC}{Het}
+\newcommand{\RF}{\texttt{RF}}
+\newcommand{\modA}{\texttt{DCH}}
+\newcommand{\modB}{\texttt{Hom}}
+\newcommand{\modC}{\texttt{Het}}
 \begin{frame}{Learning Models}
 \small
     Models used to perform the classification task:\\\medskip
     \begin{itemize}
-        \item[\RF] Random Forest with engineered variables 
         \item[\modA] Deep classification head of GNN encoded variables
-        \item[\modB] Homegeneous GNN to encode NG-Set
-        \item[\modC] Heterogeneous GNN to encode NG-Set
+        \item[\modB] Homogeneous GNN 
+        \item[\modC] Heterogeneous GNN \pause
+        \item[\RF] Random Forest with engineered variables 
     \end{itemize}
+  
    % Additional attention layers have been omitted due to computing capacity and simplified models' inability to learn connections.
 \end{frame}
 
 %------------------------------------------------
 
-\begin{frame}{Random Forest Classifier Model}
-\small
-
-
-    \textbf{Random Forest Classifier:}
-    \begin{itemize}
-        \item Uses log loss (cross-entropy) as the splitting criterion.
-        \item Limits the depth of each tree to 100 levels to prevent overfitting.
-        \item Each split considers at most 4 features, controlling tree diversity.
-    \end{itemize}
-\end{frame}
 
 %------------------------------------------------
 
-\begin{frame}{Encoding-Decoding GNN for Edge Prediction}
-    \textbf{Graph Encoder:}
-        \begin{itemize}
-            \item \textbf{Graph Type:} Homogeneous (single node and edge type)
-            \item \textbf{Convolution Layers:}
-            \begin{itemize}
-                \item \texttt{GCNConv1}: Projects input features to hidden space with ReLU activation.
-                \item \texttt{GCNConv4}: Produces output node embeddings.
-            \end{itemize}
-            \item \textbf{Output:} Node embeddings used for edge prediction or passed to a classification model.
-        \end{itemize}
-
-    \textbf{Decoding Process:}
-    \begin{itemize}
-        \item Predicts edge scores using dot product of node embeddings.
-        \item Computes full graph adjacency matrix for all possible node pairs.
-    \end{itemize}
+\begin{frame}{Deep classification head of GNN encoded variables (\modA)}
+\textbf{Idea:} Encode nodes and use the resulting node embeddings to classify whether ``$i$ is in the neighborhood of $j$''\\\pause\bigskip
 
-    \textbf{Goal:} Predict edges or provide embeddings for downstream tasks.
-\end{frame}
 
-\begin{frame}{Classification Head for Node/Graph-Level Tasks}
-    \textbf{Classification Model:}
-    \begin{itemize}
-        \item Fully connected feed-forward network for downstream tasks.
-        \item \textbf{Structure:}
-        \begin{itemize}
-            \item 3 hidden layers with ReLU activations.
-            \item \texttt{Input Size:} Embedding size from the encoder (e.g., 78).
-            \item \texttt{Output:} Binary classification via sigmoid activation.
-        \end{itemize}
-    \end{itemize}
 
     \textbf{Workflow:}
     \begin{enumerate}
@@ -852,90 +1022,56 @@ How can we find such a neighborhood?\\\smallskip\pause \blue{Dynamic Neighborhoo
             \item Combining graph-based features with external data.
         \end{itemize}
     \end{enumerate}
+    \end{frame}
+    
+ 
+\begin{frame}{Homogeneous GNN (\modB)}
+\textbf{Idea:} Learn the arcs in the $ng$-graph directly\\\bigskip\pause
 
-    \textbf{Goal:} Perform classification tasks using graph-based features.
-\end{frame}
-
-%------------------------------------------------
-
-\begin{frame}{Homogeneous GNN Model for Edge Prediction}
-    \textbf{Architecture:}
+    \textbf{Architecture:}\\\smallskip
     \begin{itemize}
         \item \textbf{Graph Type:} Homogeneous (single node and edge type)
-        \item \textbf{Convolution Layers:}
-        \begin{itemize}
-            \item 4 \texttt{GCNConv} layers for feature propagation and aggregation.
-        \end{itemize}
-        \item \textbf{Linear Layers:}
-        \begin{itemize}
-            \item Two optional linear transformations (\texttt{Linear1}, \texttt{Linear2}) for additional feature refinement.
+        \item 4 \blue{Convolution Layers} for feature propagation and aggregation (\texttt{GCNConv})
+		\item Two linear transformations for additional feature refinement
         \end{itemize}
-    \end{itemize}
-\end{frame}
+    \end{frame}
+%------------------------------------------------
 
-\begin{frame}{Homogeneous GNN Model for Edge Prediction II}
-    \textbf{Encoding Process:}
-    \begin{enumerate}
-        \item Apply \texttt{GCNConv} layers sequentially:
-        \begin{itemize}
-            \item First 3 layers: Extract hierarchical features with ReLU activations.
-            \item Final layer: Produces the output embeddings.
-        \end{itemize}
-        \item Optional: Dropout for regularization and edge dropout to sparsify training graphs.
-    \end{enumerate}
-    
-    \textbf{Decoding Process:}
-    \begin{itemize}
-        \item Computes dot products between node embeddings to predict edge scores.
-    \end{itemize}
 
-    \textbf{Full Graph Prediction:}
-    \begin{itemize}
-        \item Computes pairwise similarities between all node embeddings for adjacency matrix reconstruction.
-    \end{itemize}
 
-    \textbf{Goal:} Predict edges in a homogeneous graph.
-\end{frame}
 
 %------------------------------------------------
 
-\begin{frame}{Heterogeneous GNN Model for Edge Prediction}
-    \textbf{Architecture:}
+\begin{frame}[noframenumbering]{Heterogeneous GNN (\modC)}
+
+\textbf{Idea:} Learn the arcs in the $ng$-graph directly using different types of nodes and arcs\\\bigskip \pause
+    \textbf{Architecture:}\smallskip
     \begin{itemize}
-        \item \textbf{Node Types:} \texttt{stops}, \texttt{depot}
-        \item \textbf{Edge Types:}
+        \item \textbf{Node Types:} \texttt{customer}, \texttt{depot}
+        \item \textbf{Arc Types:}
         \begin{itemize}
-            \item \texttt{route} (\texttt{stops} $\to$ \texttt{stops})
-            \item \texttt{remember} (\texttt{stops} $\to$ \texttt{stops})
-            \item \texttt{departs} (\texttt{depot} $\to$ \texttt{stops})
-            \item \texttt{return} (\texttt{stops} $\to$ \texttt{depot})
+            \item \texttt{route} (\texttt{customer} $\to$ \texttt{customer})
+            \item \texttt{remember} (\texttt{customer} $\to$ \texttt{customer})
+            \item \texttt{departs} (\texttt{depot} $\to$ \texttt{customer})
+            \item \texttt{return} (\texttt{customer} $\to$ \texttt{depot})
         \end{itemize}
-        \item \textbf{Convolution Layers:} Uses \texttt{GCNConv} and \texttt{SAGEConv} for message passing.
+        \item \textbf{Convolution Layers:} (\texttt{GCNConv} and \texttt{SAGEConv})
     \end{itemize}
 \end{frame}
 
-\begin{frame}{Heterogeneous GNN Model for Edge Prediction II}
-    \textbf{Encoding Process:}
-    \begin{enumerate}
-        \item Linear projections for \texttt{stops} and \texttt{depot} inputs.
-        \item \textbf{Two-stage Convolution:}
-        \begin{itemize}
-            \item \textbf{Stage 1:} Combines \texttt{route}, \texttt{remember}, and \texttt{depot} connections using GCN/SAGE layers.
-            \item \textbf{Stage 2:} Refines embeddings using updated node features.
-        \end{itemize}
-        \item Produces final embeddings for \texttt{stops} and \texttt{depot}.
-    \end{enumerate}
 
-    \textbf{Decoding Process:}
+
+
+\begin{frame}{Random Forest (\RF)}
+\small
+
     \begin{itemize}
-        \item Predicts sparse, directed \texttt{remember} edges using dot product of \texttt{stops} embeddings.
+        \item Uses log loss (cross-entropy) as the splitting criterion.
+        \item Limits the depth of each tree to 100 levels to prevent overfitting.
+        \item Each split considers at most 4 features, controlling tree diversity.
     \end{itemize}
-    
-    \textbf{Goal:} Predict sparse directed edges between \texttt{stops}.
 \end{frame}
 
-
-
 %\begin{frame}{Random forest summary}
 %\red{??? How to interpret?}
 %    \begin{columns}[c]
@@ -956,7 +1092,7 @@ How can we find such a neighborhood?\\\smallskip\pause \blue{Dynamic Neighborhoo
 %    \end{columns}
 %\end{frame}
 
-\begin{frame}{Preliminary Results -- Summary}
+\begin{frame}{Preliminary Results -- \modA}
     % \begin{table}
     %     \begin{tabular}{l l l}
     %         \toprule
@@ -969,44 +1105,37 @@ How can we find such a neighborhood?\\\smallskip\pause \blue{Dynamic Neighborhoo
     %     \end{tabular}
     %     \caption{Table caption}
     % \end{table}
-
-        \begin{columns}[c]
-        \column{.5\textwidth}
-        \begin{figure}[h]           
-            \centering
-            \includegraphics[width=0.9\textwidth]{figure/rf_clf1_roc.png}
-            \caption{\RF, Acc=0.923}
-        \end{figure}
-
-        \column{.5\textwidth}
-    \begin{figure}[h]
-            
-            \centering
-            \includegraphics[width=\textwidth]{figure/m1n.png}
-            \caption{\modA, Acc=0.895}
-        \end{figure}
-
-    \end{columns}
+\begin{figure}[h]
+            \includegraphics[width=0.9\textwidth]{figure/m1n.png}
+\end{figure}\Large
+Accuracy 0.895
     \end{frame}
-\begin{frame}{Preliminary Results -- Summary}
+\begin{frame}{Preliminary Results -- \modB~and \modC}
+\Large
       \begin{columns}[c]
         \column{.5\textwidth}
         \begin{figure}[h]
             
             \centering
             \includegraphics[width=\textwidth]{figure/m2n.png}
-            \caption{\modB, Acc=0.876}
+            \caption{\modB, Accuracy 0.876}
         \end{figure}
         \column{.5\textwidth}
              \begin{figure}[h]
             
             \centering
             \includegraphics[width=\textwidth]{figure/m3n.png}
-            \caption{\modC, Acc=0.785}
+            \caption{\modC, Accuracy 0.785}
         \end{figure}
     \end{columns}
 \end{frame}
-
+\begin{frame}{Preliminary Results -- \RF}
+        \begin{figure}[h]           
+            \centering
+            \includegraphics[height=0.8\textheight]{figure/rf_clf1_roc.png}
+            \caption{\RF, Accuracy 0.923}
+        \end{figure}
+        \end{frame}
 \begin{frame}{DNE-neighborhood vs learned neighborhood}
 
 
@@ -1035,14 +1164,16 @@ Picture neighborhood vs learned neighborhood - are the learned rather sparse or
 	\begin{frame}{Conclusion}
 
 	\begin{itemize}\setlength\itemsep{1em}
-	\item Result using the encoding head were rather disappointing
-     \item (node-) encoding a complete graph seems to have disadvantages and some heuristics must be performed (e.g.,kNN).
+	\item  Performance of GNNs rather disappointing (compared to \RF)
+     %\item (node-) encoding a complete graph seems to have disadvantages and some heuristics must be performed (e.g.,kNN).
    \item Do we need more/other engineered variables?
-    \item Heterogeneity in the nature of the graph hasn't shown increase in performance. 
+   \item What are good attention layers?
+    \item Using heterogeneity in the nature of the graph has shown a decrease in performance. 
     \end{itemize}\pause
     \end{frame}
     \begin{frame}{Outlook}
     \begin{itemize}\setlength\itemsep{1em}
+    \item Evaluate importance of input features
 	\item Evaluate the learning success by using the learned neighborhood in a fully-fledged branch-price-and-cut algorithm.
 	\item Extend the research on other VRP-variants
 	\item Use Learning in other parts of the branch-price-and-cut algorithm
@@ -1057,7 +1188,9 @@ Picture neighborhood vs learned neighborhood - are the learned rather sparse or
 	Questions or remarks?!
 	\end{center}
 	}
-	
+	\scriptsize
+	\bibliographystyle{apalike}
+	\bibliography{References}	
 	%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 	%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% References  %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 	%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -1101,10 +1234,94 @@ Picture neighborhood vs learned neighborhood - are the learned rather sparse or
 \end{tikzpicture}
 \footnotesize For clarity only, $N_1=N_4$, $N_2=N_3$ and $N_5=N_6$ have been chosen here.
 \end{frame}
-	\scriptsize
-	\bibliographystyle{apalike}
-	\bibliography{References}
-	
-	
+
+   \begin{frame}[noframenumbering]{\modA}
+    \textbf{Graph Encoder:}
+        \begin{itemize}
+            \item \textbf{Graph Type:} Homogeneous (single node and edge type)
+            \item \textbf{Convolution Layers:}
+            \begin{itemize}
+                \item \texttt{GCNConv1}: Projects input features to hidden space with ReLU activation.
+                \item \texttt{GCNConv4}: Produces output node embeddings.
+            \end{itemize}
+            \item \textbf{Output:} Node embeddings 
+        \end{itemize}
+
+%    \textbf{Decoding Process:}
+%    \begin{itemize}
+%        \item Predicts edge scores using dot product of node embeddings.
+%        \item Computes full graph adjacency matrix for all possible node pairs.
+%    \end{itemize}
+
+    \textbf{Goal:} Predict edges or provide embeddings for downstream tasks.
+\end{frame}
+
+\begin{frame}[noframenumbering]{\modA}
+    \textbf{Classification Model:}
+    \begin{itemize}
+        \item Fully connected feed-forward network for downstream tasks.
+        \item \textbf{Structure:}
+        \begin{itemize}
+            \item 3 hidden layers with ReLU activations.
+            \item \texttt{Input Size:} Embedding size from the encoder (e.g., 78).
+            \item \texttt{Output:} Binary classification via sigmoid activation.
+        \end{itemize}
+    \end{itemize}
+
+    \textbf{Workflow:}
+    \begin{enumerate}
+        \item Encode node embeddings using GNN layers.
+        \item Pass embeddings to classification head for:
+        \begin{itemize}
+            \item Node/graph classification tasks.
+            \item Combining graph-based features with external data.
+        \end{itemize}
+    \end{enumerate}
+
+    \textbf{Goal:} Perform classification tasks using graph-based features.
+\end{frame}	
+	\begin{frame}[noframenumbering]{Homogeneous GNN Model}
+    \textbf{Encoding Process:}
+    \begin{enumerate}
+        \item Apply \texttt{GCNConv} layers sequentially:
+        \begin{itemize}
+            \item First 3 layers: Extract hierarchical features with ReLU activations.
+            \item Final layer: Produces the output embeddings.
+        \end{itemize}
+        \item Optional: Dropout for regularization and edge dropout to sparsify training graphs.
+    \end{enumerate}
+    
+    \textbf{Decoding Process:}
+    \begin{itemize}
+        \item Computes dot products between node embeddings to predict edge scores.
+    \end{itemize}
+
+    \textbf{Full Graph Prediction:}
+    \begin{itemize}
+        \item Computes pairwise similarities between all node embeddings for adjacency matrix reconstruction.
+    \end{itemize}
+
+    \textbf{Goal:} Predict edges in a homogeneous graph.
+\end{frame}
+
+\begin{frame}[noframenumbering]{Heterogeneous GNN Model}
+    \textbf{Encoding Process:}
+    \begin{enumerate}
+        \item Linear projections for \texttt{customer} and \texttt{depot} inputs.
+        \item \textbf{Two-stage Convolution:}
+        \begin{itemize}
+            \item \textbf{Stage 1:} Combines \texttt{customer}, \texttt{ng}, and \texttt{depot} connections using GCN/SAGE layers.
+            \item \textbf{Stage 2:} Refines embeddings using updated node features.
+        \end{itemize}
+        \item Produces final embeddings for \texttt{customer} and \texttt{depot}.
+    \end{enumerate}
+
+    \textbf{Decoding Process:}
+    \begin{itemize}
+        \item Predicts sparse, directed \texttt{ng} edges using dot product of \texttt{customer} embeddings.
+    \end{itemize}
+    
+    \textbf{Goal:} Predict sparse directed edges between \texttt{customer} nodes.
+\end{frame}
 	
 \end{document}
diff --git a/figure/GNN.jpg b/figure/GNN.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a8652b50c814f06c63482541b712720fa6274c44
Binary files /dev/null and b/figure/GNN.jpg differ
diff --git a/figure/ML_anw1.tex b/figure/ML_anw1.tex
new file mode 100644
index 0000000000000000000000000000000000000000..641bbdb2539749eee9191460923e467ade25f39d
--- /dev/null
+++ b/figure/ML_anw1.tex
@@ -0,0 +1,22 @@
+
+\begin{center}
+
+
+\begin{tikzpicture}[->, node distance=0.8cm, auto]
+\small
+\node[mynode] (Inst) {Testdatenpunkt};
+\node[mynode,below=0.5 cm of Inst] (Feat) {Feature-Vector};
+\node[mynode,below=0.5 cm of Feat] (ML) {Trainiertes ML-Verfahren};
+\node[mynode,below=0.5 cm of ML] (Out) {Vorhergesagter Output};
+\path
+(Inst) edge[-Triangle,thick,shorten >=3pt]  (Feat)
+(Feat) edge[-Triangle,thick,shorten >=3pt] (ML)
+(ML) edge[-Triangle,thick,shorten >=3pt] (Out)
+;
+\end{tikzpicture}
+
+
+
+
+
+\end{center}
\ No newline at end of file
diff --git a/figure/ML_anw2.tex b/figure/ML_anw2.tex
new file mode 100644
index 0000000000000000000000000000000000000000..7cf86b33952181e658cfd88b49b182a197031940
--- /dev/null
+++ b/figure/ML_anw2.tex
@@ -0,0 +1,27 @@
+
+\begin{center}
+
+
+
+
+\begin{tikzpicture}[->, node distance=0.8cm, auto]
+\small
+\node[large, below=1cm of Inst,text depth = 7 cm] (OR) {};
+\node[mynode, above = 1cm of OR] (Inst) {Testinstanz};
+\node[mynode,below right=-4.5cm and -9cm of OR] (Feat) {Feature-Vector};
+\node[mynode,below right=-4.5cm and -4cm of OR] (Out) {Output};
+\node[mynode,below =-7cm of OR] (Ver) {OR-Verfahren};
+\node[mynode,below =-2cm of OR] (ML) {Trainiertes ML-Verfahren};
+\node[mynode2,right =2cm of OR] (Los) {Lösung};
+
+\path
+(Inst) edge[thick] (OR)
+(Ver) edge[thick] (Feat)
+(Feat) edge[thick] (ML)
+(ML) edge[thick]  (Out)
+(Out) edge[thick]  (Ver)
+(OR) edge[thick]  (Los)
+;
+\end{tikzpicture}
+
+\end{center}
\ No newline at end of file
diff --git a/figure/ML_train.tex b/figure/ML_train.tex
new file mode 100644
index 0000000000000000000000000000000000000000..4dadbe7388555c5f699d154401877919bf7d4db9
--- /dev/null
+++ b/figure/ML_train.tex
@@ -0,0 +1,17 @@
+
+\begin{center}
+\begin{tikzpicture}[->, node distance=0.8cm, auto]
+\small
+
+\node[mynode] (Inst) {Trainingsdatenpunkt};
+\node[mynode,below left=1cm and -0.5cm of Inst] (Feat) {Feature-Vector};
+\node[mynode,below right=1cm and -0.5cm of Inst] (Out) {Berechneter Output};
+\node[mynode,below=3cm of Inst,align=center] (ML) {ML-Verfahren };
+\path
+(Inst) edge[-Triangle,thick,shorten >=3pt] node {} (Out)
+(Inst) edge[-Triangle,thick,shorten >=3pt] node {} (Feat)
+(Feat) edge[-Triangle,thick,shorten >=3pt] node {}(ML)
+(Out) edge[-Triangle,thick,shorten >=3pt]  node {}(ML)
+;
+\end{tikzpicture}
+\end{center}
\ No newline at end of file
diff --git a/figure/ML_train_new.tex b/figure/ML_train_new.tex
new file mode 100644
index 0000000000000000000000000000000000000000..1c1366f921c9863ba624ba53709839b8b2737fab
--- /dev/null
+++ b/figure/ML_train_new.tex
@@ -0,0 +1,44 @@
+\tikzstyle TestEdge=[-Triangle,line width=3pt,shorten >=3pt,green!40!black,dotted]
+\tikzstyle TrainEdge=[-Triangle,thick,shorten >=3pt,line width=3pt]
+\tikzset{
+  Legend/.style={draw=white, top color=white, bottom color=white, inner sep=0.5em, minimum height=1cm,text width=10mm, align = left}}
+
+\begin{tikzpicture}[->, node distance=0.8cm, auto]
+\small
+
+\node[Legend] (Inst) {};%Trainingsdatenpunkte $i=1 \dots n$
+\node[mynode,below left=-1cm and -0.5cm of Inst] (Feat) {Input $X_1, \dots, X_n$};%Feature-Vektoren
+\node[mynode,below right=-1cm and -0.5cm of Inst] (Out) {Output $Y_1, \dots, Y_n$};
+\node[mynode,below=1.5cm of Inst,align=center] (ML) {ML-Model };
+\node[mynode,below=4cm of Inst,align=center] (Pred) {Prediction};
+
+
+\node[Legend, above left = 1cm and 2.5cm of Pred] (TestInst) {}; %Testdatenpunkt $j$
+\node[mynode,below=1 cm of TestInst] (TestFeat) {Input $\hat{X}_1, \dots, \hat{X}_m$};
+
+\node[mynode,right=1.5 cm of Pred] (OutTest) {Output $Y^*_1,\dots, Y^*_m$};
+\path
+%(TestInst) edge[TestEdge]  (TestFeat)
+(TestFeat) edge[TestEdge] (Pred)
+(Pred) edge[TestEdge] (OutTest)
+;
+
+\path
+%(Inst) edge[TrainEdge] node {} (Out)
+%(Inst) edge[TrainEdge] node {} (Feat)
+(Feat) edge[TrainEdge] node {}(ML)
+(Out) edge[TrainEdge]  node {}(ML)
+(ML) edge[TrainEdge]  node {}(Pred)
+;
+
+
+
+\node[Legend, below right = 1cm and 5cm of Inst] (E1) {Training};
+\node[Legend, below = -0.25cm of E1] (E2) {Test};
+\node[Legend, left = 1cm of E1] (S1) {};
+\node[Legend, left = 1cm of E2] (S2) {};
+
+\path
+(S1) edge[TrainEdge] (E1)
+(S2) edge[TestEdge] (E2);
+\end{tikzpicture}
diff --git a/figure/OR_algo.tex b/figure/OR_algo.tex
new file mode 100644
index 0000000000000000000000000000000000000000..1c1366f921c9863ba624ba53709839b8b2737fab
--- /dev/null
+++ b/figure/OR_algo.tex
@@ -0,0 +1,44 @@
+\tikzstyle TestEdge=[-Triangle,line width=3pt,shorten >=3pt,green!40!black,dotted]
+\tikzstyle TrainEdge=[-Triangle,thick,shorten >=3pt,line width=3pt]
+\tikzset{
+  Legend/.style={draw=white, top color=white, bottom color=white, inner sep=0.5em, minimum height=1cm,text width=10mm, align = left}}
+
+\begin{tikzpicture}[->, node distance=0.8cm, auto]
+\small
+
+\node[Legend] (Inst) {};%Trainingsdatenpunkte $i=1 \dots n$
+\node[mynode,below left=-1cm and -0.5cm of Inst] (Feat) {Input $X_1, \dots, X_n$};%Feature-Vektoren
+\node[mynode,below right=-1cm and -0.5cm of Inst] (Out) {Output $Y_1, \dots, Y_n$};
+\node[mynode,below=1.5cm of Inst,align=center] (ML) {ML-Model };
+\node[mynode,below=4cm of Inst,align=center] (Pred) {Prediction};
+
+
+\node[Legend, above left = 1cm and 2.5cm of Pred] (TestInst) {}; %Testdatenpunkt $j$
+\node[mynode,below=1 cm of TestInst] (TestFeat) {Input $\hat{X}_1, \dots, \hat{X}_m$};
+
+\node[mynode,right=1.5 cm of Pred] (OutTest) {Output $Y^*_1,\dots, Y^*_m$};
+\path
+%(TestInst) edge[TestEdge]  (TestFeat)
+(TestFeat) edge[TestEdge] (Pred)
+(Pred) edge[TestEdge] (OutTest)
+;
+
+\path
+%(Inst) edge[TrainEdge] node {} (Out)
+%(Inst) edge[TrainEdge] node {} (Feat)
+(Feat) edge[TrainEdge] node {}(ML)
+(Out) edge[TrainEdge]  node {}(ML)
+(ML) edge[TrainEdge]  node {}(Pred)
+;
+
+
+
+\node[Legend, below right = 1cm and 5cm of Inst] (E1) {Training};
+\node[Legend, below = -0.25cm of E1] (E2) {Test};
+\node[Legend, left = 1cm of E1] (S1) {};
+\node[Legend, left = 1cm of E2] (S2) {};
+
+\path
+(S1) edge[TrainEdge] (E1)
+(S2) edge[TestEdge] (E2);
+\end{tikzpicture}
diff --git a/figure/ZIMPL0.PNG b/figure/ZIMPL0.PNG
new file mode 100644
index 0000000000000000000000000000000000000000..43064a86b2e7b96cf43b1073ded1aa35b4e769ef
Binary files /dev/null and b/figure/ZIMPL0.PNG differ
diff --git a/figure/ZIMPL1.PNG b/figure/ZIMPL1.PNG
new file mode 100644
index 0000000000000000000000000000000000000000..aca5a7c288293b94425018566ff57c214fe32ef8
Binary files /dev/null and b/figure/ZIMPL1.PNG differ
diff --git a/figure/ZIMPL2.PNG b/figure/ZIMPL2.PNG
new file mode 100644
index 0000000000000000000000000000000000000000..92ef1ccf9641c114cd822e34cf44733580f49653
Binary files /dev/null and b/figure/ZIMPL2.PNG differ
diff --git a/figure/ZIMPL3.PNG b/figure/ZIMPL3.PNG
new file mode 100644
index 0000000000000000000000000000000000000000..d16840d19707ec29021c6290e398cef2ba0f78fd
Binary files /dev/null and b/figure/ZIMPL3.PNG differ
diff --git a/figure/ZIMPL4.PNG b/figure/ZIMPL4.PNG
new file mode 100644
index 0000000000000000000000000000000000000000..3ed1d702234a768c4dc552b2a63ed028c9deb31b
Binary files /dev/null and b/figure/ZIMPL4.PNG differ
diff --git a/figure/ZIMPL5.PNG b/figure/ZIMPL5.PNG
new file mode 100644
index 0000000000000000000000000000000000000000..ca002d8b4bfaf5af13480ac8c34f5bebe8201847
Binary files /dev/null and b/figure/ZIMPL5.PNG differ
diff --git a/figure/ZIMPL8.PNG b/figure/ZIMPL8.PNG
new file mode 100644
index 0000000000000000000000000000000000000000..15636ca87450b2400a26f0ee48bebe5f24141bc2
Binary files /dev/null and b/figure/ZIMPL8.PNG differ
diff --git a/figure/ZIMPL9a.PNG b/figure/ZIMPL9a.PNG
new file mode 100644
index 0000000000000000000000000000000000000000..21e728dee86478698dde507d111dbd62f43a4df6
Binary files /dev/null and b/figure/ZIMPL9a.PNG differ
diff --git a/figure/ZIMPL9b.PNG b/figure/ZIMPL9b.PNG
new file mode 100644
index 0000000000000000000000000000000000000000..54e9568137cf696e7861dd501130a248ad250960
Binary files /dev/null and b/figure/ZIMPL9b.PNG differ
diff --git a/figure/catsdog.png b/figure/catsdog.png
new file mode 100644
index 0000000000000000000000000000000000000000..6c02839e65976099346068592af983db241331dc
Binary files /dev/null and b/figure/catsdog.png differ
diff --git a/figure/catsdog2.png b/figure/catsdog2.png
new file mode 100644
index 0000000000000000000000000000000000000000..37d4a0f62ab9b1d0b7b46010def8420d49ffaf5c
Binary files /dev/null and b/figure/catsdog2.png differ
diff --git a/figure/catsdog3.jpg b/figure/catsdog3.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..d04b3eb28f51b6224633a7d40c485350f93deb1a
Binary files /dev/null and b/figure/catsdog3.jpg differ