diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index ac837cfcf1248d4b4c779308d58d795383122b6c..3c68d73055bb33f696d5743c8db191e72552eb0b 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -1,7 +1,12 @@
-image: localhost:5000/mkdocs-imgw
+#image: localhost:5000/mkdocs-imgw
+image: python:3.9-buster
+
+# Install all required packages
+before_script:
+  - pip install -r requirements.txt
 
 stages:
-    - build
+    - test
     - deploy
 
 Build:
@@ -9,11 +14,11 @@ Build:
     tags:
         - dev
     script:
-        - mkdocs build
+        - mkdocs build --strict --verbose
 
 Deploy:
     stage: deploy
     tags:
         - dev
     script:
-        - sshpass -p "$WOLKE_PASSWORD" scp -r /tmp/cr-site/ $WOLKE_USER@wolke.img.univie.ac.at:/var/www/html/
+        - sshpass -p "$WOLKE_PASSWORD" scp -r /tmp/cr-site/ $WOLKE_USER@wolke.img.univie.ac.at:/var/www/html/documentation/general/
diff --git a/.pages b/.pages
new file mode 100644
index 0000000000000000000000000000000000000000..08377bfb9488ce783f1ab35c8f84cb8dea23d1df
--- /dev/null
+++ b/.pages
@@ -0,0 +1,10 @@
+nav:
+    - README.md
+    - SRVX1.md
+    - SRVX8.md
+    - Jet-Cluster.md
+    - VSC.md
+    - ECMWF.md
+    - TeachingHub.md
+    - SSH-VPN-VNC
+    - ...
\ No newline at end of file
diff --git a/Data/Guidelines.md b/Data/Guidelines.md
index d386ad96cac9bb3c0266f3635ec42954d7f2b34a..35990a0e19ed4c7a18744e4130ea78e69dcec666 100644
--- a/Data/Guidelines.md
+++ b/Data/Guidelines.md
@@ -5,10 +5,12 @@
     @phone: 53715
     @date: Fri Sep 25 09:15:10 CEST 2020
 
+
 Data currently shared on the jet cluster:
 `PATH /jetfs/shared-data`
 
 Steps:
+
 1. **Be careful.** Do not remove data, unless you are really certain, that this is ok for everyone.
 2. Create directories with clear names. **Add permission for others**
 3. Ask for help if needed.
@@ -24,7 +26,6 @@ Typical Permissions are:
     folders: 755   rwxr-xr-x  allows all users to read
     files:   644   rw-r--r--  allows all users to read
 
-
 Change permission for all directories under that DIR:
 
 `find [DIR] -type d -exec chmod 755 {} \;`
diff --git a/Data/README.md b/Data/README.md
index 42f699d0ed7f370fd323941883be13ba4e4ba54a..998932ce773a9a34982f3a6ef1dd9ef334383b3b 100644
--- a/Data/README.md
+++ b/Data/README.md
@@ -1,5 +1,6 @@
-# Data available at the department of Meteorology and Geophysics
+# Data Repositories
 
+available at the Department of Meteorology and Geophysics. 
 Edit this file also on [gitlab](https://gitlab.phaidra.org/imgw/computer-resources/-/blob/master/Data/README.md)
 
 # Reanalysis data
diff --git a/ECMWF.md b/ECMWF.md
new file mode 100644
index 0000000000000000000000000000000000000000..d9025382b4ed9bfd4882ea195cd980b365053508
--- /dev/null
+++ b/ECMWF.md
@@ -0,0 +1,84 @@
+
+# European Centre for Medium-Range Weather Forecasts
+![](./mkdocs/img/logo_ecmwf.png)
+
+[website](https://www.ecmwf.int) / [service status](https://www.ecmwf.int/en/service-status) / [confluence](https://confluence.ecmwf.int) / [support](https://support.ecmwf.int) / [accounting](https://www.ecmwf.int/user)
+
+
+Available Services:
+
+- [ecaccess](https://confluence.ecmwf.int/display/ECAC/ECaccess+Home)
+- [srvx1.gateway](https://srvx1.img.univie.ac.at/ecmwf/ecmwf) / [gateway.docs](https://confluence.ecmwf.int/display/ECAC/Releases+-+Gateway+package) / [boaccess](https://boaccess.ecmwf.int)
+
+  
+## Connecting to ECMWF Services
+
+An ECMWF user can connect to the ECS/ATOS using teleport
+
+```bash title="Using teleport"
+module load teleport
+** INFO: Default jumphost now: jump.ecmwf.int
+** INFO: Module loaded. SSH Agent required for login, run 'startagent',                                                       
+**       run ssh-agent -k to kill the agent.
+         Login using: python3 -m teleport.login and your ECMWF credentials.
+
+# Activate the ssh-agent (required to store the key/certificate)
+startagent
+# Check if it is running
+ssh-add -l
+```
+
+```bash title="Connecting to ECMWF"
+# Login to the default teleport jump host (shell.ecmwf.int) Reading
+python3 -m teleport.login
+tsh status
+# run ssh agent again
+ssh-add -l 
+# now there should be two keys!!!
+# Login to the jump host in Bologna 
+python3 -m teleport.login
+
+# Check Status
+tsh status
+# ssh to the login nodes
+ssh -J [user]@jump.ecmwf.int [user]@ecs-login
+ssh -J [user]@jump.ecmwf.int [user]@hpc-login
+```
+
+Environment variables configuration:
+
+- `ECMWF_USERNAME` - The ECMWF Username  
+- `ECMWF_PASSWORD` - The ECMWF Password  
+- `TSH_EXEC` - The Teleport binary tsh path  
+- `TSH_PROXY` - The ECMWF Teleport proxy
+
+
+
+### SSH-agent
+An SSH-agent must be running in order to connect to the ECMWF servers. The teleport module includes a `startagent` function that allows reconnecting to an existing ssh-agent. Do not start too many agents!
+
+```bash title="start ssh-agent"
+# load the module
+module load teleport
+# start a new agent or reconnect
+startagent
+```
+
+
+## ECMWF Access Server (ECS)
+
+There is an issue with ssh-keys
+
+```bash
+# Generate a new SSH key
+ssh-keygen -t ed25519
+# Add the public key to your own authorized_keys on ECS/HPC
+cat .ssh/id_ed25519.pub >> .ssh/authorized_keys
+```
+
+This will solve some `ecaccess` issues.
+
+
+## Connecting via ECaccess
+
+A local installation of the ecaccess tools can be used to submit and monitor jobs from a remote location.
diff --git a/Editors/README.md b/Editors/README.md
index 33d058a956ccbc43d00c1b436b0df0e3e4c6dfb3..3302b1d93f7546b39cab495d09d785fe658b1949 100644
--- a/Editors/README.md
+++ b/Editors/README.md
@@ -1,4 +1,4 @@
-# Editors for development
+# Recommendations
 
 Here one can find some help on getting started with some editors or useful configurations or packages to use.
 
@@ -6,7 +6,6 @@ If you have a nice addition please submit it (create an issue).
 
 Thanks. :elephant:
 
-[[_TOC_]]
 
 ## Vim 
 ![](https://upload.wikimedia.org/wikipedia/commons/thumb/9/9f/Vimlogo.svg/64px-Vimlogo.svg.png)
@@ -20,7 +19,7 @@ Alternative: [VIM](https://www.vim.org/)
 
 Some useful VIM modifications in `~/.vimrc` :
 
-```vim
+```vim title="Configuration file"
 filetype plugin indent on
 " show existing tab with 4 spaces width
 set tabstop=4
diff --git a/Fortran/Debugging.md b/Fortran/Debugging.md
new file mode 100644
index 0000000000000000000000000000000000000000..a88793d91c970d66365839895d6a867ea462c370
--- /dev/null
+++ b/Fortran/Debugging.md
@@ -0,0 +1,152 @@
+# Debugging 
+
+Please have a look at the debugging options of your compiler, which allow adding debugging information to the executable. This makes the executable larger, but it lets the debugger show the source code where a problem occurs. Depending on your code, the compiler may also transform it due to optimization flags. Please consider removing these flags for debugging.
+
+## Coredump 
+What is a coredump ?
+
+*A core dump is a file containing a process's address space (memory) when the process terminates unexpectedly. Core dumps may be produced on-demand (such as by a debugger), or automatically upon termination. Core dumps are triggered by the kernel in response to program crashes, and may be passed to a helper program (such as systemd-coredump) for further processing. A core dump is not typically used by an average user, but may be passed on to developers upon request where it can be invaluable as a post-mortem snapshot of the program's state at the time of the crash, especially if the fault is hard to reliably reproduce.*
+[coredump@ArchWiki](https://wiki.archlinux.org/title/Core_dump)
+
+Most of our servers and the VSC have the coredump service available. You can check that simply by running `coredumpctl`, which should be available if it is installed.
+
+On most systems the core dump size is limited; run `ulimit -c` to see how large your core dump can be. Some systems allow the user to change this with `ulimit -c [number]`. This needs to be set before the core file is dumped.
+
+Core dumps are configured to persist for at least 3 days, before they are automatically cleaned.
+
+### coredump utilities
+
+As a user you can only access your own coredump information, available dumps can be found like this.
+
+```bash
+[user@srvx1 ~]$ coredumpctl list 
+TIME                            PID   UID   GID SIG COREFILE  EXE
+Thu 2022-08-18 09:58:55 CEST 1869359 12345  100  11 none      /usr/lib64/firefox/firefox
+Wed 2022-08-24 14:33:49 CEST 1603205 12345   100  6 none      /jetfs/home/user/Documents/test_coredump.x
+Wed 2022-08-24 14:36:11 CEST 1608700 12345   100  6 truncated /jetfs/home/user/Documents/test_coredump.x
+Wed 2022-08-24 14:47:47 CEST 1640330 12345   100  6 none      /jetfs/home/user/Documents/test_coredump.x
+Wed 2022-08-24 14:57:01 CEST 1664822 12345   100  6 present   /jetfs/home/user/Documents/test_coredump.x
+```
+
+Especially relevant are the `SIG` and the `COREFILE` columns, which give you a reason why your process was killed. Please find some useful information on the Signal in the table below. If `COREFILE` is none, then the system probably disabled core dumps or the ulimit is 0. If truncated, then the ulimit is too small for your core dump. If present, then the file can be used for debugging. 
+
+![Linux Signal](linux-signals.png)
+
+## Test a coredump
+
+Use the following C program to create a coredump and look at it. The program does something wrong. Maybe you can figure it out.
+
+```c
+#include <stdio.h>
+#include <stdlib.h>
+void main(){
+        int x;
+        free(&x);
+}
+```
+
+Write to a file called `test_coredump.c` and compile 
+
+<pre>
+# compile (with -g for debugging information)
+[user@srvx1 ~]$ gcc -g -o test_coredump.x test_coredump.c 
+# execute
+[user@srvx1 ~]$ ./test_coredump.x
+Segmentation fault (core dumped)
+# check the coredump
+[user@srvx1 ~]$ coredumpctl
+ TIME                            PID   UID   GID SIG COREFILE  EXE
+Wed 2022-08-24 14:09:10 CEST 512174    1234  100  11 present   /home/user/test_coredump.x
+# inspect the core dump
+[user@srvx1 ~]$ coredumpctl info 512174
+Hint: You are currently not seeing messages from other users and the system.
+      Users in groups 'adm', 'systemd-journal', 'wheel' can see all messages.
+      Pass -q to turn off this notice.
+           PID: 512174 (test_coredump.x)
+           UID: 1234 (user)
+           GID: 100 (users)
+        Signal: 6 (ABRT)
+     Timestamp: Wed 2022-08-24 14:57:00 CEST (9min ago)
+  Command Line: ./test_coredump.x
+    Executable: /home/user/Documents/test_coredump.x
+ Control Group: /user.slice/user-1234.slice/session-257306.scope
+          Unit: session-257306.scope    
+         Slice: user-1234.slice
+       Session: 257306
+     Owner UID: 1234 (user)
+       Boot ID: 521d3ca4537d4cdb92bc4eefba12072a
+    Machine ID: e9055dc0f93045278fcbdde4b6828bc8
+      Hostname: srvx1.img.univie.ac.at
+       Storage: /var/lib/systemd/coredump/core.test_coredump\x2ex.1234.521d3ca4537d4cdb92bc4eefba12072a.512174.1661345820000>
+       Message: Process 512174 (test_coredump.x) of user 1234 dumped core.                                                  
+
+                Stack trace of thread 512174:
+                #0  0x00007f637fc4737f raise (libc.so.6)
+                #1  0x00007f637fc31db5 abort (libc.so.6)
+                #2  0x00007f637fc8a4e7 __libc_message (libc.so.6)
+                #3  0x00007f637fc915ec malloc_printerr (libc.so.6)
+                #4  0x00007f637fc9189c munmap_chunk (libc.so.6)
+                #5  0x000000000040059a main (test_coredump.x)
+                #6  0x00007f637fc33493 __libc_start_main (libc.so.6)
+                #7  0x00000000004004ce _start (test_coredump.x)   
+</pre>
+
+This tells you where the core dump is and a bit of a stack trace as well.
+Let's have a look at the dump file. 
+
+<pre>
+# run gdb with the core dump file
+[user@srvx1 ~]$ coredumpctl gdb 512174
+...
+This GDB was configured as "x86_64-redhat-linux-gnu".[20/29541]Type "show configuration" for configuration details.
+...
+Reading symbols from /home/user/Documents/test_coredump.x...done.
+Core was generated by `./test_coredump.x'.
+Program terminated with signal SIGABRT, Aborted.
+#0  0x00007f1a84fd137f in raise () from /lib64/libc.so.6
+(gdb) 
+# now let's have a look at where we are.
+(gdb) l
+1       #include <stdio.h>
+2       #include <stdlib.h>
+3       void main(){
+4                       int x;
+5                       free(&x);
+6       }
+# let's run the program and see what problems it has
+(gdb) r
+Starting program: /home/user/Documents/test_coredump.x
+...
+munmap_chunk(): invalid pointer
+
+Program received signal SIGABRT, Aborted.
+0x00007ffff7a4237f in raise () from /lib64/libc.so.6
+(gdb) 
+# so we ask the debugger where that happens:
+(gdb) where
+#0  0x00007ffff7a4237f in raise () from /lib64/libc.so.6
+#1  0x00007ffff7a2cdb5 in abort () from /lib64/libc.so.6
+#2  0x00007ffff7a854e7 in __libc_message () from /lib64/libc.so.6
+#3  0x00007ffff7a8c5ec in malloc_printerr () from /lib64/libc.so.6
+#4  0x00007ffff7a8c89c in munmap_chunk () from /lib64/libc.so.6
+#5  0x000000000040059a in main () at test_coredump.c:5
+
+# and because that is not totally clear, we can do a backtrace
+(gdb) bt full
+#0  0x00007ffff7a4237f in raise () from /lib64/libc.so.6
+No symbol table info available.
+#1  0x00007ffff7a2cdb5 in abort () from /lib64/libc.so.6
+No symbol table info available.
+#2  0x00007ffff7a854e7 in __libc_message () from /lib64/libc.so.6                                                             
+No symbol table info available.
+#3  0x00007ffff7a8c5ec in malloc_printerr () from /lib64/libc.so.6                                                            
+No symbol table info available.
+#4  0x00007ffff7a8c89c in munmap_chunk () from /lib64/libc.so.6                                                               
+No symbol table info available.
+#5  0x000000000040059a in main () at test_coredump.c:5
+        x = 0
+
+# a x is an integer, not malloc'ated, thus no free
+</pre>
+
+Problem solved. We cannot free something that is not allocated. 
\ No newline at end of file
diff --git a/Fortran/README.md b/Fortran/README.md
index c0fea9892e6be4a76b8fbd55820583cb21db922b..8233e849f6a2592e7c9125273cad87edbf4ca321 100644
--- a/Fortran/README.md
+++ b/Fortran/README.md
@@ -1,3 +1,9 @@
+# Coding
+
+Most often codes are written in Python or Fortran. Here some information is given on Fortran and a little bit of C as well.
+
+Have a look at [Debugging](./Debugging.md)
+
 # Fortran
 
 Fortran is quite popular in Meteorology and Geophysics.
@@ -6,39 +12,33 @@ Please find some help on solving common problems.
 Get some information on Fortran:
 - [Fortran Language, Learning, Compilers](https://fortran-lang.org)
 
-
-
-
 # Compilers
 
 There are a few compilers, but most commonly GNU (Gfortran) and INTEL (ifort) are used on our servers. 
 
-|                                  | gfortran                           | ifort                             |
-|----------------------------------|------------------------------------|-----------------------------------|
-|                                  | gfortran                           | ifort                             | 
-| double precision real            | -fdefault-real-8                   | -r8                               | 
-| check array bounds               | -fbounds-check                     | -check                            | 
-| call chain traceback             | -fbacktrace                        | -traceback                        | 
-| convert little/big endian        | -fconvert=big-endian/little-endian | -convert big_endian/little_endian |  
-| default optimisation             | -O0                                | -O2                               |
-| highest recommended optimisation | -O3                                | -O2maybe -O3 or -fast             |
+| | GNU Fortran | INTEL Fortran |
+|---|---|---|
+| double precision real| `-fdefault-real-8`| `-r8`| 
+| check array bounds| `-fbounds-check`| `-check`| 
+| call chain traceback|` -fbacktrace`| `-traceback`| 
+| convert little/big endian| `-fconvert=big-endian/little-endian` | `-convert big_endian/little_endian` |  
+| default optimisation| `-O0`| `-O2`|
+| highest recommended optimisation | `-O3`| `-O2` maybe `-O3` or `-fast`|
+
 
 ## Intel Compiler
 
 from P. Seibert using ifort for the fastest code (srvx1):
-```lang-mk
-FFLAGS    =  -cpp -xAVX -ipo -O3 -no-prec-div -opt-prefetch  -m64 -mcmodel=medium  -I$(INCPATH)
-LIBPATH = /home/tmc/TestEnv/Libraries/grib_api-1.12.3_ifort/lib 
-LDFLAGS   =  $(FFLAGS) -L$(LIBPATH) -Bstatic -lgrib_api_f90 -lgrib_api -lm -ljasper 
+```makefile
+# get GRIP_PATH from environment modules
+INCPATH = GRIP_API/include
+LIBPATH = GRIP_API/lib 
+FFLAGS = -cpp -xAVX -ipo -O3 -no-prec-div -opt-prefetch  -m64 -mcmodel=medium  -I$(INCPATH)
+LDFLAGS = $(FFLAGS) -L$(LIBPATH) -Bstatic -lgrib_api_f90 -lgrib_api -lm -ljasper 
 ```
 
-Remark: for FLEXPART, otherwise you won't need `grib_api` and `jasper`, or `mcmodel=medium`)
+Remark: for FLEXPART, otherwise you won't need `grib_api` and `jasper`, or `mcmodel=medium`
 
-Remarks
-these are settings for FLEXPART
-in general, you probably won't need the gribapi  and jasper library
- -cpp -mcmodel=medium  -I$(INCPATH)
--lm includes the intel math lib; it is improtant that the linking step is something like $(FC) $(FFLAGS) *.o -o a.out $(LDFLAGS) or you may loose the math lib
 
 ## Tricky Issues
 
diff --git a/Fortran/linux-signals.png b/Fortran/linux-signals.png
new file mode 100644
index 0000000000000000000000000000000000000000..9d4ec7e4d2726736b57a4adddd4275819bb60388
Binary files /dev/null and b/Fortran/linux-signals.png differ
diff --git a/Jet-Cluster.md b/Jet-Cluster.md
index 0942dea61bedca602285d21427de8cc110da2ffa..020e0eb652e826ad8cd3d8a7455956e7b2190481 100644
--- a/Jet-Cluster.md
+++ b/Jet-Cluster.md
@@ -28,7 +28,7 @@ Node Setup:
  - 2x Login Nodes
  - 7x Compute Nodes
 
-![GPFS](/mkdocs/img/GPFS-jet.png)
+![GPFS](./mkdocs/img/GPFS-jet.png)
 
 
 | Name | Value |
@@ -59,47 +59,92 @@ Major Libraries:
 
 These software libraries are usually handled by environment modules.
 
-![](/mkdocs/img/envmodules.png)
+![](./mkdocs/img/envmodules.png)
 
 ## Currently installed modules
 
 ```bash
 $ module av
------------------------------------- /jetfs/spack/share/spack/modules/linux-rhel8-haswell -------------------------------------
+--------- /jetfs/spack/share/spack/modules/linux-rhel8-skylake_avx512 ----------
+anaconda3/2020.11-gcc-8.5.0-gf52svn                         
+anaconda3/2021.05-gcc-8.5.0-gefwhbz                         
+cdo/1.9.10-gcc-8.5.0-y4q2l2h                                
+cdo/2.0.1-gcc-8.5.0-xgalz67                                 
+eccodes/2.18.0-intel-20.0.2-6tadpgr                         
+eccodes/2.19.1-gcc-8.5.0-74y7rih                            
+eccodes/2.19.1-gcc-8.5.0-MPI3.1.6-q3prgpi                   
+eccodes/2.21.0-gcc-8.5.0-lq54nls                            
+eccodes/2.21.0-gcc-8.5.0-MPI3.1.6-uu4b62w                   
+eccodes/2.21.0-intel-2021.4.0-cscplox                       
+eccodes/2.21.0-intel-2021.4.0-xnc5g2f                       
+gcc/8.5.0-gcc-8.5rhel8-7ka2e42                              
+gcc/9.1.0-gcc-8.5rhel8-hmyhbce                              
+geos/3.8.1-gcc-8.5.0-bymxoyq                                
+geos/3.9.1-gcc-8.5.0-smhcud5                                
+geos/3.9.1-intel-2021.4.0-wdqirxs                           
+hdf5/1.10.7-gcc-8.5.0-MPI3.1.6-zia454a                      
+hdf5/1.10.7-gcc-8.5.0-t247okg                               
+hdf5/1.10.7-intel-2021.4.0-l6tbvga                          
+hdf5/1.10.7-intel-2021.4.0-n7frjgz                          
+hdf5/1.12.0-intel-20.0.2-ezeotzr                            
+intel-mkl/2020.3.279-intel-20.0.2-m7bxged                   
+intel-mkl/2020.4.304-intel-2021.4.0-mcf5ggn                 
+intel-oneapi-compilers/2021.4.0-gcc-9.1.0-x5kx6di           
+intel-oneapi-mkl/2021.4.0-intel-2021.4.0-d2aqurq            
+intel-oneapi-mpi/2021.4.0-intel-2021.4.0-eoone6i            
+intel-parallel-studio/composer.2020.2-intel-20.0.2-zuot22y  
+libemos/4.5.9-gcc-8.5.0-MPI3.1.6-kcv3tlk                    
+libemos/4.5.9-gcc-8.5.0-vgk5xbg                             
+libemos/4.5.9-intel-2021.4.0-2q2qpc3                        
+miniconda2/4.7.12.1-gcc-8.5.0-hkx7ovs                       
+miniconda3/4.10.3-gcc-8.5.0-eyq4jvx                         
+nco/4.9.3-intel-20.0.2-dhlqiyo                              
+nco/5.0.1-gcc-8.5.0-oxngdn5                                 
+ncview/2.1.8-gcc-8.5.0-c7tcblp                              
+ncview/2.1.8-intel-20.0.2-3taqdda                           
+netcdf-c/4.6.3-gcc-8.5.0-MPI3.1.6-2ggkkoh                   
+netcdf-c/4.6.3-intel-2021.4.0-eaqh45b                       
+netcdf-c/4.7.4-gcc-8.5.0-o7ahi5o                            
+netcdf-c/4.7.4-intel-20.0.2-337uqtc                         
+netcdf-c/4.7.4-intel-2021.4.0-vvk6sk5                       
+netcdf-fortran/4.5.2-gcc-8.5.0-MPI3.1.6-needvux             
+netcdf-fortran/4.5.2-intel-2021.4.0-6avm4dp                 
+netcdf-fortran/4.5.3-gcc-8.5.0-3bqsedn                      
+netcdf-fortran/4.5.3-intel-20.0.2-irdm5gq                   
+netcdf-fortran/4.5.3-intel-2021.4.0-pii33is                 
+netlib-lapack/3.9.1-gcc-8.5.0-ipqdnxj                       
+netlib-scalapack/2.1.0-gcc-8.5.0-bukelua                    
+netlib-scalapack/2.1.0-gcc-8.5.0-MPI3.1.6-rllmmt4           
+openblas/0.3.18-gcc-8.5.0-zv6qss4                           
+openmpi/3.1.6-gcc-8.5.0-ie6e7fw                             
+openmpi/3.1.6-intel-20.0.2-ubasrpk                          
+openmpi/4.0.5-gcc-8.5.0-ryfwodt                             
+openmpi/4.0.5-intel-20.0.2-4wfaaz4                          
+parallel-netcdf/1.12.1-intel-20.0.2-sgz3yqs                 
+parallel-netcdf/1.12.2-gcc-8.5.0-MPI3.1.6-y4btiof           
+parallel-netcdf/1.12.2-gcc-8.5.0-zwftkwr                    
+parallel-netcdf/1.12.2-intel-2021.4.0-bykumdv               
+perl/5.32.0-intel-20.0.2-2d23x7l                            
+proj/7.1.0-gcc-8.5.0-k3kp5sb                                
+proj/7.1.0-intel-2021.4.0-bub3jtf                           
+proj/8.1.0-gcc-8.5.0-4ydzmxc                                
+proj/8.1.0-intel-2021.4.0-omzgfdy                           
+zlib/1.2.11-intel-20.0.2-3h374ov                            
+
+------------- /jetfs/spack/share/spack/modules/linux-rhel8-haswell -------------
 intel-parallel-studio/composer.2017.7-intel-17.0.7-disfj2g  
 
---------------------------------- /jetfs/spack/share/spack/modules/linux-rhel8-skylake_avx512 ---------------------------------
-anaconda2/2019.10-gcc-8.3.1-5pou6ji                         nco/4.9.3-intel-20.0.2-dhlqiyo                     
-anaconda3/2019.10-gcc-8.3.1-tmy5mgp                         ncview/2.1.8-gcc-8.3.1-s2owtzw                     
-anaconda3/2020.07-gcc-8.3.1-weugqkf                         ncview/2.1.8-intel-20.0.2-3taqdda                  
-anaconda3/2020.11-gcc-8.3.1-gramgir                         netcdf-c/4.7.4-gcc-8.3.1-fh4nn6k                   
-cdo/1.9.8-gcc-8.3.1-ipgvzeh                                 netcdf-c/4.7.4-gcc-8.3.1-MPI3.1.6-y5gknpt          
-eccodes/2.18.0-gcc-8.3.1-s7clum3                            netcdf-c/4.7.4-intel-20.0.2-337uqtc                
-eccodes/2.18.0-intel-20.0.2-6tadpgr                         netcdf-fortran/4.5.3-gcc-8.3.1-kfd2vkj             
-enstools/2020.11.dev-gcc-8.3.1-fes7kgo                      netcdf-fortran/4.5.3-gcc-8.3.1-MPI3.1.6-rjcxqb6    
-esmf/7.1.0r-gcc-8.3.1-4fijz4q                               netcdf-fortran/4.5.3-intel-20.0.2-irdm5gq          
-gcc/8.3.1-gcc-8.3.1-pp3wjou                                 netlib-lapack/3.8.0-gcc-8.3.1-ue37lic              
-geos/3.8.1-gcc-8.3.1-o76leir                                netlib-scalapack/2.1.0-gcc-8.3.1-pbkjymd           
-hdf5/1.10.7-gcc-8.3.1-MPI3.1.6-vm3avor                      openblas/0.3.10-gcc-8.3.1-ncess5c                  
-hdf5/1.12.0-gcc-8.3.1-awl4atl                               openmpi/3.1.6-gcc-8.3.1-rk5av53                    
-hdf5/1.12.0-intel-20.0.2-ezeotzr                            openmpi/3.1.6-intel-20.0.2-ubasrpk                 
-intel-mkl/2020.3.279-gcc-8.3.1-5xeezjw                      openmpi/4.0.5-gcc-8.3.1-773ztsv                    
-intel-mkl/2020.3.279-intel-20.0.2-m7bxged                   openmpi/4.0.5-intel-20.0.2-4wfaaz4                 
-intel-oneapi-compilers/2021.2.0-oneapi-2021.2.0-6kdzddx     openmpi/4.0.5-oneapi-2021.2.0-hrfsxrd              
-intel-oneapi-mpi/2021.2.0-oneapi-2021.2.0-haqpxfl           parallel-netcdf/1.12.1-gcc-8.3.1-MPI3.1.6-gng2jcu  
-intel-parallel-studio/composer.2020.2-intel-20.0.2-zuot22y  parallel-netcdf/1.12.1-gcc-8.3.1-xxrhtxn           
-julia/1.5.2-gcc-8.3.1-3iwgkf7                               parallel-netcdf/1.12.1-intel-20.0.2-sgz3yqs        
-libemos/4.5.9-gcc-8.3.1-h3lqu2n                             perl/5.32.0-intel-20.0.2-2d23x7l                   
-miniconda2/4.7.12.1-gcc-8.3.1-zduqggv                       proj/7.1.0-gcc-8.3.1-xcjaco5                       
-miniconda3/4.8.2-gcc-8.3.1-3m7b6t2                          zlib/1.2.11-gcc-8.3.1-bbbpnzp                      
-ncl/6.6.2-gcc-8.3.1-MPI3.1.6-3dxuv5f                        zlib/1.2.11-intel-20.0.2-3h374ov                   
-nco/4.9.3-gcc-8.3.1-g7o6lao 
+---------------------------- /jetfs/manual/modules -----------------------------
+enstools/v2020.11  enstools/v2021.11  teleport/10.1.4  
+
+--------- /opt/spack-jet01/share/spack/lmod/linux-rhel8-skylake_avx512 ---------
+anaconda3/2020.11-gcc-8.3.1-bqubbbt  
 ```
 on how to use environment modules go to [Using Environment Modules](Misc/Environment-Modules.md)
 
 
 ## Jupyterhub
-<img src="/mkdocs/img/jupyterhub-logo.svg" width="100px">
+<img src="./mkdocs/img/jupyterhub-logo.svg" width="100px">
 
 The Jet Cluster serves a [jupyterhub](https://jupyterhub.readthedocs.io/en/stable/) with a [jupyterlab](https://jupyterlab.readthedocs.io/en/stable/) that launches on the jet-cluster compute nodes and allows users to work directly on the cluster as well as submit jobs.
 
diff --git a/Misc/README.md b/Misc/README.md
index e475ae6c33671428a9256c6621a514b4088005f9..19112c8e4f07c2110eadabdbc20cb02baa17fc52 100644
--- a/Misc/README.md
+++ b/Misc/README.md
@@ -36,13 +36,13 @@ If you experience processes that are slower than they should be or you are unsur
 
 ### Process States
 For example if you run `ps ux` or `htop` and check your process, you should see a state with the following meaning:
-
+<pre>
     D = UNINTERRUPTABLE_SLEEP
     R = RUNNING & RUNNABLE
     S = INTERRRUPTABLE_SLEEP
     T = STOPPED
     Z = ZOMBIE
-
+</pre>
 Processes in a **"D"** or uninterruptible sleep state are usually waiting on I/O. The ps command shows a "D" on processes in an uninterruptible sleep state. The vmstat command also shows the current processes that are "blocked" or waiting on I/O. The vmstat and ps will not agree on the number of processes in a "D" state, so don't be too concerned. You cannot kill "D" state processes, even with `SIGKILL` or `kill -9`. As the name implies, they are uninterruptible. You can only clear them by rebooting the server or waiting for the I/O to respond. It is normal to see processes in a "D" state when the server performs I/O intensive operations.
 
 As root you can do `echo w > /proc/sysrq-trigger` and check the `dmesg -T` what the stacktrace reports.
diff --git a/Misc/Slurm.md b/Misc/Slurm.md
new file mode 100644
index 0000000000000000000000000000000000000000..e8271f41f1cecb7132babffd0b16e39ea430d060
--- /dev/null
+++ b/Misc/Slurm.md
@@ -0,0 +1,79 @@
+# Slurm
+
+We use [SLURM](https://slurm.schedmd.com/overview.html) as a workload manager to schedule jobs onto compute resources. Via SLURM we can ensure that each user gets a fair share of the limited compute resources and that multiple users do not interfere with each other when e.g. running benchmarks.
+
+*Important: You can only access a node via SSH when you have a SLURM allocation of that node.*
+
+## Basics
+
+
+
+## IMGW special commands
+
+There are currently a few extra commands that can be used on the Jet Cluster to facilitate usage of the nodes.
+
+Tools:
+- `jobinfo`
+- `jobinfo_remaining`
+- `nodeinfo`
+- `queueinfo`
+- `watchjob`
+
+```bash
+# Get information on your job
+jobinfo
+# or use a JOBID
+jobinfo 123456
+# 
+jobinfo_remaining
+```
+
+## jobs
+
+
+## MPI 
+
+
+
+## status and reason codes
+
+The `squeue` command details a variety of information on an active job’s status with state and reason codes. *__Job state codes__* describe a job’s current state in queue (e.g. pending, completed). *__Job reason codes__* describe the reason why the job is in its current state. 
+
+The following tables outline a variety of job state and reason codes you may encounter when using squeue to check on your jobs.
+
+### Job State Codes
+
+| Status        | Code  | Explanation                                                             |
+| ------------- | :---: | ---------------------------------------------------------------------- |
+| COMPLETED	| `CD`	| The job has completed successfully.                                    |
+| COMPLETING	| `CG`	| The job is finishing but some processes are still active.              |
+| FAILED	| `F`	| The job terminated with a non-zero exit code and failed to execute.    |
+| PENDING	| `PD`	| The job is waiting for resource allocation. It will eventually run.    |
+| PREEMPTED	| `PR`	| The job was terminated because of preemption by another job.           |
+| RUNNING	| `R`	| The job currently is allocated to a node and is running.               |
+| SUSPENDED	| `S`	| A running job has been stopped with its cores released to other jobs.  |
+| STOPPED	| `ST`	| A running job has been stopped with its cores retained.                |
+
+A full list of these Job State codes can be found in [Slurm’s documentation.](https://slurm.schedmd.com/squeue.html#lbAG)
+
+
+### Job Reason Codes
+
+| Reason Code              | Explanation                                                                                  |
+| ------------------------ | ------------------------------------------------------------------------------------------- |
+| `Priority`	           | One or more higher priority jobs is in queue for running. Your job will eventually run.     |
+| `Dependency`	           | This job is waiting for a dependent job to complete and will run afterwards.                |
+| `Resources`	           | The job is waiting for resources to become available and will eventually run.               |
+| `InvalidAccount`	   | The job’s account is invalid. Cancel the job and rerun with correct account.             |
+| `InvalidQOS`             | The job’s QoS is invalid. Cancel the job and rerun with the correct QoS.                 |
+| `QOSGrpCpuLimit` 	   | All CPUs assigned to your job’s specified QoS are in use; job will run eventually.          |
+| `QOSGrpMaxJobsLimit`	   | Maximum number of jobs for your job’s QoS have been met; job will run eventually.           |
+| `QOSGrpNodeLimit`	   | All nodes assigned to your job’s specified QoS are in use; job will run eventually.         |
+| `PartitionCpuLimit`	   | All CPUs assigned to your job’s specified partition are in use; job will run eventually.    |
+| `PartitionMaxJobsLimit`  | Maximum number of jobs for your job’s partition have been met; job will run eventually.     |
+| `PartitionNodeLimit`	   | All nodes assigned to your job’s specified partition are in use; job will run eventually.   |
+| `AssociationCpuLimit`	   | All CPUs assigned to your job’s specified association are in use; job will run eventually.  |
+| `AssociationMaxJobsLimit`| Maximum number of jobs for your job’s association have been met; job will run eventually.   |
+| `AssociationNodeLimit`   | All nodes assigned to your job’s specified association are in use; job will run eventually. |
+
+A full list of these Job Reason Codes can be found [in Slurm’s documentation.](https://slurm.schedmd.com/squeue.html#lbAF)
diff --git a/Python/QA-003-Conda-Environment.ipynb b/Python/QA-003-Conda-Environment.ipynb
index ecd58457d91efc495fa686b4349d003c753bc96b..0609dd41c7d72ec3a74cd3d5cad8fecd72e18e5c 100644
--- a/Python/QA-003-Conda-Environment.ipynb
+++ b/Python/QA-003-Conda-Environment.ipynb
@@ -259,16 +259,22 @@
     "will edit your `.bashrc` and that is not nice, especially when you might move to another module. \n",
     "**Therefore this is not recommended to do.**\n",
     "\n",
-    "This script is used by `conda init bash` to set the configuration, we simply source (load) it and get the same functionality as we would be using `conda init bash`. **(Do not use `conda init bash`)**\n",
+    "This script is used by `conda init bash` to set the configuration, we simply source (load) it and get the same functionality as we would be using `conda init bash`. \n",
+    "\n",
+    "**(Do not use `conda init bash`)**\n",
+    "\n",
     "\n",
     "```bash\n",
+    "# source the conda functionality\n",
+    "eval \"$(conda shell.bash hook)\"\n",
+    "# or\n",
     "source $(dirname $(which conda))/../etc/profile.d/conda.sh\n",
     "# now activating the environment works\n",
     "conda activate myenv\n",
     "```\n",
     "\n",
     "#### bash conda_activate function\n",
-    "optionaly you can add this bash function to your `.bashrc` for convenience:\n",
+    "optionally you can add this bash function to your `.bashrc` for convenience:\n",
     "\n",
     "```bash\n",
     "conda_activate(){\n",
@@ -285,8 +291,7 @@
     "    fi\n",
     "}\n",
     "```\n",
-    "\n",
-    "After your `.bashrc` has been loaded (e.g. logout/login again) you should be able to use\n",
+    "Source your bashrc by running: `source ~/.bashrc` or logout/login again. Then you should be able to use the function:\n",
     "\n",
     "```bash\n",
     "module load anaconda3\n",
diff --git a/Python/README.md b/Python/README.md
index aa23581e4672deadc4ddb122a078b4b32b2722fe..99122a3c8d6c431cfd59617c5a3445fbf4685725 100644
--- a/Python/README.md
+++ b/Python/README.md
@@ -2,6 +2,31 @@
 
 [[_TOC_]]
 
+## Introduction to conda / micromamba
+
+**What is conda ?**
+
+[link](https://docs.conda.io/projects/conda/en/latest/)
+This is the package manager for the anaconda or miniconda distribution of Python. It allows you to install packages and create environments and much much more. However, mostly you will use it to install packages and create environments for your code. It is slow.
+
+**What is micromamba ?**
+
+This is a replacement package manager for Python that is independent of any distribution.
+[link](https://github.com/mamba-org/mamba)
+
+**What should I use?**
+
+It is recommended to use micromamba, which is way faster than conda. It has a smaller footprint and it works almost identically to conda. It is a C++ replacement of conda/mamba.
+
+
+### install packages
+
+
+There are sometimes different versions available with different build options as well.
+
+`conda install -c <channel> <package_name>=<version>=<build_string>`
+
+
 ## Q: Installing Cartopy on Jet Cluster or any other module based environment?
 [Cartopy Installation Notebook](QA-001-Cartopy-Installation-Jet.ipynb)
 
diff --git a/README.md b/README.md
index 97e74c8e7c19d1c4628983c78ae4355a53cc178e..f1330cac8949dc7115ca3b7e5311138e5fbb33f6 100644
--- a/README.md
+++ b/README.md
@@ -25,7 +25,7 @@ Locations:
 
 - Staff + Students [SRVX1](SRVX1.md)
 - Staff + Remote Desktop [SRVX8](SRVX8.md)
-- Staff + Remote Desktop + Jupyterhub [Jet Cluster](Jet-Cluser.md)
+- Staff + Remote Desktop + Jupyterhub [Jet Cluster](Jet-Cluster.md)
 - Staff + Students, Jupyterhub called [TeachingHub](TeachingHub.md)
 - Staff [Vienna Scientific Cluster (VSC)](VSC.md)
     - [VSC Training](https://vsc.ac.at/training)
diff --git a/SRVX1.md b/SRVX1.md
index daf0c24db23890daae90afa1319f56633d6c101d..3c23c82ecffeb9246545f0733d45b0b86541557d 100644
--- a/SRVX1.md
+++ b/SRVX1.md
@@ -23,8 +23,8 @@ Steps:
 | Memory | 754 GB Total |
 | Memory/Core | 9.4 GB |
 
-### Greeter   
-```
+
+``` title="Mountain Greeter"
 ----------------------------------------------
  131.130.157.11 _    .  ,   .           .
     *  / \_ *  / \_      _  *        *   /\'_
@@ -51,7 +51,7 @@ Currently running:
 
 
 ## Jupyterhub
-<img src="/mkdocs/img/jupyterhub-logo.svg" width="300px">
+<img src="./mkdocs/img/jupyterhub-logo.svg" width="150px">
 
 SRVX1 serves a teaching [jupyterhub](https://jupyterhub.readthedocs.io/en/stable/) with a [jupyterlab](https://jupyterlab.readthedocs.io/en/stable/). It allows easy access for students and teachers. Access: [https://srvx1.img.univie.ac.at/hub](https://srvx1.img.univie.ac.at/hub)
 
@@ -83,14 +83,14 @@ Major Libraries:
 
 These software libraries are usually handled by environment modules.
 
-![](/mkdocs/img/envmodules.png)
+![](./mkdocs/img/envmodules.png)
 
 ## Currently installed modules
 Please note that new versions might already be installed.
 
-```bash
-$ module av
---------------- /home/swd/spack/share/spack/modules/linux-rhel8-skylake_avx512 ----------------
+```bash title="available modules"
+module av
+--- /home/swd/spack/share/spack/modules/linux-rhel8-skylake_avx512 
 anaconda3/2020.11-gcc-8.5.0                         matlab/R2020b-gcc-8.5.0                     proj/8.1.0-gcc-8.5.0     
 anaconda3/2021.05-gcc-8.5.0                         miniconda2/4.7.12.1-gcc-8.5.0               python/3.8.12-gcc-8.5.0  
 autoconf/2.69-oneapi-2021.2.0                       miniconda3/4.10.3-gcc-8.5.0                 
@@ -118,7 +118,7 @@ intel-oneapi-mpi/2021.2.0-oneapi-2021.2.0           parallel-netcdf/1.12.2-gcc-8
 intel-parallel-studio/composer.2020.4-intel-20.0.4  parallel-netcdf/1.12.2-gcc-8.5.0-MPI3.1.6   
 libemos/4.5.9-gcc-8.5.0-MPI3.1.6                    perl/5.32.0-intel-20.0.4                    
 
------------------------------------------------------- /home/swd/modules ------------------------------------------------------
+--- /home/swd/modules 
 anaconda3/leo-current-gcc-8.3.1  idl/8.2-sp1                micromamba/0.15.2  pypy/7.3.5   xconv/1.94  
 ecaccess/4.0.2                   intelpython/2021.4.0.3353  ncl/6.6.2          shpc/0.0.33       
 ```
diff --git a/SRVX8.md b/SRVX8.md
index 0e4b0ebcbd429dd979e657ab5626ecdc88c2f017..01bebcc1610f573f84f8ab6198864c4b41a28be7 100644
--- a/SRVX8.md
+++ b/SRVX8.md
@@ -1,4 +1,3 @@
-
 # S R V X 8
 
 > Remote Desktop and Virtual Machines
@@ -22,21 +21,20 @@ Steps:
 | Memory | 504 GB Total |
 | Memory/Core |  18 Gb |
 
-### Greeter
-```
+``` title="Cloud Greeter"
 ----------------------------------------------
-                _                             
-              (`  ).                          
-             (     ).                     
-)           _( SRVX8 '`.                
-        .=(`(      .   )     .--       
-       ((    (..__.:'-'   .+(   )            
-`.     `(       ) )       (   .  )     
-  )      ` __.:'   )     (   (   )) 
-)  )  ( )       --'       `- __.'   
-.-'  (_.'          .')              
-                  (_  )                       
-                              131.130.157.8   
+                 _
+              (`  ).
+             (     ).
+)           _( SRVX8 '`.
+        .=(`(      .   )     .--
+       ((    (..__.:'-'   .+(   )
+`.     `(       ) )       (   .  )
+  )      ` __.:'   )     (   (   ))
+)  )  ( )       --'       `- __.'
+.-'  (_.'          .')
+                  (_  )
+                              131.130.157.8
 --..,___.--,--'`,---..-.--+--.,,-,,.-..-._.-.-
 ----------------------------------------------
 ```
@@ -48,7 +46,7 @@ The typcial installation of a intel-server has the INTEL Compiler suite (`intel-
 Major Libraries:
 
  - OpenMPI (3.1.6, 4.0.5)
- - HDF5 
+ - HDF5
  - NetCDF (C, Fortran)
  - ECCODES from [ECMWF](https://confluence.ecmwf.int/display/ECC)
  - Math libraries e.g. intel-mkl, lapack,scalapack
@@ -62,9 +60,10 @@ These software libraries are usually handled by environment modules.
 ## Currently installed modules
 
 on how to use environment modules go to [Using Environment Modules](Misc/Environment-Modules.md)
-```bash
-$ module av
-------------------- /home/swd/spack/share/spack/modules/linux-rhel7-haswell -------------------
+
+```sh title="Environment Modules"
+module av
+--- /home/swd/spack/share/spack/modules/linux-rhel7-haswell 
 anaconda2/2019.10-gcc-8.4.0
 anaconda3/2021.05-gcc-8.4.0
 eccodes/2.21.0-gcc-8.4.0
@@ -95,14 +94,14 @@ openmpi/4.1.1-gcc-8.4.0
 proj/8.1.0-gcc-8.4.0
 python/3.8.9-gcc-4.8.5
 
--------------------------------------- /home/swd/modules --------------------------------------
+--- /home/swd/modules
 micromamba/latest
 ```
 
 ## User services
-There is a script collection that is accessible via the `userservices` command. e.g. running 
-```bash
-$ userservices
+There is a script collection that is accessible via the `userservices` command. e.g. running
+```bash title="userservices"
+userservices
 
 Usage: userservices [service] [Options]
 Available Services:
@@ -122,8 +121,8 @@ Available Services:
 
 These scripts are intended to help with certain known problems.
 Report problems to: michael.blaschek@univie.ac.at
-
 ```
+
 These are scripts in a common directory. Feel free to copy or edit as you like. Note that some services like filesender require an **ACONET account** (accessible via your u:account). **Please note the available VNC services** for the remote desktop environment.
 For VNC the user can choose between installed desktops:
 
diff --git a/SSH-VPN-VNC/IPA.md b/SSH-VPN-VNC/IPA.md
new file mode 100644
index 0000000000000000000000000000000000000000..dbc23e79b1a2ba0b188210ab386304c2174165f0
--- /dev/null
+++ b/SSH-VPN-VNC/IPA.md
@@ -0,0 +1,60 @@
+# IPA
+
+*Identity, Policy and Audit* management system for distributed servers and central managed identities. More information [here](https://www.freeipa.org/page/About). This is currently hosted under [wolke](https://wolke.img.univie.ac.at/ipa/ui), which is the web interface and allows easy access for users and managers.
+
+## Access
+
+The IPA has a web interface ([UI](https://wolke.img.univie.ac.at/ipa/ui)) and can only be accessed from UNIVIE Networks. Please use a VPN or connect via [SSH](./SSH.md).
+
+### Password Rules
+A new password must use at least 2 of the following classes:
+
+- Upper-case characters
+- Lower-case characters
+- Digits
+- Special characters (for example, punctuation)
+
+**Minimum** length is **8**, password **history** is **4**. The minimum **lifetime** of a password is **1 hour**.
+
+### SSH Login
+Please use the given credentials (*username, first-time password*) and login to e.g. *srvx1.img.univie.ac.at*:
+
+![](../mkdocs/img/ipa-ssh-login.png)
+
+You will be asked to change your password immediately.
+
+### Web interface
+When inside the UNIVIE network you can access the web interface under [https://wolke.img.univie.ac.at/ipa/ui](https://wolke.img.univie.ac.at/ipa/ui) and login with your credentials. If your password has been reset or the first time you will be asked to change your onetime password immediately.
+
+#### Login
+Please use the given credentials (*username, first-time password*) and login to the web interface:
+
+![](../mkdocs/img/ipa-login.png)
+
+You will be asked to change your first-time password immediately. 
+
+#### Add SSH key
+Please login to the UI web interface and go to your user page:
+
+![](../mkdocs/img/ipa-add-ssh.png)
+
+Choose *Add* in the SSH public keys section and add your **public** SSH Key here. If you do not know how to create one look into the [SSH](./SSH.md) Section or simply use this command: `ssh-keygen`. It is strongly advised to use a passphrase to secure your key. More information in the SSH Section.
+
+![](../mkdocs/img/ipa-add-ssh-pub.png)
+![](../mkdocs/img/ipa-add-ssh-save.png)
+Do not forget to save your changes and finally you should see the fingerprint of your key added.
+![](../mkdocs/img/ipa-add-ssh-final.png)
+Done.
+The IPA needs to sync this to all connected servers and this might take up to 5 minutes. Please be patient.
+
+## OTP
+
+This section will be added soon.
+
+:construction:
+
+
+## Kerberos
+
+:construction:
+
diff --git a/SSH-VPN-VNC/Questions.md b/SSH-VPN-VNC/Questions.md
index e41cce55b5e52729966a11076df880e55838e6dd..8999957cddf9972c3f28997dd9c6e6a6fd098505 100644
--- a/SSH-VPN-VNC/Questions.md
+++ b/SSH-VPN-VNC/Questions.md
@@ -53,6 +53,10 @@ ssh-agent -k
 
 [Nice summary of how an ssh-agent works](https://smallstep.com/blog/ssh-agent-explained/)
 
+<img src="https://keepassxc.org/images/keepassxc-logo.svg" style="height:200px">
+
+Keep in mind that you can use the ssh-agent with [KeePassXC](https://keepassxc.org/docs/#faq-ssh-agent-how), find a nice tutorial [here](https://ferrario.me/using-keepassxc-to-manage-ssh-keys/). This is really convenient as it allows you to use all keys in the KeePass database as long as it is unlocked. The keys will be automatically removed when the KeePass database is locked. :)
+
 ## Q: How to transfer files between two VPN networks?
 
 You should be able to use an SSH tunnel via a gateway server
@@ -124,3 +128,23 @@ This will mount the remote directory to the local directory. The local directory
     ```
 
 
+## Q: How to use an SSH tunnel for private browsing?
+based on a tutorial from [Linuxize](https://linuxize.com/post/how-to-setup-ssh-socks-tunnel-for-private-browsing/).
+
+It can be really useful to access resources from inside the IMGW / UNIVIE network without using the VPN from the ZID. This can be done super easily. You need an SSH client (e.g. ssh, Putty) and [Firefox](https://www.mozilla.org/en-US/firefox/new/).
+
+I'm showing the things here only for Linux, but Windows with Putty should be straight forward too. Connect to SRVX1 for example:
+
+```bash
+ssh -N -D 8091 [USER]@srvx1.img.univie.ac.at
+```
+Options:
+- `-N` - Tells SSH not to execute a remote command.
+- `-D 8091` - Opens a SOCKS tunnel on the specified port number.
+- To run the command in the background use the `-f` option.
+
+Authenticate at the server and check that the connection is working. Next open Firefox and go to settings - network and select manual proxy configuration. 
+
+![](../mkdocs/img/ssh-tunnel-firefox.jpg)
+
+**Voila.** You can access websites from within the UNIVIE / IMGW network.
\ No newline at end of file
diff --git a/SSH-VPN-VNC/README.md b/SSH-VPN-VNC/README.md
index 249eefa38df0eba0d339fc2c6bbb8be812c27449..251cac02a2223ef1cc4353e123d1e622d2f77b0e 100644
--- a/SSH-VPN-VNC/README.md
+++ b/SSH-VPN-VNC/README.md
@@ -27,8 +27,9 @@ Connect to either :
 [Screen](https://wiki.ubuntuusers.de/Screen/) is terminal session manager, that allows to start processes and reconnect to these processes after disconnection.
 This starts a new session
 
-```bash
-$ screen -S longjob
+```bash title="Screen"
+# open a interactive screen session
+screen -S longjob
 ```
 
 You can detach from this session with `CTRL + A D` and reconnect again with `screen -r`.
@@ -39,8 +40,8 @@ Multiple Sessions can be created and the output saved (`-L` Option).
 
 ## Tmux
 [Tmux](https://wiki.ubuntuusers.de/tmux/) is a terminal multiplexer, that allows to open more consoles and allows to detach the session. It is much more complex and powerful compared to screen.
-```bash
-$ tmux
+``` bash title="Terminal Multiplexer"
+tmux
 ```
 Launches a new virtual terminal, with `CTRL + B D` it can bed detached and with `tmux a` it can be reconnected. 
 ![](https://linuxacademy.com/site-content/uploads/2016/08/tmux.png)
diff --git a/SSH-VPN-VNC/SSH.md b/SSH-VPN-VNC/SSH.md
index ce5a515f4d838f6ce84f75961b57b8d25cef3755..21f877a742f2643dafc459ee3719f19c798a3a86 100644
--- a/SSH-VPN-VNC/SSH.md
+++ b/SSH-VPN-VNC/SSH.md
@@ -1,7 +1,8 @@
 # SSH
 
 **From any computer in the IMGW subnet**: Log in via ssh by typing either of the following in a terminal (there are two redundant login nodes, jet01 and jet02). Replace `[USERNAME]` with your own.
-```bash
+
+```bash title="SSH commands"
 ssh -X [USERNAME]@srvx1.img.univie.ac.at
 ssh -X [USERNAME]@srvx8.img.univie.ac.at
 ssh -X [USERNAME]@jet01.img.univie.ac.at
@@ -9,10 +10,12 @@ ssh -X [USERNAME]@131.130.157.215
 ssh -X [USERNAME]@jet02.img.univie.ac.at
 ssh -X [USERNAME]@131.130.157.216
 ```
+
 The `-X` option enables X11 forwarding via ssh, i.e., permits opening graphical windows.
 
 Consider using a `~/.ssh/config` configuration file to allow easier access like this:
-```
+
+``` title="~/.ssh/config"
 Host *
     ServerAliveInterval 60
     ServerAliveCountMax 2
@@ -38,6 +41,22 @@ Host ecaccess
     HostKeyAlgorithms ssh-dss
     KexAlgorithms diffie-hellman-group1-sha1
     Ciphers aes256-cbc
+Host jump.ecmwf.int shell.ecmwf.int
+    HostKeyAlgorithms +ssh-rsa*,rsa-sha2-512
+    PubkeyAcceptedKeyTypes +ssh-rsa*
+    User [ECMWF USERNAME]  
+# For ecgate and Cray HPCF
+Host ecg* cc*
+    HostKeyAlgorithms +ssh-rsa*,rsa-sha2-512
+    PubkeyAcceptedKeyTypes +ssh-rsa*
+    User [ECMWF USERNAME]
+    ProxyJump shell.ecmwf.int
+# For Atos HPCF
+Host a?-* a??-* hpc-* hpc2020-* ecs-*
+    HostKeyAlgorithms +ssh-rsa*,rsa-sha2-512
+    PubkeyAcceptedKeyTypes +ssh-rsa*
+    User [ECMWF USERNAME]
+    ProxyJump jump.ecmwf.int
 ```
 and replacing `[USERNAME]` and `[U:Account USERNAME]` with your usernames. Using such a file allows to connect like this `ssh srvx1` using the correct server adress and specified username. Copy this file as well on `login.univie.ac.at` and you can use commands like this: `ssh -t login ssh jet` to connect directly to `jet` via the `login` gateway.
 
@@ -66,7 +85,7 @@ If you are a guest, you can apply for a [guest u:account](https://zid.univie.ac.
 Find a solution [Questions - How to use ssh-key authentication?](Questions.md#q-how-to-use-ssh-key-authentication) or [Questions - How to use an ssh-agent?](Questions.md#q-how-to-use-an-ssh-agent)
 
 ## Connect Script
-If you are using a terminal (Mac, Linux, WSL, ...) you can use the script [Download: connect2jet](connect2jet) like this:
+If you are using a terminal (Mac, Linux, WSL, ...) you can use the script [Download: connect2jet](./connect2jet) like this:
 ```bash
 connect2jet -g [U:Account-Username]@login.univie.ac.at [Jet-Username]@jet01.img.univie.ac.at
 ```
diff --git a/VSC.md b/VSC.md
index 7ec6e33d7850164f3fc4cb36b40ccbd4cbb2d87f..8842acbc1111ebd11e79466b87391d7530e69270 100644
--- a/VSC.md
+++ b/VSC.md
@@ -1,28 +1,71 @@
 # Vienna Scientific Cluster
 
 > High Performance Computing available to Staff
+> Austrian HPC effort
+> part of EuroCC 
 >
 
 Links:
 
 - [VSC](https://vsc.ac.at/home/)
 - [VSC-Wiki](https://wiki.vsc.ac.at)
+- [EuroCC - Austria](https://eurocc-austria.at)
 
 
-We have the privilege to be part of the VSC and have private nodes at VSC-4 (since 2020) and VSC-3 (since 2014)
+We have the privilege to be part of the VSC and have private nodes at VSC-5 (since 2022), VSC-4 (since 2020) and VSC-3 (since 2014), which is retired by 2022.
 
 Access is primarily via SSH:
 
-```bash
+``` bash
+$ ssh user@vsc5.vsc.ac.at
 $ ssh user@vsc4.vsc.ac.at
-$ ssh user@vsc3.vsc.ac.at
+$ ssh user@vsc3.vsc.ac.at  # old, does not work anymore
 ```
 
 Please follow some connection instruction on the [wiki](https://wiki.vsc.ac.at) which is similar to all other servers (e.g. [SRVX1](SRVX1.md)).
-The VSC is only available from within the UNINET (VPN, ...).
+The VSC is only available from within the UNINET (VPN, ...). Authentication requires a mobile phone.
 
 We have private nodes at our disposal and in order for you to use these you need to specify the correct account in the jobs you submit to the queueing system (SLURM). The correct information will be given to you in the registration email. 
 
+## IMGW customizations in the shell
+
+If you want you can use some shared shell scripts that provide information for users about the VSC system.
+
+```bash
+# run the install script, that just appends to your PATH variable.
+/gpfs/data/fs71386/imgw/install_imgw.sh
+```
+
+Please find the following commands available:
+- `imgw-quota` shows the current quota on VSC for both HOME and DATA
+- `imgw-container` singularity/apptainer container run script, see [below](#containers)
+- `imgw-transfersh` Transfer-sh service on [srvx1](https://srvx1.img.univie.ac.at/filetransfer), easily share small files.
+- `imgw-cpuinfo` Show CPU information
+
+Please find a shared folder in `/gpfs/data/fs71386/imgw/shared` and add data there that needs to be used by multiple people. Please make sure that things are removed again as soon as possible. Thanks.
+
+## Node Information VSC-5
+```
+CPU model:  AMD EPYC 7713 64-Core Processor
+1 CPU,  64 physical cores per CPU, total 128 logical CPU units
+
+512 GB Memory
+```
+
+We have access to 11 private Nodes of that kind. We also have access to 1 GPU node with Nvidia A100 accelerators. Find the partition information with:
+
+```bash
+$ sqos
+            qos_name total  used  free     walltime   priority partitions  
+=========================================================================
+         p71386_0512    11     0    11  10-00:00:00     100000 zen3_0512   
+     p71386_a100dual     1     0     0  10-00:00:00     100000 gpu_a100_dual
+```
+
+## Storage on VSC-5
+the HOME and DATA partition are the same as on [VSC-4](#storage-on-vsc-4).
+
+
 ## Node Information VSC-4
 ```
 CPU model: Intel(R) Xeon(R) Platinum 8174 CPU @ 3.10GHz
@@ -43,18 +86,19 @@ $ sqos
 
 ## Storage on VSC-4
 
-All quotas are shared between users:
+All quotas are **shared between all** IMGW/Project users:
 
-- `$HOME` (up to 100 GB, all home directories)
+- `$HOME` (up to 100 GB, **all home directories**)
 - `$DATA`  (up to 10 TB, backed up)
-- `$BINFL` (up to 1TB, fast scratch)
-- `$BINFS` (up to 2GB, SSD fast)
+- `$BINFL` (up to 1TB, fast scratch), **will be retired**
+- `$BINFS` (up to 2GB, SSD fast), **will be retired**
 - `$TMPDIR` (50% of main memory, deletes after job finishes)
 - `/local` (Compute Nodes, 480 GB SSD, deletes after Job finishes)
 
 ![VSC Storage Performance](https://service.vsc.ac.at/slides/introduction-to-vsc/08_storage_infrastructure/pictures/vsc_write_performance.png)
 
-Check quotas:
+Check quotas by running the following commands yourself (including your PROJECTID), or use the `imgw-quota` command from the [imgw shell extensions](#imgw-customizations-in-the-shell)
+
 ```bash
 $ mmlsquota --block-size auto -j data_fs71386 data
                          Block Limits                                    |     File Limits
@@ -103,13 +147,12 @@ $ beegfs-ctl --getquota --cfgFile=/etc/beegfs/global3.d/beegfs-client.conf --gid
         p70653| 70653||    5.62 TiB|   12.00 TiB||   175886|  1000000
 ```
 
-**However, it seems that most filesystems are quite full.**
 
 ## Run time limits
 
 On VSC-3 we have a max runtime of 10 days for the private Queue. The normal queues have 3 days. the devel only 10 min (for testing)
 
-```bash
+<pre>
 $ sacctmgr show qos format=name%20s,priority,grpnodes,maxwall,description%40s
 
                 Name   Priority GrpNodes     MaxWall                                    Descr 
@@ -119,13 +162,12 @@ $ sacctmgr show qos format=name%20s,priority,grpnodes,maxwall,description%40s
          normal_0256       2000        6  3-00:00:00                                 all user 
          normal_0128       2000       11  3-00:00:00                                 all user 
           devel_0128    5000000       10    00:10:00         for developing and testing codes 
+</pre>
 
-```
+on VSC-4 accordingly:
 
-on VSC-4 accordingly.
-```bash
+<pre>
 $ sacctmgr show qos  format=name%20s,priority,grpnodes,maxwall,description%40s
-
                 Name   Priority GrpNodes     MaxWall                                    Descr 
 -------------------- ---------- -------- ----------- ---------------------------------------- 
          p71386_0384     100000          10-00:00:00                 private nodes haimberger 
@@ -137,9 +179,10 @@ $ sacctmgr show qos  format=name%20s,priority,grpnodes,maxwall,description%40s
             mem_0096       1000           3-00:00:00 vsc-4 regular nodes with 96 gb of memory 
             mem_0384       1000           3-00:00:00 vsc-4 regular nodes with 384 gb of memo+ 
             mem_0768       1000           3-00:00:00 vsc-4 regular nodes with 768 gb of memo+ 
-```
+</pre>
 
 SLURM allows for setting a run time limit below the default QOS's run time limit. After the specified time is elapsed, the job is killed:
+
 ```bash
 #SBATCH --time=<time> 
 ```
@@ -161,14 +204,14 @@ We have 16 CPUs per Node. In order to fill:
 The core hours will be charged to the specified account. If not specified, the default account will be used. 
 
 on VSC-3 our account is called:
-```bash
+
+<pre>
 $ sacctmgr show user `id -u` withassoc format=user,defaultaccount,account,qos%40s,defaultqos%20s
 
      User   Def Acct    Account                                      QOS              Def QOS 
 ---------- ---------- ---------- ---------------------------------------- -------------------- 
      71633     p70653     p70653                   devel_0128,p70653_0128          p70653_0128 
-
-```
+</pre>
 
 Put this in the Job file:
 
@@ -218,14 +261,14 @@ We have 48 CPUs per Node. In order to fill:
 The core hours will be charged to the specified account. If not specified, the default account will be used. 
 
 on VSC-4 our account is called:
-```bash
+<pre>
 $ sacctmgr show user `id -u` withassoc format=user,defaultaccount,account,qos%40s,defaultqos%20s
 
       User   Def Acct    Account                                      QOS              Def QOS 
 ---------- ---------- ---------- ---------------------------------------- -------------------- 
      74101     p71386     p71386                              p71386_0384          p71386_0384 
+</pre>
 
-```
 Put this in the Job file:
 
 ```bash
diff --git a/mkdocs.yml b/mkdocs.yml
index b4a22c5ea2b040d4d4226b7233843f99914bf391..898a0efa1501f577de89e2783b83e5e56b2bb866 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -1,28 +1,61 @@
 site_name: Computer Resources @ IMGW
 # default is ./site/
 # could change this to /var/www/html/docs
+site_url: "https://wolke.img.univie.ac.at/documentation/general/"
 site_dir: "/tmp/cr-site/"
-docs_dir: '.'
+# site_dir: "/var/www/html/documentation/general/"
+docs_dir: "."
 repo_url: https://gitlab.phaidra.org/imgw/computer-resources
 repo_name: IMGW/Computer-Resources
+# this makes relative links  valid
+use_directory_urls: false
+# this adds the feature to directly edit the file on gitlab
+edit_uri: edit/master/
+copyright: Copyright &copy; 2022 - 2022 Michael Blaschek
+
 theme:
   name: material
   palette:
     scheme: uniwien
+  features:
+    - navigation.indexes 
+    - navigation.top
+  logo: mkdocs/img/mkdocs.png
+  favicon: mkdocs/img/favicon.ico
+  custom_dir: mkdocs/overrides
 plugins:
   - tags
   - same-dir
   - search
+  - mkdocs-jupyter:
+      include_source: True
+  - git-revision-date-localized:
+      enable_creation_date: true
+  - awesome-pages
+  #    collapse_single_pages: true
+
+extra:
+  social:
+    - icon: fontawesome/solid/compass
+      link: https://univie.ac.at
+      name: Universität Wien
+    - icon: fontawesome/solid/cloud
+      link: https://img.univie.ac.at
+      name: Institut für Meteorologie und Geophysik
+
 
 extra_css:
   - mkdocs/stylesheets/extra.css
+
 markdown_extensions:
   - pymdownx.highlight:
+      use_pygments: true
       linenums: true
+      pygments_style: xcode
   - pymdownx.inlinehilite
   - pymdownx.snippets:
       check_paths: true
   - pymdownx.superfences
   - pymdownx.tasklist
   - pymdownx.details
-  - pymdownx.emoji
\ No newline at end of file
+  - pymdownx.emoji
diff --git a/mkdocs/img/favicon.ico b/mkdocs/img/favicon.ico
new file mode 100644
index 0000000000000000000000000000000000000000..208335925e0f7be5a6ce47845c22b5f4978f298c
Binary files /dev/null and b/mkdocs/img/favicon.ico differ
diff --git a/mkdocs/img/ipa-add-otp.png b/mkdocs/img/ipa-add-otp.png
new file mode 100644
index 0000000000000000000000000000000000000000..b9c574d5234892b09160246da4c71c0d409e2cb5
Binary files /dev/null and b/mkdocs/img/ipa-add-otp.png differ
diff --git a/mkdocs/img/ipa-add-ssh-final.png b/mkdocs/img/ipa-add-ssh-final.png
new file mode 100644
index 0000000000000000000000000000000000000000..3102eb9b62b1584d6fbcda34d4f59a64a1b37894
Binary files /dev/null and b/mkdocs/img/ipa-add-ssh-final.png differ
diff --git a/mkdocs/img/ipa-add-ssh-pub.png b/mkdocs/img/ipa-add-ssh-pub.png
new file mode 100644
index 0000000000000000000000000000000000000000..012bee270b291a3d93fdd393868ec06aa858dcfb
Binary files /dev/null and b/mkdocs/img/ipa-add-ssh-pub.png differ
diff --git a/mkdocs/img/ipa-add-ssh-save.png b/mkdocs/img/ipa-add-ssh-save.png
new file mode 100644
index 0000000000000000000000000000000000000000..22823065b9ddd10741f67decf33f92385a2945a6
Binary files /dev/null and b/mkdocs/img/ipa-add-ssh-save.png differ
diff --git a/mkdocs/img/ipa-add-ssh.png b/mkdocs/img/ipa-add-ssh.png
new file mode 100644
index 0000000000000000000000000000000000000000..9ffc362e0967ff305474b88b3ff5e2af72418157
Binary files /dev/null and b/mkdocs/img/ipa-add-ssh.png differ
diff --git a/mkdocs/img/ipa-login.png b/mkdocs/img/ipa-login.png
new file mode 100644
index 0000000000000000000000000000000000000000..6d59662039cbbd7aac9b9983e10afdedaeb12a1d
Binary files /dev/null and b/mkdocs/img/ipa-login.png differ
diff --git a/mkdocs/img/ipa-otp-final.png b/mkdocs/img/ipa-otp-final.png
new file mode 100644
index 0000000000000000000000000000000000000000..90bc9dd8ddfef422f261f82f49ab00060bd8740a
Binary files /dev/null and b/mkdocs/img/ipa-otp-final.png differ
diff --git a/mkdocs/img/ipa-otp-qr.png b/mkdocs/img/ipa-otp-qr.png
new file mode 100644
index 0000000000000000000000000000000000000000..706989aa41a243bdbf31f37fa0707d92f2790db5
Binary files /dev/null and b/mkdocs/img/ipa-otp-qr.png differ
diff --git a/mkdocs/img/ipa-otp-totp.png b/mkdocs/img/ipa-otp-totp.png
new file mode 100644
index 0000000000000000000000000000000000000000..dec7b11f50f5960e3ead18744148774472429b2e
Binary files /dev/null and b/mkdocs/img/ipa-otp-totp.png differ
diff --git a/mkdocs/img/ipa-ssh-login.png b/mkdocs/img/ipa-ssh-login.png
new file mode 100644
index 0000000000000000000000000000000000000000..16be6cb67d86cc954af10c3f62a7db3a3dddad99
Binary files /dev/null and b/mkdocs/img/ipa-ssh-login.png differ
diff --git a/mkdocs/img/logo_ecmwf.png b/mkdocs/img/logo_ecmwf.png
new file mode 100644
index 0000000000000000000000000000000000000000..684700e6b29c96d844dc2a474b671d26ce74d1c4
Binary files /dev/null and b/mkdocs/img/logo_ecmwf.png differ
diff --git a/mkdocs/img/mkdocs.png b/mkdocs/img/mkdocs.png
new file mode 100644
index 0000000000000000000000000000000000000000..75c791704564a6bff3a8394859c494497410415f
Binary files /dev/null and b/mkdocs/img/mkdocs.png differ
diff --git a/mkdocs/img/ssh-tunnel-firefox.jpg b/mkdocs/img/ssh-tunnel-firefox.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3d4e37d244e28864d0493703086432c7bb3db325
Binary files /dev/null and b/mkdocs/img/ssh-tunnel-firefox.jpg differ
diff --git a/mkdocs/overrides/main.html b/mkdocs/overrides/main.html
new file mode 100644
index 0000000000000000000000000000000000000000..f223d92c9d342dcbf2193bb443235c0e79fc614e
--- /dev/null
+++ b/mkdocs/overrides/main.html
@@ -0,0 +1,11 @@
+{% extends "base.html" %}
+
+{% block content %}
+{% if page.nb_url %}
+    <a href="{{ page.nb_url }}" title="Download Notebook" class="md-content__button md-icon">
+        {% include ".icons/material/download.svg" %}
+    </a>
+{% endif %}
+
+{{ super() }}
+{% endblock content %}
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d99982d3dd0d8e5aa854a77d3de86f85db4bc89f
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,10 @@
+# Documentation static site generator & deployment tool
+mkdocs>=1.4.2
+# Theme
+mkdocs-material>=8.5.2
+mkdocs-material-extensions>=1.1
+# Plugins
+mkdocs-awesome-pages-plugin
+mkdocs-git-revision-date-localized-plugin
+mkdocs-jupyter
+mkdocs-same-dir