diff --git a/config/cluster.py b/config/cluster.py
deleted file mode 100755
index 2a986d3cdba990d9accc21520e1448baaf31a24c..0000000000000000000000000000000000000000
--- a/config/cluster.py
+++ /dev/null
@@ -1,76 +0,0 @@
-import os, sys
-import datetime as dt
-from dartwrf import utils
-from config.cfg import exp
-
-"""Configuration name docs
-
-When coding, use attributes of a dictionary like this: 
-$ from cfg import exp, cluster
-$ path = cluster.archivedir
-
-
-attribute name    |     description
-------------------------------------------------------
-name                    any string (currently unused)
-
-python                  path of the python interpreter to use
-python_verif            path of the python interpreter used by the verification script (script not included)
-ncks                    path to the 'ncks' binary; run 'which ncks' to find it;
-                            if it is not found, load the module first ('module load nco')
-ideal                   path to WRF's ideal.exe
-wrfexe                  path to WRF's wrf.exe
-
-wrf_rundir_base         path for temporary files for WRF
-dart_rundir_base        path for temporary files for DART
-archive_base            path for long-term output storage
-
-srcdir                  path to where WRF has been compiled, including the 'run' folder of WRF, e.g. /home/WRF-4.3/run
-dart_srcdir             path to DART compile directory, e.g. /home/DART-9.11.9/models/wrf/work
-rttov_srcdir            path to RTTOV compile directory, e.g. /home/RTTOV13/rtcoef_rttov13/
-scriptsdir              path where DART-WRF scripts reside, e.g. /home/DART-WRF/scripts
-
-namelist                path to a namelist template; strings like <hist_interval> are replaced by scripts/prepare_namelist.py
-run_WRF                 path to script which runs WRF on a node of the cluster
-obs_impact_filename     path to the obs_impact file (see DART guide; module assim_tools_mod and program obs_impact_tool)
-geo_em                  path to NetCDF file of WRF domain (see WRF guide)
-
-slurm_cfg               python dictionary of SLURM options,
-                            as defined in the SLURM docs (https://slurm.schedmd.com/sbatch.html);
-                            individual jobs can override these defaults, for example:
-                            'dict(cluster.slurm_cfg, **cfg_update)' where
-                            'cfg_update = {"nodes": "2"}'
-"""
-
-cluster = utils.ClusterConfig(exp)
-cluster.name = 'srvx1'
-cluster.max_nproc = 6
-cluster.use_slurm = False
-
-# binaries
-cluster.python = 'python'
-cluster.python_verif = '/users/staff/lkugler/miniconda3/bin/python'
-cluster.ncks = '/home/swd/spack/opt/spack/linux-rhel8-skylake_avx512/gcc-8.5.0/nco-5.0.1-ntu44aoxlvwtr2tsrobfr4lht7cpvccf/bin/ncks'
-cluster.ideal = ''  # e.g. '/jetfs/home/lkugler/bin/ideal-v4.3_v1.22.exe'
-cluster.wrfexe = ''  # e.g. '/jetfs/home/lkugler/bin/wrf-v4.3_v1.22.exe'
-cluster.container = ''
-
-# paths for data output
-cluster.wrf_rundir_base = utils.userhome+'/AdvDA23/run_WRF/'  # path for temporary files
-cluster.dart_rundir_base = utils.userhome+'/AdvDA23/run_DART/'  # path for temporary files
-cluster.archive_base = utils.userhome+'/data/sim_archive/'
-
-# paths used as input
-cluster.srcdir = '/users/staff/lkugler/AdvDA23/DART/WRF-4.3/run'
-cluster.dart_srcdir = '/users/students/lehre/advDA_s2023/DART/models/wrf/work'
-cluster.rttov_srcdir = '/users/students/lehre/advDA_s2023/RTTOV13/rtcoef_rttov13/'
-cluster.scriptsdir = utils.userhome+'/DART-WRF/dartwrf/'
-cluster.geo_em = '/users/students/lehre/advDA_s2023/data/geo_em.d01.nc'
-
-# templates/run scripts
-cluster.namelist = cluster.scriptsdir+'/../templates/namelist.input'
-cluster.run_WRF = cluster.scriptsdir+'/run_ens.jet.sh'
-
-cluster.slurm_cfg = {"account": "lkugler", "partition": "compute",
-                 "ntasks": "1", "ntasks-per-core": "1", "mem": "50G",
-                 "mail-type": "FAIL", "mail-user": "lukas.kugler@univie.ac.at"}
diff --git a/cycled_exp.py b/cycled_exp.py
index de7cc31336f8bb41a0e39de80f9f75d926c76172..243ff265a1247f20b1c3e442f1efc1492289cc25 100755
--- a/cycled_exp.py
+++ b/cycled_exp.py
@@ -34,7 +34,7 @@ if __name__ == "__main__":
         init_time = dt.datetime(2008, 7, 30, 12)
         time = dt.datetime(2008, 7, 30, 13)
         last_assim_time = dt.datetime(2008, 7, 30, 14)
-        forecast_until = dt.datetime(2008, 7, 30, 14, 18)
+        forecast_until = dt.datetime(2008, 7, 30, 18)
 
         w.prepare_WRFrundir(init_time)
         # id = w.run_ideal(depends_on=id)
@@ -86,4 +86,3 @@ if __name__ == "__main__":
 
     w.verify_sat(id_sat)
     w.verify_wrf(id)
-    w.verify_fast(id)
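The forecast_until fix above changes which positional argument carries the time of day: in Python's datetime.datetime the fourth positional argument is the hour and the fifth is the minute, so the old value ended the forecast at 14:18 while the new one runs until 18:00. A short illustration:

    import datetime as dt

    old = dt.datetime(2008, 7, 30, 14, 18)  # hour=14, minute=18 -> 14:18
    new = dt.datetime(2008, 7, 30, 18)      # hour=18            -> 18:00
    assert new - old == dt.timedelta(hours=3, minutes=42)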
diff --git a/dartwrf/workflows.py b/dartwrf/workflows.py
index 3c06dd2135c82d71d5538c1f55ea5ccec9f742f2..96883bb7b1de1b531aaf5e2ba41eae2a92ccdae8 100644
--- a/dartwrf/workflows.py
+++ b/dartwrf/workflows.py
@@ -151,7 +151,7 @@ class WorkFlows(object):
         mv $rundir/rsl.out.0000 $rundir/rsl.out.input
     done
     """
-        id = self.cluster.run_job(cmd, "ideal"+exp.expname, cfg_update={"ntasks": str(exp.n_ens),
+        id = self.cluster.run_job(cmd, "ideal-"+exp.expname, cfg_update={"ntasks": str(exp.n_ens),
                             "time": "10", "mem": "100G"}, depends_on=[depends_on])
         return id
 
@@ -165,7 +165,7 @@ class WorkFlows(object):
             pstr = ' perturb'
         cmd = self.cluster.python+' '+self.cluster.scripts_rundir+'/create_wbubble_wrfinput.py'+pstr
 
-        id = self.cluster.run_job(cmd, "ins_wbubble"+exp.expname, cfg_update={"time": "5"}, depends_on=[depends_on])
+        id = self.cluster.run_job(cmd, "ins_wbub-"+exp.expname, cfg_update={"time": "5"}, depends_on=[depends_on])
         return id
 
     def run_ENS(self, begin, end, depends_on=None, first_minute=True, 
@@ -232,7 +232,7 @@ class WorkFlows(object):
         time_in_simulation_hours = (end-begin).total_seconds()/3600
         runtime_wallclock_mins_expected = int(8+time_in_simulation_hours*9.5)  # usually below 9 min/hour
 
-        id = self.cluster.run_job(cmd, "WRF"+exp.expname, cfg_update={"array": "1-"+str(self.cluster.size_jobarray), "ntasks": "10", "nodes": "1",
+        id = self.cluster.run_job(cmd, "WRF-"+exp.expname, cfg_update={"array": "1-"+str(self.cluster.size_jobarray), "ntasks": "10", "nodes": "1",
                             "time": str(runtime_wallclock_mins_expected), "mem": "40G"}, depends_on=[id])
         return id
 
@@ -258,7 +258,7 @@ class WorkFlows(object):
                 +prior_valid_time.strftime('%Y-%m-%d_%H:%M ')
                 +prior_path_exp)
 
-        id = self.cluster.run_job(cmd, "Assim"+exp.expname, cfg_update={"ntasks": "12", "time": "60",
+        id = self.cluster.run_job(cmd, "Assim-"+exp.expname, cfg_update={"ntasks": "12", "time": "60",
                                 "mem": "200G", "ntasks-per-node": "12", "ntasks-per-core": "2"}, depends_on=[depends_on])
         return id
 
@@ -275,19 +275,19 @@ class WorkFlows(object):
                     +prior_init_time.strftime(' %Y-%m-%d_%H:%M')
                     +prior_valid_time.strftime(' %Y-%m-%d_%H:%M')
                     +tnew)
-        id = self.cluster.run_job(cmd, "IC-prior"+exp.expname, cfg_update=dict(time="8"), depends_on=[depends_on])
+        id = self.cluster.run_job(cmd, "IC-prior-"+exp.expname, cfg_update=dict(time="8"), depends_on=[depends_on])
         return id
 
 
     def update_IC_from_DA(self, assim_time, depends_on=None):
         cmd = self.cluster.python+' '+self.cluster.scripts_rundir+'/update_IC.py '+assim_time.strftime('%Y-%m-%d_%H:%M')
-        id = self.cluster.run_job(cmd, "IC-update"+exp.expname, cfg_update=dict(time="8"), depends_on=[depends_on])
+        id = self.cluster.run_job(cmd, "IC-update-"+exp.expname, cfg_update=dict(time="8"), depends_on=[depends_on])
         return id
 
 
     def create_satimages(self, init_time, depends_on=None):
         cmd = 'module purge; module load netcdf-fortran/4.5.3-gcc-8.5.0-qsqbozc; python ~/RTTOV-WRF/run_init.py '+self.cluster.archivedir+init_time.strftime('/%Y-%m-%d_%H:%M/')
-        id = self.cluster.run_job(cmd, "RTTOV"+exp.expname, cfg_update={"ntasks": "12", "time": "120", "mem": "200G"}, depends_on=[depends_on])
+        id = self.cluster.run_job(cmd, "RTTOV-"+exp.expname, cfg_update={"ntasks": "12", "time": "120", "mem": "200G"}, depends_on=[depends_on])
         return id
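All of the workflows.py hunks make the same cosmetic change: a hyphen now separates the job-type prefix from the experiment name, so SLURM job names read 'Assim-exp1' rather than 'Assimexp1'. A hypothetical sketch of the resulting naming convention (job_name is illustrative, not a function in the repository):

    def job_name(prefix: str, expname: str) -> str:
        # e.g. 'WRF-exp1', 'IC-prior-exp1'; the separator keeps the
        # job-type prefix readable next to arbitrary experiment names
        return prefix + "-" + expname

    assert job_name("RTTOV", "exp1") == "RTTOV-exp1"

With the separator in place, the jobs of one step are also easier to filter in the queue, e.g. 'squeue --name=WRF-exp1'.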