diff --git a/config/cfg.py b/config/cfg.py
index b34d51cd7c6e1be849d1fd1cb785d2bc7806b575..dc9c7ca338f05775b1d38c4c58183b9efb713620 100755
--- a/config/cfg.py
+++ b/config/cfg.py
@@ -3,10 +3,9 @@ from config import clusters  # from . = problem in archivedir
 cluster = clusters.jet  # change cluster configuration here
 
 exp = utils.ExperimentConfiguration()
-exp.expname = "test_jet" #"exp_v1.22_P3_wbub7_WV62_obs10_loc20_oe1"
+exp.expname = "test_srvx1" #"exp_v1.22_P3_wbub7_WV62_obs10_loc20_oe1"
 exp.model_dx = 2000
 exp.n_ens = 40
-exp.n_nodes = 40
 
 exp.filter_kind = 1
 exp.inflation = True
diff --git a/config/clusters.py b/config/clusters.py
index 27751defe3d4376b37fb9056cd2676adcb1338d3..c91118ec1490be6f540abf28ff1b75fd27c76de3 100755
--- a/config/clusters.py
+++ b/config/clusters.py
@@ -45,6 +45,7 @@ slurm_cfg               python dictionary, containing options of SLURM
 vsc = utils.ClusterConfig()
 vsc.name = 'vsc' 
 vsc.max_nproc = 20
+vsc.size_jobarray = 10  # 10 array jobs, each with 4 WRF processes per node
 vsc.use_slurm = True
 
 # binaries
@@ -78,6 +79,7 @@ jet = utils.ClusterConfig()
 jet.name = 'jet'
 jet.max_nproc = 12
 jet.use_slurm = True
+jet.size_jobarray = 40  # number of SLURM array tasks for the WRF ensemble run
 
 # binaries
 jet.python = '/jetfs/home/lkugler/miniconda3/envs/DART/bin/python'
@@ -111,6 +113,7 @@ jet.slurm_cfg = {"account": "lkugler", "partition": "compute",
 srvx1 = utils.ClusterConfig()
 srvx1.name = 'srvx1'
 srvx1.max_nproc = 6
+srvx1.size_jobarray = 40  # number of job-array tasks for the WRF ensemble run
 srvx1.use_slurm = False
 
 # binaries
diff --git a/cycled_exp.py b/cycled_exp.py
index 0fc2a61db8c141a881a77c1294d8cf036368f012..4e30e9930dcba29d7d50f59dbb9857a7aba17ef1 100755
--- a/cycled_exp.py
+++ b/cycled_exp.py
@@ -103,7 +103,7 @@ def run_ENS(begin, end, depends_on=None, first_minute=True,
 
     time_in_simulation_hours = (end-begin).total_seconds()/3600
     runtime_wallclock_mins_expected = int(8+time_in_simulation_hours*9.5)  # usually below 9 min/hour
-    s = create_job("WRF", cfg_update={"array": "1-"+str(exp.n_nodes), "ntasks": "10", "nodes": "1",
+    s = create_job("WRF", cfg_update={"array": "1-"+str(cluster.size_jobarray), "ntasks": "10", "nodes": "1",
                 "time": str(runtime_wallclock_mins_expected), "mem": "140G"})
     cmd = script_to_str(cluster.run_WRF).replace('<exp.expname>', exp.expname
                                        ).replace('<cluster.wrf_rundir_base>', cluster.wrf_rundir_base)
@@ -186,7 +186,7 @@ def verify_sat(depends_on=None):
 def verify_wrf(depends_on=None):
     s = create_job("verif-WRF-"+exp.expname, cfg_update={"time": "120", "mail-type": "FAIL,END", "ntasks": "20", 
                  "ntasks-per-node": "20", "ntasks-per-core": "1", "mem": "250G"})
-    cmd = cluster.python_verif+' /jetfs/home/lkugler/osse_analysis/plot_from_raw/analyze_fc.py '+exp.expname+' has_node wrf verif1d FSS BS'
+    cmd = cluster.python_verif+' /jetfs/home/lkugler/osse_analysis/plot_from_raw/analyze_fc.py '+exp.expname+' has_node wrf verif1d verif3d FSS BS'
     s.run(cmd, depends_on=[depends_on])
 
 def verify_fast(depends_on=None):
@@ -273,7 +273,6 @@ if __name__ == "__main__":
         # update time variables
         prior_init_time = time - timedelta_btw_assim
 
-    #id = gen_obsseq(id)
     verify_sat(id_sat)
     verify_wrf(id)
     verify_fast(id)