diff --git a/config/cfg.py b/config/cfg.py
index 973314411765c7e882ae5448be980bb01c78f945..3cdf2024e1ebf2b9b85a52a0edee8c5d3591de1f 100755
--- a/config/cfg.py
+++ b/config/cfg.py
@@ -1,7 +1,7 @@
 from dartwrf import utils
 
 exp = utils.Experiment()
-exp.expname = "test_newcode"
+exp.expname = "test_newcode" #exp_v1.22_P2_rr_VIS_obs10_loc20_oe3"
 exp.model_dx = 2000
 exp.n_ens = 40
 exp.superob_km = False  # False or int (spatial averaging of observations)
@@ -16,20 +16,35 @@ exp.input_profile = '/mnt/jetfs/home/lkugler/data/initial_profiles/wrf/ens/2022-
 
 
 exp.dart_nml = {'&assim_tools_nml':
-                    dict(assim_tools_nml='.false.',
-                            filter_kind='1',
-                            sampling_error_correction='.true.',
-                            # obs_impact_filename='/jetfs/home/lkugler/DART-WRF/templates/impactfactor_T.txt',
-                            ),
+                    dict(filter_kind='1',
+                        sampling_error_correction='.true.',
+                        # obs_impact_filename='/jetfs/home/lkugler/DART-WRF/templates/impactfactor_T.txt',
+                        ),
                 '&filter_nml':
-                    dict(ens_size=str(exp.n_ens),
-                            num_output_state_members=str(exp.n_ens),
-                            num_output_obs_members=str(exp.n_ens),
-                            inf_flavor=['2', '0'],
+                    dict(ens_size=exp.n_ens,
+                            num_output_state_members=exp.n_ens,
+                            num_output_obs_members=exp.n_ens,
+                            inf_flavor=['0', '0'],
                         ),
                 '&location_nml':
                     dict(horiz_dist_only='.true.',
                         ),
+                '&model_nml':
+                    dict(wrf_state_variables = 
+                        [['U',     'QTY_U_WIND_COMPONENT',     'TYPE_U',    'UPDATE','999',],
+                         ['V',     'QTY_V_WIND_COMPONENT',     'TYPE_V',    'UPDATE','999',],
+                         ['W',     'QTY_VERTICAL_VELOCITY',    'TYPE_W',    'UPDATE','999',],
+                         ['PH',    'QTY_GEOPOTENTIAL_HEIGHT',  'TYPE_GZ',   'UPDATE','999',],
+                         ['THM',   'QTY_POTENTIAL_TEMPERATURE','TYPE_T',    'UPDATE','999',],
+                         ['MU',    'QTY_PRESSURE',             'TYPE_MU',   'UPDATE','999',],
+                         ['QVAPOR','QTY_VAPOR_MIXING_RATIO',   'TYPE_QV',   'UPDATE','999',],
+                         ['QICE',  'QTY_ICE_MIXING_RATIO',     'TYPE_QI',   'UPDATE','999',],
+                         ['QCLOUD','QTY_CLOUDWATER_MIXING_RATIO','TYPE_QC', 'UPDATE','999',],
+                         ['CLDFRA','QTY_CLOUD_FRACTION',       'TYPE_CFRAC','UPDATE','999',],
+                         ['PSFC',  'QTY_SURFACE_PRESSURE',     'TYPE_PSFC', 'UPDATE','999',],
+                         ['T2',    'QTY_2M_TEMPERATURE',       'TYPE_T',    'UPDATE','999',],
+                         ['TSK',   'QTY_SKIN_TEMPERATURE',     'TYPE_T',    'UPDATE','999',],
+                         ['REFL_10CM','QTY_RADAR_REFLECTIVITY','TYPE_REFL', 'UPDATE','999',]]),
                 }
 
 
diff --git a/config/cluster.py b/config/cluster.py
new file mode 100755
index 0000000000000000000000000000000000000000..c1c779118d5b01ba92626a27d268c7c414480a74
--- /dev/null
+++ b/config/cluster.py
@@ -0,0 +1,77 @@
+import os, sys
+import datetime as dt
+from dartwrf import utils
+from config.cfg import exp
+
+"""Configuration name docs
+
+When coding, use attributes of a dictionary like this: 
+$ from cfg import exp, cluster
+$ path = cluster.archivedir
+
+
+attribute name    |     description
+------------------------------------------------------
+name                    any string (currently unused)
+
+python                  path of python version to use
+python_enstools         path of python version to use for verification script (not provided)
+ncks                    path to 'ncks' program; type 'which ncks' to find the path,
+                            if it doesn't exist, try to load the module first ('module load nco')
+ideal                   path to WRF's ideal.exe
+wrfexe                  path to WRF's wrf.exe
+
+wrf_rundir_base         path for temporary files for WRF
+dart_rundir_base        path for temporary files for DART
+archive_base            path for long-time output storage
+
+srcdir                  path to where WRF has been compiled, including the 'run' folder of WRF, e.g. /home/WRF-4.3/run
+dart_srcdir             path to DART compile directory, e.g. /home/DART-9.11.9/models/wrf/work
+rttov_srcdir            path to RTTOV compile directory, e.g. /home/RTTOV13/rtcoef_rttov13/
+scriptsdir              path where DART-WRF scripts reside, e.g. /home/DART-WRF/scripts
+
+namelist                path to a namelist template; strings like <hist_interval>, will be overwritten in scripts/prepare_namelist.py
+run_WRF                 path to script which runs WRF on a node of the cluster
+obs_impact_filename     path to obs_impact_filename (see DART guide; module assim_tools_mod and program obs_impact_tool)
+geo_em                  path to NetCDF file of WRF domain (see WRF guide)
+
+slurm_cfg               python dictionary, containing options of SLURM
+                            defined in SLURM docs (https://slurm.schedmd.com/sbatch.html)
+                            this configuration can be overwritten later on, for example:
+                            'dict(cluster.slurm_cfg, **cfg_update)' where
+                            'cfg_update = {"nodes": "2"}'
+"""
+
+cluster = utils.ClusterConfig(exp)
+cluster.name = 'srvx1'
+cluster.max_nproc = 6
+cluster.use_slurm = False
+
+# binaries
+cluster.python = 'python'
+cluster.python_verif = '/users/staff/lkugler/miniconda3/bin/python'
+cluster.ncks = '/home/swd/spack/opt/spack/linux-rhel8-skylake_avx512/gcc-8.5.0/nco-5.0.1-ntu44aoxlvwtr2tsrobfr4lht7cpvccf/bin/ncks'
+cluster.ideal = '' #/jetfs/home/lkugler/bin/ideal-v4.3_v1.22.exe'
+cluster.wrfexe = '' #/jetfs/home/lkugler/bin/wrf-v4.3_v1.22.exe'
+cluster.dart_modules = ''
+cluster.wrf_modules = ''
+
+# paths for data output
+cluster.wrf_rundir_base = utils.userhome+'/AdvDA23/run_WRF/'  # path for temporary files
+cluster.dart_rundir_base = utils.userhome+'/AdvDA23/run_DART/'  # path for temporary files
+cluster.archive_base = utils.userhome+'/data/sim_archive/'
+
+# paths used as input
+cluster.srcdir = '/users/staff/lkugler/AdvDA23/DART/WRF-4.3/run'
+cluster.dart_srcdir = '/users/students/lehre/advDA_s2023/DART/models/wrf/work'
+cluster.rttov_srcdir = '/users/students/lehre/advDA_s2023/RTTOV13/rtcoef_rttov13/'
+cluster.scriptsdir = utils.userhome+'/AdvDA23/DART-WRF/dartwrf/'
+cluster.geo_em = '/users/students/lehre/advDA_s2023/data/geo_em.d01.nc'
+
+# templates/run scripts
+cluster.namelist = cluster.scriptsdir+'/../templates/namelist.input'
+cluster.run_WRF = cluster.scriptsdir+'/run_ens.jet.sh'
+
+cluster.slurm_cfg = {"account": "lkugler", "partition": "compute",
+                 "ntasks": "1", "ntasks-per-core": "1", "mem": "50G",
+                 "mail-type": "FAIL", "mail-user": "lukas.kugler@univie.ac.at"}
diff --git a/config/srvx1.py b/config/srvx1.py
index c1c779118d5b01ba92626a27d268c7c414480a74..9a22186d45adfb62e2d130eb040c5b92e525584a 100755
--- a/config/srvx1.py
+++ b/config/srvx1.py
@@ -65,12 +65,12 @@ cluster.archive_base = utils.userhome+'/data/sim_archive/'
 cluster.srcdir = '/users/staff/lkugler/AdvDA23/DART/WRF-4.3/run'
 cluster.dart_srcdir = '/users/students/lehre/advDA_s2023/DART/models/wrf/work'
 cluster.rttov_srcdir = '/users/students/lehre/advDA_s2023/RTTOV13/rtcoef_rttov13/'
-cluster.scriptsdir = utils.userhome+'/AdvDA23/DART-WRF/dartwrf/'
+cluster.dartwrf_dir = utils.userhome+'/AdvDA23/DART-WRF/'
 cluster.geo_em = '/users/students/lehre/advDA_s2023/data/geo_em.d01.nc'
 
 # templates/run scripts
-cluster.namelist = cluster.scriptsdir+'/../templates/namelist.input'
-cluster.run_WRF = cluster.scriptsdir+'/run_ens.jet.sh'
+cluster.namelist = cluster.dartwrf_dir+'/templates/namelist.input'
+cluster.run_WRF = cluster.dartwrf_dir+'/dartwrf/run_ens.jet.sh'
 
 cluster.slurm_cfg = {"account": "lkugler", "partition": "compute",
                  "ntasks": "1", "ntasks-per-core": "1", "mem": "50G",
diff --git a/dartwrf/assim_synth_obs.py b/dartwrf/assim_synth_obs.py
index 64c4653c25ea6a04e4e418d96bc264b7ce457e55..9c79deb7c7214579618273d5ecd8b439871ff62f 100755
--- a/dartwrf/assim_synth_obs.py
+++ b/dartwrf/assim_synth_obs.py
@@ -50,6 +50,7 @@ def prepare_prior_ensemble(assim_time, prior_init_time, prior_valid_time, prior_
     """
     print("prepare prior state estimate")
     for iens in range(1, exp.n_ens + 1):
+
         print("link wrfout file to DART background file")
         wrfout_run = (
             prior_path_exp
@@ -65,7 +66,7 @@ def prepare_prior_ensemble(assim_time, prior_init_time, prior_valid_time, prior_
         copy(wrfout_run, wrfout_dart)
         symlink(wrfout_dart, dart_ensdir + "/wrfinput_d01")
 
-        # ensure prior time matches assim time (can be off intentionally)
+        # ensure prior time matches assim time (can be intentionally different)
         if assim_time != prior_valid_time:
             print("overwriting time in prior from nature wrfout")
             shell(cluster.ncks+ " -A -v XTIME,Times "+ 
@@ -86,18 +87,22 @@ def prepare_prior_ensemble(assim_time, prior_init_time, prior_valid_time, prior_
     os.system("rm -rf " + cluster.dart_rundir + "/obs_seq.fina*")
 
 def write_list_of_inputfiles_prior():
-     files = []
-     for iens in range(1, exp.n_ens+1):
-          files.append("./prior_ens" + str(iens) + "/wrfout_d01")
-     write_txt(files, cluster.dart_rundir+'/input_list.txt')
+    """Instruct DART to use the prior ensemble as input
+    """
+    files = []
+    for iens in range(1, exp.n_ens+1):
+        files.append("./prior_ens" + str(iens) + "/wrfout_d01")
+    write_txt(files, cluster.dart_rundir+'/input_list.txt')
 
 def write_list_of_inputfiles_posterior(assim_time):
-     filedir = cluster.archivedir+assim_time.strftime("/%Y-%m-%d_%H:%M/assim_stage0/")
+    """Use posterior as input for DART, e.g. to evaluate the analysis in observation space
+    """
+    filedir = cluster.archivedir+assim_time.strftime("/%Y-%m-%d_%H:%M/assim_stage0/")
 
-     files = []
-     for iens in range(1, exp.n_ens+1):
-          files.append(filedir+'filter_restart_d01.'+str(iens).zfill(4))
-     write_txt(files, cluster.dart_rundir+'/input_list.txt')
+    files = []
+    for iens in range(1, exp.n_ens+1):
+        files.append(filedir+'filter_restart_d01.'+str(iens).zfill(4))
+    write_txt(files, cluster.dart_rundir+'/input_list.txt')
 
 def write_list_of_outputfiles():
     files = []
@@ -300,8 +305,7 @@ def evaluate(assim_time,
     os.makedirs(cluster.dart_rundir, exist_ok=True)  # create directory to run DART in
     os.chdir(cluster.dart_rundir)
 
-    # link DART binaries to run_DART
-    os.system(cluster.python + " " + cluster.scripts_rundir + "/link_dart_rttov.py")  
+    link_DART_binaries_and_RTTOV_files() 
 
     # remove any existing observation files
     os.system("rm -f input.nml obs_seq.final")  
@@ -488,6 +492,15 @@ def link_DART_binaries_and_RTTOV_files():
         else:
             pass  # we dont need RTTOV anyway
 
+def prepare_run_DART_folder():
+    os.makedirs(cluster.dart_rundir, exist_ok=True)  # create directory to run DART in
+    os.chdir(cluster.dart_rundir)
+
+    link_DART_binaries_and_RTTOV_files()
+
+    # remove any existing observation files
+    os.system("rm -f input.nml obs_seq.in obs_seq.out obs_seq.out-orig obs_seq.final")  
+
 
 def main(time, prior_init_time, prior_valid_time, prior_path_exp):
     """Assimilate observations
@@ -511,14 +524,7 @@ def main(time, prior_init_time, prior_valid_time, prior_path_exp):
     """
     nproc = cluster.max_nproc
 
-    archive_time = cluster.archivedir + time.strftime("/%Y-%m-%d_%H:%M/")
-    os.makedirs(cluster.dart_rundir, exist_ok=True)  # create directory to run DART in
-    os.chdir(cluster.dart_rundir)
-
-    link_DART_binaries_and_RTTOV_files()
-
-    # remove any existing observation files
-    os.system("rm -f input.nml obs_seq.in obs_seq.out obs_seq.out-orig obs_seq.final")  
+    prepare_run_DART_folder()
     dart_nml.write_namelist()
 
     print("prepare nature")
diff --git a/dartwrf/config b/dartwrf/config
deleted file mode 120000
index 4088526854d4f1d637653a0436624cdcf75dceb9..0000000000000000000000000000000000000000
--- a/dartwrf/config
+++ /dev/null
@@ -1 +0,0 @@
-../config/
\ No newline at end of file
diff --git a/dartwrf/dart_nml.py b/dartwrf/dart_nml.py
index e2b490023f4097663771853945e63b15dc04aee2..37c9be9f00d0fac7b84f0128142e0ded89c69dd2 100644
--- a/dartwrf/dart_nml.py
+++ b/dartwrf/dart_nml.py
@@ -37,7 +37,7 @@ def read_namelist(filepath):
                 d[section] = dict()
                 continue
             
-            if '/' in line:
+            if line.strip() == '/':
                 continue  # skip end of namelist section
 
             line = line.strip().strip(',')
@@ -57,11 +57,21 @@ def read_namelist(filepath):
 
             val = val.strip().strip(',').split(',')
 
-            # ensure that we have strings
-            if isinstance(val, list):
-                val = [str(v) for v in val]
-            else:
-                val = [str(val)]
+            # # ensure that we have list of strings
+            # if isinstance(val, list) and len(val) == 1:
+            #     val = [val]
+
+
+            # try:
+            #     # convert to float/int
+            #     val = [float(v) for v in val]
+
+            #     # convert to int when they are equal
+            #     val = [int(v) for v in val if int(v)==v]
+            # except:
+            # it is not a numeric value => string
+            val = [v.strip() for v in val]
+
 
             param_data.append(val)
 
@@ -96,15 +106,35 @@ def write_namelist_from_dict(d, filepath):
             for parameter in parameters:
                 lines = d[section][parameter]
 
-                if isinstance(lines, str):
-                    lines = [lines,]
+                # lines (list(list(str))): 
+                # outer list: one element per line in the text file
+                # inner list: one element per value in that line
+
+
+                # we should have a list here
+                # if we instead have a single value, then make a list
+                # because we assume that lines consists of multiple lines
+                assert isinstance(lines, list)
 
                 for i, line in enumerate(lines):
 
-                    try:
-                        line = ', '.join(line)  # write line (is a list)
-                    except:
-                        pass
+                    assert isinstance(line, list)
+                    if line == []:
+                        line = ['',]
+                    
+
+                    first_entry = line[0]
+                    if isinstance(first_entry, str) and not first_entry.startswith('.'):
+                        try:
+                            float(first_entry)
+                            line = ', '.join(str(v) for v in line)
+                        except:
+                            # contains strings
+                            line = [entry.strip("'").strip('"') for entry in line]  # remove pre-existing quotes
+                            line = ', '.join('"'+v+'"' for v in line)
+                    else:
+                        # fortran booleans (e.g. '.true.') or non-string values: join as-is
+                        line = ', '.join(str(v) for v in line)
 
 
                     if i == 0:
@@ -154,54 +184,78 @@ def _get_list_of_localizations():
 
         l_obstypes.append(obscfg["kind"])
         loc_horiz_km = obscfg["loc_horiz_km"]
+        if not loc_horiz_km >= 0:
+            raise ValueError('Invalid value for `loc_horiz_km`, set loc_horiz_km >= 0 !')
 
         # compute horizontal localization
-        loc_horiz_rad = str(to_radian_horizontal(loc_horiz_km))
+        loc_horiz_rad = to_radian_horizontal(loc_horiz_km)
         l_loc_horiz_rad.append(loc_horiz_rad)
 
         # compute vertical localization
 
+        # do we have vertical localization?
+        if "loc_vert_km" not in obscfg and "loc_vert_scaleheight" not in obscfg:
+            l_loc_vert_km.append(-1)
+            l_loc_vert_scaleheight.append(-1)
+            # if not add dummy value
+
         # choose either localization by height or by scale height
         if hasattr(obscfg, "loc_vert_km") and hasattr(obscfg, "loc_vert_scaleheight"):
             raise ValueError("Observation config contains both loc_vert_km and loc_vert_scaleheight. Please choose one.")
         
         elif hasattr(obscfg, "loc_vert_km"):  # localization by height
-            loc_vert_km = str(obscfg["loc_vert_km"])
+            loc_vert_km = obscfg["loc_vert_km"]
 
             vert_norm_hgt = to_vertical_normalization(loc_vert_km, loc_horiz_km)
             l_loc_vert_km.append(vert_norm_hgt)
 
         elif hasattr(obscfg, "loc_vert_scaleheight"):  # localization by scale height
-            loc_vert_scaleheight = str(obscfg["loc_vert_scaleheight"])
+            loc_vert_scaleheight = obscfg["loc_vert_scaleheight"]
 
             # no conversion necessary, take the values as defined in obscfg
             l_loc_vert_scaleheight.append(loc_vert_scaleheight)
 
-
-    # fail when both localization by height and scale height are requested
-    if len(l_loc_vert_km) > 0 and len(l_loc_vert_scaleheight) > 0:
-        raise ValueError("List of observation configurations contain both height and scale-height localization. Please choose one.")
-    
     # set the other (unused) list to a dummy value
     if len(l_loc_vert_km) > 0:
-        l_loc_vert_scaleheight = ["-1",]
+        l_loc_vert_scaleheight = [-1,]
     else:
-        l_loc_vert_km = ["-1",]
+        l_loc_vert_km = [-1,]
     
     return l_obstypes, l_loc_horiz_rad, l_loc_vert_km, l_loc_vert_scaleheight
 
 
-def _to_fortran_list(l):
-    """Ensure formatting with quotation mark, e.g. parameter = "arg1", "arg2", 
-    """
-    assert isinstance(l, list)
+# def _fortran_format(l):
+
+#     # do we have multiples entries?
+#     # Caution: a string is iterable
+#     if isinstance(l, list):
+#         pass
+#     else:
+#         l = [l,]
+
+#     # do we have strings as elements?
+#     if isinstance(l[0], str):
+        
+
+#     return l
+
+# def _as_fortran_list(l):
+#     """Convert parameter list 
+    
+#     if l contains strings:
+#         output: "arg1", "arg2", "arg3"
+#     else
+#         output 1,2,3 
+#     """
+#     assert isinstance(l, list)
+
+#     if isinstance(l[0], str):
+#         # contains strings
+#         l = ['"'+a+'"' for a in l]  # add quotation marks
+        
+
+    
 
-    if len(l) > 1:  # multiple entries
-        return ', '.join(['"'+v+'"' for v in l])
-    elif len(l) == 1:  # single entry
-        return '"'+l[0]+'"'
-    else:  # no entry
-        return '' 
 
 def write_namelist(just_prior_values=False):
     """Set DART namelist variables in 'input.nml' file.
@@ -225,26 +279,28 @@ def write_namelist(just_prior_values=False):
     nml = read_namelist(cluster.dart_srcdir + "/input.nml")
 
     # make sure that observations defined in `exp.observations` are assimilated
-    nml['&obs_kind_nml']['assimilate_these_obs_types'] = _to_fortran_list(list_obstypes)
+    nml['&obs_kind_nml']['assimilate_these_obs_types'] = [list_obstypes]
     
     # dont compute posterior, just evaluate prior
     if just_prior_values:  
-        nml['&filter_nml']['compute_posterior'] = '.false.'
-        nml['&filter_nml']['output_members'] = '.false.'
-        nml['&filter_nml']['output_mean'] = '.false.'
-        nml['&filter_nml']['output_sd'] = '.false.'
-        nml['&obs_kind_nml']['assimilate_these_obs_types'] = []
-        nml['&obs_kind_nml']['evaluate_these_obs_types'] = [_to_fortran_list(list_obstypes)]
+        nml['&filter_nml']['compute_posterior'] = [['.false.']]
+        nml['&filter_nml']['output_members'] = [['.false.']]
+        nml['&filter_nml']['output_mean'] = [['.false.']]
+        nml['&filter_nml']['output_sd'] = [['.false.']]
+        nml['&obs_kind_nml']['assimilate_these_obs_types'] = [[]]
+        nml['&obs_kind_nml']['evaluate_these_obs_types'] = [list_obstypes]
 
 
     # write localization variables
-    nml['&assim_tools_nml']['special_localization_obs_types'] = [_to_fortran_list(list_obstypes)]
-    nml['&assim_tools_nml']['special_localization_cutoffs'] = [_to_fortran_list(list_loc_horiz_rad)]
+    nml['&assim_tools_nml']['special_localization_obs_types'] = [list_obstypes]
+    nml['&assim_tools_nml']['special_localization_cutoffs'] = [list_loc_horiz_rad]
 
-    nml['&location_nml']['special_vert_normalization_obs_types'] = [_to_fortran_list(list_obstypes)]
-    nml['&location_nml']['special_vert_normalization_heights'] = [_to_fortran_list(list_loc_vert_km)]
-    nml['&location_nml']['special_vert_normalization_scale_heights'] = [_to_fortran_list(list_loc_vert_scaleheight)]
+    nml['&location_nml']['special_vert_normalization_obs_types'] = [list_obstypes]
+    nml['&location_nml']['special_vert_normalization_heights'] = [list_loc_vert_km]
+    nml['&location_nml']['special_vert_normalization_scale_heights'] = [list_loc_vert_scaleheight]
 
+    nml['&location_nml']['special_vert_normalization_levels'] = [[-1,]]
+    nml['&location_nml']['special_vert_normalization_pressures'] = [[-1,]]
 
     # overwrite namelist with experiment configuration
     for section, sdata in exp.dart_nml.items():
@@ -255,8 +311,12 @@ def write_namelist(just_prior_values=False):
 
         for parameter, value in sdata.items():
 
-            if isinstance(value, list) and len(value) > 1:  # it is a real list
-                value = [value]  # value was a list of parameter values, but just one line
+            if isinstance(value, list) and len(value) > 1:  # it is a list
+
+                if isinstance(value[0], list):
+                    pass  # nothing to do, value is list(list())
+                else:
+                    value = [value]  # value was a list of parameter values, but just one line
             else:
                 value = [[value]]  # value was a single entry
 
@@ -274,5 +334,5 @@ def write_namelist(just_prior_values=False):
     write_namelist_from_dict(nml, cluster.dart_rundir + "/input.nml")
 
     # append section for RTTOV
-    rttov_nml = cluster.scriptsdir + "/../templates/obs_def_rttov.VIS.nml"
+    rttov_nml = cluster.dartwrf_dir + "/templates/obs_def_rttov.VIS.nml"
     append_file(cluster.dart_rundir + "/input.nml", rttov_nml)
\ No newline at end of file
diff --git a/dartwrf/evaluate_obs_space.py b/dartwrf/evaluate_obs_space.py
index 91cae073f7fcabd2b4f61edc575f86898252d188..5cabadeb955352a1d128edafad82d0a79b073dd0 100755
--- a/dartwrf/evaluate_obs_space.py
+++ b/dartwrf/evaluate_obs_space.py
@@ -8,7 +8,7 @@ from config.cluster import cluster
 from dartwrf import assim_synth_obs as aso
 
 def get_previous_obsseq_file(time):
-     oso_input = cluster.archivedir+'/obs_seq_out' + init.strftime("/%Y-%m-%d_%H:%M_obs_seq.out-beforeQC")
+     oso_input = cluster.archivedir+'/obs_seq_out' + time.strftime("/%Y-%m-%d_%H:%M_obs_seq.out-beforeQC")
 
      if not os.path.isfile(oso_input):  # fallback
           oso_input = cluster.archivedir+'/obs_seq_out' + time.strftime("/%Y-%m-%d_%H:%M_obs_seq.out")
@@ -28,15 +28,22 @@ if __name__ == "__main__":
 
      use_other_obsseq = False
 
-     aso.write_list_of_inputfiles_posterior(time)
+     # we need an existing run_DART folder
+     aso.prepare_run_DART_folder()
 
-     # use the last assimilation obsseq file for the observation locations (note: observed values are not valid)
+     # # prepare nature and prior ensemble
+     aso.prepare_nature_dart(time)
+     aso.prepare_prior_ensemble(time, prior_init_time=init, prior_valid_time=time, prior_path_exp=cluster.archivedir)
+
+     # tell DART to use the prior as input
+     aso.write_list_of_inputfiles_prior()
 
      if use_other_obsseq:  # use a different obsseq file
           oso_input = use_other_obsseq
      else:  # from same exp
-          
-          oso_input = get_previous_obsseq_file(time)
+
+          # use the last assimilation obsseq file for the observation locations (note: observed values are not valid)     
+          oso_input = get_previous_obsseq_file(init)
           shutil.copy(oso_input, cluster.dart_rundir+'/obs_seq.out')
 
      aso.evaluate(time, output_format="%Y-%m-%d_%H:%M_obs_seq.final-evaluate")
\ No newline at end of file
diff --git a/dartwrf/prepare_wrfrundir.py b/dartwrf/prepare_wrfrundir.py
index 3abe07441de4dd0b0a0fc283f40dc26395ab32a4..32e8c984f65d376f1f3741e437350afaf90250a9 100755
--- a/dartwrf/prepare_wrfrundir.py
+++ b/dartwrf/prepare_wrfrundir.py
@@ -11,8 +11,9 @@ import datetime as dt
 
 from config.cfg import exp
 from config.cluster import cluster
-from dartwrf.utils import symlink, copy, link_contents
-from dartwrf import prepare_namelist
+
+from utils import symlink, copy, link_contents
+import prepare_namelist
 
 if __name__ == '__main__':
 
diff --git a/dartwrf/workflows.py b/dartwrf/workflows.py
index 036bd46e2c935639ad34908b00d2968f55d65b2c..27d7d8dfd3c45ed0a15ee1b657616b8da04561bf 100644
--- a/dartwrf/workflows.py
+++ b/dartwrf/workflows.py
@@ -30,14 +30,20 @@ class WorkFlows(object):
             # Copy scripts to self.cluster.archivedir folder
             os.makedirs(self.cluster.archivedir, exist_ok=True)
             try:
-                shutil.copytree(self.cluster.scriptsdir, self.cluster.scripts_rundir)
+                shutil.copytree(self.cluster.dartwrf_dir, self.cluster.scripts_rundir)
                 print('scripts have been copied to', self.cluster.archivedir)
             except FileExistsError as e:
                 warnings.warn(str(e))
+                if input('Scripts already exist and will not be overwritten. Continue? (Y/n) ') in ['Y', 'y']:
+                    pass
+                else:
+                    raise e
             except:
                 raise
 
         def copy_config_to_archive():
+            os.makedirs(self.cluster.scripts_rundir+'/config/', exist_ok=True)
+
             # later, we can load the exp cfg with `from config.cfg import exp`
             shutil.copyfile('config/'+exp_config, self.cluster.scripts_rundir+'/config/cfg.py')
 
@@ -54,6 +60,9 @@ class WorkFlows(object):
         self.cluster = importlib.import_module('config.'+server_config.strip('.py')).cluster
         self.exp = importlib.import_module('config.'+exp_config.strip('.py')).exp
 
+        # we set the path from where python should import dartwrf modules
+        self.cluster.python = 'export PYTHONPATH='+self.cluster.scripts_rundir+'; '+self.cluster.python
+
         copy_dartwrf_to_archive()
         copy_config_to_archive()
 
@@ -111,9 +120,7 @@ class WorkFlows(object):
                 txt += '}'
                 f.write(txt)
 
-        _dict_to_py(_obskind_read(), self.cluster.scriptsdir+'/../config/obskind.py')
-        
-
+        _dict_to_py(_obskind_read(), self.cluster.scripts_rundir+'/config/obskind.py')
         
         # probably not needed
         # shutil.copy('config/'+server_config, 'config/cluster.py')  # whatever server, the config name is always the same!
@@ -133,7 +140,7 @@ class WorkFlows(object):
         Returns:
             None
         """
-        cmd = 'cd '+self.cluster.scripts_rundir+'; '+self.cluster.python+' prepare_wrfrundir.py '+init_time.strftime('%Y-%m-%d_%H:%M')
+        cmd = self.cluster.python+' '+self.cluster.scripts_rundir+'/dartwrf/prepare_wrfrundir.py '+init_time.strftime('%Y-%m-%d_%H:%M')
         print(cmd)
         os.system(cmd)
 
@@ -251,7 +258,7 @@ class WorkFlows(object):
         if not os.path.exists(prior_path_exp):
             raise IOError('prior_path_exp does not exist: '+prior_path_exp)
 
-        cmd = ('cd '+self.cluster.scripts_rundir+'; '+self.cluster.python+' assim_synth_obs.py '
+        cmd = (self.cluster.python+' '+self.cluster.scripts_rundir+'/dartwrf/assim_synth_obs.py '
                 +assim_time.strftime('%Y-%m-%d_%H:%M ')
                 +prior_init_time.strftime('%Y-%m-%d_%H:%M ')
                 +prior_valid_time.strftime('%Y-%m-%d_%H:%M ')
diff --git a/tests/input.nml.desired_output b/tests/input.nml.desired_output
index 64635bd3a885d1243c24f5553df7343b47d1c613..7362dc412778c8cd9c5219a3928f23ec31a09448 100644
--- a/tests/input.nml.desired_output
+++ b/tests/input.nml.desired_output
@@ -46,7 +46,7 @@
    output_interval          = 1,
    num_groups               = 1,
    distributed_state        = .true.
-   compute_posterior        = .true.
+   compute_posterior        = .false.
    output_forward_op_errors = .false.,
    output_timestamps        = .false.,
    trace_execution          = .false.,
diff --git a/tests/test_inputnml.py b/tests/test_inputnml.py
index 26466033dd02a2a1fca25dbf326359e26307fbc4..6117ef08c0bafd7cd837036c3e1c513be4c3f868 100644
--- a/tests/test_inputnml.py
+++ b/tests/test_inputnml.py
@@ -14,9 +14,10 @@ def test_input_nml():
     nml = dart_nml.read_namelist(test_input)
 
     # modify one parameter
-    nml['&filter_nml']['ens_size'] = [[str(999)]]
-    nml['&filter_nml']['num_output_state_members'] = [[str(999)]]
-    nml['&filter_nml']['num_output_obs_members'] = [[str(999)]]
+    nml['&filter_nml']['ens_size'] = [[999,]]
+    nml['&filter_nml']['num_output_state_members'] = [[999,]]
+    nml['&filter_nml']['num_output_obs_members'] = [[999,]]
+    nml['&filter_nml']['compute_posterior'] = [['.false.']]
 
     # save the configuration as input.nml
     dart_nml.write_namelist_from_dict(nml, test_output)
@@ -26,20 +27,29 @@ def test_input_nml():
     nml_desired = dart_nml.read_namelist(desired_output)
     nml_test = dart_nml.read_namelist(test_output)
 
+    # section e.g. '&preprocess_nml'
     for section, _ in nml_desired.items():
 
+        # param e.g. 'filter_kind'
         for param, value in nml_desired[section].items():
 
             should_have = nml_desired[section][param]
-            should_have = [v.strip() for line in should_have for v in line]
-
             have = nml_test[section][param]
-            have = [v.strip() for line in have for v in line]
-            
-            if should_have != have:
 
-                raise ValueError(section, param, 'should be', should_have, 'but is', have)
+            for i, line in enumerate(should_have):
+
+                for j, expected in enumerate(line):
+
+                    if expected != have[i][j]:
 
+                        # if one has "wrfinput" and other has 'wrfinput'
+                        # thats ok
+                        this = "'"+have[i][j].strip('"')+"'"
+                        if this == expected:
+                            pass
+                            # print(this, expected)
+                        else:
+                            raise ValueError('expected', expected, 'got', have[i][j])
     
     os.remove(test_output)