From 441eb32bcaedbf52f730419f6f36fcac678ba1f2 Mon Sep 17 00:00:00 2001
From: lkugler <lukas.kugler@gmail.com>
Date: Tue, 20 Jun 2023 11:06:12 +0200
Subject: [PATCH] hist_interval_s: WRF history output interval in seconds

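Switch the WRF history output interval from minutes (hist_interval) to
seconds (hist_interval_s) and include seconds in the time-format
strings. Archived obs_seq.out files are now named via the shared
pattern aso.pattern_obs_seq_out.

Further changes:
- raise FileNotFoundError early if wrfout_d01 is missing, since it is
  needed as a grid template for creating observations
- include the perfect_model_obs log in the error message when
  obs_seq.out was not produced
- merge evaluate_plus0/evaluate_plus1 into a single
  evaluate_obs_posterior_after_analysis(init_valid_tuples) that takes
  the (init, valid) time pairs explicitly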
---
 dartwrf/obs/create_obsseq_out.py | 14 +++++++++++---
 dartwrf/workflows.py             | 38 +++++++++++++++++---------------------
 2 files changed, 28 insertions(+), 24 deletions(-)

diff --git a/dartwrf/obs/create_obsseq_out.py b/dartwrf/obs/create_obsseq_out.py
index 8eb99b6..be91eb4 100644
--- a/dartwrf/obs/create_obsseq_out.py
+++ b/dartwrf/obs/create_obsseq_out.py
@@ -3,6 +3,7 @@ import os, shutil, warnings
 from dartwrf.utils import try_remove, print, shell, symlink
 import dartwrf.obs.create_obsseq_in as osi
 from dartwrf.obs import obsseq
+from dartwrf import assim_synth_obs as aso
 
 from dartwrf.exp_config import exp
 from dartwrf.server_config import cluster
@@ -11,6 +12,11 @@ from dartwrf.server_config import cluster
 def _prepare_DART_grid_template():
     # DART needs a wrfinput file as a template for the grid
     # No data will be read from this file, but the grid information must match exactly.
+
+    # error if the source does not exist
+    if not os.path.exists(cluster.dart_rundir + "/wrfout_d01"):
+        raise FileNotFoundError("wrfout_d01 not found in " + cluster.dart_rundir
+                                + " but necessary to create observations")
     symlink(cluster.dart_rundir + "/wrfout_d01", 
             cluster.dart_rundir + "/wrfinput_d01")
 
@@ -38,7 +44,7 @@ def generate_obsseq_out(time):
 
     def apply_superobbing(oso):
         try:
-            f_oso = dir_obsseq + time.strftime("/%Y-%m-%d_%H:%M_obs_seq.out-before_superob")
+            f_oso = dir_obsseq + time.strftime("/%Y-%m-%d_%H:%M:%S_obs_seq.out-before_superob")
             shutil.copy(cluster.dart_rundir + "/obs_seq.out-before_superob", f_oso)
             print('saved', f_oso)
         except Exception as e:
@@ -68,7 +74,7 @@ def generate_obsseq_out(time):
         oso = apply_superobbing(oso)
 
     # archive complete obsseqout
-    f_oso = dir_obsseq + time.strftime("/%Y-%m-%d_%H:%M_obs_seq.out")
+    f_oso = dir_obsseq + time.strftime(aso.pattern_obs_seq_out)
     shutil.copy(cluster.dart_rundir + "/obs_seq.out", f_oso)
     print('saved', f_oso)
     return oso
@@ -90,9 +96,11 @@ def run_perfect_model_obs(nproc=12):
         raise RuntimeError("obs_seq.in does not exist in " + cluster.dart_rundir)
     shell(cluster.dart_modules+' mpirun -np '+str(nproc)+" ./perfect_model_obs > log.perfect_model_obs")
     if not os.path.exists(cluster.dart_rundir + "/obs_seq.out"):
+        log_file_content = open(cluster.dart_rundir + "/log.perfect_model_obs").read()
         raise RuntimeError(
             "obs_seq.out does not exist in " + cluster.dart_rundir,
-            "\n look for " + cluster.dart_rundir + "/log.perfect_model_obs")
+            "\n probably perfect_model_obs failed, log file says:\n",
+            log_file_content)
     
 if __name__ == '__main__':
     """Generate obs_seq.out files from an experiment
diff --git a/dartwrf/workflows.py b/dartwrf/workflows.py
index 9dc8fa2..5c8877c 100644
--- a/dartwrf/workflows.py
+++ b/dartwrf/workflows.py
@@ -217,18 +217,19 @@ class WorkFlows(object):
             first_minutes (bool, optional): if True, get wrfout of first 5 minutes every minute
             input_is_restart (bool, optional): if True, start WRF from WRFrst file (restart mode)
             output_restart_interval (int, optional): interval in minutes between output of WRFrst files
-            hist_interval (int, optional): interval in minutes between output of WRF history files
+            hist_interval (float, optional): interval in minutes between output of WRF history files
             radt (int, optional): time step of radiation scheme
 
         Returns:
             str: job ID of the submitted job
         """
 
-        def prepare_WRF_inputfiles(begin, end, hist_interval=5, radt=5, output_restart_interval=False, depends_on=None):
+        def prepare_WRF_inputfiles(begin, end, hist_interval_s=300, radt=5, output_restart_interval=False, depends_on=None):
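+            # hist_interval_s: WRF history output interval in seconds (e.g. 300 s = 5 min)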
             
             args = [self.cluster.python, self.cluster.scripts_rundir+'/prepare_namelist.py',
-                    begin.strftime('%Y-%m-%d_%H:%M'), end.strftime('%Y-%m-%d_%H:%M'),
-                    str(hist_interval), '--radt='+str(radt), '--restart='+restart_flag,]
+                    begin.strftime('%Y-%m-%d_%H:%M:%S'), end.strftime('%Y-%m-%d_%H:%M:%S'),
+                    str(hist_interval_s), '--radt='+str(radt), '--restart='+restart_flag,]
 
             if output_restart_interval:
                 args.append('--restart_interval='+str(int(float(output_restart_interval))))
@@ -245,8 +245,8 @@ class WorkFlows(object):
 
-        # every minute output within first 5 minutes (needed for validating a radiance assimilation)
+        # output every 30 seconds during the first minutes (needed for validating a radiance assimilation)
         if first_minutes:
-            id = prepare_WRF_inputfiles(begin, begin+dt.timedelta(minutes=4), 
-                    hist_interval=1,  # to get an output after 1 minute
+            id = prepare_WRF_inputfiles(begin, begin+dt.timedelta(minutes=3), 
+                    hist_interval_s=30,  # to get an output every 30 seconds
                     radt = 1,  # to get a cloud fraction CFRAC after 1 minute
                     output_restart_interval=output_restart_interval, 
                     depends_on=id)
@@ -258,7 +258,7 @@ class WorkFlows(object):
 
         # forecast for the whole forecast duration       
         id = prepare_WRF_inputfiles(begin, end, 
-                                    hist_interval=hist_interval, 
+                                    hist_interval_s=int(hist_interval*60),
                                     radt=radt,
                                     output_restart_interval=output_restart_interval,
                                     depends_on=id)
@@ -332,22 +332,17 @@ class WorkFlows(object):
                 depends_on=[depends_on])
         return id
     
-    def evaluate_plus1(self, list_assim_times, depends_on=None):
-        list_of_tuples = [(init, (init+dt.timedelta(minutes=1))) for init in list_assim_times]
-        arg = ' '.join([ttuple[0].strftime('%Y-%m-%d_%H:%M,')+ttuple[1].strftime('%Y-%m-%d_%H:%M') for ttuple in list_of_tuples])
+    def evaluate_obs_posterior_after_analysis(self, init_valid_tuples, depends_on=None):
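+        """Evaluate the observation-space posterior for (init, valid) time pairs.
+
+        Replaces evaluate_plus0 and evaluate_plus1, which used the fixed
+        pairs (init, init) and (init, init + 1 min), respectively.
+        """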
 
-        cmd = self.cluster.python+' '+self.cluster.scripts_rundir+'/evaluate_obs_space.py '+arg
-        id = self.cluster.run_job(cmd, 'eval+1'+self.exp.expname, cfg_update={"ntasks": "12", "mem": "50G", "ntasks-per-node": "12", "ntasks-per-core": "2", 
-                                                                              "time": "15", "mail-type": "FAIL"}, 
-                depends_on=[depends_on])
-        return id
-
-    def evaluate_plus0(self, list_assim_times, depends_on=None):
-        list_of_tuples = [(init, init) for init in list_assim_times]
-        arg = ' '.join([ttuple[0].strftime('%Y-%m-%d_%H:%M,')+ttuple[1].strftime('%Y-%m-%d_%H:%M') for ttuple in list_of_tuples])
+        arg = ' '.join([init.strftime('%Y-%m-%d_%H:%M') + ',' + valid.strftime('%Y-%m-%d_%H:%M:%S') for (init, valid) in init_valid_tuples])
 
         cmd = self.cluster.python+' '+self.cluster.scripts_rundir+'/evaluate_obs_space.py '+arg
-        id = self.cluster.run_job(cmd, 'eval+0'+self.exp.expname, cfg_update={"ntasks": "12", "mem": "50G", "ntasks-per-node": "12", "ntasks-per-core": "2", 
+        id = self.cluster.run_job(cmd, 'eval'+self.exp.expname, cfg_update={"ntasks": "12", "mem": "50G", "ntasks-per-node": "12", "ntasks-per-core": "2", 
                                                                               "time": "15", "mail-type": "FAIL"}, 
                 depends_on=[depends_on])
         return id
-- 
GitLab