diff --git a/config/jet.py b/config/jet.py
index 7223136469a7700bc3b61b9e0cf8cac696d891ff..24bb5338f15119c870e88c294e9b656740a87836 100755
--- a/config/jet.py
+++ b/config/jet.py
@@ -55,7 +55,7 @@ cluster.python_verif = '/jetfs/home/lkugler/miniconda3/envs/enstools/bin/python'
 cluster.ncks = '/jetfs/spack/opt/spack/linux-rhel8-skylake_avx512/intel-2021.7.1/nco-5.1.0-izrhxv24jqco5epjhf5ledsqwanojc5m/bin/ncks'
 cluster.ideal = '/jetfs/home/lkugler/bin/ideal-v4.3_v1.22.exe'
 cluster.wrfexe = '/jetfs/home/lkugler/bin/wrf-v4.3_v1.22_ifort_20230413.exe'
-cluster.container = ''
+cluster.dart_modules = 'module purge; module load netcdf-fortran/4.5.3-gcc-8.5.0-qsqbozc;'
 
 # paths for data output
 cluster.wrf_rundir_base = '/jetfs/home/lkugler/data/run_WRF/' # path for temporary files
diff --git a/config/srvx1.py b/config/srvx1.py
index 2a986d3cdba990d9accc21520e1448baaf31a24c..6e1cf0a49e1039c53b2a6dd6579731a11295a491 100755
--- a/config/srvx1.py
+++ b/config/srvx1.py
@@ -53,7 +53,6 @@ cluster.python_verif = '/users/staff/lkugler/miniconda3/bin/python'
 cluster.ncks = '/home/swd/spack/opt/spack/linux-rhel8-skylake_avx512/gcc-8.5.0/nco-5.0.1-ntu44aoxlvwtr2tsrobfr4lht7cpvccf/bin/ncks'
 cluster.ideal = '' #/jetfs/home/lkugler/bin/ideal-v4.3_v1.22.exe'
 cluster.wrfexe = '' #/jetfs/home/lkugler/bin/wrf-v4.3_v1.22.exe'
-cluster.container = ''
 
 # paths for data output
 cluster.wrf_rundir_base = utils.userhome+'/AdvDA23/run_WRF/' # path for temporary files
diff --git a/dartwrf/assim_synth_obs.py b/dartwrf/assim_synth_obs.py
index 297ede4db4e86f4fc4ebe960a44db38be312f96f..414278876717d1c44d33fb377ac135b3aad3bc04 100755
--- a/dartwrf/assim_synth_obs.py
+++ b/dartwrf/assim_synth_obs.py
@@ -169,7 +169,7 @@ def run_perfect_model_obs(nproc=12, verbose=True):
     try_remove(cluster.dartrundir + "/obs_seq.out")
     if not os.path.exists(cluster.dartrundir + "/obs_seq.in"):
         raise RuntimeError("obs_seq.in does not exist in " + cluster.dartrundir)
-    shell('mpirun -np '+str(nproc)+' '+cluster.container+" ./perfect_model_obs > log.perfect_model_obs")
+    shell(cluster.dart_modules+' mpirun -np '+str(nproc)+" ./perfect_model_obs > log.perfect_model_obs")
     if not os.path.exists(cluster.dartrundir + "/obs_seq.out"):
         raise RuntimeError(
             "obs_seq.out does not exist in " + cluster.dartrundir,
@@ -182,9 +182,9 @@ def filter(nproc=12):
     try_remove(cluster.dartrundir + "/obs_seq.final")
     t = time_module.time()
     if nproc < 12:
-        shell('mpirun -np 12 '+cluster.container+' ./filter &> log.filter')
+        shell(cluster.dart_modules+' mpirun -np 12 ./filter &> log.filter')
     else: # -genv I_MPI_PIN_PROCESSOR_LIST=0-"+str(int(nproc) - 1)
-        shell("mpirun -np "+str(int(nproc))+' '+cluster.container+" ./filter > log.filter")
+        shell(cluster.dart_modules+" mpirun -np "+str(int(nproc))+" ./filter > log.filter")
     print("./filter took", int(time_module.time() - t), "seconds")
     if not os.path.isfile(cluster.dartrundir + "/obs_seq.final"):
         raise RuntimeError(
diff --git a/dartwrf/run_obs_diag.py b/dartwrf/run_obs_diag.py
index 4df596fe42ad863fe2d90efda4b95ca1dfdfc893..bd1ef75b30d53ea9671b48c72485d22ba9607e34 100644
--- a/dartwrf/run_obs_diag.py
+++ b/dartwrf/run_obs_diag.py
@@ -39,7 +39,7 @@ def run_obsdiag(filepaths, f_out='./obsdiag.nc'):
     print('------ running obs_diag program')
     os.chdir(rundir_program)
     symlink(cluster.dart_srcdir+'/obs_diag', rundir_program+'/obs_diag')
-    shell(cluster.container, './obs_diag >& obs_diag.log') # caution, this overwrites obs_seq_to_netcdf
+    shell(cluster.dart_modules+' ./obs_diag >& obs_diag.log') # caution, this overwrites obs_seq_to_netcdf
 
     # move output to archive
     #outdir = os.path.dirname(f_out) #'/'.join(folder_obs_seq_final.split('/')[:-1])
@@ -57,7 +57,7 @@ def run_obs_seq_to_netcdf(filepaths, f_out='./obs_epoch.nc'):
     print('------ running obs_seq_to_netcdf program')
     #shutil.copy(cluster.dart_srcdir+'/obs_seq_to_netcdf-bak', rundir_program+'/obs_seq_to_netcdf')
     os.chdir(rundir_program)
-    shell(cluster.container, './obs_seq_to_netcdf >& obs_seq_to_netcdf.log') # caution, overwrites its own binary?!
+    shell(cluster.dart_modules+' ./obs_seq_to_netcdf >& obs_seq_to_netcdf.log') # caution, overwrites its own binary?!
     shutil.move(rundir_program+'/obs_epoch_001.nc', f_out)
     print(f_out, 'saved.')
 
diff --git a/dartwrf/utils.py b/dartwrf/utils.py
index 1cab996d61dfcec6aeed0fcf19eefc0567d24819..9844bc3667375e7c3d6169e25d1825f377f57793 100755
--- a/dartwrf/utils.py
+++ b/dartwrf/utils.py
@@ -14,6 +14,7 @@ class ClusterConfig(object):
     """Collection of variables regarding the cluster configuration"""
     def __init__(self, exp):
         self.exp = exp
+        self.dart_modules = '' # default value
 
     @property
     def archivedir(self):
diff --git a/dartwrf/workflows.py b/dartwrf/workflows.py
index 0fa0b8dea0379e9257a1405e7a337fa3b1fff89f..b4f82e420b562a28a6bfacf87739b2b0d9c726bc 100644
--- a/dartwrf/workflows.py
+++ b/dartwrf/workflows.py
@@ -29,6 +29,9 @@ class WorkFlows(object):
             exp (str): Path to exp config file
             config (str): Path to the cluster config file
 
+        Attributes:
+            cluster (obj): cluster configuration as defined in server_config file
+
         Note: in WorkFlows, we load the config from the git cloned folder
               in all other dartwrf scripts, load the config from cluster.scripts_rundir
 
@@ -196,7 +199,7 @@ class WorkFlows(object):
         runtime_wallclock_mins_expected = int(8+time_in_simulation_hours*9.5) # usually below 9 min/hour
         id = self.cluster.run_job(cmd, "WRF", cfg_update={"array": "1-"+str(self.cluster.size_jobarray), "ntasks": "10", "nodes": "1",
-                        "time": str(runtime_wallclock_mins_expected), "mem": "140G"}, depends_on=[id])
+                        "time": str(runtime_wallclock_mins_expected), "mem": "100G"}, depends_on=[id])
         return id
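
Note (not part of the patch): the change replaces the cluster.container prefix, which is empty in both config files, with a cluster.dart_modules string that gets prepended to each shell command, so the module-load commands run in the same subshell that launches the DART binaries. A minimal sketch of that pattern, assuming shell() is a thin wrapper around subprocess; the wrapper and the nproc value below are illustrative, only the dart_modules string and the command composition mirror the diff:

    import subprocess

    def shell(cmd):
        # assumed behaviour of the shell() helper: run the string through a shell
        subprocess.run(cmd, shell=True, check=True)

    class ClusterConfig:
        # stand-in for dartwrf.utils.ClusterConfig; default is no module loads
        def __init__(self):
            self.dart_modules = ''

    cluster = ClusterConfig()
    # as set in config/jet.py: each module command is terminated by ';' so the
    # prefix chains cleanly with whatever command follows
    cluster.dart_modules = 'module purge; module load netcdf-fortran/4.5.3-gcc-8.5.0-qsqbozc;'

    nproc = 12
    # same composition as in dartwrf/assim_synth_obs.py: modules first, then the MPI launch
    shell(cluster.dart_modules+' mpirun -np '+str(nproc)+' ./filter > log.filter')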