Skip to content
Snippets Groups Projects
Commit 93f4573c authored by lkugler's avatar lkugler
Browse files

explicitly load modules

parent 710515be
No related branches found
No related tags found
No related merge requests found
...@@ -55,7 +55,7 @@ cluster.python_verif = '/jetfs/home/lkugler/miniconda3/envs/enstools/bin/python' ...@@ -55,7 +55,7 @@ cluster.python_verif = '/jetfs/home/lkugler/miniconda3/envs/enstools/bin/python'
cluster.ncks = '/jetfs/spack/opt/spack/linux-rhel8-skylake_avx512/intel-2021.7.1/nco-5.1.0-izrhxv24jqco5epjhf5ledsqwanojc5m/bin/ncks' cluster.ncks = '/jetfs/spack/opt/spack/linux-rhel8-skylake_avx512/intel-2021.7.1/nco-5.1.0-izrhxv24jqco5epjhf5ledsqwanojc5m/bin/ncks'
cluster.ideal = '/jetfs/home/lkugler/bin/ideal-v4.3_v1.22.exe' cluster.ideal = '/jetfs/home/lkugler/bin/ideal-v4.3_v1.22.exe'
cluster.wrfexe = '/jetfs/home/lkugler/bin/wrf-v4.3_v1.22_ifort_20230413.exe' cluster.wrfexe = '/jetfs/home/lkugler/bin/wrf-v4.3_v1.22_ifort_20230413.exe'
cluster.container = '' cluster.dart_modules = 'module purge; module load netcdf-fortran/4.5.3-gcc-8.5.0-qsqbozc;'
# paths for data output # paths for data output
cluster.wrf_rundir_base = '/jetfs/home/lkugler/data/run_WRF/' # path for temporary files cluster.wrf_rundir_base = '/jetfs/home/lkugler/data/run_WRF/' # path for temporary files
......
...@@ -53,7 +53,6 @@ cluster.python_verif = '/users/staff/lkugler/miniconda3/bin/python' ...@@ -53,7 +53,6 @@ cluster.python_verif = '/users/staff/lkugler/miniconda3/bin/python'
cluster.ncks = '/home/swd/spack/opt/spack/linux-rhel8-skylake_avx512/gcc-8.5.0/nco-5.0.1-ntu44aoxlvwtr2tsrobfr4lht7cpvccf/bin/ncks' cluster.ncks = '/home/swd/spack/opt/spack/linux-rhel8-skylake_avx512/gcc-8.5.0/nco-5.0.1-ntu44aoxlvwtr2tsrobfr4lht7cpvccf/bin/ncks'
cluster.ideal = '' #/jetfs/home/lkugler/bin/ideal-v4.3_v1.22.exe' cluster.ideal = '' #/jetfs/home/lkugler/bin/ideal-v4.3_v1.22.exe'
cluster.wrfexe = '' #/jetfs/home/lkugler/bin/wrf-v4.3_v1.22.exe' cluster.wrfexe = '' #/jetfs/home/lkugler/bin/wrf-v4.3_v1.22.exe'
cluster.container = ''
# paths for data output # paths for data output
cluster.wrf_rundir_base = utils.userhome+'/AdvDA23/run_WRF/' # path for temporary files cluster.wrf_rundir_base = utils.userhome+'/AdvDA23/run_WRF/' # path for temporary files
......
...@@ -169,7 +169,7 @@ def run_perfect_model_obs(nproc=12, verbose=True): ...@@ -169,7 +169,7 @@ def run_perfect_model_obs(nproc=12, verbose=True):
try_remove(cluster.dartrundir + "/obs_seq.out") try_remove(cluster.dartrundir + "/obs_seq.out")
if not os.path.exists(cluster.dartrundir + "/obs_seq.in"): if not os.path.exists(cluster.dartrundir + "/obs_seq.in"):
raise RuntimeError("obs_seq.in does not exist in " + cluster.dartrundir) raise RuntimeError("obs_seq.in does not exist in " + cluster.dartrundir)
shell('mpirun -np '+str(nproc)+' '+cluster.container+" ./perfect_model_obs > log.perfect_model_obs") shell(cluster.dart_modules+' mpirun -np '+str(nproc)+" ./perfect_model_obs > log.perfect_model_obs")
if not os.path.exists(cluster.dartrundir + "/obs_seq.out"): if not os.path.exists(cluster.dartrundir + "/obs_seq.out"):
raise RuntimeError( raise RuntimeError(
"obs_seq.out does not exist in " + cluster.dartrundir, "obs_seq.out does not exist in " + cluster.dartrundir,
...@@ -182,9 +182,9 @@ def filter(nproc=12): ...@@ -182,9 +182,9 @@ def filter(nproc=12):
try_remove(cluster.dartrundir + "/obs_seq.final") try_remove(cluster.dartrundir + "/obs_seq.final")
t = time_module.time() t = time_module.time()
if nproc < 12: if nproc < 12:
shell('mpirun -np 12 '+cluster.container+' ./filter &> log.filter') shell(cluster.dart_modules+' mpirun -np 12 ./filter &> log.filter')
else: # -genv I_MPI_PIN_PROCESSOR_LIST=0-"+str(int(nproc) - 1) else: # -genv I_MPI_PIN_PROCESSOR_LIST=0-"+str(int(nproc) - 1)
shell("mpirun -np "+str(int(nproc))+' '+cluster.container+" ./filter > log.filter") shell(cluster.dart_modules+" mpirun -np "+str(int(nproc))+" ./filter > log.filter")
print("./filter took", int(time_module.time() - t), "seconds") print("./filter took", int(time_module.time() - t), "seconds")
if not os.path.isfile(cluster.dartrundir + "/obs_seq.final"): if not os.path.isfile(cluster.dartrundir + "/obs_seq.final"):
raise RuntimeError( raise RuntimeError(
......
...@@ -39,7 +39,7 @@ def run_obsdiag(filepaths, f_out='./obsdiag.nc'): ...@@ -39,7 +39,7 @@ def run_obsdiag(filepaths, f_out='./obsdiag.nc'):
print('------ running obs_diag program') print('------ running obs_diag program')
os.chdir(rundir_program) os.chdir(rundir_program)
symlink(cluster.dart_srcdir+'/obs_diag', rundir_program+'/obs_diag') symlink(cluster.dart_srcdir+'/obs_diag', rundir_program+'/obs_diag')
shell(cluster.container, './obs_diag >& obs_diag.log') # caution, this overwrites obs_seq_to_netcdf shell(cluster.dart_modules+' ./obs_diag >& obs_diag.log') # caution, this overwrites obs_seq_to_netcdf
# move output to archive # move output to archive
#outdir = os.path.dirname(f_out) #'/'.join(folder_obs_seq_final.split('/')[:-1]) #outdir = os.path.dirname(f_out) #'/'.join(folder_obs_seq_final.split('/')[:-1])
...@@ -57,7 +57,7 @@ def run_obs_seq_to_netcdf(filepaths, f_out='./obs_epoch.nc'): ...@@ -57,7 +57,7 @@ def run_obs_seq_to_netcdf(filepaths, f_out='./obs_epoch.nc'):
print('------ running obs_seq_to_netcdf program') print('------ running obs_seq_to_netcdf program')
#shutil.copy(cluster.dart_srcdir+'/obs_seq_to_netcdf-bak', rundir_program+'/obs_seq_to_netcdf') #shutil.copy(cluster.dart_srcdir+'/obs_seq_to_netcdf-bak', rundir_program+'/obs_seq_to_netcdf')
os.chdir(rundir_program) os.chdir(rundir_program)
shell(cluster.container, './obs_seq_to_netcdf >& obs_seq_to_netcdf.log') # caution, overwrites its own binary?! shell(cluster.dart_modules+' ./obs_seq_to_netcdf >& obs_seq_to_netcdf.log') # caution, overwrites its own binary?!
shutil.move(rundir_program+'/obs_epoch_001.nc', f_out) shutil.move(rundir_program+'/obs_epoch_001.nc', f_out)
print(f_out, 'saved.') print(f_out, 'saved.')
......
...@@ -14,6 +14,7 @@ class ClusterConfig(object): ...@@ -14,6 +14,7 @@ class ClusterConfig(object):
"""Collection of variables regarding the cluster configuration""" """Collection of variables regarding the cluster configuration"""
def __init__(self, exp): def __init__(self, exp):
self.exp = exp self.exp = exp
self.dart_modules = '' # default value
@property @property
def archivedir(self): def archivedir(self):
......
...@@ -29,6 +29,9 @@ class WorkFlows(object): ...@@ -29,6 +29,9 @@ class WorkFlows(object):
exp (str): Path to exp config file exp (str): Path to exp config file
config (str): Path to the cluster config file config (str): Path to the cluster config file
Attributes:
cluster (obj): cluster configuration as defined in server_config file
Note: Note:
in WorkFlows, we load the config from the git cloned folder in WorkFlows, we load the config from the git cloned folder
in all other dartwrf scripts, load the config from cluster.scripts_rundir in all other dartwrf scripts, load the config from cluster.scripts_rundir
...@@ -196,7 +199,7 @@ class WorkFlows(object): ...@@ -196,7 +199,7 @@ class WorkFlows(object):
runtime_wallclock_mins_expected = int(8+time_in_simulation_hours*9.5) # usually below 9 min/hour runtime_wallclock_mins_expected = int(8+time_in_simulation_hours*9.5) # usually below 9 min/hour
id = self.cluster.run_job(cmd, "WRF", cfg_update={"array": "1-"+str(self.cluster.size_jobarray), "ntasks": "10", "nodes": "1", id = self.cluster.run_job(cmd, "WRF", cfg_update={"array": "1-"+str(self.cluster.size_jobarray), "ntasks": "10", "nodes": "1",
"time": str(runtime_wallclock_mins_expected), "mem": "140G"}, depends_on=[id]) "time": str(runtime_wallclock_mins_expected), "mem": "100G"}, depends_on=[id])
return id return id
......
0% — Loading…
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment