diff --git a/cases/cosmo-ghg-spinup-test/cosmo_runjob.cfg b/cases/cosmo-ghg-spinup-test/cosmo_runjob.cfg index 28c0cbb6..608b8d15 100644 --- a/cases/cosmo-ghg-spinup-test/cosmo_runjob.cfg +++ b/cases/cosmo-ghg-spinup-test/cosmo_runjob.cfg @@ -34,7 +34,7 @@ echo "============== StartTime: `date +%s` s" echo "============== StartTime: `date`" echo "=====================================================" -srun -u ./{execname} >> {logfile} 2>&1 +srun -u ./{cfg.cosmo_execname} >> {logfile} 2>&1 pid=$? echo "=====================================================" diff --git a/cases/cosmo-ghg-test/cosmo_runjob.cfg b/cases/cosmo-ghg-test/cosmo_runjob.cfg index 28c0cbb6..608b8d15 100644 --- a/cases/cosmo-ghg-test/cosmo_runjob.cfg +++ b/cases/cosmo-ghg-test/cosmo_runjob.cfg @@ -34,7 +34,7 @@ echo "============== StartTime: `date +%s` s" echo "============== StartTime: `date`" echo "=====================================================" -srun -u ./{execname} >> {logfile} 2>&1 +srun -u ./{cfg.cosmo_execname} >> {logfile} 2>&1 pid=$? echo "=====================================================" diff --git a/cases/icon-art-global-test/icon_runjob.cfg b/cases/icon-art-global-test/icon_runjob.cfg index 99b0de5e..d241a8cf 100644 --- a/cases/icon-art-global-test/icon_runjob.cfg +++ b/cases/icon-art-global-test/icon_runjob.cfg @@ -396,4 +396,4 @@ handle_error(){{ exit 1 fi }} -srun ./icon.exe || handle_error +srun ./{cfg.icon_execname} || handle_error diff --git a/cases/icon-art-oem-test/icon_runjob.cfg b/cases/icon-art-oem-test/icon_runjob.cfg index eec72b77..883c8d86 100644 --- a/cases/icon-art-oem-test/icon_runjob.cfg +++ b/cases/icon-art-oem-test/icon_runjob.cfg @@ -376,4 +376,4 @@ handle_error(){{ exit 1 fi }} -srun ./icon.exe || handle_error +srun ./{cfg.icon_execname} || handle_error diff --git a/cases/icon-test/icon_runjob.cfg b/cases/icon-test/icon_runjob.cfg index 07fabaaf..88c8b735 100755 --- a/cases/icon-test/icon_runjob.cfg +++ b/cases/icon-test/icon_runjob.cfg @@ -342,4 +342,4 @@ EOF # ---------------------------------------------------------------------- # run the model! # ---------------------------------------------------------------------- - srun ./icon.exe +srun ./{cfg.icon_execname} || handle_error diff --git a/config.py b/config.py index 188893dc..18293554 100644 --- a/config.py +++ b/config.py @@ -319,36 +319,18 @@ def create_vars_from_dicts(self, dct=None, key=None): else: setattr(self, subkey, v) - def format_duration(self, duration): - """ - Format a duration represented by a datetime.timedelta object into a human-readable string. - - Parameters: - - duration (datetime.timedelta): The duration to be formatted. - - Returns: - - str: A string representing the formatted duration in the "0d 0h 0m 0s" format. 
-        """
-        seconds = duration.total_seconds()
-        days, remainder = divmod(seconds, 86400)
-        hours, remainder = divmod(remainder, 3600)
-        minutes, seconds = divmod(remainder, 60)
-
-        formatted_duration = f"{int(days)}d {int(hours)}h {int(minutes)}m {int(seconds)}s"
-        return formatted_duration
-
     def get_chunk_list(self):
         self.chunk_list = []
         for startdate_sim in tools.iter_hours(self.startdate, self.enddate,
                                               self.restart_step_hours):
+            enddate_sim = startdate_sim + timedelta(
+                hours=self.restart_step_hours)
             if 'spinup' in self.workflow['features'] and hasattr(
                     self, 'spinup'):
                 if startdate_sim > self.startdate:
                     startdate_sim = startdate_sim - timedelta(
                         hours=self.spinup)
-            enddate_sim = startdate_sim + timedelta(
-                hours=self.restart_step_hours)
             startdate_sim_yyyymmddhh = startdate_sim.strftime("%Y%m%d%H")
             enddate_sim_yyyymmddhh = enddate_sim.strftime("%Y%m%d%H")
             chunk_id = f"{startdate_sim_yyyymmddhh}_{enddate_sim_yyyymmddhh}"
@@ -359,7 +341,7 @@ def get_chunk_list(self):
             self.chunk_list.append(chunk_id)
 
     def get_previous_chunk_id(self, current_chunk_id):
-        """Get the previous chunk ID based on the current chunk ID."""
+        """Get the previous chunk ID based on the current `chunk_id`."""
         index = self.chunk_list.index(current_chunk_id)
         if index > 0:
             self.chunk_id_prev = self.chunk_list[index - 1]
@@ -381,34 +363,30 @@ def get_dep_ids(self, job_name, add_dep=None):
         dep_id_list = []
 
         # Add job dependencies
-        if not self.force_sync:
-            # Could be that job has no dependency, even in an async config,
-            # e.g., prepare_data
-            if deps := self.workflow['dependencies'].get(job_name):
-                for stage in 'previous', 'current':
-                    if dep_stage := deps.get(stage):
-                        for job in dep_stage:
-                            # Could be that dep job id does not exist, e.g.,
-                            # if dep job is deactivated or it's the first chunk
-                            if dep_id := self.job_ids[stage].get(job):
-                                dep_id_list.extend(dep_id)
+        if deps := self.workflow['dependencies'].get(job_name):
+            for stage in 'previous', 'current':
+                if dep_stage := deps.get(stage):
+                    for job in dep_stage:
+                        # Could be that dep job id does not exist, e.g.,
+                        # if dep job is deactivated or it's the first chunk
+                        if dep_id := self.job_ids[stage].get(job):
+                            dep_id_list.extend(dep_id)
         return dep_id_list
 
     def get_dep_cmd(self, job_name, add_dep=None):
-        """Generate the part of the sbatch command that sepcifies dependencies for job_name."""
-        if not self.force_sync:
-            # Default: async case
-            if dep_ids := self.get_dep_ids(job_name, add_dep=add_dep):
-                dep_str = ':'.join(map(str, dep_ids))
-                return f'--dependency=afterok:{dep_str}'
-            else:
-                # job_name has no dependencies but still belongs to an async workflow
-                # so don't use --wait
-                return None
-        else:
-            # Needed for nested run_chain.py
+        """Generate the part of the sbatch command that specifies dependencies for `job_name`."""
+        # Needed for nested run_chain.py
+        if self.force_sync:
             return '--wait'
 
+        if dep_ids := self.get_dep_ids(job_name, add_dep=add_dep):
+            dep_str = ':'.join(map(str, dep_ids))
+            return f'--dependency=afterok:{dep_str}'
+
+        # job_name has no dependencies but still belongs to an async workflow
+        # so don't use --wait
+        return None
+
     def submit(self, job_name, script, add_dep=None):
         """Submit job with dependencies"""
         script_path = Path(script)
@@ -437,10 +415,11 @@ def submit(self, job_name, script, add_dep=None):
 
         return job_id
 
-    def create_sbatch_script(self, job_name):
-        """Create an sbatch script to launch jobs individually.
+    def submit_basic_python(self, job_name):
+        """Create an sbatch script to launch basic Python jobs individually.
         Use run_chain.py arguments to submit those jobs.
         """
+        # Build job script
         walltime = getattr(self, 'walltime', {}).get(job_name, "00:30:00")
         script_lines = [
             '#!/usr/bin/env bash',
@@ -464,7 +443,8 @@ def create_sbatch_script(self, job_name):
         with open(job_file, mode='w') as job_script:
             job_script.write('\n'.join(script_lines))
 
-        return job_file
+        # Submit job
+        self.submit(job_name, job_file)
 
     def wait_for_previous(self):
         """Wait for all jobs of the previous stage to be finished.
diff --git a/env/environment.yml b/env/environment.yml
index 12f9efcf..09381fd4 100644
--- a/env/environment.yml
+++ b/env/environment.yml
@@ -15,6 +15,7 @@ dependencies:
   - pillow
   - xarray
   - cdsapi
+  - scikit-learn
   - sphinx
   - sphinx_rtd_theme
   - sphinx-copybutton
diff --git a/jobs/cosmo.py b/jobs/cosmo.py
index 1afdca96..a110661e 100644
--- a/jobs/cosmo.py
+++ b/jobs/cosmo.py
@@ -14,28 +14,20 @@
 
 
 def main(cfg):
-    """Setup the namelists for a COSMO tracer run and submit the job to the queue.
-
-    Necessary for both COSMO and COSMOART simulations.
-
-    Decide if the soil model should be TERRA or TERRA multi-layer depending on
-    the ``startdate`` of the simulation.
+    """Set up the namelists for a COSMO run and submit the job to the queue.
 
     Create necessary directory structure to run COSMO (run, output, and
     restart directories, defined in ``cfg.cosmo_run``, ``cfg.cosmo_output``,
     and ``cfg.cosmo_restart_out``).
 
     Copy the COSMO-executable from
-    ``cfg.cosmo_bin`` to ``cfg.cosmo_run/cosmo``.
+    ``cfg.cosmo['binary_file']`` to ``cfg.cosmo_run/cfg.cosmo_execname``.
 
-    Convert the tracer-csv-file to a COSMO-namelist file.
+    Convert the tracer csv file to a COSMO namelist file.
 
-    Format the COSMO-namelist-templates
-    (COSMO: ``AF,ORG,IO,DYN,PHY,DIA,ASS``,
-    COSMOART: ``ART,ASS,DIA,DYN,EPS,INI,IO,ORG,PHY``)
-    using the information in ``cfg``.
+    Format the COSMO namelist templates using the information in ``cfg``.
 
-    Format the runscript-template and submit the job.
+    Format the runscript template and submit the job.
 
     Parameters
     ----------
@@ -128,9 +120,9 @@ def main(cfg):
     tools.create_dir(cfg.cosmo_restart_out, "cosmo_restart_out")
 
     # Copy cosmo executable
-    cfg.cosmo['execname'] = 'cosmo.exe'
+    cfg.cosmo_execname = Path(cfg.cosmo['binary_file']).name
     tools.copy_file(cfg.cosmo['binary_file'],
-                    os.path.join(cfg.cosmo_run, cfg.cosmo['execname']))
+                    cfg.cosmo_run / cfg.cosmo_execname)
 
     # Prepare namelist and submit job
     tracer_csvfile = os.path.join(cfg.chain_src_dir, 'cases', cfg.casename,
diff --git a/jobs/icon.py b/jobs/icon.py
index 393f740e..250df513 100644
--- a/jobs/icon.py
+++ b/jobs/icon.py
@@ -2,26 +2,19 @@
 # -*- coding: utf-8 -*-
 
 import logging
+from pathlib import Path
 from . import tools, prepare_icon
 
 BASIC_PYTHON_JOB = False
 
 
 def main(cfg):
-    """Setup the namelists for an ICON tracer run and submit the job to
+    """Set up the namelists for an ICON run and submit the job to
     the queue.
 
-    Necessary for both ICON and ICONART simulations.
-
-    Create necessary directory structure to run ICON (run, output, and
-    restart directories, defined in ``cfg.icon_work``, ``cfg.icon_output``,
-    and ``cfg.icon_restart_out``).
-
-    Copy the ICON-executable from ``cfg.icon_binary_file`` to ``cfg.icon_work/icon.exe``.
-    Use the tracer-csv-file to append ICON-namelist file.
-    Format the ICON-namelist-templates: ``icon_master.namelist.cfg, icon_NAMELIST_NWP.cfg``, using the information in ``cfg``.
@@ -40,9 +33,9 @@ def main(cfg):
                     "submit the job to the queue")
 
     # Copy icon executable
-    execname = 'icon.exe'
+    cfg.icon_execname = Path(cfg.icon['binary_file']).name
     tools.create_dir(cfg.icon_work, "icon_work")
-    tools.copy_file(cfg.icon_binary_file, cfg.icon_work / execname)
+    tools.copy_file(cfg.icon_binary_file, cfg.icon_work / cfg.icon_execname)
 
     # Symlink the restart file to the last run into the icon/run folder
     if cfg.lrestart == '.TRUE.':
diff --git a/jobs/icontools.py b/jobs/icontools.py
index 2ee7c2eb..a0f367b8 100644
--- a/jobs/icontools.py
+++ b/jobs/icontools.py
@@ -82,7 +82,8 @@ def main(cfg):
         merged_file = os.path.join(cfg.icon_input_icbc, merged_filename)
 
         # Copy GEOSP file from last run if not present
-        if not os.path.exists(geosp_file):
+        if hasattr(cfg,
+                   'icon_input_icbc_prev') and not os.path.exists(geosp_file):
             geosp_src_file = os.path.join(cfg.icon_input_icbc_prev,
                                           geosp_filename)
             tools.copy_file(geosp_src_file,
diff --git a/jobs/tools/ICON_to_point.py b/jobs/tools/ICON_to_point.py
new file mode 100644
index 00000000..65371be1
--- /dev/null
+++ b/jobs/tools/ICON_to_point.py
@@ -0,0 +1,355 @@
+import numpy as np
+import xarray as xr
+from sklearn.neighbors import BallTree
+import argparse
+
+
+def get_horizontal_distances(longitude, latitude, icon_grid_path, k=5):
+    """
+    Get horizontal distances between points and their k nearest
+    neighbours on the ICON grid using a quick BallTree algorithm
+
+    Parameters
+    ----------
+    longitude : list or 1D np.array
+        e.g., [12] or np.array([12,13,14])
+
+    latitude : list or 1D np.array
+        e.g., [52] or np.array([52,53,54])
+
+    icon_grid_path : str
+        Contains the path to the ICON grid
+
+    k : int, default is 5
+        Sets the number of nearest neighbours desired
+
+    Returns
+    -------
+    distances : 2D np.array
+        Contains the distance-on-a-sphere between the target point(s)
+        and its nearest neighbours
+
+    indices : 2D np.array
+        Contains the indices to the ICON grid cells of the corresponding
+        nearest neighbours
+    """
+    # Get ICON grid specifics
+    icon_grid = xr.open_dataset(icon_grid_path)
+    clon = icon_grid.clon.values
+    clat = icon_grid.clat.values
+
+    # Generate BallTree
+    icon_lat_lon = np.column_stack([clat, clon])
+    tree = BallTree(icon_lat_lon, metric='haversine')
+
+    # Query BallTree
+    target_lat_lon = np.column_stack(
+        [np.deg2rad(latitude), np.deg2rad(longitude)])
+    (distances, indices) = tree.query(target_lat_lon,
+                                      k=k,
+                                      return_distance=True)
+
+    if np.any(distances == 0):
+        print(
+            'The longitude/latitude coincides identically with an ICON cell, which is an issue for the inverse distance weighting.'
+        )
+        print('I will slightly modify this value to avoid errors.')
+        distances[distances == 0] = 1e-12
+
+    if np.any(np.isnan(distances)):
+        raise ValueError(
+            'The distance between ICON and your lat/lon point could not be established...'
+        )
+
+    # NB: the 'distances' are in units of radians; i.e., it assumes the Earth is a unit sphere!
+    # To get realistic distances, you need to multiply 'distances' with 6371e3 meters, i.e., the
+    # radius of the earth. However, such a constant factor cancels out when we compute the
+    # horizontal interpolation weights (which are normalized!), so there is no need to apply the
+    # multiplication with 6371e3.
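+    # Hypothetical worked example: a neighbour ~111 km away shows up here as
+    # distances ~0.0175 rad (0.0175 * 6371e3 m is ~111.5 km), yet the
+    # normalized weights w_i = (1/d_i) / sum_j(1/d_j) computed in
+    # icon_to_point() are identical whether d is in radians or meters.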
+ + return distances, indices + + +def get_nearest_vertical_distances(model_topography, model_levels, + base_height_msl, inlet_height_agl, + interpolation_strategy): + """ + Get the 2 nearest distances between ICON grid points and specified + station altitudes + + Parameters + ---------- + model_topography : 1D np.array + This is the elevation over mean sea level of the ICON grid + + model_levels : 2D np.array + Dimensions [ICON_heights, number_of_samples] + + base_height_msl : list or 1D np.array + e.g., [20,] or np.array([72,180,40]) + + inlet_height_agl : list or 1D np.array + e.g., [15,] or np.array([15, 21, 42]) + + interpolation_strategy : list of strings + e.g., ['ground',] or ['ground','mountain','ground'] + Can be 'ground' or 'mountain', or 'middle' (the latter is between the ground and mountain approach) + 'ground' uses the model topography + station altitude over ground + 'mountain' uses the absolute altitude over mean sea level + + Returns + ------- + vertical_distances : 3D np.array + Contains the absolute (!) distance between the target point(s) + and its 2 nearest neighbour levels + + vertical_indices: 3D np.array + Contains the indices to the ICON height levels of the corresponding 2 + nearest neighbour levels + """ + # Get the target sampling altitude with a list comprehension + target_altitude = [ + model_topography.isel({ + "station": i + }).values + inlet_height_agl[i] if strategy == 'ground' else + np.repeat(base_height_msl[i], model_topography.shape[1]) + + inlet_height_agl[i] if strategy == 'mountain' else + np.repeat(base_height_msl[i], model_topography.shape[1]) / 2 + + model_topography.isel({ + "station": i + }).values / 2 + inlet_height_agl[i] + # if strategy=='middle' + for (i, strategy) in enumerate(interpolation_strategy) + ] + target_altitude = xr.DataArray(target_altitude, dims=['station', 'ncells']) + + # Select 2 closest neighbouring levels + first_negative = (model_levels <= target_altitude).argmax( + dim=model_levels.dims[0]) # First index where model lies below target + vertical_indices = np.stack( + [first_negative, first_negative - 1], + axis=0) # Second index thus lies /above/ the target + vertical_indices[:, first_negative == 0] = model_levels.values.shape[ + 0] - 1 # If no result found: sample lies below lowest model level. 
Set it to the lowest model level
+
+    # Sample the corresponding vertical distances between the target and the model levels
+    vertical_distances = np.take_along_axis(
+        (model_levels - target_altitude).values, vertical_indices, axis=0)
+
+    return np.abs(vertical_distances).T, vertical_indices.T
+
+
+def icon_to_point(longitude,
+                  latitude,
+                  inlet_height_agl,
+                  base_height_msl,
+                  icon_field_path,
+                  icon_grid_path,
+                  interpolation_strategy,
+                  k=5,
+                  field_name=None):
+    """
+    Function to interpolate ICON fields to point locations
+
+    Parameters
+    ----------
+    longitude : list or 1D np.array
+        e.g., [12,] or np.array([12,13,14])
+
+    latitude : list or 1D np.array
+        e.g., [52,] or np.array([52,53,54])
+
+    inlet_height_agl : list or 1D np.array
+        e.g., [15,] or np.array([15, 21, 42])
+        This is the altitude of the *inlet above the ground*
+        (e.g., for Jungfraujoch: base_height_msl=3850,
+         inlet_height_agl=5)
+
+    base_height_msl : list or 1D np.array
+        e.g., [20,] or np.array([72,180,40])
+        This is the height of the *base station over mean sea level*
+        (e.g., for Cabauw: base_height_msl=0,
+         inlet_height_agl=27)
+
+    icon_field_path : str
+        Contains the path to the unstructured ICON output
+
+    icon_grid_path : str
+        Contains the path to the ICON grid
+
+    interpolation_strategy : list of strings
+        e.g., ['ground',] or ['ground','mountain','ground']
+        Can be 'ground' or 'mountain', or 'middle' (the latter is between the ground and mountain approach)
+        'ground' uses the model topography + station altitude over ground
+        'mountain' uses the absolute altitude over mean sea level
+
+    k : int, default is 5
+        Sets the number of horizontal nearest neighbours desired
+
+    field_name : str, or list of strings, optional
+        e.g. 'qv', or ['qv','temp'], or None
+        If no field_name is set, the whole dataset is interpolated
+        in the vertical and horizontal directions.
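+        (A hypothetical call: field_name=['qv', 'temp'] would restrict the
+        output to specific humidity and temperature only.)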
+
+    Returns
+    -------
+    xr.Dataset
+        An Xarray dataset organised by 'station', containing the original
+        input specifications, and the vertically and horizontally interpolated
+        values
+    """
+
+    # Load dataset
+    icon_field = xr.open_dataset(icon_field_path)
+    # Get dimension names
+    icon_heights = icon_field.z_mc.dims[
+        0]  # Dimension name (something like "heights_5")
+    icon_cells = icon_field.z_mc.dims[
+        1]  # Dimension name (something like "ncells")
+    icon_field[icon_cells] = icon_field[
+        icon_cells]  # Explicitly assign 'ncells'
+
+    # --- Horizontal grid selection & interpolation weights
+    # Get k nearest horizontal distances (for use in inverse distance weighting)
+    horizontal_distances, icon_grid_indices = get_horizontal_distances(
+        longitude, latitude, icon_grid_path, k=k)
+
+    horizontal_interp = 1 / horizontal_distances / (
+        1 / horizontal_distances).sum(axis=1, keepdims=True)
+    weights_horizontal = xr.DataArray(horizontal_interp,
+                                      dims=["station", icon_cells])
+    ind_X = xr.DataArray(icon_grid_indices, dims=["station", icon_cells])
+    icon_subset = icon_field.isel({icon_cells: ind_X})
+
+    # --- Vertical level selection & interpolation weights
+    # Get 2 nearest vertical distances (for use in linear interpolation)
+    model_topography = icon_subset.z_ifc[-1]
+    model_levels = icon_subset.z_mc
+    vertical_distances, icon_level_indices = get_nearest_vertical_distances(
+        model_topography, model_levels, base_height_msl, inlet_height_agl,
+        interpolation_strategy)
+
+    vertical_interp = vertical_distances[:, :, ::-1] / (vertical_distances.sum(
+        axis=-1, keepdims=True))
+    # Say, you have the point's vertical position, and the next two model layers are positioned at [-5, +15] meters offset.
+    # Then linear interpolation between those two points is simply [15/(15+5), 5/(15+5)]=[3/4 1/4]. That is what the code does (and why it reverses the order on the last axis; and why I only need the absolute vertical distances).
+    # (As a curiosity, linear interpolation is the same as inverse distance weighting with 2 points. But this formulation is more stable than the inverse distance weighting, as divisions with 0 may otherwise occur!)
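+    # Hypothetical numeric check of the computation above: absolute distances
+    # (5, 15) to the two bracketing levels reverse to (15, 5) and normalize
+    # to weights (0.75, 0.25), so the nearer level dominates as expected.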
+    weights_vertical = xr.DataArray(vertical_interp,
+                                    dims=["ncells", "station", icon_heights])
+    ind_Z = xr.DataArray(icon_level_indices,
+                         dims=["ncells", "station", icon_heights])
+
+    # --- Generate output
+    # Subset the ICON field if we want only a few fields of output
+    if field_name is not None:
+        icon_subset = icon_subset[field_name]
+    # Include the input station parameters in the output
+    ds = xr.Dataset({
+        'longitude': (['station'], longitude),
+        'latitude': (['station'], latitude),
+        'inlet_height_agl': (['station'], inlet_height_agl),
+        'base_height_msl': (['station'], base_height_msl),
+        'interpolation_strategy': (['station'], interpolation_strategy)
+    })
+    # Perform the interpolations
+    icon_subset = icon_subset.isel({icon_heights: ind_Z})
+    icon_out = icon_subset.weighted(weights_vertical.fillna(0)).sum(
+        dim=icon_heights,
+        skipna=True).weighted(weights_horizontal).sum(dim=icon_cells)
+    icon_out = icon_out.where(
+        ~(weights_vertical.sum(dim=[icon_cells, icon_heights],
+                               skipna=False)).isnull()
+    )  # Remove out of bounds values where weights_vertical has NaNs
+    return xr.merge([icon_out, ds])
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(
+        description='Interpolate ICON output to point locations.')
+    parser.add_argument('-lon',
+                        dest='longitude',
+                        default=None,
+                        type=float,
+                        help='Longitude of interpolation target')
+    parser.add_argument('-lat',
+                        dest='latitude',
+                        default=None,
+                        type=float,
+                        help='Latitude of interpolation target')
+    parser.add_argument(
+        '-asl',
+        dest='elevation',
+        default=None,
+        type=float,
+        help=
+        'Station surface elevation above sea level [absolute height asl: elevation+altitude]'
+    )
+    parser.add_argument(
+        '-alt',
+        dest='altitude',
+        default=None,
+        type=float,
+        help=
+        'Station altitude over surface [absolute height asl: elevation+altitude]'
+    )
+    parser.add_argument('-fields',
+                        dest='icon_field',
+                        default=None,
+                        type=str,
+                        help='The ICON output fields')
+    parser.add_argument('-grid',
+                        dest='icon_grid',
+                        default=None,
+                        type=str,
+                        help='The ICON dynamic grid file')
+    parser.add_argument(
+        '-strat',
+        dest='strategy',
+        default='ground',
+        type=str,
+        help=
+        'The interpolation strategy (should be "mountain", "ground", or "middle")'
+    )
+    parser.add_argument(
+        '-k',
+        dest='k',
+        default=4,
+        type=int,
+        help='Number of nearest neighbours to interpolate with (e.g., 4 or 5)')
+    parser.add_argument(
+        '-field_name',
+        dest='field_name',
+        default=None,
+        type=str,
+        help='Field name to extract (if left out, all variables are extracted)'
+    )
+    parser.add_argument('-output',
+                        dest='output_dest',
+                        default=None,
+                        type=str,
+                        help='Output NetCDF destination')
+    args = parser.parse_args()
+
+    # Example run (note: most inputs should be lists, and the performance is optimized for these lists!)
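+    # A hypothetical invocation (file names are placeholders):
+    #   python ICON_to_point.py -lon 8.6 -lat 47.4 -asl 3850 -alt 5 \
+    #       -fields icon_output.nc -grid icon_grid.nc -strat mountain \
+    #       -k 4 -output station_output.nc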
+    output = icon_to_point(longitude=[
+        args.longitude,
+    ],
+                           latitude=[
+                               args.latitude,
+                           ],
+                           base_height_msl=[
+                               args.elevation,
+                           ],
+                           inlet_height_agl=[
+                               args.altitude,
+                           ],
+                           icon_field_path=args.icon_field,
+                           icon_grid_path=args.icon_grid,
+                           interpolation_strategy=[
+                               args.strategy,
+                           ],
+                           k=args.k,
+                           field_name=args.field_name)
+    output.to_netcdf(args.output_dest)
diff --git a/run_chain.py b/run_chain.py
index 02b49950..3f27949b 100755
--- a/run_chain.py
+++ b/run_chain.py
@@ -3,11 +3,7 @@
 
 from datetime import datetime, timedelta
 import pytz
-
 import logging
-import os
-import sys
-import time
 import shutil
 import argparse
@@ -174,7 +170,7 @@ def run_chunk(cfg, force, resume):
                 print(f' └── Skipping "{job_name}" job')
                 skip = True
             else:
-                print(f' └── Starting "{job_name}" job')
+                print(f' └── Submitting "{job_name}" job')
 
                 # Logfile settings
                 cfg.logfile = cfg.log_working_dir / job_name
@@ -183,8 +179,7 @@
                 # Submit the job
                 job = getattr(jobs, job_name)
                 if hasattr(job, 'BASIC_PYTHON_JOB') and job.BASIC_PYTHON_JOB:
-                    script = cfg.create_sbatch_script(job_name)
-                    cfg.submit(job_name, script)
+                    cfg.submit_basic_python(job_name)
                 else:
                     job.main(cfg)
 
@@ -207,7 +202,8 @@
                     exitcode = 0
 
                 except Exception:
-                    subject = "ERROR or TIMEOUT in job '%s' for chain '%s'" % (
+                    exitcode = 1
+                    subject = "ERROR or TIMEOUT in job '%s' for chunk '%s'" % (
                         job_name, cfg.chunk_id)
                     logging.exception(subject)
                     if cfg.user_mail:
@@ -217,7 +213,7 @@
                         tools.send_mail(cfg.user_mail, subject, message)
 
                 if exitcode != 0 or not (cfg.log_finished_dir / job_name).exists():
-                    subject = "ERROR or TIMEOUT in job '%s' for chain '%s'" % (
+                    subject = "ERROR or TIMEOUT in job '%s' for chunk '%s'" % (
                         job_name, cfg.chunk_id)
                     if cfg.user_mail:
                         message = tools.prepare_message(cfg.log_working_dir /
@@ -351,7 +347,7 @@ def main():
 
     print("╔════════════════════════════════════════╗")
     print("║ Starting Processing Chain ║")
-    print("║════════════════════════════════════════║")
+    print("╠════════════════════════════════════════╣")
     print(f"║ Case: {casename: <27} ║")
     print(f"║ Workflow: {cfg.workflow_name: <27} ║")
     print("╚════════════════════════════════════════╝")
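
For context on the runjob template changes above: a minimal sketch (names and
paths are illustrative) assuming the chain renders these templates with
Python's str.format against the config object, which is how the new
{cfg.cosmo_execname} and {cfg.icon_execname} placeholders resolve:

    from pathlib import Path

    class Cfg:
        # Hypothetical binary path; jobs/cosmo.py derives this attribute
        # from cfg.cosmo['binary_file'] in the diff above
        cosmo_execname = Path('/project/bin/cosmo-ghg').name  # -> 'cosmo-ghg'

    template = 'srun -u ./{cfg.cosmo_execname} >> {logfile} 2>&1'
    print(template.format(cfg=Cfg(), logfile='cosmo.log'))
    # prints: srun -u ./cosmo-ghg >> cosmo.log 2>&1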