Server Submit & Setting Scripts #96
Base branch: main
New file, 43 lines added: a T3 settings module for the first server configuration.

```python
"""
T3's settings

You may keep a short version of this file in a local ".t3" folder under your home folder.
Any definitions made in the local file will take precedence over this file.
"""

# The execution type can be either 'incore', i.e., executed within the same process,
# or 'local', i.e., submitted to the server queue if running on a server.
# If running on a local server, ARC's settings for ``local`` will be used.
execution_type = {
    'rmg': 'local',
    'arc': 'incore',
}

servers = {
    'local': {
        'cluster_soft': 'PBS',
        'cpus': 16,
        'max mem': 40,  # GB
    },
}

check_status_command = {'OGE': 'export SGE_ROOT=/opt/sge; /opt/sge/bin/lx24-amd64/qstat -u $USER',
                        'Slurm': '/usr/bin/squeue -u $USER',
                        'PBS': '/usr/local/bin/qstat -u $USER',
                        'HTCondor': """condor_q -cons 'Member(Jobstatus,{1,2})' -af:j '{"0","P","R","X","C","H",">","S"}[JobStatus]' RequestCpus RequestMemory JobName '(Time() - EnteredCurrentStatus)'""",
                        }

submit_command = {'OGE': 'export SGE_ROOT=/opt/sge; /opt/sge/bin/lx24-amd64/qsub',
                  'Slurm': '/usr/bin/sbatch',
                  'PBS': '/usr/local/bin/qsub',
                  'HTCondor': 'condor_submit',
                  }

submit_filenames = {'OGE': 'submit.sh',
                    'Slurm': 'submit.sl',
                    'PBS': 'submit.sh',
                    'HTCondor': 'submit.sub',
                    }

rmg_initial_memory = 25  # The initial memory for an RMG job when submitted to the queue, in GB
```
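The docstring's override rule (a local `~/.t3/settings.py` takes precedence over the packaged defaults) implies a merge step at load time. Below is a minimal sketch of how such an overlay could work; the helper name and the name-by-name merge strategy are assumptions for illustration, not T3's actual loader.

```python
import importlib.util
import os


def overlay_local_settings(defaults: dict) -> dict:
    """Overlay definitions from ~/.t3/settings.py (if present) on top of the
    packaged defaults; any name defined locally wins. Hypothetical helper."""
    merged = dict(defaults)
    local_path = os.path.join(os.path.expanduser('~'), '.t3', 'settings.py')
    if os.path.isfile(local_path):
        spec = importlib.util.spec_from_file_location('local_t3_settings', local_path)
        local = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(local)
        for name in merged:
            if hasattr(local, name):
                merged[name] = getattr(local, name)  # local definition takes precedence
    return merged
```

For example, `overlay_local_settings({'execution_type': execution_type, 'servers': servers})` would replace only the names the local file actually redefines, leaving all other defaults intact.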
New file, 43 lines added: the T3 settings module for the second server configuration. It is identical to the first except that the PBS binaries live under `/opt/pbs/bin` rather than `/usr/local/bin`.

```python
"""
T3's settings

You may keep a short version of this file in a local ".t3" folder under your home folder.
Any definitions made in the local file will take precedence over this file.
"""

# The execution type can be either 'incore', i.e., executed within the same process,
# or 'local', i.e., submitted to the server queue if running on a server.
# If running on a local server, ARC's settings for ``local`` will be used.
execution_type = {
    'rmg': 'local',
    'arc': 'incore',
}

servers = {
    'local': {
        'cluster_soft': 'PBS',
        'cpus': 16,
        'max mem': 40,  # GB
    },
}

check_status_command = {'OGE': 'export SGE_ROOT=/opt/sge; /opt/sge/bin/lx24-amd64/qstat -u $USER',
                        'Slurm': '/usr/bin/squeue -u $USER',
                        'PBS': '/opt/pbs/bin/qstat -u $USER',
                        'HTCondor': """condor_q -cons 'Member(Jobstatus,{1,2})' -af:j '{"0","P","R","X","C","H",">","S"}[JobStatus]' RequestCpus RequestMemory JobName '(Time() - EnteredCurrentStatus)'""",
                        }

submit_command = {'OGE': 'export SGE_ROOT=/opt/sge; /opt/sge/bin/lx24-amd64/qsub',
                  'Slurm': '/usr/bin/sbatch',
                  'PBS': '/opt/pbs/bin/qsub',
                  'HTCondor': 'condor_submit',
                  }

submit_filenames = {'OGE': 'submit.sh',
                    'Slurm': 'submit.sl',
                    'PBS': 'submit.sh',
                    'HTCondor': 'submit.sub',
                    }

rmg_initial_memory = 25  # The initial memory for an RMG job when submitted to the queue, in GB
```

Review comment on `'max mem': 40`: "please bump this to 360 GB"
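Taken together, `servers['local']['cluster_soft']`, `submit_command`, and `submit_filenames` determine the one-liner used to hand a job to the queue. The sketch below shows how they could be composed; the helper and the `subprocess` call are assumptions about the surrounding tooling, not code from this PR.

```python
import subprocess


def submit_to_queue(server: dict, workdir: str) -> str:
    """Run the scheduler's submit command on its submit file in workdir.
    For the 'local' server above this executes '/opt/pbs/bin/qsub submit.sh'.
    Hypothetical helper."""
    soft = server['cluster_soft']  # e.g., 'PBS'
    cmd = f"{submit_command[soft]} {submit_filenames[soft]}"
    # shell=True is needed because the OGE entry chains an 'export ...;' prefix.
    result = subprocess.run(cmd, shell=True, cwd=workdir,
                            capture_output=True, text=True, check=True)
    return result.stdout.strip()  # the scheduler's reply, typically the job ID
```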
New file, 60 lines added: the submit-scripts module for the first server (HTCondor). The active `'rmg'` entry is an HTCondor submit description, and `'rmg_job'` is the `job.sh` it executes; a commented-out Slurm example precedes them.

```python
"""
Submit scripts
"""

# Submission scripts stored as a dictionary with software as the primary key.
submit_scripts = {
    # 'rmg': """#!/bin/bash -l
    #
    # #SBATCH -J {name}
    # #SBATCH -t 05-00:00:00
    # #SBATCH -o out.txt
    # #SBATCH -e err.txt
    # #SBATCH --ntasks={cpus}
    # #SBATCH --mem-per-cpu=9500
    #
    # export PYTHONPATH=$PYTHONPATH:~/Code/RMG-Py/
    #
    # conda activate rmg_env
    #
    # touch initial_time
    #
    # python-jl ~/Code/RMG-Py/rmg.py -n {cpus} input.py
    #
    # touch final_time
    #
    # """,
    'rmg': """Universe = vanilla

+JobName = "{name}"

log = job.log
output = out.txt
error = err.txt

getenv = True

should_transfer_files = no

executable = job.sh

request_cpus = {cpus}
request_memory = {memory}MB

queue

""",
    'rmg_job': """#!/bin/bash -l

touch initial_time

source /srv01/technion/$USER/.bashrc

conda activate rmg_env

python-jl /Local/ce_dana/Code/RMG-Py/rmg.py -n {cpus} input.py{max_iterations}

touch final_time

""",
}
```

Review comment on the commented-out block: "can remove the commented out section, this is an example for a Slurm server, it's enough to have it in the general submit.py file"
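The `{name}`, `{cpus}`, `{memory}`, and `{max_iterations}` fields look like `str.format` placeholders. Under that assumption, rendering the HTCondor entry means writing two files: the `submit.sub` description (per `submit_filenames['HTCondor']` in the settings above) and the `job.sh` it points at. A hypothetical rendering helper:

```python
import os


def write_htcondor_scripts(workdir: str, name: str, cpus: int, memory_mb: int,
                           max_iterations: str = '') -> None:
    """Render submit.sub and job.sh from the templates above (hypothetical helper).
    max_iterations is an optional CLI suffix appended after 'input.py', or '' for none."""
    with open(os.path.join(workdir, 'submit.sub'), 'w') as f:
        f.write(submit_scripts['rmg'].format(name=name, cpus=cpus, memory=memory_mb))
    with open(os.path.join(workdir, 'job.sh'), 'w') as f:
        f.write(submit_scripts['rmg_job'].format(cpus=cpus, max_iterations=max_iterations))
    os.chmod(os.path.join(workdir, 'job.sh'), 0o755)  # HTCondor executes job.sh directly
```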
New file, 81 lines added: the submit-scripts module for the second server (PBS, queue `zeus_long_q`). Commented-out Slurm, HTCondor, and `rmg_job` examples precede the active PBS script.

```python
"""
Submit scripts
"""

# Submission scripts stored as a dictionary with software as the primary key.
submit_scripts = {
    # 'rmg': """#!/bin/bash -l
    #
    # #SBATCH -J {name}
    # #SBATCH -t 05-00:00:00
    # #SBATCH -o out.txt
    # #SBATCH -e err.txt
    # #SBATCH --ntasks={cpus}
    # #SBATCH --mem-per-cpu={memory / cpus}
    #
    # export PYTHONPATH=$PYTHONPATH:~/Code/RMG-Py/
    #
    # conda activate rmg_env
    #
    # touch initial_time
    #
    # python-jl ~/Code/RMG-Py/rmg.py -n {cpus} input.py{max_iterations}
    #
    # touch final_time
    #
    # """,
    # 'rmg': """Universe = vanilla
    #
    # +JobName = "{name}"
    #
    # log = job.log
    # output = out.txt
    # error = err.txt
    #
    # getenv = True
    #
    # should_transfer_files = no
    #
    # executable = job.sh
    #
    # request_cpus = {cpus}
    # request_memory = {memory}MB
    #
    # queue
    #
    # """,
    # 'rmg_job': """#!/bin/bash -l
    #
    # touch initial_time
    #
    # source /srv01/technion/$USER/.bashrc
    #
    # conda activate rmg_env
    #
    # python-jl /Local/ce_dana/Code/RMG-Py/rmg.py -n {cpus} input.py{max_iterations}
    #
    # touch final_time
    #
    # """,
    'rmg': """#!/bin/bash -l

#PBS -N {name}
#PBS -q zeus_long_q
#PBS -l walltime=168:00:00
#PBS -l select=1:ncpus={cpus}
#PBS -o out.txt
#PBS -e err.txt

PBS_O_WORKDIR={workdir}
cd $PBS_O_WORKDIR

conda activate rmg_env

touch initial_time

python-jl ~/Code/RMG-Py/rmg.py -n {cpus} input.py{max_iterations}

touch final_time

""",
}
```

Review comment on the commented-out sections: "can remove the commented out section, this is an example for a Slurm server, it's enough to have it in the general submit.py file"
Review comment: "max cpu on Atlas is 10"
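The PBS variant adds a `{workdir}` placeholder, since the script sets `PBS_O_WORKDIR` itself rather than relying on the scheduler. Under the same `str.format` assumption as above, a minimal rendering sketch (the helper name is hypothetical):

```python
import os


def write_pbs_script(workdir: str, name: str, cpus: int, max_iterations: str = '') -> None:
    """Render the PBS submit script into workdir as submit.sh
    (per submit_filenames['PBS']); hypothetical helper."""
    content = submit_scripts['rmg'].format(name=name, cpus=cpus,
                                           workdir=workdir, max_iterations=max_iterations)
    with open(os.path.join(workdir, 'submit.sh'), 'w') as f:
        f.write(content)
```

Given the closing review comment, a caller targeting Atlas would also cap the requested CPUs, e.g. `cpus = min(cpus, 10)`, before rendering.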