Modified MRCI implementation in Molpro and Orca adapters #746

Open · wants to merge 16 commits into base: main
90 changes: 52 additions & 38 deletions arc/job/adapters/molpro.py
@@ -8,12 +8,10 @@
import math
import os
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import socket

from mako.template import Template

from arc.common import get_logger
from arc.exceptions import JobError
from arc.imports import incore_commands, settings
from arc.job.adapter import JobAdapter
from arc.job.adapters.common import (_initialize_adapter,
@@ -37,7 +35,7 @@
settings['output_filenames'], settings['servers'], settings['submit_filenames']

input_template = """***,${label}
memory,${memory},m;
memory,Total=${memory},m;
Collaborator:
This also requires a change to the defaults we use in the settings.

Member Author:
The memory in settings.py is the maximum memory the user allows for troubleshooting purposes (when ARC needs to increase the memory, that is the limit). It shouldn't affect the memory in the input file.


geometry={angstrom;
${xyz}}
@@ -47,11 +45,13 @@
${auxiliary_basis}
${cabs}
int;

{hf;${shift}
maxit,1000;
wf,spin=${spin},charge=${charge};}
maxit,999;
wf,spin=${spin},charge=${charge};
}

${restricted}${method};
${restricted}${method}

${job_type_1}
${job_type_2}${block}
@@ -213,7 +213,6 @@
'keywords',
'memory',
'method',
'orbitals',
'restricted',
]:
input_dict[key] = ''
@@ -224,10 +223,11 @@
input_dict['charge'] = self.charge
input_dict['label'] = self.species_label
input_dict['memory'] = self.input_file_memory
input_dict['method'] = self.level.method
input_dict['method'] = f'{self.level.method};'
input_dict['shift'] = self.args['trsh']['shift'] if 'shift' in self.args['trsh'].keys() else ''
input_dict['spin'] = self.multiplicity - 1
input_dict['xyz'] = xyz_to_str(self.xyz)
input_dict['orbitals'] = '\ngprint,orbitals;\n'

if not is_restricted(self):
input_dict['restricted'] = 'u'
@@ -249,23 +249,44 @@
pass

if 'IGNORE_ERROR in the ORBITAL directive' in self.args['trsh'].keys():
keywords.append('ORBITAL,IGNORE_ERROR')

if 'mrci' in self.level.method:
if self.species[0].occ > 16:
raise JobError(f'Will not execute an MRCI calculation with more than 16 occupied orbitals '
f'(got {self.species[0].occ}).\n'
f'Selective occ, closed, core, frozen keyword still not implemented.')
input_dict['orbitals'] = '\ngprint,orbitals;\n'
input_dict['block'] += '\n\nE_mrci=energy;\nE_mrci_Davidson=energd;\n\ntable,E_mrci,E_mrci_Davidson;'
input_dict['method'] = input_dict['rerstricted'] = ''
input_dict['shift'] = 'shift,-1.0,-0.5;'
input_dict['job_type_1'] = f"""{{multi;
{self.species[0].occ}noextra,failsafe,config,csf;
wf,spin={input_dict['spin']},charge={input_dict['charge']};
natorb,print,ci;}}"""
input_dict['job_type_2'] = f"""{{mrci;
${self.species[0].occ}wf,spin=${input_dict['spin']},charge=${input_dict['charge']};}}"""
keywords.append(' ORBITAL,IGNORE_ERROR;')

Codecov / codecov/patch: added line arc/job/adapters/molpro.py#L252 was not covered by tests.

if 'mrci' in self.level.method or 'rs2' in self.level.method:
active = self.species[0].active
input_dict['restricted'] = ''
if '_' in self.level.method:
methods = self.level.method.split('_')
input_dict['method'] = ''
for method in methods:
input_dict['method'] += '\n{' + method.lower() + ';\n'
if 'mp2' not in method.lower():
input_dict['method'] += ' maxit,999;\n'
input_dict['method'] += f' wf,spin={input_dict["spin"]},charge={input_dict["charge"]};\n'
if 'casscf' in method.lower() and active is not None:
if 'occ' in active:
input_dict['method'] += f' occ,{",".join([str(i) for i in active["occ"]])};\n'
if 'closed' in active:
input_dict['method'] += f' closed,{",".join([str(i) for i in active["closed"]])};\n'
input_dict['method'] += ' state,1;\n' # ground state
input_dict['method'] += '}\n'
else:
input_dict['method'] = f"""{{casscf;
maxit,999;
wf,spin={input_dict['spin']},charge={input_dict['charge']};
"""
if active is not None:
if 'occ' in active:
input_dict['method'] += f' occ,{",".join([str(i) for i in active["occ"]])};\n'
if 'closed' in active:
input_dict['method'] += f' closed,{",".join([str(i) for i in active["closed"]])};\n'
input_dict['method'] += ' state,1;\n' # ground state

Codecov / codecov/patch: added line arc/job/adapters/molpro.py#L282 was not covered by tests.
input_dict['method'] += '}\n\n'
input_dict['method'] += f"""{{mrci{"-f12" if "f12" in self.level.method.lower() else ""};
maxit,999;
wf,spin={input_dict['spin']},charge={input_dict['charge']};
}}"""
if 'mrci' in self.level.method:
input_dict['block'] += '\n\nE_mrci=energy;\nE_mrci_Davidson=energd;\n\ntable,E_mrci,E_mrci_Davidson;'

input_dict = update_input_dict_with_args(args=self.args, input_dict=input_dict)
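
For reference, a minimal standalone sketch of the per-method loop above, for the branch taken when the level method contains an underscore (e.g. `casscf_mrci`). The spin, charge, and active-space values below are made up for illustration, and the sketch ignores the single-method branch and the trailing mrci block the adapter appends afterwards.

```python
# Illustrative reproduction of the underscore-separated method loop; all values are hypothetical.
level_method = 'casscf_mrci'
spin, charge = 1, 0                    # e.g., a doublet, neutral species
active = {'occ': [9], 'closed': [4]}   # hypothetical active-space specification

method = ''
for sub_method in level_method.split('_'):
    method += '\n{' + sub_method.lower() + ';\n'
    if 'mp2' not in sub_method.lower():
        method += ' maxit,999;\n'
    method += f' wf,spin={spin},charge={charge};\n'
    if 'casscf' in sub_method.lower() and active is not None:
        if 'occ' in active:
            method += f' occ,{",".join(str(i) for i in active["occ"])};\n'
        if 'closed' in active:
            method += f' closed,{",".join(str(i) for i in active["closed"])};\n'
        method += ' state,1;\n'  # ground state
    method += '}\n'

print(method)
# {casscf;
#  maxit,999;
#  wf,spin=1,charge=0;
#  occ,9;
#  closed,4;
#  state,1;
# }
#
# {mrci;
#  maxit,999;
#  wf,spin=1,charge=0;
# }
```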

@@ -325,19 +346,12 @@
"""
Set the input_file_memory attribute.
"""
# Molpro's memory is per cpu core and in MW (mega word; 1000 MW = 7.45 GB on a 64-bit machine)
# The conversion from mW to GB was done using this (https://deviceanalytics.com/words-to-bytes-converter/)
# specifying a 64-bit architecture.
#
# See also:
# https://www.molpro.net/pipermail/molpro-user/2010-April/003723.html
# In the link, they describe the conversion of 100,000,000 Words (100Mword) is equivalent to
# 800,000,000 bytes (800 mb).
# Formula - (100,000,000 [Words]/( 800,000,000 [Bytes] / (job mem in gb * 1000,000,000 [Bytes])))/ 1000,000 [Words -> MegaWords]
# The division by 1E6 is for converting into MWords
# Due to Zeus's configuration, there is only 1 nproc so the memory should not be divided by cpu_cores.
self.input_file_memory = math.ceil(self.job_memory_gb / (7.45e-3 * self.cpu_cores)) if 'zeus' not in socket.gethostname() else math.ceil(self.job_memory_gb / (7.45e-3))

# Molpro's memory is per cpu core, but here we ask for Total memory.
# Molpro measures memory in MW (mega word; 1000 MW = 7.45 GB on a 64-bit machine)
# The conversion from mW to GB was done using https://www.molpro.net/manual/doku.php?id=general_program_structure#memory_option_in_command_line
# 3.2 GB = 100 mw (case sensitive) total (as in this implementation) -> 31.25 mw/GB is the conversion rate
self.input_file_memory = math.ceil(self.job_memory_gb * 31.25)
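
For reference, a quick sanity check on the 31.25 MW-per-GB factor; the 15 GB request below is an arbitrary example, not a value from the PR.

```python
import math

job_memory_gb = 15                                    # hypothetical ARC job memory request, in GB
input_file_memory = math.ceil(job_memory_gb * 31.25)  # convert GB to Molpro megawords (MW)
print(input_file_memory)  # 469 -> rendered in the template as "memory,Total=469,m;"
```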

def execute_incore(self):
"""
Execute a job incore.