diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml index cafdc811bb..dd5d8f93dc 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -89,6 +89,7 @@ jobs: micromamba activate a2 pytest --splits 3 --group ${{ matrix.split }} --durations-path tests/.pytest-split-durations --splitting-algorithm least_duration --ignore=tests/ase --cov=atomate2 --cov-report=xml + - uses: codecov/codecov-action@v1 if: matrix.python-version == '3.10' && github.repository == 'materialsproject/atomate2' with: @@ -150,7 +151,7 @@ jobs: - name: Test Notebooks run: | micromamba activate a2 - pytest --nbmake ./tutorials --ignore=./tutorials/openmm_tutorial.ipynb + pytest --nbmake ./tutorials --ignore=./tutorials/openmm_tutorial.ipynb --ignore=./tutorials/force_fields - name: Test ASE env: @@ -165,6 +166,81 @@ jobs: token: ${{ secrets.CODECOV_TOKEN }} file: ./coverage.xml + test-force-field-notebook: + # prevent this action from running on forks + if: github.repository == 'materialsproject/atomate2' + + services: + local_mongodb: + image: mongo:4.0 + ports: + - 27017:27017 + + runs-on: ubuntu-latest + defaults: + run: + shell: bash -l {0} # enables conda/mamba env activation by reading bash profile + strategy: + matrix: + python-version: ["3.10", "3.11", "3.12"] + + steps: + - name: Check out repo + uses: actions/checkout@v4 + + - name: Set up micromamba + uses: mamba-org/setup-micromamba@main + + - name: Create mamba environment + run: | + micromamba create -n a2 python=${{ matrix.python-version }} --yes + + - name: Install uv + run: micromamba run -n a2 pip install uv + + - name: Install conda dependencies + run: | + micromamba install -n a2 -c conda-forge enumlib packmol bader openbabel openff-toolkit==0.16.2 openff-interchange==0.3.22 --yes + + - name: Install dependencies + run: | + micromamba activate a2 + python -m pip install --upgrade pip + mkdir -p ~/.abinit/pseudos + cp -r tests/test_data/abinit/pseudos/ONCVPSP-PBE-SR-PDv0.4 ~/.abinit/pseudos + uv pip install .[strict,strict-forcefields,tests,abinit] + uv pip install torch-runstats + uv pip install --no-deps nequip==0.5.6 + + - name: Install pymatgen from master if triggered by pymatgen repo dispatch + if: github.event_name == 'repository_dispatch' && github.event.action == 'pymatgen-ci-trigger' + run: | + micromamba activate a2 + uv pip install --upgrade 'git+https://github.com/materialsproject/pymatgen@${{ github.event.client_payload.pymatgen_ref }}' + + - name: Forcefield tutorial + env: + MP_API_KEY: ${{ secrets.MP_API_KEY }} + + # regenerate durations file with `pytest --store-durations --durations-path tests/.pytest-split-durations` + # Note the use of `--splitting-algorithm least_duration`. + # This helps prevent a test split having no tests to run, and then the GH action failing, see: + # https://github.com/jerry-git/pytest-split/issues/95 + # However this `splitting-algorithm` means that tests cannot depend sensitively on the order they're executed in. 
+        run: |
+          micromamba activate a2
+          pytest --nbmake ./tutorials/force_fields
+
+
+      - uses: codecov/codecov-action@v1
+        if: matrix.python-version == '3.10' && github.repository == 'materialsproject/atomate2'
+        with:
+          token: ${{ secrets.CODECOV_TOKEN }}
+          name: coverage
+          file: ./coverage.xml
+
+
+
   docs:
     runs-on: ubuntu-latest
@@ -186,7 +262,7 @@ jobs:
         run: sphinx-build docs docs_build
 
   automerge:
-    needs: [lint, test-non-ase, test-notebooks-and-ase, docs]
+    needs: [lint, test-non-ase, test-notebooks-and-ase, test-force-field-notebook, docs]
     runs-on: ubuntu-latest
 
     permissions:
diff --git a/docs/user/codes/vasp.md b/docs/user/codes/vasp.md
index d9788e1a98..a75cc0664c 100644
--- a/docs/user/codes/vasp.md
+++ b/docs/user/codes/vasp.md
@@ -248,6 +248,25 @@ adjust them if necessary.
 The default might not be strict enough for your specific case.
 ```
+You can use the following code to start the standard version of the workflow:
+```py
+from atomate2.vasp.flows.phonons import PhononMaker
+from pymatgen.core.structure import Structure
+
+structure = Structure(
+    lattice=[[0, 2.13, 2.13], [2.13, 0, 2.13], [2.13, 2.13, 0]],
+    species=["Mg", "O"],
+    coords=[[0, 0, 0], [0.5, 0.5, 0.5]],
+)
+
+phonon_flow = PhononMaker(min_length=15.0, store_force_constants=False).make(
+    structure=structure
+)
+```
+
+
+
+
 ### Gruneisen parameter workflow
 
 Calculates mode-dependent Grüneisen parameters with the help of Phonopy.
@@ -352,8 +371,85 @@ lobster = update_user_incar_settings(lobster, {"NPAR": 4})
 run_locally(lobster, create_folders=True, store=SETTINGS.JOB_STORE)
 ```
-It is, however, computationally very beneficial to define two different types of job scripts for the VASP and Lobster runs, as VASP and Lobster runs are parallelized differently (MPI vs. OpenMP).
-[FireWorks](https://github.com/materialsproject/fireworks) allows one to run the VASP and Lobster jobs with different job scripts. Please check out the [jobflow documentation on FireWorks](https://materialsproject.github.io/jobflow/tutorials/8-fireworks.html#setting-the-manager-configs) for more information.
+There are currently three ways to run the workflow efficiently, as VASP and LOBSTER rely on different parallelization schemes (MPI vs. OpenMP).
+One can use a job script (with some restrictions), or [Jobflow-remote](https://matgenix.github.io/jobflow-remote/) / [Fireworks](https://github.com/materialsproject/fireworks) for high-throughput runs.
+
+
+#### Running the LOBSTER workflow without database and with one job script only
+
+It is possible to run the VASP-LOBSTER workflow efficiently with a minimal setup.
+In this case, you will run the VASP calculations on the same node as the LOBSTER calculations.
+In between the different computations, you will switch from MPI to OpenMP parallelization.
+
+For example, for a node with 48 cores, you could use an adapted version of the following SLURM script:
+
+```bash
+#!/bin/bash
+#SBATCH -J vasplobsterjob
+#SBATCH -o ./%x.%j.out
+#SBATCH -e ./%x.%j.err
+#SBATCH -D ./
+#SBATCH --mail-type=END
+#SBATCH --mail-user=you@you.de
+#SBATCH --time=24:00:00
+#SBATCH --nodes=1
+#This needs to be adapted if you run with different cores
+#SBATCH --ntasks=48
+
+# ensure you load the modules to run VASP, e.g., module load vasp
+module load my_vasp_module
+# please activate the required conda environment
+conda activate my_environment
+cd my_folder
+# the following script needs to contain the workflow
+python xyz.py
+```
+
+The `LOBSTER_CMD` now needs an additional export of the number of threads.
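+
+As a point of orientation, `xyz.py` could look roughly like the following sketch. The structure file name and the use of the default `VaspLobsterMaker` settings are placeholders only; adapt them to your project:
+
+```py
+from jobflow import SETTINGS, run_locally
+from pymatgen.core.structure import Structure
+
+from atomate2.vasp.flows.lobster import VaspLobsterMaker
+
+# load the structure you want to analyze (placeholder file name)
+structure = Structure.from_file("POSCAR")
+
+# build the combined VASP + LOBSTER flow and run it on the allocated node
+lobster = VaspLobsterMaker().make(structure)
+run_locally(lobster, create_folders=True, store=SETTINGS.JOB_STORE)
+```
+
+The thread export is then added directly to the `LOBSTER_CMD` setting: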
+ +```yaml +VASP_CMD: <> +LOBSTER_CMD: OMP_NUM_THREADS=48 <> +``` + + +#### Jobflow-remote +Please refer first to the general documentation of jobflow-remote: [https://matgenix.github.io/jobflow-remote/](https://matgenix.github.io/jobflow-remote/). + +```py +from atomate2.vasp.flows.lobster import VaspLobsterMaker +from pymatgen.core.structure import Structure +from jobflow_remote import submit_flow, set_run_config +from atomate2.vasp.powerups import update_user_incar_settings + +structure = Structure( + lattice=[[0, 2.13, 2.13], [2.13, 0, 2.13], [2.13, 2.13, 0]], + species=["Mg", "O"], + coords=[[0, 0, 0], [0.5, 0.5, 0.5]], +) + +lobster = VaspLobsterMaker().make(structure) + +resources = {"nodes": 3, "partition": "micro", "time": "00:55:00", "ntasks": 144} + +resources_lobster = {"nodes": 1, "partition": "micro", "time": "02:55:00", "ntasks": 48} +lobster = set_run_config(lobster, name_filter="lobster", resources=resources_lobster) + +lobster = update_user_incar_settings(lobster, {"NPAR": 4}) +submit_flow(lobster, worker="my_worker", resources=resources, project="my_project") +``` + +The `LOBSTER_CMD` also needs an export of the threads. + +```yaml +VASP_CMD: <> +LOBSTER_CMD: OMP_NUM_THREADS=48 <> +``` + + + +#### Fireworks +Please first refer to the general documentation on running atomate2 workflows with fireworks: [https://materialsproject.github.io/atomate2/user/fireworks.html](https://materialsproject.github.io/atomate2/user/fireworks.html) Specifically, you might want to change the `_fworker` for the LOBSTER runs and define a separate `lobster` worker within FireWorks: @@ -389,6 +485,16 @@ lpad = LaunchPad.auto_load() lpad.add_wf(wf) ``` + +The `LOBSTER_CMD` can now be adapted to not include the number of threads: + +```yaml +VASP_CMD: <> +LOBSTER_CMD: <> +``` + +#### Analyzing outputs + Outputs from the automatic analysis with LobsterPy can easily be extracted from the database and also plotted: ```py @@ -425,42 +531,6 @@ for number, (key, cohp) in enumerate( plotter.save_plot(f"plots_cation_anion_bonds{number}.pdf") ``` -#### Running the LOBSTER workflow without database and with one job script only - -It is also possible to run the VASP-LOBSTER workflow with a minimal setup. -In this case, you will run the VASP calculations on the same node as the LOBSTER calculations. -In between, the different computations you will switch from MPI to OpenMP parallelization. - -For example, for a node with 48 cores, you could use an adapted version of the following SLURM script: - -```bash -#!/bin/bash -#SBATCH -J vasplobsterjob -#SBATCH -o ./%x.%j.out -#SBATCH -e ./%x.%j.err -#SBATCH -D ./ -#SBATCH --mail-type=END -#SBATCH --mail-user=you@you.de -#SBATCH --time=24:00:00 -#SBATCH --nodes=1 -#This needs to be adapted if you run with different cores -#SBATCH --ntasks=48 - -# ensure you load the modules to run VASP, e.g., module load vasp -module load my_vasp_module -# please activate the required conda environment -conda activate my_environment -cd my_folder -# the following script needs to contain the workflow -python xyz.py -``` - -The `LOBSTER_CMD` now needs an additional export of the number of threads. 
- -```yaml -VASP_CMD: <> -LOBSTER_CMD: OMP_NUM_THREADS=48 <> -``` (modifying_input_sets)= Modifying input sets diff --git a/pyproject.toml b/pyproject.toml index 0dfadb588c..d1f8818f29 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -123,7 +123,7 @@ strict = [ strict-forcefields = [ "calorine==3.0", "chgnet==0.4.0", - "mace-torch>=0.3.6", + "mace-torch==0.3.10", "matgl==1.1.3", "quippy-ase==0.9.14; python_version < '3.12'", "sevenn==0.10.3", diff --git a/src/atomate2/lobster/schemas.py b/src/atomate2/lobster/schemas.py index ae4352669e..d67a2782d1 100644 --- a/src/atomate2/lobster/schemas.py +++ b/src/atomate2/lobster/schemas.py @@ -597,6 +597,7 @@ def from_directory( "bva_comp": True, **calc_quality_kwargs, } + cal_quality_dict = Analysis.get_lobster_calc_quality_summary( path_to_poscar=structure_path, path_to_vasprun=vasprun_path, @@ -844,34 +845,32 @@ def from_directory( calc_quality_text = None describe = None describe_ionic = None - if analyze_outputs: - if ( - icohplist_path.exists() - and cohpcar_path.exists() - and charge_path.exists() - ): - ( - condensed_bonding_analysis, - describe, - sb_all, - ) = CondensedBondingAnalysis.from_directory( - dir_name, - save_cohp_plots=save_cohp_plots, - plot_kwargs=plot_kwargs, - lobsterpy_kwargs=lobsterpy_kwargs, - which_bonds="all", - ) - ( - condensed_bonding_analysis_ionic, - describe_ionic, - sb_ionic, - ) = CondensedBondingAnalysis.from_directory( - dir_name, - save_cohp_plots=save_cohp_plots, - plot_kwargs=plot_kwargs, - lobsterpy_kwargs=lobsterpy_kwargs, - which_bonds="cation-anion", - ) + + if analyze_outputs and ( + icohplist_path.exists() and cohpcar_path.exists() and charge_path.exists() + ): + ( + condensed_bonding_analysis, + describe, + sb_all, + ) = CondensedBondingAnalysis.from_directory( + dir_name, + save_cohp_plots=save_cohp_plots, + plot_kwargs=plot_kwargs, + lobsterpy_kwargs=lobsterpy_kwargs, + which_bonds="all", + ) + ( + condensed_bonding_analysis_ionic, + describe_ionic, + sb_ionic, + ) = CondensedBondingAnalysis.from_directory( + dir_name, + save_cohp_plots=save_cohp_plots, + plot_kwargs=plot_kwargs, + lobsterpy_kwargs=lobsterpy_kwargs, + which_bonds="cation-anion", + ) # Get lobster calculation quality summary data calc_quality_summary = CalcQualitySummary.from_directory( @@ -971,7 +970,9 @@ def from_directory( if describe_ionic is not None else None, strongest_bonds_cation_anion=sb_ionic, - calc_quality_summary=calc_quality_summary, + calc_quality_summary=calc_quality_summary + if calc_quality_summary is not None + else None, calc_quality_text=" ".join(calc_quality_text) if calc_quality_text is not None else None, diff --git a/src/atomate2/utils/testing/lobster.py b/src/atomate2/utils/testing/lobster.py new file mode 100644 index 0000000000..9808deb90f --- /dev/null +++ b/src/atomate2/utils/testing/lobster.py @@ -0,0 +1,197 @@ +"""Utilities for testing LOBSTER calculations.""" + +from __future__ import annotations + +import logging +import shutil +from pathlib import Path +from typing import TYPE_CHECKING + +import pytest +from pymatgen.io.lobster import Lobsterin + +import atomate2.lobster.jobs +import atomate2.lobster.run + +if TYPE_CHECKING: + from collections.abc import Callable, Generator, Sequence + +logger = logging.getLogger("atomate2") + +_LFILES = "lobsterin" +_DFT_FILES = ("WAVECAR", "POSCAR", "INCAR", "KPOINTS", "POTCAR") +_LOBS_REF_PATHS: dict[str, str | Path] = {} +_FAKE_RUN_LOBSTER_KWARGS: dict[str, dict[str, Sequence]] = {} + + +@pytest.fixture(scope="session") +def lobster_test_dir(test_dir: str | 
Path) -> Path: + """Fixture to provide the test directory for LOBSTER tests. + + Args: + test_dir: The base test directory. + + Returns + ------- + Path: The test directory for LOBSTER tests. + """ + return Path(test_dir) / "lobster" + + +def monkeypatch_lobster( + monkeypatch: pytest.MonkeyPatch, lobster_test_dir: Path +) -> Generator[ + Callable[[dict[str, str | Path], dict[str, dict[str, Sequence]]], None], + None, + None, +]: + """Monkeypatch LOBSTER run calls for testing purposes. + + This generator can be used as a context manager or pytest fixture ("mock_lobster"). + It replaces calls to run_lobster with a mock function that copies reference files + instead of running LOBSTER. + + The primary idea is that instead of running LOBSTER to + generate the output files, reference files will be copied + into the directory instead. This ensures that the calculation + inputs are generated correctly and that the outputs are + parsed properly. + + To use the fixture successfully, follow these steps: + 1. Include "mock_lobster" as an argument to any test that + would like to use its functionality. + 2. For each job in your workflow, prepare a reference + directory containing two folders: + "inputs" (containing the reference input files expected + to be produced by Lobsterin.standard_calculations_from_vasp_files) + and "outputs" (containing the expected + output files to be produced by run_lobster). + These files should reside in a subdirectory + of "tests/test_data/lobster". + 3. Create a dictionary mapping each job name + to its reference directory. Note that you should + supply the reference directory relative + to the "tests/test_data/lobster" folder. For example, + if your calculation has one job named + "lobster_run_0" and the reference files are present in + "tests/test_data/lobster/Si_lobster_run_0", + the dictionary would look like: + {"lobster_run_0": "Si_lobster_run_0"}. + 4. Optionally, create a dictionary mapping each + job name to custom keyword arguments that will be + supplied to fake_run_lobster. This way you can + configure which lobsterin settings are expected + for each job. For example, if your calculation + has one job named "lobster_run_0" and you wish + to validate that "basisfunctions" is set correctly + in the lobsterin, your dictionary would look like: + {"lobster_run_0": {"lobsterin_settings": + {"basisfunctions": Ba 5p 5s 6s}}. + 5. Inside the test function, call + `mock_lobster(ref_paths, fake_lobster_kwargs)`, + where ref_paths is the + dictionary created in step 3 and + fake_lobster_kwargs is the dictionary created in step 4. + 6. Run your LOBSTER job after calling `mock_lobster`. + + Args: + monkeypatch (pytest.MonkeyPatch): + The pytest monkeypatch fixture. + lobster_test_dir (Path): + The directory containing reference files for LOBSTER tests. 
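+
+    Example:
+        A minimal sketch of a test using the "mock_lobster" fixture
+        (the job name and reference directory are illustrative only):
+
+            def test_lobster_flow(mock_lobster):
+                ref_paths = {"lobster_run_0": "Si_lobster_run_0"}
+                fake_kwargs = {"lobster_run_0": {"lobsterin_settings": ["basisfunctions"]}}
+                mock_lobster(ref_paths, fake_kwargs)
+                # now build and run the LOBSTER flow, e.g. with jobflow's run_locally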
+ """ + + def mock_run_lobster(*_args, **_kwargs) -> None: + from jobflow import CURRENT_JOB + + name = CURRENT_JOB.job.name + ref_path = lobster_test_dir / _LOBS_REF_PATHS[name] + fake_run_lobster(ref_path, **_FAKE_RUN_LOBSTER_KWARGS.get(name, {})) + + monkeypatch.setattr(atomate2.lobster.run, "run_lobster", mock_run_lobster) + monkeypatch.setattr(atomate2.lobster.jobs, "run_lobster", mock_run_lobster) + + def _run( + ref_paths: dict[str, str | Path], + fake_run_lobster_kwargs: dict[str, dict[str, Sequence]], + ) -> None: + _LOBS_REF_PATHS.update(ref_paths) + _FAKE_RUN_LOBSTER_KWARGS.update(fake_run_lobster_kwargs) + + yield _run + + monkeypatch.undo() + _LOBS_REF_PATHS.clear() + + +def fake_run_lobster( + ref_path: str | Path, + check_lobster_inputs: Sequence[str] = _LFILES, + check_dft_inputs: Sequence[str] = _DFT_FILES, + lobsterin_settings: Sequence[str] = (), +) -> None: + """ + Emulate running LOBSTER and validate LOBSTER input files. + + Parameters + ---------- + ref_path + Path to reference directory with VASP input files in the folder named 'inputs' + and output files in the folder named 'outputs'. + check_lobster_inputs + A list of lobster input files to check. Supported options are "lobsterin.gz". + check_dft_inputs + A list of VASP files that need to be copied to start the LOBSTER runs. + lobsterin_settings + A list of LOBSTER settings to check. + """ + logger.info("Running fake LOBSTER.") + ref_path = Path(ref_path) + + # Checks if DFT files have been copied + for file in check_dft_inputs: + Path(file).exists() + logger.info("Verified copying of VASP files successfully") + # zipped or not zipped? + if "lobsterin" in check_lobster_inputs: + verify_inputs(ref_path, lobsterin_settings) + + logger.info("Verified LOBSTER inputs successfully") + + copy_lobster_outputs(ref_path) + + # pretend to run LOBSTER by copying pre-generated outputs from reference dir + logger.info("ran fake LOBSTER, generated outputs") + + +def verify_inputs(ref_path: str | Path, lobsterin_settings: Sequence[str]) -> None: + """Verify LOBSTER input files against reference settings. + + Args: + ref_path (str | Path): Path to the reference directory containing input files. + lobsterin_settings (Sequence[str]): A list of LOBSTER settings to check. + """ + user = Lobsterin.from_file("lobsterin") + + # Check lobsterin + ref = Lobsterin.from_file(Path(ref_path) / "inputs" / "lobsterin") + + for key in lobsterin_settings: + ref_val, user_val = ref.get(key), user.get(key) + if ref_val != user_val: + raise ValueError( + f"lobsterin value of {key} is inconsistent, got {user_val} but " + f"expected {ref_val}!" + ) + + +def copy_lobster_outputs(ref_path: str | Path) -> None: + """Copy LOBSTER output files from the reference directory to the current directory. + + Args: + ref_path (str | Path): Path to the reference directory containing output files. 
+ """ + output_path = Path(ref_path) / "outputs" + for output_file in output_path.iterdir(): + if output_file.is_file(): + shutil.copy(output_file, ".") diff --git a/tests/vasp/lobster/conftest.py b/tests/vasp/lobster/conftest.py index 9743334bd3..e7ef4fbce1 100644 --- a/tests/vasp/lobster/conftest.py +++ b/tests/vasp/lobster/conftest.py @@ -1,143 +1,16 @@ -from __future__ import annotations - -import logging -import shutil +from collections.abc import Callable, Generator from pathlib import Path -from typing import TYPE_CHECKING, Literal +from typing import Any import pytest -from pymatgen.io.lobster import Lobsterin - -import atomate2.lobster.jobs -import atomate2.lobster.run - -if TYPE_CHECKING: - from collections.abc import Sequence - -logger = logging.getLogger("atomate2") - -_LFILES = "lobsterin" -_DFT_FILES = ("WAVECAR", "POSCAR", "INCAR", "KPOINTS", "POTCAR") -_LOBS_REF_PATHS = {} -_FAKE_RUN_LOBSTER_KWARGS = {} - +from pytest import MonkeyPatch -@pytest.fixture(scope="session") -def lobster_test_dir(test_dir): - return test_dir / "lobster" +from atomate2.utils.testing.lobster import monkeypatch_lobster @pytest.fixture -def mock_lobster(monkeypatch, lobster_test_dir): - """ - This fixture allows one to mock (fake) running LOBSTER. - It works by monkeypatching (replacing) calls to run_lobster that will - work when the lobster executables - are not present. - The primary idea is that instead of running LOBSTER to generate the output files, - reference files will be copied into the directory instead. As we do not want to - test whether LOBSTER is giving the correct output rather that the calculation inputs - are generated correctly and that the outputs are parsed properly, this should be - sufficient for our needs. - To use the fixture successfully, the following steps must be followed: - 1. "mock_lobster" should be included as an argument to any test that would - like to use its functionally. - 2. For each job in your workflow, you should prepare a reference directory - containing two folders "inputs" (containing the reference input files - expected to be produced by Lobsterin.standard_calculations_from_vasp_files - and "outputs" (containing the expected - output files to be produced by run_lobster). These files should reside in a - subdirectory of "tests/test_data/lobster". - 3. Create a dictionary mapping each job name to its reference directory. - Note that you should supply the reference directory relative to the - "tests/test_data/lobster" folder. For example, if your calculation - has one job named "lobster_run_0" and the reference files are present in - "tests/test_data/lobster/Si_lobster_run_0", the dictionary - would look like: ``{"lobster_run_0": "Si_lobster_run_0"}``. - 4. Optional: create a dictionary mapping each job name to custom - keyword arguments that will be supplied to fake_run_lobster. - This way you can configure which lobsterin settings are expected for each job. - For example, if your calculation has one job named "lobster_run_0" - and you wish to validate that "basisfunctions" is set correctly - in the lobsterin, your dictionary would look like - ``{"lobster_run_0": {"lobsterin_settings": {"basisfunctions": Ba 5p 5s 6s}}``. - 5. Inside the test function, call `mock_lobster(ref_paths, fake_lobster_kwargs)`, - where ref_paths is the dictionary created in step 3 - and fake_lobster_kwargs is the - dictionary created in step 4. - 6. Run your lobster job after calling `mock_lobster`. 
- """ - - def mock_run_lobster(*args, **kwargs): - from jobflow import CURRENT_JOB - - name = CURRENT_JOB.job.name - ref_path = lobster_test_dir / _LOBS_REF_PATHS[name] - fake_run_lobster(ref_path, **_FAKE_RUN_LOBSTER_KWARGS.get(name, {})) - - monkeypatch.setattr(atomate2.lobster.run, "run_lobster", mock_run_lobster) - monkeypatch.setattr(atomate2.lobster.jobs, "run_lobster", mock_run_lobster) - - def _run(ref_paths, fake_run_lobster_kwargs): - _LOBS_REF_PATHS.update(ref_paths) - _FAKE_RUN_LOBSTER_KWARGS.update(fake_run_lobster_kwargs) - - yield _run - - monkeypatch.undo() - _LOBS_REF_PATHS.clear() - - -def fake_run_lobster( - ref_path: str | Path, - check_lobster_inputs: Sequence[Literal["lobsterin"]] = _LFILES, - check_dft_inputs: Sequence[Literal["WAVECAR", "POSCAR"]] = _DFT_FILES, - lobsterin_settings: Sequence[str] = (), -): - """ - Emulate running LOBSTER and validate LOBSTER input files. - Parameters - ---------- - ref_path - Path to reference directory with VASP input files in the folder named 'inputs' - and output files in the folder named 'outputs'. - check_lobster_inputs - A list of lobster input files to check. Supported options are "lobsterin.gz". - lobsterin_settings - A list of LOBSTER settings to check. - """ - logger.info("Running fake LOBSTER.") - ref_path = Path(ref_path) - - # Checks if DFT files have been copied - for file in check_dft_inputs: - Path(file).exists() - logger.info("Verified copying of VASP files successfully") - # zipped or not zipped? - if "lobsterin" in check_lobster_inputs: - verify_inputs(ref_path, lobsterin_settings) - - logger.info("Verified LOBSTER inputs successfully") - - copy_lobster_outputs(ref_path) - - # pretend to run LOBSTER by copying pre-generated outputs from reference dir - logger.info("ran fake LOBSTER, generated outputs") - - -def verify_inputs(ref_path: str | Path, lobsterin_settings: Sequence[str]): - user = Lobsterin.from_file("lobsterin") - - # Check lobsterin - ref = Lobsterin.from_file(ref_path / "inputs" / "lobsterin") - - for key in lobsterin_settings: - if user.get(key) != ref.get(key): - raise ValueError(f"lobsterin value of {key} is inconsistent!") - - -def copy_lobster_outputs(ref_path: str | Path): - output_path = ref_path / "outputs" - for output_file in output_path.iterdir(): - if output_file.is_file(): - shutil.copy(output_file, ".") +def mock_lobster( + monkeypatch: MonkeyPatch, lobster_test_dir: Path +) -> Generator[Callable[[Any, Any], Any], None, None]: + """ """ + yield from monkeypatch_lobster(monkeypatch, lobster_test_dir) diff --git a/tutorials/force_fields/__init__.py b/tutorials/force_fields/__init__.py new file mode 100644 index 0000000000..978848c13a --- /dev/null +++ b/tutorials/force_fields/__init__.py @@ -0,0 +1 @@ +"""Forcefield-based tutorials.""" diff --git a/tutorials/force_fields/phonon_workflow.ipynb b/tutorials/force_fields/phonon_workflow.ipynb new file mode 100644 index 0000000000..e2537bb392 --- /dev/null +++ b/tutorials/force_fields/phonon_workflow.ipynb @@ -0,0 +1,165 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "0", + "metadata": {}, + "source": [ + "We start with imports necessary to test the tutorial automatically. In practice, you can load a structure file from any other place and you are also not required to generate the data in a temporary directory." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1", + "metadata": {}, + "outputs": [], + "source": [ + "import tempfile\n", + "from pathlib import Path\n", + "\n", + "tmp_dir = tempfile.mkdtemp()\n", + "TEST_ROOT = Path().cwd().parent.parent / \"tests\"\n", + "TEST_DIR = TEST_ROOT / \"test_data\"" + ] + }, + { + "cell_type": "markdown", + "id": "2", + "metadata": {}, + "source": [ + "First, we load a structure from a file." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3", + "metadata": {}, + "outputs": [], + "source": [ + "from pymatgen.core.structure import Structure\n", + "\n", + "si_structure = Structure.from_file(TEST_DIR / \"structures\" / \"Si.cif\")" + ] + }, + { + "cell_type": "markdown", + "id": "4", + "metadata": {}, + "source": [ + "Then, we load the `PhononMaker` and run_locally to perform the calculation directly here in the notebook." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5", + "metadata": {}, + "outputs": [], + "source": [ + "from jobflow import run_locally\n", + "\n", + "from atomate2.forcefields.flows.phonons import PhononMaker\n", + "\n", + "flow = PhononMaker(\n", + " min_length=3.0,\n", + " born_maker=None,\n", + " use_symmetrized_structure=\"conventional\",\n", + " create_thermal_displacements=False,\n", + " store_force_constants=False,\n", + " prefer_90_degrees=False,\n", + " generate_frequencies_eigenvectors_kwargs={\"tstep\": 100},\n", + ").make(si_structure)\n", + "run_locally(flow, create_folders=True, raise_immediately=True, root_dir=tmp_dir)" + ] + }, + { + "cell_type": "markdown", + "id": "6", + "metadata": {}, + "source": [ + "One can switch to a different force field as well!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7", + "metadata": {}, + "outputs": [], + "source": [ + "from atomate2.forcefields.jobs import ForceFieldRelaxMaker, ForceFieldStaticMaker\n", + "\n", + "flow = PhononMaker(\n", + " min_length=3.0,\n", + " use_symmetrized_structure=\"conventional\",\n", + " create_thermal_displacements=False,\n", + " store_force_constants=False,\n", + " prefer_90_degrees=False,\n", + " generate_frequencies_eigenvectors_kwargs={\"tstep\": 100},\n", + " static_energy_maker=ForceFieldStaticMaker(force_field_name=\"MACE_MP_0B3\"),\n", + " bulk_relax_maker=ForceFieldRelaxMaker(force_field_name=\"MACE_MP_0B3\"),\n", + " phonon_displacement_maker=ForceFieldStaticMaker(force_field_name=\"MACE_MP_0B3\"),\n", + ").make(si_structure)\n", + "\n", + "run_locally(flow, create_folders=True, raise_immediately=True, root_dir=tmp_dir)" + ] + }, + { + "cell_type": "markdown", + "id": "8", + "metadata": {}, + "source": [ + "Or by using the name only:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9", + "metadata": {}, + "outputs": [], + "source": [ + "PhononMaker.from_force_field_name(force_field_name=\"MACE_MP_0B3\")\n", + "run_locally(flow, create_folders=True, raise_immediately=True, root_dir=tmp_dir)" + ] + }, + { + "cell_type": "markdown", + "id": "10", + "metadata": {}, + "source": [ + "Now, we clean up the temporary directory that we made. In reality, you might want to keep this data." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "11", + "metadata": {}, + "outputs": [], + "source": [ + "import shutil\n", + "\n", + "shutil.rmtree(tmp_dir)" + ] + } + ], + "metadata": { + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 2 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython2", + "version": "2.7.6" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/tutorials/lobster_workflow.ipynb b/tutorials/lobster_workflow.ipynb new file mode 100644 index 0000000000..ac9ebc1230 --- /dev/null +++ b/tutorials/lobster_workflow.ipynb @@ -0,0 +1,178 @@ +{ + "cells": [ + { + "cell_type": "raw", + "id": "0", + "metadata": {}, + "source": [ + "The first lines are needed to ensure that we can mock VASP and LOBSTER runs. The test files here might not belong to the same calculation but are good enough for testing." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1", + "metadata": {}, + "outputs": [], + "source": [ + "from mock_lobster import mock_lobster\n", + "from mock_vasp import TEST_DIR, mock_vasp\n", + "\n", + "ref_paths = {\n", + " \"relax 1\": \"Si_lobster_uniform/relax_1\",\n", + " \"relax 2\": \"Si_lobster_uniform/relax_2\",\n", + " \"static\": \"Si_lobster_uniform/static\",\n", + " \"non-scf uniform\": \"Si_lobster_uniform/non-scf_uniform\",\n", + "}\n", + "ref_paths_lobster = {\n", + " \"lobster_run_0\": \"Si_lobster/lobster_0\",\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "2", + "metadata": {}, + "source": [ + "We first load a structure that we want to analyze with bonding analysis." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3", + "metadata": {}, + "outputs": [], + "source": [ + "from jobflow import JobStore, run_locally\n", + "from maggma.stores import MemoryStore\n", + "from pymatgen.core import Structure\n", + "\n", + "from atomate2.vasp.flows.lobster import LobsterMaker, VaspLobsterMaker\n", + "from atomate2.vasp.powerups import update_user_incar_settings\n", + "\n", + "job_store = JobStore(MemoryStore(), additional_stores={\"data\": MemoryStore()})\n", + "si_structure = Structure.from_file(TEST_DIR / \"structures\" / \"Si.cif\")" + ] + }, + { + "cell_type": "markdown", + "id": "4", + "metadata": {}, + "source": [ + "Then, we initialize a workflow:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5", + "metadata": {}, + "outputs": [], + "source": [ + "job = VaspLobsterMaker(\n", + " lobster_maker=LobsterMaker(\n", + " task_document_kwargs={\n", + " \"calc_quality_kwargs\": {\"potcar_symbols\": [\"Si\"], \"n_bins\": 10},\n", + " \"add_coxxcar_to_task_document\": True,\n", + " },\n", + " user_lobsterin_settings={\n", + " \"COHPstartEnergy\": -5.0,\n", + " \"COHPEndEnergy\": 5.0,\n", + " \"cohpGenerator\": \"from 0.1 to 3.0 orbitalwise\",\n", + " },\n", + " ),\n", + " delete_wavecars=True,\n", + ").make(si_structure)\n", + "job = update_user_incar_settings(job, {\"NPAR\": 4})\n", + "\n", + "# run the flow or job and ensure that it finished running successfully" + ] + }, + { + "cell_type": "markdown", + "id": "6", + "metadata": {}, + "source": [ + "We then run this workflow locally to show-case the capabilities. In real-life, you would omit the `mock*` parts." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7", + "metadata": {}, + "outputs": [], + "source": [ + "with mock_vasp(ref_paths) as mf, mock_lobster(ref_paths_lobster) as mf2:\n", + " responses = run_locally(\n", + " job,\n", + " store=job_store,\n", + " create_folders=True,\n", + " ensure_success=True,\n", + " raise_immediately=True,\n", + " )" + ] + }, + { + "cell_type": "markdown", + "id": "8", + "metadata": {}, + "source": [ + "We can also analyze the data from the database" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9", + "metadata": {}, + "outputs": [], + "source": [ + "from pymatgen.electronic_structure.cohp import Cohp\n", + "from pymatgen.electronic_structure.plotter import CohpPlotter\n", + "\n", + "store = job_store\n", + "\n", + "result = store.query_one(\n", + " {\"name\": \"lobster_run_0\"},\n", + " properties=[\n", + " \"output.lobsterpy_data.cohp_plot_data\",\n", + " \"output.lobsterpy_data_cation_anion.cohp_plot_data\",\n", + " ],\n", + " load=True,\n", + ")\n", + "\n", + "for key, cohp in result[\"output\"][\"lobsterpy_data\"][\"cohp_plot_data\"][\"data\"].items():\n", + " plotter = CohpPlotter()\n", + " cohp_obj = Cohp.from_dict(cohp)\n", + " plotter.add_cohp(key, cohp_obj)\n", + " plotter.show()\n", + "\n", + "for key, cohp in result[\"output\"][\"lobsterpy_data_cation_anion\"][\"cohp_plot_data\"][\n", + " \"data\"\n", + "].items():\n", + " plotter = CohpPlotter()\n", + " cohp_obj = Cohp.from_dict(cohp)\n", + " plotter.add_cohp(key, cohp_obj)\n", + " plotter.show()" + ] + } + ], + "metadata": { + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 2 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython2", + "version": "2.7.6" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/tutorials/mock_lobster.py b/tutorials/mock_lobster.py new file mode 100644 index 0000000000..97c1ba01be --- /dev/null +++ b/tutorials/mock_lobster.py @@ -0,0 +1,40 @@ +"""Mock LOBSTER functions for executing tutorials.""" + +import contextlib +import os +import shutil +import tempfile +from collections.abc import Generator +from pathlib import Path + +from pytest import MonkeyPatch + +from atomate2.utils.testing.lobster import monkeypatch_lobster + +TEST_ROOT = Path(__file__).parent.parent / "tests" +TEST_DIR = TEST_ROOT / "test_data" + + +@contextlib.contextmanager +def mock_lobster(ref_paths: dict) -> Generator: + """Mock LOBSTER functions. + + Parameters + ---------- + ref_paths (dict): A dictionary of reference paths to the test data. + + Yields + ------ + function: A function that mocks calls to VASP. 
+ """ + for mf in monkeypatch_lobster(MonkeyPatch(), TEST_DIR / "lobster"): + fake_run_lobster_kwargs = {k: {"check_lobster_inputs": ()} for k in ref_paths} + + old_cwd = os.getcwd() + new_path = tempfile.mkdtemp() + os.chdir(new_path) + try: + yield mf(ref_paths, fake_run_lobster_kwargs=fake_run_lobster_kwargs) + finally: + os.chdir(old_cwd) + shutil.rmtree(new_path) diff --git a/tutorials/phonon_workflow.ipynb b/tutorials/phonon_workflow.ipynb new file mode 100644 index 0000000000..f4186583c2 --- /dev/null +++ b/tutorials/phonon_workflow.ipynb @@ -0,0 +1,223 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "0", + "metadata": {}, + "source": [ + "This first part is only needed as we have to mock VASP here as we cannot run it directly in a jupyter notebook:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1", + "metadata": {}, + "outputs": [], + "source": [ + "from mock_vasp import TEST_DIR, mock_vasp\n", + "\n", + "ref_paths = {\n", + " \"phonon static 1/1\": \"Si_phonons_3/phonon_static_1_1\",\n", + " \"static\": \"Si_phonons_3/static\",\n", + " \"tight relax 1\": \"Si_phonons_3/tight_relax_1\",\n", + " \"tight relax 2\": \"Si_phonons_3/tight_relax_2\",\n", + " \"dielectric\": \"Si_phonons_3/dielectric\",\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "2", + "metadata": {}, + "source": [ + "# Phonon Workflow" + ] + }, + { + "cell_type": "raw", + "id": "3", + "metadata": {}, + "source": [ + "This tutorial has been written based on a previous version from Aakash Naik." + ] + }, + { + "cell_type": "markdown", + "id": "4", + "metadata": {}, + "source": [ + "## Background\n", + "The Phonon workflow is based on the finite displacement approach as implemented in Phonopy.\n", + "\n", + "If you want to read more about Phonopy, please read Togo’s paper: https://doi.org/10.7566/JPSJ.92.012001" + ] + }, + { + "cell_type": "markdown", + "id": "5", + "metadata": {}, + "source": [ + "## Let's run the workflow\n", + "Now, we load a structure and other important functions and classes for running the phonon workflow." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6", + "metadata": {}, + "outputs": [], + "source": [ + "from jobflow import JobStore, run_locally\n", + "from maggma.stores import MemoryStore\n", + "from pymatgen.core import Structure\n", + "\n", + "from atomate2.vasp.flows.phonons import PhononMaker\n", + "\n", + "job_store = JobStore(MemoryStore(), additional_stores={\"data\": MemoryStore()})\n", + "si_structure = Structure.from_file(TEST_DIR / \"structures\" / \"Si.cif\")" + ] + }, + { + "cell_type": "markdown", + "id": "7", + "metadata": {}, + "source": [ + "Then one can use the `PhononMaker` to generate a `Flow`. For testing here, we are choosing a very small supercell length (`min_length`). Ideally, a larger cell should be chosen. For non-metallic systems with more than one element, one might need to add the non-analytical term correction considering very long-ranging forces by computing the `BORN` charges with the `born_maker`. Of course, the structure should also be relaxed in advance with the `bulk_relax_maker`. Please make sure this is done very accurately." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8", + "metadata": {}, + "outputs": [], + "source": [ + "flow = PhononMaker(\n", + " min_length=3.0,\n", + " use_symmetrized_structure=None,\n", + " generate_frequencies_eigenvectors_kwargs={\"tstep\": 100},\n", + " create_thermal_displacements=True,\n", + " store_force_constants=True,\n", + " born_maker=None,\n", + ").make(si_structure)" + ] + }, + { + "cell_type": "markdown", + "id": "9", + "metadata": {}, + "source": [ + "The phonon run will first perform a bulk relaxation, then the displacements are generated and run. As we currently don’t have a way to compute BORN charges with such potentials, a non-analytical term correction is not performed here. We can visualize the flow first." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "10", + "metadata": {}, + "outputs": [], + "source": [ + "flow.draw_graph().show()" + ] + }, + { + "cell_type": "markdown", + "id": "11", + "metadata": {}, + "source": [ + "We now run the flow with `run_locally`. We mock the run here. Normally, you would simply use `run_locally` without the `with mock_vasp`" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "12", + "metadata": {}, + "outputs": [], + "source": [ + "with mock_vasp(ref_paths=ref_paths) as mf:\n", + " run_locally(\n", + " flow,\n", + " create_folders=True,\n", + " ensure_success=True,\n", + " raise_immediately=True,\n", + " store=job_store,\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "13", + "metadata": {}, + "outputs": [], + "source": [ + "from pymatgen.phonon.bandstructure import PhononBandStructureSymmLine\n", + "from pymatgen.phonon.dos import PhononDos\n", + "from pymatgen.phonon.plotter import PhononBSPlotter, PhononDosPlotter\n", + "\n", + "job_store.connect()\n", + "\n", + "result = job_store.query_one(\n", + " {\"name\": \"generate_frequencies_eigenvectors\"},\n", + " properties=[\n", + " \"output.phonon_dos\",\n", + " \"output.phonon_bandstructure\",\n", + " ],\n", + " load=True,\n", + " sort={\"completed_at\": -1}, # to get the latest computation\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "14", + "metadata": {}, + "outputs": [], + "source": [ + "ph_bs = PhononBandStructureSymmLine.from_dict(\n", + " result[\"output\"][\"phonon_bandstructure\"]\n", + ") # get pymatgen bandstructure object\n", + "ph_dos = PhononDos.from_dict(\n", + " result[\"output\"][\"phonon_dos\"]\n", + ") # get pymatgen phonon dos object\n", + "\n", + "# initialize dos plotter and visualize dos plot\n", + "dos_plot = PhononDosPlotter()\n", + "dos_plot.add_dos(label=\"a\", dos=ph_dos)\n", + "dos_plot.get_plot()\n", + "\n", + "# initialize Phonon bandstructure plotter and visualize band structure plot\n", + "bs_plot = PhononBSPlotter(bs=ph_bs)\n", + "bs_plot.get_plot()" + ] + }, + { + "cell_type": "markdown", + "id": "15", + "metadata": {}, + "source": [ + "One can run the same workflow with a forcefield as well. Here, we cannot consider BORN charges yet as there is no forcefield equivalent. You can find this tutorial in the force field tutorials." + ] + } + ], + "metadata": { + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 2 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython2", + "version": "2.7.6" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +}