From 7e14342635ecdde028df43e6ad602785225d5e25 Mon Sep 17 00:00:00 2001 From: Alexander Goscinski Date: Tue, 5 Dec 2023 15:07:06 +0100 Subject: [PATCH] Merge TorchScript tests with regular tests (#32) Co-authored-by: Luthaf Co-authored-by: frostedoyster --- setup.py | 29 ++++++------ tests/test_spherical_expansions.py | 65 +++++++++----------------- tests/test_torchscript.py | 73 ------------------------------ 3 files changed, 37 insertions(+), 130 deletions(-) delete mode 100644 tests/test_torchscript.py diff --git a/setup.py b/setup.py index c8c1353..ff7391a 100644 --- a/setup.py +++ b/setup.py @@ -1,11 +1,10 @@ -from setuptools import setup, find_packages -import sys import subprocess +from setuptools import find_packages, setup # Detecting if pytorch with or without CUDA support should be installed try: - subprocess.check_output('nvidia-smi') + subprocess.check_output("nvidia-smi") HAS_NVIDIA = True except: HAS_NVIDIA = False @@ -13,19 +12,21 @@ if HAS_NVIDIA: dependency_links = [] else: - dependency_links = ['https://download.pytorch.org/whl/cpu'] - print("torch_spex setup info: Did not find NVIDIA card defaulting to CPU-only installation") + dependency_links = ["https://download.pytorch.org/whl/cpu"] + print( + "torch_spex setup info: Did not find NVIDIA card, defaulting to CPU-only installation" + ) setup( - name='torch_spex', - packages = find_packages(), + name="torch_spex", + packages=find_packages(), install_requires=[ - 'sphericart[torch] @ git+https://github.com/lab-cosmo/sphericart.git@99761b0', # pre-built wheels don't work - 'numpy', - 'ase', - 'torch', - 'scipy', - 'metatensor[torch] @ https://github.com/lab-cosmo/metatensor/archive/0436e27.zip', + "sphericart[torch] @ git+https://github.com/lab-cosmo/sphericart.git@ecf4145", # pre-built wheels don't work + "numpy", + "ase", + "torch", + "scipy", + "metatensor[torch]", ], - dependency_links = dependency_links + dependency_links=dependency_links, ) diff --git a/tests/test_spherical_expansions.py b/tests/test_spherical_expansions.py index 6dfbd58..91eb974 100644 --- a/tests/test_spherical_expansions.py +++ b/tests/test_spherical_expansions.py @@ -4,7 +4,6 @@ import metatensor.torch from metatensor.torch import Labels, TensorBlock, TensorMap -import numpy as np import ase.io from torch_spex.spherical_expansions import VectorExpansion, SphericalExpansion @@ -18,7 +17,9 @@ class TestEthanol1SphericalExpansion: device = "cpu" dtype = torch.float32 frames = ase.io.read('datasets/rmd17/ethanol1.extxyz', ':1') - all_species = list(np.unique([frame.numbers for frame in frames])) + all_species = torch.unique(torch.concatenate([torch.tensor(frame.numbers) + for frame in frames])) + all_species = [int(species) for species in all_species] with open("tests/data/expansion_coeffs-ethanol1_0-hypers.json", "r") as f: hypers = json.load(f) @@ -34,16 +35,22 @@ def test_vector_expansion_coeffs(self): tm_ref = metatensor.torch.to(tm_ref, device=self.device, dtype=self.dtype) # we need to sort both computed and reference pair expansion coeffs, # because ase.neighborlist can get different neighborlist order for some reasons - tm_ref = sort_tm(tm_ref) + tm_ref = metatensor.torch.sort(tm_ref) vector_expansion = VectorExpansion(self.hypers, self.all_species, device=self.device, dtype=self.dtype) with torch.no_grad(): - tm = sort_tm(vector_expansion.forward(**self.batch)) + tm = metatensor.torch.sort(vector_expansion.forward(**self.batch)) # Default types are float32 so we cannot get higher accuracy than 1e-7. 
         # Because the reference values have been calculated using float32 and
         # now we are using float64 computation the accuracy had to be decreased again
         assert metatensor.torch.allclose(tm_ref, tm, atol=1e-5, rtol=1e-5)
+        vector_expansion_script = torch.jit.script(vector_expansion)
+        with torch.no_grad():
+            tm_script = metatensor.torch.sort(vector_expansion_script.forward(**self.batch))
+        assert metatensor.torch.allclose(tm, tm_script, atol=1e-5,
+                                         rtol=torch.finfo(self.dtype).eps*10)
+
     def test_spherical_expansion_coeffs(self):
         tm_ref = metatensor.torch.load("tests/data/spherical_expansion_coeffs-ethanol1_0-data.npz")
         tm_ref = metatensor.torch.to(tm_ref, device=self.device, dtype=self.dtype)
@@ -56,6 +63,12 @@ def test_spherical_expansion_coeffs(self):
         # now we are using float64 computation the accuracy had to be decreased again
         assert metatensor.torch.allclose(tm_ref, tm, atol=1e-5, rtol=1e-5)
+        spherical_expansion_script = torch.jit.script(spherical_expansion_calculator)
+        with torch.no_grad():
+            tm_script = metatensor.torch.sort(spherical_expansion_script.forward(**self.batch))
+        assert metatensor.torch.allclose(tm, tm_script, atol=1e-5,
+                                         rtol=torch.finfo(self.dtype).eps*10)
+
     def test_spherical_expansion_coeffs_alchemical(self):
         with open("tests/data/expansion_coeffs-ethanol1_0-alchemical-hypers.json", "r") as f:
             hypers = json.load(f)
@@ -88,7 +101,9 @@ class TestArtificialSphericalExpansion:
     device = "cpu"
     dtype = torch.float32
     frames = ase.io.read('tests/datasets/artificial.extxyz', ':')
-    all_species = list(np.unique(np.hstack([frame.numbers for frame in frames])))
+    all_species = torch.unique(torch.concatenate([torch.tensor(frame.numbers)
+                                                  for frame in frames]))
+    all_species = [int(species) for species in all_species]
     with open("tests/data/expansion_coeffs-artificial-hypers.json", "r") as f:
         hypers = json.load(f)
@@ -101,11 +116,11 @@ class TestArtificialSphericalExpansion:
     def test_vector_expansion_coeffs(self):
         tm_ref = metatensor.torch.load("tests/data/vector_expansion_coeffs-artificial-data.npz")
         tm_ref = metatensor.torch.to(tm_ref, device=self.device, dtype=self.dtype)
-        tm_ref = sort_tm(tm_ref)
+        tm_ref = metatensor.torch.sort(tm_ref)
         vector_expansion = VectorExpansion(self.hypers, self.all_species, device=self.device,
                                            dtype=self.dtype)
         with torch.no_grad():
-            tm = sort_tm(vector_expansion.forward(**self.batch))
+            tm = metatensor.torch.sort(vector_expansion.forward(**self.batch))
         assert metatensor.torch.allclose(tm_ref, tm, atol=1e-5, rtol=1e-5)
 
     def test_spherical_expansion_coeffs(self):
@@ -137,39 +152,3 @@ def test_spherical_expansion_coeffs_artificial(self):
         with torch.no_grad():
             tm = spherical_expansion_calculator.forward(**self.batch)
         assert metatensor.torch.allclose(tm_ref, tm, atol=1e-5, rtol=1e-5)
-
-### these util functions will be removed once lab-cosmo/metatensor/pull/281 is merged
-def native_list_argsort(native_list):
-    return sorted(range(len(native_list)), key=native_list.__getitem__)
-
-def sort_tm(tm):
-    blocks = []
-    for _, block in tm.items():
-        values = block.values
-
-        samples_values = block.samples.values
-        sorted_idx = native_list_argsort([tuple(row.tolist()) for row in block.samples.values])
-        samples_values = samples_values[sorted_idx]
-        values = values[sorted_idx]
-
-        components_values = []
-        for i, component in enumerate(block.components):
-            component_values = component.values
-            sorted_idx = native_list_argsort([tuple(row.tolist()) for row in component.values])
-            components_values.append( component_values[sorted_idx] )
-            values = np.take(values, sorted_idx, axis=i+1)
-
-        properties_values = block.properties.values
-        sorted_idx = native_list_argsort([tuple(row.tolist()) for row in block.properties.values])
-        properties_values = properties_values[sorted_idx]
-        values = values[..., sorted_idx]
-
-        blocks.append(
-            TensorBlock(
-                values=values,
-                samples=Labels(values=samples_values, names=block.samples.names),
-                components=[Labels(values=components_values[i], names=component.names) for i, component in enumerate(block.components)],
-                properties=Labels(values=properties_values, names=block.properties.names)
-            )
-        )
-    return TensorMap(keys=tm.keys, blocks=blocks)
diff --git a/tests/test_torchscript.py b/tests/test_torchscript.py
deleted file mode 100644
index 8bbeac8..0000000
--- a/tests/test_torchscript.py
+++ /dev/null
@@ -1,73 +0,0 @@
-import json
-
-import torch
-
-import metatensor.torch
-from metatensor.torch import Labels, TensorBlock, TensorMap
-import numpy as np
-import ase.io
-
-from torch_spex.spherical_expansions import VectorExpansion, SphericalExpansion
-from torch_spex.structures import InMemoryDataset, TransformerNeighborList, collate_nl
-from torch.utils.data import DataLoader
-
-
-class TestEthanol1SphericalExpansion:
-    """
-    Tests on the ethanol1 dataset
-    """
-    device = "cpu"
-    frames = ase.io.read('datasets/rmd17/ethanol1.extxyz', ':1')
-    all_species = list(np.unique([frame.numbers for frame in frames]))
-    all_species = [int(species) for species in all_species]
-    with open("tests/data/expansion_coeffs-ethanol1_0-hypers.json", "r") as f:
-        hypers = json.load(f)
-
-    transformers = [TransformerNeighborList(cutoff=hypers["cutoff radius"])]
-    dataset = InMemoryDataset(frames, transformers)
-    loader = DataLoader(dataset, batch_size=1, collate_fn=collate_nl)
-    batch = next(iter(loader))
-
-    def test_vector_expansion_coeffs(self):
-        vector_expansion = torch.jit.script(VectorExpansion(self.hypers, self.all_species, device=self.device))
-        vector_expansion.forward(**self.batch)
-
-    def test_spherical_expansion_coeffs(self):
-        spherical_expansion_calculator = torch.jit.script(SphericalExpansion(self.hypers, self.all_species, device=self.device))
-        spherical_expansion_calculator.forward(**self.batch)
-
-    def test_spherical_expansion_coeffs_alchemical(self):
-        with open("tests/data/expansion_coeffs-ethanol1_0-alchemical-hypers.json", "r") as f:
-            hypers = json.load(f)
-        spherical_expansion_calculator = torch.jit.script(SphericalExpansion(hypers, self.all_species, device=self.device))
-        spherical_expansion_calculator.forward(**self.batch)
-
-class TestArtificialSphericalExpansion:
-    """
-    Tests on the artificial dataset
-    """
-    device = "cpu"
-    frames = ase.io.read('tests/datasets/artificial.extxyz', ':')
-    all_species = list(np.unique(np.hstack([frame.numbers for frame in frames])))
-    all_species = [int(species) for species in all_species]
-    with open("tests/data/expansion_coeffs-artificial-hypers.json", "r") as f:
-        hypers = json.load(f)
-
-    transformers = [TransformerNeighborList(cutoff=hypers["cutoff radius"])]
-    dataset = InMemoryDataset(frames, transformers)
-    loader = DataLoader(dataset, batch_size=len(frames), collate_fn=collate_nl)
-    batch = next(iter(loader))
-
-    def test_vector_expansion_coeffs(self):
-        vector_expansion = torch.jit.script(VectorExpansion(self.hypers, self.all_species, device=self.device))
-        vector_expansion.forward(**self.batch)
-
-    def test_spherical_expansion_coeffs(self):
-        spherical_expansion_calculator = torch.jit.script(SphericalExpansion(self.hypers, self.all_species, device=self.device))
-        spherical_expansion_calculator.forward(**self.batch)
-
-    def test_spherical_expansion_coeffs_artificial(self):
-        with open("tests/data/expansion_coeffs-artificial-alchemical-hypers.json", "r") as f:
-            hypers = json.load(f)
-        spherical_expansion_calculator = torch.jit.script(SphericalExpansion(hypers, self.all_species, device=self.device))
-        spherical_expansion_calculator.forward(**self.batch)
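
Note on the pattern this patch folds into the regular tests: instead of keeping a separate
tests/test_torchscript.py, each test now also compiles the calculator with torch.jit.script,
runs the scripted module on the same batch, and checks that its output agrees with the eager
output within a dtype-dependent tolerance. Below is a minimal, self-contained sketch of that
check; the ToyExpansion module and the test name are illustrative stand-ins (not part of the
patch or of torch_spex), used so the snippet runs with plain tensors instead of metatensor
TensorMaps:

    import torch


    class ToyExpansion(torch.nn.Module):
        """Illustrative stand-in for a calculator such as SphericalExpansion."""

        def __init__(self, n_features: int):
            super().__init__()
            self.linear = torch.nn.Linear(n_features, n_features)

        def forward(self, positions: torch.Tensor) -> torch.Tensor:
            return torch.tanh(self.linear(positions))


    def test_torchscript_consistency():
        dtype = torch.float32
        module = ToyExpansion(n_features=8).to(dtype)
        # Compile the module once, as the merged tests do for the real calculators.
        scripted = torch.jit.script(module)

        batch = torch.randn(16, 8, dtype=dtype)
        with torch.no_grad():
            eager_out = module(batch)
            scripted_out = scripted(batch)

        # Same tolerance structure as in the patch: an absolute floor plus a
        # relative term scaled by the machine epsilon of the working dtype.
        assert torch.allclose(eager_out, scripted_out, atol=1e-5,
                              rtol=torch.finfo(dtype).eps * 10)

Running the scripted and eager modules on identical inputs inside the same test is what makes
the direct allclose comparison meaningful, and it removes the duplicated setup code that the
deleted tests/test_torchscript.py had to carry.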