diff --git a/conda/torchdrug/meta.yaml b/conda/torchdrug/meta.yaml index 0fed9682..b48a11ab 100644 --- a/conda/torchdrug/meta.yaml +++ b/conda/torchdrug/meta.yaml @@ -1,21 +1,21 @@ package: name: torchdrug - version: 0.1.2 + version: 0.1.3 source: path: ../.. requirements: host: - - python >=3.7,<3.9 + - python >=3.7,<3.10 - pip run: - - python >=3.7,<3.9 + - python >=3.7,<3.10 - pytorch >=1.8.0 - pytorch-scatter >=2.0.8 - decorator - numpy >=1.11 - - rdkit + - rdkit >=2020.09 - matplotlib - tqdm - networkx diff --git a/setup.py b/setup.py index a9243870..c72475cb 100644 --- a/setup.py +++ b/setup.py @@ -13,7 +13,7 @@ long_description_content_type="text/markdown", url="https://torchdrug.ai/", author="TorchDrug Team", - version="0.1.2", + version="0.1.3", license="Apache-2.0", keywords=["deep-learning", "pytorch", "drug-discovery"], packages=setuptools.find_packages(), @@ -24,23 +24,23 @@ "layers/functional/extension/*.cpp", "layers/functional/extension/*.cu", "utils/extension/*.cpp", - "utils/template/*.html" - ]}, + "utils/template/*.html", + ] + }, test_suite="nose.collector", - install_requires= - [ - "torch>=1.8.0", - "torch-scatter>=2.0.8", - "decorator", - "numpy>=1.11", - "rdkit-pypi", - "matplotlib", - "tqdm", - "networkx", - "ninja", - "jinja2", - ], - python_requires=">=3.7,<3.9", + install_requires=[ + "torch>=1.8.0", + "torch-scatter>=2.0.8", + "decorator", + "numpy>=1.11", + "rdkit-pypi>=2020.9", + "matplotlib", + "tqdm", + "networkx", + "ninja", + "jinja2", + ], + python_requires=">=3.7,<3.10", classifiers=[ "Development Status :: 4 - Beta", 'Intended Audience :: Developers', diff --git a/test/layers/test_conv.py b/test/layers/test_conv.py index 30eb8755..045865d1 100644 --- a/test/layers/test_conv.py +++ b/test/layers/test_conv.py @@ -38,7 +38,7 @@ def test_graph_conv(self): adjacency /= adjacency.sum(dim=0, keepdim=True).sqrt() * adjacency.sum(dim=1, keepdim=True).sqrt() x = adjacency.t() @ self.input truth = conv.activation(conv.linear(x)) - 
self.assertTrue(torch.allclose(result, truth, rtol=1e-4, atol=1e-7), "Incorrect graph convolution") + self.assertTrue(torch.allclose(result, truth, rtol=1e-2, atol=1e-3), "Incorrect graph convolution") num_head = 2 conv = layers.GraphAttentionConv(self.input_dim, self.output_dim, num_head=num_head).cuda() @@ -55,7 +55,7 @@ def test_graph_conv(self): outputs.append(output) truth = torch.cat(outputs, dim=-1) truth = conv.activation(truth) - self.assertTrue(torch.allclose(result, truth), "Incorrect graph attention convolution") + self.assertTrue(torch.allclose(result, truth, rtol=1e-2, atol=1e-3), "Incorrect graph attention convolution") eps = 1 conv = layers.GraphIsomorphismConv(self.input_dim, self.output_dim, eps=eps).cuda() @@ -63,7 +63,7 @@ def test_graph_conv(self): adjacency = self.graph.adjacency.to_dense().sum(dim=-1) x = (1 + eps) * self.input + adjacency.t() @ self.input truth = conv.activation(conv.mlp(x)) - self.assertTrue(torch.allclose(result, truth, atol=1e-4, rtol=1e-7), "Incorrect graph isomorphism convolution") + self.assertTrue(torch.allclose(result, truth, rtol=1e-2, atol=1e-2), "Incorrect graph isomorphism convolution") conv = layers.RelationalGraphConv(self.input_dim, self.output_dim, self.num_relation).cuda() result = conv(self.graph, self.input) @@ -72,7 +72,7 @@ def test_graph_conv(self): x = torch.einsum("htr, hd -> trd", adjacency, self.input) x = conv.linear(x.flatten(1)) + conv.self_loop(self.input) truth = conv.activation(x) - self.assertTrue(torch.allclose(result, truth, atol=1e-4, rtol=1e-7), "Incorrect relational graph convolution") + self.assertTrue(torch.allclose(result, truth, rtol=1e-2, atol=1e-3), "Incorrect relational graph convolution") conv = layers.ChebyshevConv(self.input_dim, self.output_dim, k=2).cuda() result = conv(self.graph, self.input) @@ -83,7 +83,7 @@ def test_graph_conv(self): bases = [self.input, laplacian.t() @ self.input, (2 * laplacian.t() @ laplacian.t() - identity) @ self.input] x = 
conv.linear(torch.cat(bases, dim=-1)) truth = conv.activation(x) - self.assertTrue(torch.allclose(result, truth, atol=1e-4, rtol=1e-7), "Incorrect chebyshev graph convolution") + self.assertTrue(torch.allclose(result, truth, rtol=1e-2, atol=1e-3), "Incorrect chebyshev graph convolution") if __name__ == "__main__": diff --git a/test/layers/test_pool.py b/test/layers/test_pool.py index 3425b0d3..c7e63eb9 100644 --- a/test/layers/test_pool.py +++ b/test/layers/test_pool.py @@ -43,8 +43,8 @@ def test_pool(self): truth_adj = torch.einsum("bna, bnm, bmc -> bac", assignment, adjacency, assignment) index = torch.arange(self.output_node, device=truth.device) truth_adj[:, index, index] = 0 - self.assertTrue(torch.allclose(result, truth), "Incorrect diffpool node feature") - self.assertTrue(torch.allclose(result_adj, truth_adj), "Incorrect diffpool adjacency") + self.assertTrue(torch.allclose(result, truth, rtol=1e-3, atol=1e-4), "Incorrect diffpool node feature") + self.assertTrue(torch.allclose(result_adj, truth_adj, rtol=1e-3, atol=1e-4), "Incorrect diffpool adjacency") graph = self.graph[0] rng_state = torch.get_rng_state() @@ -60,8 +60,8 @@ def test_pool(self): truth_adj = torch.einsum("na, nm, mc -> ac", assignment, adjacency, assignment) index = torch.arange(self.output_node, device=truth.device) truth_adj[index, index] = 0 - self.assertTrue(torch.allclose(result, truth), "Incorrect diffpool node feature") - self.assertTrue(torch.allclose(result_adj, truth_adj), "Incorrect diffpool adjacency") + self.assertTrue(torch.allclose(result, truth, rtol=1e-3, atol=1e-4), "Incorrect diffpool node feature") + self.assertTrue(torch.allclose(result_adj, truth_adj, rtol=1e-3, atol=1e-4), "Incorrect diffpool adjacency") pool = layers.MinCutPool(self.input_dim, self.output_node, self.feature_layer, self.pool_layer).cuda() all_loss = torch.tensor(0, dtype=torch.float32, device="cuda") @@ -89,10 +89,10 @@ def test_pool(self): x = x - torch.eye(self.output_node, device=x.device) / 
(self.output_node ** 0.5) regularization = x.flatten(-2).norm(dim=-1).mean() truth_metric = {"normalized cut loss": cut_loss, "orthogonal regularization": regularization} - self.assertTrue(torch.allclose(result, truth), "Incorrect min cut pool feature") - self.assertTrue(torch.allclose(result_adj, truth_adj), "Incorrect min cut pool adjcency") + self.assertTrue(torch.allclose(result, truth, rtol=1e-3, atol=1e-4), "Incorrect min cut pool feature") + self.assertTrue(torch.allclose(result_adj, truth_adj, rtol=1e-3, atol=1e-4), "Incorrect min cut pool adjacency") for key in result_metric: - self.assertTrue(torch.allclose(result_metric[key], truth_metric[key], atol=1e-4, rtol=1e-7), + self.assertTrue(torch.allclose(result_metric[key], truth_metric[key], rtol=1e-3, atol=1e-4), "Incorrect min cut pool metric") diff --git a/torchdrug/__init__.py b/torchdrug/__init__.py index 7dca7a07..b0989c37 100644 --- a/torchdrug/__init__.py +++ b/torchdrug/__init__.py @@ -12,4 +12,4 @@ handler.setFormatter(format) logger.addHandler(handler) -__version__ = "0.1.2" \ No newline at end of file +__version__ = "0.1.3" \ No newline at end of file diff --git a/torchdrug/utils/decorator.py b/torchdrug/utils/decorator.py index cd41fe81..9a49ca73 100644 --- a/torchdrug/utils/decorator.py +++ b/torchdrug/utils/decorator.py @@ -1,5 +1,6 @@ import inspect import warnings +import functools from decorator import decorator @@ -100,14 +101,19 @@ def deprecated_alias(**alias): Handle argument alias for a function and output deprecated warnings. 
""" - def wrapper(func, *args, **kwargs): - for key, value in alias.items(): - if key in kwargs: - if value in kwargs: - raise TypeError("%s() got values for both `%s` and `%s`" % (func.__name__, value, key)) - warnings.warn("%s(): argument `%s` is deprecated in favor of `%s`" % (func.__name__, key, value)) - kwargs[value] = kwargs.pop(key) + def decorate(func): - return func(*args, **kwargs) + @functools.wraps(func) + def wrapper(*args, **kwargs): + for key, value in alias.items(): + if key in kwargs: + if value in kwargs: + raise TypeError("%s() got values for both `%s` and `%s`" % (func.__name__, value, key)) + warnings.warn("%s(): argument `%s` is deprecated in favor of `%s`" % (func.__name__, key, value)) + kwargs[value] = kwargs.pop(key) - return decorator(wrapper, kwsyntax=True) + return func(*args, **kwargs) + + return wrapper + + return decorate