Merge pull request #1082 from lab-cosmo/action-metatensor
Interface to metatensor to use arbitrary machine learning models as collective variables
GiovanniBussi authored Jun 27, 2024
2 parents 531abd8 + 3ba18d7 commit 120447e
Showing 30 changed files with 2,839 additions and 0 deletions.
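The new action is exercised by the regtest added below (regtest/metatensor/rt-basic). As a rough sketch of end-to-end use: the action name METATENSOR is confirmed by the documentation files listed in the bot comment (automatic/METATENSOR.tmp), but the MODEL keyword, the atom selection, and the file names here are illustrative assumptions, not verbatim syntax.

# Hypothetical workflow (keywords and file names are placeholders):
# 1. export a TorchScript model compatible with metatensor-torch 0.5.x;
# 2. reference it from a PLUMED input as a collective variable:
cat > plumed.dat <<'EOF'
cv: METATENSOR MODEL=model.pt SPECIES1=1-10
PRINT ARG=cv FILE=colvar
EOF
# 3. evaluate it with the driver, as the regtest below does:
plumed driver --plumed plumed.dat --ixyz structure.xyz --length-units A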
201 changes: 201 additions & 0 deletions configure
@@ -751,6 +751,7 @@ enable_af_ocl
enable_af_cuda
enable_af_cpu
enable_libtorch
enable_metatensor
enable_openmp
'
ac_precious_vars='build_alias
@@ -1452,6 +1453,7 @@ Optional Features:
--enable-af_cuda enable search for arrayfire_cuda, default: no
--enable-af_cpu enable search for arrayfire_cpu, default: no
--enable-libtorch enable search for libtorch, default: no
--enable-metatensor enable search for metatensor, default: no
--disable-openmp do not use OpenMP
Some influential environment variables:
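Because metatensor forces libtorch on (see the check further down), both searches must succeed at configure time. A hypothetical invocation with placeholder install prefixes, passed through the standard autoconf CPPFLAGS/LDFLAGS variables:

# Placeholder prefixes; point them at the actual libtorch/metatensor installs.
TORCH_PREFIX=/opt/libtorch
MTS_PREFIX=/opt/metatensor
./configure --enable-metatensor \
  CPPFLAGS="-I$TORCH_PREFIX/include -I$TORCH_PREFIX/include/torch/csrc/api/include -I$MTS_PREFIX/include" \
  LDFLAGS="-L$TORCH_PREFIX/lib -L$MTS_PREFIX/lib"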
@@ -3220,6 +3222,24 @@ fi

#added by luigibonati

metatensor=
# Check whether --enable-metatensor was given.
if test "${enable_metatensor+set}" = set; then :
enableval=$enable_metatensor; case "${enableval}" in
(yes) metatensor=true ;;
(no) metatensor=false ;;
(*) as_fn_error $? "wrong argument to --enable-metatensor" "$LINENO" 5 ;;
esac
else
case "no" in
(yes) metatensor=true ;;
(no) metatensor=false ;;
esac

fi






@@ -9635,6 +9655,11 @@ $as_echo "$as_me: WARNING: cannot enable __PLUMED_HAS_ARRAYFIRE" >&2;}

fi

# metatensor requires libtorch
if test $metatensor = true ; then
libtorch=true;
fi

#added by luigibonati
if test $libtorch = true ; then
# disable as-needed in linking libraries (both static and shared)
@@ -9955,6 +9980,182 @@ $as_echo "$as_me: WARNING: cannot enable __PLUMED_HAS_LIBTORCH" >&2;}
fi
fi

if test $metatensor = true ; then
# find metatensor and metatensor_torch

found=ko
__PLUMED_HAS_METATENSOR=no
if test "${libsearch}" = true ; then
testlibs="metatensor metatensor_torch"
else
testlibs=""
fi
save_LIBS="$LIBS"

# check if multiple libraries are required simultaneously
multiple="no"
if test "true" = "true"; then
multiple="yes"
all_LIBS=""
for testlib in $testlibs;
do
all_LIBS="$all_LIBS -l$testlib"
done
testlibs=" " # first check without extra libraries, later with all of them together
fi

# check without libraries
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking metatensor without extra libs" >&5
$as_echo_n "checking metatensor without extra libs... " >&6; }
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
// torch header creates a lot of pedantic warnings, which we can't do anything about
// we disable them to make finding the relevant part in the config.log easier
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpedantic"
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wfloat-equal"
#pragma GCC diagnostic ignored "-Wfloat-conversion"
#pragma GCC diagnostic ignored "-Wimplicit-float-conversion"
#pragma GCC diagnostic ignored "-Wimplicit-int-conversion"
#pragma GCC diagnostic ignored "-Wshorten-64-to-32"
#pragma GCC diagnostic ignored "-Wsign-conversion"
#pragma GCC diagnostic ignored "-Wold-style-cast"
#include <torch/torch.h>
#include <torch/script.h>
#pragma GCC diagnostic pop
#include <metatensor/torch.hpp>
#if METATENSOR_TORCH_VERSION_MAJOR != 0 || METATENSOR_TORCH_VERSION_MINOR != 5
#error "this code is only compatible with metatensor-torch >=0.5.1,<0.6"
#endif
int main() {
metatensor_torch::version();
return 0;
}
_ACEOF
if ac_fn_cxx_try_link "$LINENO"; then :
found=ok
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }

fi
rm -f core conftest.err conftest.$ac_objext \
conftest$ac_exeext conftest.$ac_ext
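When this probe fails, the full compiler output is kept in config.log. The same check can also be reproduced by hand; a sketch with hypothetical paths, where the two metatensor libraries match testlibs above and the three torch libraries are an assumption about a typical libtorch install:

# Save the test program above as conftest.cpp, then (paths are placeholders):
g++ -std=c++17 conftest.cpp \
  -I/opt/libtorch/include -I/opt/libtorch/include/torch/csrc/api/include \
  -I/opt/metatensor/include \
  -L/opt/libtorch/lib -L/opt/metatensor/lib \
  -ltorch -ltorch_cpu -lc10 -lmetatensor_torch -lmetatensor \
  -o conftest && ./conftest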

if test "$found" = "ko" ; then
if test "$multiple" = "yes" ; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking metatensor with $all_LIBS" >&5
$as_echo_n "checking metatensor with $all_LIBS... " >&6; }
LIBS="$all_LIBS $LIBS"
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
// torch header creates a lot of pedantic warnings, which we can't do anything about
// we disable them to make finding the relevant part in the config.log easier
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpedantic"
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wfloat-equal"
#pragma GCC diagnostic ignored "-Wfloat-conversion"
#pragma GCC diagnostic ignored "-Wimplicit-float-conversion"
#pragma GCC diagnostic ignored "-Wimplicit-int-conversion"
#pragma GCC diagnostic ignored "-Wshorten-64-to-32"
#pragma GCC diagnostic ignored "-Wsign-conversion"
#pragma GCC diagnostic ignored "-Wold-style-cast"
#include <torch/torch.h>
#include <torch/script.h>
#pragma GCC diagnostic pop
#include <metatensor/torch.hpp>
#if METATENSOR_TORCH_VERSION_MAJOR != 0 || METATENSOR_TORCH_VERSION_MINOR != 5
#error "this code is only compatible with metatensor-torch >=0.5.1,<0.6"
#endif
int main() {
metatensor_torch::version();
return 0;
}
_ACEOF
if ac_fn_cxx_try_link "$LINENO"; then :
found=ok
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }

fi
rm -f core conftest.err conftest.$ac_objext \
conftest$ac_exeext conftest.$ac_ext
else
for testlib in $testlibs
do
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking metatensor with -l$testlib" >&5
$as_echo_n "checking metatensor with -l$testlib... " >&6; }
LIBS="-l$testlib $LIBS"
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
// torch header creates a lot of pedantic warnings, which we can't do anything about
// we disable them to make finding the relevant part in the config.log easier
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpedantic"
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wfloat-equal"
#pragma GCC diagnostic ignored "-Wfloat-conversion"
#pragma GCC diagnostic ignored "-Wimplicit-float-conversion"
#pragma GCC diagnostic ignored "-Wimplicit-int-conversion"
#pragma GCC diagnostic ignored "-Wshorten-64-to-32"
#pragma GCC diagnostic ignored "-Wsign-conversion"
#pragma GCC diagnostic ignored "-Wold-style-cast"
#include <torch/torch.h>
#include <torch/script.h>
#pragma GCC diagnostic pop
#include <metatensor/torch.hpp>
#if METATENSOR_TORCH_VERSION_MAJOR != 0 || METATENSOR_TORCH_VERSION_MINOR != 5
#error "this code is only compatible with metatensor-torch >=0.5.1,<0.6"
#endif
int main() {
metatensor_torch::version();
return 0;
}
_ACEOF
if ac_fn_cxx_try_link "$LINENO"; then :
found=ok
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }

fi
rm -f core conftest.err conftest.$ac_objext \
conftest$ac_exeext conftest.$ac_ext
if test $found = ok ; then
break
fi
LIBS="$save_LIBS"
done
fi
fi

if test $found = ok ; then
$as_echo "#define __PLUMED_HAS_METATENSOR 1" >>confdefs.h

__PLUMED_HAS_METATENSOR=yes
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cannot enable __PLUMED_HAS_METATENSOR" >&5
$as_echo "$as_me: WARNING: cannot enable __PLUMED_HAS_METATENSOR" >&2;}
LIBS="$save_LIBS"
fi

fi
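The block above is boilerplate expanded from a single macro call (see configure.ac below); its control flow reduces to two link attempts. A sketch, with the hypothetical helper try_link standing in for the ac_fn_cxx_try_link probe:

# Distilled search strategy (not the verbatim generated code):
found=ko
for extra in "" "-lmetatensor -lmetatensor_torch"; do
  LIBS="$extra $save_LIBS"
  if try_link conftest.cpp; then
    found=ok
    break
  fi
done
# Since "multiple" is hard-coded to yes, the per-library fallback loop in
# the generated script is effectively dead code here.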

# in non-debug mode, add -DNDEBUG
if test "$debug" = false ; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: Release mode, adding -DNDEBUG" >&5
35 changes: 35 additions & 0 deletions configure.ac
@@ -323,6 +323,7 @@ PLUMED_CONFIG_ENABLE([af_ocl],[search for arrayfire_ocl],[no])
PLUMED_CONFIG_ENABLE([af_cuda],[search for arrayfire_cuda],[no])
PLUMED_CONFIG_ENABLE([af_cpu],[search for arrayfire_cpu],[no])
PLUMED_CONFIG_ENABLE([libtorch],[search for libtorch],[no]) #added by luigibonati
PLUMED_CONFIG_ENABLE([metatensor],[search for metatensor],[no])

AC_ARG_VAR(SOEXT,[extension of dynamic libraries (so/dylib)])
AC_ARG_VAR(STATIC_LIBS,[variables that should be linked statically directly to MD code - configure will add here -ldl if necessary ])
@@ -926,6 +927,11 @@ if test "$af_cpu" = true ; then
PLUMED_CHECK_PACKAGE([arrayfire.h],[af_is_double],[__PLUMED_HAS_ARRAYFIRE],[afcpu])
fi

# metatensor requires libtorch
if test $metatensor = true ; then
libtorch=true;
fi

#added by luigibonati
if test $libtorch = true ; then
# disable as-needed in linking libraries (both static and shared)
@@ -963,6 +969,35 @@ if test $libtorch = true ; then
fi
fi

if test $metatensor = true ; then
# find metatensor and metatensor_torch
PLUMED_CHECK_CXX_PACKAGE([metatensor],[
// torch header creates a lot of pedantic warnings, which we can't do anything about
// we disable them to make finding the relevant part in the config.log easier
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpedantic"
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wfloat-equal"
#pragma GCC diagnostic ignored "-Wfloat-conversion"
#pragma GCC diagnostic ignored "-Wimplicit-float-conversion"
#pragma GCC diagnostic ignored "-Wimplicit-int-conversion"
#pragma GCC diagnostic ignored "-Wshorten-64-to-32"
#pragma GCC diagnostic ignored "-Wsign-conversion"
#pragma GCC diagnostic ignored "-Wold-style-cast"
#include <torch/torch.h>
#include <torch/script.h>
#pragma GCC diagnostic pop
#include <metatensor/torch.hpp>
#if METATENSOR_TORCH_VERSION_MAJOR != 0 || METATENSOR_TORCH_VERSION_MINOR != 5
#error "this code is only compatible with metatensor-torch >=0.5.1,<0.6"
#endif
int main() {
metatensor_torch::version();
return 0;
}
], [__PLUMED_HAS_METATENSOR], [metatensor metatensor_torch], [true])
fi
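This single PLUMED_CHECK_CXX_PACKAGE call expands into the long shell block shown in configure above; the trailing [true] argument selects the "check all listed libraries together" mode (the multiple="yes" branch of the generated script). After editing configure.ac, the shipped configure must be regenerated; this is the standard autoconf workflow, though the exact command used in this repository may differ:

autoconf                          # regenerates ./configure from configure.ac
./configure --enable-metatensor   # then re-run the search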

# in non-debug mode, add -DNDEBUG
if test "$debug" = false ; then
AC_MSG_NOTICE([Release mode, adding -DNDEBUG])
1 change: 1 addition & 0 deletions regtest/.gitignore
@@ -42,6 +42,7 @@
!/clusters
!/unittest
!/wham
!/metatensor
# These files we just want to ignore completely
tmp
report.txt
1 change: 1 addition & 0 deletions regtest/metatensor/rt-basic/Makefile
@@ -0,0 +1 @@
include ../../scripts/test.make
8 changes: 8 additions & 0 deletions regtest/metatensor/rt-basic/config
@@ -0,0 +1,8 @@
plumed_modules=metatensor
plumed_needs=metatensor
type=driver

# NOTE: to enable --debug-forces, also change the dtype of the models to float64
arg="--plumed plumed.dat --ixyz structure.xyz --length-units A --dump-forces forces --dump-forces-fmt %8.2f" # --debug-forces forces.num"

PLUMED_ALLOW_SKIP_ON_TRAVIS=yes
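With type=driver, the harness included from scripts/test.make runs plumed driver with the arguments in arg; approximately (the real wrapper adds setup and output comparison around this):

plumed driver --plumed plumed.dat --ixyz structure.xyz \
  --length-units A --dump-forces forces --dump-forces-fmt %8.2f
# To compare analytical and numerical forces, re-enable the commented
# --debug-forces flag and switch the model dtype to float64, per the NOTE.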
1 comment on commit 120447e

@PlumedBot (Contributor) commented:
Found broken examples in automatic/ANGLES.tmp
Found broken examples in automatic/ANN.tmp
Found broken examples in automatic/CAVITY.tmp
Found broken examples in automatic/CLASSICAL_MDS.tmp
Found broken examples in automatic/CLUSTER_DIAMETER.tmp
Found broken examples in automatic/CLUSTER_DISTRIBUTION.tmp
Found broken examples in automatic/CLUSTER_PROPERTIES.tmp
Found broken examples in automatic/CONSTANT.tmp
Found broken examples in automatic/CONTACT_MATRIX.tmp
Found broken examples in automatic/CONTACT_MATRIX_PROPER.tmp
Found broken examples in automatic/COORDINATIONNUMBER.tmp
Found broken examples in automatic/DFSCLUSTERING.tmp
Found broken examples in automatic/DISTANCE_FROM_CONTOUR.tmp
Found broken examples in automatic/EDS.tmp
Found broken examples in automatic/EMMI.tmp
Found broken examples in automatic/ENVIRONMENTSIMILARITY.tmp
Found broken examples in automatic/FIND_CONTOUR.tmp
Found broken examples in automatic/FIND_CONTOUR_SURFACE.tmp
Found broken examples in automatic/FIND_SPHERICAL_CONTOUR.tmp
Found broken examples in automatic/FOURIER_TRANSFORM.tmp
Found broken examples in automatic/FUNCPATHGENERAL.tmp
Found broken examples in automatic/FUNCPATHMSD.tmp
Found broken examples in automatic/FUNNEL.tmp
Found broken examples in automatic/FUNNEL_PS.tmp
Found broken examples in automatic/GHBFIX.tmp
Found broken examples in automatic/GPROPERTYMAP.tmp
Found broken examples in automatic/HBOND_MATRIX.tmp
Found broken examples in automatic/INCLUDE.tmp
Found broken examples in automatic/INCYLINDER.tmp
Found broken examples in automatic/INENVELOPE.tmp
Found broken examples in automatic/INTERPOLATE_GRID.tmp
Found broken examples in automatic/LOCAL_AVERAGE.tmp
Found broken examples in automatic/MAZE_OPTIMIZER_BIAS.tmp
Found broken examples in automatic/MAZE_RANDOM_ACCELERATION_MD.tmp
Found broken examples in automatic/MAZE_SIMULATED_ANNEALING.tmp
Found broken examples in automatic/MAZE_STEERED_MD.tmp
Found broken examples in automatic/METATENSOR.tmp
Found broken examples in automatic/MULTICOLVARDENS.tmp
Found broken examples in automatic/OUTPUT_CLUSTER.tmp
Found broken examples in automatic/PAMM.tmp
Found broken examples in automatic/PCA.tmp
Found broken examples in automatic/PCAVARS.tmp
Found broken examples in automatic/PIV.tmp
Found broken examples in automatic/PLUMED.tmp
Found broken examples in automatic/PYCVINTERFACE.tmp
Found broken examples in automatic/PYTHONFUNCTION.tmp
Found broken examples in automatic/Q3.tmp
Found broken examples in automatic/Q4.tmp
Found broken examples in automatic/Q6.tmp
Found broken examples in automatic/QUATERNION.tmp
Found broken examples in automatic/SIZESHAPE_POSITION_LINEAR_PROJ.tmp
Found broken examples in automatic/SIZESHAPE_POSITION_MAHA_DIST.tmp
Found broken examples in automatic/SPRINT.tmp
Found broken examples in automatic/TETRAHEDRALPORE.tmp
Found broken examples in automatic/TORSIONS.tmp
Found broken examples in automatic/WHAM_WEIGHTS.tmp
Found broken examples in AnalysisPP.md
Found broken examples in CollectiveVariablesPP.md
Found broken examples in MiscelaneousPP.md
