diff --git a/.clang-format.ignore b/.clang-format.ignore
deleted file mode 100644
index ea50abb3..00000000
--- a/.clang-format.ignore
+++ /dev/null
@@ -1 +0,0 @@
-external/**
diff --git a/.git_archival.txt b/.git_archival.txt
new file mode 100644
index 00000000..0376d2ec
--- /dev/null
+++ b/.git_archival.txt
@@ -0,0 +1,5 @@
+node: $Format:%H$
+node-date: $Format:%cI$
+describe-name: $Format:%(describe:tags=true,match=*[0-9]*)$
+ref-names: $Format:%D$
+
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 00000000..d619e54d
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,2 @@
+.git_archival.txt export-subst
+
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 00000000..9fcab7c4
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,12 @@
+version: 2
+updates:
+ # Maintain dependencies for GitHub Actions
+ - package-ecosystem: "github-actions"
+ directory: "/"
+ schedule:
+ interval: "weekly"
+ ignore:
+ # cibuildwheel updates often and cannot be tracked by
+ # a major version tag like 'v1'. Mute updates since
+ # this is not a critical component
+ - dependency-name: "pypa/cibuildwheel*"
diff --git a/.github/environment.yml b/.github/environment.yml
index b60af515..bb7ccc01 100644
--- a/.github/environment.yml
+++ b/.github/environment.yml
@@ -2,14 +2,6 @@ name: cppe-gha
channels:
- conda-forge
dependencies:
- - cmake
- cxx-compiler
- eigen
- - h5py
- - ninja
- - numpy
- - pandas
- pip
- - pybind11
- - pytest
- - scipy
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 03c5bfc0..0600287b 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -10,19 +10,17 @@ on:
branches:
- master
-env:
- BUILD_TYPE: Release
-
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
+env:
+ FORCE_COLOR: 3
+
jobs:
build:
runs-on: ${{ matrix.os }}
- continue-on-error: true
-
strategy:
fail-fast: false
matrix:
@@ -32,84 +30,64 @@ jobs:
- windows-latest
python-version:
- "3.8"
- - "3.11"
+ - "3.12"
defaults:
run:
- shell: bash -l {0}
-
- steps:
- - uses: actions/checkout@v3
-
- # this will set the system compiler;
- # This must be done *before* setting up miniconda, see:
- # https://github.com/ilammy/msvc-dev-cmd/issues/34
- - name: Set Windows env
- if: matrix.os == 'windows-latest'
- uses: ilammy/msvc-dev-cmd@v1
+ shell: bash -el {0}
- - name: Cache conda
- uses: actions/cache@v3
- env:
- # Increase this value to reset cache if .github/environment.yml has not changed
- CACHE_NUMBER: 1
- with:
- path: ~/conda_pkgs_dir
- key:
- ${{ runner.os }}-conda-${{ env.CACHE_NUMBER }}-${{ hashFiles('.github/environment.yml') }}
+ env:
+ NINJA_STATUS: "[Built edge %f of %t in %e sec] "
- - uses: conda-incubator/setup-miniconda@v2
- with:
- mamba-version: "*"
- auto-update-conda: true
- auto-activate-base: false
- activate-environment: cppe-gha
- environment-file: .github/environment.yml
- channel-priority: strict
- python-version: ${{ matrix.python-version }}
-
- - name: Select CMake CLI options
- run: |
- echo "We are running on ${{ matrix.os }}"
- if [ "${{ matrix.os }}" == "ubuntu-latest" ]; then
- echo "CXX=g++" >> $GITHUB_ENV
- elif [ "${{ matrix.os }}" == "macos-latest" ]; then
- echo "CXX=clang++" >> $GITHUB_ENV
- else
- echo "CXX=clang-cl" >> $GITHUB_ENV
- fi
-
- - name: Configure
- run: |
- cmake -S. \
- -Bbuild \
- -GNinja \
- -DCMAKE_BUILD_TYPE=${{ env.BUILD_TYPE }} \
- -DCMAKE_CXX_COMPILER=${CXX} \
- -DCMAKE_INSTALL_PREFIX=~/Software
-
- - name: Build and install
- run: |
- cmake --build build --config ${{ env.BUILD_TYPE }} --target install -- -v -d stats
-
- - name: Test
- run: |
- python -m pip install git+https://gitlab.com/robertodr/polarizationsolver.git@master
-
- cd
-
- if [ "${{ matrix.os }}" == "windows-latest" ]; then
- export KMP_DUPLICATE_LIB_OK=TRUE
- export PYTHONPATH=Software/Lib/site-packages
- python -m pytest --capture=no --log-cli-level=INFO --color=yes --pyargs cppe
- else
- export PYTHONPATH=$PYTHONPATH:Software/lib/python${{ matrix.python-version }}/site-packages
- python -m pytest --capture=no --log-cli-level=INFO --color=yes --pyargs cppe
- fi
-
- # TODO move to separate workflow
- #- name: Build and Test Setuptools
- # run: |
- # rm -rf build
- # python setup.py install
- # py.test
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ fetch-depth: 0 # such that setuptools_scm can do its job correctly
+
+ # this will set the system compiler;
+ # This must be done *before* setting up miniconda, see:
+ # https://github.com/ilammy/msvc-dev-cmd/issues/34
+ - name: Set Windows env
+ if: matrix.os == 'windows-latest'
+ uses: ilammy/msvc-dev-cmd@v1
+ env:
+ KMP_DUPLICATE_LIB_OK: "TRUE"
+
+ - name: Cache conda
+ uses: actions/cache@v4
+ env:
+ # Increase this value to reset cache if .github/environment.yml has not changed
+ CACHE_NUMBER: 2
+ with:
+ path: ~/conda_pkgs_dir
+ key:
+ ${{ runner.os }}-conda-${{ env.CACHE_NUMBER }}-${{ hashFiles('.github/environment.yml') }}
+
+ - uses: conda-incubator/setup-miniconda@v3
+ with:
+ python-version: ${{ matrix.python-version }}
+ auto-update-conda: true
+ auto-activate-base: false
+ channels: conda-forge
+ channel-priority: true
+ activate-environment: cppe-gha
+ environment-file: .github/environment.yml
+
+ - name: Select CMake CLI options
+ run: |
+ echo "We are running on ${{ matrix.os }}"
+ if [ "${{ matrix.os }}" == "ubuntu-latest" ]; then
+ echo "SKBUILD_CMAKE_ARGS=-DCMAKE_CXX_COMPILER=g++;-GNinja" >> $GITHUB_ENV
+ elif [ "${{ matrix.os }}" == "macos-latest" ]; then
+ echo "SKBUILD_CMAKE_ARGS=-DCMAKE_CXX_COMPILER=clang++;-GNinja" >> $GITHUB_ENV
+ else
+ echo "SKBUILD_CMAKE_ARGS=-DCMAKE_CXX_COMPILER=clang-cl;-GNinja" >> $GITHUB_ENV
+ fi
+
+ - name: Configure, build, install
+ run: |
+ python -m pip install -v .[test]
+
+ - name: Test
+ run: |
+ python -m pytest -vvv --ignore=tests/test_fields.py --ignore=tests/test_functionality.py --ignore=tests/test_gradients.py --ignore=tests/test_solver.py
diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml
index 7653a670..a8c053c0 100644
--- a/.github/workflows/publish.yml
+++ b/.github/workflows/publish.yml
@@ -1,31 +1,140 @@
name: Publish package
on:
- push:
- branches: [master]
workflow_dispatch:
+ pull_request:
+ push:
+ branches:
+ - master
+ release:
+ types:
+ - published
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
+
+env:
+ FORCE_COLOR: 3
jobs:
- publish:
+ build_sdist:
+ name: Build source distribution
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
- - uses: actions/setup-python@v1
+ - uses: actions/checkout@v4
with:
- python-version: '3.7'
- - name: Install setuptools and wheel
- run: |
- python -m pip install --user setuptools wheel
- - name: Build a source tarball
+ fetch-depth: 0 # such that setuptools_scm can do its job correctly
+
+ - name: Build SDist
+ run: pipx run build --sdist
+
+ - uses: actions/upload-artifact@v4
+ with:
+ name: cibw-sdist
+ path: dist/*.tar.gz
+
+ generate-wheels-matrix:
+ name: Generate wheels matrix
+ runs-on: ubuntu-latest
+ outputs:
+ include: ${{ steps.set-matrix.outputs.include }}
+ steps:
+ - uses: actions/checkout@v4
+ - name: Install cibuildwheel
+ run: pipx install cibuildwheel==2.19.2
+ - id: set-matrix
run: |
- pip install pytest numpy h5py scipy pandas scikit-build pybind11
- pip install git+https://gitlab.com/reinholdt/polarizationsolver.git@master
- python setup.py install
- pytest --pyargs cppe
- python setup.py sdist
- - name: Publish distribution 📦 to Test PyPI
- if: startsWith(github.ref, 'refs/tags')
- uses: pypa/gh-action-pypi-publish@master
- with:
- user: __token__
- password: ${{ secrets.pypi_password }}
+ MATRIX=$(
+ {
+ cibuildwheel --print-build-identifiers --platform linux \
+ | jq -nRc '{"only": inputs, "os": "ubuntu-latest"}' \
+ && cibuildwheel --print-build-identifiers --platform macos \
+ | jq -nRc '{"only": inputs, "os": "macos-latest"}' \
+ && cibuildwheel --print-build-identifiers --platform windows \
+ | jq -nRc '{"only": inputs, "os": "windows-latest"}'
+ } | jq -sc
+ )
+ echo "Full build matrix"
+ echo "$MATRIX"
+ echo "include=$MATRIX" >> $GITHUB_OUTPUT
+ env:
+ CIBW_ARCHS_LINUX: x86_64 # aarch64 # skipping as h5py stopped providing pre-built wheels for aarch64 on PyPI
+ CIBW_ARCHS_MACOS: x86_64 arm64
+ CIBW_ARCHS_WINDOWS: AMD64
+ # skip musl builds
+ CIBW_SKIP: "*-musllinux_*"
+ # disable free-threaded support
+ CIBW_FREE_THREADED_SUPPORT: False
+ # exclude latest Python beta
+ CIBW_PRERELEASE_PYTHONS: False
+
+ build_wheels:
+ name: Build ${{ matrix.only }}
+ needs: generate-wheels-matrix
+ strategy:
+ matrix:
+ include: ${{ fromJson(needs.generate-wheels-matrix.outputs.include) }}
+ runs-on: ${{ matrix.os }}
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ fetch-depth: 0 # such that setuptools_scm can do its job correctly
+
+ - name: Set up QEMU
+ if: runner.os == 'Linux'
+ uses: docker/setup-qemu-action@v3
+ with:
+ platforms: all
+
+ # this will set the system compiler
+ - name: Set Windows env
+ if: matrix.os == 'windows-latest'
+ uses: ilammy/msvc-dev-cmd@v1
+
+ - uses: pypa/cibuildwheel@v2.19.2
+ env:
+ # skip testing PyPy builds
+ CIBW_TEST_SKIP: "pp*"
+
+ CIBW_ENVIRONMENT_LINUX: >
+ SKBUILD_CMAKE_ARGS="-DENABLE_ARCH_FLAGS=OFF;-DENABLE_OPENMP=OFF"
+
+ CIBW_ENVIRONMENT_MACOS: >
+ SKBUILD_CMAKE_ARGS="-DENABLE_ARCH_FLAGS=OFF;-DENABLE_OPENMP=OFF"
+
+ CIBW_ENVIRONMENT_WINDOWS: >
+ SKBUILD_CMAKE_ARGS="-GNinja;-DCMAKE_CXX_COMPILER=clang-cl;-DENABLE_ARCH_FLAGS=OFF;-DENABLE_OPENMP=OFF"
+ with:
+ only: ${{ matrix.only }}
+
+ - name: Verify clean directory
+ run: git diff --exit-code
+ shell: bash
+
+ - uses: actions/upload-artifact@v4
+ with:
+ name: cibw-wheels-${{ matrix.os }}-${{ strategy.job-index }}
+ path: ./wheelhouse/*.whl
+
+ upload_all:
+ name: Upload if release
+ needs:
+ - build_wheels
+ - build_sdist
+ environment: pypi
+ permissions:
+ id-token: write
+ runs-on: ubuntu-latest
+ #if: github.event_name == 'release' && github.event.action == 'published'
+
+ steps:
+ - uses: actions/download-artifact@v4
+ with:
+ pattern: cibw-*
+ path: dist
+ merge-multiple: true
+
+ - uses: pypa/gh-action-pypi-publish@release/v1
+ with:
+ repository-url: https://test.pypi.org/legacy/
diff --git a/.gitignore b/.gitignore
index c0cfe4d8..79d452a0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -42,3 +42,6 @@ __pycache__
*.egg-info
*.eggs
.ipynb_checkpoints/
+
+# autogenerated by setuptools-scm
+_version.py
diff --git a/CMakeLists.txt b/CMakeLists.txt
index eb9cca86..5a394d49 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -2,8 +2,32 @@
cmake_minimum_required(VERSION 3.16...3.20)
# project name
-project(cppe LANGUAGES CXX VERSION 0.3.2)
+project(cppe LANGUAGES CXX)
+if(NOT SKBUILD)
+ message(WARNING "\
+ This CMake file is meant to be executed using 'scikit-build'. Running
+ it directly will almost certainly not produce the desired result. If
+ you are a user trying to install this package, please use the command
+ below, which will install all necessary build dependencies, compile
+ the package in an isolated environment, and then install it.
+ =====================================================================
+ $ pip install .
+ =====================================================================
+ If you are a software developer, and this is your own package, then
+ it is usually much more efficient to install the build dependencies
+ in your environment once and use the following command that avoids
+ a costly creation of a new virtual environment at every compilation:
+ =====================================================================
+ $ pip install scikit-build-core[pyproject] cmake ninja pybind11 setuptools_scm
+ $ pip install --no-build-isolation -ve .
+ =====================================================================
+ You may optionally add -Ceditable.rebuild=true to auto-rebuild when
+ the package is imported. Otherwise, you need to re-run the above
+ after editing C++ files.")
+endif()
+
+# Try to import all Python components potentially needed by nanobind
# do not rebuild if rules (compiler flags) change
set(CMAKE_SKIP_RULE_DEPENDENCY TRUE)
@@ -47,7 +71,6 @@ endmacro()
option_with_print(ENABLE_OPENMP "Enables OpenMP parallelization" ON)
# included cmake modules
-include(${PROJECT_SOURCE_DIR}/cmake/downloaded/autocmake_default_build_paths.cmake)
include(${PROJECT_SOURCE_DIR}/cmake/downloaded/autocmake_safeguards.cmake)
include(${PROJECT_SOURCE_DIR}/cmake/compiler_flags/CXXFlags.cmake)
diff --git a/MANIFEST.in b/MANIFEST.in
deleted file mode 100644
index 3123c8ff..00000000
--- a/MANIFEST.in
+++ /dev/null
@@ -1,13 +0,0 @@
-# Exclude test stuff
-global-exclude test_*.py
-exclude tests
-exclude scripts
-
-include README.md
-include LICENSE*
-
-recursive-include cppe *.hh
-recursive-include cppe *.cc
-
-include external/eigen3/Eigen/*
-recursive-include external/eigen3/Eigen/src *.h
diff --git a/cmake/compiler_flags/CXXFlags.cmake b/cmake/compiler_flags/CXXFlags.cmake
index a84a8af2..71882281 100644
--- a/cmake/compiler_flags/CXXFlags.cmake
+++ b/cmake/compiler_flags/CXXFlags.cmake
@@ -64,6 +64,9 @@ if(ENABLE_ARCH_FLAGS)
set(ARCH_FLAG "-march=native")
endif()
endif()
+ if(CMAKE_CXX_COMPILER_ID MATCHES MSVC)
+ set(ARCH_FLAG "/arch:AVX2")
+ endif()
if(CMAKE_CXX_COMPILER_ID MATCHES Intel)
set(ARCH_FLAG "-xHost")
endif()
@@ -73,3 +76,4 @@ set(CPPE_CXX_FLAGS "")
include(${CMAKE_CURRENT_LIST_DIR}/GNU.CXX.cmake)
include(${CMAKE_CURRENT_LIST_DIR}/Intel.CXX.cmake)
include(${CMAKE_CURRENT_LIST_DIR}/Clang.CXX.cmake)
+include(${CMAKE_CURRENT_LIST_DIR}/MSVC.CXX.cmake)
diff --git a/cmake/compiler_flags/MSVC.CXX.cmake b/cmake/compiler_flags/MSVC.CXX.cmake
new file mode 100644
index 00000000..df065da2
--- /dev/null
+++ b/cmake/compiler_flags/MSVC.CXX.cmake
@@ -0,0 +1,6 @@
+if(CMAKE_CXX_COMPILER_ID MATCHES MSVC)
+ set(CPPE_CXX_FLAGS "/W3 /EHsc /bigobj")
+ set(CMAKE_CXX_FLAGS_RELEASE "/O2")
+ set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "/O2")
+ set(CMAKE_CXX_FLAGS_DEBUG "/Od /W4")
+endif()
diff --git a/cmake/downloaded/autocmake_default_build_paths.cmake b/cmake/downloaded/autocmake_default_build_paths.cmake
deleted file mode 100644
index 9000882f..00000000
--- a/cmake/downloaded/autocmake_default_build_paths.cmake
+++ /dev/null
@@ -1,19 +0,0 @@
-# (c) https://github.com/dev-cafe/autocmake/blob/master/AUTHORS.md
-# licensed under BSD-3: https://github.com/dev-cafe/autocmake/blob/master/LICENSE
-
-#.rst:
-#
-# Sets binary and library output directories to ${CMAKE_BINARY_DIR}/${CMAKE_INSTALL_BINDIR}
-# and ${CMAKE_BINARY_DIR}/${CMAKE_INSTALL_LIBDIR}, respectively.
-#
-# Variables modified::
-#
-# CMAKE_ARCHIVE_OUTPUT_DIRECTORY
-# CMAKE_LIBRARY_OUTPUT_DIRECTORY
-# CMAKE_RUNTIME_OUTPUT_DIRECTORY
-
-include(GNUInstallDirs)
-
-set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/${CMAKE_INSTALL_LIBDIR})
-set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/${CMAKE_INSTALL_LIBDIR})
-set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/${CMAKE_INSTALL_BINDIR})
diff --git a/pyproject.toml b/pyproject.toml
index 93976541..27f3508c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,3 +1,107 @@
[build-system]
-requires = ["setuptools", "wheel", "scikit-build", "cmake", "ninja"]
+requires = ["scikit-build-core >=0.8", "pybind11"]
+build-backend = "scikit_build_core.build"
+[project]
+name = "cppe"
+dynamic = ["version"]
+description = "C++ and Python library for Polarizable Embedding"
+readme = "README.md"
+requires-python = ">=3.8"
+authors = [
+ { name = "Maximilian Scheurer", email = "maximilian.scheurer@iwr.uni-heidelberg.de" },
+ { name = "Peter Reinholdt" },
+ { name = "Michael F. Herbst" },
+ { name = "Lori A. Burns" },
+]
+classifiers = [
+ "Development Status :: 5 - Production/Stable",
+ "License :: OSI Approved :: GNU Lesser",
+ "General Public License v3 (LGPLv3)",
+ "License :: Free For Educational Use",
+ "Intended Audience :: Science/Research",
+ "Topic :: Education",
+ "Operating System :: OS Independent",
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3 :: Only",
+ "Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
+ "Programming Language :: Python :: 3.10",
+ "Programming Language :: Python :: 3.11",
+ "Topic :: Scientific/Engineering :: Chemistry",
+]
+
+[project.optional-dependencies]
+test = [
+ "pytest",
+ "numpy",
+ "h5py",
+ "scipy",
+ "pandas",
+ "polarizationsolver@git+https://gitlab.com/robertodr/polarizationsolver.git",
+]
+
+[project.urls]
+Homepage = "https://github.com/maxscheurer/cppe"
+Source = "https://github.com/maxscheurer/cppe"
+"Bug Tracker" = "https://github.com/maxscheurer/cppe/issues"
+Changelog = "https://github.com/org/maxscheurer/releases"
+
+[tool.scikit-build]
+# Protect the configuration against future changes in scikit-build-core
+minimum-version = "0.8"
+
+# Setuptools-style build caching in a local directory
+build-dir = "build/{wheel_tag}"
+
+# Build stable ABI wheels for CPython 3.12+
+wheel.py-api = "cp312"
+
+cmake.verbose = true
+cmake.version = ">=3.16"
+ninja.make-fallback = false
+logging.level = "INFO"
+metadata.version.provider = "scikit_build_core.metadata.setuptools_scm"
+sdist.cmake = true
+sdist.exclude = [".github", "scripts"]
+sdist.include = [
+ "src/cppe/_version.py",
+ "eigen3",
+]
+
+[tool.setuptools_scm]
+write_to = "src/cppe/_version.py"
+fallback_version = "0.3.2"
+
+[tool.pytest.ini_options]
+minversion = "6.0"
+addopts = ["-ra", "--showlocals", "--strict-markers", "--strict-config"]
+xfail_strict = true
+filterwarnings = ["error"]
+log_cli_level = "INFO"
+testpaths = [
+ "tests",
+]
+
+[tool.cibuildwheel]
+# Necessary to see build output from the actual compilation
+build-verbosity = 1
+
+# Run pytest to ensure that the package was correctly built
+test-command = "pytest {project}/tests"
+test-requires = [
+ "pytest",
+ "numpy",
+ "h5py",
+ "scipy",
+ "pandas",
+ "polarizationsolver@git+https://gitlab.com/robertodr/polarizationsolver.git",
+]
+
+[tool.cibuildwheel.environment]
+NINJA_STATUS = "[Built edge %f of %t in %e sec] "
+
+# Needed for full C++17 support
+[tool.cibuildwheel.macos.environment]
+MACOSX_DEPLOYMENT_TARGET = "10.14"
diff --git a/requirements.txt b/requirements.txt
deleted file mode 100644
index 3a396883..00000000
--- a/requirements.txt
+++ /dev/null
@@ -1,2 +0,0 @@
---index-url https://pypi.python.org/simple/
--e .
diff --git a/setup.cfg b/setup.cfg
deleted file mode 100644
index 2f03264c..00000000
--- a/setup.cfg
+++ /dev/null
@@ -1,10 +0,0 @@
-[bumpversion]
-current_version = 0.3.2
-commit = True
-tag = True
-
-[bumpversion:file:CMakeLists.txt]
-
-[bumpversion:file:src/metadata.cc]
-
-[bumpversion:file:setup.py]
diff --git a/setup.py b/setup.py
deleted file mode 100755
index 5ad80009..00000000
--- a/setup.py
+++ /dev/null
@@ -1,199 +0,0 @@
-#!/usr/bin/env python3
-
-"""Setup for cppe"""
-import sys
-import glob
-import setuptools
-
-from setuptools import Extension, setup
-from setuptools.command.build_ext import build_ext as BuildCommand
-
-try:
- from sphinx.setup_command import BuildDoc as BuildSphinxDoc
-except ImportError:
- # No sphinx found -> make a dummy class
- class BuildSphinxDoc(setuptools.Command):
- user_options = []
-
- def initialize_options(self):
- pass
-
- def finalize_options(self):
- pass
-
-
-__version__ = "0.3.2"
-
-
-def strip_readme():
- with open("README.md") as fp:
- return "".join([line for line in fp if not line.startswith("= 2.2"],
- install_requires=["pybind11 >= 2.2"],
- tests_require=[
- "pytest", "numpy", "h5py", "scipy", "pandas"
- ],
- # extras_require={
- # "build_docs": ["sphinx>=2", "breathe", "sphinxcontrib-bibtex",
- # "sphinx-automodapi"],
- # },
- cmdclass={"build_ext": BuildExt,
- # "build_docs": BuildDocs,
- },
-)
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 6c9b7203..ad3a8d40 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -1,13 +1,3 @@
-# define object library for sources common to C library and Python bindings
-add_library(cppe-objs
- OBJECT
- metadata.cc
- )
-
-# create empty list of CPPE private header files
-add_subdirectory(utils)
-add_subdirectory(core)
-
# OpenMP
if(ENABLE_OPENMP)
find_package(OpenMP COMPONENTS CXX)
@@ -16,11 +6,21 @@ if(ENABLE_OPENMP)
endif()
endif()
+# define object library for sources
+add_library(cppe-objs
+ OBJECT
+ ""
+ )
+
+# create empty list of CPPE private header files
+add_subdirectory(utils)
+add_subdirectory(core)
+
# report on compiler flags in use
message(STATUS "Configuring a ${CMAKE_BUILD_TYPE} build")
string(TOUPPER ${CMAKE_BUILD_TYPE} _cmake_build_type_upper)
-message(STATUS "C++ compiler flags")
+message(STATUS "Compiler flags for ${CMAKE_CXX_COMPILER_ID}")
message(STATUS " From environment : ${CMAKE_CXX_FLAGS}")
message(STATUS " Build-type-specific : ${CMAKE_CXX_FLAGS_${_cmake_build_type_upper}}")
message(STATUS " Vectorization flag : ${ARCH_FLAG}")
@@ -29,14 +29,13 @@ message(STATUS " User-appended : ${EXTRA_CXXFLAGS}")
message(STATUS " OpenMP parallelization : ${OpenMP_CXX_FLAGS}")
# transform CPPE_CXX_FLAGS and EXTRA_CXXFLAGS to ;-separated lists
-string(REPLACE " " ";" CPPE_CXX_FLAGS ${CPPE_CXX_FLAGS})
+if(DEFINED CPPE_CXX_FLAGS)
+ string(REPLACE " " ";" CPPE_CXX_FLAGS ${CPPE_CXX_FLAGS})
+endif()
if(DEFINED EXTRA_CXXFLAGS)
string(REPLACE " " ";" EXTRA_CXXFLAGS ${EXTRA_CXXFLAGS})
endif()
-# Eigen3
-find_package(Eigen3 3.4 CONFIG REQUIRED)
-
# flags to prepend
target_compile_options(cppe-objs
BEFORE
@@ -60,13 +59,40 @@ if(WIN32)
)
endif()
+# Eigen3
+find_package(Eigen3 3.4 CONFIG QUIET)
+if(TARGET Eigen3::Eigen)
+ message(STATUS "Using Eigen3: ${EIGEN3_ROOT_DIR} (version ${Eigen3_VERSION})")
+else()
+ message(STATUS "Suitable Eigen3 could not be located. Fetching and building!")
+ include(FetchContent)
+
+ if(NOT SKBUILD_STATE STREQUAL "sdist"
+ AND EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/eigen3/CMakeLists.txt")
+ message(STATUS "Using integrated Eigen3")
+ set(FETCHCONTENT_FULLY_DISCONNECTED ON)
+ endif()
+
+ FetchContent_Declare(eigen3
+ QUIET
+ URL
+ https://gitlab.com/libeigen/eigen/-/archive/3.4.0/eigen-3.4.0.tar.gz
+ SOURCE_DIR
+ ${CMAKE_CURRENT_SOURCE_DIR}/eigen3
+ )
+
+ set(CMAKE_POLICY_DEFAULT_CMP0077 NEW)
+ set(BUILD_TESTING OFF)
+ set(CMAKE_C_COMPILER_WORKS ON)
+ set(CMAKE_Fortran_COMPILER_WORKS ON)
+ FetchContent_MakeAvailable(eigen3)
+endif()
+
+
target_link_libraries(cppe-objs
PUBLIC
Eigen3::Eigen
$<$,$>:OpenMP::OpenMP_CXX>
)
-# standard locations of libraries, headers, etc.
-include(GNUInstallDirs)
-
add_subdirectory(python_iface)
diff --git a/src/core/bmatrix.cc b/src/core/bmatrix.cc
index 9936bdfc..47be0e86 100644
--- a/src/core/bmatrix.cc
+++ b/src/core/bmatrix.cc
@@ -13,7 +13,7 @@ Eigen::VectorXd BMatrix::apply(const Eigen::VectorXd& induced_moments) {
int order = m_options.tree_expansion_order;
double theta = m_options.theta;
std::vector S(3 * m_n_polsites);
- for (int i = 0; i < m_n_polsites; ++i) {
+ for (decltype(m_n_polsites) i = 0; i < m_n_polsites; ++i) {
int l = i * 3;
Eigen::Vector3d s = induced_moments.segment<3>(l);
S[i * 3 + 0] = s(0);
@@ -42,7 +42,7 @@ Eigen::VectorXd BMatrix::apply(const Eigen::VectorXd& induced_moments) {
Eigen::VectorXd BMatrix::apply_diagonal_inverse(const Eigen::VectorXd& in) {
Eigen::VectorXd ret = Eigen::VectorXd::Zero(3 * m_n_polsites);
#pragma omp parallel for
- for (int i = 0; i < m_n_polsites; ++i) {
+ for (decltype(m_n_polsites) i = 0; i < m_n_polsites; ++i) {
int l = i * 3;
Polarizability& alpha_i = m_polsites[i].get_polarizability();
ret.segment<3>(l) = alpha_i.get_matrix() * in.segment<3>(l);
@@ -54,7 +54,7 @@ Eigen::VectorXd BMatrix::apply_diagonal(const Eigen::VectorXd& in) {
Eigen::VectorXd ret = Eigen::VectorXd::Zero(3 * m_n_polsites);
#pragma omp parallel for
- for (int i = 0; i < m_n_polsites; ++i) {
+ for (decltype(m_n_polsites) i = 0; i < m_n_polsites; ++i) {
int l = i * 3;
ret.segment<3>(l) = m_alpha_inverse[i] * in.segment<3>(l);
}
@@ -64,10 +64,10 @@ Eigen::VectorXd BMatrix::apply_diagonal(const Eigen::VectorXd& in) {
Eigen::MatrixXd BMatrix::to_dense_matrix() {
Eigen::MatrixXd B = Eigen::MatrixXd::Zero(m_n_polsites * 3, m_n_polsites * 3);
#pragma omp parallel for
- for (int i = 0; i < m_n_polsites; ++i) {
+ for (decltype(m_n_polsites) i = 0; i < m_n_polsites; ++i) {
int l = i * 3;
Potential& pot1 = m_polsites[i];
- for (int j = 0; j < m_n_polsites; ++j) {
+ for (decltype(m_n_polsites) j = 0; j < m_n_polsites; ++j) {
int m = j * 3;
Potential& pot2 = m_polsites[j];
if (pot1.excludes_site(pot2.index) || i == j) continue;
diff --git a/src/core/bmatrix.hh b/src/core/bmatrix.hh
index 8839dcc3..ad88279e 100644
--- a/src/core/bmatrix.hh
+++ b/src/core/bmatrix.hh
@@ -27,7 +27,7 @@ class BMatrix {
return p.get_polarizability().get_matrix().inverse();
});
#pragma omp parallel for
- for (int i = 0; i < m_n_polsites; ++i) {
+ for (decltype(m_n_polsites) i = 0; i < m_n_polsites; ++i) {
Potential& pot1 = m_polsites[i];
m_positions[i * 3 + 0] = pot1.m_x;
m_positions[i * 3 + 1] = pot1.m_y;
diff --git a/src/core/cppe_state.cc b/src/core/cppe_state.cc
index 698fac62..dfe0e681 100644
--- a/src/core/cppe_state.cc
+++ b/src/core/cppe_state.cc
@@ -40,14 +40,14 @@ void CppeState::set_potentials(std::vector potentials) {
m_positions = Eigen::MatrixXd::Zero(m_potentials.size(), 3);
m_positions_polarizable = Eigen::MatrixXd::Zero(m_polarizable_sites, 3);
- for (int i = 0; i < m_potentials.size(); ++i) {
+ for (size_t i = 0; i < m_potentials.size(); ++i) {
m_positions(i, 0) = m_potentials[i].m_x;
m_positions(i, 1) = m_potentials[i].m_y;
m_positions(i, 2) = m_potentials[i].m_z;
}
auto m_potentials_polarizable = get_polarizable_sites(m_potentials);
- for (int i = 0; i < m_polarizable_sites; ++i) {
+ for (auto i = 0; i < m_polarizable_sites; ++i) {
m_positions_polarizable(i, 0) = m_potentials_polarizable[i].m_x;
m_positions_polarizable(i, 1) = m_potentials_polarizable[i].m_y;
m_positions_polarizable(i, 2) = m_potentials_polarizable[i].m_z;
@@ -114,13 +114,13 @@ Eigen::MatrixXd CppeState::induced_moments_eef() {
Eigen::MatrixXd ret = Eigen::MatrixXd::Zero(m_polarizable_sites * 3, 3);
Eigen::MatrixXd Fdn = Eigen::MatrixXd::Zero(m_polarizable_sites * 3, 3);
- for (int s = 0; s < m_polarizable_sites; ++s) {
+ for (auto s = 0; s < m_polarizable_sites; ++s) {
int l = 3 * s;
Fdn(l, 0) = 1;
Fdn(l + 1, 1) = 1;
Fdn(l + 2, 2) = 1;
}
- for (int a = 0; a < 3; ++a) {
+ for (auto a = 0; a < 3; ++a) {
Eigen::VectorXd ind_mom = Eigen::VectorXd::Zero(m_polarizable_sites * 3);
ret.col(a) = ind.compute(Fdn.col(a), ind_mom, true);
}
diff --git a/src/core/electric_fields.cc b/src/core/electric_fields.cc
index fd8d2e5f..7b67ba4b 100644
--- a/src/core/electric_fields.cc
+++ b/src/core/electric_fields.cc
@@ -38,9 +38,9 @@ Eigen::VectorXd multipole_derivative(int k, int l, const Eigen::Vector3d& Rji,
for (z = k + l; z > -1; z--) {
if (x + y + z != k + l) continue;
i = xyz2idx(x, y, z);
- for (int a = x; a > -1; a--) {
- for (int b = y; b > -1; b--) {
- for (int c = z; c > -1; c--) {
+ for (auto a = x; a > -1; a--) {
+ for (auto b = y; b > -1; b--) {
+ for (auto c = z; c > -1; c--) {
if (a + b + c != k) continue;
j = xyz2idx(a, b, c);
m = xyz2idx(x - a, y - b, z - c);
@@ -58,7 +58,7 @@ Eigen::VectorXd multipole_derivative(int k, int l, const Eigen::Vector3d& Rji,
Eigen::VectorXd NuclearFields::compute() {
Eigen::VectorXd nuc_fields = Eigen::VectorXd::Zero(3 * m_n_polsites);
#pragma omp parallel for
- for (size_t i = 0; i < m_n_polsites; i++) {
+ for (auto i = 0; i < m_n_polsites; i++) {
size_t site_counter = 3 * i;
Potential& potential = m_polsites[i];
Eigen::Vector3d site_position = potential.get_site_position();
@@ -78,11 +78,11 @@ Eigen::MatrixXd NuclearFields::nuclear_gradient() {
int natoms = m_mol.size();
Eigen::MatrixXd grad = Eigen::MatrixXd::Zero(3 * natoms, 3 * m_n_polsites);
#pragma omp parallel for
- for (size_t i = 0; i < m_n_polsites; i++) {
+ for (auto i = 0; i < m_n_polsites; i++) {
size_t site_counter = 3 * i;
Potential& potential = m_polsites[i];
Eigen::Vector3d site_position = potential.get_site_position();
- for (int ai = 0; ai < natoms; ++ai) {
+ for (auto ai = 0; ai < natoms; ++ai) {
auto& atom = m_mol[ai];
Eigen::Vector3d core_position = atom.get_position();
Eigen::Vector3d diff = site_position - core_position;
@@ -120,7 +120,7 @@ Eigen::VectorXd MultipoleFields::compute() {
}
int max_order = 0;
- for (int i = 0; i < n_sites; ++i) {
+ for (auto i = 0; i < n_sites; ++i) {
int max_multipole_order = m_potentials[i].max_multipole_order();
if (max_multipole_order > max_order) {
max_order = max_multipole_order;
@@ -210,7 +210,7 @@ Eigen::VectorXd InducedMoments::compute(const Eigen::VectorXd& rhs, Eigen::Vecto
std::vector x{x0};
std::vector r{r0};
std::vector z{z0};
- for (int k = 0; k < m_options.maxiter; ++k) {
+ for (auto k = 0; k < m_options.maxiter; ++k) {
Eigen::VectorXd Ap = bmat.apply(p);
alpha_k = r[k].dot(z[k]) / p.dot(Ap);
x_k1 = x[k] + alpha_k * p;
diff --git a/src/core/fmm/calculate.cc b/src/core/fmm/calculate.cc
index a4b4950b..3d6d2342 100644
--- a/src/core/fmm/calculate.cc
+++ b/src/core/fmm/calculate.cc
@@ -14,7 +14,7 @@ using namespace libcppe;
void M_sanity_check(const std::vector& cells) {
double M0 = 0;
- for (size_t c = 1; c < cells.size(); c++) {
+ for (size_t c = 1; c < cells.size(); c++) {
if (cells[c].nchild == 0) {
M0 += cells[c].M[0];
}
@@ -31,9 +31,9 @@ void P2P_Cells(size_t A, size_t B, std::vector& cells,
std::vector& particles, double* F) {
// A - target
// B - source
- for (size_t p1 = 0; p1 < cells[A].nleaf; p1++) {
+ for (size_t p1 = 0; p1 < cells[A].nleaf; p1++) {
size_t l1 = cells[A].leaf[p1];
- for (size_t p2 = 0; p2 < cells[B].nleaf; p2++) {
+ for (size_t p2 = 0; p2 < cells[B].nleaf; p2++) {
size_t l2 = cells[B].leaf[p2];
if (l2 != l1 && !particles[l1].excludes_particle(l2)) {
double dx = (particles[l1].r[0] - particles[l2].r[0]);
@@ -50,9 +50,9 @@ void P2P_Cells_damping(size_t A, size_t B, std::vector& cells,
std::vector& particles, double* F, double damping) {
// A - target
// B - source
- for (size_t p1 = 0; p1 < cells[A].nleaf; p1++) {
+ for (size_t p1 = 0; p1 < cells[A].nleaf; p1++) {
size_t l1 = cells[A].leaf[p1];
- for (size_t p2 = 0; p2 < cells[B].nleaf; p2++) {
+ for (size_t p2 = 0; p2 < cells[B].nleaf; p2++) {
size_t l2 = cells[B].leaf[p2];
if (l2 != l1 && !particles[l1].excludes_particle(l2)) {
double dx = (particles[l1].r[0] - particles[l2].r[0]);
@@ -104,7 +104,7 @@ void interact_dehnen_lazy(const size_t A, const size_t B, const std::vector= cells[B].rmax && cells[A].nchild != 0)) {
- for (int oa = 0; oa < 8; oa++) {
+ for (auto oa = 0; oa < 8; oa++) {
// For all 8 children of A, if child exists
if (cells[A].nchild & (1 << oa)) {
int a = cells[A].child[oa];
@@ -115,7 +115,7 @@ void interact_dehnen_lazy(const size_t A, const size_t B, const std::vector& particles, std::vector& cells, si
size_t ncrit, size_t exporder) {
int sourcesize = multipole_components(m_order);
#pragma omp for
- for (size_t c = 0; c < cells.size(); c++) {
+ for (auto c = 0; c < cells.size(); c++) {
// std::cout << "Cell " << c << std::endl;
// std::cout << " Msize = " << Msize(exporder, FMMGEN_SOURCEORDER) << std::endl;
size_t msize = Msize(exporder, m_order);
double* M = new double[msize]();
if (cells[c].nleaf < ncrit) {
- for (size_t i = 0; i < cells[c].nleaf; i++) {
+ for (size_t i = 0; i < cells[c].nleaf; i++) {
size_t l = cells[c].leaf[i];
// Walter dehnen's definition:
// (-1)^m / m! (x_a - z_a)^m
double dx = (cells[c].x - particles[l].r[0]);
double dy = (cells[c].y - particles[l].r[1]);
double dz = (cells[c].z - particles[l].r[2]);
- for (int k = 0; k < sourcesize; k++) {
+ for (auto k = 0; k < sourcesize; k++) {
// std::cout << particles[l].S[k] << std::endl;
M[k] = particles[l].S[k];
}
@@ -174,7 +174,7 @@ void evaluate_M2M(std::vector& particles, std::vector& cells,
// Dehnen definition:
// M_m(z_p) = (z_p - z_c)^n / n! M_{m - n}
- for (size_t c = cells.size() - 1; c > 0; c--) {
+ for (auto c = cells.size() - 1; c > 0; c--) {
size_t p = cells[c].parent;
double dx = (cells[p].x - cells[c].x);
double dy = (cells[p].y - cells[c].y);
@@ -187,7 +187,7 @@ template
void evaluate_M2L_lazy(std::vector& cells,
std::vector>& M2L_list, size_t order) {
#pragma omp for
- for (size_t i = 0; i < M2L_list.size(); i++) {
+ for (auto i = 0; i < M2L_list.size(); i++) {
size_t B = M2L_list[i].first;
size_t A = M2L_list[i].second;
// Dehnen definition:
@@ -209,14 +209,14 @@ void evaluate_P2P_lazy(std::vector& cells, std::vector& particle
if (damping > 0.0) {
#pragma omp for
- for (size_t i = 0; i < P2P_list.size(); i++) {
+ for (auto i = 0; i < P2P_list.size(); i++) {
size_t A = P2P_list[i].first;
size_t B = P2P_list[i].second;
P2P_Cells_damping(A, B, cells, particles, F, damping);
}
} else {
#pragma omp for
- for (size_t i = 0; i < P2P_list.size(); i++) {
+ for (auto i = 0; i < P2P_list.size(); i++) {
size_t A = P2P_list[i].first;
size_t B = P2P_list[i].second;
P2P_Cells(A, B, cells, particles, F);
@@ -228,8 +228,8 @@ template
void evaluate_L2L(std::vector& cells, size_t exporder) {
// Can't currently go down the tree in parallel!
// needs to be recursive or summing not correct.
- for (size_t p = 0; p < cells.size(); p++) {
- for (int octant = 0; octant < 8; octant++) {
+ for (auto p = 0; p < cells.size(); p++) {
+ for (auto octant = 0; octant < 8; octant++) {
if (cells[p].nchild & (1 << octant)) {
// for child c in cell p
size_t c = cells[p].child[octant];
@@ -246,9 +246,9 @@ template
void evaluate_L2P(std::vector& particles, std::vector& cells, double* F,
size_t ncrit, size_t exporder) {
#pragma omp for schedule(runtime)
- for (size_t i = 0; i < cells.size(); i++) {
+ for (auto i = 0; i < cells.size(); i++) {
if (cells[i].nleaf < ncrit) {
- for (size_t p = 0; p < cells[i].nleaf; p++) {
+ for (auto p = 0; p < cells[i].nleaf; p++) {
size_t k = cells[i].leaf[p];
double dx = particles[k].r[0] - cells[i].x;
double dy = particles[k].r[1] - cells[i].y;
@@ -263,8 +263,8 @@ template
void evaluate_direct(std::vector& particles, double* F) {
int n = particles.size();
#pragma omp parallel for schedule(runtime)
- for (size_t i = 0; i < n; i++) {
- for (size_t j = 0; j < n; j++) {
+ for (auto i = 0; i < n; i++) {
+ for (auto j = 0; j < n; j++) {
if (i != j && !particles[i].excludes_particle(j)) {
double dx = particles[i].r[0] - particles[j].r[0];
double dy = particles[i].r[1] - particles[j].r[1];
@@ -280,8 +280,8 @@ void evaluate_direct_damping(std::vector& particles, double* F,
double damping) {
int n = particles.size();
#pragma omp parallel for schedule(runtime)
- for (size_t i = 0; i < n; i++) {
- for (size_t j = 0; j < n; j++) {
+ for (auto i = 0; i < n; i++) {
+ for (auto j = 0; j < n; j++) {
if (i != j && !particles[i].excludes_particle(j)) {
double dx = particles[i].r[0] - particles[j].r[0];
double dy = particles[i].r[1] - particles[j].r[1];
@@ -397,4 +397,4 @@ void evaluate_M2P_and_P2P(std::vector& particles, unsigned int p,
INSTANTIATE(0, 3)
INSTANTIATE(1, 3)
-INSTANTIATE(2, 3)
\ No newline at end of file
+INSTANTIATE(2, 3)
diff --git a/src/core/fmm/tree.cc b/src/core/fmm/tree.cc
index 4c9e9ebf..69351fb0 100644
--- a/src/core/fmm/tree.cc
+++ b/src/core/fmm/tree.cc
@@ -63,20 +63,20 @@ Cell::Cell(Cell&& other) {
}
void printTreeParticles(std::vector& cells, size_t cell, size_t depth) {
- for (size_t i = 0; i < depth; i++) {
+ for (auto i = 0; i < depth; i++) {
std::cout << " ";
}
std::cout << cell << " (" << cells[cell].x << "," << cells[cell].y << ","
<< cells[cell].z << ") : (";
size_t nchild = 0;
- for (size_t octant = 0; octant < 8; octant++) {
+ for (auto octant = 0; octant < 8; octant++) {
if (cells[cell].nchild & (1 << octant)) {
nchild += 1;
}
}
if (nchild == 0) {
- for (size_t i = 0; i < cells[cell].nleaf; i++) {
+ for (auto i = 0; i < cells[cell].nleaf; i++) {
std::cout << cells[cell].leaf[i];
if (i != (cells[cell].nleaf - 1)) {
std::cout << ",";
@@ -84,7 +84,7 @@ void printTreeParticles(std::vector& cells, size_t cell, size_t depth) {
}
}
std::cout << ")" << std::endl;
- for (size_t octant = 0; octant < 8; octant++) {
+ for (auto octant = 0; octant < 8; octant++) {
if (cells[cell].nchild & (1 << octant)) {
printTreeParticles(cells, cells[cell].child[octant], depth + 1);
}
@@ -114,7 +114,7 @@ void split_cell(std::vector& cells, std::vector& particles, size
// Do not change octant to size_t - otherwise the calculation
// of x, y, z position in add_child is not correct!
int octant;
- for (size_t i = 0; i < cells[p].leaf.size(); i++) {
+ for (auto i = 0; i < cells[p].leaf.size(); i++) {
l = cells[p].leaf[i];
octant = (particles[l].r[0] > cells[p].x) + ((particles[l].r[1] > cells[p].y) << 1) +
((particles[l].r[2] > cells[p].z) << 2);
@@ -139,7 +139,7 @@ std::shared_ptr> build_shared_tree(
int nparticles = potentials.size();
std::vector particles(nparticles);
bool damping_enabled = damping > 0.0;
- for (size_t i = 0; i < nparticles; i++) {
+ for (auto i = 0; i < nparticles; i++) {
particles[i].r = potentials[i].ptr_position();
particles[i].S = &S[sourcesize * i];
particles[i].exclusions = potentials[i].get_exclusions();
@@ -157,7 +157,7 @@ std::shared_ptr> build_shared_tree(
double xavg = 0;
double yavg = 0;
double zavg = 0;
- for (size_t i = 0; i < particles.size(); i++) {
+ for (auto i = 0; i < particles.size(); i++) {
xavg += particles[i].r[0];
yavg += particles[i].r[1];
zavg += particles[i].r[2];
@@ -174,7 +174,7 @@ std::shared_ptr> build_shared_tree(
double ymax = 0;
double zmax = 0;
- for (size_t i = 0; i < particles.size(); i++) {
+ for (auto i = 0; i < particles.size(); i++) {
double x = std::abs(particles[i].r[0] - xavg);
double y = std::abs(particles[i].r[1] - yavg);
double z = std::abs(particles[i].r[2] - zavg);
@@ -189,7 +189,7 @@ std::shared_ptr> build_shared_tree(
auto root = Cell(xavg, yavg, zavg, r, 0, order, 0, ncrit);
cells.push_back(root);
- for (size_t i = 0; i < particles.size(); i++) {
+ for (auto i = 0; i < particles.size(); i++) {
curr = 0;
while (cells[curr].nleaf >= ncrit) {
cells[curr].nleaf += 1;
@@ -229,7 +229,7 @@ std::shared_ptr> build_shared_tree(
// Create memory into which each cell can point for the multipole arrays.
tree->M.resize(tree->cells.size() * Msize(order, m_order), 0.0);
tree->L.resize(tree->cells.size() * Lsize(order, m_order), 0.0);
- for (size_t i = 0; i < tree->cells.size(); i++) {
+ for (auto i = 0; i < tree->cells.size(); i++) {
tree->cells[i].M = &tree->M[i * Msize(order, m_order)];
tree->cells[i].L = &tree->L[i * Lsize(order, m_order)];
}
@@ -243,7 +243,7 @@ std::shared_ptr> build_shared_tree(
int sourcesize = multipole_components(m_order);
// Create particles list for convenience
std::vector particles(nparticles);
- for (size_t i = 0; i < nparticles; i++) {
+ for (auto i = 0; i < nparticles; i++) {
particles[i].r = &pos[3 * i];
particles[i].S = &S[sourcesize * i];
particles[i].exclusions = exclusion_lists[i];
@@ -258,7 +258,7 @@ std::shared_ptr> build_shared_tree(
double xavg = 0;
double yavg = 0;
double zavg = 0;
- for (size_t i = 0; i < particles.size(); i++) {
+ for (auto i = 0; i < particles.size(); i++) {
xavg += particles[i].r[0];
yavg += particles[i].r[1];
zavg += particles[i].r[2];
@@ -275,7 +275,7 @@ std::shared_ptr> build_shared_tree(
double ymax = 0;
double zmax = 0;
- for (size_t i = 0; i < particles.size(); i++) {
+ for (auto i = 0; i < particles.size(); i++) {
double x = std::abs(particles[i].r[0] - xavg);
double y = std::abs(particles[i].r[1] - yavg);
double z = std::abs(particles[i].r[2] - zavg);
@@ -290,7 +290,7 @@ std::shared_ptr> build_shared_tree(
auto root = Cell(xavg, yavg, zavg, r, 0, order, 0, ncrit);
cells.push_back(root);
- for (size_t i = 0; i < particles.size(); i++) {
+ for (auto i = 0; i < particles.size(); i++) {
curr = 0;
while (cells[curr].nleaf >= ncrit) {
cells[curr].nleaf += 1;
@@ -329,7 +329,7 @@ std::shared_ptr> build_shared_tree(
// Create memory into which each cell can point for the multipole arrays.
tree->M.resize(tree->cells.size() * Msize(order, m_order), 0.0);
tree->L.resize(tree->cells.size() * Lsize(order, m_order), 0.0);
- for (size_t i = 0; i < tree->cells.size(); i++) {
+ for (auto i = 0; i < tree->cells.size(); i++) {
tree->cells[i].M = &tree->M[i * Msize(order, m_order)];
tree->cells[i].L = &tree->L[i * Lsize(order, m_order)];
}
@@ -351,7 +351,7 @@ void Tree::set_sources(double* S) {
int sourcesize = multipole_components(m_order);
clear_M();
clear_L();
- for (size_t i = 0; i < particles.size(); i++) {
+ for (auto i = 0; i < particles.size(); i++) {
particles[i].S = &S[sourcesize * i];
}
}
@@ -359,7 +359,7 @@ void Tree::set_sources(double* S) {
template
void Tree::compute_field_fmm(double* F) {
// std::cout << "Computing FMM fields." << std::endl;
- for (size_t i = 0; i < osize * particles.size(); i++) {
+ for (auto i = 0; i < osize * particles.size(); i++) {
F[i] = 0.0;
}
clear_M();
diff --git a/src/core/fmm/utils.cc b/src/core/fmm/utils.cc
index 8391b282..bef8bad1 100644
--- a/src/core/fmm/utils.cc
+++ b/src/core/fmm/utils.cc
@@ -4,7 +4,7 @@ size_t TriangleNumbers(size_t n) { return (n * (n + 1)) / 2; }
size_t Nterms(size_t p) {
size_t result = 0;
- for (size_t i = 0; i < p + 2; i++) {
+ for (auto i = 0; i < p + 2; i++) {
result += TriangleNumbers(i);
}
return result;
diff --git a/src/core/math.cc b/src/core/math.cc
index 59b18356..23883b21 100644
--- a/src/core/math.cc
+++ b/src/core/math.cc
@@ -33,7 +33,7 @@ Eigen::VectorXd mat_to_triangle(Eigen::Matrix3d m) {
double factorial(int n) {
if (n < 2) return 1.0;
double x = 1.0;
- for (int i = 2; i <= n; i++) x *= double(i);
+ for (auto i = 2; i <= n; i++) x *= double(i);
return x;
}
@@ -75,7 +75,7 @@ std::vector prefactors(unsigned k) {
// changed signs here because electron charges are included downstream
// (integral library)
std::vector pref = symmetry_factors(k);
- for (size_t i = 0; i < pref.size(); i++) {
+ for (auto i = 0; i < pref.size(); i++) {
pref[i] *= taylor;
}
return pref;
@@ -90,7 +90,7 @@ std::vector prefactors_nuclei(unsigned k) {
}
std::vector pf = symmetry_factors(k);
- for (size_t i = 0; i < pf.size(); i++) {
+ for (auto i = 0; i < pf.size(); i++) {
pf[i] *= taylor;
}
return pf;
diff --git a/src/core/multipole_expansion.cc b/src/core/multipole_expansion.cc
index 0988f93f..f9541bce 100644
--- a/src/core/multipole_expansion.cc
+++ b/src/core/multipole_expansion.cc
@@ -12,7 +12,7 @@ double MultipoleExpansion::interaction_energy() {
int npots = m_potentials.size();
#pragma omp parallel for reduction(+ : total_energy)
- for (size_t i = 0; i < npots; i++) {
+ for (auto i = 0; i < npots; i++) {
Potential& potential = m_potentials[i];
Eigen::Vector3d site_position = potential.get_site_position();
for (auto& multipole : potential.get_multipoles()) {
@@ -38,7 +38,7 @@ Eigen::MatrixXd MultipoleExpansion::nuclear_gradient() {
Eigen::MatrixXd grad = Eigen::MatrixXd::Zero(natoms, 3);
#pragma omp parallel for
- for (size_t i = 0; i < npots; i++) {
+ for (auto i = 0; i < npots; i++) {
Potential& potential = m_potentials[i];
Eigen::Vector3d site_position = potential.get_site_position();
for (auto& multipole : potential.get_multipoles()) {
@@ -47,7 +47,7 @@ Eigen::MatrixXd MultipoleExpansion::nuclear_gradient() {
Eigen::VectorXd pref_v =
Eigen::Map(std::move(pref.data()), pref.size());
Eigen::VectorXd mul_v = multipole.get_values_vec();
- for (int ai = 0; ai < natoms; ++ai) {
+ for (auto ai = 0; ai < natoms; ++ai) {
auto& atom = m_mol[ai];
Eigen::Vector3d core_position = atom.get_position();
Eigen::Vector3d diff = core_position - site_position;
diff --git a/src/core/pot_manipulation.cc b/src/core/pot_manipulation.cc
index 7198c1e5..613e44ec 100644
--- a/src/core/pot_manipulation.cc
+++ b/src/core/pot_manipulation.cc
@@ -73,7 +73,7 @@ std::vector PotManipulator::manipulate_border(const PeOptions& pe_opt
neighbor_list.push_back(std::pair(pot.index, dist));
}
sort(neighbor_list.begin(), neighbor_list.end(), sortbysec);
- for (int k = 0; k < nredist; ++k) {
+ for (auto k = 0; k < nredist; ++k) {
Potential& pot = m_potentials[neighbor_list[k].first];
if (pot.index == site) continue;
m_printer(" to neighbor " + std::to_string(pot.index));
@@ -89,7 +89,7 @@ std::vector PotManipulator::manipulate_border(const PeOptions& pe_opt
} else {
// std::cout << "Before: " << std::endl;
// std::cout << m.get_values_vec() << std::endl;
- for (size_t i = 0; i < multipole_components(m.m_k); i++) {
+ for (auto i = 0; i < multipole_components(m.m_k); i++) {
m.get_values()[i] +=
m_potentials[site].get_multipoles()[m_idx].get_values()[i] /
static_cast(nredist);
diff --git a/src/core/potfile_reader.cc b/src/core/potfile_reader.cc
index 9c13fa05..aa10e5b4 100644
--- a/src/core/potfile_reader.cc
+++ b/src/core/potfile_reader.cc
@@ -57,7 +57,7 @@ std::vector PotfileReader::read() {
throw std::runtime_error("Invalid unit for potential file.");
}
- for (size_t i = 0; i < num_sites; i++) {
+ for (auto i = 0; i < num_sites; i++) {
Site site;
getline(infile, line);
std::vector temp = split(reduce(line), ' ');
@@ -81,7 +81,7 @@ std::vector PotfileReader::read() {
getline(infile, line);
int num_multipoles = stoi(line);
int site_before = -1;
- for (size_t n_mul = 0; n_mul < num_multipoles; n_mul++) {
+ for (auto n_mul = 0; n_mul < num_multipoles; n_mul++) {
getline(infile, line);
temp = split(reduce(line), ' ');
int site_num = stoi(temp[0]) - 1;
@@ -89,9 +89,9 @@ std::vector PotfileReader::read() {
// fill up the array if values were not defined for all sites
if (site_num != site_before + 1) {
int diff = site_num - site_before;
- for (size_t d = 1; d < diff; d++) {
+ for (auto d = 1; d < diff; d++) {
Multipole mul(order);
- for (size_t vl = 1; vl <= multipole_components(order); vl++) {
+ for (auto vl = 1; vl <= multipole_components(order); vl++) {
mul.add_value(0.0);
}
potentials[site_before + d].add_multipole(mul);
@@ -99,7 +99,7 @@ std::vector PotfileReader::read() {
}
Multipole mul(order);
- for (size_t vl = 1; vl <= multipole_components(order); vl++) {
+ for (auto vl = 1; vl <= multipole_components(order); vl++) {
mul.add_value(stod(temp[vl]));
}
mul.remove_trace();
@@ -109,9 +109,9 @@ std::vector PotfileReader::read() {
// check if multipoles at the end of the list are missing
if ((n_mul == num_multipoles - 1) && site_num != (num_sites - 1)) {
int diff = num_sites - site_num;
- for (size_t d = 1; d < diff; d++) {
+ for (auto d = 1; d < diff; d++) {
Multipole mul(order);
- for (size_t vl = 1; vl <= multipole_components(order); vl++) {
+ for (auto vl = 1; vl <= multipole_components(order); vl++) {
mul.add_value(0.0);
}
potentials[site_num + d].add_multipole(mul);
@@ -128,14 +128,14 @@ std::vector PotfileReader::read() {
}
getline(infile, line);
int num_polarizabilities = stoi(line);
- for (size_t n_pol = 0; n_pol < num_polarizabilities; n_pol++) {
+ for (auto n_pol = 0; n_pol < num_polarizabilities; n_pol++) {
getline(infile, line);
temp = split(reduce(line), ' ');
int site_num = stoi(temp[0]) - 1;
// std::cout << site.x << " " << site.y << " " << site.z << " " <<
// site_num + 1 << std::endl;
std::vector pol_tmp;
- for (size_t vl = 1; vl <= multipole_components(order1 + order2); vl++) {
+ for (auto vl = 1; vl <= multipole_components(order1 + order2); vl++) {
pol_tmp.push_back(stod(temp[vl]));
}
Polarizability pol{pol_tmp};
@@ -152,7 +152,7 @@ std::vector PotfileReader::read() {
getline(infile, line);
int num_excl = stoi(split(line, ' ')[0]);
std::vector temp;
- for (size_t i = 0; i < num_excl; i++) {
+ for (auto i = 0; i < num_excl; i++) {
getline(infile, line);
temp = split(reduce(line), ' ');
int site_num = stoi(temp[0]) - 1;
diff --git a/src/core/tensors/tensors_recursive.cc b/src/core/tensors/tensors_recursive.cc
index 9288bb1b..727e2b6d 100644
--- a/src/core/tensors/tensors_recursive.cc
+++ b/src/core/tensors/tensors_recursive.cc
@@ -24,9 +24,9 @@ Eigen::VectorXd T_recursive(int k, const Eigen::Vector3d& Rij, double damping_fa
int xyz2idx(int x, int y, int z) {
int idx = 0;
int k = x + y + z;
- for (int a = k; a > -1; a--) {
- for (int b = k; b > -1; b--) {
- for (int c = k; c > -1; c--) {
+ for (auto a = k; a > -1; a--) {
+ for (auto b = k; b > -1; b--) {
+ for (auto c = k; c > -1; c--) {
if (a + b + c != k) continue;
if (a != x || b != y || c != z) {
idx++;
@@ -55,11 +55,11 @@ double T(const Eigen::Vector3d& Rij, int x, int y, int z,
scr_facs = thole_screening_factors(R * damping_factor, k);
}
- for (size_t l = 0; l <= x; l++) {
+ for (auto l = 0; l <= x; l++) {
Cx = Cijn[0](x, l) * pow((Rij(0) / R), l);
- for (size_t m = 0; m <= y; m++) {
+ for (auto m = 0; m <= y; m++) {
Cy = Cx * Cijn[l + x](y, m) * pow((Rij(1) / R), m);
- for (size_t n = 0; n <= z; n++) {
+ for (auto n = 0; n <= z; n++) {
Cz = Cy * Cijn[l + x + m + y](z, n) * pow((Rij(2) / R), n);
int kk = l + m + n;
// Thole damping
@@ -116,7 +116,7 @@ std::vector thole_screening_factors(double v, int k) {
std::vector Tk_coefficients(int max_order) {
int maxi = 2 * max_order + 3;
std::vector Cijn;
- for (int n = 0; n < maxi; ++n) {
+ for (auto n = 0; n < maxi; ++n) {
int k;
Eigen::MatrixXi mat = Eigen::MatrixXi::Zero(max_order + 2, max_order + 2);
mat(0, 0) = 1;
@@ -124,13 +124,13 @@ std::vector Tk_coefficients(int max_order) {
Cijn.push_back(mat);
continue;
}
- for (size_t i = 1; i <= max_order + 1; i++) {
+ for (auto i = 1; i <= max_order + 1; i++) {
if (i % 2 != 0) {
k = i - 1;
} else if (i % 2 == 0) {
k = i;
}
- for (size_t j = 0; j <= i; j++) {
+ for (auto j = 0; j <= i; j++) {
if ((i + j) % 2 != 0) continue;
if (j == 0) {
mat(i, j) = mat(i - 1, j + 1);
diff --git a/src/cppe/__init__.py b/src/cppe/__init__.py
new file mode 100644
index 00000000..293272cc
--- /dev/null
+++ b/src/cppe/__init__.py
@@ -0,0 +1,8 @@
+from .pycppe import *
+from .pycppe import __build_type__, __parallel__
+from .pycppe.tensors import *
+
+all = [
+ "__build_type__",
+ "__parallel__",
+]
diff --git a/src/metadata.cc b/src/metadata.cc
deleted file mode 100644
index 2f24dd7d..00000000
--- a/src/metadata.cc
+++ /dev/null
@@ -1,54 +0,0 @@
-#include "metadata.hh"
-#include
-#include
-
-namespace libcppe {
-namespace {
-static const std::string static_version_string = "0.3.2";
-
-static const std::vector version_split = [](const std::string& in) {
- std::vector parts;
- std::stringstream ss(in);
- std::string item;
- while (std::getline(ss, item, '.')) parts.push_back(item);
- return parts;
-}(static_version_string);
-
-static int get_version_part(size_t part) {
- int ret;
- std::stringstream ss(version_split[part]);
- ss >> ret;
- return ret;
-}
-} // namespace
-
-int version::major_part() { return get_version_part(0); }
-int version::minor_part() { return get_version_part(1); }
-int version::patch_part() { return get_version_part(2); }
-bool version::is_debug() {
-#ifdef NDEBUG
- return false;
-#else
- return true;
-#endif // NDEBUG
-}
-
-bool version::has_openmp() {
-#ifdef _OPENMP
- return true;
-#else
- return false;
-#endif
-}
-
-std::string version::version_string() { return static_version_string; }
-
-std::string __authors__() { return "Maximilian Scheurer"; }
-
-std::string __contributors__() {
- return "Peter Reinholdt, Michael F. Herbst, Lori A. Burns";
-}
-
-std::string __email__() { return "maximilian.scheurer@iwr.uni-heidelberg.de"; }
-
-} // namespace libcppe
\ No newline at end of file
diff --git a/src/metadata.hh b/src/metadata.hh
deleted file mode 100644
index ac8c87ed..00000000
--- a/src/metadata.hh
+++ /dev/null
@@ -1,36 +0,0 @@
-#pragma once
-#include
-#include
-
-namespace libcppe {
-
-struct version {
- /** Return the major part of the version */
- static int major_part();
-
- /** Return the minor part of the version */
- static int minor_part();
-
- /** Return the patch part of the version */
- static int patch_part();
-
- /** Is the compiled version a Debug version */
- static bool is_debug();
-
- /** Is the compiled version using OpenMP */
- static bool has_openmp();
-
- /** Return the version as a string */
- static std::string version_string();
-};
-
-/** Return the authors string */
-std::string __authors__();
-
-/** Return the contributors string */
-std::string __contributors__();
-
-/** Return the email string */
-std::string __email__();
-
-} // namespace libcppe
diff --git a/src/python_iface/CMakeLists.txt b/src/python_iface/CMakeLists.txt
index c54832bd..d09d7164 100644
--- a/src/python_iface/CMakeLists.txt
+++ b/src/python_iface/CMakeLists.txt
@@ -1,31 +1,13 @@
-find_package(Python 3.8 REQUIRED COMPONENTS Interpreter Development)
-
-# figure out where to put the Python module
-if(NOT DEFINED PYMOD_INSTALL_FULLDIR)
- if(NOT WIN32)
- set(PYMOD_INSTALL_FULLDIR
- "lib/python${Python_VERSION_MAJOR}.${Python_VERSION_MINOR}/site-packages/cppe"
- CACHE STRING
- "Location under CMAKE_INSTALL_PREFIX to which Python modules are installed"
- FORCE
- )
- else()
- set(PYMOD_INSTALL_FULLDIR
- "Lib/site-packages/cppe"
- CACHE STRING
- "Location under CMAKE_INSTALL_PREFIX to which Python modules are installed"
- FORCE
- )
- endif()
-endif()
-message(STATUS "Setting PYMOD_INSTALL_FULLDIR: ${PYMOD_INSTALL_FULLDIR}")
-file(MAKE_DIRECTORY ${PROJECT_BINARY_DIR}/${PYMOD_INSTALL_FULLDIR})
+find_package(Python 3.8
+ REQUIRED COMPONENTS Interpreter Development.Module
+ OPTIONAL_COMPONENTS Development.SABIModule
+ )
find_package(pybind11 2.6 CONFIG REQUIRED)
pybind11_add_module(pycppe
MODULE
- THIN_LTO
+ NO_EXTRAS
export_molecule.cc
export_potential.cc
export_fields.cc
@@ -42,62 +24,9 @@ target_link_libraries(pycppe
cppe-objs
)
-set_target_properties(pycppe
- PROPERTIES
- ARCHIVE_OUTPUT_DIRECTORY
- ${PROJECT_BINARY_DIR}/${PYMOD_INSTALL_FULLDIR}
- LIBRARY_OUTPUT_DIRECTORY
- ${PROJECT_BINARY_DIR}/${PYMOD_INSTALL_FULLDIR}
- RUNTIME_OUTPUT_DIRECTORY
- ${PROJECT_BINARY_DIR}/${PYMOD_INSTALL_FULLDIR}
- )
-
-# we glob the Python files in this folder and let CMake add a rule such that
-# the glob is repeated every time we rebuild.
-# This is NOT RECOMMENDED by CMake
-# (https://cmake.org/cmake/help/v3.16/command/file.html#filesystem) but you only
-# live once!
-file(
- GLOB
- _pys
- LIST_DIRECTORIES
- FALSE
- CONFIGURE_DEPENDS
- ${CMAKE_CURRENT_SOURCE_DIR}/*.py
- )
-
-# link the Python files under the build folder
-foreach(_py IN LISTS _pys)
- get_filename_component(__py ${_py} NAME)
- file(
- CREATE_LINK
- ${_py}
- ${PROJECT_BINARY_DIR}/${PYMOD_INSTALL_FULLDIR}/${__py}
- COPY_ON_ERROR
- SYMBOLIC
- )
-endforeach()
-
install(
TARGETS
pycppe
- DESTINATION
- ${PYMOD_INSTALL_FULLDIR}
- )
-
-install(
- FILES
- ${_pys}
- DESTINATION
- ${PYMOD_INSTALL_FULLDIR}
- )
-
-# install the Python tests
-install(
- DIRECTORY
- ${PROJECT_SOURCE_DIR}/tests/
- DESTINATION
- ${PYMOD_INSTALL_FULLDIR}/tests
- PATTERN "CMakeLists.txt" EXCLUDE
- PATTERN "__pycache__" EXCLUDE
+ LIBRARY DESTINATION
+ cppe
)
diff --git a/src/python_iface/__init__.py b/src/python_iface/__init__.py
deleted file mode 100644
index 76ee7471..00000000
--- a/src/python_iface/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-from .pycppe import *
-from .pycppe.tensors import *
-from .pycppe import (
- __version__, __build_type__, __authors__, __contributors__, __email__,
- __parallel__,
-)
-
-all = [
- "__version__",
- "__build_type__",
- "__parallel__",
- "__authors__",
- "__contributors__",
- "__email__",
-]
diff --git a/src/python_iface/export_cppe.cc b/src/python_iface/export_cppe.cc
index 3d44e857..aaff9a82 100644
--- a/src/python_iface/export_cppe.cc
+++ b/src/python_iface/export_cppe.cc
@@ -1,4 +1,3 @@
-#include "../metadata.hh"
#include
namespace py = pybind11;
@@ -13,13 +12,21 @@ void export_tensors(py::module&);
void export_fmm(py::module&);
PYBIND11_MODULE(pycppe, cppe) {
- cppe.doc() = "Python interface for CPPE";
- cppe.attr("__version__") = libcppe::version::version_string();
- cppe.attr("__build_type__") = libcppe::version::is_debug() ? "Debug" : "Release";
- cppe.attr("__parallel__") = libcppe::version::has_openmp();
- cppe.attr("__authors__") = libcppe::__authors__();
- cppe.attr("__contributors__") = libcppe::__contributors__();
- cppe.attr("__email__") = libcppe::__email__();
+ cppe.doc() = "Python interface for CPPE";
+ cppe.attr("__build_type__") = [] {
+#ifdef NDEBUG
+ return "Release";
+#else
+ return "Debug";
+#endif
+ }();
+ cppe.attr("__parallel__") = [] {
+#ifdef _OPENMP
+ return true;
+#else
+ return false;
+#endif
+ }();
export_molecule(cppe);
export_multipole(cppe);
@@ -29,4 +36,4 @@ PYBIND11_MODULE(pycppe, cppe) {
export_math(cppe);
export_tensors(cppe);
export_fmm(cppe);
-}
\ No newline at end of file
+}
diff --git a/tests/test_math.py b/tests/test_math.py
index 254c738a..f0a10150 100644
--- a/tests/test_math.py
+++ b/tests/test_math.py
@@ -1,5 +1,6 @@
import unittest
import pytest
+import math
import numpy as np
from cppe import T_recursive, xyz2idx, T, T_damp_thole
from cppe import (factorial, prefactors,
@@ -22,7 +23,7 @@ def test_basic_math(self):
# factorial
rng = np.arange(0, 10, 1)
for a in rng:
- assert factorial(a) == np.math.factorial(a)
+ assert factorial(a) == math.factorial(a)
def test_prefactors(self):
for k in range(3):
| | | | | | | | | | | | | |