diff --git a/.gitignore b/.gitignore
index 2c75ddc7..ab58a40d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -14,3 +14,8 @@ coverage.xml
 __pycache__
 RUN/
 *.so
+test-xinmeng-electrostatic/*
+*.h5
+!examples/*.h5
+!test-xinmeng-electrostatic/run-dppc/
+#!test-xinmeng-electrostatic/run-sds/
diff --git a/README.md b/README.md
index e2b2b581..e7600cf0 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,11 @@
 HyMD testing and development · [![License: GPL v3](https://img.shields.io/badge/License-LGPLv3-blue.svg)](https://www.gnu.org/licenses/lgpl-3.0.html) ![build](https://github.com/Cascella-Group-UiO/HyMD-2021/workflows/build/badge.svg)
 --------
-Compile FORTRAN modules:
+HyMD is a software package for running coarse-grained molecular dynamics
+simulations using the hybrid particle-field model approach, first introduced
+in [1]. In HyMD we implement the formulation presented in [2].
+
+## Setup
+First compile the FORTRAN modules:
 ```bash
 > cd hymd/
 > make clean
@@ -8,11 +13,25 @@ Compile FORTRAN modules:
 > cd ..
 ```
 
-Run a simple example simulation with
+Check out all the available options with
 ```bash
-> mpirun -n 4 python3 hymd/main.py config.toml dppc.h5 --verbose -logfile log.txt
+> python3 hymd/main.py --help
 ```
 
-Notes:
-- Changed numpy data types (Float32, Int32 -> float32, int32) to run in local computer
-- TODO: Fix for running monoatomic (or non molecular) systems
+Start a simple example simulation with:
+```bash
+> mpirun -n 4 python3 hymd/main.py peptide.toml peptide.h5 -v
+```
+
+## References
+[1] Milano, G. & Kawakatsu, T. Hybrid particle-field molecular dynamics
+simulations for dense polymer systems. Journal of Chemical Physics, American
+Institute of Physics, 2009, 130, 214106.
+
+[2] Bore, S. L. & Cascella, M. Hamiltonian and alias-free hybrid
+particle-field molecular dynamics. The Journal of Chemical Physics, AIP
+Publishing LLC, 2020, 153, 094106.
+
+
+
+
diff --git a/config.toml b/config.toml
index 9e4b880d..0c22c985 100644
--- a/config.toml
+++ b/config.toml
@@ -1,57 +1,82 @@
-[meta]
 # Name of the simulation. May be ommitted.
-name = "DPPC bilayer with ML interaction parameters"
+# name = "PRWG lipopeptide micelle in water"
 # Tags classifying the simulation. May be ommitted.
-tags = ["bilayer", "solvent", "DPPC"]
+# tags = ["lipopeptides", "PRWG"]
+
+# Here's the topology of a PRWG molecule:
+#
+#                 WSC2 -- WSC3
+#                  |       |
+#  PSC(+)         WSC1 -- WSC4
+#   |              |
+#  PBB -- RBB --  WBB -- GBB -- C -- C -- C -- C
+#          |
+#         RSC -- RSC(+)
+
 [particles]
 # Number of total particles in the simulation. If an input .hdf5 format file is
 # specified, the number of particles will be inferred from this and *may* be
-# ommited.
-n_particles = 20336
+# omitted here.
+n_particles = 11876
+# Number of ghost particles, used for dipole point charges in the CBT dihedral
+# potential (default = 0).
+# n_ghots = 0
 # Mass of the particles in [g/mol]. All masses are assumed equal.
 mass = 72.0
 # Maximum number of particles per molecules present in the system. A default of
 # 200 is assumed, and this keyword may be ommitted for any system with smaller
 # molecules.
-max_molecule_size = 15
+# max_molecule_size = 15
 
 [simulation]
-# Number of total time steps in the simulation in [picoseconds].
-n_steps = 25000
+# Number of total time steps in the simulation.
+n_steps = 200
 # Frequency of trajectory/energy file output in time steps.
-n_print = 5000
+n_print = 1
 # Frequency of requesting that the HDF5 library flush the file output buffers
 # to disk after in number of n_print timesteps.
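+# (In other words, a flush is requested every n_flush * n_print time steps;
+# with n_print = 1 and n_flush = 10 below, that is every 10 steps.)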
-n_flush = 5000
+n_flush = 10
 # Time step used in the simulation in [picoseconds].
-time_step = 0.3
+time_step = 0.03
 # Simulation box size in [nanometers].
-box_size = [13.0, 13.0, 14.0]
+box_size = [10.73911, 10.76184, 11.30546]
+# box_size = [5.0, 5.0, 5.0]  # for single PRWG molecule test
 # Time integrator used in the simulation. Either "velocity-verlet" or "respa".
 # If "respa", specify also the number of small rRESPA time steps per large
 # time_step with the 'respa_inner' keyword.
 integrator = "respa"
-respa_inner = 10
+respa_inner = 5
 # Perform MPI rank domain decomposition every x time steps to (hopefully)
 # reduce the amount of neccessary communication between ranks in the pmesh
 # procedures. Ommit or set to 'false' or '0' to not perform any domain
 # decomposition.
 domain_decomposition = 1000
+# domain_decomposition = false
+# Remove linear center of mass momentum from the system before integration
+# starts.
+cancel_com_momentum = true
 # Starting temperature to generate before simulation begins in [kelvin]. Ommit
 # or set to 'false' to not change the temperature before starting.
-start_temperature = 323
+start_temperature = 300
 # Target temperature used in the velocity rescale thermostat in [kelvin]. Ommit
 # or set to 'false' to use no thermostat, i.e. a constant energy simulation.
-target_temperature = 323
+target_temperature = 300
 # Thermostat collision frequency in [1/picoseconds].
-tau = 0.1
+tau = 1
+# Couple groups of particle species to separate thermostats.
+thermostat_coupling_groups = [
+  ["PBB", "PSC", "RBB", "RSC", "WBB", "WSC1", "WSC2", "WSC3", "WSC4", "GBB", "C"],
+  ["W", "CL"]
+]
 # The energy functional W[phi] to use. Options:
 # "SquaredPhi": φ² / 2κφ₀,
 # "DefaultNoChi": (φ - φ₀)² / 2κφ₀
 # "DefaultWithChi": (φ - φ₀)² / 2κφ₀ + Σ χφφ' / φ₀
 # Subclass Hamiltonian to create a new energy functional.
 hamiltonian = 'DefaultWithChi'
+coulombtype = 'PIC_Spectral'
+dielectric_const = 80.0
 
 [field]
 # Particle-mesh grid size, either a single integer or an array of 3 integers
 # (number of grid points in each dimension). In order to guarantee consistency
 # and speed in the PFFT routines, the actual mesh grid will be changed to ensure
 # that each dimension of the 2d PFFT process grid divides each dimension of the
 # mesh grid size.
 mesh_size = [24, 24, 24]
 # Compressibility used in the relaxed incompressibility term of W(phi) in
 # [mol/kJ].
 kappa = 0.05
 # Standard deviation in the Gaussian filter (window function) in [nanometers].
-# This value is a characzteristic length scale for the size of the particles in
+# This value is a characteristic length scale for the size of the particles in
 # the simulation.
 sigma = 0.5
 # Interaction matrix, chi, ((atom name 1, atom name 2), (mixing energy in
 # [kJ/mol])).
-chi = [
-  ["C", "W", 42.24],
-  ["G", "C", 10.47],
-  ["N", "W", -3.77],
-  ["G", "W", 4.53],
-  ["N", "P", -9.34],
-  ["P", "G", 8.04],
-  ["N", "G", 1.97],
-  ["P", "C", 14.72],
-  ["P", "W", -1.51],
-  ["N", "C", 13.56],
-]
-
-[bonds]
-# Two-particle bonds, ((atom name 1, atom name 2), (equilibrium length in
-# [nanometers], bond strenght in [kJ/mol])). Note the two
-bonds = [
-  ["N", "P", 0.47, 1250.0],
-  ["P", "G", 0.47, 1250.0],
-  ["G", "G", 0.37, 1250.0],
-  ["G", "C", 0.47, 1250.0],
-  ["C", "C", 0.47, 1250.0],
-]
-# Three-particle angular bonds, ((atom name 1, atom name 2, atom name 3),
-# (equilibrium angle in [degrees], bond strenght in [kJ/mol])).
-angle_bonds = [
-  ["P", "G", "G", 120.0, 25.0],
-  ["P", "G", "C", 180.0, 25.0],
-  ["G", "C", "C", 180.0, 25.0],
-  ["C", "C", "C", 180.0, 25.0],
-]
+chi = []
\ No newline at end of file
diff --git a/examples/dppc.h5 b/examples/dppc.h5
new file mode 100644
index 00000000..f8d732f4
Binary files /dev/null and b/examples/dppc.h5 differ
diff --git a/examples/dppc.toml b/examples/dppc.toml
new file mode 100644
index 00000000..dde1905e
--- /dev/null
+++ b/examples/dppc.toml
@@ -0,0 +1,102 @@
+[meta]
+# Name of the simulation. May be omitted.
+name = "DPPC bilayer with ML interaction parameters"
+# Tags classifying the simulation. May be omitted.
+tags = ["bilayer", "solvent", "DPPC"]
+
+[particles]
+# Number of total particles in the simulation. If an input .hdf5 format file is
+# specified, the number of particles will be inferred from this and *may* be
+# omitted.
+n_particles = 20336
+# Mass of the particles in [g/mol]. All masses are assumed equal.
+mass = 72.0
+# Maximum number of particles per molecule present in the system. A default of
+# 200 is assumed, and this keyword may be omitted for any system with smaller
+# molecules.
+max_molecule_size = 15
+
+[simulation]
+# Number of total time steps in the simulation.
+n_steps = 25000
+# Frequency of trajectory/energy file output in time steps.
+n_print = 5000
+# Frequency of requesting that the HDF5 library flush the file output buffers
+# to disk, given in number of n_print timesteps.
+n_flush = 5000
+# Time step used in the simulation in [picoseconds].
+time_step = 0.3
+# Simulation box size in [nanometers].
+box_size = [13.0, 13.0, 14.0]
+# Time integrator used in the simulation. Either "velocity-verlet" or "respa".
+# If "respa", specify also the number of small rRESPA time steps per large
+# time_step with the 'respa_inner' keyword.
+integrator = "respa"
+respa_inner = 10
+# Perform MPI rank domain decomposition every x time steps to (hopefully)
+# reduce the amount of necessary communication between ranks in the pmesh
+# procedures. Omit or set to 'false' or '0' to not perform any domain
+# decomposition.
+domain_decomposition = 1000
+# Starting temperature to generate before simulation begins in [kelvin]. Omit
+# or set to 'false' to not change the temperature before starting.
+start_temperature = 323
+# Target temperature used in the velocity rescale thermostat in [kelvin]. Omit
+# or set to 'false' to use no thermostat, i.e. a constant energy simulation.
+target_temperature = 323
+# Thermostat collision frequency in [1/picoseconds].
+tau = 0.1
+# The energy functional W[phi] to use. Options:
+# "SquaredPhi": φ² / 2κφ₀,
+# "DefaultNoChi": (φ - φ₀)² / 2κφ₀
+# "DefaultWithChi": (φ - φ₀)² / 2κφ₀ + Σ χφφ' / φ₀
+# Subclass Hamiltonian to create a new energy functional.
+hamiltonian = 'DefaultWithChi'
+
+[field]
+# Particle-mesh grid size, either a single integer or an array of 3 integers
+# (number of grid points in each dimension). In order to guarantee consistency
+# and speed in the PFFT routines, the actual mesh grid will be changed to ensure
+# that each dimension of the 2d PFFT process grid divides each dimension of the
+# mesh grid size.
+mesh_size = [24, 24, 24]
+# Compressibility used in the relaxed incompressibility term of W(phi) in
+# [mol/kJ].
+kappa = 0.05
+# Standard deviation in the Gaussian filter (window function) in [nanometers].
+# This value is a characteristic length scale for the size of the particles in
+# the simulation.
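+# (The filter is a Gaussian of standard deviation sigma; in Fourier space it is
+# proportional to exp(-k² sigma² / 2). See reference [2] in the README.)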
+sigma = 0.5
+# Interaction matrix, chi, ((atom name 1, atom name 2), (mixing energy in
+# [kJ/mol])).
+chi = [
+  [["C", "W"], [42.24]],
+  [["G", "C"], [10.47]],
+  [["N", "W"], [-3.77]],
+  [["G", "W"], [4.53]],
+  [["N", "P"], [-9.34]],
+  [["P", "G"], [8.04]],
+  [["N", "G"], [1.97]],
+  [["P", "C"], [14.72]],
+  [["P", "W"], [-1.51]],
+  [["N", "C"], [13.56]],
+]
+
+[bonds]
+# Two-particle bonds, ((atom name 1, atom name 2), (equilibrium length in
+# [nanometers], bond strength in [kJ/mol])). Note the two
+bonds = [
+  [["N", "P"], [0.47, 1250.0]],
+  [["P", "G"], [0.47, 1250.0]],
+  [["G", "G"], [0.37, 1250.0]],
+  [["G", "C"], [0.47, 1250.0]],
+  [["C", "C"], [0.47, 1250.0]],
+]
+# Three-particle angular bonds, ((atom name 1, atom name 2, atom name 3),
+# (equilibrium angle in [degrees], bond strength in [kJ/mol])).
+angle_bonds = [
+  [["P", "G", "G"], [120.0, 25.0]],
+  [["P", "G", "C"], [180.0, 25.0]],
+  [["G", "C", "C"], [180.0, 25.0]],
+  [["C", "C", "C"], [180.0, 25.0]],
+]
diff --git a/examples/lipidA.h5 b/examples/lipidA.h5
new file mode 100644
index 00000000..3c924892
Binary files /dev/null and b/examples/lipidA.h5 differ
diff --git a/examples/lipidA.toml b/examples/lipidA.toml
new file mode 100644
index 00000000..7e2605da
--- /dev/null
+++ b/examples/lipidA.toml
@@ -0,0 +1,118 @@
+[meta]
+# Name of the simulation. May be omitted.
+name = "LipidA with Ca2+ in solution"
+# Tags classifying the simulation. May be omitted.
+tags = ["vesicle", "solvent", "LipidA"]
+
+[particles]
+# Number of total particles in the simulation. If an input .hdf5 format file is
+# specified, the number of particles will be inferred from this and *may* be
+# omitted.
+n_particles = 71318
+# Mass of the particles in [g/mol]. All masses are assumed equal.
+mass = 72.0
+# Maximum number of particles per molecule present in the system. A default of
+# 200 is assumed, and this keyword may be omitted for any system with smaller
+# molecules.
+max_molecule_size = 30
+
+[simulation]
+# Number of total time steps in the simulation.
+n_steps = 3
+# Frequency of trajectory/energy file output in time steps.
+n_print = 1
+# Frequency of requesting that the HDF5 library flush the file output buffers
+# to disk, given in number of n_print timesteps.
+n_flush = 1
+# Time step used in the simulation in [picoseconds].
+time_step = 0.3
+# Simulation box size in [nanometers].
+box_size = [20.0, 20.0, 20.0]
+# Time integrator used in the simulation. Either "velocity-verlet" or "respa".
+# If "respa", specify also the number of small rRESPA time steps per large
+# time_step with the 'respa_inner' keyword.
+integrator = "respa"
+respa_inner = 10
+# Perform MPI rank domain decomposition every x time steps to (hopefully)
+# reduce the amount of necessary communication between ranks in the pmesh
+# procedures. Omit or set to 'false' or '0' to not perform any domain
+# decomposition.
+domain_decomposition = 50000
+# Remove linear center of mass momentum from the system before integration
+# starts and at every x steps subsequently. If 'true', the linear momentum is
+# removed before starting. Omit or set to 'false' or 0 to never remove the
+# center of mass momentum.
+cancel_com_momentum = 20
+# Starting temperature to generate before simulation begins in [kelvin]. Omit
+# or set to 'false' to not change the temperature before starting.
+start_temperature = 323
+# Target temperature used in the velocity rescale thermostat in [kelvin]. Omit
+# or set to 'false' to use no thermostat, i.e. a constant energy simulation.
+target_temperature = 323
+# Thermostat collision frequency in [1/picoseconds].
+tau = 0.7
+# Couple groups of particle species to separate thermostats.
+thermostat_coupling_groups = [
+  ["P", "G", "L", "C"],
+  ["W"],
+  ["CA"]
+]
+# The energy functional W[phi] to use. Options:
+# "SquaredPhi": φ² / 2κφ₀,
+# "DefaultNoChi": (φ - φ₀)² / 2κφ₀
+# "DefaultWithChi": (φ - φ₀)² / 2κφ₀ + Σ χφφ' / φ₀
+# Subclass Hamiltonian to create a new energy functional.
+hamiltonian = 'DefaultNoChi'
+
+
+[field]
+# Particle-mesh grid size, either a single integer or an array of 3 integers
+# (number of grid points in each dimension). In order to guarantee consistency
+# and speed in the PFFT routines, the actual mesh grid will be changed to ensure
+# that each dimension of the 2d PFFT process grid divides each dimension of the
+# mesh grid size.
+mesh_size = [24, 24, 24]
+# Compressibility used in the relaxed incompressibility term of W(phi) in
+# [mol/kJ].
+kappa = 0.05
+# Standard deviation in the Gaussian filter (window function) in [nanometers].
+# This value is a characteristic length scale for the size of the particles in
+# the simulation.
+sigma = 0.5
+# Electrostatics
+coulombtype = 'PIC_Spectral'
+dielectric_const = 20.0
+# Interaction matrix, chi, ((atom name 1, atom name 2), (mixing energy in
+# [kJ/mol])).
+chi = [
+  [["L", "G"], [4.5]],
+  [["L", "C"], [13.25]],
+  [["P", "G"], [4.5]],
+  [["P", "C"], [20]],
+  [["P", "CA"], [-7.2]],
+  [["P", "W"], [-3.6]],
+  [["G", "C"], [8.3]],
+  [["G", "W"], [4.5]],
+  [["C", "CA"], [13.25]],
+  [["C", "W"], [33.75]],
+]
+
+[bonds]
+# Two-particle bonds, ((atom name 1, atom name 2), (equilibrium length in
+# [nanometers], bond strength in [kJ/mol])). Note the two
+bonds = [
+  [["L", "G"], [0.47, 1250.0]],
+  [["L", "C"], [0.47, 1250.0]],
+  [["G", "P"], [0.37, 1250.0]],
+  [["G", "G"], [0.47, 1000.0]],
+  [["G", "C"], [0.47, 1250.0]],
+  [["C", "C"], [0.47, 1250.0]],
+]
+# Three-particle angular bonds, ((atom name 1, atom name 2, atom name 3),
+# (equilibrium angle in [degrees], bond strength in [kJ/mol])).
+angle_bonds = [
+  [["G", "G", "G"], [120.0, 850.0]],
+  [["G", "G", "L"], [120.0, 25.0]],
+  [["L", "C", "C"], [180.0, 25.0]],
+  [["C", "C", "C"], [180.0, 25.0]],
+]
diff --git a/examples/model_protein.toml b/examples/model_protein.toml
new file mode 100644
index 00000000..dea50dd3
--- /dev/null
+++ b/examples/model_protein.toml
@@ -0,0 +1,123 @@
+# [meta]
+# Name of the simulation. May be omitted.
+# name = "Alanine octa-peptide in vacuum"
+# Tags classifying the simulation. May be omitted.
+# tags = ["peptide", "ALA"]
+
+[particles]
+# Number of total particles in the simulation. If an input .hdf5 format file is
+# specified, the number of particles will be inferred from this and *may* be
+# omitted here.
+n_particles = 2143
+# Mass of the particles in [g/mol]. All masses are assumed equal.
+mass = 72.0
+# Maximum number of particles per molecule present in the system. A default of
+# 200 is assumed, and this keyword may be omitted for any system with smaller
+# molecules.
+# max_molecule_size = 15
+
+[simulation]
+# Number of total time steps in the simulation.
+n_steps = 10000
+# Frequency of trajectory/energy file output in time steps.
+n_print = 10
+# Frequency of requesting that the HDF5 library flush the file output buffers
+# to disk, given in number of n_print timesteps.
+n_flush = 10000
+# Time step used in the simulation in [picoseconds].
+time_step = 0.3
+# Simulation box size in [nanometers].
+box_size = [6.25, 6.25, 6.25]
+# Time integrator used in the simulation. Either "velocity-verlet" or "respa".
+# If "respa", specify also the number of small rRESPA time steps per large
+# time_step with the 'respa_inner' keyword.
+integrator = "respa"
+respa_inner = 5
+# Perform MPI rank domain decomposition every x time steps to (hopefully)
+# reduce the amount of necessary communication between ranks in the pmesh
+# procedures. Omit or set to 'false' or '0' to not perform any domain
+# decomposition.
+domain_decomposition = false
+# Remove linear center of mass momentum from the system before integration
+# starts.
+cancel_com_momentum = true
+# Starting temperature to generate before simulation begins in [kelvin]. Omit
+# or set to 'false' to not change the temperature before starting.
+start_temperature = 300
+# Target temperature used in the velocity rescale thermostat in [kelvin]. Omit
+# or set to 'false' to use no thermostat, i.e. a constant energy simulation.
+target_temperature = 300
+# Thermostat collision frequency in [1/picoseconds].
+tau = 1
+# Couple groups of particle species to separate thermostats.
+thermostat_coupling_groups = [
+  ["BB", "SC"],
+  ["W"]
+]
+# The energy functional W[phi] to use. Options:
+# "SquaredPhi": φ² / 2κφ₀,
+# "DefaultNoChi": (φ - φ₀)² / 2κφ₀
+# "DefaultWithChi": (φ - φ₀)² / 2κφ₀ + Σ χφφ' / φ₀
+# Subclass Hamiltonian to create a new energy functional.
+hamiltonian = 'DefaultNoChi'
+dielectric_const = 5.0
+
+[field]
+# Particle-mesh grid size, either a single integer or an array of 3 integers
+# (number of grid points in each dimension). In order to guarantee consistency
+# and speed in the PFFT routines, the actual mesh grid will be changed to ensure
+# that each dimension of the 2d PFFT process grid divides each dimension of the
+# mesh grid size.
+mesh_size = [40, 40, 40]
+# Compressibility used in the relaxed incompressibility term of W(phi) in
+# [mol/kJ].
+kappa = 0.03
+# Standard deviation in the Gaussian filter (window function) in [nanometers].
+# This value is a characteristic length scale for the size of the particles in
+# the simulation.
+sigma = 0.5
+# Interaction matrix, chi, ((atom name 1, atom name 2), (mixing energy in
+# [kJ/mol])).
+chi = [
+  [["BB", "SC"], [0]],
+  [["BB", "W"], [0]],
+  [["SC", "W"], [0]],
+]
+
+[bonds]
+# Two-particle bonds, ((atom name 1, atom name 2), (equilibrium length in
+# [nanometers], bond strength in [kJ/mol])). Note the two
+# Same for the angles since he used the combined angle-torsion potential.
+# IMPORTANT: numbers in arrays must have the same type!
+bonds = [
+  [["BB", "BB"], [0.35, 1000.0]],
+  [["BB", "SC"], [0.27, 1000.0]],
+]
+# Three-particle angular bonds, ((atom name 1, atom name 2, atom name 3),
+# (equilibrium angle in [degrees], bond strength in [kJ/mol])).
+angle_bonds = [
+  [["BB", "BB", "SC"], [100.0, 100.0]],
+]
+# Four-particle torsion angles defined with a Fourier series potential.
+# ((atom name 1, atom name 2, atom name 3, atom name 4),
+#  (coefficients),
+#  (dihedral type))
+dihedrals = [
+  [
+    ["BB", "BB", "BB", "BB"],
+    [
+      # Move this out and provide propensity instead?
+      # α propensity (λ = -1)
+      [7.406, -5.298, -2.570, 1.336, 0.739],          # c_v (kJ/mol)
+      [-16.405, 69.323, 67.679, 28.118, 56.434],      # d_v (deg)
+      # β propensity (λ = 1)
+      # [3.770, 5.929, -4.151, -0.846, 0.190],        # c_v
+      # [-13.182, -3.342, 56.919, 59.151, 166.250],   # d_v
+      # No propensity (λ = 0)
+      # [1.416, -0.739, 0.990, -0.397, 0.136],        # c_v
+      # [77.326, 26.155, 132.033, -7.033, -15.000],   # d_v
+      [0.13680, 0.18589, -0.16586, 0.07664, -0.02589], # c_k (kJ/mol deg-2)
+      [4.412, 26.488, 94.723, -55.004, 21.662],       # d_k
+    ],
+    [1.0]  # Type
+]]
diff --git a/examples/prot_15.h5 b/examples/prot_15.h5
new file mode 100644
index 00000000..9c034290
Binary files /dev/null and b/examples/prot_15.h5 differ
diff --git a/examples/prot_15.toml b/examples/prot_15.toml
new file mode 100644
index 00000000..e64fe665
--- /dev/null
+++ b/examples/prot_15.toml
@@ -0,0 +1,166 @@
+# [meta]
+# Name of the simulation. May be omitted.
+# name = "Alanine octa-peptide in vacuum"
+# Tags classifying the simulation. May be omitted.
+# tags = ["peptide", "ALA"]
+
+[particles]
+# Number of total particles in the simulation. If an input .hdf5 format file is
+# specified, the number of particles will be inferred from this and *may* be
+# omitted here.
+n_particles = 2143
+# Mass of the particles in [g/mol]. All masses are assumed equal.
+mass = 72.0
+# Maximum number of particles per molecule present in the system. A default of
+# 200 is assumed, and this keyword may be omitted for any system with smaller
+# molecules.
+# max_molecule_size = 15
+
+[simulation]
+# Number of total time steps in the simulation.
+n_steps = 2000
+# Frequency of trajectory/energy file output in time steps.
+n_print = 1
+# Frequency of requesting that the HDF5 library flush the file output buffers
+# to disk, given in number of n_print timesteps.
+n_flush = 10
+# Time step used in the simulation in [picoseconds].
+time_step = 0.3
+# Simulation box size in [nanometers].
+box_size = [6.25, 6.25, 6.25]
+# Time integrator used in the simulation. Either "velocity-verlet" or "respa".
+# If "respa", specify also the number of small rRESPA time steps per large
+# time_step with the 'respa_inner' keyword.
+integrator = "respa"
+respa_inner = 10
+# Perform MPI rank domain decomposition every x time steps to (hopefully)
+# reduce the amount of necessary communication between ranks in the pmesh
+# procedures. Omit or set to 'false' or '0' to not perform any domain
+# decomposition.
+domain_decomposition = 1000
+# Remove linear center of mass momentum from the system before integration
+# starts.
+cancel_com_momentum = true
+# Starting temperature to generate before simulation begins in [kelvin]. Omit
+# or set to 'false' to not change the temperature before starting.
+start_temperature = 300
+# Target temperature used in the velocity rescale thermostat in [kelvin]. Omit
+# or set to 'false' to use no thermostat, i.e. a constant energy simulation.
+target_temperature = 300
+# Add nssttcouple?
+#nssttcouple = 10
+# Thermostat collision frequency in [1/picoseconds].
+tau = 1
+# Couple groups of particle species to separate thermostats.
+thermostat_coupling_groups = [
+  ["BB", "SC"],
+  ["W"]
+]
+# The energy functional W[phi] to use. Options:
+# "SquaredPhi": φ² / 2κφ₀,
+# "DefaultNoChi": (φ - φ₀)² / 2κφ₀
+# "DefaultWithChi": (φ - φ₀)² / 2κφ₀ + Σ χφφ' / φ₀
+# Subclass Hamiltonian to create a new energy functional.
+hamiltonian = 'DefaultWithChi'
+dielectric_const = 5.0
+
+[field]
+# Particle-mesh grid size, either a single integer or an array of 3 integers
+# (number of grid points in each dimension). In order to guarantee consistency
+# and speed in the PFFT routines, the actual mesh grid will be changed to ensure
+# that each dimension of the 2d PFFT process grid divides each dimension of the
+# mesh grid size.
+mesh_size = [24, 24, 24]
+# Compressibility used in the relaxed incompressibility term of W(phi) in
+# [mol/kJ].
+kappa = 0.05
+# Standard deviation in the Gaussian filter (window function) in [nanometers].
+# This value is a characteristic length scale for the size of the particles in
+# the simulation.
+sigma = 0.4
+# Interaction matrix, chi, ((atom name 1, atom name 2), (mixing energy in
+# [kJ/mol])).
+chi = [
+  [["BB", "SC"], [0]],
+  [["BB", "W"], [0]],
+  [["SC", "W"], [20]],
+]
+
+[bonds]
+# Two-particle bonds, ((atom name 1, atom name 2), (equilibrium length in
+# [nanometers], bond strength in [kJ/mol])). Note the two
+# Same for the angles since he used the combined angle-torsion potential.
+# IMPORTANT: numbers in arrays must have the same type!
+bonds = [
+  [["BB", "BB"], [0.38, 1250.0]],
+  [["BB", "SC"], [0.18, 5000.0]],
+]
+# Three-particle angular bonds, ((atom name 1, atom name 2, atom name 3),
+# (equilibrium angle in [degrees], bond strength in [kJ/mol])).
+angle_bonds = [
+  [["BB", "BB", "SC"], [120.0, 25.0]],  # β sheet
+  [["BB", "BB", "SC"], [108.0, 25.0]],  # α helix
+  [["BB", "BB", "SC"], [108.0, 25.0]],  # α helix
+]
+# Four-particle torsion angles; the definition is as follows:
+# ((atom name 1, atom name 2, atom name 3, atom name 4),
+#  (coefficients),
+#  (dihedral type))
+# Here are some possible ways the coefficients can be defined:
+#
+# 1) Cosine series, dih_type == 0
+#    V_prop(φ) = ∑ c_prop[n] * (1 + cos(n * φ - d_prop[n]))
+#    lambda (λ) is a float in the range [-1.0, 1.0], which determines the
+#    secondary structure propensity (i.e. the values of c_prop and d_prop).
+#    λ = -1.0 => alpha, λ = 1.0 => beta, λ = 0.0 => coil
+#    In the above three cases two lists of 5 coefficients will be returned,
+#    otherwise 4 lists will be returned to account for the mixed propensity.
+#
+#    dihedrals = [
+#      ["A", "B", "C", "D"],
+#      [lambda],
+#      [dih_type]
+#    ]
+#    or you can directly provide two lists (5 elements long), one for the
+#    coefficients and one for the phases.
+#    dihedrals = [
+#      ["A", "B", "C", "D"],
+#      [[c_prop], [d_prop]],
+#      [dih_type]
+#    ]
+#    In both cases dih_type can be omitted, since 0 is the default.
+#
+# 2) CBT, dih_type == 1
+#    V(φ, γ) = V_prop(φ) + 0.5 * K(φ) * (γ - γ0)²
+#    Here K(φ), the force constant of the harmonic angle potential, has the same
+#    functional form as V_prop(φ), so 2 more lists are needed.
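+#    That is, K(φ) = ∑ c_k[n] * (1 + cos(n * φ - d_k[n])), built from the two
+#    extra lists c_k and d_k.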
+#    dihedrals = [
+#      ["A", "B", "C", "D"],
+#      [[lambda], [c_k], [d_k]],
+#      [dih_type]  # dih_type == 1
+#    ]
+#    or
+#    dihedrals = [
+#      ["A", "B", "C", "D"],
+#      [[c_prop], [d_prop], [c_k], [d_k]],
+#      [dih_type]
+#    ]
+# 3) Improper dihedral, dih_type == 2 (not yet implemented)
+#    dihedrals = [
+#      ["A", "B", "C", "D"],
+#      [eq, strength],
+#      [dih_type]
+#    ]
+dihedrals = [
+  [
+    ["BB", "BB", "BB", "BB"],
+    [
+      [1],
+      # [0.13680, 0.18589, -0.16586, 0.07664, -0.02589],  # c_k, kJ mol⁻¹ deg⁻²
+      [449.08790868, 610.2408724, -544.48626121, 251.59427866, -84.9918564],  # c_k, kJ mol⁻¹ rad⁻²
+      # [44.90879087, 61.02408724, -54.44862612, 25.15942787, -8.49918564],
+      [0.07700393, 0.46230281, 1.65322823, -0.9600009, 0.37807322],
+    ],
+    [1.0]
+  ]
+]
diff --git a/examples/tetra_alanine.h5 b/examples/tetra_alanine.h5
new file mode 100644
index 00000000..54e346ea
Binary files /dev/null and b/examples/tetra_alanine.h5 differ
diff --git a/examples/tetra_alanine.toml b/examples/tetra_alanine.toml
new file mode 100644
index 00000000..7770c6f3
--- /dev/null
+++ b/examples/tetra_alanine.toml
@@ -0,0 +1,104 @@
+# [meta]
+# Name of the simulation. May be omitted.
+# name = "Alanine octa-peptide in vacuum"
+# Tags classifying the simulation. May be omitted.
+# tags = ["peptide", "ALA"]
+
+[particles]
+# Number of total particles in the simulation. If an input .hdf5 format file is
+# specified, the number of particles will be inferred from this and *may* be
+# omitted.
+n_particles = 16
+# Mass of the particles in [g/mol]. All masses are assumed equal.
+mass = 72.0
+# Maximum number of particles per molecule present in the system. A default of
+# 200 is assumed, and this keyword may be omitted for any system with smaller
+# molecules.
+# max_molecule_size = 15
+
+[simulation]
+# Number of total time steps in the simulation.
+n_steps = 1000
+# Frequency of trajectory/energy file output in time steps.
+n_print = 10
+# Frequency of requesting that the HDF5 library flush the file output buffers
+# to disk, given in number of n_print timesteps.
+n_flush = 10000
+# Time step used in the simulation in [picoseconds].
+time_step = 0.3
+# Simulation box size in [nanometers].
+box_size = [5.0, 5.0, 5.0]
+# Time integrator used in the simulation. Either "velocity-verlet" or "respa".
+# If "respa", specify also the number of small rRESPA time steps per large
+# time_step with the 'respa_inner' keyword.
+integrator = "respa"
+respa_inner = 5
+# Perform MPI rank domain decomposition every x time steps to (hopefully)
+# reduce the amount of necessary communication between ranks in the pmesh
+# procedures. Omit or set to 'false' or '0' to not perform any domain
+# decomposition.
+domain_decomposition = false
+# Remove linear center of mass momentum from the system before integration
+# starts.
+cancel_com_momentum = true
+# Starting temperature to generate before simulation begins in [kelvin]. Omit
+# or set to 'false' to not change the temperature before starting.
+start_temperature = false
+# Target temperature used in the velocity rescale thermostat in [kelvin]. Omit
+# or set to 'false' to use no thermostat, i.e. a constant energy simulation.
+target_temperature = false
+# Thermostat collision frequency in [1/picoseconds].
+tau = 1
+# Couple groups of particle species to separate thermostats.
+thermostat_coupling_groups = [
+  ["BB", "SC"],
+]
+# The energy functional W[phi] to use. Options:
+# "SquaredPhi": φ² / 2κφ₀,
+# "DefaultNoChi": (φ - φ₀)² / 2κφ₀
+# "DefaultWithChi": (φ - φ₀)² / 2κφ₀ + Σ χφφ' / φ₀
+# Subclass Hamiltonian to create a new energy functional.
+hamiltonian = 'DefaultNoChi'
+
+[field]
+# Particle-mesh grid size, either a single integer or an array of 3 integers
+# (number of grid points in each dimension). In order to guarantee consistency
+# and speed in the PFFT routines, the actual mesh grid will be changed to ensure
+# that each dimension of the 2d PFFT process grid divides each dimension of the
+# mesh grid size.
+mesh_size = [40, 40, 40]
+# Compressibility used in the relaxed incompressibility term of W(phi) in
+# [mol/kJ].
+kappa = 0.05
+# Standard deviation in the Gaussian filter (window function) in [nanometers].
+# This value is a characteristic length scale for the size of the particles in
+# the simulation.
+sigma = 0.5
+# Interaction matrix, chi, ((atom name 1, atom name 2), (mixing energy in
+# [kJ/mol])).
+# Why aren't these just dictionaries?
+chi = [[["BB", "SC"], [0]]]
+
+[bonds]
+# Two-particle bonds, ((atom name 1, atom name 2), (equilibrium length in
+# [nanometers], bond strength in [kJ/mol])). Note the two
+# Second term is missing in Sigbjørn's fort.3. (k = 0)
+# Same for the angles since he used the combined angle-torsion potential.
+# IMPORTANT: numbers in arrays must have the same type!
+bonds = [
+  [["BB", "BB"], [0.35, 1000.0]],
+  [["BB", "SC"], [0.27, 1000.0]],
+]
+# Three-particle angular bonds, ((atom name 1, atom name 2, atom name 3),
+# (equilibrium angle in [degrees], bond strength in [kJ/mol])).
+# 127, 25
+angle_bonds = [
+  [["BB", "BB", "BB"], [127.0, 100.0, 1.0]],
+  [["BB", "BB", "SC"], [100.0, 100.0]],
+]
+# Four-particle torsion angles defined with a Fourier series potential.
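+# (Same cosine series form as in examples/prot_15.toml:
+#  V(φ) = ∑ c[n] * (1 + cos(n * φ - d[n])), with coefficients c and phases d.)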
+# ((atom name 1, atom name 2, atom name 3, atom name 4), +# ((fourier coefficients), (phase coefficients))) +dihedrals = [ + [["BB", "BB", "BB", "BB"], [[1, 1, 1, 1, 1], [0, 0, 0, 0, 0]]] +] diff --git a/examples/water/water.gro b/examples/water/water.gro new file mode 100644 index 00000000..fe8f91a4 --- /dev/null +++ b/examples/water/water.gro @@ -0,0 +1,1174 @@ +Generated by gmx solvate + 1171 + 1W W 1 0.084 3.595 3.359 + 2W W 2 0.460 2.488 2.882 + 3W W 3 3.165 2.218 0.652 + 4W W 4 3.295 3.303 2.679 + 5W W 5 1.213 3.294 0.205 + 6W W 6 1.296 1.446 1.188 + 7W W 7 0.811 1.294 1.352 + 8W W 8 1.802 2.109 0.786 + 9W W 9 0.126 0.762 3.104 + 10W W 10 0.888 0.042 2.906 + 11W W 11 2.582 0.430 1.793 + 12W W 12 2.231 1.301 0.363 + 13W W 13 2.801 0.735 3.454 + 14W W 14 0.167 0.527 2.579 + 15W W 15 0.266 3.146 3.600 + 16W W 16 1.788 2.536 0.332 + 17W W 17 1.113 1.210 3.262 + 18W W 18 3.572 1.069 1.761 + 19W W 19 3.336 2.112 2.620 + 20W W 20 1.644 1.445 1.647 + 21W W 21 2.190 2.784 2.016 + 22W W 22 0.690 2.218 2.248 + 23W W 23 0.049 1.746 1.819 + 24W W 24 1.085 1.718 3.011 + 25W W 25 0.972 2.945 1.710 + 26W W 26 2.093 2.634 2.844 + 27W W 27 1.313 1.274 2.069 + 28W W 28 1.605 2.058 1.712 + 29W W 29 2.602 2.831 3.560 + 30W W 30 1.681 2.151 3.570 + 31W W 31 1.662 1.867 3.048 + 32W W 32 0.071 1.258 3.570 + 33W W 33 0.484 0.375 1.167 + 34W W 34 1.011 2.633 2.166 + 35W W 35 3.474 2.416 1.456 + 36W W 36 2.887 0.816 2.023 + 37W W 37 3.172 0.769 0.118 + 38W W 38 3.637 1.570 2.424 + 39W W 39 1.604 3.277 2.177 + 40W W 40 3.378 2.743 0.615 + 41W W 41 0.900 3.575 1.045 + 42W W 42 0.697 0.329 2.471 + 43W W 43 3.320 1.347 2.746 + 44W W 44 2.725 1.769 1.205 + 45W W 45 2.691 1.457 0.232 + 46W W 46 1.899 2.601 2.372 + 47W W 47 1.705 0.931 1.571 + 48W W 48 2.305 3.018 2.565 + 49W W 49 2.078 0.467 1.003 + 50W W 50 0.735 3.161 0.686 + 51W W 51 0.106 0.660 1.285 + 52W W 52 2.892 1.267 1.759 + 53W W 53 0.350 2.884 1.674 + 54W W 54 1.691 3.234 0.544 + 55W W 55 3.083 1.260 0.008 + 56W W 56 0.106 3.018 2.172 + 57W W 57 1.270 1.950 1.271 + 58W W 58 1.292 0.355 2.364 + 59W W 59 3.521 0.117 1.661 + 60W W 60 0.740 1.989 0.129 + 61W W 61 0.395 1.022 0.308 + 62W W 62 0.457 2.978 1.102 + 63W W 63 1.588 2.461 1.350 + 64W W 64 2.920 3.320 3.064 + 65W W 65 3.229 3.360 1.520 + 66W W 66 2.906 1.213 3.118 + 67W W 67 3.619 2.869 1.252 + 68W W 68 2.246 2.161 2.736 + 69W W 69 0.012 1.712 0.942 + 70W W 70 1.123 2.127 2.515 + 71W W 71 2.327 1.391 0.893 + 72W W 72 2.204 1.881 0.997 + 73W W 73 0.532 1.177 3.393 + 74W W 74 0.422 0.094 1.552 + 75W W 75 2.190 0.441 2.113 + 76W W 76 1.838 0.626 3.276 + 77W W 77 2.193 0.775 0.454 + 78W W 78 3.376 3.168 3.208 + 79W W 79 2.343 0.249 3.247 + 80W W 80 3.362 2.550 3.369 + 81W W 81 2.592 0.655 1.235 + 82W W 82 2.985 1.789 3.639 + 83W W 83 0.725 3.365 1.625 + 84W W 84 1.383 1.082 0.611 + 85W W 85 0.952 0.366 3.275 + 86W W 86 3.004 0.850 0.956 + 87W W 87 3.371 1.332 0.901 + 88W W 88 1.114 3.549 3.372 + 89W W 89 3.361 0.619 1.642 + 90W W 90 3.511 2.868 0.087 + 91W W 91 0.418 2.071 1.789 + 92W W 92 2.335 0.658 0.019 + 93W W 93 3.469 2.990 1.739 + 94W W 94 3.508 0.184 2.489 + 95W W 95 1.953 1.396 3.043 + 96W W 96 2.046 2.735 0.746 + 97W W 97 2.120 0.006 1.795 + 98W W 98 0.732 0.909 1.858 + 99W W 99 0.286 2.109 2.632 + 100W W 100 2.145 3.302 1.338 + 101W W 101 3.339 0.899 2.210 + 102W W 102 1.976 3.246 2.977 + 103W W 103 3.319 2.305 0.182 + 104W W 104 1.754 1.788 1.243 + 105W W 105 1.755 2.134 2.578 + 106W W 106 1.972 1.020 2.023 + 107W W 107 2.824 1.318 0.861 + 108W W 108 3.111 1.051 0.503 + 109W W 109 2.920 
3.091 0.892 + 110W W 110 2.754 2.694 1.688 + 111W W 111 2.650 1.773 0.666 + 112W W 112 2.083 1.983 1.652 + 113W W 113 0.339 0.786 2.143 + 114W W 114 3.117 1.838 2.970 + 115W W 115 3.468 3.456 2.116 + 116W W 116 1.398 2.900 1.518 + 117W W 117 2.405 3.433 2.842 + 118W W 118 2.399 0.763 2.606 + 119W W 119 1.606 3.471 2.667 + 120W W 120 3.303 0.425 1.133 + 121W W 121 0.077 1.065 2.472 + 122W W 122 2.937 0.860 1.490 + 123W W 123 3.348 1.011 3.267 + 124W W 124 2.233 2.710 1.485 + 125W W 125 1.489 3.301 1.098 + 126W W 126 0.604 0.481 0.356 + 127W W 127 1.708 0.135 2.166 + 128W W 128 0.736 3.255 0.014 + 129W W 129 1.764 2.350 3.078 + 130W W 130 1.594 0.708 1.068 + 131W W 131 2.950 0.305 1.496 + 132W W 132 2.670 3.587 1.824 + 133W W 133 2.082 3.235 2.164 + 134W W 134 1.478 2.272 2.125 + 135W W 135 3.248 2.995 2.251 + 136W W 136 0.942 0.498 1.020 + 137W W 137 0.947 0.726 0.045 + 138W W 138 1.748 1.335 2.439 + 139W W 139 2.596 0.201 2.309 + 140W W 140 0.016 2.044 2.194 + 141W W 141 1.720 1.862 0.352 + 142W W 142 0.988 0.177 0.122 + 143W W 143 1.726 1.258 1.173 + 144W W 144 2.696 0.234 2.872 + 145W W 145 2.088 2.028 3.198 + 146W W 146 2.705 2.058 3.091 + 147W W 147 1.989 0.198 3.596 + 148W W 148 1.211 0.814 1.989 + 149W W 149 0.637 0.770 1.387 + 150W W 150 0.423 3.482 1.063 + 151W W 151 1.755 2.812 3.188 + 152W W 152 0.790 2.297 0.547 + 153W W 153 2.750 1.481 2.167 + 154W W 154 1.955 2.155 2.101 + 155W W 155 2.425 2.529 0.233 + 156W W 156 2.097 0.550 1.561 + 157W W 157 3.576 0.828 0.829 + 158W W 158 0.070 3.417 2.860 + 159W W 159 0.487 2.643 2.392 + 160W W 160 0.054 3.489 0.624 + 161W W 161 1.316 2.782 0.279 + 162W W 162 3.221 3.164 0.465 + 163W W 163 2.196 1.817 0.479 + 164W W 164 1.039 1.008 0.996 + 165W W 165 1.871 0.918 0.048 + 166W W 166 2.455 2.483 3.226 + 167W W 167 2.808 0.389 0.843 + 168W W 168 0.138 0.237 0.291 + 169W W 169 1.223 2.472 1.673 + 170W W 170 0.965 2.424 0.071 + 171W W 171 0.206 2.665 0.839 + 172W W 172 0.710 1.735 3.281 + 173W W 173 1.414 0.295 3.467 + 174W W 174 2.123 0.437 2.823 + 175W W 175 1.318 0.205 1.104 + 176W W 176 2.377 0.282 0.609 + 177W W 177 0.370 1.177 1.556 + 178W W 178 3.059 0.285 1.962 + 179W W 179 0.481 0.783 0.776 + 180W W 180 3.052 0.471 2.392 + 181W W 181 3.448 1.674 0.077 + 182W W 182 0.740 0.432 1.896 + 183W W 183 1.854 1.004 2.836 + 184W W 184 0.002 0.694 0.314 + 185W W 185 2.146 1.481 2.054 + 186W W 186 2.013 3.560 0.350 + 187W W 187 0.159 2.067 0.047 + 188W W 188 0.894 2.478 3.173 + 189W W 189 0.415 1.645 2.804 + 190W W 190 1.406 1.750 2.598 + 191W W 191 3.174 0.109 2.860 + 192W W 192 3.572 1.719 2.851 + 193W W 193 2.423 2.251 0.636 + 194W W 194 2.897 3.551 0.331 + 195W W 195 2.074 1.541 3.473 + 196W W 196 0.404 2.508 3.513 + 197W W 197 3.610 0.430 3.465 + 198W W 198 3.328 1.358 2.086 + 199W W 199 1.636 3.488 3.563 + 200W W 200 1.491 0.117 0.290 + 201W W 201 0.799 0.809 3.119 + 202W W 202 1.233 2.803 2.981 + 203W W 203 3.359 0.794 2.779 + 204W W 204 2.570 3.453 0.747 + 205W W 205 2.809 2.072 2.114 + 206W W 206 1.059 0.488 2.781 + 207W W 207 1.528 1.557 3.538 + 208W W 208 1.031 2.736 0.732 + 209W W 209 0.554 0.860 2.720 + 210W W 210 1.883 2.922 1.184 + 211W W 211 3.633 1.482 1.365 + 212W W 212 1.855 0.115 3.084 + 213W W 213 2.635 2.820 2.163 + 214W W 214 2.938 3.164 1.871 + 215W W 215 2.157 2.966 0.299 + 216W W 216 0.167 2.858 3.141 + 217W W 217 0.880 0.198 1.489 + 218W W 218 2.501 1.040 2.181 + 219W W 219 1.299 2.211 0.381 + 220W W 220 3.297 2.153 1.880 + 221W W 221 0.624 2.911 3.303 + 222W W 222 3.011 3.593 1.100 + 223W W 223 1.110 0.613 1.545 + 224W W 
224 0.879 2.106 1.635 + 225W W 225 3.259 3.613 3.417 + 226W W 226 2.418 1.801 2.350 + 227W W 227 0.267 1.599 0.265 + 228W W 228 3.401 3.257 0.962 + 229W W 229 3.238 0.581 0.581 + 230W W 230 1.377 0.100 2.984 + 231W W 231 3.083 2.902 1.442 + 232W W 232 3.112 1.458 1.347 + 233W W 233 1.474 1.393 2.887 + 234W W 234 2.585 2.546 2.750 + 235W W 235 1.235 1.000 2.862 + 236W W 236 1.214 3.466 1.478 + 237W W 237 0.531 3.425 3.112 + 238W W 238 0.643 2.786 0.242 + 239W W 239 3.160 2.544 1.052 + 240W W 240 0.764 1.336 2.938 + 241W W 241 1.282 2.323 0.890 + 242W W 242 2.381 1.705 2.971 + 243W W 243 2.272 1.333 2.515 + 244W W 244 0.832 1.705 2.544 + 245W W 245 3.042 3.593 2.373 + 246W W 246 2.833 0.227 3.367 + 247W W 247 0.497 0.377 2.954 + 248W W 248 0.836 1.400 2.007 + 249W W 249 0.008 2.973 2.674 + 250W W 250 1.593 1.682 2.117 + 251W W 251 1.416 0.707 0.222 + 252W W 252 0.551 1.523 1.003 + 253W W 253 0.322 3.375 2.440 + 254W W 254 3.117 1.601 0.548 + 255W W 255 3.212 0.517 3.234 + 256W W 256 2.648 2.726 1.197 + 257W W 257 3.212 0.114 0.670 + 258W W 258 2.732 2.589 0.681 + 259W W 259 1.634 1.647 0.766 + 260W W 260 2.821 0.451 0.289 + 261W W 261 2.886 1.058 2.475 + 262W W 262 0.595 1.651 1.682 + 263W W 263 0.889 2.590 1.331 + 264W W 264 2.652 1.210 1.296 + 265W W 265 3.637 2.500 2.503 + 266W W 266 3.555 0.487 2.064 + 267W W 267 0.094 3.317 1.440 + 268W W 268 0.603 2.504 0.976 + 269W W 269 0.518 0.121 3.529 + 270W W 270 1.637 2.974 3.631 + 271W W 271 3.392 1.942 1.421 + 272W W 272 2.746 2.119 0.301 + 273W W 273 2.898 2.362 3.550 + 274W W 274 1.713 0.072 0.830 + 275W W 275 2.426 0.941 1.689 + 276W W 276 0.112 2.528 2.021 + 277W W 277 1.365 0.490 0.662 + 278W W 278 1.169 1.974 3.511 + 279W W 279 2.163 0.955 1.225 + 280W W 280 2.368 2.365 2.347 + 281W W 281 1.113 2.953 3.505 + 282W W 282 0.964 0.750 0.519 + 283W W 283 2.481 0.892 0.844 + 284W W 284 2.439 3.000 3.081 + 285W W 285 1.124 1.592 1.680 + 286W W 286 0.779 3.290 2.545 + 287W W 287 3.524 3.407 0.146 + 288W W 288 0.387 2.079 3.142 + 289W W 289 0.818 1.990 1.072 + 290W W 290 1.189 2.997 2.467 + 291W W 291 1.567 0.416 1.508 + 292W W 292 0.178 2.400 0.438 + 293W W 293 1.697 1.351 0.308 + 294W W 294 0.343 1.900 1.327 + 295W W 295 1.633 0.542 2.791 + 296W W 296 3.305 0.251 0.218 + 297W W 297 2.052 3.254 3.469 + 298W W 298 3.610 2.348 2.991 + 299W W 299 3.018 3.146 3.602 + 300W W 300 0.757 3.602 2.080 + 301W W 301 3.616 2.224 0.954 + 302W W 302 2.146 1.491 1.467 + 303W W 303 1.913 0.103 1.357 + 304W W 304 0.972 3.087 1.194 + 305W W 305 1.171 0.197 1.888 + 306W W 306 0.234 3.019 0.436 + 307W W 307 2.050 2.383 1.154 + 308W W 308 0.925 3.178 3.033 + 309W W 309 0.402 2.001 0.729 + 310W W 310 0.287 0.544 1.728 + 311W W 311 1.835 2.509 1.785 + 312W W 312 0.087 0.363 0.805 + 313W W 313 2.628 0.955 0.304 + 314W W 314 3.145 2.627 1.952 + 315W W 315 1.806 3.111 1.783 + 316W W 316 1.218 3.215 0.694 + 317W W 317 2.500 0.118 0.124 + 318W W 318 0.300 3.378 1.865 + 319W W 319 2.497 1.136 3.500 + 320W W 320 0.480 0.666 3.499 + 321W W 321 0.902 1.255 0.557 + 322W W 322 2.665 1.582 3.374 + 323W W 323 1.008 1.863 2.083 + 324W W 324 2.482 0.149 1.203 + 325W W 325 1.613 3.580 1.706 + 326W W 326 2.827 3.042 2.643 + 327W W 327 2.411 3.191 1.754 + 328W W 328 2.430 2.171 1.348 + 329W W 329 3.573 0.024 1.141 + 330W W 330 1.261 1.659 0.304 + 331W W 331 3.427 0.996 1.294 + 332W W 332 2.119 2.166 0.171 + 333W W 333 3.055 2.366 2.919 + 334W W 334 2.709 3.067 0.386 + 335W W 335 1.324 0.742 3.292 + 336W W 336 1.144 1.326 2.533 + 337W W 337 2.336 0.771 3.187 + 338W W 338 0.180 1.242 
2.964 + 339W W 339 1.940 1.719 2.690 + 340W W 340 0.297 2.477 1.347 + 341W W 341 0.444 1.774 2.237 + 342W W 342 2.761 2.060 2.587 + 343W W 343 2.533 3.440 3.398 + 344W W 344 2.429 1.179 2.951 + 345W W 345 1.708 2.953 2.679 + 346W W 346 1.193 3.308 1.948 + 347W W 347 2.998 2.479 2.367 + 348W W 348 2.048 2.551 3.504 + 349W W 349 0.653 2.504 1.843 + 350W W 350 2.999 2.263 1.488 + 351W W 351 2.640 3.329 2.239 + 352W W 352 0.618 3.058 2.109 + 353W W 353 2.090 3.276 0.830 + 354W W 354 1.398 2.531 3.468 + 355W W 355 1.524 2.769 0.845 + 356W W 356 0.745 1.509 0.104 + 357W W 357 3.165 2.218 4.296 + 358W W 358 1.213 3.294 3.849 + 359W W 359 1.296 1.446 4.832 + 360W W 360 0.811 1.294 4.996 + 361W W 361 2.582 0.430 5.437 + 362W W 362 2.231 1.301 4.007 + 363W W 363 1.788 2.536 3.976 + 364W W 364 3.572 1.069 5.405 + 365W W 365 1.644 1.445 5.291 + 366W W 366 0.049 1.746 5.463 + 367W W 367 0.972 2.945 5.354 + 368W W 368 1.605 2.058 5.356 + 369W W 369 3.474 2.416 5.100 + 370W W 370 3.172 0.769 3.762 + 371W W 371 3.378 2.743 4.259 + 372W W 372 0.507 3.529 4.013 + 373W W 373 0.900 3.575 4.689 + 374W W 374 2.725 1.769 4.849 + 375W W 375 2.691 1.457 3.876 + 376W W 376 3.176 1.809 4.631 + 377W W 377 0.735 3.161 4.330 + 378W W 378 0.106 0.660 4.929 + 379W W 379 2.892 1.267 5.403 + 380W W 380 0.350 2.884 5.318 + 381W W 381 1.691 3.234 4.188 + 382W W 382 3.083 1.260 3.652 + 383W W 383 1.270 1.950 4.915 + 384W W 384 3.521 0.117 5.305 + 385W W 385 0.395 1.022 3.952 + 386W W 386 0.457 2.978 4.746 + 387W W 387 1.588 2.461 4.994 + 388W W 388 1.846 0.962 4.392 + 389W W 389 3.229 3.360 5.164 + 390W W 390 3.619 2.869 4.896 + 391W W 391 0.012 1.712 4.586 + 392W W 392 2.327 1.391 4.537 + 393W W 393 0.422 0.094 5.196 + 394W W 394 2.193 0.775 4.098 + 395W W 395 2.592 0.655 4.879 + 396W W 396 1.383 1.082 4.255 + 397W W 397 3.004 0.850 4.600 + 398W W 398 3.371 1.332 4.545 + 399W W 399 1.040 0.085 4.251 + 400W W 400 3.361 0.619 5.286 + 401W W 401 0.418 2.071 5.433 + 402W W 402 2.335 0.658 3.663 + 403W W 403 2.046 2.735 4.390 + 404W W 404 2.120 0.006 5.439 + 405W W 405 0.732 0.909 5.502 + 406W W 406 2.145 3.302 4.982 + 407W W 407 3.319 2.305 3.826 + 408W W 408 1.754 1.788 4.887 + 409W W 409 3.111 1.051 4.147 + 410W W 410 2.920 3.091 4.536 + 411W W 411 2.754 2.694 5.332 + 412W W 412 2.650 1.773 4.310 + 413W W 413 2.964 1.771 5.363 + 414W W 414 1.398 2.900 5.162 + 415W W 415 3.303 0.425 4.777 + 416W W 416 2.937 0.860 5.134 + 417W W 417 1.489 3.301 4.742 + 418W W 418 0.604 0.481 4.000 + 419W W 419 0.736 3.255 3.658 + 420W W 420 1.110 1.721 4.409 + 421W W 421 1.594 0.708 4.712 + 422W W 422 2.950 0.305 5.140 + 423W W 423 2.670 3.587 5.468 + 424W W 424 0.942 0.498 4.664 + 425W W 425 1.720 1.862 3.996 + 426W W 426 0.988 0.177 3.766 + 427W W 427 1.726 1.258 4.817 + 428W W 428 0.637 0.770 5.031 + 429W W 429 0.423 3.482 4.707 + 430W W 430 0.790 2.297 4.191 + 431W W 431 2.470 1.682 5.412 + 432W W 432 2.425 2.529 3.877 + 433W W 433 3.576 0.828 4.473 + 434W W 434 0.054 3.489 4.268 + 435W W 435 1.316 2.782 3.923 + 436W W 436 3.221 3.164 4.109 + 437W W 437 2.196 1.817 4.123 + 438W W 438 1.039 1.008 4.640 + 439W W 439 1.871 0.918 3.692 + 440W W 440 2.808 0.389 4.487 + 441W W 441 1.223 2.472 5.317 + 442W W 442 0.965 2.424 3.715 + 443W W 443 0.206 2.665 4.483 + 444W W 444 1.318 0.205 4.748 + 445W W 445 2.377 0.282 4.253 + 446W W 446 0.370 1.177 5.200 + 447W W 447 0.481 0.783 4.420 + 448W W 448 3.448 1.674 3.721 + 449W W 449 2.013 3.560 3.994 + 450W W 450 0.159 2.067 3.691 + 451W W 451 3.560 1.919 4.181 + 452W W 452 2.423 2.251 4.280 + 453W W 453 
2.897 3.551 3.975 + 454W W 454 1.491 0.117 3.934 + 455W W 455 3.030 2.716 3.877 + 456W W 456 2.570 3.453 4.391 + 457W W 457 2.786 2.234 4.640 + 458W W 458 1.883 2.922 4.828 + 459W W 459 3.633 1.482 5.009 + 460W W 460 2.157 2.966 3.943 + 461W W 461 0.880 0.198 5.133 + 462W W 462 1.299 2.211 4.025 + 463W W 463 3.011 3.593 4.744 + 464W W 464 0.267 1.599 3.909 + 465W W 465 3.238 0.581 4.225 + 466W W 466 3.083 2.902 5.086 + 467W W 467 3.112 1.458 4.991 + 468W W 468 1.214 3.466 5.122 + 469W W 469 0.643 2.786 3.886 + 470W W 470 3.160 2.544 4.696 + 471W W 471 3.537 1.266 4.063 + 472W W 472 1.282 2.323 4.534 + 473W W 473 1.416 0.707 3.866 + 474W W 474 0.551 1.523 4.647 + 475W W 475 3.117 1.601 4.192 + 476W W 476 2.648 2.726 4.841 + 477W W 477 3.212 0.114 4.314 + 478W W 478 2.732 2.589 4.325 + 479W W 479 1.634 1.647 4.410 + 480W W 480 2.821 0.451 3.933 + 481W W 481 2.652 1.210 4.940 + 482W W 482 0.094 3.317 5.084 + 483W W 483 0.603 2.504 4.620 + 484W W 484 3.392 1.942 5.065 + 485W W 485 2.746 2.119 3.945 + 486W W 486 1.713 0.072 4.474 + 487W W 487 2.426 0.941 5.333 + 488W W 488 1.365 0.490 4.306 + 489W W 489 0.964 0.750 4.163 + 490W W 490 2.481 0.892 4.488 + 491W W 491 1.124 1.592 5.324 + 492W W 492 3.524 3.407 3.790 + 493W W 493 0.818 1.990 4.716 + 494W W 494 1.567 0.416 5.152 + 495W W 495 0.178 2.400 4.082 + 496W W 496 1.697 1.351 3.952 + 497W W 497 3.305 0.251 3.862 + 498W W 498 3.616 2.224 4.598 + 499W W 499 2.146 1.491 5.111 + 500W W 500 1.913 0.103 5.001 + 501W W 501 0.972 3.087 4.838 + 502W W 502 0.234 3.019 4.080 + 503W W 503 2.050 2.383 4.798 + 504W W 504 0.402 2.001 4.373 + 505W W 505 0.087 0.363 4.449 + 506W W 506 2.628 0.955 3.948 + 507W W 507 1.806 3.111 5.427 + 508W W 508 1.218 3.215 4.338 + 509W W 509 2.500 0.118 3.768 + 510W W 510 0.300 3.378 5.509 + 511W W 511 0.211 1.266 4.476 + 512W W 512 0.902 1.255 4.201 + 513W W 513 1.613 3.580 5.350 + 514W W 514 2.411 3.191 5.398 + 515W W 515 2.430 2.171 4.992 + 516W W 516 1.892 0.396 4.063 + 517W W 517 3.573 0.024 4.785 + 518W W 518 1.261 1.659 3.948 + 519W W 519 3.427 0.996 4.938 + 520W W 520 2.119 2.166 3.815 + 521W W 521 0.297 2.477 4.991 + 522W W 522 2.706 3.233 5.000 + 523W W 523 0.653 2.504 5.487 + 524W W 524 2.999 2.263 5.132 + 525W W 525 2.090 3.276 4.474 + 526W W 526 1.524 2.769 4.489 + 527W W 527 0.745 1.509 3.748 + 528W W 528 1.296 5.090 1.188 + 529W W 529 0.126 4.406 3.104 + 530W W 530 0.888 3.686 2.906 + 531W W 531 2.582 4.074 1.793 + 532W W 532 2.231 4.945 0.363 + 533W W 533 2.801 4.379 3.454 + 534W W 534 0.167 4.171 2.579 + 535W W 535 0.220 4.963 2.025 + 536W W 536 1.113 4.854 3.262 + 537W W 537 1.644 5.089 1.647 + 538W W 538 0.049 5.390 1.819 + 539W W 539 1.085 5.362 3.011 + 540W W 540 3.356 5.145 3.265 + 541W W 541 1.313 4.918 2.069 + 542W W 542 1.662 5.511 3.048 + 543W W 543 0.071 4.902 3.570 + 544W W 544 0.484 4.019 1.167 + 545W W 545 3.172 4.413 0.118 + 546W W 546 3.637 5.214 2.424 + 547W W 547 0.697 3.973 2.471 + 548W W 548 3.320 4.991 2.746 + 549W W 549 2.725 5.413 1.205 + 550W W 550 2.691 5.101 0.232 + 551W W 551 1.705 4.575 1.571 + 552W W 552 3.176 5.453 0.987 + 553W W 553 0.106 4.304 1.285 + 554W W 554 2.892 4.911 1.759 + 555W W 555 3.083 4.904 0.008 + 556W W 556 1.292 3.999 2.364 + 557W W 557 3.521 3.761 1.661 + 558W W 558 0.395 4.666 0.308 + 559W W 559 1.846 4.606 0.748 + 560W W 560 2.906 4.857 3.118 + 561W W 561 2.327 5.035 0.893 + 562W W 562 0.547 4.945 2.471 + 563W W 563 0.532 4.821 3.393 + 564W W 564 0.422 3.738 1.552 + 565W W 565 2.190 4.085 2.113 + 566W W 566 1.838 4.270 3.276 + 567W W 567 2.193 4.419 0.454 + 
568W W 568 2.343 3.893 3.247 + 569W W 569 2.592 4.299 1.235 + 570W W 570 2.985 5.433 3.639 + 571W W 571 1.383 4.726 0.611 + 572W W 572 0.952 4.010 3.275 + 573W W 573 3.004 4.494 0.956 + 574W W 574 3.371 4.976 0.901 + 575W W 575 1.040 3.729 0.607 + 576W W 576 3.361 4.263 1.642 + 577W W 577 3.508 3.828 2.489 + 578W W 578 1.953 5.040 3.043 + 579W W 579 2.120 3.650 1.795 + 580W W 580 0.732 4.553 1.858 + 581W W 581 3.339 4.543 2.210 + 582W W 582 1.754 5.432 1.243 + 583W W 583 3.606 3.943 2.984 + 584W W 584 1.972 4.664 2.023 + 585W W 585 3.111 4.695 0.503 + 586W W 586 2.650 5.417 0.666 + 587W W 587 2.964 5.415 1.719 + 588W W 588 0.339 4.430 2.143 + 589W W 589 3.117 5.482 2.970 + 590W W 590 2.399 4.407 2.606 + 591W W 591 3.303 4.069 1.133 + 592W W 592 3.094 5.388 2.434 + 593W W 593 2.937 4.504 1.490 + 594W W 594 3.348 4.655 3.267 + 595W W 595 0.604 4.125 0.356 + 596W W 596 1.708 3.779 2.166 + 597W W 597 1.594 4.352 1.068 + 598W W 598 2.950 3.949 1.496 + 599W W 599 0.942 4.142 1.020 + 600W W 600 0.947 4.370 0.045 + 601W W 601 2.596 3.845 2.309 + 602W W 602 0.988 3.821 0.122 + 603W W 603 1.726 4.902 1.173 + 604W W 604 2.696 3.878 2.872 + 605W W 605 1.989 3.842 3.596 + 606W W 606 1.211 4.458 1.989 + 607W W 607 0.637 4.414 1.387 + 608W W 608 2.470 5.326 1.768 + 609W W 609 2.097 4.194 1.561 + 610W W 610 3.576 4.472 0.829 + 611W W 611 1.953 4.403 2.430 + 612W W 612 2.196 5.461 0.479 + 613W W 613 1.039 4.652 0.996 + 614W W 614 1.871 4.562 0.048 + 615W W 615 2.808 4.033 0.843 + 616W W 616 0.138 3.881 0.291 + 617W W 617 1.414 3.939 3.467 + 618W W 618 2.123 4.081 2.823 + 619W W 619 1.318 3.849 1.104 + 620W W 620 2.377 3.926 0.609 + 621W W 621 0.322 3.858 2.186 + 622W W 622 0.370 4.821 1.556 + 623W W 623 3.059 3.929 1.962 + 624W W 624 0.481 4.427 0.776 + 625W W 625 0.740 4.076 1.896 + 626W W 626 1.854 4.648 2.836 + 627W W 627 0.002 4.338 0.314 + 628W W 628 2.146 5.125 2.054 + 629W W 629 0.415 5.289 2.804 + 630W W 630 1.406 5.394 2.598 + 631W W 631 3.174 3.753 2.860 + 632W W 632 2.074 5.185 3.473 + 633W W 633 3.328 5.002 2.086 + 634W W 634 1.491 3.761 0.290 + 635W W 635 0.799 4.453 3.119 + 636W W 636 3.359 4.438 2.779 + 637W W 637 1.059 4.132 2.781 + 638W W 638 1.528 5.201 3.538 + 639W W 639 0.554 4.504 2.720 + 640W W 640 3.633 5.126 1.365 + 641W W 641 0.910 4.494 2.379 + 642W W 642 0.880 3.842 1.489 + 643W W 643 2.501 4.684 2.181 + 644W W 644 1.110 4.257 1.545 + 645W W 645 0.267 5.243 0.265 + 646W W 646 3.238 4.225 0.581 + 647W W 647 1.377 3.744 2.984 + 648W W 648 3.112 5.102 1.347 + 649W W 649 1.474 5.037 2.887 + 650W W 650 1.235 4.644 2.862 + 651W W 651 0.764 4.980 2.938 + 652W W 652 3.537 4.910 0.419 + 653W W 653 2.381 5.349 2.971 + 654W W 654 2.272 4.977 2.515 + 655W W 655 0.832 5.349 2.544 + 656W W 656 2.833 3.871 3.367 + 657W W 657 0.836 5.044 2.007 + 658W W 658 1.205 4.858 0.129 + 659W W 659 1.416 4.351 0.222 + 660W W 660 0.551 5.167 1.003 + 661W W 661 3.212 4.161 3.234 + 662W W 662 0.246 5.294 3.374 + 663W W 663 3.212 3.758 0.670 + 664W W 664 2.821 4.095 0.289 + 665W W 665 0.595 5.295 1.682 + 666W W 666 2.652 4.854 1.296 + 667W W 667 1.675 4.233 1.992 + 668W W 668 3.555 4.131 2.064 + 669W W 669 0.518 3.765 3.529 + 670W W 670 1.713 3.716 0.830 + 671W W 671 2.426 4.585 1.689 + 672W W 672 1.365 4.134 0.662 + 673W W 673 0.964 4.394 0.519 + 674W W 674 2.481 4.536 0.844 + 675W W 675 1.124 5.236 1.680 + 676W W 676 1.567 4.060 1.508 + 677W W 677 1.697 4.995 0.308 + 678W W 678 1.633 4.186 2.791 + 679W W 679 3.305 3.895 0.218 + 680W W 680 2.146 5.135 1.467 + 681W W 681 1.471 4.472 2.419 + 682W W 682 1.171 
3.841 1.888 + 683W W 683 0.287 4.188 1.728 + 684W W 684 0.087 4.007 0.805 + 685W W 685 2.751 5.213 2.732 + 686W W 686 2.628 4.599 0.304 + 687W W 687 2.500 3.762 0.124 + 688W W 688 2.497 4.780 3.500 + 689W W 689 0.211 4.910 0.832 + 690W W 690 0.902 4.899 0.557 + 691W W 691 2.665 5.226 3.374 + 692W W 692 1.008 5.507 2.083 + 693W W 693 2.482 3.793 1.203 + 694W W 694 1.892 4.040 0.419 + 695W W 695 3.573 3.668 1.141 + 696W W 696 1.261 5.303 0.304 + 697W W 697 1.324 4.386 3.292 + 698W W 698 1.144 4.970 2.533 + 699W W 699 2.336 4.415 3.187 + 700W W 700 0.180 4.886 2.964 + 701W W 701 1.940 5.363 2.690 + 702W W 702 2.429 4.823 2.951 + 703W W 703 1.639 4.754 3.319 + 704W W 704 0.745 5.153 0.104 + 705W W 705 0.811 4.938 4.996 + 706W W 706 2.582 4.074 5.437 + 707W W 707 2.231 4.945 4.007 + 708W W 708 3.572 4.713 5.405 + 709W W 709 1.644 5.089 5.291 + 710W W 710 0.484 4.019 4.811 + 711W W 711 3.172 4.413 3.762 + 712W W 712 2.725 5.413 4.849 + 713W W 713 3.176 5.453 4.631 + 714W W 714 2.078 4.111 4.647 + 715W W 715 0.106 4.304 4.929 + 716W W 716 2.892 4.911 5.403 + 717W W 717 3.083 4.904 3.652 + 718W W 718 3.521 3.761 5.305 + 719W W 719 0.395 4.666 3.952 + 720W W 720 1.846 4.606 4.392 + 721W W 721 2.327 5.035 4.537 + 722W W 722 0.422 3.738 5.196 + 723W W 723 2.193 4.419 4.098 + 724W W 724 2.592 4.299 4.879 + 725W W 725 1.383 4.726 4.255 + 726W W 726 3.004 4.494 4.600 + 727W W 727 3.371 4.976 4.545 + 728W W 728 1.040 3.729 4.251 + 729W W 729 2.335 4.302 3.663 + 730W W 730 0.732 4.553 5.502 + 731W W 731 1.754 5.432 4.887 + 732W W 732 2.824 4.962 4.505 + 733W W 733 3.111 4.695 4.147 + 734W W 734 2.650 5.417 4.310 + 735W W 735 2.964 5.415 5.363 + 736W W 736 3.303 4.069 4.777 + 737W W 737 0.604 4.125 4.000 + 738W W 738 1.594 4.352 4.712 + 739W W 739 2.950 3.949 5.140 + 740W W 740 0.942 4.142 4.664 + 741W W 741 0.947 4.370 3.689 + 742W W 742 0.988 3.821 3.766 + 743W W 743 1.726 4.902 4.817 + 744W W 744 0.637 4.414 5.031 + 745W W 745 3.576 4.472 4.473 + 746W W 746 2.196 5.461 4.123 + 747W W 747 1.039 4.652 4.640 + 748W W 748 1.871 4.562 3.692 + 749W W 749 2.808 4.033 4.487 + 750W W 750 0.138 3.881 3.935 + 751W W 751 1.318 3.849 4.748 + 752W W 752 2.377 3.926 4.253 + 753W W 753 0.481 4.427 4.420 + 754W W 754 3.448 5.318 3.721 + 755W W 755 0.002 4.338 3.958 + 756W W 756 1.491 3.761 3.934 + 757W W 757 3.633 5.126 5.009 + 758W W 758 0.880 3.842 5.133 + 759W W 759 0.267 5.243 3.909 + 760W W 760 3.238 4.225 4.225 + 761W W 761 3.537 4.910 4.063 + 762W W 762 1.205 4.858 3.773 + 763W W 763 1.416 4.351 3.866 + 764W W 764 0.551 5.167 4.647 + 765W W 765 3.212 3.758 4.314 + 766W W 766 2.821 4.095 3.933 + 767W W 767 2.652 4.854 4.940 + 768W W 768 1.713 3.716 4.474 + 769W W 769 1.365 4.134 4.306 + 770W W 770 2.163 4.599 4.869 + 771W W 771 0.964 4.394 4.163 + 772W W 772 2.481 4.536 4.488 + 773W W 773 1.567 4.060 5.152 + 774W W 774 1.697 4.995 3.952 + 775W W 775 3.305 3.895 3.862 + 776W W 776 2.146 5.135 5.111 + 777W W 777 0.287 4.188 5.372 + 778W W 778 0.087 4.007 4.449 + 779W W 779 2.628 4.599 3.948 + 780W W 780 2.500 3.762 3.768 + 781W W 781 0.211 4.910 4.476 + 782W W 782 0.902 4.899 4.201 + 783W W 783 2.482 3.793 4.847 + 784W W 784 1.892 4.040 4.063 + 785W W 785 1.261 5.303 3.948 + 786W W 786 3.427 4.640 4.938 + 787W W 787 0.745 5.153 3.748 + 788W W 788 3.728 3.595 3.359 + 789W W 789 4.104 2.488 2.882 + 790W W 790 4.857 3.294 0.205 + 791W W 791 4.940 1.446 1.188 + 792W W 792 4.455 1.294 1.352 + 793W W 793 3.770 0.762 3.104 + 794W W 794 4.532 0.042 2.906 + 795W W 795 3.811 0.527 2.579 + 796W W 796 3.910 3.146 3.600 + 797W W 
797 3.864 1.319 2.025 + 798W W 798 4.757 1.210 3.262 + 799W W 799 5.288 1.445 1.647 + 800W W 800 4.334 2.218 2.248 + 801W W 801 4.729 1.718 3.011 + 802W W 802 4.616 2.945 1.710 + 803W W 803 4.957 1.274 2.069 + 804W W 804 5.249 2.058 1.712 + 805W W 805 5.306 1.867 3.048 + 806W W 806 3.715 1.258 3.570 + 807W W 807 4.128 0.375 1.167 + 808W W 808 4.655 2.633 2.166 + 809W W 809 4.151 3.529 0.369 + 810W W 810 4.544 3.575 1.045 + 811W W 811 4.341 0.329 2.471 + 812W W 812 5.349 0.931 1.571 + 813W W 813 4.379 3.161 0.686 + 814W W 814 3.750 0.660 1.285 + 815W W 815 3.994 2.884 1.674 + 816W W 816 5.335 3.234 0.544 + 817W W 817 4.541 2.590 2.652 + 818W W 818 4.914 1.950 1.271 + 819W W 819 4.936 0.355 2.364 + 820W W 820 4.384 1.989 0.129 + 821W W 821 4.039 1.022 0.308 + 822W W 822 4.101 2.978 1.102 + 823W W 823 5.232 2.461 1.350 + 824W W 824 5.490 0.962 0.748 + 825W W 825 4.767 2.127 2.515 + 826W W 826 4.191 1.301 2.471 + 827W W 827 4.176 1.177 3.393 + 828W W 828 4.066 0.094 1.552 + 829W W 829 5.482 0.626 3.276 + 830W W 830 4.369 3.365 1.625 + 831W W 831 5.027 1.082 0.611 + 832W W 832 4.596 0.366 3.275 + 833W W 833 4.684 0.085 0.607 + 834W W 834 4.062 2.071 1.789 + 835W W 835 4.376 0.909 1.858 + 836W W 836 3.930 2.109 2.632 + 837W W 837 3.983 0.786 2.143 + 838W W 838 5.042 2.900 1.518 + 839W W 839 3.721 1.065 2.472 + 840W W 840 5.133 3.301 1.098 + 841W W 841 4.380 3.255 0.014 + 842W W 842 4.754 1.721 0.765 + 843W W 843 5.408 2.350 3.078 + 844W W 844 5.238 0.708 1.068 + 845W W 845 5.122 2.272 2.125 + 846W W 846 4.586 0.498 1.020 + 847W W 847 5.108 2.795 1.980 + 848W W 848 4.591 0.726 0.045 + 849W W 849 3.660 2.044 2.194 + 850W W 850 4.632 0.177 0.122 + 851W W 851 5.370 1.258 1.173 + 852W W 852 4.928 2.246 2.981 + 853W W 853 4.855 0.814 1.989 + 854W W 854 4.281 0.770 1.387 + 855W W 855 4.067 3.482 1.063 + 856W W 856 4.434 2.297 0.547 + 857W W 857 4.131 2.643 2.392 + 858W W 858 3.698 3.489 0.624 + 859W W 859 4.960 2.782 0.279 + 860W W 860 4.683 1.008 0.996 + 861W W 861 3.782 0.237 0.291 + 862W W 862 5.068 2.556 2.581 + 863W W 863 4.867 2.472 1.673 + 864W W 864 4.609 2.424 0.071 + 865W W 865 4.354 1.735 3.281 + 866W W 866 5.058 0.295 3.467 + 867W W 867 4.962 0.205 1.104 + 868W W 868 3.966 0.214 2.186 + 869W W 869 4.014 1.177 1.556 + 870W W 870 4.125 0.783 0.776 + 871W W 871 4.384 0.432 1.896 + 872W W 872 5.498 1.004 2.836 + 873W W 873 4.432 2.083 2.875 + 874W W 874 3.803 2.067 0.047 + 875W W 875 4.538 2.478 3.173 + 876W W 876 4.059 1.645 2.804 + 877W W 877 5.050 1.750 2.598 + 878W W 878 4.048 2.508 3.513 + 879W W 879 5.280 3.488 3.563 + 880W W 880 5.135 0.117 0.290 + 881W W 881 4.877 2.803 2.981 + 882W W 882 4.703 0.488 2.781 + 883W W 883 5.172 1.557 3.538 + 884W W 884 4.675 2.736 0.732 + 885W W 885 4.198 0.860 2.720 + 886W W 886 5.499 0.115 3.084 + 887W W 887 3.811 2.858 3.141 + 888W W 888 4.554 0.850 2.379 + 889W W 889 4.943 2.211 0.381 + 890W W 890 4.268 2.911 3.303 + 891W W 891 4.754 0.613 1.545 + 892W W 892 4.523 2.106 1.635 + 893W W 893 4.836 3.517 2.385 + 894W W 894 3.911 1.599 0.265 + 895W W 895 5.021 0.100 2.984 + 896W W 896 4.873 1.131 1.563 + 897W W 897 4.879 1.000 2.862 + 898W W 898 4.858 3.466 1.478 + 899W W 899 4.175 3.425 3.112 + 900W W 900 4.287 2.786 0.242 + 901W W 901 4.408 1.336 2.938 + 902W W 902 4.926 2.323 0.890 + 903W W 903 4.476 1.705 2.544 + 904W W 904 4.141 0.377 2.954 + 905W W 905 4.849 1.214 0.129 + 906W W 906 3.652 2.973 2.674 + 907W W 907 5.237 1.682 2.117 + 908W W 908 4.195 1.523 1.003 + 909W W 909 3.966 3.375 2.440 + 910W W 910 3.890 1.650 3.374 + 911W W 911 4.239 1.651 
1.682 + 912W W 912 4.533 2.590 1.331 + 913W W 913 3.738 3.317 1.440 + 914W W 914 4.247 2.504 0.976 + 915W W 915 4.162 0.121 3.529 + 916W W 916 5.281 2.974 3.631 + 917W W 917 3.756 2.528 2.021 + 918W W 918 5.009 0.490 0.662 + 919W W 919 4.813 1.974 3.511 + 920W W 920 4.166 2.970 2.782 + 921W W 921 4.608 0.750 0.519 + 922W W 922 4.768 1.592 1.680 + 923W W 923 4.423 3.290 2.545 + 924W W 924 4.031 2.079 3.142 + 925W W 925 4.462 1.990 1.072 + 926W W 926 4.833 2.997 2.467 + 927W W 927 5.211 0.416 1.508 + 928W W 928 3.822 2.400 0.438 + 929W W 929 3.987 1.900 1.327 + 930W W 930 5.277 0.542 2.791 + 931W W 931 4.401 3.602 2.080 + 932W W 932 4.616 3.087 1.194 + 933W W 933 5.115 0.828 2.419 + 934W W 934 4.815 0.197 1.888 + 935W W 935 3.878 3.019 0.436 + 936W W 936 4.569 3.178 3.033 + 937W W 937 3.931 0.544 1.728 + 938W W 938 5.479 2.509 1.785 + 939W W 939 3.731 0.363 0.805 + 940W W 940 5.450 3.111 1.783 + 941W W 941 4.862 3.215 0.694 + 942W W 942 3.944 3.378 1.865 + 943W W 943 4.124 0.666 3.499 + 944W W 944 3.855 1.266 0.832 + 945W W 945 4.652 1.863 2.083 + 946W W 946 5.257 3.580 1.706 + 947W W 947 4.905 1.659 0.304 + 948W W 948 4.968 0.742 3.292 + 949W W 949 4.788 1.326 2.533 + 950W W 950 3.824 1.242 2.964 + 951W W 951 3.941 2.477 1.347 + 952W W 952 4.088 1.774 2.237 + 953W W 953 4.297 2.504 1.843 + 954W W 954 4.262 3.058 2.109 + 955W W 955 5.283 1.110 3.319 + 956W W 956 5.042 2.531 3.468 + 957W W 957 5.168 2.769 0.845 + 958W W 958 4.389 1.509 0.104 + 959W W 959 4.857 3.294 3.849 + 960W W 960 4.940 1.446 4.832 + 961W W 961 5.288 1.445 5.291 + 962W W 962 3.693 1.746 5.463 + 963W W 963 4.616 2.945 5.354 + 964W W 964 4.128 0.375 4.811 + 965W W 965 4.151 3.529 4.013 + 966W W 966 4.544 3.575 4.689 + 967W W 967 5.349 0.931 5.215 + 968W W 968 4.379 3.161 4.330 + 969W W 969 3.994 2.884 5.318 + 970W W 970 5.335 3.234 4.188 + 971W W 971 4.914 1.950 4.915 + 972W W 972 4.384 1.989 3.773 + 973W W 973 4.039 1.022 3.952 + 974W W 974 4.101 2.978 4.746 + 975W W 975 5.232 2.461 4.994 + 976W W 976 5.490 0.962 4.392 + 977W W 977 4.066 0.094 5.196 + 978W W 978 5.027 1.082 4.255 + 979W W 979 4.684 0.085 4.251 + 980W W 980 4.062 2.071 5.433 + 981W W 981 4.376 0.909 5.502 + 982W W 982 5.042 2.900 5.162 + 983W W 983 5.133 3.301 4.742 + 984W W 984 4.248 0.481 4.000 + 985W W 985 4.754 1.721 4.409 + 986W W 986 5.238 0.708 4.712 + 987W W 987 4.586 0.498 4.664 + 988W W 988 4.591 0.726 3.689 + 989W W 989 5.364 1.862 3.996 + 990W W 990 4.632 0.177 3.766 + 991W W 991 5.370 1.258 4.817 + 992W W 992 4.281 0.770 5.031 + 993W W 993 4.434 2.297 4.191 + 994W W 994 3.698 3.489 4.268 + 995W W 995 4.960 2.782 3.923 + 996W W 996 4.683 1.008 4.640 + 997W W 997 3.782 0.237 3.935 + 998W W 998 4.867 2.472 5.317 + 999W W 999 4.609 2.424 3.715 + 1000W W 1000 3.850 2.665 4.483 + 1001W W 1001 4.014 1.177 5.200 + 1002W W 1002 4.125 0.783 4.420 + 1003W W 1003 3.646 0.694 3.958 + 1004W W 1004 3.803 2.067 3.691 + 1005W W 1005 5.135 0.117 3.934 + 1006W W 1006 4.675 2.736 4.376 + 1007W W 1007 4.524 0.198 5.133 + 1008W W 1008 4.943 2.211 4.025 + 1009W W 1009 4.858 3.466 5.122 + 1010W W 1010 4.287 2.786 3.886 + 1011W W 1011 4.926 2.323 4.534 + 1012W W 1012 4.849 1.214 3.773 + 1013W W 1013 4.195 1.523 4.647 + 1014W W 1014 4.533 2.590 4.975 + 1015W W 1015 3.738 3.317 5.084 + 1016W W 1016 4.247 2.504 4.620 + 1017W W 1017 4.608 0.750 4.163 + 1018W W 1018 4.768 1.592 5.324 + 1019W W 1019 4.462 1.990 4.716 + 1020W W 1020 5.211 0.416 5.152 + 1021W W 1021 3.822 2.400 4.082 + 1022W W 1022 5.341 1.351 3.952 + 1023W W 1023 3.987 1.900 4.971 + 1024W W 1024 4.616 3.087 
4.838 + 1025W W 1025 4.046 2.001 4.373 + 1026W W 1026 3.931 0.544 5.372 + 1027W W 1027 5.479 2.509 5.429 + 1028W W 1028 3.731 0.363 4.449 + 1029W W 1029 5.450 3.111 5.427 + 1030W W 1030 4.862 3.215 4.338 + 1031W W 1031 3.944 3.378 5.509 + 1032W W 1032 3.855 1.266 4.476 + 1033W W 1033 5.257 3.580 5.350 + 1034W W 1034 4.905 1.659 3.948 + 1035W W 1035 3.941 2.477 4.991 + 1036W W 1036 4.297 2.504 5.487 + 1037W W 1037 5.168 2.769 4.489 + 1038W W 1038 4.389 1.509 3.748 + 1039W W 1039 4.940 5.090 1.188 + 1040W W 1040 4.455 4.938 1.352 + 1041W W 1041 4.532 3.686 2.906 + 1042W W 1042 3.811 4.171 2.579 + 1043W W 1043 3.864 4.963 2.025 + 1044W W 1044 4.757 4.854 3.262 + 1045W W 1045 5.288 5.089 1.647 + 1046W W 1046 3.693 5.390 1.819 + 1047W W 1047 4.729 5.362 3.011 + 1048W W 1048 4.957 4.918 2.069 + 1049W W 1049 3.715 4.902 3.570 + 1050W W 1050 4.128 4.019 1.167 + 1051W W 1051 4.341 3.973 2.471 + 1052W W 1052 5.349 4.575 1.571 + 1053W W 1053 3.750 4.304 1.285 + 1054W W 1054 4.936 3.999 2.364 + 1055W W 1055 4.039 4.666 0.308 + 1056W W 1056 5.490 4.606 0.748 + 1057W W 1057 4.191 4.945 2.471 + 1058W W 1058 4.176 4.821 3.393 + 1059W W 1059 4.066 3.738 1.552 + 1060W W 1060 5.482 4.270 3.276 + 1061W W 1061 5.027 4.726 0.611 + 1062W W 1062 4.596 4.010 3.275 + 1063W W 1063 4.684 3.729 0.607 + 1064W W 1064 4.376 4.553 1.858 + 1065W W 1065 3.983 4.430 2.143 + 1066W W 1066 3.721 4.709 2.472 + 1067W W 1067 4.248 4.125 0.356 + 1068W W 1068 5.238 4.352 1.068 + 1069W W 1069 4.586 4.142 1.020 + 1070W W 1070 4.591 4.370 0.045 + 1071W W 1071 5.392 4.979 2.439 + 1072W W 1072 4.632 3.821 0.122 + 1073W W 1073 4.855 4.458 1.989 + 1074W W 1074 4.281 4.414 1.387 + 1075W W 1075 4.683 4.652 0.996 + 1076W W 1076 3.782 3.881 0.291 + 1077W W 1077 4.354 5.379 3.281 + 1078W W 1078 5.058 3.939 3.467 + 1079W W 1079 4.962 3.849 1.104 + 1080W W 1080 3.966 3.858 2.186 + 1081W W 1081 4.125 4.427 0.776 + 1082W W 1082 4.384 4.076 1.896 + 1083W W 1083 5.498 4.648 2.836 + 1084W W 1084 3.646 4.338 0.314 + 1085W W 1085 4.059 5.289 2.804 + 1086W W 1086 5.050 5.394 2.598 + 1087W W 1087 5.135 3.761 0.290 + 1088W W 1088 4.443 4.453 3.119 + 1089W W 1089 5.172 5.201 3.538 + 1090W W 1090 4.198 4.504 2.720 + 1091W W 1091 5.499 3.759 3.084 + 1092W W 1092 4.554 4.494 2.379 + 1093W W 1093 4.524 3.842 1.489 + 1094W W 1094 4.754 4.257 1.545 + 1095W W 1095 3.911 5.243 0.265 + 1096W W 1096 5.021 3.744 2.984 + 1097W W 1097 5.118 5.037 2.887 + 1098W W 1098 4.879 4.644 2.862 + 1099W W 1099 4.408 4.980 2.938 + 1100W W 1100 4.476 5.349 2.544 + 1101W W 1101 4.141 4.021 2.954 + 1102W W 1102 4.480 5.044 2.007 + 1103W W 1103 4.849 4.858 0.129 + 1104W W 1104 5.237 5.326 2.117 + 1105W W 1105 3.890 5.294 3.374 + 1106W W 1106 4.239 5.295 1.682 + 1107W W 1107 5.319 4.233 1.992 + 1108W W 1108 4.162 3.765 3.529 + 1109W W 1109 5.357 3.716 0.830 + 1110W W 1110 5.009 4.134 0.662 + 1111W W 1111 4.608 4.394 0.519 + 1112W W 1112 4.768 5.236 1.680 + 1113W W 1113 5.341 4.995 0.308 + 1114W W 1114 5.277 4.186 2.791 + 1115W W 1115 5.115 4.472 2.419 + 1116W W 1116 4.815 3.841 1.888 + 1117W W 1117 3.931 4.188 1.728 + 1118W W 1118 3.731 4.007 0.805 + 1119W W 1119 4.124 4.310 3.499 + 1120W W 1120 3.855 4.910 0.832 + 1121W W 1121 4.546 4.899 0.557 + 1122W W 1122 4.652 5.507 2.083 + 1123W W 1123 4.905 5.303 0.304 + 1124W W 1124 4.968 4.386 3.292 + 1125W W 1125 4.788 4.970 2.533 + 1126W W 1126 3.824 4.886 2.964 + 1127W W 1127 5.283 4.754 3.319 + 1128W W 1128 4.389 5.153 0.104 + 1129W W 1129 4.940 5.090 4.832 + 1130W W 1130 4.455 4.938 4.996 + 1131W W 1131 5.288 5.089 5.291 + 1132W W 1132 
4.128 4.019 4.811 + 1133W W 1133 5.349 4.575 5.215 + 1134W W 1134 3.750 4.304 4.929 + 1135W W 1135 4.039 4.666 3.952 + 1136W W 1136 5.490 4.606 4.392 + 1137W W 1137 3.656 5.356 4.586 + 1138W W 1138 4.066 3.738 5.196 + 1139W W 1139 5.027 4.726 4.255 + 1140W W 1140 4.684 3.729 4.251 + 1141W W 1141 4.376 4.553 5.502 + 1142W W 1142 5.398 5.432 4.887 + 1143W W 1143 4.248 4.125 4.000 + 1144W W 1144 5.238 4.352 4.712 + 1145W W 1145 4.586 4.142 4.664 + 1146W W 1146 4.591 4.370 3.689 + 1147W W 1147 4.632 3.821 3.766 + 1148W W 1148 5.370 4.902 4.817 + 1149W W 1149 4.281 4.414 5.031 + 1150W W 1150 4.683 4.652 4.640 + 1151W W 1151 3.782 3.881 3.935 + 1152W W 1152 4.962 3.849 4.748 + 1153W W 1153 4.014 4.821 5.200 + 1154W W 1154 4.125 4.427 4.420 + 1155W W 1155 3.646 4.338 3.958 + 1156W W 1156 5.135 3.761 3.934 + 1157W W 1157 4.524 3.842 5.133 + 1158W W 1158 3.911 5.243 3.909 + 1159W W 1159 4.849 4.858 3.773 + 1160W W 1160 4.195 5.167 4.647 + 1161W W 1161 5.357 3.716 4.474 + 1162W W 1162 5.009 4.134 4.306 + 1163W W 1163 4.608 4.394 4.163 + 1164W W 1164 5.211 4.060 5.152 + 1165W W 1165 5.341 4.995 3.952 + 1166W W 1166 3.931 4.188 5.372 + 1167W W 1167 3.731 4.007 4.449 + 1168W W 1168 3.855 4.910 4.476 + 1169W W 1169 4.546 4.899 4.201 + 1170W W 1170 4.905 5.303 3.948 + 1171W W 1171 4.389 5.153 3.748 + 5.20000 5.20000 5.20000 diff --git a/hymd/Makefile b/hymd/Makefile index 358cbf3e..097eef72 100644 --- a/hymd/Makefile +++ b/hymd/Makefile @@ -4,24 +4,19 @@ FC := f2py3 --verbose --f90flags=${F90FLAGS} SINGLE_TO_DOUBLE := "s/real(4)/real(8)/g" all: \ - compute_angle_forces__single$(EXT_SUFFIX) \ - compute_angle_forces__double.f90 \ - compute_angle_forces__double$(EXT_SUFFIX) \ - compute_bond_forces__single$(EXT_SUFFIX) \ - compute_bond_forces__double$(EXT_SUFFIX) \ - compute_bond_forces__double.f90 \ - compute_gaussian_core$(EXT_SUFFIX) \ + compute_bond_forces__double.f90 \ + compute_bond_forces__single$(EXT_SUFFIX) \ + compute_bond_forces__double$(EXT_SUFFIX) \ + compute_angle_forces__double.f90 \ + compute_angle_forces__single$(EXT_SUFFIX) \ + compute_angle_forces__double$(EXT_SUFFIX) \ + compute_dihedral_forces__double.f90 \ + compute_dihedral_forces__single$(EXT_SUFFIX) \ + compute_dihedral_forces__double$(EXT_SUFFIX) \ + compute_gaussian_core$(EXT_SUFFIX) \ - -compute_angle_forces__single$(EXT_SUFFIX): compute_angle_forces.f90 - $(FC) -c compute_angle_forces.f90 -m compute_angle_forces - -compute_angle_forces__double.f90: compute_angle_forces.f90 - sed ${SINGLE_TO_DOUBLE} compute_angle_forces.f90 > compute_angle_forces__double.f90 - -compute_angle_forces__double$(EXT_SUFFIX): compute_angle_forces__double.f90 - $(FC) -c compute_angle_forces__double.f90 -m compute_angle_forces__double - $(RM) compute_angle_forces__double.f90 +compute_gaussian_core$(EXT_SUFFIX): compute_gaussian_core.f90 + $(FC) -c compute_gaussian_core.f90 -m compute_gaussian_core compute_bond_forces__single$(EXT_SUFFIX): compute_bond_forces.f90 $(FC) -c compute_bond_forces.f90 -m compute_bond_forces @@ -33,12 +28,32 @@ compute_bond_forces__double$(EXT_SUFFIX): compute_bond_forces__double.f90 $(FC) -c compute_bond_forces__double.f90 -m compute_bond_forces__double $(RM) compute_bond_forces__double.f90 -compute_gaussian_core$(EXT_SUFFIX): compute_gaussian_core.f90 - $(FC) -c compute_gaussian_core.f90 -m compute_gaussian_core +compute_angle_forces__single$(EXT_SUFFIX): compute_angle_forces.f90 + $(FC) -c compute_angle_forces.f90 dipole_reconstruction.f90 -m compute_angle_forces + +compute_angle_forces__double.f90: compute_angle_forces.f90 + 
sed ${SINGLE_TO_DOUBLE} compute_angle_forces.f90 > compute_angle_forces__double.f90 + sed ${SINGLE_TO_DOUBLE} dipole_reconstruction.f90 > dipole_reconstruction__double.f90 + +compute_angle_forces__double$(EXT_SUFFIX): compute_angle_forces__double.f90 + $(FC) -c compute_angle_forces__double.f90 dipole_reconstruction__double.f90 -m compute_angle_forces__double + $(RM) compute_angle_forces__double.f90 dipole_reconstruction__double.f90 + +compute_dihedral_forces__single$(EXT_SUFFIX): compute_dihedral_forces.f90 + $(FC) -c compute_dihedral_forces.f90 dipole_reconstruction.f90 -m compute_dihedral_forces + +compute_dihedral_forces__double.f90: compute_dihedral_forces.f90 + sed ${SINGLE_TO_DOUBLE} compute_dihedral_forces.f90 > compute_dihedral_forces__double.f90 + sed ${SINGLE_TO_DOUBLE} dipole_reconstruction.f90 > dipole_reconstruction__double.f90 + +compute_dihedral_forces__double$(EXT_SUFFIX): compute_dihedral_forces__double.f90 + $(FC) -c compute_dihedral_forces__double.f90 dipole_reconstruction__double.f90 -m compute_dihedral_forces__double + $(RM) compute_dihedral_forces__double.f90 dipole_reconstruction__double.f90 clean: $(RM) *$(EXT_SUFFIX) $(RM) *__double.f90 - $(RM) -r compute_angle_forces/ - $(RM) -r compute_bond_forces/ $(RM) -r compute_gaussian_core/ + $(RM) -r compute_bond_forces/ + $(RM) -r compute_angle_forces/ + $(RM) -r compute_dihedral_forces/ diff --git a/hymd/__init__.py b/hymd/__init__.py index 1a6e914e..73ceedcc 100644 --- a/hymd/__init__.py +++ b/hymd/__init__.py @@ -2,10 +2,12 @@ from force import ( Bond, Angle, + Dihedral, Chi, prepare_bonds, compute_bond_forces, compute_angle_forces, + compute_dihedral_forces, ) from hamiltonian import W, DefaultNoChi, DefaultWithChi from input_parser import ( @@ -18,6 +20,7 @@ _find_unique_names, check_bonds, check_angles, + check_dihedrals, check_chi, check_box_size, check_integrator, @@ -29,10 +32,12 @@ "distribute_input", "Bond", "Angle", + "Dihedral", "Chi", "prepare_bonds", "compute_bond_forces", "compute_angle_forces", + "compute_dihedral_forces", "W", "DefaultNoChi", "DefaultWithChi", @@ -45,6 +50,7 @@ "_find_unique_names", "check_bonds", "check_angles", + "check_dihedrals", "check_chi", "check_box_size", "check_integrator", diff --git a/hymd/compute_angle_forces.f90 b/hymd/compute_angle_forces.f90 index 17f1e4d0..d0d10d4f 100644 --- a/hymd/compute_angle_forces.f90 +++ b/hymd/compute_angle_forces.f90 @@ -9,96 +9,62 @@ subroutine caf(f, r, box, a, b, c, t0, k, energy) ! 
============================================================================== implicit none - real(4), dimension(:,:), intent(in out) :: f - real(4), dimension(:,:), intent(in) :: r - real(8), dimension(:), intent(in) :: box - integer, dimension(:), intent(in) :: a - integer, dimension(:), intent(in) :: b - integer, dimension(:), intent(in) :: c - real(8), dimension(:), intent(in) :: t0 - real(8), dimension(:), intent(in) :: k - real(8), intent(out) :: energy + real(4), dimension(:,:), intent(in out) :: f + real(4), dimension(:,:), intent(in) :: r + real(8), dimension(:), intent(in) :: box + integer, dimension(:), intent(in) :: a + integer, dimension(:), intent(in) :: b + integer, dimension(:), intent(in) :: c + real(8), dimension(:), intent(in) :: t0 + real(8), dimension(:), intent(in) :: k + real(8), intent(out) :: energy integer :: ind, aa, bb, cc - real(8) :: ra_x, ra_y, ra_z, rc_x, rc_y, rc_z - real(8) :: ea_x, ea_y, ea_z, ec_x, ec_y, ec_z - real(8) :: fa_x, fa_y, fa_z, fc_x, fc_y, fc_z - real(8) :: d, ff, bx, by, bz, xsinph, xra, xrc + real(8), dimension(3) :: ra, rc, ea, ec, fa, fc + real(8) :: d, ff, xsinph, norm_a, norm_c real(8) :: xrasin, xrcsin - real(8) :: cosphi, cosphi2, theta + real(8) :: cosphi, cosphi2, sinphi, theta energy = 0.0d00 f = 0.0d00 - bx = 1.0d0 / box(1) - by = 1.0d0 / box(2) - bz = 1.0d0 / box(3) - do ind = 1, size(a) aa = a(ind) + 1 bb = b(ind) + 1 cc = c(ind) + 1 - ra_x = r(aa, 1) - r(bb, 1) - ra_y = r(aa, 2) - r(bb, 2) - ra_z = r(aa, 3) - r(bb, 3) - ra_x = ra_x - box(1) * nint(ra_x * bx) - ra_y = ra_y - box(2) * nint(ra_y * by) - ra_z = ra_z - box(3) * nint(ra_z * bz) - - rc_x = r(cc, 1) - r(bb, 1) - rc_y = r(cc, 2) - r(bb, 2) - rc_z = r(cc, 3) - r(bb, 3) - rc_x = rc_x - box(1) * nint(rc_x * bx) - rc_y = rc_y - box(2) * nint(rc_y * by) - rc_z = rc_z - box(3) * nint(rc_z * bz) - - xra = 1.0d0 / sqrt(ra_x * ra_x + ra_y * ra_y + ra_z * ra_z) - xrc = 1.0d0 / sqrt(rc_x * rc_x + rc_y * rc_y + rc_z * rc_z) + ra = r(aa, :) - r(bb, :) + rc = r(cc, :) - r(bb, :) - ea_x = ra_x * xra - ea_y = ra_y * xra - ea_z = ra_z * xra + ra = ra - box * nint(ra / box) + rc = rc - box * nint(rc / box) - ec_x = rc_x * xrc - ec_y = rc_y * xrc - ec_z = rc_z * xrc + norm_a = norm2(ra) + norm_c = norm2(rc) + ea = ra / norm_a + ec = rc / norm_c - cosphi = ea_x * ec_x + ea_y * ec_y + ea_z * ec_z + cosphi = dot_product(ea, ec) cosphi2 = cosphi * cosphi if (cosphi2 < 1.0) then theta = acos(cosphi) - - xsinph = 1.0d0 / sqrt(1.0d0 - cosphi2) + sinphi = sin(theta) d = theta - t0(ind) - ff = - k(ind) * d - - xrasin = xra * xsinph * ff - xrcsin = xrc * xsinph * ff - - fa_x = (ea_x * cosphi - ec_x) * xrasin - fa_y = (ea_y * cosphi - ec_y) * xrasin - fa_z = (ea_z * cosphi - ec_z) * xrasin - - fc_x = (ec_x * cosphi - ea_x) * xrcsin - fc_y = (ec_y * cosphi - ea_y) * xrcsin - fc_z = (ec_z * cosphi - ea_z) * xrcsin - - f(aa, 1) = f(aa, 1) + fa_x - f(aa, 2) = f(aa, 2) + fa_y - f(aa, 3) = f(aa, 3) + fa_z - - f(cc, 1) = f(cc, 1) + fc_x - f(cc, 2) = f(cc, 2) + fc_y - f(cc, 3) = f(cc, 3) + fc_z - - f(bb, 1) = f(bb, 1) - (fa_x + fc_x) - f(bb, 2) = f(bb, 2) - (fa_y + fc_y) - f(bb, 3) = f(bb, 3) - (fa_z + fc_z) - - energy = energy - 0.5d0 * ff * d + ff = k(ind) * d + + xrasin = -ff / (norm_a * sinphi) + xrcsin = -ff / (norm_c * sinphi) + ! 
𝜕θ/𝜕cos(θ) * 𝜕cos(θ)/𝜕r + fa = (ec - cosphi * ea) * xrasin + fc = (ea - cosphi * ec) * xrcsin + + f(aa, :) = f(aa, :) - fa + f(cc, :) = f(cc, :) - fc + f(bb, :) = f(bb, :) + fa + fc + + energy = energy + 0.5d0 * ff * d end if end do end subroutine diff --git a/hymd/compute_bond_forces.f90 b/hymd/compute_bond_forces.f90 index faef5067..cccdffa2 100644 --- a/hymd/compute_bond_forces.f90 +++ b/hymd/compute_bond_forces.f90 @@ -1,5 +1,4 @@ - -subroutine cbf(f, r, box, i, j, r0, k, energy) +subroutine cbf(f, r, box, a, b, r0, k, energy) ! ============================================================================== ! compute_bond_forces() speedup attempt. ! @@ -12,50 +11,34 @@ subroutine cbf(f, r, box, i, j, r0, k, energy) real(4), dimension(:,:), intent(in out) :: f real(4), dimension(:,:), intent(in) :: r - real(4), dimension(:), intent(in) :: box - integer, dimension(:), intent(in) :: i - integer, dimension(:), intent(in) :: j - real(4), dimension(:), intent(in) :: r0 - real(4), dimension(:), intent(in) :: k - real(4), intent(out) :: energy - - integer :: ind, ii, jj - real(4) :: rij, rij_x, rij_y, rij_z - real(4) :: df - real(4) :: bx, by, bz - - energy = 0.0d00 - f = 0.0d00 ! Set all array elements - - bx = 1.0d00 / box(1) - by = 1.0d00 / box(2) - bz = 1.0d00 / box(3) + real(8), dimension(:), intent(in) :: box + integer, dimension(:), intent(in) :: a + integer, dimension(:), intent(in) :: b + real(8), dimension(:), intent(in) :: r0 + real(8), dimension(:), intent(in) :: k + real(8), intent(out) :: energy - do ind = 1, size(i) - ii = i(ind) + 1 - jj = j(ind) + 1 + integer :: ind, aa, bb + real(8), dimension(3) :: rab, fa + real(8) :: df, rab_norm - rij_x = r(jj, 1) - r(ii, 1) - rij_x = rij_x - box(1) * nint(rij_x * bx) - - rij_y = r(jj, 2) - r(ii, 2) - rij_y = rij_y - box(2) * nint(rij_y * by) - - rij_z = r(jj, 3) - r(ii, 3) - rij_z = rij_z - box(3) * nint(rij_z * bz) + energy = 0.0d00 + f = 0.0d00 - rij = sqrt(rij_x * rij_x + rij_y * rij_y + rij_z * rij_z) - df = -k(ind) * (rij - r0(ind)) + do ind = 1, size(a) + aa = a(ind) + 1 + bb = b(ind) + 1 - f(ii, 1) = f(ii, 1) - df * rij_x / rij - f(jj, 1) = f(jj, 1) + df * rij_x / rij + rab = r(bb, :) - r(aa, :) + rab = rab - box * nint(rab / box) + rab_norm = norm2(rab) - f(ii, 2) = f(ii, 2) - df * rij_y / rij - f(jj, 2) = f(jj, 2) + df * rij_y / rij + df = k(ind) * (rab_norm - r0(ind)) + fa = -df * rab / rab_norm - f(ii, 3) = f(ii, 3) - df * rij_z / rij - f(jj, 3) = f(jj, 3) + df * rij_z / rij + f(aa, :) = f(aa, :) - fa + f(bb, :) = f(bb, :) + fa - energy = energy + 0.5d00 * k(ind) * (rij - r0(ind))**2 + energy = energy + 0.5d00 * k(ind) * (rab_norm - r0(ind))**2 end do end subroutine diff --git a/hymd/compute_dihedral_forces.f90 b/hymd/compute_dihedral_forces.f90 new file mode 100644 index 00000000..702e9897 --- /dev/null +++ b/hymd/compute_dihedral_forces.f90 @@ -0,0 +1,146 @@ +subroutine cdf(force, r, dipoles, transfer_matrix, box, a, b, c, d, coeff, dtype, bb_index, dipole_flag, energy) + use dipole_reconstruction + implicit none + + real(4), intent(in out) :: force(:,:) + real(4), intent(in) :: r(:,:) + real(4), intent(in out) :: dipoles(:,:,:) + real(4), intent(in out) :: transfer_matrix(:,:,:,:) + real(8), intent(in) :: box(:) + integer, intent(in) :: a(:), b(:), c(:), d(:), dtype(:), bb_index(:), dipole_flag + real(4), intent(in) :: coeff(:,:,:) + real(8), intent(out) :: energy + + integer :: ind, aa, bb, cc, dd, i + integer, dimension(2) :: c_shape + real(8), dimension(3) :: f, g, h, v, w, sc, fa, fb, fc, fd + real(8), dimension(5) :: 
c_v, c_k, c_coil, d_coil, d_v, d_k + ! real(8), dimension(5) :: c_g, d_g + real(8) :: energy_cbt, df_cbt + real(8) :: force_const, eq_value + real(8) :: g_norm, v_sq, w_sq, f_dot_g, h_dot_g + real(8) :: df_dih, df_ang, cos_phi, sin_phi, phi + + energy = 0.d0 + force = 0.d0 + dipoles = 0.d0 + transfer_matrix = 0.d0 + + do ind = 1, size(a) + aa = a(ind) + 1 + bb = b(ind) + 1 + cc = c(ind) + 1 + dd = d(ind) + 1 + + f = r(aa, :) - r(bb, :) + g = r(bb, :) - r(cc, :) + h = r(dd, :) - r(cc, :) + + f = f - box * nint(f / box) + g = g - box * nint(g / box) + h = h - box * nint(h / box) + + v = cross(f, g) + w = cross(h, g) + v_sq = dot_product(v, v) + w_sq = dot_product(w, w) + g_norm = norm2(g) + + cos_phi = dot_product(v, w) + sin_phi = dot_product(w, f) * g_norm + phi = atan2(sin_phi, cos_phi) + + f_dot_g = dot_product(f, g) + h_dot_g = dot_product(h, g) + + ! Cosine series, V_prop + if (dtype(ind) == 0 .or. dtype(ind) == 1) then + df_dih = 0.d0 + c_v = coeff(ind, 1, :) + d_v = coeff(ind, 2, :) + call cosine_series(c_v, d_v, phi, energy, df_dih) + + c_coil = coeff(ind, 3, :) + d_coil = coeff(ind, 4, :) + if (count(c_coil == 0) /= size(c_coil) .and. & + count(d_coil == 0) /= size(d_coil)) then + call cosine_series(c_coil, d_coil, phi, energy, df_dih) + end if + end if + + ! CBT potential + if (dtype(ind) == 1) then + ! V = V_prop + k * (gamma - gamma_0)**2 + + c_k = coeff(ind, 5, :) + d_k = coeff(ind, 6, :) + + ! These are needed if gamma_0 is expressed as cosine series, not implemented + ! c_g = coeff(ind, 7, :) + ! d_g = phase(ind, 8, :) + + call reconstruct( & + f, r(bb, :), -g, box, c_k, d_k, phi, dipole_flag, & + energy_cbt, df_cbt, fa, fb, fc, dipoles(ind, 1:2, :), transfer_matrix(ind, 1:3, :, :)) + + energy = energy + energy_cbt + df_dih = df_dih + df_cbt + + ! Angle forces + force(aa, :) = force(aa, :) - fa + force(bb, :) = force(bb, :) - fb + force(cc, :) = force(cc, :) - fc + + if (bb_index(ind) == 1) then + ! calculate last angle in the chain + call reconstruct( & + g, r(cc, :), h, box, c_k, d_k, phi, dipole_flag, & + energy_cbt, df_cbt, fb, fc, fd, dipoles(ind, 3:4, :), transfer_matrix(ind, 4:6, :, :)) + + energy = energy + energy_cbt + df_dih = df_dih + df_cbt + + ! Angle forces + force(bb, :) = force(bb, :) - fb + force(cc, :) = force(cc, :) - fc + force(dd, :) = force(dd, :) - fd + end if + end if + + ! Improper dihedrals, needs to be fixed, I don't like it + if (dtype(ind) == 2) then + eq_value = coeff(ind, 1, 1) + force_const = coeff(ind, 1, 2) + df_dih = force_const * (phi - eq_value) + energy = energy + 0.5 * force_const * (phi - eq_value) ** 2 + end if + + ! Dihedral forces + sc = v * f_dot_g / (v_sq * g_norm) - w * h_dot_g / (w_sq * g_norm) + + fa = -df_dih * g_norm * v / v_sq + fd = df_dih * g_norm * w / w_sq + fb = df_dih * sc - fa + fc = -df_dih * sc - fd + + ! Subtract negative gradient + force(aa, :) = force(aa, :) + fa + force(bb, :) = force(bb, :) + fb + force(cc, :) = force(cc, :) + fc + force(dd, :) = force(dd, :) + fd + + ! For debugging forces + ! if (ind == 1 .or. ind == 2) then + ! print *, "coords" + ! print *, ind, r(aa, :) + ! print *, ind, r(bb, :) + ! print *, ind, r(cc, :) + ! print *, ind, r(dd, :) + ! print *, "tot_forces" + ! print *, ind, force(aa, :) + ! print *, ind, force(bb, :) + ! print *, ind, force(cc, :) + ! print *, ind, force(dd, :) + ! 
end if + end do +end subroutine cdf diff --git a/hymd/dipole_reconstruction.f90 b/hymd/dipole_reconstruction.f90 new file mode 100644 index 00000000..1b2d99ee --- /dev/null +++ b/hymd/dipole_reconstruction.f90 @@ -0,0 +1,221 @@ +module dipole_reconstruction +implicit none + +contains +function cross(vector1, vector2) result(vector3) + real(8), dimension(3), intent(in) :: vector1, vector2 + real(8), dimension(3) :: vector3 + + vector3(1) = vector1(2) * vector2(3) - vector1(3) * vector2(2) + vector3(2) = vector1(3) * vector2(1) - vector1(1) * vector2(3) + vector3(3) = vector1(1) * vector2(2) - vector1(2) * vector2(1) +end function + +function cross_matrix(matrix, vector) result(output) + ! The i-th row of the output matrix is the cross product + ! between the i-th row of the input matrix and the input vector. + real(8), dimension(3,3), intent(in) :: matrix + real(8), dimension(3), intent(in) :: vector + real(8), dimension(3,3) :: output + + output(1, :) = cross(matrix(1, :), vector) + output(2, :) = cross(matrix(2, :), vector) + output(3, :) = cross(matrix(3, :), vector) +end function + +function outer_product(vector1, vector2) result(output) + ! The i-th row of the output matrix is vector2 + ! multiplied by the i-th component of vector1. + real(8), dimension(3), intent(in) :: vector1, vector2 + real(8), dimension(3,3) :: output + + output(1, :) = vector1(1) * vector2 + output(2, :) = vector1(2) * vector2 + output(3, :) = vector1(3) * vector2 +end function outer_product + +subroutine cosine_series(c_n, d_n, phi, energy, dE_dphi) + real(8), dimension(:), intent(in) :: c_n, d_n + real(8), intent(in) :: phi + real(8), intent(in out) :: energy, dE_dphi + integer :: i + + do i = 0, size(c_n) - 1 + energy = energy + c_n(i + 1) * (1.d0 + cos(i * phi - d_n(i + 1))) + dE_dphi = dE_dphi - i * c_n(i + 1) * sin(i * phi - d_n(i + 1)) + end do + +end subroutine cosine_series + +subroutine reconstruct(rab, rb, rcb, box, c_k, d_k, phi, dipole_flag, energy_cbt, df_cbt, fa, fb, fc, dipole, transfer_matrix) + real(8), dimension(3), intent(in) :: rab, rcb, box + real(4), dimension(3), intent(in) :: rb + real(8), dimension(:), intent(in) :: c_k, d_k + ! real(8), dimension(:), intent(in) :: c_g, d_g + real(8), intent(in) :: phi + integer :: dipole_flag + real(8), intent(out) :: energy_cbt, df_cbt + real(8), dimension(3), intent(out) :: fa, fb, fc + real(4), dimension(2, 3), intent(in out) :: dipole + real(4), dimension(3, 3, 3), intent(in out) :: transfer_matrix + + integer :: i, j + real(8) :: k, gamma_0, dk, dg, norm_a, norm_c, df_ang, var_sq + real(8) :: theta, d_theta, cos_theta, sin_theta, fac + real(8) :: gamm, cos_gamma, sin_gamma, cos2, cos_phi, sin_phi + real(8), dimension(3) :: w, v, n, m, r0, d + real(8), dimension(3, 3) :: W_a, W_b, V_b, V_c + real(8), dimension(3, 3) :: N_a, N_b, N_c + real(8), dimension(3, 3) :: M_a, M_b, M_c + real(8), dimension(3, 3) :: FN_a, fN_b, fN_c + real(8), dimension(3, 3) :: FM_a, FM_b, FM_c + real(8), parameter :: delta = 0.3d0 , cos_psi = cos(1.392947), sin_psi = sin(1.392947), small = 0.001d0 + + cos_phi = cos(phi) + sin_phi = sin(phi) + ! real(8), parameter :: small = 0.001d0 + ! cos_phi = 0,17890101 + ! sin_phi = 0,983867079 + ! energy_cbt = 0.d0 + + ! 1 - Angle forces calculation + ! Levitt-Warshel + ! gamma_0 = 106 - 13 * cos(phi - 45) + gamma_0 = 1.85d0 - 0.227d0 * cos(phi - 0.785d0) + dg = 0.227d0 * sin(phi - 0.785d0) + ! Not used in the end? + ! We use another Fourier expansion? + + k = 0.d0 + dk = 0.d0 + ! gamma_0 = 0.d0 + ! 
dg = 0.d0 + + call cosine_series(c_k, d_k, phi, k, dk) + ! call cosine_series(c_g, d_g, gamma_0, dg) + + norm_a = norm2(rab) + norm_c = norm2(rcb) + + ! w == ea, v == ec (in angle routine) + w = rab / norm_a + v = rcb / norm_c + + cos_gamma = dot_product(w, v) + cos2 = cos_gamma * cos_gamma + + ! This prevents sin_gamma == 0 + if (cos2 < 1.0) then + gamm = acos(cos_gamma) + sin_gamma = sqrt(1 - cos2) + + if (sin_gamma < 0.1) then + print *, "DIHEDRAL ROUTINE WARNING (bending potential):" + print '(a, f5.2, a)', "The angle γ =", gamm, " is too close to 0 or π." + print *, "There's probably something wrong with the simulation. Setting sin(γ) = 0.1" + sin_gamma = 0.1 + end if + + ! Bending "forces" == f_gamma_i in the paper + ! 1/sin(γ) ∂cos(γ)/∂γ + fa = (v - cos_gamma * w) / norm_a + fc = (w - cos_gamma * v) / norm_c + + fa = -fa / sin_gamma + fc = -fc / sin_gamma + + fb = -(fa + fc) + + ! CBT energy and force factors + df_ang = k * (gamm - gamma_0) + var_sq = (gamm - gamma_0)**2 + + energy_cbt = 0.5d0 * k * var_sq + ! Positive gradient, add to V_prop gradient + df_cbt = 0.5d0 * dk * var_sq - df_ang * dg + + ! Exit subroutine if we only need the forces + if (dipole_flag == 0) then + fa = df_ang * fa + fb = df_ang * fb + fc = df_ang * fc + return + end if + + ! 2 - Dipole reconstruction + ! θ(γ) + ! This function needs to be fit again + fac = exp((gamm - 1.73d0) / 0.025d0) + theta = -1.607d0 * gamm + 0.094d0 + 1.883d0 / (1.d0 + fac) + d_theta = -1.607d0 - 1.883d0 / 0.025d0 * fac / ((1.d0 + fac)**2) + cos_theta = cos(theta) + sin_theta = sin(theta) + + n = cross(w, v) / sin_gamma + m = cross(n, v) + + ! Dipole coordinates + r0 = rb + 0.5d0 * rcb + ! From Michele's paper, it's wrong in Sigbjorn's + d = 0.5d0 * delta * (cos_psi * v + sin_psi * (cos_theta * n + sin_theta * m)) + + dipole(1, :) = r0 + d + dipole(2, :) = r0 - d + + ! PBC + dipole(1, :) = dipole(1, :) - box * nint(dipole(1, :) / box) + dipole(2, :) = dipole(2, :) - box * nint(dipole(2, :) / box) + + ! Set up transfer matrices + do j = 1, 3 + do i = 1, 3 + V_b(i, j) = v(i) * v(j) + W_b(i, j) = w(i) * w(j) + if (i == j) then + V_b(i, j) = V_b(i, j) - 1.d0 + W_b(i, j) = W_b(i, j) - 1.d0 + end if + end do + end do + + V_b = V_b / norm_c + W_b = W_b / norm_a + + V_c = -V_b + W_a = -W_b + + ! Last term is 0 for N_a, second term is 0 for N_c (S19) + ! Minus in the last term because inverse cross_matrix + ! 1 / sin(γ) is already inside fa, fb, and fc + N_a = (cos_gamma * outer_product(fa, n) + cross_matrix(W_a, v) ) / sin_gamma + N_b = (cos_gamma * outer_product(fb, n) + cross_matrix(W_b, v) - cross_matrix(V_b, w)) / sin_gamma + N_c = (cos_gamma * outer_product(fc, n) - cross_matrix(V_c, w)) / sin_gamma + + M_a = cross_matrix(N_a, v) + M_b = cross_matrix(N_b, v) - cross_matrix(V_b, n) + M_c = cross_matrix(N_c, v) - cross_matrix(V_c, n) + + ! A lot of terms in (S10) go away because ∂φ/∂γ = 0, + ! since φ = const. + ! 1 / sin(γ) is already inside fa, fb, and fc + FN_a = sin_theta * d_theta * outer_product(fa, n) + FN_b = sin_theta * d_theta * outer_product(fb, n) + FN_c = sin_theta * d_theta * outer_product(fc, n) + + FM_a = cos_theta * d_theta * outer_product(fa, m) + FM_b = cos_theta * d_theta * outer_product(fb, m) + FM_c = cos_theta * d_theta * outer_product(fc, m) + + ! Final transfer matrices D_i + ! 
0.5 because we have two equally distant points + transfer_matrix(1, :, :) = 0.5d0 * delta * ( sin_psi * (cos_theta * N_a + sin_theta * M_a + FN_a - FM_a)) + transfer_matrix(2, :, :) = 0.5d0 * delta * (cos_psi * V_b + sin_psi * (cos_theta * N_b + sin_theta * M_b + FN_b - FM_b)) + transfer_matrix(3, :, :) = 0.5d0 * delta * (cos_psi * V_c + sin_psi * (cos_theta * N_c + sin_theta * M_c + FN_c - FM_c)) + + ! Final angle forces + fa = df_ang * fa + fb = df_ang * fb + fc = df_ang * fc + end if +end subroutine reconstruct +end module dipole_reconstruction diff --git a/hymd/field.py b/hymd/field.py index 4f9b63fd..bbd7cdb2 100644 --- a/hymd/field.py +++ b/hymd/field.py @@ -11,6 +11,200 @@ def compute_field_force(layouts, r, force_mesh, force, types, n_types): force[ind, d] = force_mesh[t][d].readout(r[ind], layout=layouts[t]) +def compute_field_energy_q( + config, + phi_q_fourier, + elec_energy_field, # for energy calculation + field_q_energy, + comm=MPI.COMM_WORLD, +): + + COULK_GMX = 138.935458 / config.dielectric_const + + def transfer_energy(k, v): + ### the potential field is the electric field / (-ik) --> potential field * q --> + return ( + 4.0 * np.pi * COULK_GMX * np.abs(v) ** 2 / k.normp(p=2, zeromode=1) + ) ## zeromode = 1 needed here? + + phi_q_fourier.apply(transfer_energy, kind="wavenumber", out=elec_energy_field) + + V = np.prod(config.box_size) + + field_q_energy = 0.5 * V * comm.allreduce(np.sum(elec_energy_field.value)) + + return field_q_energy.real + + +def update_field_force_q( + charges, # charge + phi_q, # charge density + phi_q_fourier, + elec_field_fourier, # for force calculation + elec_field, + elec_forces, + layout_q, #### general terms + pm, + positions, + config, +): + """ + - added for the simple Poisson equation electrostatics (following PIC) + - this function computes the electrostatic forces + - referring to test-pure-sphere-new.py, this includes: + [O] hpf_init_simple_gmx_units(grid_num,box_size,coords,charges,masses) + ## ^----- defines the pm and layout, already set up outside this function + [Y] gen_qe_hpf_use_self(out_phiq_paraview_file) + [Y] calc_phiq_fft_use_self_applyH_checkq(grid_num, out_phiq_paraview_file2 ) + [Y] poisson_solver(calc_energy, out_elec_field_paraview_file) + [Y] compute_electric_field_on_particle() + [Y] compute_electric_force_on_particle() + """ + ## basic setup + V = np.prod(config.box_size) + n_mesh_cells = np.prod(np.full(3, config.mesh_size)) + volume_per_cell = V / n_mesh_cells + + ## paint ## pm.paint(positions[types == t], layout=layouts[t], out=phi[t]) + ## old protocol in gen_qe_hpf_use_self + pm.paint(positions, layout=layout_q, mass=charges, out=phi_q) ## + ## scale and fft + ## old protocol in gen_qe_hpf_use_self + phi_q /= volume_per_cell + phi_q.r2c(out=phi_q_fourier) + + # Provide a different sigma value if using dipoles + # if dipole_flag: + # sigma = 0.1 + # else: + # sigma = config.sigma + def phi_transfer_function(k, v): + return v * np.exp(-0.5 * config.sigma ** 2 * k.normp(p=2, zeromode=1)) + + phi_q_fourier.apply(phi_transfer_function, out=phi_q_fourier) + ## ^------ use the same Gaussian as the χ interaction + ## ^------ to be replaced: phi_transfer_function by hamiltonian.H?
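+    ## The filter above multiplies φ_q(k) by exp(-0.5 σ² |k|²), i.e. the point charges are smeared with the same Gaussian window (width sigma) used for the χ field interactions before the electrostatics are evaluated.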
+ phi_q_fourier.c2r(out=phi_q) ## this phi_q is after applying the smearing function + + ## electric field via solving the Poisson equation + ## old protocol in poisson_solver + _SPACE_DIM = 3 + + COULK_GMX = 138.935458 / config.dielectric_const + + for _d in np.arange(_SPACE_DIM): + + def poisson_transfer_function(k, v, d=_d): + return -1j * k[d] * 4.0 * np.pi * COULK_GMX * v / k.normp(p=2, zeromode=1) + ######return - 1j * k[_d] * 4.0 * np.pi * v /k.normp(p=2) #hymd.py:173: RuntimeWarning: invalid value encountered in true_divide + + phi_q_fourier.apply(poisson_transfer_function, out=elec_field_fourier[_d]) + elec_field_fourier[_d].c2r(out=elec_field[_d]) + + ## calculate electric forces on particles + ## old protocol in compute_electric_force_on_particle_onestep + for _d in np.arange(_SPACE_DIM): + elec_forces[:, _d] = charges * ( + elec_field[_d].readout(positions, layout=layout_q) + ) + ###^------ here we use the column, as elec_forces is defined with (N, 3) dimensions + + +def update_field_force_energy_q( + charges, # charge + phi_q, # charge density + phi_q_fourier, + elec_field_fourier, # for force calculation + elec_field, + elec_forces, + elec_energy_field, # for energy calculation + field_q_energy, # electric energy + layout_q, #### general terms + pm, + positions, + config, + compute_energy=False, + comm=MPI.COMM_WORLD, +): + """ + - added for the simple Poisson equation electrostatics (following PIC) + - this function computes the electrostatic forces + - referring to test-pure-sphere-new.py, this includes: + [O] hpf_init_simple_gmx_units(grid_num,box_size,coords,charges,masses) + ## ^----- defines the pm and layout, already set up outside this function + [Y] gen_qe_hpf_use_self(out_phiq_paraview_file) + [Y] calc_phiq_fft_use_self_applyH_checkq(grid_num, out_phiq_paraview_file2 ) + [Y] poisson_solver(calc_energy, out_elec_field_paraview_file) + [Y] compute_electric_field_on_particle() + [Y] compute_electric_force_on_particle() + """ + ## basic setup + V = np.prod(config.box_size) + n_mesh_cells = np.prod(np.full(3, config.mesh_size)) + volume_per_cell = V / n_mesh_cells + + ## paint ## pm.paint(positions[types == t], layout=layouts[t], out=phi[t]) + ## old protocol in gen_qe_hpf_use_self + pm.paint(positions, layout=layout_q, mass=charges, out=phi_q) ## + ## scale and fft + ## old protocol in gen_qe_hpf_use_self + phi_q /= volume_per_cell + phi_q.r2c(out=phi_q_fourier) + + # IDEA: Provide a different sigma value if using dipoles + # if dipole_flag: + # sigma = 0.1 + # else: + # sigma = config.sigma + def phi_transfer_function(k, v): + return v * np.exp(-0.5 * config.sigma ** 2 * k.normp(p=2, zeromode=1)) + + phi_q_fourier.apply(phi_transfer_function, out=phi_q_fourier) + ## ^------ use the same Gaussian as the χ interaction + ## ^------ to be replaced: phi_transfer_function by hamiltonian.H?
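+    ## Below: transform the smeared density back to real space, solve the Poisson equation in Fourier space, E_d(k) = -i k_d · 4π k_e φ_q(k) / |k|² with k_e = 138.935458 / ε_r, and read the field out at the particle positions so that F_i = q_i E(r_i).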
+ phi_q_fourier.c2r(out=phi_q) ## this phi_q is after applying the smearing function + + ## electric field via solving poisson equation + ## old protol in poisson_solver + _SPACE_DIM = 3 + + COULK_GMX = 138.935458 / config.dielectric_const + + for _d in np.arange(_SPACE_DIM): + + def poisson_transfer_function(k, v, d=_d): + return -1j * k[d] * 4.0 * np.pi * COULK_GMX * v / k.normp(p=2, zeromode=1) + ######return - 1j * k[_d] * 4.0 * np.pi * v /k.normp(p=2) #hymd.py:173: RuntimeWarning: invalid value encountered in true_divide + + phi_q_fourier.apply(poisson_transfer_function, out=elec_field_fourier[_d]) + elec_field_fourier[_d].c2r(out=elec_field[_d]) + + ## calculate electric forces on particles + ## old protocol in compute_electric_force_on_particle_onestep + for _d in np.arange(_SPACE_DIM): + elec_forces[:, _d] = charges * ( + elec_field[_d].readout(positions, layout=layout_q) + ) + ###^------ here the use the column, as the elec_forces are defined as (N,3) dimension + + ## calculate electric energy in Fourier space + ## old protocol in poisson_solver [ if calc_energy: ] block + if compute_energy: + + def transfer_energy( + k, v + ): ### potential field is electric field / (-ik) --> potential field * q --> + return ( + 4.0 * np.pi * COULK_GMX * np.abs(v) ** 2 / k.normp(p=2, zeromode=1) + ) ## zeromode = 1 needed here? + + phi_q_fourier.apply(transfer_energy, kind="wavenumber", out=elec_energy_field) + + field_q_energy = 0.5 * comm.allreduce(np.sum(elec_energy_field.value)) + + return field_q_energy.real + + def update_field( phi, layouts, @@ -85,13 +279,7 @@ def compute_field_and_kinetic_energy( def domain_decomposition( - positions, - pm, - *args, - molecules=None, - bonds=None, - verbose=0, - comm=MPI.COMM_WORLD + positions, pm, *args, molecules=None, bonds=None, verbose=0, comm=MPI.COMM_WORLD ): if molecules is not None: assert bonds is not None, "bonds must be provided when molecules are present" diff --git a/hymd/file_io.py b/hymd/file_io.py index fcf06fd5..4d5f7061 100644 --- a/hymd/file_io.py +++ b/hymd/file_io.py @@ -8,8 +8,14 @@ class OutDataset: - def __init__(self, dest_directory, config, double_out=False, - disable_mpio=False, comm=MPI.COMM_WORLD): + def __init__( + self, + dest_directory, + config, + double_out=False, + disable_mpio=False, + comm=MPI.COMM_WORLD, + ): self.disable_mpio = disable_mpio self.config = config if double_out: @@ -60,6 +66,7 @@ def store_static( bonds_2_atom2, velocity_out=False, force_out=False, + charges=False, # Provide charge array here comm=MPI.COMM_WORLD, ): dtype = h5md.float_dtype @@ -106,6 +113,21 @@ def store_static( h5md.all_particles = h5md.particles_group.create_group("all") mass = h5md.all_particles.create_dataset("mass", (config.n_particles,), dtype) mass[:].fill(config.mass) + ### WARNING THIS DOES NOT FILL THE config.mass, still all zero + ### TBF, ok for now + + ### add charge + if charges is not False: + charge = h5md.all_particles.create_dataset( + "charge", (config.n_particles,), dtype="float32" + ) # charges.shape + charge[indices] = charges + # print(indices, len(indices)) + # print('shape of charges inside the static', charges.shape) + ### NO USE http://pdebuyl.be/blog/2014/pmi-and-h5py.html + ### NO USE https://github.com/pdebuyl/pmi-h5py/blob/master/test_pmi_mod.py + ### NO USE https://gist.github.com/apdavison/36126ee26067592ee69bf51b57fd3f31 + box = h5md.all_particles.create_group("box") box.attrs["dimension"] = 3 box.attrs["boundary"] = np.array( @@ -139,7 +161,7 @@ def store_static( n_frames, (config.n_particles, 3), 
dtype, - units="nanometers", + units="nm", ) if velocity_out: ( @@ -153,21 +175,21 @@ def store_static( n_frames, (config.n_particles, 3), dtype, - units="nanometers/picosecond", + units="nm ps-1", ) if force_out: ( _, h5md.forces_step, h5md.forces_time, - h5md.forces + h5md.forces, ) = setup_time_dependent_element( - 'force', + "force", h5md.all_particles, n_frames, (config.n_particles, 3), dtype, - units='kJ/mol nanometer' + units="kJ nm mol-1", ) ( _, @@ -175,7 +197,7 @@ def store_static( h5md.total_energy_time, h5md.total_energy, ) = setup_time_dependent_element( - "total_energy", h5md.observables, n_frames, (1,), dtype, units="kJ/mol" + "total_energy", h5md.observables, n_frames, (1,), dtype, units="kJ mol-1" ) ( _, @@ -183,7 +205,7 @@ def store_static( h5md.kinetc_energy_time, h5md.kinetc_energy, ) = setup_time_dependent_element( - "kinetic_energy", h5md.observables, n_frames, (1,), dtype, units="kJ/mol" + "kinetic_energy", h5md.observables, n_frames, (1,), dtype, units="kJ mol-1" ) ( _, @@ -191,7 +213,7 @@ def store_static( h5md.potential_energy_time, h5md.potential_energy, ) = setup_time_dependent_element( # noqa: E501 - "potential_energy", h5md.observables, n_frames, (1,), dtype, units="kJ/mol" + "potential_energy", h5md.observables, n_frames, (1,), dtype, units="kJ mol-1" ) ( _, @@ -199,7 +221,7 @@ def store_static( h5md.bond_energy_time, h5md.bond_energy, ) = setup_time_dependent_element( - "bond_energy", h5md.observables, n_frames, (1,), dtype, units="kJ/mol" + "bond_energy", h5md.observables, n_frames, (1,), dtype, units="kJ mol-1" ) ( _, @@ -207,7 +229,15 @@ def store_static( h5md.angle_energy_time, h5md.angle_energy, ) = setup_time_dependent_element( - "angle_energy", h5md.observables, n_frames, (1,), dtype, units="kJ/mol" + "angle_energy", h5md.observables, n_frames, (1,), dtype, units="kJ mol-1" + ) + ( + _, + h5md.dihedral_energy_step, + h5md.dihedral_energy_time, + h5md.dihedral_energy, + ) = setup_time_dependent_element( + "dihedral_energy", h5md.observables, n_frames, (1,), dtype, units="kJ mol-1" ) ( _, @@ -215,8 +245,17 @@ def store_static( h5md.field_energy_time, h5md.field_energy, ) = setup_time_dependent_element( - "field_energy", h5md.observables, n_frames, (1,), dtype, units="kJ/mol" + "field_energy", h5md.observables, n_frames, (1,), dtype, units="kJ mol-1" ) + if charges is not False: + ( + _, + h5md.field_q_energy_step, + h5md.field_q_energy_time, + h5md.field_q_energy, + ) = setup_time_dependent_element( + "field_q_energy", h5md.observables, n_frames, (1,), dtype, units="kJ mol-1" + ) # <-------- xinmeng ( _, h5md.total_momentum_step, @@ -228,7 +267,33 @@ def store_static( n_frames, (3,), dtype, - units="nanometers g/picosecond mol", + units="nm g ps-1 mol-1", + ) + ( + _, + h5md.angular_momentum_step, + h5md.angular_momentum_time, + h5md.angular_momentum, + ) = setup_time_dependent_element( # noqa: E501 + "angular_momentum", + h5md.observables, + n_frames, + (3,), + dtype, + units="nm+2 g ps-1 mol-1", + ) + ( + _, + h5md.torque_step, + h5md.torque_time, + h5md.torque, + ) = setup_time_dependent_element( # noqa: E501 + "torque", + h5md.observables, + n_frames, + (3,), + dtype, + units="kJ nm+2 mol-1", ) ( _, @@ -236,7 +301,7 @@ def store_static( h5md.temperature_time, h5md.temperature, ) = setup_time_dependent_element( - "temperature", h5md.observables, n_frames, (3,), dtype, units="Kelvin" + "temperature", h5md.observables, n_frames, (3,), dtype, units="K" ) ( _, @@ -244,7 +309,7 @@ def store_static( h5md.thermostat_work_time, h5md.thermostat_work, ) = 
setup_time_dependent_element( - "thermostat_work", h5md.observables, n_frames, (1,), "float32", units="kJ/mol" + "thermostat_work", h5md.observables, n_frames, (1,), "float32", units="kJ mol-1" ) ind_sort = np.argsort(indices) @@ -258,10 +323,12 @@ def store_static( ) index_of_species[:] = np.array(list(range(config.n_types))) - # VMD-h5mdplugin maximum name/type name length is 16 characters (for - # whatever reason [VMD internals?]). + # # VMD-h5mdplugin maximum name/type name length is 16 characters (for + # # whatever reason [VMD internals?]). name_dataset = vmd_group.create_dataset("name", (config.n_types,), "S16") type_dataset = vmd_group.create_dataset("type", (config.n_types,), "S16") + + # Change this for i, n in config.type_to_name_map.items(): name_dataset[i] = np.string_(n[:16]) if n == "W": @@ -269,23 +336,24 @@ def store_static( else: type_dataset[i] = np.string_("membrane") - total_bonds = comm.allreduce(len(bonds_2_atom1), MPI.SUM) - n_bonds_local = len(bonds_2_atom1) + # This bonds implementation causes problems with the VMD-h5md plugin + # total_bonds = comm.allreduce(len(bonds_2_atom1), MPI.SUM) + # n_bonds_local = len(bonds_2_atom1) - receive_buffer = comm.gather(n_bonds_local, root=0) - n_bonds_global = None - if comm.Get_rank() == 0: - n_bonds_global = receive_buffer - n_bonds_global = np.array(comm.bcast(n_bonds_global, root=0)) - rank_bond_start = np.sum(n_bonds_global[: comm.Get_rank()]) - bonds_from = vmd_group.create_dataset("bond_from", (total_bonds,), "i") - bonds_to = vmd_group.create_dataset("bond_to", (total_bonds,), "i") + # receive_buffer = comm.gather(n_bonds_local, root=0) + # n_bonds_global = None + # if comm.Get_rank() == 0: + # n_bonds_global = receive_buffer + # n_bonds_global = np.array(comm.bcast(n_bonds_global, root=0)) + # rank_bond_start = np.sum(n_bonds_global[: comm.Get_rank()]) - for i in range(n_bonds_local): - a = bonds_2_atom1[i] - b = bonds_2_atom2[i] - bonds_from[rank_bond_start + i] = indices[a] - bonds_to[rank_bond_start + i] = indices[b] + # bonds_from = vmd_group.create_dataset("bond_from", (total_bonds,), "i") + # bonds_to = vmd_group.create_dataset("bond_to", (total_bonds,), "i") + # for i in range(n_bonds_local): + # a = bonds_2_atom1[i] + # b = bonds_2_atom2[i] + # bonds_from[rank_bond_start + i] = indices[a] + # bonds_to[rank_bond_start + i] = indices[b] def store_data( @@ -301,11 +369,14 @@ def store_data( kinetic_energy, bond2_energy, bond3_energy, + bond4_energy, field_energy, + field_q_energy, time_step, config, velocity_out=False, force_out=False, + charge_out=False, dump_per_particle=False, comm=MPI.COMM_WORLD, ): @@ -316,8 +387,11 @@ def store_data( h5md.kinetc_energy_step, h5md.bond_energy_step, h5md.angle_energy_step, + h5md.dihedral_energy_step, h5md.field_energy_step, h5md.total_momentum_step, + h5md.angular_momentum_step, + h5md.torque_step, h5md.temperature_step, h5md.thermostat_work_step, ): @@ -330,8 +404,11 @@ def store_data( h5md.kinetc_energy_time, h5md.bond_energy_time, h5md.angle_energy_time, + h5md.dihedral_energy_time, h5md.field_energy_time, h5md.total_momentum_time, + h5md.angular_momentum_time, + h5md.torque_time, h5md.temperature_time, h5md.thermostat_work_time, ): @@ -343,6 +420,9 @@ def store_data( if force_out: h5md.forces_step[frame] = step h5md.forces_time[frame] = step * time_step + if charge_out: + h5md.field_q_energy_step[frame] = step + h5md.field_q_energy_time[frame] = step * time_step # Time dependent box, fix this later. 
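    # NOTE: the box group is created once in store_static (static box for now), so the commented-out per-frame box step below is not written yet.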
# h5md.box_step[frame] = step @@ -356,34 +436,50 @@ def store_data( h5md.velocities[frame, indices[ind_sort]] = velocities[ind_sort] if force_out: h5md.forces[frame, indices[ind_sort]] = forces[ind_sort] + if charge_out: + h5md.field_q_energy[frame] = field_q_energy + + potential_energy = ( + bond2_energy + bond3_energy + bond4_energy + field_energy + field_q_energy + ) - potential_energy = bond2_energy + bond3_energy + field_energy total_momentum = config.mass * comm.allreduce(np.sum(velocities, axis=0), MPI.SUM) + angular_momentum = config.mass * comm.allreduce( + np.sum(np.cross(positions, velocities), axis=0), MPI.SUM + ) + torque = config.mass * comm.allreduce( + np.sum(np.cross(positions, forces), axis=0), MPI.SUM + ) h5md.total_energy[frame] = kinetic_energy + potential_energy h5md.potential_energy[frame] = potential_energy h5md.kinetc_energy[frame] = kinetic_energy h5md.bond_energy[frame] = bond2_energy h5md.angle_energy[frame] = bond3_energy + h5md.dihedral_energy[frame] = bond4_energy h5md.field_energy[frame] = field_energy h5md.total_momentum[frame, :] = total_momentum + h5md.angular_momentum[frame, :] = angular_momentum + h5md.torque[frame, :] = torque h5md.temperature[frame] = temperature h5md.thermostat_work[frame] = config.thermostat_work - header_ = 13 * "{:>15}" + header_ = 14 * "{:>13}" fmt_ = [ "step", "time", - "temperature", - "total E", - "kinetic E", - "potential E", + "temp", + "tot E", + "kin E", + "pot E", "field E", + # "field q E", "bond E", - "angle E", - "total Px", - "total Py", - "total Pz", - "ΔH tilde" if config.target_temperature else "ΔE", + "ang E", + "dih E", + "Px", + "Py", + "Pz", + "ΔH" if config.target_temperature else "ΔE", ] if config.initial_energy is None: fmt_[-1] = "" @@ -404,7 +500,7 @@ def store_data( H_tilde = 0.0 header = header_.format(*fmt_) - data_fmt = f'{"{:15}"}{12 * "{:15.8g}" }' + data_fmt = f'{"{:13}"}{13 * "{:13.5g}" }' data = data_fmt.format( step, time_step * step, @@ -413,8 +509,10 @@ def store_data( kinetic_energy / divide_by, potential_energy / divide_by, field_energy / divide_by, + # field_q_energy / divide_by, bond2_energy / divide_by, bond3_energy / divide_by, + bond4_energy / divide_by, total_momentum[0] / divide_by, total_momentum[1] / divide_by, total_momentum[2] / divide_by, diff --git a/hymd/force.py b/hymd/force.py index 3e5505ca..b789fc7e 100644 --- a/hymd/force.py +++ b/hymd/force.py @@ -6,12 +6,18 @@ from compute_angle_forces import ( caf as compute_angle_forces__fortran, ) # noqa: F401, E501 +from compute_dihedral_forces import ( + cdf as compute_dihedral_forces__fortran, +) # noqa: F401, E501 from compute_bond_forces__double import ( cbf as compute_bond_forces__fortran__double, ) # noqa: F401, E501 from compute_angle_forces__double import ( caf as compute_angle_forces__fortran__double, ) # noqa: F401, E501 +from compute_dihedral_forces__double import ( + cdf as compute_dihedral_forces__fortran__double, +) @dataclass @@ -27,6 +33,32 @@ class Angle(Bond): atom_3: str +# 1- Fourier series +@dataclass +class Dihedral: + atom_1: str + atom_2: str + atom_3: str + atom_4: str + coeffs: np.ndarray + # type: (0) Fourier or (1) CBT + # Impropers to be specified in the toml? 
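+    # dih_type: 0 = plain cosine (Fourier) series, 1 = combined bending-torsion (CBT) potential (optionally with reconstructed ghost dipoles), 2 = harmonic improper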
+ dih_type: int + + +# 2- Combined bending-torsional potential +# @dataclass +# class CBT_potential(Dihedral): +# coeff_k: list +# phase_k: list + +# 3- Harmonic potential for impropers +# @dataclass +# class improper(Dihedral): +# equilibrium: float +# strength: float + + @dataclass class Chi: atom_1: str @@ -34,12 +66,28 @@ class Chi: interaction_energy: float +def findPathsNoLC(G, u, n): + if n == 0: + return [[u]] + paths = [] + for neighbor in G.neighbors(u): + for path in findPathsNoLC(G, neighbor, n - 1): + if u not in path: + paths.append([u] + path) + return paths + + def prepare_bonds_old(molecules, names, bonds, indices, config): bonds_2 = [] bonds_3 = [] + bonds_4 = [] + bb_index = [] + different_molecules = np.unique(molecules) for mol in different_molecules: + bb_dihedral = 0 bond_graph = nx.Graph() + for local_index, global_index in enumerate(indices): if molecules[local_index] != mol: continue @@ -63,7 +111,7 @@ def prepare_bonds_old(molecules, names, bonds, indices, config): for b in config.bonds: match_forward = name_i == b.atom_1 and name_j == b.atom_2 - match_backward = name_j == b.atom_2 and name_i == b.atom_1 + match_backward = name_i == b.atom_2 and name_j == b.atom_1 if match_forward or match_backward: bonds_2.append( [ @@ -100,11 +148,47 @@ def prepare_bonds_old(molecules, names, bonds, indices, config): a.strength, ] ) - return bonds_2, bonds_3 + + all_paths_len_four = findPathsNoLC(bond_graph, i, 3) + for p in all_paths_len_four: + name_i = bond_graph.nodes()[i]["name"] + name_mid_1 = bond_graph.nodes()[p[1]]["name"] + name_mid_2 = bond_graph.nodes()[p[2]]["name"] + name_j = bond_graph.nodes()[p[3]]["name"] + + for a in config.dihedrals: + match_forward = ( + name_i == a.atom_1 + and name_mid_1 == a.atom_2 + and name_mid_2 == a.atom_3 + and name_j == a.atom_4 + ) + if match_forward: + bonds_4.append( + [ + bond_graph.nodes()[i]["local_index"], + bond_graph.nodes()[p[1]]["local_index"], + bond_graph.nodes()[p[2]]["local_index"], + bond_graph.nodes()[p[3]]["local_index"], + a.coeffs, + a.dih_type, + ] + ) + # This works for protein inside molecule, but not for block peptides + if a.dih_type == 1: + bb_dihedral = len(bonds_4) + + if bb_dihedral: + bb_index.append(bb_dihedral - 1) + + return bonds_2, bonds_3, bonds_4, bb_index def prepare_bonds(molecules, names, bonds, indices, config): - bonds_2, bonds_3 = prepare_bonds_old(molecules, names, bonds, indices, config) + bonds_2, bonds_3, bonds_4, bb_index = prepare_bonds_old( + molecules, names, bonds, indices, config + ) + # Bonds bonds_2_atom1 = np.empty(len(bonds_2), dtype=int) bonds_2_atom2 = np.empty(len(bonds_2), dtype=int) bonds_2_equilibrium = np.empty(len(bonds_2), dtype=np.float64) @@ -114,6 +198,7 @@ def prepare_bonds(molecules, names, bonds, indices, config): bonds_2_atom2[i] = b[1] bonds_2_equilibrium[i] = b[2] bonds_2_stength[i] = b[3] + # Angles bonds_3_atom1 = np.empty(len(bonds_3), dtype=int) bonds_3_atom2 = np.empty(len(bonds_3), dtype=int) bonds_3_atom3 = np.empty(len(bonds_3), dtype=int) @@ -125,6 +210,29 @@ def prepare_bonds(molecules, names, bonds, indices, config): bonds_3_atom3[i] = b[2] bonds_3_equilibrium[i] = b[3] bonds_3_stength[i] = b[4] + # Dihedrals + bonds_4_atom1 = np.empty(len(bonds_4), dtype=int) + bonds_4_atom2 = np.empty(len(bonds_4), dtype=int) + bonds_4_atom3 = np.empty(len(bonds_4), dtype=int) + bonds_4_atom4 = np.empty(len(bonds_4), dtype=int) + # 4 => 2 sets of 2 parameters + # Might it be useful to decouple dihedral types to prevent having lots of zeros/empty arrays? 
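+    # Layout of the (number_of_coeff, len_of_coeff) = (6, 5) block per dihedral, as read by compute_dihedral_forces.f90: rows 1-2 hold the (c_n, d_n) propensity series, rows 3-4 the coil series, and rows 5-6 the (c_k, d_k) coefficients of the CBT bending constant; unused rows are left as zeros.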
+ number_of_coeff = 6 + len_of_coeff = 5 + bonds_4_coeff = np.empty( + (len(bonds_4), number_of_coeff, len_of_coeff), dtype=np.float64 + ) + bonds_4_type = np.empty(len(bonds_4), dtype=int) + bonds_4_last = np.zeros(len(bonds_4), dtype=int) + for i, b in enumerate(bonds_4): + bonds_4_atom1[i] = b[0] + bonds_4_atom2[i] = b[1] + bonds_4_atom3[i] = b[2] + bonds_4_atom4[i] = b[3] + bonds_4_coeff[i] = np.resize(b[4], (number_of_coeff, len_of_coeff)) + bonds_4_type[i] = b[5] + bonds_4_last[bb_index] = 1 + return ( bonds_2_atom1, bonds_2_atom2, @@ -135,6 +243,13 @@ def prepare_bonds(molecules, names, bonds, indices, config): bonds_3_atom3, bonds_3_equilibrium, bonds_3_stength, + bonds_4_atom1, + bonds_4_atom2, + bonds_4_atom3, + bonds_4_atom4, + bonds_4_coeff, + bonds_4_type, + bonds_4_last, ) @@ -289,8 +404,78 @@ def compute_angle_forces__plain(f_angles, r, bonds_3, box_size): f_angles[a, :] += fa f_angles[c, :] += fc - f_angles[b, :] += -(fa + fc) + f_angles[b, :] -= fa + fc + # f_angles[b, :] += -(fa + fc) energy -= 0.5 * f * d return energy + + +def compute_dihedral_forces__plain(f_dihedrals, r, bonds_4, box_size): + """Calculates dihedral forces with a cosine sum potential. A sign + is probably wrong somewhere""" + f_dihedrals.fill(0.0) + energy = 0.0 + + for a, b, c, d, coeff, phase in bonds_4: + f = r[a, :] - r[b, :] + g = r[b, :] - r[c, :] + h = r[d, :] - r[c, :] + + for dim in range(3): + f -= box_size[dim] * np.around(f[dim] / box_size[dim]) + g -= box_size[dim] * np.around(g[dim] / box_size[dim]) + h -= box_size[dim] * np.around(h[dim] / box_size[dim]) + + v = np.cross(f, g) + w = np.cross(h, g) + vv = np.dot(v, v) + ww = np.dot(w, w) + gn = np.linalg.norm(g) + + cosphi = np.dot(v, w) + sinphi = np.dot(np.cross(v, w), g) / gn + phi = np.arctan2(sinphi, cosphi) + + fg = np.dot(f, g) + hg = np.dot(h, g) + sc = v * fg / (vv * gn) - w * hg / (ww * gn) + + df = 0 + + for m in range(len(coeff)): + energy += coeff[m] * (1 + np.cos(m * phi - phase[m])) + df += m * coeff[m] * np.sin(m * phi - phase[m]) + + force_on_a = df * gn * v / vv + force_on_d = df * gn * w / ww + + f_dihedrals[a, :] -= force_on_a + f_dihedrals[b, :] += df * sc + force_on_a + f_dihedrals[c, :] -= df * sc + force_on_d + f_dihedrals[d, :] += force_on_d + return energy + + +def dipole_forces_redistribution( + f_on_bead, f_dipoles, trans_matrices, a, b, c, d, type_array, last_bb +): + """Redistribute electrostatic forces calculated from ghost dipole point + charges to the backcone atoms of the protein.""" + + f_on_bead.fill(0.0) + for i, j, k, l, fd, matrix, dih_type, is_last in zip( + a, b, c, d, f_dipoles, trans_matrices, type_array, last_bb + ): + if dih_type == 1: + tot_force = fd[0] + fd[1] + f_on_bead[i] += matrix[0] @ tot_force # Atom A + f_on_bead[j] += matrix[1] @ tot_force + 0.5 * tot_force # Atom B + f_on_bead[k] += matrix[2] @ tot_force + 0.5 * tot_force # Atom C + + if is_last == 1: + tot_force = fd[2] + fd[3] + f_on_bead[j] += matrix[3] @ tot_force # Atom B + f_on_bead[k] += matrix[4] @ tot_force + 0.5 * tot_force # Atom C + f_on_bead[l] += matrix[5] @ tot_force + 0.5 * tot_force # Atom D diff --git a/hymd/input_parser.py b/hymd/input_parser.py index c956be0f..89bda69c 100644 --- a/hymd/input_parser.py +++ b/hymd/input_parser.py @@ -7,7 +7,7 @@ from mpi4py import MPI from dataclasses import dataclass, field from typing import List, Union, ClassVar -from force import Bond, Angle, Chi +from force import Bond, Angle, Dihedral, Chi from logger import Logger @@ -34,9 +34,10 @@ class Config: file_name: str = "" name: 
str = None tags: List[str] = field(default_factory=list) - chi: List[Chi] = field(default_factory=list) - angle_bonds: List[Angle] = field(default_factory=list) bonds: List[Bond] = field(default_factory=list) + angle_bonds: List[Angle] = field(default_factory=list) + dihedrals: List[Dihedral] = field(default_factory=list) + chi: List[Chi] = field(default_factory=list) n_particles: int = None max_molecule_size: int = None n_flush: int = None @@ -44,6 +45,8 @@ class Config: thermostat_coupling_groups: List[List[str]] = field(default_factory=list) initial_energy: float = None cancel_com_momentum: Union[int, bool] = False + coulombtype: str = None + dielectric_const: float = None def __str__(self): bonds_str = "\tbonds:\n" + "".join( @@ -61,6 +64,34 @@ def __str__(self): for k in self.angle_bonds ] ) + dihedrals_str = "\tdihedrals:\n" + "".join( + [ + ( + f"\t\t{k.atom_1} {k.atom_2} {k.atom_3} {k.atom_4}: " + # This might need to be fixed/made prettier, probably there's an easier way + + ( + "\n\t\t" + + " " * len(f"{k.atom_1} {k.atom_2} {k.atom_3} {k.atom_4}: ") + ).join( + map( + str, + [ + [round(num, 3) for num in c_in] + if isinstance(c_in, list) + else c_in + for c_in in k.coeffs + ], + ) + ) + + ( + "\n\t\t" + + " " * len(f"{k.atom_1} {k.atom_2} {k.atom_3} {k.atom_4}: ") + ) + + f"dih_type = {k.dih_type}\n" + ) + for k in self.dihedrals + ] + ) chi_str = "\tchi:\n" + "".join( [ (f"\t\t{k.atom_1} {k.atom_2}: " + f"{k.interaction_energy}\n") @@ -81,9 +112,21 @@ def __str__(self): ret_str = f'\n\n\tConfig: {self.file_name}\n\t{50 * "-"}\n' for k, v in self.__dict__.items(): - if k not in ("bonds", "angle_bonds", "chi", "thermostat_coupling_groups"): + if k not in ( + "bonds", + "angle_bonds", + "dihedrals", + "chi", + "thermostat_coupling_groups", + ): ret_str += f"\t{k}: {v}\n" - ret_str += bonds_str + angle_str + chi_str + thermostat_coupling_groups_str + ret_str += ( + bonds_str + + angle_str + + dihedrals_str + + chi_str + + thermostat_coupling_groups_str + ) return ret_str @@ -201,6 +244,53 @@ def read_config_toml(file_path): return toml_content +def propensity_potential_coeffs(x: float, comm): + alpha_coeffs = np.array( + [ + [7.406, -5.298, -2.570, 1.336, 0.739], + [-0.28632126, 1.2099146, 1.18122138, 0.49075168, 0.98495911], + ] + ) + beta_coeffs = np.array( + [ + [3.770, 5.929, -4.151, -0.846, 0.190], + [-0.2300693, -0.0583289, 0.99342396, 1.03237971, 2.90160988], + ] + ) + coil_coeffs = np.array( + [ + [1.416, -0.739, 0.990, -0.397, 0.136], + [1.3495933, 0.45649087, 2.30441057, -0.12274901, -0.26179939], + ] + ) + + zero_add = np.zeros((2, 5)) + if x == -1: + return np.concatenate((alpha_coeffs, zero_add)) + elif x == 0: + return np.concatenate((coil_coeffs, zero_add)) + elif x == 1: + return np.concatenate((beta_coeffs, zero_add)) + + abs_x = np.abs(x) + if abs_x > 1: + err_str = ( + f"The provided value of λ = {x} is out of λ definition range, [-1.0, 1.0]." 
+ ) + Logger.rank0.log(logging.ERROR, err_str) + if comm.Get_rank() == 0: + raise ValueError(err_str) + + else: + coil_coeffs[0] *= 1 - abs_x + if x < 0: + alpha_coeffs[0] *= 0.5 * (abs_x - x) + return np.concatenate((alpha_coeffs, coil_coeffs)) + else: + beta_coeffs[0] *= 0.5 * (abs_x + x) + return np.concatenate((beta_coeffs, coil_coeffs)) + + def parse_config_toml(toml_content, file_path=None, comm=MPI.COMM_WORLD): parsed_toml = tomli.loads(toml_content) config_dict = {} @@ -218,11 +308,13 @@ def parse_config_toml(toml_content, file_path=None, comm=MPI.COMM_WORLD): "name", "n_particles", "max_molecule_size", + "coulombtype", + "dielectric_const", ): config_dict[n] = None # Defaults = [] - for n in ("bonds", "angle_bonds", "chi", "tags"): + for n in ("bonds", "angle_bonds", "dihedrals", "chi", "tags"): config_dict[n] = [] # Flatten the .toml dictionary, ignoring the top level [tag] directives (if @@ -253,6 +345,54 @@ def parse_config_toml(toml_content, file_path=None, comm=MPI.COMM_WORLD): equilibrium=b[3], strength=b[4], ) + if k == "dihedrals": + config_dict["dihedrals"] = [None] * len(v) + for i, b in enumerate(v): + try: + dih_type = int(b[2][0]) + except IndexError: + Logger.rank0.log( + logging.WARNING, "Dihedral type not provided, defaulting to 0." + ) + dih_type = 0 + + # Probably it's better to move this in check_dihedrals? + wrong_len = len(b[1]) not in (1, 2) + wrong_type_1 = len(b[1]) == 1 and not isinstance(b[1][0], float) + wrong_type_2 = len(b[1]) == 2 and not isinstance(b[1][0], list) + if wrong_len or wrong_type_1 or wrong_type_2: + err_str = ( + "The coefficients specified for the dihedral type (0) do not match the correct structure." + + "Either use [lambda] or [[cn_prop], [dn_prop]], or select the correct dihedral type." + ) + Logger.rank0.log(logging.ERROR, err_str) + if comm.Get_rank() == 0: + raise RuntimeError(err_str) + + # FIXME: this is messy af, I don't like it + if dih_type == 0 and isinstance(b[1][0], (float, int)): + coeff = propensity_potential_coeffs(b[1][0], comm) + elif dih_type == 1 and len(b[1]) == 3: + coeff = np.array( + propensity_potential_coeffs(b[1][0][0], comm).tolist() + + b[1][1:] + ) + elif dih_type == 2: + coeff = np.array(b[1]) + else: + coeff = np.insert(np.array(b[1]), 2, np.zeros((2, 5)), axis=0) + + config_dict["dihedrals"][i] = Dihedral( + atom_1=b[0][0], + atom_2=b[0][1], + atom_3=b[0][2], + atom_4=b[0][3], + coeffs=coeff, + dih_type=dih_type, + ) + # if k == "improper dihedrals": + # config_dict["improper dihedrals"] = [None] * len(v) + # ... 
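propensity_potential_coeffs interpolates between the α-helix, coil and β-sheet cosine-series tables as a function of λ ∈ [-1, 1]: λ = -1 selects the pure α table, 0 the pure coil table, +1 the pure β table, and intermediate values scale the amplitude rows while the phase rows are kept. A condensed sketch of that mixing rule (amplitude rows copied from the tables above, phase rows rounded to three decimals, and the zero-padding returned for the pure λ = -1, 0, +1 cases left out):

```python
import numpy as np

alpha = np.array([[7.406, -5.298, -2.570, 1.336, 0.739],   # amplitudes
                  [-0.286, 1.210, 1.181, 0.491, 0.985]])   # phases (rounded)
beta = np.array([[3.770, 5.929, -4.151, -0.846, 0.190],
                 [-0.230, -0.058, 0.993, 1.032, 2.902]])
coil = np.array([[1.416, -0.739, 0.990, -0.397, 0.136],
                 [1.350, 0.456, 2.304, -0.123, -0.262]])

def mix_propensity(lam):
    if abs(lam) > 1.0:
        raise ValueError(f"lambda = {lam} outside the definition range [-1, 1]")
    secondary = (alpha if lam < 0 else beta).copy()
    secondary[0] *= abs(lam)          # helix/sheet amplitude grows with |lambda|
    background = coil.copy()
    background[0] *= 1.0 - abs(lam)   # coil amplitude shrinks accordingly
    return np.concatenate((secondary, background))

print(mix_propensity(-0.5))           # half alpha, half coil propensity
```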
if k == "chi": config_dict["chi"] = [None] * len(v) for i, c in enumerate(v): @@ -430,6 +570,42 @@ def check_angles(config, names, comm=MPI.COMM_WORLD): return config +def check_dihedrals(config, names, comm=MPI.COMM_WORLD): + if not hasattr(config, "unique_names"): + config = _find_unique_names(config, names) + unique_names = config.unique_names + + for a in config.dihedrals: + if ( + a.atom_1 not in unique_names + or a.atom_2 not in unique_names + or a.atom_3 not in unique_names + or a.atom_4 not in unique_names + ): + missing = [ + a.atom_1 not in unique_names, + a.atom_2 not in unique_names, + a.atom_3 not in unique_names, + a.atom_4 not in unique_names, + ] + missing_names = [ + atom + for i, atom in enumerate([a.atom_1, a.atom_2, a.atom_3, a.atom_4]) + if missing[i] + ] + missing_str = ", ".join(np.unique(missing_names)) + + warn_str = ( + f"Dihedral type {a.atom_1}--{a.atom_2}--{a.atom_3}--{a.atom_4} " + f"specified in {config.file_name} but no {missing_str} atoms " + f"are present in the specified system (names array)" + ) + Logger.rank0.log(logging.WARNING, warn_str) + if comm.Get_rank() == 0: + warnings.warn(warn_str) + return config + + def check_chi(config, names, comm=MPI.COMM_WORLD): if not hasattr(config, "unique_names"): config = _find_unique_names(config, names) @@ -467,6 +643,7 @@ def check_chi(config, names, comm=MPI.COMM_WORLD): ): found = True if not found: + config.chi.append(Chi(atom_1=n, atom_2=m, interaction_energy=0.0)) warn_str = ( f"Atom types {n} and {m} found in the " f"system, but no chi interaction {n}--{m} " @@ -748,8 +925,9 @@ def check_config(config, indices, names, types, comm=MPI.COMM_WORLD): config = check_name(config, comm=comm) config = check_n_particles(config, indices, comm=comm) config = check_chi(config, names, comm=comm) - config = check_angles(config, names, comm=comm) config = check_bonds(config, names, comm=comm) + config = check_angles(config, names, comm=comm) + config = check_dihedrals(config, names, comm=comm) config = check_hamiltonian(config, comm=comm) config = check_thermostat_coupling_groups(config, comm=comm) config = check_cancel_com_momentum(config, comm=comm) diff --git a/hymd/main.py b/hymd/main.py index 0fe9bfaf..dea459e5 100644 --- a/hymd/main.py +++ b/hymd/main.py @@ -13,28 +13,43 @@ from types import ModuleType as moduleobj import warnings - from hamiltonian import DefaultNoChi, DefaultWithChi from field import ( compute_field_force, update_field, compute_field_and_kinetic_energy, domain_decomposition, + # update_field_force_energy_q, # elec related + update_field_force_q, + compute_field_energy_q, +) + +from file_io import ( + distribute_input, + OutDataset, + store_static, + store_data, ) -from file_io import distribute_input, OutDataset, store_static, store_data + from force import compute_bond_forces__fortran as compute_bond_forces from force import compute_angle_forces__fortran as compute_angle_forces +from force import compute_dihedral_forces__fortran as compute_dihedral_forces +from force import dipole_forces_redistribution from force import prepare_bonds + from input_parser import ( read_config_toml, parse_config_toml, check_config, convert_CONF_to_config, ) + from integrator import integrate_velocity, integrate_position from logger import Logger from thermostat import csvr_thermostat +# pyright: reportUnboundVariable=false + def fmtdt(timedelta): ### FIX ME (move this somewhere else) days = timedelta.days @@ -82,6 +97,18 @@ def configure_runtime(comm): action="store_true", help="Disable three-particle angle bond 
forces", ) + ap.add_argument( + "--disable-dihedrals", + default=False, + action="store_true", + help="Disable four-particle dihedral forces", + ) + ap.add_argument( + "--disable-dipole", + default=False, + action="store_true", + help="Disable BB dipole calculation", + ) ap.add_argument( "--double-precision", default=False, @@ -128,7 +155,7 @@ def configure_runtime(comm): help="Set the numpy random generator seed for every rank", ) ap.add_argument( - "--logfile", default=None, help="Redirect event logging to specified file" + "--logfile", default="sim.log", help="Redirect event logging to specified file" ) ap.add_argument("config", help="Config .py or .toml input configuration script") ap.add_argument("input", help="input.hdf5") @@ -143,6 +170,7 @@ def configure_runtime(comm): os.makedirs(args.destdir, exist_ok=True) comm.barrier() + # Is this used anywhere? if args.seed is not None: np.random.seed(args.seed) else: @@ -150,7 +178,9 @@ def configure_runtime(comm): # Setup logger Logger.setup( - default_level=logging.INFO, log_file=args.logfile, verbose=args.verbose + default_level=logging.INFO, + log_file=f"{args.destdir}/{args.logfile}", + verbose=args.verbose, ) if args.profile: @@ -260,6 +290,26 @@ def generate_initial_velocities(velocities, config, comm=MPI.COMM_WORLD): return velocities +def add_forces(): + if charges_flag: + return ( + field_forces + + bond_forces + + angle_forces + + dihedral_forces + + reconstructed_forces + + elec_forces + ) + else: + return ( + field_forces + + bond_forces + + angle_forces + + dihedral_forces + + reconstructed_forces + ) + + if __name__ == "__main__": comm = MPI.COMM_WORLD rank = comm.Get_rank() @@ -272,6 +322,7 @@ def generate_initial_velocities(velocities, config, comm=MPI.COMM_WORLD): if args.double_precision: dtype = np.float64 + # Pointelss if condition? if dtype == np.float64: from force import ( compute_bond_forces__fortran__double as compute_bond_forces, @@ -279,9 +330,13 @@ def generate_initial_velocities(velocities, config, comm=MPI.COMM_WORLD): from force import ( compute_angle_forces__fortran__double as compute_angle_forces, ) # noqa: E501, F811 + from force import ( + compute_dihedral_forces__fortran__double as compute_dihedral_forces, + ) # noqa: E501, F811 else: dtype = np.float32 + ###### Access the information in h5md file (e.g. .h5) driver = "mpio" if not args.disable_mpio else None with h5py.File(args.input, "r", driver=driver, comm=comm) as in_file: rank_range, molecules_flag = distribute_input( @@ -312,6 +367,12 @@ def generate_initial_velocities(velocities, config, comm=MPI.COMM_WORLD): if molecules_flag: molecules = in_file["molecules"][rank_range] bonds = in_file["bonds"][rank_range] + ## charges xinmeng + if "charge" in in_file: + charges = in_file["charge"][rank_range] + charges_flag = True + else: + charges_flag = False config = check_config(config, indices, names, types, comm=comm) if config.n_print: @@ -323,20 +384,35 @@ def generate_initial_velocities(velocities, config, comm=MPI.COMM_WORLD): elif config.cancel_com_momentum: velocities = cancel_com_momentum(velocities, config, comm=comm) - positions = np.mod(positions, config.box_size[None, :]) + bond_forces = np.zeros_like(positions) + angle_forces = np.zeros_like(positions) + dihedral_forces = np.zeros_like(positions) + # Initialize only for proteins? 
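add_forces() builds the total per-bead force by summing the per-interaction arrays, appending the electrostatic contribution only when the input file carries charges. A standalone variant that takes the arrays explicitly, rather than closing over module-level names as the helper above does, might look like this (all array names are illustrative):

```python
import numpy as np

def total_forces(field, bond, angle, dihedral, reconstructed, elec=None):
    # Sum the per-interaction force arrays; skip the electrostatic term
    # when the system has no charges.
    total = field + bond + angle + dihedral + reconstructed
    if elec is not None:
        total = total + elec
    return total

n_particles = 4
terms = [np.random.rand(n_particles, 3) for _ in range(5)]
print(total_forces(*terms).shape)                               # (4, 3)
print(total_forces(*terms, elec=np.zeros((n_particles, 3))).shape)
```

Passing the arrays explicitly keeps the helper usable and testable outside the `__main__` scope it is defined in.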
+ reconstructed_forces = np.zeros_like(positions) + field_forces = np.zeros_like(positions) + elec_forces = np.zeros_like(positions) - bond_forces = np.zeros( - shape=(len(positions), 3), dtype=dtype - ) # , order='F') # noqa: E501 - angle_forces = np.zeros( - shape=(len(positions), 3), dtype=dtype - ) # , order='F') # noqa: E501 - field_forces = np.zeros(shape=(len(positions), 3), dtype=dtype) + # TODO: Get box_size from h5, not from toml? + positions = np.mod(positions, config.box_size[None, :]) + # Initialize dipoles, populate them if protein_flag = True + if args.disable_dipole: + dipole_flag = 0 + else: + dipole_flag = 1 # Only calculate dipole in the outer Respa step + protein_flag = 0 + dipole_positions = np.zeros(shape=(4, 3), dtype=dtype) + dipole_forces = np.zeros(shape=(4, 3)) + dipole_charges = np.zeros(shape=4) + transfer_matrices = np.zeros(shape=(6, 3, 3), dtype=dtype) + + # Initialize energies field_energy = 0.0 bond_energy = 0.0 angle_energy = 0.0 + dihedral_energy = 0.0 kinetic_energy = 0.0 + field_q_energy = 0.0 ## q related # Ignore numpy numpy.VisibleDeprecationWarning: Creating an ndarray from # ragged nested sequences until it is fixed in pmesh @@ -346,7 +422,6 @@ def generate_initial_velocities(velocities, config, comm=MPI.COMM_WORLD): category=np.VisibleDeprecationWarning, message=r"Creating an ndarray from ragged nested sequences", ) - # The first argument of ParticleMesh has to be a tuple pm = pmesh.ParticleMesh( config.mesh_size, BoxSize=config.box_size, dtype="f4", comm=comm ) @@ -368,60 +443,82 @@ def generate_initial_velocities(velocities, config, comm=MPI.COMM_WORLD): Logger.rank0.log(logging.INFO, f"pfft-python processor mesh: {str(pm.np)}") + # Initialize density fields phi = [pm.create("real", value=0.0) for _ in range(config.n_types)] phi_fourier = [ pm.create("complex", value=0.0) for _ in range(config.n_types) ] # noqa: E501 force_on_grid = [ - [pm.create("real", value=0.0) for d in range(3)] for _ in range(config.n_types) + [pm.create("real", value=0.0) for _ in range(3)] for _ in range(config.n_types) ] v_ext_fourier = [pm.create("complex", value=0.0) for _ in range(4)] v_ext = [pm.create("real", value=0.0) for _ in range(config.n_types)] + # Initialize charge density fields + _SPACE_DIM = 3 + if charges_flag and config.coulombtype == "PIC_Spectral": + phi_q = pm.create("real", value=0.0) + phi_q_fourier = pm.create("complex", value=0.0) + elec_field_fourier = [ + pm.create("complex", value=0.0) for _ in range(_SPACE_DIM) + ] # for force calculation + elec_field = [ + pm.create("real", value=0.0) for _ in range(_SPACE_DIM) + ] # for force calculation + elec_energy_field = pm.create( + "complex", value=0.0 + ) # for energy calculation --> complex form needed as its converted from complex field; Imaginary part as zero; + + args_in = [ + velocities, + indices, + bond_forces, + angle_forces, + dihedral_forces, + reconstructed_forces, + field_forces, + names, + types, + ] + args_recv = [ + "positions", + "velocities", + "indices", + "bond_forces", + "angle_forces", + "dihedral_forces", + "reconstructed_forces", + "field_forces", + "names", + "types", + ] + + if charges_flag: ## add charge related + args_in.append(charges) + args_in.append(elec_forces) + args_recv.append("charges") + args_recv.append("elec_forces") + if molecules_flag: + args_recv.append("bonds") + args_recv.append("molecules") + + ## cmd string to excecut the (...) 
= dd + _str_receive_dd = ",".join(args_recv) + _cmd_receive_dd = f"({_str_receive_dd}) = dd" + + ############### DD if config.domain_decomposition: dd = domain_decomposition( positions, pm, - velocities, - indices, - bond_forces, - angle_forces, - field_forces, - names, - types, + *tuple(args_in), molecules=molecules if molecules_flag else None, bonds=bonds if molecules_flag else None, verbose=args.verbose, comm=comm, ) - if molecules_flag: - ( - positions, - velocities, - indices, - bond_forces, - angle_forces, - field_forces, - names, - types, - bonds, - molecules, - ) = dd - else: - ( - positions, - velocities, - indices, - bond_forces, - angle_forces, - field_forces, - names, - types, - ) = dd - positions = np.asfortranarray(positions) - velocities = np.asfortranarray(velocities) - bond_forces = np.asfortranarray(bond_forces) - angle_forces = np.asfortranarray(angle_forces) + + exec(_cmd_receive_dd) if not args.disable_field: layouts = [pm.decompose(positions[types == t]) for t in range(config.n_types)] @@ -456,8 +553,35 @@ def generate_initial_velocities(velocities, config, comm=MPI.COMM_WORLD): else: kinetic_energy = comm.allreduce(0.5 * config.mass * np.sum(velocities ** 2)) + ## Add Simple Poisson Equation Electrostatic: compute field/force/energy together + ##field_q_energy = 0.0 + if charges_flag and config.coulombtype == "PIC_Spectral": + layout_q = pm.decompose(positions) + update_field_force_q( + charges, # charge + phi_q, # chage density + phi_q_fourier, + elec_field_fourier, # for force calculation + elec_field, + elec_forces, + layout_q, #### general terms + pm, + positions, + config, + ) + + field_q_energy = compute_field_energy_q( + config, + phi_q_fourier, + elec_energy_field, # for energy calculation + field_q_energy, + comm=comm, + ) + if molecules_flag: - if not (args.disable_bonds and args.disable_angle_bonds): + if not ( + args.disable_bonds and args.disable_angle_bonds and args.disable_dihedrals + ): bonds_prep = prepare_bonds(molecules, names, bonds, indices, config) ( bonds_2_atom1, @@ -469,7 +593,63 @@ def generate_initial_velocities(velocities, config, comm=MPI.COMM_WORLD): bonds_3_atom3, bonds_3_equilibrium, bonds_3_stength, + bonds_4_atom1, + bonds_4_atom2, + bonds_4_atom3, + bonds_4_atom4, + bonds_4_coeff, + bonds_4_type, + bonds_4_last, ) = bonds_prep + + bonds_4_coeff = np.asfortranarray(bonds_4_coeff) + + if bonds_4_type.any() > 1: + err_str = ( + "0 and 1 are the only currently supported dihedral angle types." 
+ ) + Logger.rank0.log(logging.ERROR, err_str) + if rank == 0: + raise NotImplementedError(err_str) + + # Check if we have a protein + protein_flag = comm.allreduce(bonds_4_type.any() == 1) + if protein_flag and not args.disable_dipole: + # each rank will have different n_tors, don't need to domain decompose dipoles + n_tors = len(bonds_4_atom1) + dipole_positions = np.zeros((n_tors, 4, 3), dtype=dtype) + # 4 cause we need to take into account the last angle in the molecule + dipole_charges = np.array( + [ + 2 * [0.25, -0.25] + if (bonds_4_type[i], bonds_4_last[i]) == (1, 1) + else [0.25, -0.25, 0.0, 0.0] + if bonds_4_type[i] == 1 + else 2 * [0.0, 0.0] + for i in range(n_tors) + ], + dtype=dtype, + ).flatten() + dipole_forces = np.zeros_like(dipole_positions) + transfer_matrices = np.zeros(shape=(n_tors, 6, 3, 3), dtype=dtype) + # Fields + phi_dipoles = pm.create("real", value=0.0) + phi_dipoles_fourier = pm.create("complex", value=0.0) + dipoles_field_fourier = [ + pm.create("complex", value=0.0) for _ in range(_SPACE_DIM) + ] + dipoles_field = [ + pm.create("real", value=0.0) for _ in range(_SPACE_DIM) + ] + + positions = np.asfortranarray(positions) + velocities = np.asfortranarray(velocities) + bond_forces = np.asfortranarray(bond_forces) + angle_forces = np.asfortranarray(angle_forces) + dihedral_forces = np.asfortranarray(dihedral_forces) + dipole_positions = np.asfortranarray(dipole_positions) + transfer_matrices = np.asfortranarray(transfer_matrices) + if not args.disable_bonds: bond_energy_ = compute_bond_forces( bond_forces, @@ -481,6 +661,8 @@ def generate_initial_velocities(velocities, config, comm=MPI.COMM_WORLD): bonds_2_stength, ) bond_energy = comm.allreduce(bond_energy_, MPI.SUM) + else: + bonds_2_atom1, bonds_2_atom2 = [], [] if not args.disable_angle_bonds: angle_energy_ = compute_angle_forces( angle_forces, @@ -493,16 +675,72 @@ def generate_initial_velocities(velocities, config, comm=MPI.COMM_WORLD): bonds_3_stength, ) angle_energy = comm.allreduce(angle_energy_, MPI.SUM) - else: + if not args.disable_dihedrals: + dihedral_energy_ = compute_dihedral_forces( + dihedral_forces, + positions, + dipole_positions, + transfer_matrices, + config.box_size, + bonds_4_atom1, + bonds_4_atom2, + bonds_4_atom3, + bonds_4_atom4, + bonds_4_coeff, + bonds_4_type, + bonds_4_last, + dipole_flag, + ) + dihedral_energy = comm.allreduce(dihedral_energy_, MPI.SUM) + + if protein_flag and not args.disable_dipole: + dipole_positions = np.reshape(dipole_positions, (4 * n_tors, 3)) + dipole_forces = np.reshape(dipole_forces, (4 * n_tors, 3)) + + layout_dipoles = pm.decompose(dipole_positions) + update_field_force_q( + dipole_charges, + phi_dipoles, + phi_dipoles_fourier, + dipoles_field_fourier, + dipoles_field, + dipole_forces, + layout_dipoles, + pm, + dipole_positions, + config, + ) + + dipole_positions = np.reshape(dipole_positions, (n_tors, 4, 3)) + dipole_forces = np.reshape(dipole_forces, (n_tors, 4, 3)) + + dipole_positions = np.asfortranarray(dipole_positions) + dipole_forces_redistribution( + reconstructed_forces, + dipole_forces, + transfer_matrices, + bonds_4_atom1, + bonds_4_atom2, + bonds_4_atom3, + bonds_4_atom4, + bonds_4_type, + bonds_4_last, + ) - bonds_2_atom1, bonds_2_atom2 = [], [] else: bonds_2_atom1, bonds_2_atom2 = [], [] - config.initial_energy = field_energy + kinetic_energy + bond_energy + angle_energy - out_dataset = OutDataset(args.destdir, config, - double_out=args.double_output, - disable_mpio=args.disable_mpio) + config.initial_energy = ( + field_energy + 
kinetic_energy + bond_energy + angle_energy + dihedral_energy + ) + + out_dataset = OutDataset( + args.destdir, + config, + double_out=args.double_output, + disable_mpio=args.disable_mpio, + ) + store_static( out_dataset, rank_range, @@ -514,6 +752,7 @@ def generate_initial_velocities(velocities, config, comm=MPI.COMM_WORLD): bonds_2_atom2, velocity_out=args.velocity_output, force_out=args.force_output, + charges=charges if charges_flag else False, comm=comm, ) @@ -534,7 +773,11 @@ def generate_initial_velocities(velocities, config, comm=MPI.COMM_WORLD): ) else: kinetic_energy = comm.allreduce(0.5 * config.mass * np.sum(velocities ** 2)) - temperature = (2 / 3) * kinetic_energy / (config.R * config.n_particles) # noqa: E501 + + temperature = ( + (2 / 3) * kinetic_energy / (config.R * config.n_particles) + ) # noqa: E501 + store_data( out_dataset, step, @@ -542,30 +785,36 @@ def generate_initial_velocities(velocities, config, comm=MPI.COMM_WORLD): indices, positions, velocities, - field_forces + bond_forces + angle_forces, + add_forces(), config.box_size, temperature, kinetic_energy, bond_energy, angle_energy, + dihedral_energy, field_energy, + field_q_energy, ##<---------- config.time_step, config, velocity_out=args.velocity_output, force_out=args.force_output, + charge_out=charges_flag, dump_per_particle=args.dump_per_particle, comm=comm, ) + if rank == 0: loop_start_time = datetime.datetime.now() last_step_time = datetime.datetime.now() flush_step = 0 + # ======================================================================= # # ================= |\/| |¯¯\ | |¯¯| |¯¯| |¯¯) ================= # # ================= | | |__/ |___ |__| |__| |¯¯ ================= # # ======================================================================= # for step in range(config.n_steps): + # if comm.Get_rank() == 0: current_step_time = datetime.datetime.now() if step == 0 and args.verbose > 1: @@ -608,12 +857,20 @@ def generate_initial_velocities(velocities, config, comm=MPI.COMM_WORLD): velocities = integrate_velocity( velocities, field_forces / config.mass, config.time_step ) + if charges_flag and config.coulombtype == "PIC_Spectral": + velocities = integrate_velocity( + velocities, elec_forces / config.mass, config.time_step + ) + if protein_flag: + velocities = integrate_velocity( + velocities, reconstructed_forces / config.mass, config.time_step + ) # Inner rRESPA steps for inner in range(config.respa_inner): velocities = integrate_velocity( velocities, - (bond_forces + angle_forces) / config.mass, + (bond_forces + angle_forces + dihedral_forces) / config.mass, config.time_step / config.respa_inner, ) positions = integrate_position( @@ -644,9 +901,30 @@ def generate_initial_velocities(velocities, config, comm=MPI.COMM_WORLD): bonds_3_equilibrium, bonds_3_stength, ) + if not args.disable_dihedrals: + if inner == config.respa_inner - 1 and not args.disable_dipole: + dipole_flag = 1 + else: + dipole_flag = 0 + dihedral_energy_ = compute_dihedral_forces( + dihedral_forces, + positions, + dipole_positions, + transfer_matrices, + config.box_size, + bonds_4_atom1, + bonds_4_atom2, + bonds_4_atom3, + bonds_4_atom4, + bonds_4_coeff, + bonds_4_type, + bonds_4_last, + dipole_flag, + ) + velocities = integrate_velocity( velocities, - (bond_forces + angle_forces) / config.mass, + (bond_forces + angle_forces + dihedral_forces) / config.mass, config.time_step / config.respa_inner, ) @@ -672,11 +950,81 @@ def generate_initial_velocities(velocities, config, comm=MPI.COMM_WORLD): layouts, positions, force_on_grid, 
field_forces, types, config.n_types ) + ## add q related + if charges_flag and config.coulombtype == "PIC_Spectral": + layout_q = pm.decompose(positions) + ### split + update_field_force_q( + charges, # charge + phi_q, # chage density + phi_q_fourier, + elec_field_fourier, # for force calculation + elec_field, + elec_forces, + layout_q, #### general terms + pm, + positions, + config, + ) + + field_q_energy = compute_field_energy_q( + config, + phi_q_fourier, + elec_energy_field, # for energy calculation + field_q_energy, + comm=comm, + ) + + if protein_flag and not args.disable_dipole: + dipole_positions = np.reshape(dipole_positions, (4 * n_tors, 3)) + dipole_forces = np.reshape(dipole_forces, (4 * n_tors, 3)) + + layout_dipoles = pm.decompose(dipole_positions) + update_field_force_q( + dipole_charges, + phi_dipoles, + phi_dipoles_fourier, + dipoles_field_fourier, + dipoles_field, + dipole_forces, + layout_dipoles, + pm, + dipole_positions, + config, + ) + + dipole_positions = np.reshape(dipole_positions, (n_tors, 4, 3)) + dipole_forces = np.reshape(dipole_forces, (n_tors, 4, 3)) + + dipole_positions = np.asfortranarray(dipole_positions) + dipole_forces_redistribution( + reconstructed_forces, + dipole_forces, + transfer_matrices, + bonds_4_atom1, + bonds_4_atom2, + bonds_4_atom3, + bonds_4_atom4, + bonds_4_type, + bonds_4_last, + ) + # Second rRESPA velocity step + # Move these under the previous ifs instead of having new ones? + # Field forces are zeros anyway even if disable_field == True velocities = integrate_velocity( velocities, field_forces / config.mass, config.time_step ) + if charges_flag and config.coulombtype == "PIC_Spectral": + velocities = integrate_velocity( + velocities, elec_forces / config.mass, config.time_step + ) + if protein_flag and not args.disable_dipole: + velocities = integrate_velocity( + velocities, reconstructed_forces / config.mass, config.time_step + ) + # Only compute and keep the molecular bond energy from the last rRESPA # inner step if molecules_flag: @@ -684,61 +1032,55 @@ def generate_initial_velocities(velocities, config, comm=MPI.COMM_WORLD): bond_energy = comm.allreduce(bond_energy_, MPI.SUM) if not args.disable_angle_bonds: angle_energy = comm.allreduce(angle_energy_, MPI.SUM) + if not args.disable_dihedrals: + dihedral_energy = comm.allreduce(dihedral_energy_, MPI.SUM) if step != 0 and config.domain_decomposition: if np.mod(step, config.domain_decomposition) == 0: positions = np.ascontiguousarray(positions) bond_forces = np.ascontiguousarray(bond_forces) angle_forces = np.ascontiguousarray(angle_forces) + dihedral_forces = np.ascontiguousarray(dihedral_forces) - dd = domain_decomposition( - positions, - pm, + args_in = [ velocities, indices, bond_forces, angle_forces, + dihedral_forces, + reconstructed_forces, field_forces, names, types, + ] + + if charges_flag: ## add charge related + args_in.append(charges) + args_in.append(elec_forces) + + dd = domain_decomposition( + positions, + pm, + *tuple(args_in), molecules=molecules if molecules_flag else None, bonds=bonds if molecules_flag else None, verbose=args.verbose, comm=comm, ) - if molecules_flag: - ( - positions, - velocities, - indices, - bond_forces, - angle_forces, - field_forces, - names, - types, - bonds, - molecules, - ) = dd - else: - ( - positions, - velocities, - indices, - bond_forces, - angle_forces, - field_forces, - names, - types, - ) = dd + + exec(_cmd_receive_dd) positions = np.asfortranarray(positions) bond_forces = np.asfortranarray(bond_forces) angle_forces = 
np.asfortranarray(angle_forces) + dihedral_forces = np.asfortranarray(dihedral_forces) + # Why are we creating new layouts here? layouts = [ pm.decompose(positions[types == t]) for t in range(config.n_types) ] + # Why do we need to do this again? Molecules moving to different ranks? if molecules_flag: bonds_prep = prepare_bonds(molecules, names, bonds, indices, config) ( @@ -751,8 +1093,41 @@ def generate_initial_velocities(velocities, config, comm=MPI.COMM_WORLD): bonds_3_atom3, bonds_3_equilibrium, bonds_3_stength, + bonds_4_atom1, + bonds_4_atom2, + bonds_4_atom3, + bonds_4_atom4, + bonds_4_coeff, + bonds_4_type, + bonds_4_last, ) = bonds_prep + bonds_4_coeff = np.asfortranarray(bonds_4_coeff) + + # Reinitialize dipoles so each rank has the right amount + if protein_flag and not args.disable_dipole: + # each rank will have different n_tors, don't need to domain decompose dipoles + n_tors = len(bonds_4_atom1) + dipole_positions = np.zeros((n_tors, 4, 3), dtype=dtype) + # 4 cause we need to take into account the last angle in the molecule + dipole_charges = np.array( + [ + 2 * [0.25, -0.25] + if (bonds_4_type[i], bonds_4_last[i]) == (1, 1) + else [0.25, -0.25, 0.0, 0.0] + if bonds_4_type[i] == 1 + else 2 * [0.0, 0.0] + for i in range(n_tors) + ], + dtype=dtype, + ).flatten() + dipole_forces = np.zeros_like(dipole_positions) + transfer_matrices = np.zeros( + shape=(n_tors, 6, 3, 3), dtype=dtype + ) + dipole_positions = np.asfortranarray(dipole_positions) + transfer_matrices = np.asfortranarray(transfer_matrices) + for t in range(config.n_types): if args.verbose > 2: exchange_cost = layouts[t].get_exchange_cost() @@ -767,6 +1142,8 @@ def generate_initial_velocities(velocities, config, comm=MPI.COMM_WORLD): # Thermostat if config.target_temperature: + # Add loop if multiple groups/temperatures are defined + # csrv_thermostat(velocities_grp_i, config_T_i, config_tau_i) csvr_thermostat(velocities, names, config, comm=comm) # Remove total linear momentum @@ -793,13 +1170,20 @@ def generate_initial_velocities(velocities, config, comm=MPI.COMM_WORLD): layouts, comm=comm, ) + + if charges_flag and config.coulombtype == "PIC_Spectral": + field_q_energy = compute_field_energy_q( + config, + phi_q_fourier, + elec_energy_field, # for energy calculation + field_q_energy, + comm=comm, + ) else: kinetic_energy = comm.allreduce( 0.5 * config.mass * np.sum(velocities ** 2) ) - temperature = ( - (2 / 3) * kinetic_energy / (config.R * config.n_particles) - ) + temperature = (2 / 3) * kinetic_energy / (config.R * config.n_particles) if args.disable_field: field_energy = 0.0 store_data( @@ -809,17 +1193,20 @@ def generate_initial_velocities(velocities, config, comm=MPI.COMM_WORLD): indices, positions, velocities, - field_forces + bond_forces + angle_forces, + add_forces(), config.box_size, temperature, kinetic_energy, bond_energy, angle_energy, + dihedral_energy, field_energy, + field_q_energy, # <--------- config.time_step, config, velocity_out=args.velocity_output, force_out=args.force_output, + charge_out=charges_flag, dump_per_particle=args.dump_per_particle, comm=comm, ) @@ -869,10 +1256,38 @@ def generate_initial_velocities(velocities, config, comm=MPI.COMM_WORLD): layouts, comm=comm, ) + + ## add q related + if charges_flag and config.coulombtype == "PIC_Spectral": + layout_q = pm.decompose(positions) + ### split + update_field_force_q( + charges, # charge + phi_q, # chage density + phi_q_fourier, + elec_field_fourier, # for force calculation + elec_field, + elec_forces, + layout_q, #### general terms + 
pm, + positions, + config, + ) + + field_q_energy = compute_field_energy_q( + config, + phi_q_fourier, + elec_energy_field, # for energy calculation + field_q_energy, + comm=comm, + ) + else: kinetic_energy = comm.allreduce(0.5 * config.mass * np.sum(velocities ** 2)) frame = (step + 1) // config.n_print - temperature = (2 / 3) * kinetic_energy / (config.R * config.n_particles) # noqa: E501 + temperature = ( + (2 / 3) * kinetic_energy / (config.R * config.n_particles) + ) # noqa: E501 if args.disable_field: field_energy = 0.0 store_data( @@ -882,18 +1297,22 @@ def generate_initial_velocities(velocities, config, comm=MPI.COMM_WORLD): indices, positions, velocities, - field_forces + bond_forces + angle_forces, + add_forces(), config.box_size, temperature, kinetic_energy, bond_energy, angle_energy, + dihedral_energy, field_energy, + field_q_energy, # <----------- config.time_step, config, velocity_out=args.velocity_output, force_out=args.force_output, + charge_out=charges_flag, dump_per_particle=args.dump_per_particle, comm=comm, ) + out_dataset.close_file() diff --git a/hymd/protein_backbone.f90 b/hymd/protein_backbone.f90 new file mode 100644 index 00000000..8b2b00cc --- /dev/null +++ b/hymd/protein_backbone.f90 @@ -0,0 +1,82 @@ +subroutine cpbbd(force, r, dipoles, trans_matrix, box, a, b, c, d, coeff, phase, energy) +! Compute Protein BackBone Dihedrals (and dipoles) + use dipole_reconstruction + implicit none + + real(4), dimension(:,:), intent(in out) :: force + real(4), dimension(:,:), intent(in) :: r + real(8), dimension(:,:,:), intent(in out) :: dipoles + real(8), dimension(:), intent(in) :: box + integer, dimension(:), intent(in) :: a + integer, dimension(:), intent(in) :: b + integer, dimension(:), intent(in) :: c + integer, dimension(:), intent(in) :: d + real(8), dimension(:,:), intent(in) :: coeff + real(8), dimension(:,:), intent(in) :: phase + real(8), intent(out) :: energy + real(8), dimension(:,:,:,:), intent(in out) :: trans_matrix + + integer :: ind, aa, bb, cc, dd, i + real(8), dimension(3) :: f, g, h, v, w, sc, force_on_a, force_on_d + real(8), dimension(5) :: coeff_, phase_ + real(8) :: g_norm, vv, ww, fg, hg, df, cos_phi, sin_phi, phi + + ! Use this routine only for backbone dihedrals + energy = 0.d0 + force = 0.d0 + dipoles = 0.d0 + + do ind = 1, size(a) + aa = a(ind) + 1 + bb = b(ind) + 1 + cc = c(ind) + 1 + dd = d(ind) + 1 + + f = [r(aa,:) - r(bb,:)] + g = [r(bb,:) - r(cc,:)] + h = [r(dd,:) - r(cc,:)] + + f = f - box * nint(f / box) + g = g - box * nint(g / box) + h = h - box * nint(h / box) + + v = cross(f, g) + w = cross(h, g) + vv = dot_product(v, v) + ww = dot_product(w, w) + g_norm = norm2(g) + + cos_phi = dot_product(v, w) + + ! Add check if cosphi > 1 or cosphi < -1? + ! if (cosphi > 1) then + ! cosphi = 1.d0 + ! if (cosphi < -1) then + ! 
cosphi = -1.d0 + + sin_phi = dot_product(cross(v, w), g) / g_norm + phi = atan2(sin_phi, cos_phi) + + fg = dot_product(f, g) + hg = dot_product(h, g) + + coeff_ = coeff(ind, :) + phase_ = phase(ind, :) + df = 0.d0 + + do i = 0, 4 + energy = energy + coeff_(i + 1) * (1.d0 + cos(i * phi + phase_(i + 1))) + df = df + i * coeff_(i + 1) * sin(i * phi + phase_(i + 1)) + end do + + sc = v * fg / (vv * g_norm) - w * hg / (ww * g_norm) + force_on_a = df * g_norm * v / vv + force_on_d = df * g_norm * w / ww + + force(aa,:) = force(aa,:) - force_on_a + force(bb,:) = force(bb,:) + df * sc + force_on_a + force(cc,:) = force(cc,:) - df * sc - force_on_d + force(dd,:) = force(dd,:) + force_on_d + + call reconstruct(g, h, g_norm, r(cc,:), box, dipoles(ind,:,:), trans_matrix(ind,:,:,:)) +end subroutine cdf diff --git a/test/conftest.py b/test/conftest.py index 71b91dc2..67b0e360 100644 --- a/test/conftest.py +++ b/test/conftest.py @@ -6,6 +6,7 @@ import pytest import collections +# fmt: off # TODO: Remove this when we have a working pip installable main package and # can test against installed package by # @@ -104,6 +105,103 @@ def dppc_single(): return indices, bonds, names, molecules, r, CONF +@pytest.fixture +def alanine_octapeptide(): + """ + Sets up a octa-alanine system to test dihedrals + + Notes + ----- + Type names (indices) and bonds:: + + SC(1) SC(5) + | | + BB(0) -- BB(2) -- BB(4) -- BB(6) -- x2 + | | + SC(3) SC(7) + """ + indices = np.array(range(16), dtype=int) + molecules = np.array([0 for _ in range(16)], dtype=int) + r = np.array( + [ + [3.797, 5.461, 4.763], + [3.662, 5.324, 4.663], + [4.039, 5.203, 4.851], + [4.119, 5.327, 5.010], + [4.367, 5.086, 4.740], + [4.286, 5.034, 4.547], + [4.574, 4.786, 4.766], + [4.583, 4.808, 4.981], + [4.925, 4.693, 4.732], + [4.923, 4.751, 4.523], + [5.124, 4.392, 4.674], + [5.053, 4.306, 4.860], + [5.467, 4.278, 4.720], + [5.539, 4.428, 4.582], + [5.710, 3.987, 4.640], + [5.559, 3.863, 4.675], + ], + dtype=np.float64) + bonds = np.array( + [ + [ 1, 2, -1], # BB(0) + [ 0, -1, -1], # SC(1) + [ 0, 3, 4], # BB(2) + [ 2, -1, -1], # SC(3) + [ 2, 5, 6], # BB(4) + [ 4, -1, -1], # SC(5) + [ 4, 7, 8], # BB(6) + [ 6, -1, -1], # SC(7) + [ 6, 9, 10], # BB(8) + [ 8, -1, -1], # SC(9) + [ 8, 11, 12], # BB(10) + [10, -1, -1], # SC(11) + [10, 13, 14], # BB(12) + [12, -1, -1], # SC(13) + [12, 15, -1], # BB(14) + [14, -1, -1], # SC(15) + ], + dtype=int) + names = np.array( + [ + b"BB", b"SC", b"BB", b"SC", b"BB", b"SC", b"BB", b"SC", + b"BB", b"SC", b"BB", b"SC", b"BB", b"SC", b"BB", b"SC", + ], + dtype="S5" + ) + CONF = {} + Bond = collections.namedtuple( + "Bond", ["atom_1", "atom_2", "equilibrium", "strength"] + ) + Angle = collections.namedtuple( + "Angle", ["atom_1", "atom_2", "atom_3", "equilibrium", "strength"] + ) + Dihedral = collections.namedtuple( + "Dihedral", ["atom_1", "atom_2", "atom_3", "atom_4", "coeff", "phase"] + ) + # Values for bonds and angles taken from MARTINI 3 parameters. + # Not used to test dihedral forces. 
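The fixture stores bonds as a per-bead neighbour list padded with -1, which is the same layout prepare_bonds_old walks when it builds the per-molecule bond graph. A small sketch of that conversion on a hypothetical four-bead branched topology:

```python
import networkx as nx
import numpy as np

# Hypothetical topology: bead 0 bonded to 1 and 2, bead 2 bonded to 3.
bonds = np.array([
    [1, 2, -1],
    [0, -1, -1],
    [0, 3, -1],
    [2, -1, -1],
])

bond_graph = nx.Graph()
for bead, neighbours in enumerate(bonds):
    bond_graph.add_node(bead)
    for other in neighbours:
        if other != -1:          # -1 entries are padding, not bonds
            bond_graph.add_edge(bead, int(other))

print(sorted(bond_graph.edges()))   # [(0, 1), (0, 2), (2, 3)]
```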
+ CONF["bond_2"] = ( + Bond("BB", "SC", 0.27, 100000), + Bond("BB", "BB", 0.35, 4000), + ) + CONF["bond_3"] = ( + Angle("BB", "BB", "BB", 127, 20), + Angle("BB", "BB", "SC", 100, 25), + #Angle("SC", "BB", "BB", 100, 25), # In martini they have only the first angle of this type + ) + # Symbolic arrays of 1s and 0s for analytical check + CONF["bond_4"] = ( + Dihedral( + "BB", "BB", "BB", "BB", + [1 for _ in range(5)], + [0 for _ in range(5)], + ), + ) + for k, v in {"Np": 8, "types": 2, "mass": 72.0, "L": [5.0, 5.0, 5.0]}.items(): + CONF[k] = v + return indices, bonds, names, molecules, r, CONF + @pytest.fixture() def h5py_molecules_file(mpi_file_name): n_particles = 1000 diff --git a/test/test_force.py b/test/test_force.py index 56e9dc43..0a2a9ac4 100644 --- a/test/test_force.py +++ b/test/test_force.py @@ -2,16 +2,18 @@ import numpy as np from force import compute_bond_forces__plain as compute_bond_forces from force import compute_angle_forces__plain as compute_angle_forces +from force import compute_dihedral_forces__plain as compute_dihedral_forces from force import prepare_bonds_old as prepare_bonds from input_parser import Config - +# This doesn't check fortran routines? +# fmt: off def test_prepare_bonds_2(dppc_single): indices, bonds, names, molecules, r, CONF = dppc_single config = Config(n_steps=1, time_step=0.03, mesh_size=[30, 30, 30], box_size=np.array([13.0, 13.0, 14.0]), sigma=0.5, kappa=1) config.bonds = CONF['bond_2'] - bonds_2, _ = prepare_bonds(molecules, names, bonds, indices, config) + bonds_2, _, _ = prepare_bonds(molecules, names, bonds, indices, config) bonds_2_ind = [b[:2] for b in bonds_2] bonds_2_val = [b[2:] for b in bonds_2] @@ -40,7 +42,7 @@ def test_comp_bonds(dppc_single): config = Config(n_steps=1, time_step=0.03, mesh_size=[30, 30, 30], box_size=np.array([13.0, 13.0, 14.0]), sigma=0.5, kappa=1) config.bonds = CONF['bond_2'] - bonds_2, _ = prepare_bonds(molecules, names, bonds, indices, config) + bonds_2, _, _ = prepare_bonds(molecules, names, bonds, indices, config) expected_energies = np.array([0.24545803261508981, 0.76287125411373635, @@ -98,7 +100,7 @@ def test_prepare_bonds_3(dppc_single): config = Config(n_steps=1, time_step=0.03, mesh_size=[30, 30, 30], box_size=np.array([13.0, 13.0, 14.0]), sigma=0.5, kappa=1) config.angle_bonds = CONF['bond_3'] - _, bonds_3 = prepare_bonds(molecules, names, bonds, indices, config) + _, bonds_3, _ = prepare_bonds(molecules, names, bonds, indices, config) bonds_3_ind = [b[:3] for b in bonds_3] bonds_3_val = [b[3:] for b in bonds_3] @@ -125,7 +127,7 @@ def test_comp_angles(dppc_single): config = Config(n_steps=1, time_step=0.03, mesh_size=[30, 30, 30], box_size=np.array([13.0, 13.0, 14.0]), sigma=0.5, kappa=1) config.angle_bonds = CONF['bond_3'] - _, bonds_3 = prepare_bonds(molecules, names, bonds, indices, config) + _, bonds_3, _ = prepare_bonds(molecules, names, bonds, indices, config) expected_energies = np.array([0.24138227262192161, 12.962077271327919, @@ -180,3 +182,92 @@ def test_comp_angles(dppc_single): abs=1e-13) assert f_angles[b[2], :] == pytest.approx(expected_forces_k[i], abs=1e-13) + +def test_prepare_bonds_4(alanine_octapeptide): + indices, bonds, names, molecules, r, CONF = alanine_octapeptide + config = Config(n_steps=1, time_step=0.03, mesh_size=[30, 30, 30], + box_size=np.array([5.0, 5.0, 5.0]), sigma=0.5, kappa=1) + config.dihedrals = CONF['bond_4'] + _, _, bonds_4 = prepare_bonds(molecules, names, bonds, indices, config) + bonds_4_ind = [b[:4] for b in bonds_4] + bonds_4_val = [b[4:] for b in 
bonds_4] + + assert len(bonds_4) == 5 + + expected = [ + [0, 2, 4, 6, [1 for _ in range(5)], [0 for _ in range(5)]], + [2, 4, 6, 8, [1 for _ in range(5)], [0 for _ in range(5)]], + [4, 6, 8, 10, [1 for _ in range(5)], [0 for _ in range(5)]], + [6, 8, 10, 12, [1 for _ in range(5)], [0 for _ in range(5)]], + [8, 10, 12, 14, [1 for _ in range(5)], [0 for _ in range(5)]], + ] + for e in expected: + assert e[:4] in bonds_4_ind + for ind, val in zip(bonds_4_ind, bonds_4_val): + if ind == e[:4]: + # check if this works for arrays? + assert e[4] == pytest.approx(val[0], abs=1e-13) + assert e[5] == pytest.approx(val[1], abs=1e-13) + + +def test_comp_dihedrals(alanine_octapeptide): + indices, bonds, names, molecules, r, CONF = alanine_octapeptide + config = Config(n_steps=1, time_step=0.03, mesh_size=[30, 30, 30], + box_size=np.array([5.0, 5.0, 5.0]), sigma=0.5, kappa=1) + config.dihedrals = CONF['bond_4'] + _, _, bonds_4 = prepare_bonds(molecules, names, bonds, indices, config) + + expected_energies = np.array([ + 5.512306711980792, + 5.501816505737047, + 5.4980072293920745, + 5.50682559136485, + 5.898283428858097], + dtype=np.float64 + ) + expected_forces_i = np.array([ + [4.167404131528236, 5.964780465237133, 6.027290456833508], + [-3.88119996455415, -3.364625086216446, -7.922274353931893], + [1.3438193964197254, 1.721469069878616, 9.164234842488652], + [-0.5176523283261323, 1.4537046377291065, -9.32030878092003], + [-1.303516982371019, -1.850843743470206, 5.13282909125343]], + dtype=np.float64 + ) + expected_forces_j = np.array([ + [-4.374426036878856, -7.901212696584612, -4.597926617980788], + [5.786331030404696, 4.600082932906689, 7.009782945316923], + [-1.9629100610700134, -4.100983501069195, -9.046763701062986], + [2.5517409922373115, -0.20115938578524672, 9.79905918235492], + [0.9717585303396479, 0.9912206160390491, -4.7894353408271675]], + dtype=np.float64 + ) + expected_forces_k = np.array([ + [-3.6709627463157175, -1.4254054894919777, -9.345075121810787], + [-0.5590864711877845, 0.4888617624497804, 10.091901087338766], + [0.10213174348825493, 3.831271798222488, -9.42529515500928], + [-4.236386262425887, -4.37955378853861, 8.193186622885671], + [1.8761151329061132, 3.5095816408063003, -5.2916344246712885]], + dtype=np.float64 + ) + expected_forces_l = np.array([ + [3.8779846516663383, 3.3618377208394574, 7.915711282958067], + [-1.346044594662762, -1.7243196091400235, -9.179409678723795], + [0.5169589211620331, -1.4517573670319093, 9.307824013583614], + [2.202297598514707, 3.1270085365947504, -8.671937024320561], + [-1.544356680874742, -2.649958513375143, 4.948240674245026]], + dtype=np.float64 + ) + + for i, b in enumerate(bonds_4): + f_dihedrals = np.zeros(shape=r.shape, dtype=np.float64) + energy = 0.0 + energy = compute_dihedral_forces(f_dihedrals, r, (b,), CONF['L']) + assert energy == pytest.approx(expected_energies[i], abs=1e-13) + assert f_dihedrals[b[0], :] == pytest.approx(expected_forces_i[i], + abs=1e-13) + assert f_dihedrals[b[1], :] == pytest.approx(expected_forces_j[i], + abs=1e-13) + assert f_dihedrals[b[2], :] == pytest.approx(expected_forces_k[i], + abs=1e-13) + assert f_dihedrals[b[3], :] == pytest.approx(expected_forces_l[i], + abs=1e-13) diff --git a/utils/gmx_2_hymd.py b/utils/gmx_2_hymd.py new file mode 100644 index 00000000..1843cbc4 --- /dev/null +++ b/utils/gmx_2_hymd.py @@ -0,0 +1,51 @@ +import numpy as np +import os +from .topologyParser import gmx_to_h5_from_more_hand +import pandas as pd + +print("------------------- starting ") + +# provide file names, type 
names ("W" for water), masses (72 is default), charges +work_folder = "./" +out_h5_filename = "converted.h5" +in_gro_file = "start.gro" +in_top_name = "topol.top" +atomtype_id = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9]) +atomtype_name = np.array(["T", "C", "B", "S", "O", "M", "N", "W", "F"]) +atomtype_mass = np.array([72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0]) +atomtype_charge = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0]) + +print("---- generating atomtype.csv ") +df = pd.DataFrame( + { + "atomtypeID": atomtype_id, + "atomName": atomtype_name, + "atomMass": atomtype_mass, + "atomCharge": atomtype_charge, + } +) +atomtype_csv = os.path.join("./", "atomtype.csv") +df.to_csv(atomtype_csv, index=False) + + +out_h5_file = os.path.join(work_folder, out_h5_filename) +in_gro_file = os.path.join(work_folder, in_gro_file) +in_top_file = os.path.join(work_folder, in_top_name) + +# Set to false if the system isn't charged +electric_label = True + +# the key is the molecule resname from the gmx topology +# the value is the hPF itp file for that molecule +# eg "AZT": "AZT" --> it will read AZT.itp +alias_mol_dict = { + "AZT": work_folder + "AZT", + "W": work_folder + "SOL", + "CL-": work_folder + "CL", +} + +print("--------- generating h5 file ") +gmx_to_h5_from_more_hand( + out_h5_file, in_gro_file, in_top_file, atomtype_csv, alias_mol_dict, electric_label +) +print("----------------------- done ") diff --git a/utils/plot_traj.py b/utils/plot_traj.py index 43df1a25..62a78a05 100644 --- a/utils/plot_traj.py +++ b/utils/plot_traj.py @@ -109,8 +109,10 @@ def extract_property(h5md_file, property, args): "KE": "kinetic_energy", "BE": "bond_energy", "AE": "angle_energy", + "DE": "dihedral_energy", "FE": "field_energy", "P": "total_momentum", + "EE": "field_q_energy", } name = keyword_to_group_name[property] property_group = observables_group[name] @@ -159,7 +161,7 @@ def __init__( if __name__ == "__main__": args = parse_args() if "all" in args.property: - args.property = ["E", "PE", "KE", "BE", "FE", "AE", "P"] + args.property = ["E", "PE", "KE", "BE", "FE", "AE", "DE", "P"] file_path = os.path.abspath(args.file) h5md_file = open_h5md_file(file_path) diff --git a/utils/topologyParser.py b/utils/topologyParser.py new file mode 100644 index 00000000..ed786bf5 --- /dev/null +++ b/utils/topologyParser.py @@ -0,0 +1,2456 @@ +""" + ,----, + ____ ,/ .`| ,-.----. + ,' , `. ,---, ,` .' : ,--, \ / \ + ,-+-,.' _ | .' .' `\ ; ; / ,-.----. ,--.'| | : \ + ,-+-. ; , ||,---.' \ .'___,/ ,' ,---. \ / \ ,---. | | : ,---. | | .\ : __ ,-. __ ,-. + ,--.'|' | ;|| | .`\ | | : | ' ,'\ | : | ' ,'\ : : ' ' ,'\ ,----._,. . : |: | ,' ,'/ /| .--.--. ,' ,'/ /| +| | ,', | ':: : | ' | ; |.'; ; / / || | .\ : / / || ' | / / | / / ' / | | \ : ,--.--. ' | |' | / / ' ,---. ' | |' | +| | / | | ||| ' ' ; : `----' | |. ; ,. :. : |: |. ; ,. :' | | . ; ,. :| : | | : . // \ | | ,'| : /`./ / \ | | ,' +' | : | : |,' | ; . | ' : ;' | |: :| | \ :' | |: :| | : ' | |: :| | .\ . ; | |`-'.--. .-. |' : / | : ;_ / / |' : / +; . | ; |--' | | : | ' | | '' | .; :| : . |' | .; :' : |__' | .; :. ; '; | | | ; \__\/: . .| | ' \ \ `. . ' / || | ' +| : | | , ' : | / ; ' : || : |: |`-'| : || | '.'| : |' . . | : ' | ," .--.; |; : | `----. \' ; /|; : | +| : ' |/ | | '` ,/ ; |.' \ \ / : : : \ \ / ; : ;\ \ / `---`-'| | : : : / / ,. || , ; / /`--' /' | / || , ; +; | |`-' ; : .' '---' `----' | | : `----' | , / `----' .'__/\_: | | | : ; : .' \---' '--'. / | : | ---' +| ;/ | ,.' 
`---'.| ---`-' | : : `---'.| | , .-./ `--'---' \ \ / +'---' '---' `---` \ \ / `---` `--`---' `----' + `--`-' +MD Topology Parser +adapted from gmxParse.py +xinmengli2020@gmal.com +noted 2021-04-23 Oslo +'ascii art from http://patorjk.com/software/taag/#p=display&f=Graffiti&t=Type%20Something%20' + + +- here the class is more like struct as not method is defined +- could define a general class with maximum information; less efficient than xx_convert2_yy() +""" + +import h5py +import sys +import re +import numpy as np +import math as m +import time +import collections ## ordered dictionary +import pandas as pd +import os +import numpy.ma as ma ## mask array to get the bond indices +from itertools import combinations, product +import matplotlib +matplotlib.use('Agg') +import matplotlib.pyplot as plt +import seaborn as sns +import toml +from pathlib import Path +from glob import glob +import shutil + +class ItpMolecule: + def __init__(self, molname, molnum): + self.molname = molname + self.molnum = molnum + +class ItpAtom: + def __init__(self,index,atomtype,resnr,resname,atomname,cgnr, charge,mass): + self.index = index + self.atomtype = atomtype + self.resnr = resnr + self.resname = resname + self.atomname = atomname + self.cgnr = cgnr + self.charge = charge + self.mass = mass + + +class ItpBond: + def __init__(self, head,tail,func,length,strength): + self.head = head + self.tail = tail + self.func = func + self.length = length + self.strength = strength + +class ItpAngle: + def __init__(self, up, middle, down, num, angle0,strength,nameA,nameB,nameC): + self.up = up + self.middle = middle + self.down = down + self.num = num + self.angle0 = angle0 + self.strength = strength + self.nameA = nameA + self.nameB = nameB + self.nameC = nameC + + +class ItpVdwPair: + """ + e.g. 
martini [ nonbond_params ] + """ + def __init__(self, vdw_head, vdw_tail, vdw_func, vdw_c6, vdw_c12 ): + self.vdwHeadType = vdw_head + self.vdwTailType = vdw_tail + self.vdwFunc = vdw_func + self.vdwC6 = vdw_c6 + self.vdwC12 = vdw_c12 + + if self.vdwHeadType == self.vdwTailType: + self.sametype = True + else: + self.sametype = False + + self.epsilon = self.vdwC6 **2 / (4*self.vdwC12) + self.sigma = (self.vdwC12/self.vdwC6)**(1/6) + + + + + + + + +#class ItpMartiniVdw: +# """ +# added 06-07-2021 +# df = pd.DataFrame({ 'vdwHeadType':vdw_head, +# 'vdwTailType':vdw_tail, +# 'vdwSigma' :vdw_sigma , +# 'vdwEpsilon' :vdw_epsilon, +# 'vdwC6' :vdw_c6, +# 'vdwC12' :vdw_c12, +# 'vdwKai' :vdw_kai +# }) +# """ +# def __init__(self, vdw_head, vdw_tail, vdw_sigma, vdw_epsilon, vdw_c6, vdw_c12, vdw_kai): +# self.vdwHeadType = vdw_head +# self.vdwTailType = vdw_tail +# self.vdwSigma = vdw_sigma +# self.vdwEpsilon = vdw_epsilon +# self.vdwC6 = vdw_c6 +# self.vdwC12 = vdw_c12 +# self.vdwKai = vdw_kai +# + + + +def get_parameter_angle(index1,index2,index3,itpAtoms_F,atoms_F): + for itpAtom in itpAtoms_F: + if itpAtom.index == index1: + nameA_F = itpAtom.name + massA_F = itpAtom.mass + if itpAtom.index == index2: + nameB_F = itpAtom.name + massB_F = itpAtom.mass + if itpAtom.index == index3: + nameC_F = itpAtom.name + massC_F = itpAtom.mass + + if (massA_F > 1.008000) and (massB_F > 1.008000) and (massC_F > 1.008000): + strength_F = STRENGTH2 + else: + strength_F = STRENGTH1 + + for atom in atoms_F: + if atom.index == index1: + up_x = atom.x + up_y = atom.y + up_z = atom.z + if atom.index == index2: + middle_x = atom.x + middle_y = atom.y + middle_z = atom.z + if atom.index == index3: + down_x = atom.x + down_y = atom.y + down_z = atom.z + + xx = np.array([up_x-middle_x, up_y-middle_y, up_z-middle_z]) + yy = np.array([down_x-middle_x, down_y-middle_y, down_z-middle_z]) + + lxx = np.sqrt(xx.dot(xx)) + lyy = np.sqrt(yy.dot(yy)) + + cos_angle = xx.dot(yy)/(lxx*lyy) + angle_F = np.arccos(cos_angle)*180/np.pi + + return angle_F, strength_F,nameA_F,nameB_F,nameC_F + + +def get_parameter_angle_from_typecsv(up,middle,down,itpAtom_list,df_atom,df_angle): + """ + - the trouble some here is the angle types are define accoridg to the atomtypeID, which is missing in the itpAtom_list and has to get the atomtypeID using the df_atom information + #### df_atom atomtypeID,atomName,atomMass,atomCharge + #### df_angle angleUpType,angleMiddleType,angleDownType,angleTheta,angleStrength + #### ---> up, middle, down, num, angle0,strength,nameA,nameB,nameC + """ + _FUNC = 1 + func = 1 + up_atomname = itpAtom_list[up-1].atomname + middle_atomname = itpAtom_list[middle-1].atomname + down_atomname = itpAtom_list[down-1].atomname + + [up_atomtypeID, middle_atomtypeID, down_atomtypeID] = [ df_atom.copy()[df_atom["atomName"].isin([x])].atomtypeID.values.tolist()[0] for x in [up_atomname, middle_atomname, down_atomname ] ] + ## fas operation for the below; see e.g. 
https://www.programiz.com/python-programming/list + #_item_atom = df_atom.copy()[df_atom["atomName"].isin([up_atomname])] + #up_atomtypeID = _item_atom.atomtypeID.values.tolist()[0] + #print(up_atomname, middle_atomname, down_atomname, up_atomtypeID, middle_atomtypeID, down_atomtypeID) + + _angleitem = df_angle.loc[(df_angle['angleUpType']==up_atomtypeID) & (df_angle['angleMiddleType']==middle_atomtypeID) & (df_angle['angleDownType']==down_atomtypeID) ] + if _angleitem.empty: + _angleitem = df_angle.loc[(df_angle['angleUpType']==down_atomtypeID) & (df_angle['angleMiddleType']==middle_atomtypeID) & (df_angle['angleDownType']==up_atomtypeID) ] + if _angleitem.empty: + raise Exception("ERROR, angle parameters not found") + + angle0 = _angleitem.angleTheta.values.tolist()[0] + strength = _angleitem.angleStrength.values.tolist()[0] + #itpAngle_list.append(ItpAngle(up, middle, down, func, angle0,strength,up_atomname,middle_atomname,down_atomname)) + + return ItpAngle(up, middle, down, func, angle0,strength,up_atomname,middle_atomname,down_atomname) + + +def write_singlebead_itp( itp_file,molName, atomtype, atomcsv): + + itp_lines = [] + _nrexcl = 1 + ###### section moleculetype + itp_lines.append("[ moleculetype ]") + itp_lines.append("; molname nrexcl") + itp_lines.append(f"{molName} {_nrexcl}") + itp_lines.append('') + + ###### section atoms + itp_lines.append("[ atoms ]") + itp_lines.append("; id type resnr residu atom cgnr charge mass") + + df_atomtype = pd.read_csv( atomcsv ) + _source_item = df_atomtype[ df_atomtype["atomName"].isin([ atomtype ]) ] + charge = _source_item.atomCharge.values.tolist()[0] + mass = _source_item.atomMass.values.tolist()[0] + itp_lines.append(f" {1} {atomtype} {1} {molName} {atomtype} {1} {charge} {mass}") + itp_lines.append('') + + ##for line in itp_lines: + ## print(line) + ###### write out + + f=open( itp_file ,'w') + s1='\n'.join(itp_lines) + f.write(s1) + f.write('\n') + f.write('\n') + f.close() + + + + + + +def write_molecule_itp( itp_file,molName, itpAtoms,itpBonds,itpAngles): + + itp_lines = [] + _nrexcl = 1 + ###### section moleculetype + itp_lines.append("[ moleculetype ]") + itp_lines.append("; molname nrexcl") + itp_lines.append(f"{molName} {_nrexcl}") + itp_lines.append('') + + ###### section atoms + itp_lines.append("[ atoms ]") + itp_lines.append("; id type resnr residu atom cgnr charge mass") + for item in itpAtoms: + ## index,atomtype,resnr,resname,atomname,cgnr, charge,mass + itp_lines.append(f" {item.index} {item.atomtype} {item.resnr} {item.resname} {item.atomname} {item.cgnr} {item.charge} {item.mass}") + itp_lines.append('') + + ###### section bonds + itp_lines.append("[ bonds ]") + itp_lines.append("; i j funct length strength") + for item in itpBonds: + ## head,tail,func,length,strength + itp_lines.append(f" {item.head} {item.tail} {item.func} {item.length} {item.strength} ") + itp_lines.append('') + + ###### section angles + itp_lines.append("[ angles ]") + itp_lines.append("; i j k funct angle strength") + for item in itpAngles: + ## up, middle, down, num, angle0,strength,nameA,nameB,nameC + itp_lines.append(f" {item.up} {item.middle} {item.down} {item.num} {item.angle0} {item.strength} ; {item.nameA} {item.nameB} {item.nameC} ") + itp_lines.append('') + + ##for line in itp_lines: + ## print(line) + ###### write out + + f=open( itp_file ,'w') + s1='\n'.join(itp_lines) + f.write(s1) + f.write('\n') + f.write('\n') + f.close() + + + + + +def write_top_file(topfile,casename, ffpath, topcsv): + + top_lines = [] + + df_molecules = 
pd.read_csv( topcsv ) ## molName, molNum + molecules = df_molecules.molName.values.tolist() + nums = df_molecules.molNum.values.tolist() + print( molecules, nums ) + + ##### section: include the forcefield.itp + top_lines.append(f"; include forcefield.tip") + ff_file = os.path.join(ffpath, 'forcefield.itp') + top_lines.append(f"#include \"{ff_file}\"") + top_lines.append('') + + ##### section: include separate molecule itp files + for molecule in molecules: + molecule_itp_file = os.path.join(ffpath, f"{molecule}.itp") + top_lines.append(f"#include \"{molecule_itp_file }\"") + top_lines.append('') + + #### section: [ system ] + top_lines.append("[ system ]") + top_lines.append(f"{casename}") + top_lines.append('') + + #### section: [ molecules ] + top_lines.append("[ molecules ]") + for name, num in zip(molecules, nums): + top_lines.append(f"{name} {num}") + top_lines.append('') + + for line in top_lines: + print(line) + + #in_top_file = _df1.emTop.values[0] + #out_top_file = os.path.join(work_folder, "md.top") + #change_line_list = [] ## single chain!! + #for chain in chains: + # line = f'Protein_chain_{chain}' + # change_line_list.append(line) + #list_top_file = [] + #with open(in_top_file,'r') as f: + # data = f.readlines() + # for line in data: + # line_new = line.strip('\n') + # try: + # if line_new.split()[0] in change_line_list and int(line_new.split()[1]) == 1: ## some may not have the second thus do not merge to a same condition + # #print(line_new) + # #### == 1 , excludes the + # #### [ moleculetype ] + # #### ; Name nrexcl + # #### Protein_chain_A 60 + # list_top_file.append(line_new.split()[0] + ' ' + str(N_repeat)) + # else: + # list_top_file.append(line_new) + # except: + # list_top_file.append(line_new) + ##list_top_file.append( "\n " ) + ############################### write out top file + for _line in top_lines: + print(_line) + f=open(topfile,'w') + s1='\n'.join(top_lines) + f.write(s1) + f.write('\n') + f.close() + + + + + + + +def write_ff_itp_file(path, ffname, vdwcsv, atomcsv, bondcsv, anglecsv=False, dihedralcsv=False): + ''' + ----- this one follow the martini type e.g. 
http://cgmartini.nl/images/parameters/ITP/martini_v2.2.itp; ffnonbonded.itp and ffbonded.itp are include inside the file explicltiy + - about the nonbond_params: cit[1] https://manual.gromacs.org/documentation/2019.1/reference-manual/topologies/parameter-files.html + - _nbfunc=1 + - _comb_rule=1 ; use the v(c6) and w(c12) + ''' + _nbfunc=1 + _comb_rule=1 + _element_num = 36 ## NOW the element number is set to a fix value + _ptype = 'A' ##By default + _hymd_label = ';===>HymdKai' + _func_nonbond = 1 + _func_bond = 1 + _func_angle = 1 + float_accu = 8 + ###### + itp_lines = [] + ###### note + #_note = '; DEMOOOOOO ' + #itp_lines.append(_note) + #itp_lines.append('') + ###### define ff name + itp_lines.append(f"#define _FF_{ffname}") + itp_lines.append('') + ###### section: default + itp_lines.append(f"[ defaults ]") + itp_lines.append("; nbfunc comb-rule") + itp_lines.append(f"{_nbfunc} {_comb_rule}") + itp_lines.append('') + ###### section ffnonbonded.itp [ atomtypes ] + ## name at.num mass charge ptype V(c6) W(c12) + ## in the martini itp the the columns are treated as + ## name mass charge ptype c6 c12 + ## following the cit[1], add the at.num all as 36 + itp_lines.append("[ atomtypes ]") + itp_lines.append("; name at.num mass charge ptype V(c6) W(c12)") + df_vdw = pd.read_csv( vdwcsv ) ## contains the vdw self-and pair lj interaction + ## extraBurden, has to read the atomcsv to get the atom type names; need to get the names of atomtypes in atomcsv + df_atomtype = pd.read_csv( atomcsv ) + + ## loop and add the atomtypes + for index, row in df_vdw.iterrows(): + ## print(index, int(row["vdwHeadType"]), type(row["vdwHeadType"])) + if int(row["vdwHeadType"]) == int(row["vdwTailType"]): + check_id = int(row["vdwHeadType"]) + _source_item = df_atomtype[ df_atomtype["atomtypeID"].isin([ check_id ]) ] + check_id_name = _source_item.atomName.values.tolist()[0] + check_id_mass = _source_item.atomMass.values.tolist()[0] + check_id_charge = _source_item.atomCharge.values.tolist()[0] + check_id_vdwC6 = np.round(row["vdwC6"], float_accu) + check_id_vdwC12 = np.round(row["vdwC12"],float_accu) + check_id_kai = np.round(row["vdwKai"],float_accu) + #print( check_id, check_id_name ) + itp_lines.append(f" {check_id_name} {_element_num} {check_id_mass} {check_id_charge} {_ptype} {check_id_vdwC6} {check_id_vdwC12} {_hymd_label} {check_id_kai}") + + ###### section ffnonbonded.itp nonbonded pair intereacitons + ### ; i j func V(c6) W(c12) + itp_lines.append('') + itp_lines.append("[ nonbond_params ]") + itp_lines.append("; i j func V(c6) W(c12)") + for index, row in df_vdw.iterrows(): + if int(row["vdwHeadType"]) != int(row["vdwTailType"]): + check_id_i = int(row["vdwHeadType"]) + check_id_j = int(row["vdwTailType"]) + _source_item_i = df_atomtype[ df_atomtype["atomtypeID"].isin([ check_id_i ]) ] + _source_item_j = df_atomtype[ df_atomtype["atomtypeID"].isin([ check_id_j ]) ] + check_id_i_name = _source_item_i.atomName.values.tolist()[0] + check_id_j_name = _source_item_j.atomName.values.tolist()[0] + ##print(check_id_i_name, check_id_j_name) + check_vdwC6 = np.round(row["vdwC6"], float_accu) + check_vdwC12 = np.round(row["vdwC12"],float_accu) + check_kai = np.round(row["vdwKai"],float_accu) + itp_lines.append(f" {check_id_i_name} {check_id_j_name} {_func_nonbond} {check_vdwC6} {check_vdwC12} {_hymd_label} {check_kai}") + + ###### section [ bondtypes ] e.g. 
from ffbonded.itp + df_bondtype = pd.read_csv( bondcsv ) + itp_lines.append('') + itp_lines.append("[ bondtypes ]") + itp_lines.append("; i j func b0 kb ") + for index, row in df_bondtype.iterrows(): + check_id_i = int(row["bondHeadType"]) + check_id_j = int(row["bondTailType"]) + _source_item_i = df_atomtype[ df_atomtype["atomtypeID"].isin([ check_id_i ]) ] + _source_item_j = df_atomtype[ df_atomtype["atomtypeID"].isin([ check_id_j ]) ] + check_id_i_name = _source_item_i.atomName.values.tolist()[0] + check_id_j_name = _source_item_j.atomName.values.tolist()[0] + check_length = np.round(row["bondLength"], float_accu) + check_strength = np.round(row["bondStrength"],float_accu) + itp_lines.append(f" {check_id_i_name} {check_id_j_name} {_func_bond} {check_length} {check_strength}") + + ###### section [ angletypes ] + if anglecsv: + df_angletype = pd.read_csv( anglecsv ) + itp_lines.append('') + itp_lines.append("[ angletypes ]") + itp_lines.append("; i j k func th0 cth ") + for index, row in df_angletype.iterrows(): + check_id_i = int(row["angleUpType"]) + check_id_j = int(row["angleMiddleType"]) + check_id_k = int(row["angleDownType"]) + _source_item_i = df_atomtype[ df_atomtype["atomtypeID"].isin([ check_id_i ]) ] + _source_item_j = df_atomtype[ df_atomtype["atomtypeID"].isin([ check_id_j ]) ] + _source_item_k = df_atomtype[ df_atomtype["atomtypeID"].isin([ check_id_j ]) ] + check_id_i_name = _source_item_i.atomName.values.tolist()[0] + check_id_j_name = _source_item_j.atomName.values.tolist()[0] + check_id_k_name = _source_item_k.atomName.values.tolist()[0] + check_theta = np.round(row["angleTheta"], float_accu) + check_strength = np.round(row["angleStrength"],float_accu) + itp_lines.append(f" {check_id_i_name} {check_id_j_name} {check_id_j_name} {_func_angle} {check_theta} {check_strength}") + + #for _line in itp_lines: + # print(_line) + + out_itp_file = os.path.join(path, 'forcefield.itp') + f=open( out_itp_file ,'w') + s1='\n'.join(itp_lines) + f.write(s1) + f.write('\n') + f.write('\n') + f.close() + + + +def extract_itp_from_fort5_fort3csv( fort5_atoms, resname, atomcsv, bondcsv, vdwcsv, anglecsv=False, dihedralcsv=False): + """ + - ITP reference: from https://manual.gromacs.org/documentation/current/reference-manual/topologies/topology-file-formats.html + - ===> itp atoms seciton: index,atomtype,resnr,resname,atomname,cgnr, charge ,mass + ==== e.g. 1 C 1 URE C 1 0.880229 12.01000 + fort5_atoms exp: atomid,atomname,atomtypeID,bondnum,x,y,z,vx,vy,vz,bond1,bond2,bond3,bond4,bond5,bond6 + RESNR = 1 ## here only process one molecule/residue + + - ===> itp bonds seciton: head, tail, func, length,strength + ==== e.g. 
5 21 1 0.68031 500.00000 + FUNC = 1 ## here by default only use bond type = 1 + """ + df_atom = pd.read_csv( atomcsv) ## atomtypeID,atomName,atomMass,atomCharge + df_bond = pd.read_csv( bondcsv) ## bondHeadType,bondTailType,bondLength,bondStrength + df_vdw = pd.read_csv( vdwcsv ) + ##print(df_vdw) + + #### atoms section + RESNR = 1 + itpAtom_list = [] + for atom in fort5_atoms: + _item_atom = df_atom.copy()[df_atom["atomtypeID"].isin([atom.atomtypeID])] + index = atom.atomid + atomtype = atom.atomname #atom.atomtypeID; + resnr = RESNR + resname = resname + atomname = atom.atomname + cgnr = atom.atomid + charge = _item_atom.atomCharge.values.tolist()[0] + mass = _item_atom.atomMass.values.tolist()[0] + itpAtom_list.append (ItpAtom(index,atomtype,resnr,resname,atomname,cgnr, charge ,mass )) + #for itp in itpAtom_list: + # print('itpatom',itp.__dict__) + + #### bonds secttion + FUNC = 1 + itpBond_list = [] # head, tail, func, length,strength + + fort5_bond_list = [] + for atom in fort5_atoms: + head = atom.atomid + head_type = atom.atomtypeID + ##print([ atom.bond1,atom.bond2,atom.bond3,atom.bond4,atom.bond5,atom.bond6]) + for j in [atom.bond1,atom.bond2,atom.bond3,atom.bond4,atom.bond5,atom.bond6]: + if j != 0: + tail = j + tail_type = fort5_atoms[j-1].atomtypeID + ##print(head, tail) + if ([head, tail] in fort5_bond_list) or ([tail, head] in fort5_bond_list): + pass + else: + fort5_bond_list.append([head, tail]) + ##print('keep --- ', head, tail) + func = FUNC + #print(head, tail, head_type, tail_type) + #for bond_item in df_bond + #_item_record = df_bond[df_bond["bondHeadType"].isin([head_type])] and df_bond[df_bond["bondTailType"].isin([tail_type])] + _bonditem = df_bond.loc[(df_bond['bondHeadType']==head_type) & (df_bond['bondTailType']==tail_type)] + if _bonditem.empty: + _bonditem = df_bond.loc[(df_bond['bondHeadType']==tail_type) & (df_bond['bondTailType']==head_type)] + if _bonditem.empty: + raise Exception("ERROR, bond parameters not found") + #print(_bonditem) + length = _bonditem.bondLength.values.tolist()[0] + strength = _bonditem.bondStrength.values.tolist()[0] + itpBond_list.append(ItpBond(head,tail,func,length,strength)) + + #for itp in itpBond_list: + # print('itpbond', itp.__dict__) + + ########################### angle part ################################################### + if not anglecsv: + pass + else: + df_angle = pd.read_csv( anglecsv) ## angleUpType,angleMiddleType,angleDownType,angleTheta,angleStrength + FUNC = 1 + itpAngle_list = [] # head, tail, func, length,strength + + #### generate angle list: from bonds to angles + wholeindex = np.array(itpBond_list).tolist() + ###wholeindex = np.arange(len(itpBond_list)).tolist() + checklist = list(zip(wholeindex, wholeindex[1:] + wholeindex[:1])) ## ## reference https://www.geeksforgeeks.org/python-pair-iteration-in-list/ + for pair in checklist: + #print(pair[0].__dict__, pair[1].__dict__) + item1 = pair[0] + item2 = pair[1] + if (item2.head == item1.tail): + up = item1.head + middle = item1.tail + down = item2.tail + itpAngle = get_parameter_angle_from_typecsv(up,middle,down,itpAtom_list,df_atom, df_angle) + itpAngle_list.append(itpAngle) + + #for itp in itpAngle_list: + # print('itpangle',itp.__dict__) + ############## + if dihedralcsv : + return (itpAtom_list, itpBond_list, itpAngle_list, itpDihedral_list ) + elif anglecsv : + return (itpAtom_list, itpBond_list, itpAngle_list) + else: + return (itpAtom_list, itpBond_list) + + + + + + + + + + + +class PdbAtom: + '''the basic class for atom in pdb file, basic format + 
see in http://deposit.rcsb.org/adit/docs/pdb_atom_format.html + ''' + def __init__(self,label,index, name,indicator,residue, chain,resid,insert,x,y,z,occu,temp,seg,element,charge): + self.label = label + self.index = index + self.name = name + self.indicator = indicator + self.residue = residue + self.chain = chain + self.resid = resid + self.insert = insert + self.x = x + self.y = y + self.z = z + self.occu = occu + self.temp = temp + self.seg = seg + self.element = element + self.charge = charge + +class GroAtom: + def __init__(self,resid,residuename, atomname, index, x, y, z): + self.resid = resid + self.residuename = residuename + self.atomname = atomname + self.index = index + self.x = x + self.y = y + self.z = z + + +class Fort5Atom: + """ + atom line example: + 1 T 1 1 18.417 9.410 27.788 0 0 0 2 0 0 0 0 0 + 2 C 2 2 18.381 9.084 27.791 0 0 0 1 3 0 0 0 0 + --> parsing information + index atomname atomtypeID bondnum x y z vx vy vz bond1 bond2 bond3 bond4 bond5 bond6 + """ + def __init__(self,atomid,atomname,atomtypeID,bondnum,x,y,z,vx,vy,vz,bond1,bond2,bond3,bond4,bond5,bond6): + + self.atomid = atomid + self.atomname = atomname + self.atomtypeID = atomtypeID + self.bondnum = bondnum + self.x = x + self.y = y + self.z = z + self.vx = vx + self.vy = vy + self.vz = vz + self.bond1 = bond1 + self.bond2 = bond2 + self.bond3 = bond3 + self.bond4 = bond4 + self.bond5 = bond5 + self.bond6 = bond6 + + +class Fort5AtomWhole: + """ + extended from Fort5Atom; with self.resid + """ + def __init__(self,resid, atomid,atomname,atomtypeID,bondnum,x,y,z,vx,vy,vz,bond1,bond2,bond3,bond4,bond5,bond6): + + self.resid = resid ## resid starts from 1 ok + self.atomid = atomid + self.atomname = atomname + self.atomtypeID = atomtypeID + self.bondnum = bondnum + self.x = x + self.y = y + self.z = z + self.vx = vx + self.vy = vy + self.vz = vz + self.bond1 = bond1 + self.bond2 = bond2 + self.bond3 = bond3 + self.bond4 = bond4 + self.bond5 = bond5 + self.bond6 = bond6 + +def loadGroPosition(inputfilename): + """ + # 2021-06-10 + BOX SIZE WITH A SPECIFIC GAP + _BOX_ITEM_GAP = ' ' + # 2021-06-16 + just use .split() instead of .split(_BOX_ITEM_GAP) + + """ + _REMARK = 2 + _BOX_ITEM_GAP = ' ' + + with open(inputfilename) as f: + lines = f.readlines() + num_atoms = int(lines[1]) + box_size = np.array( lines[_REMARK+num_atoms].strip('\n').lstrip().split(), dtype=float) + #box_size = np.array( lines[_REMARK+num_atoms].strip('\n').lstrip().split(_BOX_ITEM_GAP), dtype=float) + #print(nun_atoms) + #print(box_size) + demo=[] + GroAtom_list = [] + ## 1ETH CB 1 1.460 1.559 1.491 0.2285 0.2711 -0.7051 + ## 0 1 2 3 4 5 6 + with open(inputfilename,'r') as f: + data = f.readlines() + count = 0 + for line in data[_REMARK:_REMARK+num_atoms]: + count += 1 + #print (line) + list = line.split() + ## the resid and residuename belong to list[0],split it + resid = int(line[0:5])#int( (count-1) / 121 ) +1 + residuename = line[5:10] + atomname = line[10:15] + index = int(line[15:20]) + x = float(line[20:28]) ## double in c, float64 in numpy --> doesn't matter in here + y = float(line[28:36]) + z = float(line[36:44]) + GroAtom_list.append(GroAtom(resid,residuename, atomname, index, x, y, z)) + return GroAtom_list, box_size + + +def loadfort5_whole(inputfilename): + """ + here reads an example file fort.5 file + - reference fort5_to_hdf5.py "hymd/utils" + - + """ + with open(inputfilename, "r") as f: + data = f.readlines() + + box_array = np.array( [float(data[1].split()[i]) for i in range(3)] ) + n_atoms = int(data[-1].split()[0]) + 
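+    # Layout assumed for the fort.5 input, inferred from the indexing used in this
+    # parser rather than from a formal spec: data[1] holds the box dimensions,
+    # data[4] the number of molecules, and the first column of the last line the
+    # highest atom id (taken here as the total atom count).  Each molecule block
+    # then has a 2-line header (residue-id line, atom-count line) followed by one
+    # bead per line.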
n_molecules = int(data[4].split()[0]) + skip_head = 4 ## 5-1 ## after this will be molecule blockes + skip_head_mol = 2 + + line_count = skip_head + atom_list = [] + for i in np.arange( n_molecules) : + resid = int(data[line_count+1].split()[-1]) ## starts from 1 ok + num_atom_in_resid = int(data[line_count+2]) + ## print(i, resid, num_atom_in_resid) ## time.sleep(1) + for line in data[line_count+skip_head_mol+1:line_count+skip_head_mol+num_atom_in_resid+1]: #rightside need + 1 + ## print(line) ## time.sleep(1) + list = line.split() + atomid = int(list[0]) + atomname = list[1] + atomtypeID = int(list[2]) + bondnum = int(list[3]) + x = float(list[4]) + y = float(list[5]) + z = float(list[6]) + vx = float(list[7]) + vy = float(list[8]) + vz = float(list[9]) + bond1 = int(list[10]) + bond2 = int(list[11]) + bond3 = int(list[12]) + bond4 = int(list[13]) + bond5 = int(list[14]) + bond6 = int(list[15]) + atom_list.append(Fort5AtomWhole(resid,atomid,atomname,atomtypeID,bondnum,x,y,z,vx,vy,vz,bond1,bond2,bond3,bond4,bond5,bond6)) + + line_count += (num_atom_in_resid + skip_head_mol) + + return (atom_list, box_array) + + +def fort5whole_write2_gro(in_fort5_atoms, out_gro_file, case_name, box_array,top_mol): + + resname_list = [] + + ## add judge information + if isinstance(top_mol, collections.OrderedDict): + print('top_mol input as OrderedDict') + + for k, v in top_mol.items(): + #print(k, v) + resname_list += [k] * v + ##print(resname_list) + elif isinstance(top_mol, pd.DataFrame): + print('top_mol input is data frame, to be added') + + + with open( out_gro_file,'w') as fo: + fo.write(case_name+'\n') + fo.write("%5d "%( len( in_fort5_atoms) )+'\n') + for atom in in_fort5_atoms: + if (atom.atomid > 99999): + atom.atomid = (atom.atomid % 99999) + _resid = atom.resid + if (atom.resid > 99999): + atom.resid = (atom.resid % 99999) + #fo.write( "%5d%5s%5s%5d%8.3f%8.3f%8.3f"%(atom.resid,atom.residuename,atom.element,atom.index,atom.x,atom.y,atom.z)+'\n') + fo.write( "%5d%-5s%5s%5d%8.3f%8.3f%8.3f"%(atom.resid, resname_list[_resid-1], atom.atomname, atom.atomid,atom.x,atom.y,atom.z)+'\n') + fo.write(" %8.5f %8.5f %8.5f"%(box_array[0], box_array[1], box_array[2]) ) + fo.write('\n') + + +def loadfort5_simple(inputfilename): + """ + here reads an example file solute.5 file + - clean refers to that the first line is the number of atoms + - skip = 1 + """ + skip = 1 + demo=[] + atom_list = [] + with open(inputfilename,'r') as f: + data = f.readlines() + atom_num = int(data[0]) + ##print(atom_num) + count = 0 + for line in data[skip:skip+atom_num]: + count += 1 + ##print (line) + list = line.split() + #### 1 T 1 1 18.417 9.410 27.788 0 0 0 2 0 0 0 0 0 + ###-> 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 + atomid = int(list[0]) + atomname = list[1] + atomtypeID = int(list[2]) + bondnum = int(list[3]) + x = float(list[4]) + y = float(list[5]) + z = float(list[6]) + vx = float(list[7]) + vy = float(list[8]) + vz = float(list[9]) + bond1 = int(list[10]) + bond2 = int(list[11]) + bond3 = int(list[12]) + bond4 = int(list[13]) + bond5 = int(list[14]) + bond6 = int(list[15]) + atom_list.append(Fort5Atom(atomid,atomname,atomtypeID,bondnum,x,y,z,vx,vy,vz,bond1,bond2,bond3,bond4,bond5,bond6)) + return atom_list + +def fort5_write2_gro(in_fort5_atoms, out_gro_file, case_name, molecule_name='MOL', box_x=5,box_y=5,box_z=5): + with open( out_gro_file,'w') as fo: + fo.write(case_name+'\n') + fo.write("%5d "%( len( in_fort5_atoms) )+'\n') + for atom in in_fort5_atoms: + if (atom.atomid > 99999): + atom.atomid = (atom.atomid % 99999) + 
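+        # The .gro line written below is fixed-width: "%5d%-5s%5s%5d%8.3f%8.3f%8.3f"
+        # packs residue number (5 chars), residue name (5, left-justified), atom name
+        # (5), atom number (5) and x/y/z in nm (8.3 each).  Ids are wrapped above
+        # because the integer fields only hold 5 digits.  Illustrative output for the
+        # example bead "1 T ... 18.417 9.410 27.788" from the Fort5Atom docstring:
+        #     1MOL      T    1  18.417   9.410  27.788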
#fo.write( "%5d%5s%5s%5d%8.3f%8.3f%8.3f"%(atom.resid,atom.residuename,atom.element,atom.index,atom.x,atom.y,atom.z)+'\n') + try: + resid = atom.resid + except: + resid = 1 + fo.write( "%5d%-5s%5s%5d%8.3f%8.3f%8.3f"%(resid, molecule_name, atom.atomname, atom.atomid,atom.x,atom.y,atom.z)+'\n') + fo.write(" %8.5f %8.5f %8.5f"%(box_x, box_y, box_z) ) + fo.write('\n') + + + +def write_pdb_file(atoms,bonds,outputfilename): + with open(outputfilename, 'w') as fo: + #remarks + fo.write("REMARK Regenerated by local python code"+'\n') + fo.write("REMARK SOME MOLECULE"+'\n') + for atom in atoms: + fo.write( "%-6s%5d %4s%1s%3s %1s%4d%1s %8.3f%8.3f%8.3f%6.2f%6.2f %4s%2s%2s"%(atom.label,atom.index, atom.name,atom.indicator,atom.residue, atom.chain,atom.resid,atom.insert,atom.x,atom.y,atom.z,atom.occu,atom.temp,atom.seg,atom.element,atom.charge)+'\n') + ## for the '\n',when split if use [78:] will include the \n to the last element, avoid that + + #TER MIDDLE + fo.write("TER %d"%(ATOMNUM+1)) + fo.write('\n') + for bondgroup in bonds: + fo.write("CONECT") + for item in bondgroup: + fo.write("%5d"%item) + fo.write('\n') + #END + fo.write("END"+'\n') + +def write_numpy_to_mol2_file_nocharge(array,outputfilename): + unit_nm_to_ang = 1 #10 + N_MOL = len(array) + with open(outputfilename, 'w') as fo: + fo.write("@MOLECULE"+'\n') + fo.write("MOL"+'\n') + fo.write("%10d 0 0 0 "%(N_MOL) +'\n') + fo.write("SMALL"+'\n') + fo.write("USER_CHARGES"+'\n') + fo.write('\n') + + fo.write("@ATOM"+'\n') + for i in np.arange( N_MOL ): + fo.write( "%8d %8s %10.4f%10.4f%10.4f c3 %8d MOL %13.6f"%( i+1,'C', array[i][0] * unit_nm_to_ang , array[i][1]* unit_nm_to_ang, array[i][2]*unit_nm_to_ang, i+1, 0)+'\n') + fo.write('\n') + fo.write('\n') + + + +def write_numpy_to_mol2_file(array,charges, outputfilename): + unit_nm_to_ang = 10 #10 + N_MOL = len(array) + with open(outputfilename, 'w') as fo: + fo.write("@MOLECULE"+'\n') + fo.write("MOL"+'\n') + fo.write("%10d 0 0 0 "%(N_MOL) +'\n') + fo.write("SMALL"+'\n') + fo.write("USER_CHARGES"+'\n') + fo.write('\n') + + fo.write("@ATOM"+'\n') + for i in np.arange( N_MOL ): + fo.write( "%8d %8s %10.4f%10.4f%10.4f c3 %8d MOL %13.6f"%( i+1,'C', array[i][0] * unit_nm_to_ang , array[i][1]* unit_nm_to_ang, array[i][2]*unit_nm_to_ang, i+1,charges[i])+'\n') + fo.write('\n') + fo.write('\n') + + + + +def WRITE_TRJ_GRO(fp, x, vel,t,nparticles,box): + fp.write('MD of %d mols, t=%.3f\n'%(nparticles,t)) + fp.write('%-10d\n'%(nparticles)) + for i in range(len(x)): + fp.write("%5d%-5s%5s%5d%8.3f%8.3f%8.3f%8.4f%8.4f%8.4f\n"%(i//10+1,'A','A',i+1,x[i,0],x[i,1],x[i,2],vel[i,0],vel[i,1],vel[i,2])) + fp.write("%-5.5f\t%5.5f\t%5.5f\n"%(box[0],box[1],box[2])) + fp.flush() + return fp + + +def gro_to_hdf5( groAtoms, box, top_mol_dict, charges=False): + """ + - reference: fort5_to_hdf5.py + UNFINISHED + """ + box = box + n_atoms = len(groAtoms) + n_molecules = 0 + for k, v in top_mol_dict.items(): + n_molecules += v + #print(n_molecules) + _ndim = 3 + f_hd5 = h5py.File(out_path, "w") + + dset_pos = f_hd5.create_dataset("coordinates", (1, n_atoms, _ndim), dtype="float64") + dset_vel = f_hd5.create_dataset("velocities", (1, n_atoms, _ndim), dtype="float64") + dset_types = f_hd5.create_dataset("types", (n_atoms,), dtype="i") + dset_molecule_index = f_hd5.create_dataset("molecules", (n_atoms,), dtype="i") + dset_indices = f_hd5.create_dataset("indices", (n_atoms,), dtype="i") + dset_names = f_hd5.create_dataset("names", (n_atoms,), dtype="S5") + dset_bonds = f_hd5.create_dataset("bonds", (n_atoms, _ndim), dtype="i") + 
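+    # Input-file structure assembled above (mirroring the fort5_to_hdf5.py utility
+    # referenced in the docstring; the exact schema HyMD expects should be checked
+    # against that script):
+    #   coordinates (1, n_atoms, 3) float64    velocities (1, n_atoms, 3) float64
+    #   types       (n_atoms,)      int        molecules  (n_atoms,)      int
+    #   indices     (n_atoms,)      int        names      (n_atoms,)      S5
+    #   bonds       (n_atoms, 3)    int
+    # plus the "box" and "n_molecules" file attributes set just below.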
+ f_hd5.attrs["box"] = box + f_hd5.attrs["n_molecules"] = n_molecules + molecule_index = 0 + + """ + def write_to_dataset(molecule_lines, molecule_index): + atom_indices = [int(s.split()[0]) for s in molecule_lines] + type_indices = [int(s.split()[2]) for s in molecule_lines] + names = [s.split()[1] for s in molecule_lines] + + for i, line in enumerate(molecule_lines): + atom_index = atom_indices[i] - 1 + type_index = type_indices[i] - 1 + pos_vel = [float(s.replace("D", "E")) for s in line.split()[4:10]] + dset_pos[0, atom_index, :] = np.array(pos_vel[:3]) + dset_vel[0, atom_index, :] = np.array(pos_vel[3:]) + dset_types[atom_index] = type_index + dset_molecule_index[atom_index] = molecule_index + dset_names[atom_index] = np.string_(names[i]) + dset_indices[atom_index] = atom_index + + bonds = [int(s) - 1 for s in line.split()[10:]] + dset_bonds[atom_index] = bonds[:3] + + for i, line in enumerate(lines): + split_line = line.split() + if i > 4 and len(split_line) == 1 and is_int(split_line[0]): + molecule_start_line = i + atoms_in_molecule = int(split_line[0]) + + molecules_lines = lines[ + molecule_start_line + 1 : molecule_start_line + atoms_in_molecule + 1 + ] + write_to_dataset(molecules_lines, molecule_index) + molecule_index += 1 + """ + +def load_kai_matrix_gen_toml(input_kai_full_csv,toml_template, out_toml_folder,out_toml_label): + + df_kai_full = pd.read_csv( input_kai_full_csv ) + #print(df_kai_full) + ### get a list without repeats + df_kai_full_norpt = [] + for indexi, rowi in df_kai_full.iterrows(): + found = False + name_set_i = { rowi['HName'], rowi['TName'] } + for item in df_kai_full_norpt: + name_set_j = { item['HName'], item['TName'] } + if name_set_i == name_set_j: + found = True + break + if found: + continue + else: + df_kai_full_norpt.append(rowi ) + + #for item in df_kai_full_norpt: + # print( item['HName'], item['TName'] ) + ### + + Path(out_toml_folder).mkdir(parents=True, exist_ok=True) + + ori_toml = toml.load(toml_template) + #print(ori_toml['bonds']) + + _Cut_Melt = 5 + print(f'WARNING, _Cut_Melt in this function is set to {_Cut_Melt} by default') + column_names = list(df_kai_full.columns) + ##['HName', 'TName', 'MtnHeadName', 'MtnTailName', 'BiasDist', 'MtnKai', 'MtnKaiNoS', ...] + case_list = column_names[_Cut_Melt:] + case_id = 0 + + for case in case_list: + kai_toml = [] #chi = [ [ ["T", "C"],[-10.800] ], ... 
] + print(case) ## each case generate a toml file + ### OLD use the full csv dataframe + ##kai_value_list = df_kai_full[case].values.tolist() + ##kai_head_list = df_kai_full['HName'].values.tolist() + ##kai_tail_list = df_kai_full['TName'].values.tolist() + #### NEW use the list of series + for item in df_kai_full_norpt: + kai_head = item['HName'] + kai_tail = item['TName'] + kai_value = item[case] + record = [ [kai_head, kai_tail],[round(kai_value, 3)]] + #print(record) + kai_toml.append(record) + + out_toml = ori_toml + out_toml['field']['chi'] = kai_toml + out_toml_file = os.path.join(out_toml_folder, f'id{case_id}_{out_toml_label}_{case}.toml') + f = open(out_toml_file,'w') + toml.dump(out_toml, f) + f.close() + + case_id += 1 + + ####### a simple change and write out + #out_toml = ori_toml + #out_toml['field']['chi'] = [1,2,3] + #case = 'test' + #out_toml_file = os.path.join(out_toml_folder, f'{out_toml_label}_{case}.toml') + #f = open(out_toml_file,'w') + #toml.dump(out_toml, f) + #f.close() + ######## + + +def gen_single_slurm_multitasks2(work_folder, slurm_label, toml_label, num_case=1): + """ + """ + ####################################### setup could from a toml file + job_name = 'HyMD' + budget_account='nn4654k' + run_timelen = '1-0:0:0' + num_nodes = num_case # 1 node per case + cores_pernode = 128 + ####################################### + IN_H5 = 'all.h5' + seed = 10 + ####################################### + + slurm_file = os.path.join(work_folder, f'{slurm_label}.slurm') + with open(slurm_file, 'w') as fo: + fo.write(f'#!/bin/bash'+'\n') + fo.write(f'#SBATCH --job-name={job_name}'+'\n') + fo.write(f'#SBATCH --account={budget_account}'+'\n') + fo.write(f'#SBATCH --time={run_timelen}'+'\n') + fo.write(f'#SBATCH --nodes={num_nodes}'+'\n') + fo.write(f'#SBATCH --ntasks-per-node={cores_pernode}'+'\n') + fo.write("\n") + + fo.write(f'module --quiet purge'+'\n') + fo.write(f'set -o errexit # exit on errors'+'\n') + fo.write(f'module load h5py/2.10.0-foss-2020a-Python-3.8.2'+'\n') + fo.write(f'module load pfft-python/0.1.21-foss-2020a-Python-3.8.2'+'\n') + fo.write(f'set -x'+'\n') + fo.write("\n") + + fo.write(f'OUT_DIR=${{SLURM_SUBMIT_DIR}}'+'\n') + + for i in range(num_case): + fo.write(f'CASE_{i}=${{OUT_DIR}}/case_{i}'+'\n') + fo.write("\n") + + for i in range(num_case): + fo.write(f'mkdir -p ${{CASE_{i}}}' +'\n') + fo.write("\n") + + #### rename the toml file and put in separate folders + for i in range(num_case): + target_file = os.path.join(f'{work_folder}', f'case_{i}.toml') + files = glob(f'{work_folder}/{toml_label}{i}_*') + if len(files) != 1: + raise ValueError('error: non unique toml file!!!') + ori_file = files[0] + shutil.copyfile(ori_file, target_file) + ######################################################### + + fo.write(f'date'+'\n') + for i in range(num_case): + fo.write(f'srun --exclusive --nodes 1 --ntasks {cores_pernode} python3 ${{HOME}}/HyMD-2021/hymd/main.py case_{i}.toml {IN_H5} --logfile=log_{i}.txt --verbose 2 --velocity-output --destdir ${{CASE_{i}}} --seed {seed} ') + if i == num_case-1: + endstr = f'&> /dev/null'+'\n' + else: + endstr = f'&'+'\n' + fo.write(endstr) + fo.write("\n") + + + +def gen_single_slurm_multitasks(work_folder, slurm_label, toml_label, num_case=1): + """ + run all the tasks in single slurm + see example: template-run-betzy-multiple.slurm + + """ + ####################################### setup could from a toml file + job_name = 'HyMD' + budget_account='nn4654k' + run_timelen = '1-0:0:0' + num_nodes = num_case # 1 node per case 
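+    # These scheduler settings are hardcoded defaults (as noted above, they could be
+    # read from a toml file instead).  With num_case = 2 the header written further
+    # down would become, e.g.:
+    #   #SBATCH --job-name=HyMD
+    #   #SBATCH --account=nn4654k
+    #   #SBATCH --time=1-0:0:0
+    #   #SBATCH --nodes=2
+    #   #SBATCH --ntasks-per-node=128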
+ cores_pernode = 128 + ####################################### + IN_H5 = 'all.h5' + seed = 10 + ####################################### + + slurm_file = os.path.join(work_folder, f'{slurm_label}.slurm') + with open(slurm_file, 'w') as fo: + fo.write(f'#!/bin/bash'+'\n') + fo.write(f'#SBATCH --job-name={job_name}'+'\n') + fo.write(f'#SBATCH --account={budget_account}'+'\n') + fo.write(f'#SBATCH --time={run_timelen}'+'\n') + fo.write(f'#SBATCH --nodes={num_nodes}'+'\n') + fo.write(f'#SBATCH --ntasks-per-node={cores_pernode}'+'\n') + fo.write("\n") + + fo.write(f'module --quiet purge'+'\n') + fo.write(f'set -o errexit # exit on errors'+'\n') + fo.write(f'module load h5py/2.10.0-foss-2020a-Python-3.8.2'+'\n') + fo.write(f'module load pfft-python/0.1.21-foss-2020a-Python-3.8.2'+'\n') + fo.write(f'set -x'+'\n') + fo.write("\n") + + fo.write(f'OUT_DIR=${{SLURM_SUBMIT_DIR}}'+'\n') + + for i in range(num_case): + fo.write(f'CASE_{i}=${{OUT_DIR}}/case_{i}'+'\n') + fo.write("\n") + + for i in range(num_case): + fo.write(f'mkdir -p ${{CASE_{i}}}' +'\n') + fo.write("\n") + + #### rename the toml file and put in separate folders + for i in range(num_case): + target_file = os.path.join(f'{work_folder}', f'case_{i}.toml') + files = glob(f'{work_folder}/{toml_label}{i}_*') + if len(files) != 1: + raise ValueError('error: non unique toml file!!!') + ori_file = files[0] + shutil.copyfile(ori_file, target_file) + ######################################################### + + fo.write(f'date'+'\n') + for i in range(num_case): + fo.write(f'srun --exclusive --nodes 1 --ntasks {cores_pernode} python3 ${{HOME}}/HyMD-2021/hymd/main.py case_{i}.toml {IN_H5} --logfile=log_{i}.txt --destdir ${{CASE_{i}}} --seed {seed} ') + if i == num_case-1: + endstr = f'&> /dev/null'+'\n' + else: + endstr = f'&'+'\n' + fo.write(endstr) + fo.write("\n") + + """ + group = ' ' + count = 0 + for i in xrange(total_num_atom): + #if i%121 == 26: + group += str(i+1) + ' ' + count += 1 + if count % 15 == 0: + group += "\n" + fo.write( group + "\n") + """ + + +def gen_multi_slurm_singletask_saga(work_folder, slurm_label, toml_label, num_case=1): + """ + run all the tasks in seperate slurm files on saga + + see example: template-run-betzy-multiple.slurm + """ + ####################################### setup could from a toml file + job_name = 'HyMD' + budget_account='nn4654k' + run_timelen = '6-0:0:0' + #num_nodes = num_case # 1 node per case + cores_pernode = 32 + mem_per_cpu ='1G' + ####################################### + IN_H5 = 'all.h5' + seed = 10 + + ####################################### + for i in np.arange(num_case): + slurm_file = os.path.join(work_folder, f'{slurm_label}_{i}.slurm') + with open(slurm_file, 'w') as fo: + fo.write(f'#!/bin/bash'+'\n') + fo.write(f'#SBATCH --job-name={job_name}'+'\n') + fo.write(f'#SBATCH --account={budget_account}'+'\n') + fo.write(f'#SBATCH --time={run_timelen}'+'\n') + fo.write(f'#SBATCH --ntasks={cores_pernode}'+'\n') + fo.write(f'#SBATCH --mem-per-cpu={mem_per_cpu}'+'\n') + fo.write("\n") + + + fo.write(f'module --quiet purge'+'\n') + fo.write(f'set -o errexit # exit on errors'+'\n') + fo.write(f'module load h5py/2.10.0-foss-2020a-Python-3.8.2'+'\n') + fo.write(f'module load pfft-python/0.1.21-foss-2020a-Python-3.8.2'+'\n') + fo.write(f'set -x'+'\n') + fo.write("\n") + + fo.write(f'OUT_DIR=${{SLURM_SUBMIT_DIR}}'+'\n') + + + fo.write(f'CASE_{i}=${{OUT_DIR}}/case_{i}'+'\n') + fo.write("\n") + + + fo.write(f'mkdir -p ${{CASE_{i}}}' +'\n') + fo.write("\n") + + #### rename the toml file and put in 
separate folders + target_file = os.path.join(f'{work_folder}', f'case_{i}.toml') + files = glob(f'{work_folder}/{toml_label}{i}_*') + if len(files) != 1: + raise ValueError('error: non unique toml file!!!') + ori_file = files[0] + shutil.copyfile(ori_file, target_file) + ######################################################### + + fo.write(f'date'+'\n') + + fo.write(f'srun -n {cores_pernode} python3 ${{HOME}}/HyMD-2021/hymd/main.py case_{i}.toml {IN_H5} --logfile=log_{i}.txt --verbose 2 --velocity-output --destdir ${{CASE_{i}}} --seed {seed} ') + + fo.write("\n") + + + + + + +def add_lift_binary_and_plot(water_types, interface_types, base_colums, kai_input_csv, kai_full_csv, kai_full_pdf, weight, num_lift_try=3, kai_limit=25, kai_neg_limit=-3): + """ + avoid the case that head parts stay inside the cluster + """ + df_kai = pd.read_csv( kai_input_csv ) + column_names = list(df_kai.columns) + _Cut_Melt = 5 + keep_list = column_names[:_Cut_Melt] + + df_kai_out = df_kai[keep_list].copy() + + ### waterish_phase = water_types+ interface_types + ### for simpliciy just add the water_types, interface tune together as others + ### other wise, can/ need to further tune the interface with water phase + waterish_phase = water_types + + for column in base_colums[1:]: + for i in range(num_lift_try): + new_array = [] + for index, row in df_kai.iterrows(): + head = row['HName'] + tail = row['TName'] + kai_value = row[column] + if (head in waterish_phase and tail not in waterish_phase) or (tail in waterish_phase and head not in waterish_phase): + print(f'tune {head} {tail}') + kai_value = kai_value + (i+1)*weight + else: + print(f'not change {head} {tail}') + new_array.append(kai_value) + + df_kai_out[f'{column}Lift{i+1}'] = new_array + + df_kai_out.to_csv(kai_full_csv, index=False) + + + column_names = list(df_kai_out.columns) + print(column_names) + # ['HName', 'TName', 'MtnHeadName', 'MtnTailName', 'BiasDist', 'MtnKai', 'MtnKaiNoS'] + _Cut_Melt = 5 + print(f'WARNING, _Cut_Melt in this function is set to {_Cut_Melt} by default') + keep_list = column_names[:_Cut_Melt] + melt_list = column_names[_Cut_Melt:] + #print(keep_list) + #print(melt_list) + df_kai_melt = pd.melt(df_kai_out, id_vars=keep_list, value_vars=melt_list, var_name='KaiType',value_name='KaiValue') + #print(df_kai_melt) + + """ + ############## two labels: add bias and add calm + ### bias label + label_array = [] + for item in df_kai_melt['KaiType'].to_numpy(): + if item == 'MtnKai': + label_array.append('MtnKai') + elif item == 'MtnKaiNoS': + label_array.append('MtnKaiNoS') + else: + label_array.append( item[:-8]) ## remove the CalmNeg* + df_kai_melt['BiasLabel'] = label_array + + + ### calm label + label_array = [] + for item in df_kai_melt['KaiType'].to_numpy(): + if item == 'MtnKai': + label_array.append('')# -2 + elif item == 'MtnKaiNoS': + label_array.append('')# -1 + else: + label_array.append( int( item[-1]) ) + df_kai_melt['CalmNegLabel'] = label_array + """ + + g = sns.FacetGrid(df_kai_melt, col="HName", hue="KaiType") + g.map(sns.scatterplot, "BiasDist", "KaiValue", alpha=.7) # MtnKai MtnKaiNoS + g.add_legend() + g.set(xticks=[-6, -4, -2, 0, 2, 4, 6], yticks=[-12, 0, 25]) + + #print(g.axes) + #print(shape(g.axes)) + for item in g.axes: + for ax in item: + ax.axhline(0, ls='-.') + ax.axhline(kai_limit, ls='-.') + ax.axhline(kai_neg_limit, ls='-.') + + g.savefig(kai_full_pdf) + + + + ##################### Simple initial load and plot ################ + #df_kai = pd.read_csv( input_kai_csv ) + ### df_atomtype_list = 
df_atomtype_record.to_numpy().tolist() + #column_names = list(df_kai.columns) + #print(column_names) + ## ['HName', 'TName', 'MtnHeadName', 'MtnTailName', 'BiasDist', 'MtnKai', 'MtnKaiNoS'] + #_Cut_Melt = 5 + #print(f'WARNING, _Cut_Melt in this function is set to {_Cut_Melt} by default') + #keep_list = column_names[:_Cut_Melt] + #melt_list = column_names[_Cut_Melt:] + ##print(keep_list) + ##print(melt_list) + #df_kai_melt = pd.melt(df_kai, id_vars=keep_list, value_vars=melt_list, var_name='KaiType',value_name='KaiValue') + ##print(df_kai_melt) + #g = sns.FacetGrid(df_kai_melt, col="HName", hue="KaiType",) + #g.map(sns.scatterplot, "BiasDist", "KaiValue", alpha=.7) # MtnKai MtnKaiNoS + #g.add_legend() + #g.set(xticks=[-6, -4, -2, 0, 2, 4, 6], yticks=[-12, 0, 12]) + #g.savefig(kai_full_pdf) + ################################################################# + + +def add_lift_binary2_and_plot(water_types, interface_types, base_colums, kai_input_csv, kai_full_csv, kai_full_pdf, weight, num_lift_try=3, kai_limit=25, kai_neg_limit=-3): + """ + THIS ONE compared with add_lift_binary_and_plot + increase the repulsion between intereace types with the core part + """ + df_kai = pd.read_csv( kai_input_csv ) + column_names = list(df_kai.columns) + _Cut_Melt = 5 + keep_list = column_names[:_Cut_Melt] + + df_kai_out = df_kai[keep_list].copy() + + waterish_phase = water_types+ interface_types + + + for column in base_colums[1:]: + for i in range(num_lift_try): + new_array = [] + for index, row in df_kai.iterrows(): + head = row['HName'] + tail = row['TName'] + kai_value = row[column] + if (head in water_types and tail not in water_types) or (tail in water_types and head not in water_types): + print(f'tune {head} {tail}') + kai_value = kai_value + (i+1)*weight + + if (head in interface_types and tail not in waterish_phase) or (tail in interface_types and head not in waterish_phase): + kai_value = kai_value + (i+1)*weight + + new_array.append(kai_value) + + df_kai_out[f'{column}LiftB{i+1}'] = new_array + + df_kai_out.to_csv(kai_full_csv, index=False) + + + column_names = list(df_kai_out.columns) + print(column_names) + # ['HName', 'TName', 'MtnHeadName', 'MtnTailName', 'BiasDist', 'MtnKai', 'MtnKaiNoS'] + _Cut_Melt = 5 + print(f'WARNING, _Cut_Melt in this function is set to {_Cut_Melt} by default') + keep_list = column_names[:_Cut_Melt] + melt_list = column_names[_Cut_Melt:] + #print(keep_list) + #print(melt_list) + df_kai_melt = pd.melt(df_kai_out, id_vars=keep_list, value_vars=melt_list, var_name='KaiType',value_name='KaiValue') + #print(df_kai_melt) + + + g = sns.FacetGrid(df_kai_melt, col="HName", hue="KaiType") + g.map(sns.scatterplot, "BiasDist", "KaiValue", alpha=.7) # MtnKai MtnKaiNoS + g.add_legend() + g.set(xticks=[-6, -4, -2, 0, 2, 4, 6], yticks=[-12, 0, 25]) + + #print(g.axes) + #print(shape(g.axes)) + for item in g.axes: + for ax in item: + ax.axhline(0, ls='-.') + ax.axhline(kai_limit, ls='-.') + ax.axhline(kai_neg_limit, ls='-.') + + g.savefig(kai_full_pdf) + + +def add_scale_kai_and_plot(input_kai_csv, out_kai_csv, kai_full_pdf, numera_list, denom=6): + """ + [2021-08-12] + based on add_bias_kai_and_plot + + # We select a refence eps_ij, e.g. 
eps_ref = eps_ww = P4 P4 4.999864058869093 + # then others are defined as + # -6*( eps_ij - eps_ref) + + denom is the 6 + """ + + + df_kai = pd.read_csv( input_kai_csv ) + df_kai_out = df_kai.copy() + + ### generate new columns that add the bias + data = df_kai[['BiasDist', 'MtnKaiNoS']].to_numpy() + for numera in numera_list: + new_array = data[:,1]*(numera/denom) + df_kai_out[f'AddScale{numera}Over{denom}'] = new_array + + df_kai_out.to_csv(out_kai_csv, index=False) + + + column_names = list(df_kai_out.columns) + print(column_names) + # ['HName', 'TName', 'MtnHeadName', 'MtnTailName', 'BiasDist', 'MtnKai', 'MtnKaiNoS'] + _Cut_Melt = 5 + print(f'WARNING, _Cut_Melt in this function is set to {_Cut_Melt} by default') + keep_list = column_names[:_Cut_Melt] + melt_list = column_names[_Cut_Melt:] + #print(keep_list) + #print(melt_list) + df_kai_melt = pd.melt(df_kai_out, id_vars=keep_list, value_vars=melt_list, var_name='KaiType',value_name='KaiValue') + #print(df_kai_melt) + + ############## one label: add scale + ### scale label + label_array = [] + for item in df_kai_melt['KaiType'].to_numpy(): + if item == 'MtnKai': + label_array.append('MtnKai') + elif item == 'MtnKaiNoS': + label_array.append('MtnKaiNoS') + else: + label_array.append( item) ## remove the CalmNeg* + df_kai_melt['ScaleLabel'] = label_array + + + g = sns.FacetGrid(df_kai_melt, col="HName", hue='ScaleLabel') + g.map(sns.scatterplot, "BiasDist", "KaiValue", alpha=.7) # MtnKai MtnKaiNoS + g.add_legend() + g.set(xticks=[-6, -4, -2, 0, 2, 4, 6], yticks=[-12, 0, 25]) + + #print(g.axes) + #print(shape(g.axes)) + for item in g.axes: + for ax in item: + ax.axhline(0, ls='-.') + #ax.axhline(kai_limit, ls='-.') + #ax.axhline(kai_neg_limit, ls='-.') + + g.savefig(kai_full_pdf) + + + + +def add_bias_kai_and_plot(input_kai_csv,out_kai_csv, kai_full_pdf, weight, weightcalm=0, num_bias_try = 3, kai_limit=35, num_calm_neg_try=3, kai_neg_limit=-5.0, linearity=1): + """ + - The weight tunes how strong the bias is added + - the nonlinearity (exponential) tunes the bias length + + - input_kai_csv is the input without adding bias + - out_kai_csv add extra columns with the bias + + - num_calm_neg_try ==> this one infact tunes the surface tension + - kai_neg_limit=-3.0 ===> very negative kai mess up the interacton matrix + + """ + + df_kai = pd.read_csv( input_kai_csv ) + df_kai_out = df_kai.copy() + + ### generate new columns that add the bias + data = df_kai[['BiasDist', 'MtnKaiNoS']].to_numpy() + for i in range(num_bias_try): + new_array = data[:,1] + np.abs(data[:,0])*( weight*(i+1) ) + new_array[new_array > kai_limit] = kai_limit + + #df_kai_out[f'Add{linearity}Bias{i+1}'] = new_array ## this is the one without calm (taking care of very negative values) + for j in range(num_calm_neg_try): + new_array[new_array < kai_neg_limit] += weight*j + df_kai_out[f'Add{linearity}Bias{i+1}CalmNeg{j}'] = new_array + + + + + df_kai_out.to_csv(out_kai_csv, index=False) + + + column_names = list(df_kai_out.columns) + print(column_names) + # ['HName', 'TName', 'MtnHeadName', 'MtnTailName', 'BiasDist', 'MtnKai', 'MtnKaiNoS'] + _Cut_Melt = 5 + print(f'WARNING, _Cut_Melt in this function is set to {_Cut_Melt} by default') + keep_list = column_names[:_Cut_Melt] + melt_list = column_names[_Cut_Melt:] + #print(keep_list) + #print(melt_list) + df_kai_melt = pd.melt(df_kai_out, id_vars=keep_list, value_vars=melt_list, var_name='KaiType',value_name='KaiValue') + #print(df_kai_melt) + + ############## two labels: add bias and add calm + ### bias label + label_array = [] 
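+    # The melted column names follow the pattern f'Add{linearity}Bias{i+1}CalmNeg{j}'
+    # built above, e.g. 'Add1Bias2CalmNeg0'.  Stripping the last 8 characters
+    # ('CalmNeg' plus one digit) recovers the bias label ('Add1Bias2'), and the last
+    # character recovers the calm index (0); this assumes num_calm_neg_try stays
+    # single-digit.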
+ for item in df_kai_melt['KaiType'].to_numpy(): + if item == 'MtnKai': + label_array.append('MtnKai') + elif item == 'MtnKaiNoS': + label_array.append('MtnKaiNoS') + else: + label_array.append( item[:-8]) ## remove the CalmNeg* + df_kai_melt['BiasLabel'] = label_array + + + ### calm label + label_array = [] + for item in df_kai_melt['KaiType'].to_numpy(): + if item == 'MtnKai': + label_array.append('')# -2 + elif item == 'MtnKaiNoS': + label_array.append('')# -1 + else: + label_array.append( int( item[-1]) ) + df_kai_melt['CalmNegLabel'] = label_array + + g = sns.FacetGrid(df_kai_melt, col="HName", row='CalmNegLabel', hue="BiasLabel") + g.map(sns.scatterplot, "BiasDist", "KaiValue", alpha=.7) # MtnKai MtnKaiNoS + g.add_legend() + g.set(xticks=[-6, -4, -2, 0, 2, 4, 6], yticks=[-12, 0, 25]) + + #print(g.axes) + #print(shape(g.axes)) + for item in g.axes: + for ax in item: + ax.axhline(0, ls='-.') + ax.axhline(kai_limit, ls='-.') + ax.axhline(kai_neg_limit, ls='-.') + + g.savefig(kai_full_pdf) + + + ##################### Simple initial load and plot ################ + #df_kai = pd.read_csv( input_kai_csv ) + ### df_atomtype_list = df_atomtype_record.to_numpy().tolist() + #column_names = list(df_kai.columns) + #print(column_names) + ## ['HName', 'TName', 'MtnHeadName', 'MtnTailName', 'BiasDist', 'MtnKai', 'MtnKaiNoS'] + #_Cut_Melt = 5 + #print(f'WARNING, _Cut_Melt in this function is set to {_Cut_Melt} by default') + #keep_list = column_names[:_Cut_Melt] + #melt_list = column_names[_Cut_Melt:] + ##print(keep_list) + ##print(melt_list) + #df_kai_melt = pd.melt(df_kai, id_vars=keep_list, value_vars=melt_list, var_name='KaiType',value_name='KaiValue') + ##print(df_kai_melt) + #g = sns.FacetGrid(df_kai_melt, col="HName", hue="KaiType",) + #g.map(sns.scatterplot, "BiasDist", "KaiValue", alpha=.7) # MtnKai MtnKaiNoS + #g.add_legend() + #g.set(xticks=[-6, -4, -2, 0, 2, 4, 6], yticks=[-12, 0, 12]) + #g.savefig(kai_full_pdf) + ################################################################# + + + + + + + + + +def pull_kai_matrix_from_martini(atomtype_csv, martini_lj_pairs, out_kai_csv): + """ + 2021-07-08 + template kai in HyMD toml file + template_kai = [ + [["T", "C"],[-10.800]], + [["T", "B"],[-10.800]], + [["T", "S"],[-0.225]] + ] + + + epsilon sigma from c6 and c12 + ;; gmx sigeps -c6 0.15091 -cn 0.16267E-02 + ;; --- c6 = 1.50910e-01, c12 = 1.62670e-03 + ;; --- sigma = 0.47000, epsilon = 3.50000 + + !!! converison + https://en.wikipedia.org/wiki/Lennard-Jones_potential A = c12 B = c6 + sigma = (c12/c6)**(1/6) + epsilon = c6**2 / 4c12 + + !!! 
calcuate kai + ## -6*( eps_ij - 0.5(eps_i + eps_j) ) + + """ + + kai_list = [] + + + df_atomtype_record = pd.read_csv( atomtype_csv) + df_atomtype_list = df_atomtype_record.to_numpy().tolist() + #list of records print(df_atomtype_list) + + ## loop the atomtype_list + ##pair_list = list(combinations(df_atomtype_list, 2)) ### note 2021-07-19 this is to loop without of repeat + pair_list = list(product(df_atomtype_list, repeat=2)) ### note 2021-07-19 this can loop all, full matrix + for (item1, item2) in pair_list: + #print(item1, item2) + #print(item1[4], type(item1[4])) + #_kai_record = [item1[1], item2[1], item1[4], item2[4], abs(item2[5]-item1[5])] + _kai_record = [item1[1], item2[1], item1[4], item2[4], item2[5]-item1[5]] ## not abs() then easier to find pairs + + + ################################### THIS IS THE RAW + for martini_lj in martini_lj_pairs: + if set( [item1[4], item2[4]] ) == set( [ martini_lj.vdwHeadType, martini_lj.vdwTailType ] ): + #print(f'found {item1[4]} {item2[4]}') + ##c6 = martini_lj.vdwC6 + ##c12 = martini_lj.vdwC12 + ##epsilon = c6**2 / (4*c12) + ##sigma = (c12/c6)**(1/6) + ##print(item1[1], item2[1], item1[4], item2[4], c6, c12, epsilon, sigma ) ## ok + # print(item1[1], item2[1], item1[4], item2[4], martini_lj.vdwC6, martini_lj.vdwC12, martini_lj.epsilon, martini_lj.sigma) + + + ## -6*( eps_ij - 0.5(eps_i + eps_j) ) + eps_ij = martini_lj.epsilon + + for martini_lj in martini_lj_pairs: + if martini_lj.sametype and item1[4] == martini_lj.vdwHeadType : + eps_i = martini_lj.epsilon + break + + for martini_lj in martini_lj_pairs: + if martini_lj.sametype and item2[4] == martini_lj.vdwHeadType : + eps_j = martini_lj.epsilon + break + + kai = -6.0 * ( eps_ij - 0.5*(eps_i + eps_j)) + ##print(item1[1], item2[1], item1[4], item2[4], martini_lj.vdwC6, martini_lj.vdwC12, martini_lj.epsilon, martini_lj.sigma, kai) + _kai_record.append(kai) + ##kai_list.append(_kai_record) + + break + + ################################## remove S + for martini_lj in martini_lj_pairs: + typeHead = item1[4] + typeTail = item2[4] + + ### remove S + if typeHead[0]=='S': + typeHead = typeHead[1:] + if typeTail[0]=='S': + typeTail = typeTail[1:] + + if set( [typeHead , typeTail] ) == set( [ martini_lj.vdwHeadType, martini_lj.vdwTailType ] ): + + ## -6*( eps_ij - 0.5(eps_i + eps_j) ) + eps_ij = martini_lj.epsilon + + for martini_lj in martini_lj_pairs: + if martini_lj.sametype and typeHead == martini_lj.vdwHeadType : + eps_i = martini_lj.epsilon + break + + for martini_lj in martini_lj_pairs: + if martini_lj.sametype and typeTail == martini_lj.vdwHeadType : + eps_j = martini_lj.epsilon + break + + kai = -6.0 * ( eps_ij - 0.5*(eps_i + eps_j)) + _kai_record.append(kai) + kai_list.append(_kai_record) + + break + + #for item in kai_list: + # print('the chi: ', item) + + ############ out csv + ## see https://www.geeksforgeeks.org/make-a-pandas-dataframe-with-two-dimensional-list-python/ + ## data line e.g. ['S', 'N', 'N0', 'SQ0', -2.6247347690813374, -0.0] + df_out = pd.DataFrame(kai_list, + columns =['HName', 'TName', 'MtnHeadName','MtnTailName','BiasDist', 'MtnKai', 'MtnKaiNoS']) + #print(df_out) + df_out.to_csv(out_kai_csv, index=False) + + + + +def pull_kai_matrix_from_martini_new(atomtype_csv, martini_lj_pairs, out_kai_csv, eps_ref = 4.999864): + """ + [2021-08-12] + based on the function pull_kai_matrix_from_martini + checking the issue about self-interaction terms + + !!! 
calcuate kai + ## -6*( eps_ij - 0.5(eps_i + eps_j) ) Eq + The fact is that Martini already give the pair interaction strenghes + The above Eq twists the interaction scales from Martini FF, e.g.: + W mix with C + W-W deep attractive well + C-C shallow attractive well + W-C can be zero + then purely the self-interaction terms + + We select a refence eps_ij, e.g. eps_ref = eps_ww = P4 P4 4.999864058869093 + then others are defined as + -6*( eps_ij - eps_ref) + + """ + + kai_list = [] + + + df_atomtype_record = pd.read_csv( atomtype_csv) + df_atomtype_list = df_atomtype_record.to_numpy().tolist() + #list of records print(df_atomtype_list) + + ## loop the atomtype_list + ##pair_list = list(combinations(df_atomtype_list, 2)) ### note 2021-07-19 this is to loop without of repeat + pair_list = list(product(df_atomtype_list, repeat=2)) ### note 2021-07-19 this can loop all, full matrix + for (item1, item2) in pair_list: + #print(item1, item2) + #print(item1[4], type(item1[4])) + #_kai_record = [item1[1], item2[1], item1[4], item2[4], abs(item2[5]-item1[5])] + _kai_record = [item1[1], item2[1], item1[4], item2[4], item2[5]-item1[5]] ## not abs() then easier to find pairs + + + ################################### THIS IS THE RAW + for martini_lj in martini_lj_pairs: + if set( [item1[4], item2[4]] ) == set( [ martini_lj.vdwHeadType, martini_lj.vdwTailType ] ): + #print(f'found {item1[4]} {item2[4]}') + ##c6 = martini_lj.vdwC6 + ##c12 = martini_lj.vdwC12 + ##epsilon = c6**2 / (4*c12) + ##sigma = (c12/c6)**(1/6) + ##print(item1[1], item2[1], item1[4], item2[4], c6, c12, epsilon, sigma ) ## ok + # print(item1[1], item2[1], item1[4], item2[4], martini_lj.vdwC6, martini_lj.vdwC12, martini_lj.epsilon, martini_lj.sigma) + + + ## -6*( eps_ij - 0.5(eps_i + eps_j) ) + eps_ij = martini_lj.epsilon + #print(item1[4], item2[4],eps_ij ) + + kai = -6.0 * ( eps_ij - eps_ref ) + ##print(item1[1], item2[1], item1[4], item2[4], martini_lj.vdwC6, martini_lj.vdwC12, martini_lj.epsilon, martini_lj.sigma, kai) + _kai_record.append(kai) + ##kai_list.append(_kai_record) + + break + + ################################## remove S + for martini_lj in martini_lj_pairs: + typeHead = item1[4] + typeTail = item2[4] + + ### remove S + if typeHead[0]=='S': + typeHead = typeHead[1:] + if typeTail[0]=='S': + typeTail = typeTail[1:] + + if set( [typeHead , typeTail] ) == set( [ martini_lj.vdwHeadType, martini_lj.vdwTailType ] ): + + ## -6*( eps_ij - 0.5(eps_i + eps_j) ) + eps_ij = martini_lj.epsilon + + #kai = -6.0 * ( eps_ij - 0.5*(eps_i + eps_j)) + kai = -6.0 * ( eps_ij - eps_ref) # _new + + _kai_record.append(kai) + kai_list.append(_kai_record) + + break + + #for item in kai_list: + # print('the chi: ', item) + + ############ out csv + ## see https://www.geeksforgeeks.org/make-a-pandas-dataframe-with-two-dimensional-list-python/ + ## data line e.g. 
['S', 'N', 'N0', 'SQ0', -2.6247347690813374, -0.0] + df_out = pd.DataFrame(kai_list, + columns =['HName', 'TName', 'MtnHeadName','MtnTailName','BiasDist', 'MtnKai', 'MtnKaiNoS']) + #print(df_out) + df_out.to_csv(out_kai_csv, index=False) + + + + +def gmx_record_match(strg, search=re.compile(r'[^a-zA-Z0-9]').search): + """ + requires the strg is (line start with) a-z or number + """ + return not bool(search(strg)) + +def load_martini_ff_vdw(itp_file): + """ + 2021-07-07 before, in all the load gmx itp files, + loop lines and break with empty line; + which requires no unnecessary gap lines + === Now then loop the until the next section [ ] or end + + target section: [ nonbond_params ] + + """ + itpVdwAtom_list = [] + + itpVdwPair_list = [] + with open(itp_file,'r') as f: + data = f.readlines() + index_nonbond = [x for x in range(len(data)) if '[ nonbond_params ]' in data[x].lower()] + index_section = [x for x in range(len(data)) if '[ ' in data[x].lower()] + #print(index_nonbond) + #print(index_section) + start = index_nonbond[0] # section [ nonbond_params ] + try: + end = index_section[ index_section.index(start) + 1 ] # section next to [ nonbond_params ] + data_target = data[start:end] + except: + end = -1 + data_target = data[start:] ### checked already that if put -1; then the last line -1 is not included + #print( start, end) + + ############### access [ nonbond_params ] section + ### for line in data[start:end]: ## to index_pairs[0] or -1 + for line in data_target: + ##print(line ) + demoline = line.split() + #print(demoline) + if demoline: ### this is a list, will filter the empty list, i.e. empty line + #print(demoline) + if gmx_record_match(demoline[0]): + #print(demoline) ### test ok + vdw_head = demoline[0] + vdw_tail = demoline[1] + vdw_func = int(demoline[2]) + vdw_c6 = float(demoline[3]) + vdw_c12 = float(demoline[4]) + itpVdwPair_list.append (ItpVdwPair(vdw_head, vdw_tail, vdw_func, vdw_c6, vdw_c12)) + #print(vdw_head, vdw_tail, vdw_func, vdw_c6, vdw_c12) # test ok + return itpVdwPair_list + + + + +def read_top_molecules(topfile): + """ + only search for the [ molecules ] seciton + + ! currently this funciton does work for: no empty line in the end + ! the problem is the -1 in the data[index_molecules[0]:-1] + ! add -1 means that the last item -1 is not included + ! e.g. a = [1,2,3] + ! a[1:-1] # [2] + ! a[1:] # [2,3] + ! + ! Thus remove the -1 + ! + + + """ + # if file exists + itpMolecules = [] + with open(topfile,'r') as f: + data = f.readlines() + #print('data', data) + index_molecules = [x for x in range(len(data)) if '[ molecules ]' in data[x].lower()] + for line in data[index_molecules[0]:]: + #print('here,,,', line) + demoline = line.split() + if line == "\n": # cutoff at the first empty line + break + elif demoline[1].isdigit(): + demolist = line.split() + molname = demolist[0] + molnum = demolist[1] + itpMolecules.append(ItpMolecule( molname, molnum)) + else: + continue + return itpMolecules + + + +def load_molecule_itp_atoms(itp_file): + """ + HERE only read the [ atoms ] section + + FIX: [x:-1] does not include the last item in a list + + ! Notice that the seciton name should be gaped with space in both left and right side, e.g. [ atoms ] + ! 
otherwise, will not be able to locate + """ + itpAtom_list = [] + + print('now, reading the itp file:', itp_file) + with open(itp_file,'r') as f: + data = f.readlines() + index_atoms = [x for x in range(len(data)) if '[ atoms ]' in data[x].lower()] + ############### access atoms + for line in data[index_atoms[0]:]: + demoline = line.split() + if line == "\n": # cutoff at the first empty line + break + elif demoline[0].isdigit(): + demolist = line.split() + index = int(demolist[0]) + atomtype = demolist[1] + resnr = int(demolist[2]) + resname = demolist[3] + atomname = demolist[4] + cgnr = int(demolist[5]) + charge = float(demolist[6]) + mass = float(demolist[7]) + #print(index, atomtype) + itpAtom_list.append (ItpAtom(index,atomtype,resnr,resname,atomname, cgnr,charge,mass)) + else: + continue + return itpAtom_list + + + +def load_molecule_itp_bonds(itp_file): + """ + MissingLabel = -999 + """ + MissingLabel = -999 + + itpBond_list = [] + with open(itp_file,'r') as f: + data = f.readlines() + index_bonds= [x for x in range(len(data)) if '[ bonds ]' in data[x].lower()] + ############### access bonds + for line in data[index_bonds[0]:-1]: ## to index_pairs[0] or -1 + demoline = line.split() + if line == "\n": # cutoff at the first empty line + break + elif demoline[0].isdigit(): + demolist = line.split() + head = int(demolist[0]) + tail = int(demolist[1]) + func = int(demolist[2]) + try: + length = float(demolist[3]) + except IndexError: + length = MissingLabel + try: + strength = float(demolist[4]) + except IndexError: + strength = MissingLabel + itpBond_list.append (ItpBond(head,tail,func,length,strength)) + else: + continue + return itpBond_list + + +def access_top_molecule_itps_gen_whole_atomtypeID( itpMolecules, atomcsv, top_to_itp_path=''): + """ + redundent thing here, has to access the atomcsv to convert the typename to typeID ... 
+ """ + _SHIFT = 1 ### IF the id in h5 starts from 0 + + # if file exists + ######## generate the whole list of typename + whole_name_list = [] + + for molecule_group in itpMolecules: + molecule_name = molecule_group.molname + molecule_num = int(molecule_group.molnum) + ### better to get the path of top file, then add the top_to_itp_path + ### --> here assume the atomcsv give the absolute path + abs_path = os.path.dirname(atomcsv) + _path = os.path.join(abs_path, top_to_itp_path) + #print(_path) + molecule_itp_file = os.path.join(_path, f"{molecule_name}.itp" ) + #print( molecule_name , molecule_num, molecule_itp_file ) + itp_atom_list = load_molecule_itp_atoms( molecule_itp_file ) + + #atomtypeName_list = [x.atomname for x in itp_atom_list] + atomtypeName_list = [x.atomtype for x in itp_atom_list] ## 2021-06-11 + + print('inside molecule itp: ' , atomtypeName_list) + whole_name_list += atomtypeName_list * molecule_num ## duplicate list https://stackoverflow.com/questions/33046980/duplicating-a-list-n-number-of-times + ### test + ### a = [ 1, 2] + ### b = a * 3 + + + ####### map the name list to typeID list + ####### construct mapping dict from the dataframe, to convert the typename to typeID + df_atomtype = pd.read_csv( atomcsv ) + atom_typename_typeID_dict = dict(zip( df_atomtype.atomName.values.tolist(), df_atomtype.atomtypeID.values.tolist() )) + print( atom_typename_typeID_dict ) + ####### mapping oepration + whole_atomtypeID_list = np.vectorize(atom_typename_typeID_dict.get)(np.array(whole_name_list)) ## option1 + #print(whole_atomtypeID_list) + for _ in whole_atomtypeID_list-_SHIFT: + print(_) + + return (np.array(whole_name_list,dtype="S5"), np.array(whole_atomtypeID_list)-_SHIFT) + + +def access_top_molecule_itps_gen_whole_atomBondIdx( itpMolecules, atomcsv, top_to_itp_path=''): + """ + - editted 2021-06-14 add _MAX_N_BONDS variable + + """ + _SHIFT = -1 ### IF the id in h5 starts from 0 + # if file exists + ######## generate the whole list of bonded indices + whole_atom_bonded_indices_list = [] + + _MAX_N_BONDS = 4 + + _continue = 0 ## continue from different type of molecules + for molecule_group in itpMolecules: + molecule_name = molecule_group.molname + molecule_num = int(molecule_group.molnum) + print('--------------------------------', molecule_name, molecule_num) + abs_path = os.path.dirname(atomcsv) + _path = os.path.join(abs_path, top_to_itp_path) + molecule_itp_file = os.path.join(_path, f"{molecule_name}.itp" ) + #print( molecule_name , molecule_num, molecule_itp_file ) + itp_atom_list = load_molecule_itp_atoms( molecule_itp_file ) + atom_id_list = [x.index for x in itp_atom_list] + + molecule_bonded_indices = [] + if len(atom_id_list)==1: + print('one bead molecule') + itp_bond_list = [] + _array = [0]*_MAX_N_BONDS #[0,0,0] + molecule_bonded_indices.append(_array) + else: + itp_bond_list = load_molecule_itp_bonds( molecule_itp_file ) + for id in atom_id_list: + #_array = [0,0,0] + _array = [0]*_MAX_N_BONDS + for bond in itp_bond_list: + pair = [ int(bond.head), int(bond.tail) ] + if id in pair : + ##_array.extend(pair) + ##_array = list(set(_array)) + ##_array.remove(id) + pair.remove(id) + _array = pair + _array + _array = _array[:_MAX_N_BONDS] ##_array = _array[:3] #### + + ##molecule_bonded_indices.append( np.array(_array)) + molecule_bonded_indices.append( _array ) + + + molecule_bonded_indices = np.array(molecule_bonded_indices) + _SHIFT + print('***** molecule type *****') + print(molecule_bonded_indices) + + ### mask + _filter_value = -1 + 
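+        # Masking the -1 entries (unused bond slots after the _SHIFT above) is meant
+        # to keep them out of the index arithmetic below: only the real neighbour
+        # indices get shifted by the per-copy offset, while the masked slots come
+        # back from .data still holding -1, i.e. "no bond".  For a 3-bead chain the
+        # second copy would map [[1,-1,..],[0,2,..],[1,-1,..]] to
+        # [[4,-1,..],[3,5,..],[4,-1,..]].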
+        molecule_bonded_indices_mask = ma.masked_values(molecule_bonded_indices, _filter_value)
+        print(molecule_bonded_indices_mask)
+
+        for _duplicate in np.arange(molecule_num):
+            _shift_indices = (molecule_bonded_indices_mask
+                              + _duplicate * len(atom_id_list) + _continue).data
+            whole_atom_bonded_indices_list.extend(list(_shift_indices))
+
+        _continue += molecule_num * len(atom_id_list)
+
+    return np.array(whole_atom_bonded_indices_list)
+
+
+def access_top_molecule_itps_gen_whole_atomcharges(itpMolecules, atomcsv, top_to_itp_path=''):
+    """
+    Build the per-particle charge array for the whole system from the
+    molecule itp files.
+    """
+    whole_charge_list = []
+
+    for molecule_group in itpMolecules:
+        molecule_name = molecule_group.molname
+        molecule_num = int(molecule_group.molnum)
+        # the itp files are resolved relative to the directory of atomcsv,
+        # which is assumed to be given as an absolute path
+        abs_path = os.path.dirname(atomcsv)
+        _path = os.path.join(abs_path, top_to_itp_path)
+        molecule_itp_file = os.path.join(_path, f"{molecule_name}.itp")
+        itp_atom_list = load_molecule_itp_atoms(molecule_itp_file)
+        atomcharge_list = [x.charge for x in itp_atom_list]
+        print('inside molecule itp:', atomcharge_list)
+        # repeat the per-molecule charge list once per molecule copy
+        whole_charge_list += atomcharge_list * molecule_num
+
+    return np.array(whole_charge_list)
+
+
+def access_top_molecule_itps_gen_whole_moleculeindex(itpMolecules, atomcsv, top_to_itp_path=''):
+    """
+    Build the per-particle molecule index for the whole system. The returned
+    indices start from 0 (the internal counter is 1-based and shifted by -1
+    at the end); be careful with the offset when passing from one molecule
+    type to the next (changed 2021-06-14).
+    """
+    whole_molecule_index_list = []
+
+    def duplicate(testList, n):
+        # repeat every element of testList n times, preserving order
+        return [ele for ele in testList for _ in range(n)]
+
+    _continue = 0
+    for molecule_group in itpMolecules:
+        molecule_name = molecule_group.molname
+        molecule_num = int(molecule_group.molnum)
+        abs_path = os.path.dirname(atomcsv)
+        _path = os.path.join(abs_path, top_to_itp_path)
+        molecule_itp_file = os.path.join(_path, f"{molecule_name}.itp")
+        itp_atom_list = load_molecule_itp_atoms(molecule_itp_file)
+        atom_num_in_mol = len(itp_atom_list)
+
+        # 1-based molecule numbers for this molecule type, continuing from the
+        # previous type
+        _array_sgl = np.arange(molecule_num) + 1 + _continue
+        _array_dpl = duplicate(_array_sgl, atom_num_in_mol)
+        whole_molecule_index_list += _array_dpl
+
+        _continue = _array_sgl[-1]
+
+        print(_array_dpl[0], _array_dpl[-1])
+
+    return np.array(whole_molecule_index_list) - 1  # shift so molecule ids start from 0
+
+
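+# The two converters below write a HyMD input .h5 file with the layout used
+# throughout this script: datasets "coordinates" and "velocities" of shape
+# (1, n_atoms, 3), per-particle datasets "types", "molecules", "indices",
+# "names" and "bonds" (padded with -1), an optional "charge" dataset when
+# electric_label is set, and the file attributes "box" and "n_molecules".
+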
+def gmx_to_h5(out_h5_file, in_gro_file, top, atomcsv, electric_label=False):
+    """
+    Convert a GROMACS .gro/.top/.itp system description to a HyMD input .h5
+    file (reference: fort5_to_hdf5.py).
+    """
+    groAtoms, box = loadGroPosition(in_gro_file)
+    n_atoms = len(groAtoms)
+
+    n_molecules = 0
+    # access [ molecules ] in the top file
+    if isinstance(top, str):
+        # TODO: could check that the file actually exists
+        itpMolecule_list = read_top_molecules(top)
+        for molecule in itpMolecule_list:
+            k, v = molecule.molname, int(molecule.molnum)
+            print(k, v)
+            n_molecules += v
+    print(n_molecules)
+
+    _ndim = 3
+    MAX_N_BONDS = 4  # must match _MAX_N_BONDS used when building the bond table
+    f_h5 = h5py.File(out_h5_file, "w")
+
+    dset_pos = f_h5.create_dataset("coordinates", (1, n_atoms, _ndim), dtype="float64")
+    dset_vel = f_h5.create_dataset("velocities", (1, n_atoms, _ndim), dtype="float64")  # left as zeros
+    dset_types = f_h5.create_dataset("types", (n_atoms,), dtype="i")
+    dset_molecule_index = f_h5.create_dataset("molecules", (n_atoms,), dtype="i")
+    dset_indices = f_h5.create_dataset("indices", (n_atoms,), dtype="i")
+    dset_names = f_h5.create_dataset("names", (n_atoms,), dtype="S5")
+    dset_bonds = f_h5.create_dataset("bonds", (n_atoms, MAX_N_BONDS), dtype="i")
+
+    if electric_label:
+        dset_charges = f_h5.create_dataset("charge", (n_atoms,), dtype="float32")
+
+    ###### box size, taken from the .gro file
+    ### h5dump -N "box" all.h5
+    f_h5.attrs["box"] = box
+
+    ###### number of molecules, taken from the .top file
+    ### h5dump -N "n_molecules" all.h5
+    f_h5.attrs["n_molecules"] = n_molecules
+
+    ###### coordinates, taken from the .gro file
+    ### h5dump -d "coordinates" all.h5
+    _frame = 0
+    for idx, atom in enumerate(groAtoms):
+        if idx % 10 == 0:
+            print(idx)
+        dset_pos[_frame, idx, :] = np.array([atom.x, atom.y, atom.z])
+
+    ##### indices
+    ### h5dump -d "indices" all.h5
+    dset_indices[:] = np.arange(len(groAtoms))
+
+    ##### types and names; the types are the numeric atomtypeIDs from atomcsv
+    ### h5dump -d "types" all.h5
+    ### h5dump -d "names" all.h5
+    dset_names[:], dset_types[:] = access_top_molecule_itps_gen_whole_atomtypeID(itpMolecule_list, atomcsv)
+
+    ##### molecule index; built by looping over the .top and the itp files
+    ##### (using groAtoms instead would require handling the 99999 residue limit)
+    ### h5dump -d "molecules" all.h5
+    dset_molecule_index[...] = access_top_molecule_itps_gen_whole_moleculeindex(itpMolecule_list, atomcsv)
+
+    ##### bonds
+    ### h5dump -d "bonds" all.h5
+    ##### WARNING -- the bonded terms are stored OCCAM-style as
+    #####     _index: x y -1
+    ##### which is not necessarily final; see the git issue
+    ##### "Make HyMD output valid HyMD input" #79 -- the input .h5 format may
+    ##### still change, so this follows what is currently used.
+    ##### Dataset assignment via [...] : https://docs.h5py.org/en/stable/high/dataset.html
+    dset_bonds[...] = access_top_molecule_itps_gen_whole_atomBondIdx(itpMolecule_list, atomcsv)
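+    # Each row of "bonds" holds the 0-based indices of the beads bonded to
+    # that bead, padded with -1 up to MAX_N_BONDS entries; a lone solvent
+    # bead, for example, gets the row [-1, -1, -1, -1].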
+
+    ##### charges (only when electric_label is set)
+    ### h5dump -d "charge" all.h5
+    if electric_label:
+        dset_charges[...] = access_top_molecule_itps_gen_whole_atomcharges(itpMolecule_list, atomcsv)
+
+
+def gmx_to_h5_from_more_hand(out_h5_file, in_gro_file, top, atomcsv, alias_mol_dict, electric_label=False):
+    """
+    Convert a GROMACS Martini .gro/.top system to a HyMD input .h5 file
+    (reference: fort5_to_hdf5.py).
+
+    From the .top file only the molecule names and molecule numbers are read.
+    Because the itp files are redefined for the hPF model, the corresponding
+    itp file for each molecule type has to be supplied through the
+    alias_mol_dict dictionary. The coordinates are read from the .gro file.
+    """
+    groAtoms, box = loadGroPosition(in_gro_file)
+    n_atoms = len(groAtoms)
+
+    print('----------- load gro file: number of atoms, box size')
+    print(n_atoms, box)
+
+    n_molecules = 0
+    # access [ molecules ] in the top file
+    if isinstance(top, str):
+        print('----------- load top file:')
+        itpMolecule_list = read_top_molecules(top)
+        for molecule in itpMolecule_list:
+            k, v = molecule.molname, int(molecule.molnum)
+            print(k, v)
+            n_molecules += v
+    print('----------- total number of molecules:')
+    print(n_molecules)
+
+    _ndim = 3
+    f_h5 = h5py.File(out_h5_file, "w")
+
+    dset_pos = f_h5.create_dataset("coordinates", (1, n_atoms, _ndim), dtype="float64")
+    dset_vel = f_h5.create_dataset("velocities", (1, n_atoms, _ndim), dtype="float64")  # left as zeros
+    dset_types = f_h5.create_dataset("types", (n_atoms,), dtype="i")
+    dset_molecule_index = f_h5.create_dataset("molecules", (n_atoms,), dtype="i")
+    dset_indices = f_h5.create_dataset("indices", (n_atoms,), dtype="i")
+    dset_names = f_h5.create_dataset("names", (n_atoms,), dtype="S5")
+    MAX_N_BONDS = 4
+    dset_bonds = f_h5.create_dataset("bonds", (n_atoms, MAX_N_BONDS), dtype="i")
+
+    if electric_label:
+        dset_charges = f_h5.create_dataset("charge", (n_atoms,), dtype="float32")
+
+    ###### box size, taken from the .gro file
+    ### h5dump -N "box" all.h5
+    f_h5.attrs["box"] = box
+
+    ###### number of molecules, taken from the .top file
+    ### h5dump -N "n_molecules" all.h5
+    f_h5.attrs["n_molecules"] = n_molecules
+
+    ###### coordinates, taken from the .gro file
+    ### h5dump -d "coordinates" all.h5
+    _frame = 0
+    for idx, atom in enumerate(groAtoms):
+        dset_pos[_frame, idx, :] = np.array([atom.x, atom.y, atom.z])
+
+    ##### indices
+    ### h5dump -d "indices" all.h5
+    dset_indices[:] = np.arange(len(groAtoms))
+
+    ##### types and names; the types are the numeric atomtypeIDs from atomcsv
+    ### h5dump -d "types" all.h5
+    ### h5dump -d "names" all.h5
+
+    # Map the Martini molecule names from the .top onto the names of the
+    # redefined hPF itp files (added 2021-06-11).
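+    # As a hypothetical example, alias_mol_dict = {"DPPC": "dppc_hpf", "W": "water_hpf"}
+    # would make the converter read dppc_hpf.itp and water_hpf.itp instead of
+    # the DPPC.itp and W.itp implied by the GROMACS topology.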
+    for item in itpMolecule_list:
+        try:
+            item.molname = alias_mol_dict[item.molname]
+        except KeyError:
+            print('corresponding itp is not found for molecule:', item.molname)
+
+    dset_names[:], dset_types[:] = access_top_molecule_itps_gen_whole_atomtypeID(itpMolecule_list, atomcsv)
+
+    ##### molecule index; built by looping over the .top and the itp files
+    ##### (using groAtoms instead would require handling the 99999 residue limit)
+    ### h5dump -d "molecules" all.h5
+    dset_molecule_index[...] = access_top_molecule_itps_gen_whole_moleculeindex(itpMolecule_list, atomcsv)
+
+    ##### bonds
+    ### h5dump -d "bonds" all.h5
+    ##### WARNING -- the bonded terms are stored OCCAM-style as
+    #####     _index: x y -1
+    ##### which is not necessarily final; see the git issue
+    ##### "Make HyMD output valid HyMD input" #79 -- the input .h5 format may
+    ##### still change, so this follows what is currently used.
+    dset_bonds[...] = access_top_molecule_itps_gen_whole_atomBondIdx(itpMolecule_list, atomcsv)
+
+    ##### charges (only when electric_label is set)
+    ### h5dump -d "charge" all.h5
+    if electric_label:
+        dset_charges[...] = access_top_molecule_itps_gen_whole_atomcharges(itpMolecule_list, atomcsv)
+
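+
+if __name__ == "__main__":
+    # Minimal usage sketch. The file names below ("system.gro", "topol.top",
+    # "atomtypes.csv", "system.h5") are placeholders, not files shipped with
+    # the repository, and should be replaced with the actual paths.
+    gmx_to_h5(
+        out_h5_file="system.h5",
+        in_gro_file="system.gro",
+        top="topol.top",
+        atomcsv="atomtypes.csv",
+        electric_label=True,  # also write the "charge" dataset
+    )
+    # For a Martini topology whose molecule names differ from the hPF itp
+    # file names, use gmx_to_h5_from_more_hand(...) and pass alias_mol_dict.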