OpenMP parallelization of energy #364
Closed · directysj started this conversation in Show and tell · Replies: 1 comment, 48 replies
---
Thanks for the suggested OpenMP additions to the Ewald code. I wonder if you could provide a bit more detail about the system you want to simulate? My immediate thought is that the bottleneck is the real-space part, i.e. …
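For orientation, the real-space part referred to here is the pairwise erfc-screened Coulomb sum over particles within the cutoff, and that loop parallelizes well with a plain OpenMP reduction. The sketch below uses assumed names and containers and is not the Faunus implementation, only an illustration of the pattern (minimum-image wrapping omitted for brevity):

```cpp
// Sketch only (assumed names, not Faunus code): real-space Ewald term
// U = sum_{i<j} q_i q_j erfc(alpha * r_ij) / r_ij for pairs within the cutoff.
#include <cmath>
#include <vector>
#include <Eigen/Dense>

using Point = Eigen::Vector3d;

double realSpaceEnergy(const std::vector<Point>& r, const std::vector<double>& q,
                       double alpha, double cutoff) {
    double energy = 0.0;
    const long n = static_cast<long>(r.size());
#pragma omp parallel for reduction(+:energy) schedule(dynamic)
    for (long i = 0; i < n; ++i) {
        for (long j = i + 1; j < n; ++j) {            // each unique pair once
            const double rij = (r[i] - r[j]).norm();  // minimum image omitted here
            if (rij < cutoff)
                energy += q[i] * q[j] * std::erfc(alpha * rij) / rij;
        }
    }
    return energy;
}
```

The triangular pair loop has an uneven work distribution across `i`, which is why `schedule(dynamic)` (or a flattened pair index) is typically used here; the `reduction(+:energy)` clause avoids a race on the accumulator.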
---
Hello @mlund,
I would like to simulate a really large system (approximately 20,000 particles). I therefore parallelized `energy.cpp` of Faunus with OpenMP, but when running the code it still uses only a single thread. Your suggestions on the code would be really helpful; the relevant sections are provided below.
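One thing that may be worth ruling out first is whether the binary was actually built with OpenMP enabled: if the compiler never saw `-fopenmp` (or the corresponding OpenMP option in the Faunus CMake setup was off — check the build docs for the exact switch), every `#pragma omp` is silently ignored and the program stays on a single thread. A minimal standalone check, assuming a GCC/Clang-style toolchain and not any Faunus code:

```cpp
// check_openmp.cpp -- compile with: g++ -fopenmp check_openmp.cpp -o check_openmp
#include <cstdio>
#ifdef _OPENMP
#include <omp.h>
#endif

int main() {
#ifdef _OPENMP
    std::printf("OpenMP enabled, max threads = %d\n", omp_get_max_threads());
#pragma omp parallel
    {
#pragma omp critical
        std::printf("hello from thread %d\n", omp_get_thread_num());
    }
#else
    std::printf("compiled WITHOUT OpenMP -- all pragmas are ignored\n");
#endif
    return 0;
}
```

If this reports OpenMP as enabled but the simulation still runs on one thread, also check that `OMP_NUM_THREADS` is not set to 1 in the environment.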
```cpp
void PolicyIonIon::updateBox(EwaldData &d, const Point &box) const {
    assert(d.policy == EwaldData::PBC or d.policy == EwaldData::PBCEigen);
    d.box_length = box;
    int n_cutoff_ceil = ceil(d.n_cutoff);
    d.check_k2_zero = 0.1 * std::pow(2 * pc::pi / d.box_length.maxCoeff(), 2);
    int k_vector_size = (2 * n_cutoff_ceil + 1) * (2 * n_cutoff_ceil + 1) * (2 * n_cutoff_ceil + 1) - 1;
    if (k_vector_size == 0) {
        d.k_vectors.resize(3, 1);
        d.Aks.resize(1);
        d.k_vectors.col(0) = Point(1, 0, 0); // Just so it is not the zero-vector
        d.Aks[0] = 0;
        d.num_kvectors = 1;
        d.Q_ion.resize(1);
        d.Q_dipole.resize(1);
    } else {
        double nc2 = d.n_cutoff * d.n_cutoff;
        d.k_vectors.resize(3, k_vector_size);
        d.Aks.resize(k_vector_size);
        d.num_kvectors = 0;
        d.k_vectors.setZero();
        d.Aks.setZero();
        int start_value = 1;
        // ... k-vector generation loop omitted from this excerpt ...
    }
}

void PolicyIonIon::updateComplex(EwaldData &data, Space::Tgvec &groups) const {
#pragma omp parallel for private(qr) schedule(dynamic)
    // ... loop filling data.Q_ion omitted from this excerpt; note that
    // private(qr) requires qr to be declared before the pragma,
    // otherwise this does not compile ...
}

void PolicyIonIon::updateComplex(EwaldData &d, Change &change, Space::Tgvec &groups, Space::Tgvec &oldgroups) const {
    assert(groups.size() == oldgroups.size());
#pragma omp parallel for schedule(dynamic)
    // ... loop over changed groups omitted from this excerpt ...
}

void PolicyIonIonIPBC::updateBox(EwaldData &data, const Point &box) const {
    assert(data.policy == EwaldData::IPBC or data.policy == EwaldData::IPBCEigen);
    data.box_length = box;
    int ncc = std::ceil(data.n_cutoff);
    data.check_k2_zero = 0.1 * std::pow(2 * pc::pi / data.box_length.maxCoeff(), 2);
    int k_vector_size = (2 * ncc + 1) * (2 * ncc + 1) * (2 * ncc + 1) - 1;
    if (k_vector_size == 0) {
        data.k_vectors.resize(3, 1);
        data.Aks.resize(1);
        data.k_vectors.col(0) = Point(1, 0, 0); // Just so it is not the zero-vector
        data.Aks[0] = 0;
        data.num_kvectors = 1;
        data.Q_ion.resize(1);
        data.Q_dipole.resize(1);
    } else {
        double nc2 = data.n_cutoff * data.n_cutoff;
        data.k_vectors.resize(3, k_vector_size);
        data.Aks.resize(k_vector_size);
        data.num_kvectors = 0;
        data.k_vectors.setZero();
        data.Aks.setZero();
        int start_value = 0;
        // ... k-vector generation loop omitted from this excerpt ...
    }
}

void PolicyIonIonIPBC::updateComplex(EwaldData &d, Space::Tgvec &groups) const {
    assert(d.policy == EwaldData::IPBC or d.policy == EwaldData::IPBCEigen);
    // ... loop filling d.Q_ion omitted from this excerpt ...
}

void PolicyIonIonIPBC::updateComplex(EwaldData &d, Change &change, Space::Tgvec &groups,
                                     Space::Tgvec &oldgroups) const {
    assert(d.policy == EwaldData::IPBC or d.policy == EwaldData::IPBCEigen);
    assert(groups.size() == oldgroups.size());
#pragma omp parallel for schedule(dynamic)
    // ... loop over changed groups omitted from this excerpt ...
}
```
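Assuming the loops elided above follow the usual Ewald pattern of filling one `Q_ion` entry per k-vector, parallelizing over the k-vectors is the race-free choice: each iteration writes only to its own element, so no `private` or reduction clause is needed, while the serial inner loop over particles carries the bulk of the work. The sketch below is a standalone illustration with assumed names (`updateStructureFactor`, plain `std::vector` containers), not the Faunus API:

```cpp
// Standalone sketch (assumed names, not Faunus code): update the structure
// factor Q(k) = sum_j q_j * exp(i k·r_j) with the parallel loop over k-vectors.
#include <cmath>
#include <complex>
#include <cstddef>
#include <vector>
#include <Eigen/Dense>

using Point = Eigen::Vector3d;

void updateStructureFactor(const std::vector<Point>& positions,
                           const std::vector<double>& charges,
                           const Eigen::Matrix3Xd& k_vectors,
                           std::vector<std::complex<double>>& Q) {
    const Eigen::Index num_k = k_vectors.cols();
    Q.assign(static_cast<std::size_t>(num_k), {0.0, 0.0});
#pragma omp parallel for schedule(dynamic)
    for (Eigen::Index i = 0; i < num_k; ++i) {   // one k-vector per iteration
        const Point k = k_vectors.col(i);
        std::complex<double> sum(0.0, 0.0);
        for (std::size_t j = 0; j < positions.size(); ++j) {
            const double kr = k.dot(positions[j]);  // k·r_j
            sum += charges[j] * std::complex<double>(std::cos(kr), std::sin(kr));
        }
        Q[static_cast<std::size_t>(i)] = sum;    // only this thread touches Q[i]
    }
}
```

Even so, as noted above, with roughly 20,000 particles the real-space sum may remain the dominant cost, so it is worth profiling before tuning the k-space loops.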