Commit c0a90b8b authored by Berk Hess

Add bonded #thread runtime check

Replaced a debug assertion on the number of OpenMP threads not being
larger than GMX_OPENMP_MAX_THREADS with a fatal error.
But since the listed forces reduction is not required without
listed forces, these reductions are now conditional and mdrun can run
with more than GMX_OPENMP_MAX_THREADS threads when no listed
interactions are present.

Fixes #2085.

Change-Id: I7a6049d727924cd0b4df10a3525f9e7aec49c3dc
parent 7952be34
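Taken together, the change is a simple pattern: detect once at setup whether any listed interactions exist, let every per-step buffer operation return early when none do, and turn the old debug-only thread-count assertion into a user-facing error that is only reached when the reduction masks are actually needed. Below is a minimal standalone sketch of that pattern, using hypothetical stand-ins (BondedThreading, kMaxThreads, fatalError, checkThreadCount, reduceThreadOutput) rather than the GROMACS types and gmx_fatal():

#include <cstdio>
#include <cstdlib>

constexpr int kMaxThreads = 64; // stand-in for GMX_OPENMP_MAX_THREADS

struct BondedThreading
{
    int  nthreads;
    bool haveBondeds; // set once at setup: true if any listed interactions exist
};

// Stand-in for gmx_fatal(): report and abort.
static void fatalError(const char *msg)
{
    std::fprintf(stderr, "Fatal error: %s\n", msg);
    std::exit(EXIT_FAILURE);
}

// Setup-time check: a user-facing runtime error instead of a debug-only
// assert(), so release builds stop with a message instead of overflowing
// the reduction bitmask.
static void checkThreadCount(int nthread)
{
    if (nthread > kMaxThreads)
    {
        fatalError("too many OpenMP threads; decrease the thread count "
                   "or rebuild with a larger maximum");
    }
}

// Per-step reduction: a no-op without listed forces.
static void reduceThreadOutput(const BondedThreading *bt)
{
    if (!bt->haveBondeds)
    {
        return; // nothing was computed, so there is nothing to reduce
    }
    // ... reduce the per-thread force buffers into the global arrays ...
}

int main()
{
    BondedThreading bt = { 8, false };
    if (bt.haveBondeds)
    {
        checkThreadCount(bt.nthreads); // only reached when masks are needed
    }
    reduceThreadOutput(&bt); // no bondeds: returns immediately
    return 0;
}

Because the thread-count check sits behind the haveBondeds gate, a run without listed interactions never hits the limit, which is exactly what lets mdrun exceed GMX_OPENMP_MAX_THREADS in that case.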
@@ -91,6 +91,11 @@ isPairInteraction(int ftype)
static void
zero_thread_output(struct bonded_threading_t *bt, int thread)
{
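/* Without bonded interactions the thread force buffers are never
 * written to, so clearing them here would be wasted work.
 */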
if (!bt->haveBondeds)
{
return;
}
f_thread_t *f_t = &bt->f_t[thread];
const int nelem_fa = sizeof(*f_t->f)/sizeof(real);
@@ -198,6 +203,11 @@ reduce_thread_output(int n, rvec *f, rvec *fshift,
gmx_bool bCalcEnerVir,
gmx_bool bDHDL)
{
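/* Without bonded interactions nothing was accumulated in the
 * thread-local buffers, so there is nothing to reduce.
 */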
if (!bt->haveBondeds)
{
return;
}
if (bt->nblock_used > 0)
{
/* Reduce the bonded force buffer */
@@ -482,6 +492,7 @@ void calc_listed(const t_commrec *cr,
wallcycle_sub_stop(wcycle, ewcsRESTRAINTS);
}
/* TODO: Skip this whole loop with a system/domain without listeds */
wallcycle_sub_start(wcycle, ewcsLISTED);
#pragma omp parallel for num_threads(bt->nthreads) schedule(static)
for (thread = 0; thread < bt->nthreads; thread++)
@@ -84,6 +84,8 @@ struct bonded_threading_t
gmx_bitmask_t *mask; /**< Mask array, one element corresponds to a block of reduction_block_size atoms of the force array, bit corresponding to thread indices set if a thread writes to that block */
int block_nalloc; /**< Allocation size of block_index and mask */
bool haveBondeds; /**< true if we have bonded interactions and thus need to reduce the bonded forces */
/* There are two different ways to distribute the bonded force calculation
* over the threads. We decide which to use based on the number of threads.
*/
@@ -45,6 +45,8 @@
#include "manage-threading.h"
#include "config.h"
#include <assert.h>
#include <limits.h>
#include <stdlib.h>
@@ -188,7 +190,8 @@ static void divide_bondeds_by_locality(int ntype,
//! Divides bonded interactions over threads
static void divide_bondeds_over_threads(t_idef *idef,
int nthread,
int max_nthread_uniform)
int max_nthread_uniform,
bool *haveBondeds)
{
ilist_data_t ild[F_NRE];
int ntype;
@@ -204,7 +207,8 @@ static void divide_bondeds_over_threads(t_idef *idef,
snew(idef->il_thread_division, idef->il_thread_division_nalloc);
}
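/* Record whether any listed interactions are present at all, so
 * that all reduction setup and per-step reductions can be skipped.
 */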
*haveBondeds = false;
ntype        = 0;
for (f = 0; f < F_NRE; f++)
{
if (!ftype_is_bonded_potential(f))
@@ -212,6 +216,11 @@ static void divide_bondeds_over_threads(t_idef *idef,
continue;
}
if (idef->il[f].nr > 0)
{
*haveBondeds = true;
}
if (idef->il[f].nr == 0)
{
/* No interactions, avoid all the integer math below */
@@ -308,7 +317,16 @@ calc_bonded_reduction_mask(int natoms,
const t_idef *idef,
int thread, int nthread)
{
static_assert(BITMASK_SIZE == GMX_OPENMP_MAX_THREADS, "For the error message below we assume these two are equal.");
if (nthread > BITMASK_SIZE)
{
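/* Only the master thread reports the error; the barrier keeps the
 * other threads parked while gmx_fatal() aborts the run.
 */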
#pragma omp master
gmx_fatal(FARGS, "You are using %d OpenMP threads, which is larger than GMX_OPENMP_MAX_THREADS (%d). Decrease the number of OpenMP threads or rebuild GROMACS with a larger value for GMX_OPENMP_MAX_THREADS.",
nthread, GMX_OPENMP_MAX_THREADS);
#pragma omp barrier
}
GMX_ASSERT(nthread <= BITMASK_SIZE, "We need at least nthread bits in the mask");
int nblock = (natoms + reduction_block_size - 1) >> reduction_block_bits;
@@ -374,7 +392,14 @@ void setup_bonded_threading(t_forcerec *fr, t_idef *idef)
/* Divide the bonded interaction over the threads */
divide_bondeds_over_threads(idef,
bt->nthreads,
bt->bonded_max_nthread_uniform,
&bt->haveBondeds);
if (!bt->haveBondeds)
{
/* We don't have bondeds, so there is nothing to reduce */
return;
}
/* Determine to which blocks each thread's bonded force calculation
* contributes. Store this as a mask for each thread.