Commit 5a9d0f1c authored by Adam Simpson US

Add 2.13 release examples

parent 3b1b372d
#!/bin/bash
ggID='1tnn5TgpJHO3WATymQVBngWmx29T0rZfm'
ggURL='https://drive.google.com/uc?export=download'
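# ggID is the Google Drive file ID for apoa1.tar.gz; ggURL is Google Drive's direct-download endpoint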
if [ ! -f apoa1.tar.gz ]; then
echo "Downloading apoa1.tar.gz"
curl -sc /tmp/gcokie "${ggURL}&id=${ggID}" >/dev/null
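# Google Drive asks for a confirmation token before serving large files; the token is written into the saved cookie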
getcode="$(awk '/_warning_/ {print $NF}' /tmp/gcokie)"
cmd='curl -C - -LOJb /tmp/gcokie "${ggURL}&confirm=${getcode}&id=${ggID}"'
eval $cmd
else
echo "apoa1.tar.gz already exists, not downloading"
fi
if [ ! -d apoa1 ]; then
tar xf apoa1.tar.gz
echo "apoa1 example directory unpacked"
else
echo "apoa1 directory already exists, not unpacking"
fi
#!/bin/bash
#PBS -N namd_ngc
#PBS -j oe
#PBS -l select=4:ncpus=28:ngpus=1
#PBS -l walltime=00:10:00
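# The select statement requests 4 resource chunks (typically one per node), each with 28 CPU cores and 1 GPU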
set -e; set -o pipefail
# Load required modules
module load singularity
# Calculate task/process counts
PBS_TASK_COUNT=$(grep -c . ${PBS_NODEFILE})
PBS_NODE_COUNT=$(uniq ${PBS_NODEFILE} | wc -l)
PBS_TASKS_PER_NODE=$(( PBS_TASK_COUNT / PBS_NODE_COUNT ))
# Reserve one CPU core for comm thread
NAMD_TASKS_PER_NODE=$(( PBS_TASKS_PER_NODE - 1 ))
NAMD_TASKS_TOTAL=$(( PBS_NODE_COUNT * NAMD_TASKS_PER_NODE ))
# Change to PBS submission directory
cd ${PBS_O_WORKDIR}
# Download APOA1 example input
wget -O - https://gitlab.com/NVHCP/ngc-examples/raw/master/namd/2.13/get_apoa1.sh | bash
INPUT="/host_pwd/apoa1/apoa1.namd"
# Generate charmrun nodelist
NODELIST=$(pwd)/.nodelist.${PBS_JOBID}
for host in $(uniq ${PBS_NODEFILE}); do
echo "host ${host} ++cpus ${PBS_TASKS_PER_NODE}" >> ${NODELIST}
done
# singularity alias which will launch charmrun and namd2
SIMG="$(pwd)/namd_2.13-multinode.simg"
SINGULARITY="$(which singularity) exec --nv -B $(pwd):/host_pwd ${SIMG}"
# charmrun alias
SSH="ssh -o PubkeyAcceptedKeyTypes=+ssh-dss -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o LogLevel=ERROR"
CHARMRUN="charmrun ++remote-shell \"${SSH}\" ++nodelist ${NODELIST} ++scalable-start ++p ${NAMD_TASKS_TOTAL} ++ppn ${NAMD_TASKS_PER_NODE}"
# namd2 alias
NAMD2="namd2 +setcpuaffinity +idlepoll ${INPUT}"
# Launch parallel namd
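# The outer ${SINGULARITY} runs charmrun inside the container; charmrun then starts each remote rank through the same ${SINGULARITY} wrapper, so namd2 is also containerized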
eval "${SINGULARITY} ${CHARMRUN} ${SINGULARITY} ${NAMD2}"
# Cleanup nodelist
rm ${NODELIST}
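# Submission sketch (assumption: this job script is saved as namd_pbs.sh and the NGC image
# has already been pulled to namd_2.13-multinode.simg in the submission directory):
#   qsub namd_pbs.sh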
#!/bin/bash
#SBATCH --nodes 2
#SBATCH --ntasks=16
#SBATCH --time 00:10:00
set -e; set -o pipefail
# Load required modules
module load singularity
# Download APOA1 example input
wget -O - https://gitlab.com/NVHCP/ngc-examples/raw/master/namd/2.13/get_apoa1.sh | bash
INPUT="/host_pwd/apoa1/apoa1.namd"
# Generate charmrun nodelist
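# scontrol show hostnames expands the allocated node list; SLURM_CPUS_ON_NODE is the CPU count available on each allocated node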
NODELIST=$(pwd)/nodelist.${SLURM_JOBID}
for host in $(scontrol show hostnames); do
echo "host ${host} ++cpus ${SLURM_CPUS_ON_NODE}" >> ${NODELIST}
done
# singularity alias which will launch charmrun and namd2
SIMG="$(pwd)/namd_2.13-multinode.simg"
SINGULARITY="$(which singularity) exec --nv -B $(pwd):/host_pwd ${SIMG}"
# charmrun alias
SSH="ssh -o PubkeyAcceptedKeyTypes=+ssh-dss -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o LogLevel=ERROR"
CHARMRUN="charmrun ++remote-shell \"${SSH}\" ++nodelist ${NODELIST} ++p ${SLURM_NTASKS} ++ppn ${SLURM_CPUS_ON_NODE}"
# namd2 alias
NAMD2="namd2 +setcpuaffinity +idlepoll ${INPUT}"
# Launch parallel namd
eval "${SINGULARITY} ${CHARMRUN} ${SINGULARITY} ${NAMD2}"
# Cleanup nodelist
rm ${NODELIST}
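# Submission sketch (assumption: this job script is saved as namd_slurm.sh and
# namd_2.13-multinode.simg already exists in the submission directory):
#   sbatch namd_slurm.sh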
#!/bin/bash
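# Variant of the SLURM job above targeting the 2.13b2 (beta 2) container image and its matching downloader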
#SBATCH --nodes 2
#SBATCH --time 00:10:00
# Load the required modules
module load singularity
echo "Downloading APOA1 Dataset..."
wget -O - https://gitlab.com/NVHCP/ngc-examples/raw/master/namd/2.13b2/get_apoa1.sh | bash
echo "Generating charmrun hostfile..."
NODELIST=$(pwd)/nodelist.${SLURM_JOBID}
for host in $(scontrol show hostnames); do
echo "host ${host} ++cpus ${SLURM_CPUS_ON_NODE}" >> ${NODELIST}
done
SIMG="$(pwd)/namd_2.13b2-multinode.simg"
SINGULARITY="$(which singularity) exec --nv -B $(pwd):/host_pwd ${SIMG}"
SSH="ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o LogLevel=ERROR"
CHARMRUN="charmrun ++remote-shell \"${SSH}\" ++nodelist ${NODELIST} ++scalable-start"
INPUT="/host_pwd/apoa1/apoa1.namd"
NAMD2="namd2 +setcpuaffinity +idlepoll ${INPUT} ++ppn ${SLURM_CPUS_ON_NODE}"
echo "Running APOA1 example..."
eval "${SINGULARITY} ${CHARMRUN} ${SINGULARITY} ${NAMD2} ${INPUT}"
rm ${NODELIST}
#!/bin/bash
# Usage: ./docker.sh <gpu count> <image name>
set -e; set -o pipefail
GPU_COUNT=${1:-1}
IMG=${2:-"nvcr.io/hpc/namd:2.13-singlenode"}
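# Defaults to 1 GPU and the NGC single-node NAMD 2.13 image; GPU_COUNT is currently only reported in the status message below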
echo "Downloading APOA1 Dataset..."
wget -O - https://gitlab.com/NVHCP/ngc-examples/raw/master/namd/2.13/get_apoa1.sh | bash
INPUT="/host_pwd/apoa1/apoa1.namd"
IB_DEV=""
if [ -d /dev/infiniband ]; then
echo "Enabling InfiniBand support"
IB_DEV="--device=/dev/infiniband --cap-add=IPC_LOCK --net=host"
fi
DOCKER="nvidia-docker run ${IB_DEV} -it --rm -v ${PWD}:/host_pwd ${IMG}"
NAMD2="namd2 ${INPUT}"
echo "Running APOA1 example in ${IMG} on ${GPU_COUNT} GPUS..."
${DOCKER} ${NAMD2}
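# Note (assumption): pulling from nvcr.io may require logging in to the NGC registry first, e.g.
#   docker login nvcr.io    # username: $oauthtoken, password: <NGC API key>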
#!/usr/bin/env bash
# Usage: ./singularity.sh <gpu count> <image name>
set -e; set -o pipefail
GPU_COUNT=${1:-1}
SIMG=${2:-"namd_2.13-singlenode"}
echo "Downloading APOA1 Dataset..."
wget -O - https://gitlab.com/NVHCP/ngc-examples/raw/master/namd/2.13/get_apoa1.sh | bash
INPUT="/host_pwd/apoa1/apoa1.namd"
SINGULARITY="singularity exec --nv -B $(pwd):/host_pwd ${SIMG}"
NAMD2="namd2 ${INPUT}"
echo "Running APOA1 example in ${SIMG} on ${GPU_COUNT} GPUS..."
${SINGULARITY} ${NAMD2}
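# Image-build sketch (assumption: Singularity 2.x syntax, with NGC credentials supplied via
# SINGULARITY_DOCKER_USERNAME='$oauthtoken' and SINGULARITY_DOCKER_PASSWORD=<NGC API key>):
#   singularity build namd_2.13-singlenode docker://nvcr.io/hpc/namd:2.13-singlenode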