#!/bin/bash
#SBATCH --nodes=4                      # Number of nodes
#SBATCH --ntasks-per-node=4            # Number of MPI tasks per node
#SBATCH --cpus-per-task=10             # Number of OpenMP threads per MPI task
#SBATCH --hint=nomultithread           # Disable hyperthreading
#SBATCH --gres=gpu:4                   # Allocate 4 GPUs per node
#SBATCH --job-name=test_RE             # Job name
#SBATCH --output=%x.o%j                # Output file (%x is the job name, %j the job ID)
#SBATCH --error=%x.o%j                 # Error file
#SBATCH --time=10:00:00                # Expected runtime HH:MM:SS (max 100h for V100, 20h for A100)
##
## Please refer to the comments below for
## more information about these last 3 options.
#SBATCH --account=<account>@v100       # To specify GPU accounting: <account> = echo $IDRPROJ
##SBATCH --qos=qos_gpu-dev             # Uncomment for a job requiring less than 2 hours
##SBATCH --qos=qos_gpu-t4              # Uncomment for a job requiring more than 20h (up to 16 GPUs, V100 only)

# Clean out the modules loaded in interactive mode and inherited by default
module purge

# Load the module
module load namd/2.13-mpi-cuda-charmpp-mpi-smp

# 4 nodes x 4 MPI tasks per node = 16 tasks, i.e. one task (and one GPU) per replica
export replicas=16

# Create one output directory per replica. Brace expansion cannot use a
# variable ({0..$replicas} would not expand), so loop over indices 0..replicas-1.
for i in $(seq 0 $((replicas - 1))); do
    mkdir -p output/$i
done

# Echo the commands as they are executed
set -x
srun $(which namd2) +idlepoll +devicesperreplica 1 \
     +replicas $replicas job0.conf +stdout output/%d/job0.%d.log
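
# A minimal usage sketch (the filename "namd_re.slurm" is an assumption,
# not part of the original script):
#
#   sbatch namd_re.slurm          # submit the job
#   squeue -u $USER               # check its state
#
# With the directives above, the batch output goes to test_RE.o<jobid>,
# and each replica writes its NAMD log to output/<replica_id>/job0.<replica_id>.log.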