#!/bin/bash
#SBATCH --nodes=1                   # 1 node is used
#SBATCH --ntasks-per-node=1         # 1 MPI task
#SBATCH --cpus-per-task=10          # Number of OpenMP threads per MPI task
#SBATCH --gres=gpu:1                # Number of GPUs per node
#SBATCH --hint=nomultithread        # Disable hyperthreading
#SBATCH --job-name=pmemd            # Job name
#SBATCH --output=%x.%j              # Standard output file (%x is the job name, %j is the job number)
#SBATCH --error=%x.%j               # Standard error file
#SBATCH --time=10:00:00             # Expected runtime HH:MM:SS (max 100h for V100, 20h for A100)
##
## Please refer to the comments below for
## more information about the 4 options above.
##SBATCH --account=<account>@v100   # To specify GPU accounting: <account> = echo $IDRPROJ
##SBATCH --partition=<partition>    # To specify a partition (see the IDRIS web site for more info)
##SBATCH --qos=qos_gpu-dev          # Uncomment for a job requiring less than 2 hours
##SBATCH --qos=qos_gpu-t4           # Uncomment for a job requiring more than 20h (up to 16 GPUs, V100 only)

# Clean out the modules loaded in interactive mode and inherited by default
module purge

# Load the needed modules
module load amber/20-mpi-cuda

pmemd.cuda -O -i prod.in -o prod.out -p sys.prmtop -c sys.rst \
           -r sys.rst -ref sys.inpcrd -x sys.mdcrd
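
## A minimal usage sketch: save this script (the filename pmemd.slurm is
## illustrative, not part of the original), submit it with sbatch, and
## follow its state with squeue. For example:
##   sbatch pmemd.slurm    # submit the job to the scheduler
##   squeue -u $USER       # check the job's state in the queue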