#!/bin/bash
#SBATCH --nodes=1                  # 1 node is used
#SBATCH --ntasks-per-node=4        # 4 MPI tasks
#SBATCH --cpus-per-task=10         # Number of OpenMP threads per MPI task
#SBATCH --gres=gpu:4               # Number of GPUs per node
#SBATCH --hint=nomultithread       # Disable hyperthreading
#SBATCH --job-name=gromacs         # Job name
#SBATCH --output=GMX_GenMD.o%j     # Standard output file (%j is the job number)
#SBATCH --error=GMX_GenMD.o%j      # Standard error file
#SBATCH --time=10:00:00            # Expected runtime HH:MM:SS (max 100h for V100, 20h for A100)
##
## Please refer to the comments below for
## more information about these last 4 options.
##SBATCH --account=<account>@v100  # To specify GPU accounting: <account> = echo $IDRPROJ
##SBATCH --partition=<partition>   # To specify a partition (see the IDRIS web site for more info)
##SBATCH --qos=qos_gpu-dev         # Uncomment for a job requiring less than 2 hours
##SBATCH --qos=qos_gpu-t4          # Uncomment for a job requiring more than 20h (up to 16 GPUs, V100 only)

# Clean out the modules loaded in interactive mode and inherited by default
module purge

# Load the needed modules
module load gromacs/2022.3-mpi-cuda-plumed

# Run: 4 MPI tasks (--ntasks-per-node=4), 10 threads per task (--cpus-per-task=10),
# and 4 GPUs (--gres=gpu:4) detected automatically.
# Be aware that GROMACS recommends 2 <= ntomp <= 6; do your own tests.
srun gmx_mpi mdrun -v -deffnm md_test -ntomp $SLURM_CPUS_PER_TASK
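
# Usage sketch (assumptions: the script is saved as, e.g., gmx_gpu.slurm --
# the filename is only an illustration -- and standard SLURM commands are available):
#   sbatch gmx_gpu.slurm        # submit the job; prints the job number
#   squeue -u $USER             # check the state of your pending/running jobs
#   scancel <jobid>             # cancel the job if needed (<jobid> from sbatch output)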