#!/bin/bash
# SLURM submission script: run GPU-enabled VASP (MPS mode) on one node.
# Fixed from a line-collapsed paste: SBATCH directives must each be on
# their own comment line at the top of the file, or sbatch ignores them.
#SBATCH --nodes=1                    # 1 node reserved
#SBATCH --ntasks-per-node=8          # 8 MPI tasks (that is, 2 per GPU)
#SBATCH --cpus-per-task=5            # 5 OpenMP threads (to obtain all the memory of the node)
#SBATCH --hint=nomultithread         # Disable hyperthreading
#SBATCH --gres=gpu:4                 # 4 GPUs requested
#SBATCH --constraint=mps             # Activates the MPS
#SBATCH --job-name=VASP              # Job name (was "--jobname", which sbatch rejects)
#SBATCH --output=%x.o%j              # Output file: %x is the job name, %j the job id
#SBATCH --error=%x.o%j               # Error file
#SBATCH --time=10:00:00              # Expected runtime HH:MM:SS (max 100h for V100, 20h for A100)
##
## Please refer to the "Comments" below for
## more information about the following 4 options.
##SBATCH --account=<account>@v100    # To specify gpu accounting: <account> = echo $IDRPROJ
##SBATCH --partition=<partition>     # To specify partition (see IDRIS web site for more info)
##SBATCH --qos=qos_gpu-dev           # Uncomment for job requiring less than 2 hours
##SBATCH --qos=qos_gpu-t4            # Uncomment for job requiring more than 20h (up to 16 GPU, V100 only)

# Cleans out modules loaded in interactive and inherited by default
module purge

# Load the necessary modules
module load vasp/5.4.4-mpi-cuda

srun vasp_gpu