#!/bin/bash
# SLURM batch script: multi-GPU QMCPACK DMC run (IDRIS Jean Zay style).
# NOTE: #SBATCH directives are only honored when each one starts its own line;
# the source had the whole script collapsed onto one line, which silently
# disabled every directive below.
#SBATCH --nodes=1                     # Number of nodes
#SBATCH --ntasks-per-node=4           # Number of tasks per node
#SBATCH --gres=gpu:4                  # Allocate GPUs
#SBATCH --cpus-per-task=10            # Number of cores for each task (important to get all memory)
#SBATCH --hint=nomultithread          # Disable hyperthreading
#SBATCH --job-name=qmcpack_multi_gpu  # Jobname
#SBATCH --output=%x.o%j               # Output file: %x is the jobname, %j the jobid
#SBATCH --error=%x.o%j                # Error file
# Only one --time directive: the source declared it twice (00:10:00 and
# 10:00:00); sbatch would use the last one, so the duplicate is removed.
#SBATCH --time=10:00:00               # Expected runtime HH:MM:SS (max 100h for V100, 20h for A100)
##
## Please refer to the comments below for
## more information about these 4 last options.
##SBATCH --account=<account>@v100     # To specify GPU accounting: <account> = echo $IDRPROJ
##SBATCH --partition=<partition>      # To specify partition (see IDRIS web site for more info)
##SBATCH --qos=qos_gpu-dev            # Uncomment for job requiring less than 2 hours
##SBATCH --qos=qos_gpu-t4             # Uncomment for job requiring more than 20 hours (up to 16 GPU, V100 only)

# Clean out the modules loaded in interactive mode and inherited by default
module purge

# Load needed modules
module load gcc/8.3.0
module load cuda/10.1.1
module load qmcpack/3.7.0-mpi-cuda

# Echo commands as they run
set -x

# Quoted and checked: if the cd fails, abort instead of running srun
# from the wrong directory.
cd "${SLURM_SUBMIT_DIR}/dmc" || exit 1

# Execute code with the right binding: 1 GPU per task
srun /gpfslocalsup/pub/idrtools/bind_gpu.sh qmcpack C2CP250_dmc_x2.in.xml