#!/bin/bash
#SBATCH --job-name=gpu_cuda_mps_multi_mpi     # name of job
#SBATCH --ntasks=40                           # total number of MPI tasks
#SBATCH --ntasks-per-node=40                  # number of MPI tasks per node (all physical cores)
#SBATCH --gres=gpu:4                          # number of GPUs per node (all GPUs)
#SBATCH --cpus-per-task=1                     # number of cores per task
# /!\ Caution: In Slurm vocabulary, "multithread" refers to hyperthreading.
#SBATCH --hint=nomultithread                  # hyperthreading deactivated
#SBATCH --time=00:10:00                       # maximum execution time requested (HH:MM:SS)
#SBATCH --output=gpu_cuda_mps_multi_mpi%j.out # name of output file
#SBATCH --error=gpu_cuda_mps_multi_mpi%j.out  # name of error file (here, common with the output)
#SBATCH --exclusive                           # exclusive reservation of the node
#SBATCH -C mps                                # MPS is activated

# cleans out modules loaded in interactive mode and inherited by default
module purge

# loads modules
module load ...

# echo of launched commands
set -x

# execution of the code: with MPS, the 4 GPUs of the node are shared by the 40 MPI tasks
srun ./executable_multi_gpu_mpi
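
The contents of executable_multi_gpu_mpi are not shown here. As a purely hypothetical sketch (not the documented application), an MPI + CUDA program run this way might map each rank onto a GPU in round-robin fashion, so that the 40 ranks share the node's 4 GPUs with 10 ranks per device; all program structure below is an assumption, while the MPI and CUDA runtime calls are standard API.

// Hypothetical sketch: round-robin GPU selection per MPI rank.
// With MPS (requested via "#SBATCH -C mps"), the ranks mapped to the
// same GPU can submit work concurrently instead of serializing.
#include <mpi.h>
#include <cuda_runtime.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);

    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    // Number of GPUs visible to this process (4 on a full node here)
    int device_count = 0;
    cudaGetDeviceCount(&device_count);

    // Round-robin mapping: 40 ranks over 4 GPUs gives 10 ranks per GPU
    int device = rank % device_count;
    cudaSetDevice(device);

    printf("MPI rank %d uses GPU %d of %d\n", rank, device, device_count);

    // ... GPU kernels and MPI communication of the real application ...

    MPI_Finalize();
    return 0;
}

Without MPS, kernels launched by the 10 processes bound to a given GPU would be time-sliced; MPS lets them execute concurrently on that GPU, which is the reason for requesting the mps constraint in this script.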