#!/bin/bash
#SBATCH --job-name=SingularityGPU      # name of job
##SBATCH --partition=gpu_p2            # uncomment for the gpu_p2 partition
#SBATCH --ntasks=1                     # total number of MPI tasks (= number of GPUs here)
#SBATCH --gres=gpu:1                   # number of GPUs per node (1/4 of the GPUs)
#SBATCH --cpus-per-task=10             # number of cores per task (here, 1/4 of the 4-GPU node)
##SBATCH --cpus-per-task=3             # number of cores per task (with gpu_p2: 1/8 of the 8-GPU node)
# /!\ Caution: "multithread" in Slurm vocabulary refers to hyperthreading.
#SBATCH --hint=nomultithread           # hyperthreading deactivated
#SBATCH --time=00:10:00                # maximum execution time requested (HH:MM:SS)
#SBATCH --output=SingularityGPU%j.out  # name of output file
#SBATCH --error=SingularityGPU%j.out   # name of error file (here, common with the output file)

# go into the submission directory
cd ${SLURM_SUBMIT_DIR}

# clean out the modules loaded interactively and inherited by default
module purge

# load the singularity module
module load singularity

# echo the launched commands
set -x

# execute the code from the allowed execution space, using the --nv option
# so that the NVIDIA GPU devices are visible inside the container
srun singularity exec --nv $SINGULARITY_ALLOWED_DIR/my-container_GPU.sif python ./my_model.py
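
# Usage (a minimal sketch; the script filename "singularity_gpu.slurm" is an
# assumption, adapt it and the container/model names to your own setup):
#   sbatch singularity_gpu.slurm
# The container image my-container_GPU.sif must already be present in
# $SINGULARITY_ALLOWED_DIR, the execution space allowed for Singularity containers.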