#!/bin/bash
#SBATCH --job-name=job-array       # name of job
#SBATCH --ntasks=1                 # total number of MPI processes
#SBATCH --ntasks-per-node=1        # number of MPI processes per node
# In Slurm vocabulary, "multithread" refers to hyperthreading.
#SBATCH --hint=nomultithread       # 1 MPI process per physical core (no hyperthreading)
#SBATCH --time=00:01:00            # maximum execution time requested (HH:MM:SS)
#SBATCH --output=%x_%A_%a.out      # output file name containing the job ID and the array index
#SBATCH --error=%x_%A_%a.out       # name of error file (here, common with the output file)
#SBATCH --array=0-19%5             # 20 jobs in total, but a maximum of 5 running at the same time

# go into the submission directory
cd ${SLURM_SUBMIT_DIR}

# clean out the modules loaded in interactive mode and inherited by default
module purge

# load the modules
module load ...

# echo the launched commands
set -x

# Execution of the "mon_exe" binary with different input data for each job.
# The value of ${SLURM_ARRAY_TASK_ID} is different for each job.
srun ./mon_exe < file${SLURM_ARRAY_TASK_ID}.in > file${SLURM_ARRAY_TASK_ID}.out
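As a usage sketch, the array is launched with a single sbatch submission; the file name job_array.slurm below is an assumption, use whatever name the script was saved under:

# Submit the script once: Slurm expands it into 20 jobs (indices 0 to 19),
# with at most 5 of them running at the same time because of the %5 throttle.
sbatch job_array.slurm

# Each job writes its own output file of the form job-array_<jobID>_<index>.out,
# where <index> is the value of SLURM_ARRAY_TASK_ID for that job.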