#!/bin/bash
#SBATCH --nodes=1                   # Number of nodes
#SBATCH --ntasks-per-node=1         # Number of tasks per node
#SBATCH --cpus-per-task=4           # Number of cores for each task (important to get all memory)
#SBATCH --hint=nomultithread        # Disable hyperthreading
#SBATCH --job-name=crest_omp        # Jobname
#SBATCH --output=%x.o%j             # Output file: %x is the jobname, %j the jobid
#SBATCH --error=%x.o%j              # Error file
#SBATCH --time=10:00:00             # Expected runtime HH:MM:SS (max 100h)
##
## Please refer to the comments below for
## more information about the last 4 options.
##SBATCH --account=<account>@cpu    # To specify cpu accounting: <account> = echo $IDRPROJ
##SBATCH --partition=<partition>    # To specify partition (see IDRIS web site for more info)
##SBATCH --qos=qos_cpu-dev          # Uncomment for a job requiring less than 2 hours
##SBATCH --qos=qos_cpu-t4           # Uncomment for a job requiring more than 20 hours (up to 4 nodes)

# echo commands
set -x

# Clean out the modules loaded in interactive mode and inherited by default
module purge

# Load needed modules
module load crest/2.11.1

# Go into the submission directory
cd ${SLURM_SUBMIT_DIR}

# Number of OpenMP threads
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK

# Execute code
srun crest struc.xyz -gfn2 -g h2o -T 4
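
# A minimal submission sketch (assuming this script is saved as
# "crest_omp.slurm" and that the input file "struc.xyz" sits in the
# submission directory; both file names are assumptions for illustration):
#
#   sbatch crest_omp.slurm
#
# With --output=%x.o%j and --error=%x.o%j above, standard output and error
# are both written to crest_omp.o<jobid>.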