#!/usr/bin/env bash
#SBATCH --nodes=1                   # Using only 1 node
#SBATCH --ntasks-per-node=40        # 40 MPI tasks
#SBATCH --cpus-per-task=1           # No OpenMP
#SBATCH --hint=nomultithread        # Disable hyperthreading
#SBATCH --job-name=molcas           # Job name
#SBATCH --output=molcas.o%j         # Standard output file (%j is the job number)
#SBATCH --time=10:00:00             # Expected runtime HH:MM:SS (max 100h)
##
## Please refer to the comments below for
## more information about these last 4 options.
##SBATCH --account=<account>@cpu    # To specify CPU accounting: <account> = echo $IDRPROJ
##SBATCH --partition=<partition>    # To specify a partition (see the IDRIS web site for more info)
##SBATCH --qos=qos_cpu-dev          # Uncomment for a job requiring less than 2 hours
##SBATCH --qos=qos_cpu-t4           # Uncomment for a job requiring more than 20h (only one node)

set -x
module purge
module load openmolcas/19.11-mpi intel-mkl/19.0.5

### Definition of variables ###
export Project=dhe

# Give OpenMolcas the memory SLURM allocated to each task:
# SLURM_MEM_PER_CPU is per core (in MB), so multiply it by the number
# of cores per task when that variable is set.
if [ -z "$SLURM_CPUS_PER_TASK" ]
then
    export MOLCAS_MEM=$SLURM_MEM_PER_CPU
else
    export MOLCAS_MEM=$(( $SLURM_MEM_PER_CPU * $SLURM_CPUS_PER_TASK ))
fi

export HomeDir=$PWD                 # Submission directory
export MOLCAS_NPROCS=$SLURM_NTASKS  # One MPI process per SLURM task
export MOLCAS_WORKDIR=$JOBSCRATCH   # Scratch space for intermediate files
export CurrDir=$(pwd)               # Directory holding the input file

pymolcas ${Project}.cas.input
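
# A minimal submission sketch, assuming this script is saved as
# molcas.slurm (the name is arbitrary) and that the input file
# dhe.cas.input is present in the submission directory:
#
#   sbatch molcas.slurm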