#!/bin/bash
# SLURM batch script: run a parallel OpenMolcas calculation on one node.
#SBATCH --nodes=1                  # Use a single node
#SBATCH --ntasks-per-node=40      # 40 MPI tasks
#SBATCH --cpus-per-task=1         # no OpenMP
#SBATCH --hint=nomultithread      # disable hyperthreading
#SBATCH --job-name=molcas         # Jobname
#SBATCH --output=%x.o%j           # Output file: %x is the jobname, %j the jobid
#SBATCH --error=%x.o%j            # Error file
#SBATCH --time=10:00:00           # Expected runtime HH:MM:SS (max 20h)
##
## Please, refer to comments below for
## more information about these 4 last options.
##SBATCH --account=<account>@cpu  # To specify cpu accounting: <account> = echo $IDRPROJ
##SBATCH --partition=<partition>  # To specify partition (see IDRIS web site for more info)
##SBATCH --qos=qos_cpu-dev        # Uncomment for job requiring less than 2 hours
##SBATCH --qos=qos_cpu-t4         # Uncomment for job requiring more than 20h (up to 4 nodes)

# Clean out the modules loaded in interactive mode and inherited by default
module purge

# Load needed modules
module load openmolcas/19.11-mpi intel-mkl/19.0.5
set -x

### Definition of variables ###
export Project=dhe

# Memory available to MOLCAS: per-CPU allocation times CPUs per task
# (falls back to the per-CPU value when SLURM_CPUS_PER_TASK is unset).
if [ -z "$SLURM_CPUS_PER_TASK" ]
then
    export MOLCAS_MEM=$SLURM_MEM_PER_CPU
else
    export MOLCAS_MEM=$(( SLURM_MEM_PER_CPU * SLURM_CPUS_PER_TASK ))
fi

# Fixed: original read "HomeDir=PWD", which stored the literal string "PWD";
# it must expand to the current working directory.
export HomeDir=$PWD

## MOLCAS_NPROCS depends on values of "--nodes" and of "--ntasks-per-node"
export MOLCAS_NPROCS=$SLURM_NTASKS
export MOLCAS_WORKDIR=$JOBSCRATCH
export CurrDir=$(pwd)

pymolcas "${Project}.cas.input"