#!/usr/bin/env bash
#SBATCH --nodes=1                  # Number of nodes
#SBATCH --ntasks-per-node=1        # Number of tasks per node
#SBATCH --cpus-per-task=10         # Number of OpenMP threads per task
#SBATCH --gpus-per-node=1          # Number of GPUs per node
#SBATCH --hint=nomultithread       # Disable hyperthreading
#SBATCH --job-name=alphafold       # Job name
#SBATCH --output=%x.o%j            # Output file (%x is the job name, %j the job id)
#SBATCH --error=%x.o%j             # Error file
#SBATCH --time=10:00:00            # Expected runtime HH:MM:SS (max 100h for V100, 20h for A100)
##
## Please refer to the comments below for
## more information about the last 4 options.
##SBATCH --account=<account>@gpu   # To specify GPU accounting: <account> = echo $IDRPROJ
##SBATCH --partition=<partition>   # To specify a partition (see the IDRIS web site for more info)
##SBATCH --qos=qos_gpu-dev         # Uncomment for a job requiring less than 2 hours
##SBATCH --qos=qos_gpu-t4          # Uncomment for a job requiring more than 20h (max 16 GPUs, V100 only)

module purge
module load alphafold/2.2.4

export TMP=$JOBSCRATCH
export TMPDIR=$JOBSCRATCH

## In this example we let the structures relax with OpenMM
python3 $(which run_alphafold.py) \
    --output_dir=outputs \
    --uniref90_database_path=${DSDIR}/AlphaFold/uniref90/uniref90.fasta \
    --mgnify_database_path=${DSDIR}/AlphaFold/mgnify/mgy_clusters_2018_12.fa \
    --template_mmcif_dir=${DSDIR}/AlphaFold/pdb_mmcif/mmcif_files \
    --obsolete_pdbs_path=${DSDIR}/AlphaFold/pdb_mmcif/obsolete.dat \
    --bfd_database_path=${DSDIR}/AlphaFold/bfd/bfd_metaclust_clu_complete_id30_c90_final_seq.sorted_opt \
    --pdb_seqres_database_path=${DSDIR}/AlphaFold/pdb_seqres/pdb_seqres.txt \
    --uniclust30_database_path=${DSDIR}/AlphaFold/uniclust30/uniclust30_2018_08/uniclust30_2018_08 \
    --uniprot_database_path=${DSDIR}/AlphaFold/uniprot/uniprot.fasta \
    --use_gpu_relax \
    --model_preset=multimer \
    --fasta_paths=test.fasta \
    --max_template_date=2022-01-01 \
    --data_dir=${DSDIR}/AlphaFold/model_parameters/2.2.4
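
To run this example, save the script above (for instance as alphafold_multimer.slurm; the file name is only illustrative) and create the test.fasta input referenced by --fasta_paths. A minimal sketch of a multimer input, with one FASTA entry per chain and placeholder sequences to be replaced by your own:

cat > test.fasta << 'EOF'
>chain_A
<amino-acid sequence of chain A>
>chain_B
<amino-acid sequence of chain B>
EOF

sbatch alphafold_multimer.slurm   # submit the job to Slurm
squeue -u $USER                   # check its status in the queue

The results, including the OpenMM-relaxed structures, are written under the outputs directory set by --output_dir.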