diff --git a/notebooks/OpenFold.ipynb b/notebooks/OpenFold.ipynb index 8a8446527..669a57e6f 100644 --- a/notebooks/OpenFold.ipynb +++ b/notebooks/OpenFold.ipynb @@ -1,919 +1,919 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "id": "view-in-github" - }, - "source": [ - "\"Open" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "pc5-mbsX9PZC" - }, - "source": [ - "# OpenFold Colab\n", - "\n", - "Runs a simplified version of [OpenFold](https://github.com/aqlaboratory/openfold) on a target sequence. Adapted from DeepMind's [official AlphaFold Colab](https://colab.research.google.com/github/deepmind/alphafold/blob/main/notebooks/AlphaFold.ipynb).\n", - "\n", - "**Differences to AlphaFold v2.0**\n", - "\n", - "OpenFold is a trainable PyTorch reimplementation of AlphaFold 2. For the purposes of inference, it is practically identical to the original (\"practically\" because ensembling is excluded from OpenFold (recycling is enabled, however)).\n", - "\n", - "In this notebook, OpenFold is run with your choice of our original OpenFold parameters or DeepMind's publicly released parameters for AlphaFold 2.\n", - "\n", - "**Note**\n", - "\n", - "Like DeepMind's official Colab, this notebook uses **no templates (homologous structures)** and a selected portion of the full [BFD database](https://bfd.mmseqs.com/).\n", - "\n", - "**Citing this work**\n", - "\n", - "Any publication that discloses findings arising from using this notebook should [cite](https://github.com/deepmind/alphafold/#citing-this-work) DeepMind's [AlphaFold paper](https://doi.org/10.1038/s41586-021-03819-2).\n", - "\n", - "**Licenses**\n", - "\n", - "This Colab supports inference with the [AlphaFold model parameters](https://github.com/deepmind/alphafold/#model-parameters-license), made available under the Creative Commons Attribution 4.0 International ([CC BY 4.0](https://creativecommons.org/licenses/by/4.0/legalcode)) license. The Colab itself is provided under the [Apache 2.0 license](https://www.apache.org/licenses/LICENSE-2.0). See the full license statement below.\n", - "\n", - "**More information**\n", - "\n", - "You can find more information about how AlphaFold/OpenFold works in DeepMind's two Nature papers:\n", - "\n", - "* [AlphaFold methods paper](https://www.nature.com/articles/s41586-021-03819-2)\n", - "* [AlphaFold predictions of the human proteome paper](https://www.nature.com/articles/s41586-021-03828-1)\n", - "\n", - "FAQ on how to interpret AlphaFold/OpenFold predictions are [here](https://alphafold.ebi.ac.uk/faq)." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "rowN0bVYLe9n" - }, - "outputs": [], - "source": [ - "#@markdown ### Enter the amino acid sequence to fold ⬇️\n", - "#@markdown For multiple sequences, separate sequences with a colon `:`\n", - "input_sequence = 'MKLKQVADKLEEVASKLYHNANELARVAKLLGER:MKLKQVADKLEEVASKLYHNANELARVAKLLGER: MKLKQVADKLEEVASKLYHNANELARVAKLLGER:MKLKQVADKLEEVASKLYHNANELARVAKLLGER' #@param {type:\"string\"}\n", - "\n", - "#@markdown ### Configure the model ⬇️\n", - "\n", - "weight_set = 'AlphaFold' #@param [\"OpenFold\", \"AlphaFold\"]\n", - "model_mode = 'multimer' #@param [\"monomer\", \"multimer\"]\n", - "relax_prediction = True #@param {type:\"boolean\"}\n", - "\n", - "\n", - "# Remove all whitespaces, tabs and end lines; upper-case\n", - "input_sequence = input_sequence.translate(str.maketrans('', '', ' \\n\\t')).upper()\n", - "aatypes = set('ACDEFGHIKLMNPQRSTVWY') # 20 standard aatypes\n", - "allowed_chars = aatypes.union({':'})\n", - "if not set(input_sequence).issubset(allowed_chars):\n", - " raise Exception(f'Input sequence contains non-amino acid letters: {set(input_sequence) - allowed_chars}. OpenFold only supports 20 standard amino acids as inputs.')\n", - "\n", - "if ':' in input_sequence and weight_set != 'AlphaFold':\n", - " raise ValueError('Input sequence is a multimer, must select Alphafold weight set')\n", - "\n", - "import enum\n", - "@enum.unique\n", - "class ModelType(enum.Enum):\n", - " MONOMER = 0\n", - " MULTIMER = 1\n", - "\n", - "model_type_dict = {\n", - " 'monomer': ModelType.MONOMER,\n", - " 'multimer': ModelType.MULTIMER,\n", - "}\n", - "\n", - "model_type = model_type_dict[model_mode]\n", - "print(f'Length of input sequence : {len(input_sequence.replace(\":\", \"\"))}')\n", - "#@markdown After making your selections, execute this cell by pressing the\n", - "#@markdown *Play* button on the left." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "woIxeCPygt7K" - }, - "outputs": [], - "source": [ - "#@title Install third-party software\n", - "#@markdown Please execute this cell by pressing the *Play* button on\n", - "#@markdown the left.\n", - "\n", - "\n", - "#@markdown **Note**: This installs the software on the Colab\n", - "#@markdown notebook in the cloud and not on your computer.\n", - "\n", - "import os, time\n", - "from IPython.utils import io\n", - "from sys import version_info\n", - "import subprocess\n", - "\n", - "python_version = f\"{version_info.major}.{version_info.minor}\"\n", - "\n", - "\n", - "os.system(\"wget -qnc https://github.com/conda-forge/miniforge/releases/latest/download/Mambaforge-Linux-x86_64.sh\")\n", - "os.system(\"bash Mambaforge-Linux-x86_64.sh -bfp /usr/local\")\n", - "os.system(\"mamba config --set auto_update_conda false\")\n", - "os.system(f\"mamba install -y -c conda-forge -c bioconda kalign2=2.04 hhsuite=3.3.0 openmm=7.7.0 python={python_version} pdbfixer biopython=1.79\")\n", - "os.system(\"pip install -q torch ml_collections py3Dmol modelcif\")\n", - "\n", - "try:\n", - " with io.capture_output() as captured:\n", - "\n", - " # Create a ramdisk to store a database chunk to make Jackhmmer run fast.\n", - " %shell sudo apt install --quiet --yes hmmer\n", - " %shell sudo mkdir -m 777 --parents /tmp/ramdisk\n", - " %shell sudo mount -t tmpfs -o size=9G ramdisk /tmp/ramdisk\n", - "\n", - " %shell wget -q -P /content \\\n", - " https://git.scicore.unibas.ch/schwede/openstructure/-/raw/7102c63615b64735c4941278d92b554ec94415f8/modules/mol/alg/src/stereo_chemical_props.txt\n", - "\n", - " %shell mkdir -p /content/openfold/openfold/resources\n", - "\n", - " commit = \"e2e19f16676b1a409f9ba3a6f69b11ee7f5887c2\"\n", - " os.system(f\"pip install -q git+https://github.com/aqlaboratory/openfold.git@{commit}\")\n", - "\n", - " os.system(f\"cp -f -p /content/stereo_chemical_props.txt /usr/local/lib/python{python_version}/site-packages/openfold/resources/\")\n", - "\n", - "except subprocess.CalledProcessError as captured:\n", - " print(captured)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "VzJ5iMjTtoZw" - }, - "outputs": [], - "source": [ - "#@title Download model weights\n", - "#@markdown Please execute this cell by pressing the *Play* button on\n", - "#@markdown the left.\n", - "\n", - "# Define constants\n", - "GIT_REPO='https://github.com/aqlaboratory/openfold'\n", - "ALPHAFOLD_PARAM_SOURCE_URL = 'https://storage.googleapis.com/alphafold/alphafold_params_2022-12-06.tar'\n", - "OPENFOLD_PARAMS_DIR = './openfold/openfold/resources/openfold_params'\n", - "ALPHAFOLD_PARAMS_DIR = './openfold/openfold/resources/params'\n", - "ALPHAFOLD_PARAMS_PATH = os.path.join(\n", - " ALPHAFOLD_PARAMS_DIR, os.path.basename(ALPHAFOLD_PARAM_SOURCE_URL)\n", - ")\n", - "\n", - "try:\n", - " with io.capture_output() as captured:\n", - " if(weight_set == 'AlphaFold'):\n", - " %shell mkdir --parents \"{ALPHAFOLD_PARAMS_DIR}\"\n", - " %shell wget -O {ALPHAFOLD_PARAMS_PATH} {ALPHAFOLD_PARAM_SOURCE_URL}\n", - " %shell tar --extract --verbose --file=\"{ALPHAFOLD_PARAMS_PATH}\" \\\n", - " --directory=\"{ALPHAFOLD_PARAMS_DIR}\" --preserve-permissions\n", - " %shell rm \"{ALPHAFOLD_PARAMS_PATH}\"\n", - " elif(weight_set == 'OpenFold'):\n", - " # Install AWS CLI\n", - " %shell curl \"https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip\" -o \"awscliv2.zip\"\n", - " %shell unzip -qq awscliv2.zip\n", - " %shell 
sudo ./aws/install\n", - " %shell rm awscliv2.zip\n", - " %shell rm -rf ./aws\n", - " %shell mkdir --parents \"{OPENFOLD_PARAMS_DIR}\"\n", - "\n", - " %shell aws s3 cp \\\n", - " --no-sign-request \\\n", - " --region us-east-1 \\\n", - " s3://openfold/openfold_params \"{OPENFOLD_PARAMS_DIR}\" \\\n", - " --recursive\n", - " else:\n", - " raise ValueError(\"Invalid weight set\")\n", - "except subprocess.CalledProcessError as captured:\n", - " print(captured)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "_FpxxMo-mvcP" - }, - "outputs": [], - "source": [ - "#@title Import Python packages\n", - "#@markdown Please execute this cell by pressing the *Play* button on\n", - "#@markdown the left.\n", - "\n", - "import unittest.mock\n", - "import sys\n", - "from typing import Dict, Sequence\n", - "\n", - "sys.path.insert(0, f'/usr/local/lib/python{python_version}/dist-packages/')\n", - "sys.path.insert(0, f'/usr/local/lib/python{python_version}/site-packages/')\n", - "\n", - "# Allows us to skip installing these packages\n", - "unnecessary_modules = [\n", - " \"dllogger\",\n", - " \"pytorch_lightning\",\n", - " \"pytorch_lightning.utilities\",\n", - " \"pytorch_lightning.callbacks.early_stopping\",\n", - " \"pytorch_lightning.utilities.seed\",\n", - "]\n", - "for unnecessary_module in unnecessary_modules:\n", - " sys.modules[unnecessary_module] = unittest.mock.MagicMock()\n", - "\n", - "import os\n", - "\n", - "from urllib import request\n", - "from concurrent import futures\n", - "from google.colab import files\n", - "import json\n", - "from matplotlib import gridspec\n", - "import matplotlib.pyplot as plt\n", - "import numpy as np\n", - "import py3Dmol\n", - "import torch\n", - "import shutil\n", - "import tqdm\n", - "import tqdm.notebook\n", - "\n", - "TQDM_BAR_FORMAT = '{l_bar}{bar}| {n_fmt}/{total_fmt} [elapsed: {elapsed} remaining: {remaining}]'\n", - "\n", - "# Prevent shell magic being broken by openmm, prevent this cryptic error:\n", - "# \"NotImplementedError: A UTF-8 locale is required. Got ANSI_X3.4-1968\"\n", - "import locale\n", - "def getpreferredencoding(do_setlocale = True):\n", - " return \"UTF-8\"\n", - "locale.getpreferredencoding = getpreferredencoding\n", - "\n", - "from openfold import config\n", - "from openfold.data import feature_pipeline\n", - "from openfold.data import parsers\n", - "from openfold.data import data_pipeline\n", - "from openfold.data import msa_pairing\n", - "from openfold.data import feature_processing_multimer\n", - "from openfold.data.tools import jackhmmer\n", - "from openfold.model import model\n", - "from openfold.np import protein\n", - "from openfold.np.relax import relax\n", - "from openfold.np.relax.utils import overwrite_b_factors\n", - "from openfold.utils.import_weights import import_jax_weights_\n", - "from openfold.utils.tensor_utils import tensor_tree_map\n", - "\n", - "from IPython import display\n", - "from ipywidgets import GridspecLayout\n", - "from ipywidgets import Output" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "W4JpOs6oA-QS" - }, - "source": [ - "## Making a prediction\n", - "\n", - "Note that the search against databases and the actual prediction can take some time, from minutes to hours, depending on the length of the protein and what type of GPU you are allocated by Colab (see FAQ below)." 
- ] - }, - { - "cell_type": "code", - "source": [ - "#@title Search against genetic databases\n", - "\n", - "#@markdown Once this cell has been executed, you will see\n", - "#@markdown statistics about the multiple sequence alignment\n", - "#@markdown (MSA) that will be used by OpenFold. In particular,\n", - "#@markdown you’ll see how well each residue is covered by similar\n", - "#@markdown sequences in the MSA.\n", - "\n", - "# --- Find the closest source --\n", - "test_url_pattern = 'https://storage.googleapis.com/alphafold-colab{:s}/latest/uniref90_2021_03.fasta.1'\n", - "ex = futures.ThreadPoolExecutor(3)\n", - "def fetch(source):\n", - " request.urlretrieve(test_url_pattern.format(source))\n", - " return source\n", - "fs = [ex.submit(fetch, source) for source in ['', '-europe', '-asia']]\n", - "source = None\n", - "for f in futures.as_completed(fs):\n", - " source = f.result()\n", - " ex.shutdown()\n", - " break\n", - "\n", - "# Run the search against chunks of genetic databases (since the genetic\n", - "# databases don't fit in Colab ramdisk).\n", - "\n", - "jackhmmer_binary_path = '/usr/bin/jackhmmer'\n", - "\n", - "# --- Parse multiple sequences, if there are any ---\n", - "def split_multiple_sequences(sequence):\n", - " seqs = sequence.split(':')\n", - " sorted_seqs = sorted(seqs, key=lambda s: len(s))\n", - "\n", - " # TODO: Handle the homomer case when writing fasta sequences\n", - " fasta_path_tuples = []\n", - " for idx, seq in enumerate(set(sorted_seqs)):\n", - " fasta_path = f'target_{idx+1}.fasta'\n", - " with open(fasta_path, 'wt') as f:\n", - " f.write(f'>query\\n{seq}\\n')\n", - " fasta_path_tuples.append((seq, fasta_path))\n", - " fasta_path_by_seq = dict(fasta_path_tuples)\n", - "\n", - " return sorted_seqs, fasta_path_by_seq\n", - "\n", - "sequences, fasta_path_by_sequence = split_multiple_sequences(input_sequence)\n", - "db_results_by_sequence = {seq: {} for seq in fasta_path_by_sequence.keys()}\n", - "\n", - "DB_ROOT_PATH = f'https://storage.googleapis.com/alphafold-colab{source}/latest/'\n", - "db_configs = {}\n", - "db_configs['smallbfd'] = {\n", - " 'database_path': f'{DB_ROOT_PATH}uniref90_2021_03.fasta',\n", - " 'z_value': 65984053,\n", - " 'num_jackhmmer_chunks': 17,\n", - "}\n", - "db_configs['mgnify'] = {\n", - " 'database_path': f'{DB_ROOT_PATH}mgy_clusters_2022_05.fasta',\n", - " 'z_value': 304820129,\n", - " 'num_jackhmmer_chunks': 120,\n", - "}\n", - "db_configs['uniref90'] = {\n", - " 'database_path': f'{DB_ROOT_PATH}uniref90_2022_01.fasta',\n", - " 'z_value': 144113457,\n", - " 'num_jackhmmer_chunks': 62,\n", - "}\n", - "\n", - "# Search UniProt and construct the all_seq features only for heteromers, not homomers.\n", - "if model_type == ModelType.MULTIMER and len(set(sequences)) > 1:\n", - " db_configs['uniprot'] = {\n", - " 'database_path': f'{DB_ROOT_PATH}uniprot_2021_04.fasta',\n", - " 'z_value': 225013025 + 565928,\n", - " 'num_jackhmmer_chunks': 101,\n", - " }\n", - "\n", - "total_jackhmmer_chunks = sum([d['num_jackhmmer_chunks'] for d in db_configs.values()])\n", - "with tqdm.notebook.tqdm(total=total_jackhmmer_chunks, bar_format=TQDM_BAR_FORMAT) as pbar:\n", - " def jackhmmer_chunk_callback(i):\n", - " pbar.update(n=1)\n", - "\n", - " for db_name, db_config in db_configs.items():\n", - " pbar.set_description(f'Searching {db_name}')\n", - " jackhmmer_runner = jackhmmer.Jackhmmer(\n", - " binary_path=jackhmmer_binary_path,\n", - " database_path=db_config['database_path'],\n", - " get_tblout=True,\n", - " 
num_streamed_chunks=db_config['num_jackhmmer_chunks'],\n", - " streaming_callback=jackhmmer_chunk_callback,\n", - " z_value=db_config['z_value'])\n", - "\n", - " db_results = jackhmmer_runner.query_multiple(fasta_path_by_sequence.values())\n", - " for seq, result in zip(fasta_path_by_sequence.keys(), db_results):\n", - " db_results_by_sequence[seq][db_name] = result\n", - "\n", - "\n", - "# --- Extract the MSAs and visualize ---\n", - "# Extract the MSAs from the Stockholm files.\n", - "# NB: deduplication happens later in data_pipeline.make_msa_features.\n", - "\n", - "MAX_HITS_BY_DB = {\n", - " 'uniref90': 10000,\n", - " 'smallbfd': 5000,\n", - " 'mgnify': 501,\n", - " 'uniprot': 50000,\n", - "}\n", - "\n", - "msas_by_seq_by_db = {seq: {} for seq in sequences}\n", - "full_msa_by_seq = {seq: [] for seq in sequences}\n", - "\n", - "for seq, sequence_result in db_results_by_sequence.items():\n", - " print(f'parsing_results_for_sequence {seq}')\n", - " for db_name, db_results in sequence_result.items():\n", - " unsorted_results = []\n", - " for i, result in enumerate(db_results):\n", - " msa_obj = parsers.parse_stockholm(result['sto'])\n", - " e_values_dict = parsers.parse_e_values_from_tblout(result['tbl'])\n", - " target_names = msa_obj.descriptions\n", - " e_values = [e_values_dict[t.split('/')[0]] for t in target_names]\n", - " zipped_results = zip(msa_obj.sequences, msa_obj.deletion_matrix, target_names, e_values)\n", - " if i != 0:\n", - " # Only take query from the first chunk\n", - " zipped_results = [x for x in zipped_results if x[2] != 'query']\n", - " unsorted_results.extend(zipped_results)\n", - " sorted_by_evalue = sorted(unsorted_results, key=lambda x: x[3])\n", - " msas, del_matrix, targets, _ = zip(*sorted_by_evalue)\n", - " db_msas = parsers.Msa(msas, del_matrix, targets)\n", - " if db_msas:\n", - " if db_name in MAX_HITS_BY_DB:\n", - " db_msas.truncate(MAX_HITS_BY_DB[db_name])\n", - " msas_by_seq_by_db[seq][db_name] = db_msas\n", - " full_msa_by_seq[seq].extend(db_msas.sequences)\n", - " msa_size = len(set(db_msas.sequences))\n", - " print(f'{msa_size} Sequences Found in {db_name}')\n", - "\n", - "\n", - "fig = plt.figure(figsize=(12, 3))\n", - "max_num_alignments = 0\n", - "\n", - "for seq_idx, seq in enumerate(set(sequences)):\n", - " full_msas = full_msa_by_seq[seq]\n", - " deduped_full_msa = list(dict.fromkeys(full_msas))\n", - " total_msa_size = len(deduped_full_msa)\n", - " print(f'\\n{total_msa_size} Sequences Found in Total\\n')\n", - "\n", - " aa_map = {restype: i for i, restype in enumerate('ABCDEFGHIJKLMNOPQRSTUVWXYZ-')}\n", - " msa_arr = np.array([[aa_map[aa] for aa in seq] for seq in deduped_full_msa])\n", - " num_alignments, num_res = msa_arr.shape\n", - " plt.plot(np.sum(msa_arr != aa_map['-'], axis=0), label=f'Chain {seq_idx}')\n", - " max_num_alignments = max(num_alignments, max_num_alignments)\n", - "\n", - "\n", - "plt.title('Per-Residue Count of Non-Gap Amino Acids in the MSA')\n", - "plt.ylabel('Non-Gap Count')\n", - "plt.yticks(range(0, max_num_alignments + 1, max(1, int(max_num_alignments / 3))))\n", - "plt.legend()\n", - "plt.show()" - ], - "metadata": { - "id": "o7BqQN_gfYtq" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "XUo6foMQxwS2" - }, - "outputs": [], - "source": [ - "#@title Run OpenFold and download prediction\n", - "\n", - "#@markdown Once this cell has been executed, a zip-archive with\n", - "#@markdown the obtained prediction will be automatically 
downloaded\n", - "#@markdown to your computer.\n", - "\n", - "# Color bands for visualizing plddt\n", - "PLDDT_BANDS = [\n", - " (0, 50, '#FF7D45'),\n", - " (50, 70, '#FFDB13'),\n", - " (70, 90, '#65CBF3'),\n", - " (90, 100, '#0053D6')\n", - "]\n", - "\n", - "# --- Run the model ---\n", - "if model_type == ModelType.MONOMER:\n", - " model_names = [\n", - " 'finetuning_3.pt',\n", - " 'finetuning_4.pt',\n", - " 'finetuning_5.pt',\n", - " 'finetuning_ptm_2.pt',\n", - " 'finetuning_no_templ_ptm_1.pt'\n", - " ]\n", - "elif model_type == ModelType.MULTIMER:\n", - " model_names = [\n", - " 'model_1_multimer_v3',\n", - " 'model_2_multimer_v3',\n", - " 'model_3_multimer_v3',\n", - " 'model_4_multimer_v3',\n", - " 'model_5_multimer_v3',\n", - " ]\n", - "\n", - "def _placeholder_template_feats(num_templates_, num_res_):\n", - " return {\n", - " 'template_aatype': np.zeros((num_templates_, num_res_, 22), dtype=np.int64),\n", - " 'template_all_atom_positions': np.zeros((num_templates_, num_res_, 37, 3), dtype=np.float32),\n", - " 'template_all_atom_mask': np.zeros((num_templates_, num_res_, 37), dtype=np.float32),\n", - " 'template_domain_names': np.zeros((num_templates_,), dtype=np.float32),\n", - " 'template_sum_probs': np.zeros((num_templates_, 1), dtype=np.float32),\n", - " }\n", - "\n", - "\n", - "def make_features(\n", - " sequences: Sequence[str],\n", - " msas_by_seq_by_db: Dict[str, Dict[str, parsers.Msa]],\n", - " model_type: ModelType):\n", - " num_templates = 1 # Placeholder for generating fake template features\n", - " feature_dict = {}\n", - "\n", - " for idx, seq in enumerate(sequences, start=1):\n", - " _chain_id = f'chain_{idx}'\n", - " num_res = len(seq)\n", - "\n", - " feats = data_pipeline.make_sequence_features(seq, _chain_id, num_res)\n", - " msas_without_uniprot = [msas_by_seq_by_db[seq][db] for db in db_configs.keys() if db != 'uniprot']\n", - " msa_feats = data_pipeline.make_msa_features(msas_without_uniprot)\n", - " feats.update(msa_feats)\n", - " feats.update(_placeholder_template_feats(num_templates, num_res))\n", - "\n", - " if model_type == ModelType.MONOMER:\n", - " feature_dict[seq] = feats\n", - " if model_type == ModelType.MULTIMER:\n", - " # Perform extra pair processing steps for heteromers\n", - " if len(set(sequences)) > 1:\n", - " uniprot_msa = msas_by_seq_by_db[seq]['uniprot']\n", - " uniprot_msa_features = data_pipeline.make_msa_features([uniprot_msa])\n", - " valid_feat_names = msa_pairing.MSA_FEATURES + (\n", - " 'msa_species_identifiers',\n", - " )\n", - " pair_feats = {\n", - " f'{k}_all_seq': v for k, v in uniprot_msa_features.items()\n", - " if k in valid_feat_names\n", - " }\n", - " feats.update(pair_feats)\n", - "\n", - " feats = data_pipeline.convert_monomer_features(feats, _chain_id)\n", - " feature_dict[_chain_id] = feats\n", - "\n", - " if model_type == ModelType.MONOMER:\n", - " np_example = feature_dict[sequences[0]]\n", - " elif model_type == ModelType.MULTIMER:\n", - " all_chain_feats = data_pipeline.add_assembly_features(feature_dict)\n", - " features = feature_processing_multimer.pair_and_merge(all_chain_features=all_chain_feats)\n", - " np_example = data_pipeline.pad_msa(features, 512)\n", - "\n", - " return np_example\n", - "\n", - "\n", - "output_dir = 'prediction'\n", - "os.makedirs(output_dir, exist_ok=True)\n", - "\n", - "plddts = {}\n", - "pae_outputs = {}\n", - "weighted_ptms = {}\n", - "unrelaxed_proteins = {}\n", - "\n", - "with tqdm.notebook.tqdm(total=len(model_names), bar_format=TQDM_BAR_FORMAT) as pbar:\n", - " for i, model_name in 
enumerate(model_names, start = 1):\n", - " pbar.set_description(f'Running {model_name}')\n", - "\n", - " feature_dict = make_features(sequences, msas_by_seq_by_db, model_type)\n", - "\n", - " if(weight_set == \"AlphaFold\"):\n", - " if model_type == ModelType.MONOMER:\n", - " config_preset = f\"model_{i}\"\n", - " elif model_type == ModelType.MULTIMER:\n", - " config_preset = f'model_{i}_multimer_v3'\n", - " else:\n", - " if(\"_no_templ_\" in model_name):\n", - " config_preset = \"model_3\"\n", - " else:\n", - " config_preset = \"model_1\"\n", - " if(\"_ptm_\" in model_name):\n", - " config_preset += \"_ptm\"\n", - "\n", - " cfg = config.model_config(config_preset)\n", - "\n", - " # Force the model to only use 3 recycling updates\n", - " cfg.data.common.max_recycling_iters = 3\n", - " cfg.model.recycle_early_stop_tolerance = -1\n", - "\n", - " openfold_model = model.AlphaFold(cfg)\n", - " openfold_model = openfold_model.eval()\n", - " if(weight_set == \"AlphaFold\"):\n", - " params_name = os.path.join(\n", - " ALPHAFOLD_PARAMS_DIR, f\"params_{config_preset}.npz\"\n", - " )\n", - " import_jax_weights_(openfold_model, params_name, version=config_preset)\n", - " elif(weight_set == \"OpenFold\"):\n", - " params_name = os.path.join(\n", - " OPENFOLD_PARAMS_DIR,\n", - " model_name,\n", - " )\n", - " d = torch.load(params_name)\n", - " openfold_model.load_state_dict(d)\n", - " else:\n", - " raise ValueError(f\"Invalid weight set: {weight_set}\")\n", - "\n", - " openfold_model = openfold_model.cuda()\n", - "\n", - " pipeline = feature_pipeline.FeaturePipeline(cfg.data)\n", - " processed_feature_dict = pipeline.process_features(\n", - " feature_dict,\n", - " mode='predict',\n", - " is_multimer = (model_type == ModelType.MULTIMER),\n", - " )\n", - "\n", - " processed_feature_dict = tensor_tree_map(\n", - " lambda t: t.cuda(), processed_feature_dict\n", - " )\n", - "\n", - " with torch.no_grad():\n", - " prediction_result = openfold_model(processed_feature_dict)\n", - "\n", - " # Move the batch and output to np for further processing\n", - " processed_feature_dict = tensor_tree_map(\n", - " lambda t: np.array(t[..., -1].cpu()), processed_feature_dict\n", - " )\n", - " prediction_result = tensor_tree_map(\n", - " lambda t: np.array(t.cpu()), prediction_result\n", - " )\n", - "\n", - " mean_plddt = prediction_result['plddt'].mean()\n", - "\n", - " if model_type == ModelType.MONOMER:\n", - " if 'predicted_aligned_error' in prediction_result:\n", - " pae_outputs[model_name] = (\n", - " prediction_result['predicted_aligned_error'],\n", - " prediction_result['max_predicted_aligned_error']\n", - " )\n", - " else:\n", - " # Get the pLDDT confidence metrics. 
Do not put pTM models here as they\n", - " # should never get selected.\n", - " plddts[model_name] = prediction_result['plddt']\n", - " elif model_type == ModelType.MULTIMER:\n", - " # Multimer models are sorted by pTM+ipTM.\n", - " plddts[model_name] = prediction_result['plddt']\n", - " pae_outputs[model_name] = (prediction_result['predicted_aligned_error'],\n", - " prediction_result['max_predicted_aligned_error'])\n", - "\n", - " weighted_ptms[model_name] = prediction_result['weighted_ptm_score']\n", - "\n", - " # Set the b-factors to the per-residue plddt.\n", - " final_atom_mask = prediction_result['final_atom_mask']\n", - " b_factors = prediction_result['plddt'][:, None] * final_atom_mask\n", - " unrelaxed_protein = protein.from_prediction(\n", - " processed_feature_dict,\n", - " prediction_result,\n", - " remove_leading_feature_dimension=False,\n", - " b_factors=b_factors,\n", - " )\n", - " unrelaxed_proteins[model_name] = unrelaxed_protein\n", - "\n", - " # Delete unused outputs to save memory.\n", - " del openfold_model\n", - " del processed_feature_dict\n", - " del prediction_result\n", - " pbar.update(n=1)\n", - "\n", - " # Find the best model according to the mean pLDDT.\n", - " if model_type == ModelType.MONOMER:\n", - " best_model_name = max(plddts.keys(), key=lambda x: plddts[x].mean())\n", - " elif model_type == ModelType.MULTIMER:\n", - " best_model_name = max(weighted_ptms.keys(), key=lambda x: weighted_ptms[x])\n", - " best_pdb = protein.to_pdb(unrelaxed_proteins[best_model_name])\n", - "\n", - " # --- AMBER relax the best model ---\n", - " if(relax_prediction):\n", - " pbar.set_description(f'AMBER relaxation')\n", - " amber_relaxer = relax.AmberRelaxation(\n", - " max_iterations=0,\n", - " tolerance=2.39,\n", - " stiffness=10.0,\n", - " exclude_residues=[],\n", - " max_outer_iterations=20,\n", - " use_gpu=True,\n", - " )\n", - " relaxed_pdb, _, _ = amber_relaxer.process(\n", - " prot=unrelaxed_proteins[best_model_name]\n", - " )\n", - " best_pdb = relaxed_pdb\n", - "\n", - " # Write out the prediction\n", - " pred_output_path = os.path.join(output_dir, 'selected_prediction.pdb')\n", - " with open(pred_output_path, 'w') as f:\n", - " f.write(best_pdb)\n", - "\n", - " pbar.update(n=1) # Finished AMBER relax.\n", - "\n", - "# Construct multiclass b-factors to indicate confidence bands\n", - "# 0=very low, 1=low, 2=confident, 3=very high\n", - "banded_b_factors = []\n", - "for plddt in plddts[best_model_name]:\n", - " for idx, (min_val, max_val, _) in enumerate(PLDDT_BANDS):\n", - " if plddt >= min_val and plddt <= max_val:\n", - " banded_b_factors.append(idx)\n", - " break\n", - "banded_b_factors = np.array(banded_b_factors)[:, None] * final_atom_mask\n", - "to_visualize_pdb = overwrite_b_factors(best_pdb, banded_b_factors)\n", - "\n", - "# --- Visualise the prediction & confidence ---\n", - "show_sidechains = True\n", - "def plot_plddt_legend():\n", - " \"\"\"Plots the legend for pLDDT.\"\"\"\n", - " thresh = [\n", - " 'Very low (pLDDT < 50)',\n", - " 'Low (70 > pLDDT > 50)',\n", - " 'Confident (90 > pLDDT > 70)',\n", - " 'Very high (pLDDT > 90)']\n", - "\n", - " colors = [x[2] for x in PLDDT_BANDS]\n", - "\n", - " plt.figure(figsize=(2, 2))\n", - " for c in colors:\n", - " plt.bar(0, 0, color=c)\n", - " plt.legend(thresh, frameon=False, loc='center', fontsize=20)\n", - " plt.xticks([])\n", - " plt.yticks([])\n", - " ax = plt.gca()\n", - " ax.spines['right'].set_visible(False)\n", - " ax.spines['top'].set_visible(False)\n", - " ax.spines['left'].set_visible(False)\n", - " 
ax.spines['bottom'].set_visible(False)\n", - " plt.title('Model Confidence', fontsize=20, pad=20)\n", - " return plt\n", - "\n", - "# Show the structure coloured by chain if the multimer model has been used.\n", - "if model_type == ModelType.MULTIMER:\n", - " multichain_view = py3Dmol.view(width=800, height=600)\n", - " multichain_view.addModelsAsFrames(to_visualize_pdb)\n", - " multichain_style = {'cartoon': {'colorscheme': 'chain'}}\n", - " multichain_view.setStyle({'model': -1}, multichain_style)\n", - " multichain_view.zoomTo()\n", - " multichain_view.show()\n", - "\n", - "# Color the structure by per-residue pLDDT\n", - "color_map = {i: bands[2] for i, bands in enumerate(PLDDT_BANDS)}\n", - "view = py3Dmol.view(width=800, height=600)\n", - "view.addModelsAsFrames(to_visualize_pdb)\n", - "style = {'cartoon': {\n", - " 'colorscheme': {\n", - " 'prop': 'b',\n", - " 'map': color_map}\n", - " }}\n", - "if show_sidechains:\n", - " style['stick'] = {}\n", - "view.setStyle({'model': -1}, style)\n", - "view.zoomTo()\n", - "\n", - "grid = GridspecLayout(1, 2)\n", - "out = Output()\n", - "with out:\n", - " view.show()\n", - "grid[0, 0] = out\n", - "\n", - "out = Output()\n", - "with out:\n", - " plot_plddt_legend().show()\n", - "grid[0, 1] = out\n", - "\n", - "display.display(grid)\n", - "\n", - "# Display pLDDT and predicted aligned error (if output by the model).\n", - "if pae_outputs:\n", - " num_plots = 2\n", - "else:\n", - " num_plots = 1\n", - "\n", - "plt.figure(figsize=[8 * num_plots, 6])\n", - "plt.subplot(1, num_plots, 1)\n", - "plt.plot(plddts[best_model_name])\n", - "plt.title('Predicted LDDT')\n", - "plt.xlabel('Residue')\n", - "plt.ylabel('pLDDT')\n", - "\n", - "if num_plots == 2:\n", - " plt.subplot(1, 2, 2)\n", - " pae, max_pae = list(pae_outputs.values())[0]\n", - " plt.imshow(pae, vmin=0., vmax=max_pae, cmap='Greens_r')\n", - " plt.colorbar(fraction=0.046, pad=0.04)\n", - "\n", - " # Display lines at chain boundaries.\n", - " best_unrelaxed_prot = unrelaxed_proteins[best_model_name]\n", - " total_num_res = best_unrelaxed_prot.residue_index.shape[-1]\n", - " chain_ids = best_unrelaxed_prot.chain_index\n", - " for chain_boundary in np.nonzero(chain_ids[:-1] - chain_ids[1:]):\n", - " if chain_boundary.size:\n", - " plt.plot([0, total_num_res], [chain_boundary, chain_boundary], color='red')\n", - " plt.plot([chain_boundary, chain_boundary], [0, total_num_res], color='red')\n", - " plt.title('Predicted Aligned Error')\n", - " plt.xlabel('Scored residue')\n", - " plt.ylabel('Aligned residue')\n", - "\n", - "# Save pLDDT and predicted aligned error (if it exists)\n", - "pae_output_path = os.path.join(output_dir, 'predicted_aligned_error.json')\n", - "if pae_outputs:\n", - " # Save predicted aligned error in the same format as the AF EMBL DB\n", - " rounded_errors = np.round(pae.astype(np.float64), decimals=1)\n", - " indices = np.indices((len(rounded_errors), len(rounded_errors))) + 1\n", - " indices_1 = indices[0].flatten().tolist()\n", - " indices_2 = indices[1].flatten().tolist()\n", - " pae_data = json.dumps([{\n", - " 'residue1': indices_1,\n", - " 'residue2': indices_2,\n", - " 'distance': rounded_errors.flatten().tolist(),\n", - " 'max_predicted_aligned_error': max_pae.item()\n", - " }],\n", - " indent=None,\n", - " separators=(',', ':'))\n", - " with open(pae_output_path, 'w') as f:\n", - " f.write(pae_data)\n", - "\n", - "\n", - "# --- Download the predictions ---\n", - "shutil.make_archive(base_name='prediction', format='zip', root_dir=output_dir)\n", - 
"files.download(f'{output_dir}.zip')" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "lUQAn5LYC5n4" - }, - "source": [ - "### Interpreting the prediction\n", - "\n", - "Please see the [AlphaFold methods paper](https://www.nature.com/articles/s41586-021-03819-2) and the [AlphaFold predictions of the human proteome paper](https://www.nature.com/articles/s41586-021-03828-1), as well as [DeepMind's FAQ](https://alphafold.ebi.ac.uk/faq) on how to interpret AlphaFold/OpenFold predictions. More information about the predictions of the AlphaFold Multimer model can be found in the [Alphafold Multimer paper](https://www.biorxiv.org/content/10.1101/2022.03.11.484043v3.full.pdf)." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "jeb2z8DIA4om" - }, - "source": [ - "## FAQ & Troubleshooting\n", - "\n", - "\n", - "* How do I get a predicted protein structure for my protein?\n", - " * Click on the _Connect_ button on the top right to get started.\n", - " * Paste the amino acid sequence of your protein (without any headers) into the “Enter the amino acid sequence to fold”.\n", - " * Run all cells in the Colab, either by running them individually (with the play button on the left side) or via _Runtime_ > _Run all._\n", - " * The predicted protein structure will be downloaded once all cells have been executed. Note: This can take minutes to hours - see below.\n", - "* How long will this take?\n", - " * Downloading the OpenFold source code can take up to a few minutes.\n", - " * Downloading and installing the third-party software can take up to a few minutes.\n", - " * The search against genetic databases can take minutes to hours.\n", - " * Running OpenFold and generating the prediction can take minutes to hours, depending on the length of your protein and on which GPU-type Colab has assigned you.\n", - "* My Colab no longer seems to be doing anything, what should I do?\n", - " * Some steps may take minutes to hours to complete.\n", - " * Sometimes, running the \"installation\" cells more than once can corrupt the OpenFold installation.\n", - " * If nothing happens or if you receive an error message, try restarting your Colab runtime via _Runtime_ > _Restart runtime_.\n", - " * If this doesn’t help, reset your Colab runtime via _Runtime_ > _Factory reset runtime_.\n", - "* How does what's run in this notebook compare to the full versions of Alphafold/Openfold?\n", - " * This Colab version of OpenFold searches a selected portion of the BFD dataset and currently doesn’t use templates, so its accuracy is reduced in comparison to the full version, which is analogous to what's described in the [AlphaFold paper](https://doi.org/10.1038/s41586-021-03819-2) and [Github repo](https://github.com/deepmind/alphafold/). The full version of OpenFold can be run from our own [GitHub repo](https://github.com/aqlaboratory/openfold).\n", - "* What is a Colab?\n", - " * See the [Colab FAQ](https://research.google.com/colaboratory/faq.html).\n", - "* I received a warning “Notebook requires high RAM”, what do I do?\n", - " * The resources allocated to your Colab vary. See the [Colab FAQ](https://research.google.com/colaboratory/faq.html) for more details.\n", - " * You can execute the Colab nonetheless.\n", - "* I received an error “Colab CPU runtime not supported” or “No GPU/TPU found”, what do I do?\n", - " * Colab CPU runtime is not supported. 
Try changing your runtime via _Runtime_ > _Change runtime type_ > _Hardware accelerator_ > _GPU_.\n", - " * The type of GPU allocated to your Colab varies. See the [Colab FAQ](https://research.google.com/colaboratory/faq.html) for more details.\n", - " * If you receive “Cannot connect to GPU backend”, you can try again later to see if Colab allocates you a GPU.\n", - " * [Colab Pro](https://colab.research.google.com/signup) offers priority access to GPUs.\n", - "* Does this tool install anything on my computer?\n", - " * No, everything happens in the cloud on Google Colab.\n", - " * At the end of the Colab execution a zip-archive with the obtained prediction will be automatically downloaded to your computer.\n", - "* How should I share feedback and bug reports?\n", - " * Please share any feedback and bug reports as an [issue](https://github.com/aqlaboratory/openfold/issues) on Github.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "YfPhvYgKC81B" - }, - "source": [ - "# License and Disclaimer\n", - "\n", - "This Colab notebook and other information provided is for theoretical modelling only, caution should be exercised in its use. It is provided ‘as-is’ without any warranty of any kind, whether expressed or implied. Information is not intended to be a substitute for professional medical advice, diagnosis, or treatment, and does not constitute medical or other professional advice.\n", - "\n", - "## AlphaFold/OpenFold Code License\n", - "\n", - "Copyright 2021 AlQuraishi Laboratory\n", - "\n", - "Copyright 2021 DeepMind Technologies Limited.\n", - "\n", - "Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0.\n", - "\n", - "Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.\n", - "\n", - "## Model Parameters License\n", - "\n", - "DeepMind's AlphaFold parameters are made available under the terms of the Creative Commons Attribution 4.0 International (CC BY 4.0) license. You can find details at: https://creativecommons.org/licenses/by/4.0/legalcode\n", - "\n", - "\n", - "## Third-party software\n", - "\n", - "Use of the third-party software, libraries or code referred to in this notebook may be governed by separate terms and conditions or license provisions. Your use of the third-party software, libraries or code is subject to any such terms and you should check that you can comply with any applicable restrictions or terms and conditions before use.\n", - "\n", - "\n", - "## Mirrored Databases\n", - "\n", - "The following databases have been mirrored by DeepMind, and are available with reference to the following:\n", - "* UniRef90: v2021\\_03 (unmodified), by The UniProt Consortium, available under a [Creative Commons Attribution-NoDerivatives 4.0 International License](http://creativecommons.org/licenses/by-nd/4.0/).\n", - "* MGnify: v2019\\_05 (unmodified), by Mitchell AL et al., available free of all copyright restrictions and made fully and freely available for both non-commercial and commercial use under [CC0 1.0 Universal (CC0 1.0) Public Domain Dedication](https://creativecommons.org/publicdomain/zero/1.0/).\n", - "* BFD: (modified), by Steinegger M. 
and Söding J., modified by DeepMind, available under a [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by/4.0/). See the Methods section of the [AlphaFold proteome paper](https://www.nature.com/articles/s41586-021-03828-1) for details." - ] - } - ], - "metadata": { - "colab": { - "provenance": [], - "gpuType": "T4", - "toc_visible": true - }, - "kernelspec": { - "display_name": "Python 3", - "name": "python3" - }, - "language_info": { - "name": "python" - }, - "accelerator": "GPU" + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "view-in-github" + }, + "source": [ + "\"Open" + ] }, - "nbformat": 4, - "nbformat_minor": 0 -} \ No newline at end of file + { + "cell_type": "markdown", + "metadata": { + "id": "pc5-mbsX9PZC" + }, + "source": [ + "# OpenFold Colab\n", + "\n", + "Runs a simplified version of [OpenFold](https://github.com/aqlaboratory/openfold) on a target sequence. Adapted from DeepMind's [official AlphaFold Colab](https://colab.research.google.com/github/deepmind/alphafold/blob/main/notebooks/AlphaFold.ipynb).\n", + "\n", + "**Differences to AlphaFold v2.0**\n", + "\n", + "OpenFold is a trainable PyTorch reimplementation of AlphaFold 2. For the purposes of inference, it is practically identical to the original (\"practically\" because ensembling is excluded from OpenFold (recycling is enabled, however)).\n", + "\n", + "In this notebook, OpenFold is run with your choice of our original OpenFold parameters or DeepMind's publicly released parameters for AlphaFold 2.\n", + "\n", + "**Note**\n", + "\n", + "Like DeepMind's official Colab, this notebook uses **no templates (homologous structures)** and a selected portion of the full [BFD database](https://bfd.mmseqs.com/).\n", + "\n", + "**Citing this work**\n", + "\n", + "Any publication that discloses findings arising from using this notebook should [cite](https://github.com/deepmind/alphafold/#citing-this-work) DeepMind's [AlphaFold paper](https://doi.org/10.1038/s41586-021-03819-2).\n", + "\n", + "**Licenses**\n", + "\n", + "This Colab supports inference with the [AlphaFold model parameters](https://github.com/deepmind/alphafold/#model-parameters-license), made available under the Creative Commons Attribution 4.0 International ([CC BY 4.0](https://creativecommons.org/licenses/by/4.0/legalcode)) license. The Colab itself is provided under the [Apache 2.0 license](https://www.apache.org/licenses/LICENSE-2.0). See the full license statement below.\n", + "\n", + "**More information**\n", + "\n", + "You can find more information about how AlphaFold/OpenFold works in DeepMind's two Nature papers:\n", + "\n", + "* [AlphaFold methods paper](https://www.nature.com/articles/s41586-021-03819-2)\n", + "* [AlphaFold predictions of the human proteome paper](https://www.nature.com/articles/s41586-021-03828-1)\n", + "\n", + "FAQ on how to interpret AlphaFold/OpenFold predictions are [here](https://alphafold.ebi.ac.uk/faq)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "rowN0bVYLe9n" + }, + "source": [ + "#@markdown ### Enter the amino acid sequence to fold ⬇️\n", + "#@markdown For multiple sequences, separate sequences with a colon `:`\n", + "input_sequence = 'MKLKQVADKLEEVASKLYHNANELARVAKLLGER:MKLKQVADKLEEVASKLYHNANELARVAKLLGER: MKLKQVADKLEEVASKLYHNANELARVAKLLGER:MKLKQVADKLEEVASKLYHNANELARVAKLLGER' #@param {type:\"string\"}\n", + "\n", + "#@markdown ### Configure the model ⬇️\n", + "\n", + "weight_set = 'AlphaFold' #@param [\"OpenFold\", \"AlphaFold\"]\n", + "model_mode = 'multimer' #@param [\"monomer\", \"multimer\"]\n", + "relax_prediction = True #@param {type:\"boolean\"}\n", + "\n", + "\n", + "# Remove all whitespaces, tabs and end lines; upper-case\n", + "input_sequence = input_sequence.translate(str.maketrans('', '', ' \\n\\t')).upper()\n", + "aatypes = set('ACDEFGHIKLMNPQRSTVWY') # 20 standard aatypes\n", + "allowed_chars = aatypes.union({':'})\n", + "if not set(input_sequence).issubset(allowed_chars):\n", + " raise Exception(f'Input sequence contains non-amino acid letters: {set(input_sequence) - allowed_chars}. OpenFold only supports 20 standard amino acids as inputs.')\n", + "\n", + "if ':' in input_sequence and weight_set != 'AlphaFold':\n", + " raise ValueError('Input sequence is a multimer, must select Alphafold weight set')\n", + "\n", + "import enum\n", + "@enum.unique\n", + "class ModelType(enum.Enum):\n", + " MONOMER = 0\n", + " MULTIMER = 1\n", + "\n", + "model_type_dict = {\n", + " 'monomer': ModelType.MONOMER,\n", + " 'multimer': ModelType.MULTIMER,\n", + "}\n", + "\n", + "model_type = model_type_dict[model_mode]\n", + "print(f'Length of input sequence : {len(input_sequence.replace(\":\", \"\"))}')\n", + "#@markdown After making your selections, execute this cell by pressing the\n", + "#@markdown *Play* button on the left." 
+ ], + "outputs": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "woIxeCPygt7K" + }, + "source": [ + "#@title Install third-party software\n", + "#@markdown Please execute this cell by pressing the *Play* button on\n", + "#@markdown the left.\n", + "\n", + "\n", + "#@markdown **Note**: This installs the software on the Colab\n", + "#@markdown notebook in the cloud and not on your computer.\n", + "\n", + "import os, time\n", + "from IPython.utils import io\n", + "from sys import version_info\n", + "import subprocess\n", + "\n", + "python_version = f\"{version_info.major}.{version_info.minor}\"\n", + "\n", + "\n", + "os.system(\"wget -qnc https://github.com/conda-forge/miniforge/releases/latest/download/Mambaforge-Linux-x86_64.sh\")\n", + "os.system(\"bash Mambaforge-Linux-x86_64.sh -bfp /usr/local\")\n", + "os.system(\"mamba config --set auto_update_conda false\")\n", + "os.system(f\"mamba install -y -c conda-forge -c bioconda kalign2=2.04 hhsuite=3.3.0 openmm=7.7.0 python={python_version} pdbfixer biopython=1.79\")\n", + "os.system(\"pip install -q torch ml_collections py3Dmol modelcif\")\n", + "\n", + "try:\n", + " with io.capture_output() as captured:\n", + "\n", + " # Create a ramdisk to store a database chunk to make Jackhmmer run fast.\n", + " %shell sudo apt install --quiet --yes hmmer\n", + " %shell sudo mkdir -m 777 --parents /tmp/ramdisk\n", + " %shell sudo mount -t tmpfs -o size=9G ramdisk /tmp/ramdisk\n", + "\n", + " %shell wget -q -P /content \\\n", + " https://git.scicore.unibas.ch/schwede/openstructure/-/raw/7102c63615b64735c4941278d92b554ec94415f8/modules/mol/alg/src/stereo_chemical_props.txt\n", + "\n", + " %shell mkdir -p /content/openfold/openfold/resources\n", + "\n", + " commit = \"e2e19f16676b1a409f9ba3a6f69b11ee7f5887c2\"\n", + " os.system(f\"pip install -q git+https://github.com/aqlaboratory/openfold.git@{commit}\")\n", + "\n", + " os.system(f\"cp -f -p /content/stereo_chemical_props.txt /usr/local/lib/python{python_version}/site-packages/openfold/resources/\")\n", + "\n", + "except subprocess.CalledProcessError as captured:\n", + " print(captured)" + ], + "outputs": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "VzJ5iMjTtoZw" + }, + "source": [ + "#@title Download model weights\n", + "#@markdown Please execute this cell by pressing the *Play* button on\n", + "#@markdown the left.\n", + "\n", + "# Define constants\n", + "GIT_REPO='https://github.com/aqlaboratory/openfold'\n", + "ALPHAFOLD_PARAM_SOURCE_URL = 'https://storage.googleapis.com/alphafold/alphafold_params_2022-12-06.tar'\n", + "OPENFOLD_PARAMS_DIR = './openfold/openfold/resources/openfold_params'\n", + "ALPHAFOLD_PARAMS_DIR = './openfold/openfold/resources/params'\n", + "ALPHAFOLD_PARAMS_PATH = os.path.join(\n", + " ALPHAFOLD_PARAMS_DIR, os.path.basename(ALPHAFOLD_PARAM_SOURCE_URL)\n", + ")\n", + "\n", + "try:\n", + " with io.capture_output() as captured:\n", + " if(weight_set == 'AlphaFold'):\n", + " %shell mkdir --parents \"{ALPHAFOLD_PARAMS_DIR}\"\n", + " %shell wget -O {ALPHAFOLD_PARAMS_PATH} {ALPHAFOLD_PARAM_SOURCE_URL}\n", + " %shell tar --extract --verbose --file=\"{ALPHAFOLD_PARAMS_PATH}\" \\\n", + " --directory=\"{ALPHAFOLD_PARAMS_DIR}\" --preserve-permissions\n", + " %shell rm \"{ALPHAFOLD_PARAMS_PATH}\"\n", + " elif(weight_set == 'OpenFold'):\n", + " # Install AWS CLI\n", + " %shell curl \"https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip\" -o \"awscliv2.zip\"\n", + " %shell unzip -qq awscliv2.zip\n", + " %shell 
sudo ./aws/install\n", + " %shell rm awscliv2.zip\n", + " %shell rm -rf ./aws\n", + " %shell mkdir --parents \"{OPENFOLD_PARAMS_DIR}\"\n", + "\n", + " %shell aws s3 cp \\\n", + " --no-sign-request \\\n", + " --region us-east-1 \\\n", + " s3://openfold/openfold_params \"{OPENFOLD_PARAMS_DIR}\" \\\n", + " --recursive\n", + " else:\n", + " raise ValueError(\"Invalid weight set\")\n", + "except subprocess.CalledProcessError as captured:\n", + " print(captured)" + ], + "outputs": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "_FpxxMo-mvcP" + }, + "source": [ + "#@title Import Python packages\n", + "#@markdown Please execute this cell by pressing the *Play* button on\n", + "#@markdown the left.\n", + "\n", + "import unittest.mock\n", + "import sys\n", + "from typing import Dict, Sequence\n", + "\n", + "sys.path.insert(0, f'/usr/local/lib/python{python_version}/dist-packages/')\n", + "sys.path.insert(0, f'/usr/local/lib/python{python_version}/site-packages/')\n", + "\n", + "# Allows us to skip installing these packages\n", + "unnecessary_modules = [\n", + " \"dllogger\",\n", + " \"pytorch_lightning\",\n", + " \"pytorch_lightning.utilities\",\n", + " \"pytorch_lightning.callbacks.early_stopping\",\n", + " \"pytorch_lightning.utilities.seed\",\n", + "]\n", + "for unnecessary_module in unnecessary_modules:\n", + " sys.modules[unnecessary_module] = unittest.mock.MagicMock()\n", + "\n", + "import os\n", + "\n", + "from urllib import request\n", + "from concurrent import futures\n", + "from google.colab import files\n", + "import json\n", + "from matplotlib import gridspec\n", + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "import py3Dmol\n", + "import torch\n", + "import shutil\n", + "import tqdm\n", + "import tqdm.notebook\n", + "\n", + "TQDM_BAR_FORMAT = '{l_bar}{bar}| {n_fmt}/{total_fmt} [elapsed: {elapsed} remaining: {remaining}]'\n", + "\n", + "# Prevent shell magic being broken by openmm, prevent this cryptic error:\n", + "# \"NotImplementedError: A UTF-8 locale is required. Got ANSI_X3.4-1968\"\n", + "import locale\n", + "def getpreferredencoding(do_setlocale = True):\n", + " return \"UTF-8\"\n", + "locale.getpreferredencoding = getpreferredencoding\n", + "\n", + "from openfold import config\n", + "from openfold.data import feature_pipeline\n", + "from openfold.data import parsers\n", + "from openfold.data import data_pipeline\n", + "from openfold.data import msa_pairing\n", + "from openfold.data import feature_processing_multimer\n", + "from openfold.data.tools import jackhmmer\n", + "from openfold.model import model\n", + "from openfold.np import protein\n", + "from openfold.np.relax import relax\n", + "from openfold.np.relax.utils import overwrite_b_factors\n", + "from openfold.utils.import_weights import import_jax_weights_\n", + "from openfold.utils.tensor_utils import tensor_tree_map\n", + "\n", + "from IPython import display\n", + "from ipywidgets import GridspecLayout\n", + "from ipywidgets import Output" + ], + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "W4JpOs6oA-QS" + }, + "source": [ + "## Making a prediction\n", + "\n", + "Note that the search against databases and the actual prediction can take some time, from minutes to hours, depending on the length of the protein and what type of GPU you are allocated by Colab (see FAQ below)." 
+ ] + }, + { + "cell_type": "code", + "source": [ + "#@title Search against genetic databases\n", + "\n", + "#@markdown Once this cell has been executed, you will see\n", + "#@markdown statistics about the multiple sequence alignment\n", + "#@markdown (MSA) that will be used by OpenFold. In particular,\n", + "#@markdown you’ll see how well each residue is covered by similar\n", + "#@markdown sequences in the MSA.\n", + "\n", + "# --- Find the closest source --\n", + "test_url_pattern = 'https://storage.googleapis.com/alphafold-colab{:s}/latest/uniref90_2021_03.fasta.1'\n", + "ex = futures.ThreadPoolExecutor(3)\n", + "def fetch(source):\n", + " request.urlretrieve(test_url_pattern.format(source))\n", + " return source\n", + "fs = [ex.submit(fetch, source) for source in ['', '-europe', '-asia']]\n", + "source = None\n", + "for f in futures.as_completed(fs):\n", + " source = f.result()\n", + " ex.shutdown()\n", + " break\n", + "\n", + "# Run the search against chunks of genetic databases (since the genetic\n", + "# databases don't fit in Colab ramdisk).\n", + "\n", + "jackhmmer_binary_path = '/usr/bin/jackhmmer'\n", + "\n", + "# --- Parse multiple sequences, if there are any ---\n", + "def split_multiple_sequences(sequence):\n", + " seqs = sequence.split(':')\n", + " sorted_seqs = sorted(seqs, key=lambda s: len(s))\n", + "\n", + " # TODO: Handle the homomer case when writing fasta sequences\n", + " fasta_path_tuples = []\n", + " for idx, seq in enumerate(set(sorted_seqs)):\n", + " fasta_path = f'target_{idx+1}.fasta'\n", + " with open(fasta_path, 'wt') as f:\n", + " f.write(f'>query\\n{seq}\\n')\n", + " fasta_path_tuples.append((seq, fasta_path))\n", + " fasta_path_by_seq = dict(fasta_path_tuples)\n", + "\n", + " return sorted_seqs, fasta_path_by_seq\n", + "\n", + "sequences, fasta_path_by_sequence = split_multiple_sequences(input_sequence)\n", + "db_results_by_sequence = {seq: {} for seq in fasta_path_by_sequence.keys()}\n", + "\n", + "DB_ROOT_PATH = f'https://storage.googleapis.com/alphafold-colab{source}/latest/'\n", + "db_configs = {}\n", + "db_configs['smallbfd'] = {\n", + " 'database_path': f'{DB_ROOT_PATH}uniref90_2021_03.fasta',\n", + " 'z_value': 65984053,\n", + " 'num_jackhmmer_chunks': 17,\n", + "}\n", + "db_configs['mgnify'] = {\n", + " 'database_path': f'{DB_ROOT_PATH}mgy_clusters_2022_05.fasta',\n", + " 'z_value': 304820129,\n", + " 'num_jackhmmer_chunks': 120,\n", + "}\n", + "db_configs['uniref90'] = {\n", + " 'database_path': f'{DB_ROOT_PATH}uniref90_2022_01.fasta',\n", + " 'z_value': 144113457,\n", + " 'num_jackhmmer_chunks': 62,\n", + "}\n", + "\n", + "# Search UniProt and construct the all_seq features only for heteromers, not homomers.\n", + "if model_type == ModelType.MULTIMER and len(set(sequences)) > 1:\n", + " db_configs['uniprot'] = {\n", + " 'database_path': f'{DB_ROOT_PATH}uniprot_2021_04.fasta',\n", + " 'z_value': 225013025 + 565928,\n", + " 'num_jackhmmer_chunks': 101,\n", + " }\n", + "\n", + "total_jackhmmer_chunks = sum([d['num_jackhmmer_chunks'] for d in db_configs.values()])\n", + "with tqdm.notebook.tqdm(total=total_jackhmmer_chunks, bar_format=TQDM_BAR_FORMAT) as pbar:\n", + " def jackhmmer_chunk_callback(i):\n", + " pbar.update(n=1)\n", + "\n", + " for db_name, db_config in db_configs.items():\n", + " pbar.set_description(f'Searching {db_name}')\n", + " jackhmmer_runner = jackhmmer.Jackhmmer(\n", + " binary_path=jackhmmer_binary_path,\n", + " database_path=db_config['database_path'],\n", + " get_tblout=True,\n", + " 
num_streamed_chunks=db_config['num_jackhmmer_chunks'],\n", + " streaming_callback=jackhmmer_chunk_callback,\n", + " z_value=db_config['z_value'])\n", + "\n", + " db_results = jackhmmer_runner.query_multiple(fasta_path_by_sequence.values())\n", + " for seq, result in zip(fasta_path_by_sequence.keys(), db_results):\n", + " db_results_by_sequence[seq][db_name] = result\n", + "\n", + "\n", + "# --- Extract the MSAs and visualize ---\n", + "# Extract the MSAs from the Stockholm files.\n", + "# NB: deduplication happens later in data_pipeline.make_msa_features.\n", + "\n", + "MAX_HITS_BY_DB = {\n", + " 'uniref90': 10000,\n", + " 'smallbfd': 5000,\n", + " 'mgnify': 501,\n", + " 'uniprot': 50000,\n", + "}\n", + "\n", + "msas_by_seq_by_db = {seq: {} for seq in sequences}\n", + "full_msa_by_seq = {seq: [] for seq in sequences}\n", + "\n", + "for seq, sequence_result in db_results_by_sequence.items():\n", + " print(f'parsing_results_for_sequence {seq}')\n", + " for db_name, db_results in sequence_result.items():\n", + " unsorted_results = []\n", + " for i, result in enumerate(db_results):\n", + " msa_obj = parsers.parse_stockholm(result['sto'])\n", + " e_values_dict = parsers.parse_e_values_from_tblout(result['tbl'])\n", + " target_names = msa_obj.descriptions\n", + " e_values = [e_values_dict[t.split('/')[0]] for t in target_names]\n", + " zipped_results = zip(msa_obj.sequences, msa_obj.deletion_matrix, target_names, e_values)\n", + " if i != 0:\n", + " # Only take query from the first chunk\n", + " zipped_results = [x for x in zipped_results if x[2] != 'query']\n", + " unsorted_results.extend(zipped_results)\n", + " sorted_by_evalue = sorted(unsorted_results, key=lambda x: x[3])\n", + " msas, del_matrix, targets, _ = zip(*sorted_by_evalue)\n", + " db_msas = parsers.Msa(msas, del_matrix, targets)\n", + " if db_msas:\n", + " if db_name in MAX_HITS_BY_DB:\n", + " db_msas.truncate(MAX_HITS_BY_DB[db_name])\n", + " msas_by_seq_by_db[seq][db_name] = db_msas\n", + " full_msa_by_seq[seq].extend(db_msas.sequences)\n", + " msa_size = len(set(db_msas.sequences))\n", + " print(f'{msa_size} Sequences Found in {db_name}')\n", + "\n", + "\n", + "fig = plt.figure(figsize=(12, 3))\n", + "max_num_alignments = 0\n", + "\n", + "for seq_idx, seq in enumerate(set(sequences)):\n", + " full_msas = full_msa_by_seq[seq]\n", + " deduped_full_msa = list(dict.fromkeys(full_msas))\n", + " total_msa_size = len(deduped_full_msa)\n", + " print(f'\\n{total_msa_size} Sequences Found in Total\\n')\n", + "\n", + " aa_map = {restype: i for i, restype in enumerate('ABCDEFGHIJKLMNOPQRSTUVWXYZ-')}\n", + " msa_arr = np.array([[aa_map[aa] for aa in seq] for seq in deduped_full_msa])\n", + " num_alignments, num_res = msa_arr.shape\n", + " plt.plot(np.sum(msa_arr != aa_map['-'], axis=0), label=f'Chain {seq_idx}')\n", + " max_num_alignments = max(num_alignments, max_num_alignments)\n", + "\n", + "\n", + "plt.title('Per-Residue Count of Non-Gap Amino Acids in the MSA')\n", + "plt.ylabel('Non-Gap Count')\n", + "plt.yticks(range(0, max_num_alignments + 1, max(1, int(max_num_alignments / 3))))\n", + "plt.legend()\n", + "plt.show()" + ], + "metadata": { + "id": "o7BqQN_gfYtq" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "XUo6foMQxwS2" + }, + "source": [ + "#@title Run OpenFold and download prediction\n", + "\n", + "#@markdown Once this cell has been executed, a zip-archive with\n", + "#@markdown the obtained prediction will be automatically downloaded\n", + 
"#@markdown to your computer.\n", + "\n", + "# Color bands for visualizing plddt\n", + "PLDDT_BANDS = [\n", + " (0, 50, '#FF7D45'),\n", + " (50, 70, '#FFDB13'),\n", + " (70, 90, '#65CBF3'),\n", + " (90, 100, '#0053D6')\n", + "]\n", + "\n", + "# --- Run the model ---\n", + "if model_type == ModelType.MONOMER:\n", + " model_names = [\n", + " 'finetuning_3.pt',\n", + " 'finetuning_4.pt',\n", + " 'finetuning_5.pt',\n", + " 'finetuning_ptm_2.pt',\n", + " 'finetuning_no_templ_ptm_1.pt'\n", + " ]\n", + "elif model_type == ModelType.MULTIMER:\n", + " model_names = [\n", + " 'model_1_multimer_v3',\n", + " 'model_2_multimer_v3',\n", + " 'model_3_multimer_v3',\n", + " 'model_4_multimer_v3',\n", + " 'model_5_multimer_v3',\n", + " ]\n", + "\n", + "def _placeholder_template_feats(num_templates_, num_res_):\n", + " return {\n", + " 'template_aatype': np.zeros((num_templates_, num_res_, 22), dtype=np.int64),\n", + " 'template_all_atom_positions': np.zeros((num_templates_, num_res_, 37, 3), dtype=np.float32),\n", + " 'template_all_atom_mask': np.zeros((num_templates_, num_res_, 37), dtype=np.float32),\n", + " 'template_domain_names': np.zeros((num_templates_,), dtype=np.float32),\n", + " 'template_sum_probs': np.zeros((num_templates_, 1), dtype=np.float32),\n", + " }\n", + "\n", + "\n", + "def make_features(\n", + " sequences: Sequence[str],\n", + " msas_by_seq_by_db: Dict[str, Dict[str, parsers.Msa]],\n", + " model_type: ModelType):\n", + " num_templates = 1 # Placeholder for generating fake template features\n", + " feature_dict = {}\n", + "\n", + " for idx, seq in enumerate(sequences, start=1):\n", + " _chain_id = f'chain_{idx}'\n", + " num_res = len(seq)\n", + "\n", + " feats = data_pipeline.make_sequence_features(seq, _chain_id, num_res)\n", + " msas_without_uniprot = [msas_by_seq_by_db[seq][db] for db in db_configs.keys() if db != 'uniprot']\n", + " msa_feats = data_pipeline.make_msa_features(msas_without_uniprot)\n", + " feats.update(msa_feats)\n", + " feats.update(_placeholder_template_feats(num_templates, num_res))\n", + "\n", + " if model_type == ModelType.MONOMER:\n", + " feature_dict[seq] = feats\n", + " if model_type == ModelType.MULTIMER:\n", + " # Perform extra pair processing steps for heteromers\n", + " if len(set(sequences)) > 1:\n", + " uniprot_msa = msas_by_seq_by_db[seq]['uniprot']\n", + " uniprot_msa_features = data_pipeline.make_msa_features([uniprot_msa])\n", + " valid_feat_names = msa_pairing.MSA_FEATURES + (\n", + " 'msa_species_identifiers',\n", + " )\n", + " pair_feats = {\n", + " f'{k}_all_seq': v for k, v in uniprot_msa_features.items()\n", + " if k in valid_feat_names\n", + " }\n", + " feats.update(pair_feats)\n", + "\n", + " feats = data_pipeline.convert_monomer_features(feats, _chain_id)\n", + " feature_dict[_chain_id] = feats\n", + "\n", + " if model_type == ModelType.MONOMER:\n", + " np_example = feature_dict[sequences[0]]\n", + " elif model_type == ModelType.MULTIMER:\n", + " all_chain_feats = data_pipeline.add_assembly_features(feature_dict)\n", + " features = feature_processing_multimer.pair_and_merge(all_chain_features=all_chain_feats)\n", + " np_example = data_pipeline.pad_msa(features, 512)\n", + "\n", + " return np_example\n", + "\n", + "\n", + "output_dir = 'prediction'\n", + "os.makedirs(output_dir, exist_ok=True)\n", + "\n", + "plddts = {}\n", + "pae_outputs = {}\n", + "weighted_ptms = {}\n", + "unrelaxed_proteins = {}\n", + "\n", + "with tqdm.notebook.tqdm(total=len(model_names), bar_format=TQDM_BAR_FORMAT) as pbar:\n", + " for i, model_name in 
enumerate(model_names, start = 1):\n", + " pbar.set_description(f'Running {model_name}')\n", + "\n", + " feature_dict = make_features(sequences, msas_by_seq_by_db, model_type)\n", + "\n", + " if(weight_set == \"AlphaFold\"):\n", + " if model_type == ModelType.MONOMER:\n", + " config_preset = f\"model_{i}\"\n", + " elif model_type == ModelType.MULTIMER:\n", + " config_preset = f'model_{i}_multimer_v3'\n", + " else:\n", + " if(\"_no_templ_\" in model_name):\n", + " config_preset = \"model_3\"\n", + " else:\n", + " config_preset = \"model_1\"\n", + " if(\"_ptm_\" in model_name):\n", + " config_preset += \"_ptm\"\n", + "\n", + " cfg = config.model_config(config_preset)\n", + "\n", + " # Force the model to only use 3 recycling updates\n", + " cfg.data.common.max_recycling_iters = 3\n", + " cfg.model.recycle_early_stop_tolerance = -1\n", + "\n", + " openfold_model = model.AlphaFold(cfg)\n", + " openfold_model = openfold_model.eval()\n", + " if(weight_set == \"AlphaFold\"):\n", + " params_name = os.path.join(\n", + " ALPHAFOLD_PARAMS_DIR, f\"params_{config_preset}.npz\"\n", + " )\n", + " import_jax_weights_(openfold_model, params_name, version=config_preset)\n", + " elif(weight_set == \"OpenFold\"):\n", + " params_name = os.path.join(\n", + " OPENFOLD_PARAMS_DIR,\n", + " model_name,\n", + " )\n", + " d = torch.load(params_name)\n", + " openfold_model.load_state_dict(d)\n", + " else:\n", + " raise ValueError(f\"Invalid weight set: {weight_set}\")\n", + "\n", + " openfold_model = openfold_model.cuda()\n", + "\n", + " pipeline = feature_pipeline.FeaturePipeline(cfg.data)\n", + " processed_feature_dict = pipeline.process_features(\n", + " feature_dict,\n", + " mode='predict',\n", + " is_multimer = (model_type == ModelType.MULTIMER),\n", + " )\n", + "\n", + " processed_feature_dict = tensor_tree_map(\n", + " lambda t: t.cuda(), processed_feature_dict\n", + " )\n", + "\n", + " with torch.no_grad():\n", + " prediction_result = openfold_model(processed_feature_dict)\n", + "\n", + " # Move the batch and output to np for further processing\n", + " processed_feature_dict = tensor_tree_map(\n", + " lambda t: np.array(t[..., -1].cpu()), processed_feature_dict\n", + " )\n", + " prediction_result = tensor_tree_map(\n", + " lambda t: np.array(t.cpu()), prediction_result\n", + " )\n", + "\n", + " mean_plddt = prediction_result['plddt'].mean()\n", + "\n", + " if model_type == ModelType.MONOMER:\n", + " if 'predicted_aligned_error' in prediction_result:\n", + " pae_outputs[model_name] = (\n", + " prediction_result['predicted_aligned_error'],\n", + " prediction_result['max_predicted_aligned_error']\n", + " )\n", + " else:\n", + " # Get the pLDDT confidence metrics. 
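The loop above resolves each model name to a config preset and then loads either DeepMind's converted JAX parameters (`params_<preset>.npz` via `import_jax_weights_`) or a native OpenFold `.pt` checkpoint. A small helper mirroring just that filename logic; the two directory defaults are placeholder assumptions standing in for the notebook's `ALPHAFOLD_PARAMS_DIR` and `OPENFOLD_PARAMS_DIR` constants:

```python
import os

def params_path(weight_set: str, config_preset: str, model_name: str,
                alphafold_dir: str = "params",            # placeholder for ALPHAFOLD_PARAMS_DIR
                openfold_dir: str = "openfold_params"      # placeholder for OPENFOLD_PARAMS_DIR
                ) -> str:
    """Return the weight file the notebook would load for this model."""
    if weight_set == "AlphaFold":
        return os.path.join(alphafold_dir, f"params_{config_preset}.npz")
    if weight_set == "OpenFold":
        return os.path.join(openfold_dir, model_name)
    raise ValueError(f"Invalid weight set: {weight_set}")

print(params_path("AlphaFold", "model_1_multimer_v3", "finetuning_3.pt"))
print(params_path("OpenFold", "model_1_ptm", "finetuning_3.pt"))
```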
Do not put pTM models here as they\n", + " # should never get selected.\n", + " plddts[model_name] = prediction_result['plddt']\n", + " elif model_type == ModelType.MULTIMER:\n", + " # Multimer models are sorted by pTM+ipTM.\n", + " plddts[model_name] = prediction_result['plddt']\n", + " pae_outputs[model_name] = (prediction_result['predicted_aligned_error'],\n", + " prediction_result['max_predicted_aligned_error'])\n", + "\n", + " weighted_ptms[model_name] = prediction_result['weighted_ptm_score']\n", + "\n", + " # Set the b-factors to the per-residue plddt.\n", + " final_atom_mask = prediction_result['final_atom_mask']\n", + " b_factors = prediction_result['plddt'][:, None] * final_atom_mask\n", + " unrelaxed_protein = protein.from_prediction(\n", + " processed_feature_dict,\n", + " prediction_result,\n", + " remove_leading_feature_dimension=False,\n", + " b_factors=b_factors,\n", + " )\n", + " unrelaxed_proteins[model_name] = unrelaxed_protein\n", + "\n", + " # Delete unused outputs to save memory.\n", + " del openfold_model\n", + " del processed_feature_dict\n", + " del prediction_result\n", + " pbar.update(n=1)\n", + "\n", + " # Find the best model according to the mean pLDDT.\n", + " if model_type == ModelType.MONOMER:\n", + " best_model_name = max(plddts.keys(), key=lambda x: plddts[x].mean())\n", + " elif model_type == ModelType.MULTIMER:\n", + " best_model_name = max(weighted_ptms.keys(), key=lambda x: weighted_ptms[x])\n", + " best_pdb = protein.to_pdb(unrelaxed_proteins[best_model_name])\n", + "\n", + " # --- AMBER relax the best model ---\n", + " if(relax_prediction):\n", + " pbar.set_description(f'AMBER relaxation')\n", + " amber_relaxer = relax.AmberRelaxation(\n", + " max_iterations=0,\n", + " tolerance=2.39,\n", + " stiffness=10.0,\n", + " exclude_residues=[],\n", + " max_outer_iterations=20,\n", + " use_gpu=True,\n", + " )\n", + " relaxed_pdb, _, _ = amber_relaxer.process(\n", + " prot=unrelaxed_proteins[best_model_name]\n", + " )\n", + " best_pdb = relaxed_pdb\n", + "\n", + " # Write out the prediction\n", + " pred_output_path = os.path.join(output_dir, 'selected_prediction.pdb')\n", + " with open(pred_output_path, 'w') as f:\n", + " f.write(best_pdb)\n", + "\n", + " pbar.update(n=1) # Finished AMBER relax.\n", + "\n", + "# Construct multiclass b-factors to indicate confidence bands\n", + "# 0=very low, 1=low, 2=confident, 3=very high\n", + "banded_b_factors = []\n", + "for plddt in plddts[best_model_name]:\n", + " for idx, (min_val, max_val, _) in enumerate(PLDDT_BANDS):\n", + " if plddt >= min_val and plddt <= max_val:\n", + " banded_b_factors.append(idx)\n", + " break\n", + "banded_b_factors = np.array(banded_b_factors)[:, None] * final_atom_mask\n", + "to_visualize_pdb = overwrite_b_factors(best_pdb, banded_b_factors)\n", + "\n", + "# --- Visualise the prediction & confidence ---\n", + "show_sidechains = True\n", + "def plot_plddt_legend():\n", + " \"\"\"Plots the legend for pLDDT.\"\"\"\n", + " thresh = [\n", + " 'Very low (pLDDT < 50)',\n", + " 'Low (70 > pLDDT > 50)',\n", + " 'Confident (90 > pLDDT > 70)',\n", + " 'Very high (pLDDT > 90)']\n", + "\n", + " colors = [x[2] for x in PLDDT_BANDS]\n", + "\n", + " plt.figure(figsize=(2, 2))\n", + " for c in colors:\n", + " plt.bar(0, 0, color=c)\n", + " plt.legend(thresh, frameon=False, loc='center', fontsize=20)\n", + " plt.xticks([])\n", + " plt.yticks([])\n", + " ax = plt.gca()\n", + " ax.spines['right'].set_visible(False)\n", + " ax.spines['top'].set_visible(False)\n", + " ax.spines['left'].set_visible(False)\n", + " 
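The banding loop below maps per-residue pLDDT onto four confidence classes (0 = very low, 1 = low, 2 = confident, 3 = very high) that are written into the B-factor column and echoed in the legend. An equivalent vectorised sketch with the same 50/70/90 boundaries (identical up to how values exactly on a boundary are assigned):

```python
import numpy as np

PLDDT_BAND_EDGES = [50, 70, 90]                    # boundaries from PLDDT_BANDS above
plddt = np.array([32.5, 55.0, 71.2, 93.8])         # toy per-residue confidences

band_idx = np.digitize(plddt, PLDDT_BAND_EDGES)    # 0=very low, 1=low, 2=confident, 3=very high
print(band_idx)                                    # [0 1 2 3]
```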
ax.spines['bottom'].set_visible(False)\n", + " plt.title('Model Confidence', fontsize=20, pad=20)\n", + " return plt\n", + "\n", + "# Show the structure coloured by chain if the multimer model has been used.\n", + "if model_type == ModelType.MULTIMER:\n", + " multichain_view = py3Dmol.view(width=800, height=600)\n", + " multichain_view.addModelsAsFrames(to_visualize_pdb)\n", + " multichain_style = {'cartoon': {'colorscheme': 'chain'}}\n", + " multichain_view.setStyle({'model': -1}, multichain_style)\n", + " multichain_view.zoomTo()\n", + " multichain_view.show()\n", + "\n", + "# Color the structure by per-residue pLDDT\n", + "color_map = {i: bands[2] for i, bands in enumerate(PLDDT_BANDS)}\n", + "view = py3Dmol.view(width=800, height=600)\n", + "view.addModelsAsFrames(to_visualize_pdb)\n", + "style = {'cartoon': {\n", + " 'colorscheme': {\n", + " 'prop': 'b',\n", + " 'map': color_map}\n", + " }}\n", + "if show_sidechains:\n", + " style['stick'] = {}\n", + "view.setStyle({'model': -1}, style)\n", + "view.zoomTo()\n", + "\n", + "grid = GridspecLayout(1, 2)\n", + "out = Output()\n", + "with out:\n", + " view.show()\n", + "grid[0, 0] = out\n", + "\n", + "out = Output()\n", + "with out:\n", + " plot_plddt_legend().show()\n", + "grid[0, 1] = out\n", + "\n", + "display.display(grid)\n", + "\n", + "# Display pLDDT and predicted aligned error (if output by the model).\n", + "if pae_outputs:\n", + " num_plots = 2\n", + "else:\n", + " num_plots = 1\n", + "\n", + "plt.figure(figsize=[8 * num_plots, 6])\n", + "plt.subplot(1, num_plots, 1)\n", + "plt.plot(plddts[best_model_name])\n", + "plt.title('Predicted LDDT')\n", + "plt.xlabel('Residue')\n", + "plt.ylabel('pLDDT')\n", + "\n", + "if num_plots == 2:\n", + " plt.subplot(1, 2, 2)\n", + " pae, max_pae = list(pae_outputs.values())[0]\n", + " plt.imshow(pae, vmin=0., vmax=max_pae, cmap='Greens_r')\n", + " plt.colorbar(fraction=0.046, pad=0.04)\n", + "\n", + " # Display lines at chain boundaries.\n", + " best_unrelaxed_prot = unrelaxed_proteins[best_model_name]\n", + " total_num_res = best_unrelaxed_prot.residue_index.shape[-1]\n", + " chain_ids = best_unrelaxed_prot.chain_index\n", + " for chain_boundary in np.nonzero(chain_ids[:-1] - chain_ids[1:]):\n", + " if chain_boundary.size:\n", + " plt.plot([0, total_num_res], [chain_boundary, chain_boundary], color='red')\n", + " plt.plot([chain_boundary, chain_boundary], [0, total_num_res], color='red')\n", + " plt.title('Predicted Aligned Error')\n", + " plt.xlabel('Scored residue')\n", + " plt.ylabel('Aligned residue')\n", + "\n", + "# Save pLDDT and predicted aligned error (if it exists)\n", + "pae_output_path = os.path.join(output_dir, 'predicted_aligned_error.json')\n", + "if pae_outputs:\n", + " # Save predicted aligned error in the same format as the AF EMBL DB\n", + " rounded_errors = np.round(pae.astype(np.float64), decimals=1)\n", + " indices = np.indices((len(rounded_errors), len(rounded_errors))) + 1\n", + " indices_1 = indices[0].flatten().tolist()\n", + " indices_2 = indices[1].flatten().tolist()\n", + " pae_data = json.dumps([{\n", + " 'residue1': indices_1,\n", + " 'residue2': indices_2,\n", + " 'distance': rounded_errors.flatten().tolist(),\n", + " 'max_predicted_aligned_error': max_pae.item()\n", + " }],\n", + " indent=None,\n", + " separators=(',', ':'))\n", + " with open(pae_output_path, 'w') as f:\n", + " f.write(pae_data)\n", + "\n", + "\n", + "# --- Download the predictions ---\n", + "shutil.make_archive(base_name='prediction', format='zip', root_dir=output_dir)\n", + 
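The predicted aligned error is exported in the same flat JSON layout as the AlphaFold EMBL-EBI database: a single-element list whose object holds 1-based `residue1`/`residue2` index lists, the flattened and rounded error matrix, and the maximum PAE. A small round-trip sketch with a toy 2x2 matrix:

```python
import json
import numpy as np

pae = np.array([[0.84, 5.21], [4.93, 0.72]])       # toy PAE, not a real prediction
idx = np.indices(pae.shape) + 1                    # 1-based residue indices

record = [{
    "residue1": idx[0].flatten().tolist(),
    "residue2": idx[1].flatten().tolist(),
    "distance": np.round(pae.astype(np.float64), 1).flatten().tolist(),
    "max_predicted_aligned_error": float(pae.max()),
}]
text = json.dumps(record, indent=None, separators=(",", ":"))
print(text)

# Reading it back reproduces the (rounded) matrix.
recovered = np.array(json.loads(text)[0]["distance"]).reshape(pae.shape)
```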
"files.download(f'{output_dir}.zip')" + ], + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "lUQAn5LYC5n4" + }, + "source": [ + "### Interpreting the prediction\n", + "\n", + "Please see the [AlphaFold methods paper](https://www.nature.com/articles/s41586-021-03819-2) and the [AlphaFold predictions of the human proteome paper](https://www.nature.com/articles/s41586-021-03828-1), as well as [DeepMind's FAQ](https://alphafold.ebi.ac.uk/faq) on how to interpret AlphaFold/OpenFold predictions. More information about the predictions of the AlphaFold Multimer model can be found in the [Alphafold Multimer paper](https://www.biorxiv.org/content/10.1101/2022.03.11.484043v3.full.pdf)." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "jeb2z8DIA4om" + }, + "source": [ + "## FAQ & Troubleshooting\n", + "\n", + "\n", + "* How do I get a predicted protein structure for my protein?\n", + " * Click on the _Connect_ button on the top right to get started.\n", + " * Paste the amino acid sequence of your protein (without any headers) into the “Enter the amino acid sequence to fold”.\n", + " * Run all cells in the Colab, either by running them individually (with the play button on the left side) or via _Runtime_ > _Run all._\n", + " * The predicted protein structure will be downloaded once all cells have been executed. Note: This can take minutes to hours - see below.\n", + "* How long will this take?\n", + " * Downloading the OpenFold source code can take up to a few minutes.\n", + " * Downloading and installing the third-party software can take up to a few minutes.\n", + " * The search against genetic databases can take minutes to hours.\n", + " * Running OpenFold and generating the prediction can take minutes to hours, depending on the length of your protein and on which GPU-type Colab has assigned you.\n", + "* My Colab no longer seems to be doing anything, what should I do?\n", + " * Some steps may take minutes to hours to complete.\n", + " * Sometimes, running the \"installation\" cells more than once can corrupt the OpenFold installation.\n", + " * If nothing happens or if you receive an error message, try restarting your Colab runtime via _Runtime_ > _Restart runtime_.\n", + " * If this doesn’t help, reset your Colab runtime via _Runtime_ > _Factory reset runtime_.\n", + "* How does what's run in this notebook compare to the full versions of Alphafold/Openfold?\n", + " * This Colab version of OpenFold searches a selected portion of the BFD dataset and currently doesn’t use templates, so its accuracy is reduced in comparison to the full version, which is analogous to what's described in the [AlphaFold paper](https://doi.org/10.1038/s41586-021-03819-2) and [Github repo](https://github.com/deepmind/alphafold/). The full version of OpenFold can be run from our own [GitHub repo](https://github.com/aqlaboratory/openfold).\n", + "* What is a Colab?\n", + " * See the [Colab FAQ](https://research.google.com/colaboratory/faq.html).\n", + "* I received a warning “Notebook requires high RAM”, what do I do?\n", + " * The resources allocated to your Colab vary. See the [Colab FAQ](https://research.google.com/colaboratory/faq.html) for more details.\n", + " * You can execute the Colab nonetheless.\n", + "* I received an error “Colab CPU runtime not supported” or “No GPU/TPU found”, what do I do?\n", + " * Colab CPU runtime is not supported. 
Try changing your runtime via _Runtime_ > _Change runtime type_ > _Hardware accelerator_ > _GPU_.\n", + " * The type of GPU allocated to your Colab varies. See the [Colab FAQ](https://research.google.com/colaboratory/faq.html) for more details.\n", + " * If you receive “Cannot connect to GPU backend”, you can try again later to see if Colab allocates you a GPU.\n", + " * [Colab Pro](https://colab.research.google.com/signup) offers priority access to GPUs.\n", + "* Does this tool install anything on my computer?\n", + " * No, everything happens in the cloud on Google Colab.\n", + " * At the end of the Colab execution a zip-archive with the obtained prediction will be automatically downloaded to your computer.\n", + "* How should I share feedback and bug reports?\n", + " * Please share any feedback and bug reports as an [issue](https://github.com/aqlaboratory/openfold/issues) on Github.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "YfPhvYgKC81B" + }, + "source": [ + "# License and Disclaimer\n", + "\n", + "This Colab notebook and other information provided is for theoretical modelling only, caution should be exercised in its use. It is provided ‘as-is’ without any warranty of any kind, whether expressed or implied. Information is not intended to be a substitute for professional medical advice, diagnosis, or treatment, and does not constitute medical or other professional advice.\n", + "\n", + "## AlphaFold/OpenFold Code License\n", + "\n", + "Copyright 2021 AlQuraishi Laboratory\n", + "\n", + "Copyright 2021 DeepMind Technologies Limited.\n", + "\n", + "Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0.\n", + "\n", + "Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.\n", + "\n", + "## Model Parameters License\n", + "\n", + "DeepMind's AlphaFold parameters are made available under the terms of the Creative Commons Attribution 4.0 International (CC BY 4.0) license. You can find details at: https://creativecommons.org/licenses/by/4.0/legalcode\n", + "\n", + "\n", + "## Third-party software\n", + "\n", + "Use of the third-party software, libraries or code referred to in this notebook may be governed by separate terms and conditions or license provisions. Your use of the third-party software, libraries or code is subject to any such terms and you should check that you can comply with any applicable restrictions or terms and conditions before use.\n", + "\n", + "\n", + "## Mirrored Databases\n", + "\n", + "The following databases have been mirrored by DeepMind, and are available with reference to the following:\n", + "* UniRef90: v2021\\_03 (unmodified), by The UniProt Consortium, available under a [Creative Commons Attribution-NoDerivatives 4.0 International License](http://creativecommons.org/licenses/by-nd/4.0/).\n", + "* MGnify: v2019\\_05 (unmodified), by Mitchell AL et al., available free of all copyright restrictions and made fully and freely available for both non-commercial and commercial use under [CC0 1.0 Universal (CC0 1.0) Public Domain Dedication](https://creativecommons.org/publicdomain/zero/1.0/).\n", + "* BFD: (modified), by Steinegger M. 
and Söding J., modified by DeepMind, available under a [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by/4.0/). See the Methods section of the [AlphaFold proteome paper](https://www.nature.com/articles/s41586-021-03828-1) for details." + ] + } + ], + "metadata": { + "colab": { + "provenance": [], + "gpuType": "T4", + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + }, + "language_info": { + "name": "python" + }, + "accelerator": "GPU" + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/openfold/config.py b/openfold/config.py index b6259ed01..0c7d13e54 100644 --- a/openfold/config.py +++ b/openfold/config.py @@ -224,7 +224,7 @@ def model_config( c.data.eval.max_extra_msa = 1152 c.data.predict.max_extra_msa = 1152 else: - raise ValueError("Invalid model name") + raise ValueError(f"Invalid model name {name}") if long_sequence_inference: assert(not train) diff --git a/openfold/data/data_pipeline.py b/openfold/data/data_pipeline.py index ce8494d0c..6ed858649 100644 --- a/openfold/data/data_pipeline.py +++ b/openfold/data/data_pipeline.py @@ -1208,7 +1208,7 @@ def read_msa(start, size): uniprot_msa_path = os.path.join(alignment_dir, "uniprot_hits.sto") if not os.path.exists(uniprot_msa_path): chain_id = os.path.basename(os.path.normpath(alignment_dir)) - raise ValueError(f"Missing 'uniprot_hits.sto' for {chain_id}. " + raise ValueError(f"Missing file {uniprot_msa_path} for {chain_id}. " f"This is required for Multimer MSA pairing.") with open(uniprot_msa_path, "r") as fp: @@ -1235,7 +1235,6 @@ def process_fasta(self, input_fasta_str = f.read() input_seqs, input_descs = parsers.parse_fasta(input_fasta_str) - all_chain_features = {} sequence_features = {} is_homomer_or_monomer = len(set(input_seqs)) == 1 diff --git a/openfold/data/tools/hhsearch.py b/openfold/data/tools/hhsearch.py index 42a6edf6b..d1b13d584 100644 --- a/openfold/data/tools/hhsearch.py +++ b/openfold/data/tools/hhsearch.py @@ -107,8 +107,8 @@ def query(self, a3m: str, output_dir: Optional[str] = None) -> str: if retcode: # Stderr is truncated to prevent proto size errors in Beam. 
raise RuntimeError( - "HHSearch failed:\nstdout:\n%s\n\nstderr:\n%s\n" - % (stdout.decode("utf-8"), stderr[:100_000].decode("utf-8")) + "HHSearch failed:\ncommand:\n%s\n\nstdout:\n%s\n\nstderr:\n%s\n" + % (f"hhsearch command: {' '.join(cmd)}", stdout.decode("utf-8"), stderr[:100_000].decode("utf-8")) ) with open(hhr_path) as f: diff --git a/run_pretrained_openfold.py b/run_pretrained_openfold.py index e27623e34..d266dc062 100644 --- a/run_pretrained_openfold.py +++ b/run_pretrained_openfold.py @@ -138,7 +138,8 @@ def generate_feature_dict( '\n'.join([f">{tag}\n{seq}" for tag, seq in zip(tags, seqs)]) ) feature_dict = data_processor.process_fasta( - fasta_path=tmp_fasta_path, alignment_dir=alignment_dir, + fasta_path=tmp_fasta_path, + alignment_dir=alignment_dir ) elif len(seqs) == 1: tag = tags[0] @@ -180,6 +181,45 @@ def main(args): config = model_config(args.config_preset, long_sequence_inference=args.long_sequence_inference) + print("") + print("#### INPUT / OUTPUT ####") + print(f"fasta_dir: {args.fasta_dir}") + print(f"output_dir: {args.output_dir}") + print(f"output prediction filenames: {args.output_postfix}") + print(f"cif_output: {args.cif_output}") + print(f"save embedded outputs: {args.save_outputs}") + + print("") + print("#### PRESETS ####") + print(f"skip_relaxation: {args.skip_relaxation}") + print(f"use_precomputed_alignments: {args.use_precomputed_alignments}") + print(f"use_single_seq_mode: {args.use_single_seq_mode}") + print(f"long_sequence_inference: {args.long_sequence_inference}") + print(f"Threads: {args.cpus}") + print(f"multimer_ri_gap: {args.multimer_ri_gap}") + print(f"subtract_plddt: {args.subtract_plddt}") + + print("") + print("#### MODEL PARAMS ####") + print(f"Model: {args.config_preset}") + print(f"trace_model: {args.trace_model}") + + print("") + print("#### DATABASE PARAMS ####") + print(f"template_mmcif_dir: {args.template_mmcif_dir}") + print(f"max_template_date: {args.max_template_date}") + print(f"max_templates: {config.data.predict.max_templates}") + print(f"release_dates_path: {args.release_dates_path}") + print(f"obsolete_pdbs_path: {args.obsolete_pdbs_path}") + + print("") + print("#### GPU / AI PARAMS ####") + print(f"model_device: {args.model_device}") + print(f"openfold_checkpoint_path: {args.openfold_checkpoint_path}") + print(f"jax_param_path: {args.jax_param_path}") + + print("") + if args.trace_model: if not config.data.predict.fixed_size: raise ValueError( @@ -237,6 +277,7 @@ def main(args): for fasta_file in list_files_with_extensions(args.fasta_dir, (".fasta", ".fa")): # Gather input sequences fasta_path = os.path.join(args.fasta_dir, fasta_file) + print(f"reading fasta: {fasta_path}") with open(fasta_path, "r") as fp: data = fp.read() @@ -258,12 +299,15 @@ def main(args): seq_sort_fn = lambda target: sum([len(s) for s in target[1]]) sorted_targets = sorted(zip(tag_list, seq_list), key=seq_sort_fn) feature_dicts = {} + + logger.info(f"Loading model information...") model_generator = load_models_from_command_line( config, args.model_device, args.openfold_checkpoint_path, args.jax_param_path, - args.output_dir) + args.output_dir + ) for model, output_directory in model_generator: cur_tracing_interval = 0 @@ -273,6 +317,7 @@ def main(args): output_name = f'{output_name}_{args.output_postfix}' # Does nothing if the alignments have already been computed + logger.info(f"Perform alignment if not already done...") precompute_alignments(tags, seqs, alignment_dir, args) feature_dict = feature_dicts.get(tag, None) @@ -298,6 +343,10 @@ def 
main(args): feature_dict, mode='predict', is_multimer=is_multimer ) + # print("Storing feature dict...") + # with open(os.path.join(args.output_dir, f"{output_name}_feature_dict.pickle"), "wb") as fp: + # pickle.dump(processed_feature_dict, fp, protocol=pickle.HIGHEST_PROTOCOL) + processed_feature_dict = { k: torch.as_tensor(v, device=args.model_device) for k, v in processed_feature_dict.items() @@ -316,6 +365,10 @@ def main(args): ) cur_tracing_interval = rounded_seqlen + print("Storing feature dict...") + with open(os.path.join(args.output_dir, f"{output_name}_feature_dict.pickle"), "wb") as fp: + pickle.dump(feature_dict, fp, protocol=pickle.HIGHEST_PROTOCOL) + out = run_model(model, processed_feature_dict, tag, args.output_dir) # Toss out the recycling dimensions --- we don't need them anymore diff --git a/scripts/colabfold_search.py b/scripts/colabfold_search.py new file mode 100644 index 000000000..2aa89f892 --- /dev/null +++ b/scripts/colabfold_search.py @@ -0,0 +1,729 @@ +""" + +ADAPTED FROM https://github.com/sokrypton/ColabFold/blob/main/colabfold/mmseqs/search.py#L223 + +Functionality for running mmseqs locally. Takes in a fasta file, outputs final.a3m + +Note: Currently needs mmseqs compiled from source +""" + +import logging +import math +import os +import shutil +import subprocess +from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter +from pathlib import Path +from typing import Any, Callable, Dict, List, Optional, Tuple, Union, TYPE_CHECKING +import pandas +import random + +logger = logging.getLogger(__name__) + + +def safe_filename(file: str) -> str: + return "".join([c if c.isalnum() or c in ["_", ".", "-"] else "_" for c in file]) + + +def msa_to_str( + unpaired_msa: List[str], + paired_msa: List[str], + query_seqs_unique: List[str], + query_seqs_cardinality: List[int], +) -> str: + msa = "#" + ",".join(map(str, map(len, query_seqs_unique))) + "\t" + msa += ",".join(map(str, query_seqs_cardinality)) + "\n" + # build msa with cardinality of 1, it makes it easier to parse and manipulate + query_seqs_cardinality = [1 for _ in query_seqs_cardinality] + msa += pair_msa(query_seqs_unique, query_seqs_cardinality, paired_msa, unpaired_msa) + return msa + + +def pair_msa( + query_seqs_unique: List[str], + query_seqs_cardinality: List[int], + paired_msa: Optional[List[str]], + unpaired_msa: Optional[List[str]], +) -> str: + if paired_msa is None and unpaired_msa is not None: + a3m_lines = pad_sequences( + unpaired_msa, query_seqs_unique, query_seqs_cardinality + ) + elif paired_msa is not None and unpaired_msa is not None: + a3m_lines = ( + pair_sequences(paired_msa, query_seqs_unique, query_seqs_cardinality) + + "\n" + + pad_sequences(unpaired_msa, query_seqs_unique, query_seqs_cardinality) + ) + elif paired_msa is not None and unpaired_msa is None: + a3m_lines = pair_sequences( + paired_msa, query_seqs_unique, query_seqs_cardinality + ) + else: + raise ValueError(f"Invalid pairing") + return a3m_lines + + +def pair_sequences( + a3m_lines: List[str], query_sequences: List[str], query_cardinality: List[int] +) -> str: + a3m_line_paired = [""] * len(a3m_lines[0].splitlines()) + for n, seq in enumerate(query_sequences): + lines = a3m_lines[n].splitlines() + for i, line in enumerate(lines): + if line.startswith(">"): + if n != 0: + line = line.replace(">", "\t", 1) + a3m_line_paired[i] = a3m_line_paired[i] + line + else: + a3m_line_paired[i] = a3m_line_paired[i] + line * query_cardinality[n] + return "\n".join(a3m_line_paired) + + +def pad_sequences( + a3m_lines: 
List[str], query_sequences: List[str], query_cardinality: List[int] +) -> str: + _blank_seq = [ + ("-" * len(seq)) + for n, seq in enumerate(query_sequences) + for _ in range(query_cardinality[n]) + ] + a3m_lines_combined = [] + pos = 0 + for n, seq in enumerate(query_sequences): + for j in range(0, query_cardinality[n]): + lines = a3m_lines[n].split("\n") + for a3m_line in lines: + if len(a3m_line) == 0: + continue + if a3m_line.startswith(">"): + a3m_lines_combined.append(a3m_line) + else: + a3m_lines_combined.append( + "".join(_blank_seq[:pos] + [a3m_line] + _blank_seq[pos + 1:]) + ) + pos += 1 + return "\n".join(a3m_lines_combined) + + +def parse_fasta(fasta_string: str) -> Tuple[List[str], List[str]]: + """Parses FASTA string and returns list of strings with amino-acid sequences. + + Arguments: + fasta_string: The string contents of a FASTA file. + + Returns: + A tuple of two lists: + * A list of sequences. + * A list of sequence descriptions taken from the comment lines. In the + same order as the sequences. + """ + sequences = [] + descriptions = [] + index = -1 + for line in fasta_string.splitlines(): + line = line.strip() + if line.startswith("#"): + continue + if line.startswith(">"): + index += 1 + descriptions.append(line[1:]) # Remove the '>' at the beginning. + sequences.append("") + continue + elif not line: + continue # Skip blank lines. + sequences[index] += line + + return sequences, descriptions + + +def get_queries( + input_path: Union[str, Path], sort_queries_by: str = "length" +) -> Tuple[List[Tuple[str, str, Optional[List[str]]]], bool]: + """Reads a directory of fasta files, a single fasta file or a csv file and returns a tuple + of job name, sequence and the optional a3m lines""" + + input_path = Path(input_path) + if not input_path.exists(): + raise OSError(f"{input_path} could not be found") + + if input_path.is_file(): + if input_path.suffix == ".csv" or input_path.suffix == ".tsv": + sep = "\t" if input_path.suffix == ".tsv" else "," + df = pandas.read_csv(input_path, sep=sep) + assert "id" in df.columns and "sequence" in df.columns + queries = [ + (seq_id, sequence.upper().split(":"), None) + for seq_id, sequence in df[["id", "sequence"]].itertuples(index=False) + ] + for i in range(len(queries)): + if len(queries[i][1]) == 1: + queries[i] = (queries[i][0], queries[i][1][0], None) + elif input_path.suffix == ".a3m": + (seqs, header) = parse_fasta(input_path.read_text()) + if len(seqs) == 0: + raise ValueError(f"{input_path} is empty") + query_sequence = seqs[0] + # Use a list so we can easily extend this to multiple msas later + a3m_lines = [input_path.read_text()] + queries = [(input_path.stem, query_sequence, a3m_lines)] + elif input_path.suffix in [".fasta", ".faa", ".fa"]: + (sequences, headers) = parse_fasta(input_path.read_text()) + queries = [] + for sequence, header in zip(sequences, headers): + sequence = sequence.upper() + if sequence.count(":") == 0: + # Single sequence + queries.append((header, sequence, None)) + else: + # Complex mode + queries.append((header, sequence.upper().split(":"), None)) + else: + raise ValueError(f"Unknown file format {input_path.suffix}") + else: + assert input_path.is_dir(), "Expected either an input file or a input directory" + queries = [] + for file in sorted(input_path.iterdir()): + if not file.is_file(): + continue + if file.suffix.lower() not in [".a3m", ".fasta", ".faa"]: + logger.warning(f"non-fasta/a3m file in input directory: {file}") + continue + (seqs, header) = parse_fasta(file.read_text()) + if len(seqs) 
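`msa_to_str` above prefixes the merged A3M with a ColabFold-style header: `#` followed by the comma-separated lengths of the unique chains, a tab, and the per-chain cardinalities, after which the paired and padded blocks are appended. A tiny sketch of just the header convention:

```python
def a3m_header(query_seqs_unique, query_seqs_cardinality):
    """Build the header line: '#' + chain lengths + TAB + cardinalities + newline."""
    lengths = ",".join(str(len(s)) for s in query_seqs_unique)
    cards = ",".join(str(c) for c in query_seqs_cardinality)
    return f"#{lengths}\t{cards}\n"

# A heterodimer A:B with one copy of each chain (lengths 10 and 12):
print(repr(a3m_header(["MKLKQVADKL", "MKLKQVADKLEE"], [1, 1])))   # '#10,12\t1,1\n'
```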
== 0: + logger.error(f"{file} is empty") + continue + query_sequence = seqs[0] + if len(seqs) > 1 and file.suffix in [".fasta", ".faa", ".fa"]: + logger.warning( + f"More than one sequence in {file}, ignoring all but the first sequence" + ) + + if file.suffix.lower() == ".a3m": + a3m_lines = [file.read_text()] + queries.append((file.stem, query_sequence.upper(), a3m_lines)) + else: + if query_sequence.count(":") == 0: + # Single sequence + queries.append((file.stem, query_sequence, None)) + else: + # Complex mode + queries.append((file.stem, query_sequence.upper().split(":"), None)) + + # sort by seq. len + if sort_queries_by == "length": + queries.sort(key=lambda t: len("".join(t[1]))) + + elif sort_queries_by == "random": + random.shuffle(queries) + + is_complex = False + for job_number, (_, query_sequence, a3m_lines) in enumerate(queries): + if isinstance(query_sequence, list): + is_complex = True + break + if a3m_lines is not None and a3m_lines[0].startswith("#"): + a3m_line = a3m_lines[0].splitlines()[0] + tab_sep_entries = a3m_line[1:].split("\t") + if len(tab_sep_entries) == 2: + query_seq_len = tab_sep_entries[0].split(",") + query_seq_len = list(map(int, query_seq_len)) + query_seqs_cardinality = tab_sep_entries[1].split(",") + query_seqs_cardinality = list(map(int, query_seqs_cardinality)) + is_single_protein = ( + True + if len(query_seq_len) == 1 and query_seqs_cardinality[0] == 1 + else False + ) + if not is_single_protein: + is_complex = True + break + return queries, is_complex + + +def run_mmseqs(mmseqs: Path, params: List[Union[str, Path]]): + params_log = " ".join(str(i) for i in params) + logger.info(f"Running {mmseqs} {params_log}") + # hide MMseqs2 verbose paramters list that clogs up the log + os.environ["MMSEQS_CALL_DEPTH"] = "1" + subprocess.check_call([mmseqs] + params) + + +def mmseqs_search_monomer( + dbbase: Path, + base: Path, + uniref_db: Path = Path("uniref30_2302_db"), + template_db: Path = Path(""), # Unused by default + metagenomic_db: Path = Path("colabfold_envdb_202108_db"), + mmseqs: Path = Path("mmseqs"), + use_env: bool = True, + use_templates: bool = False, + filter: bool = True, + expand_eval: float = math.inf, + align_eval: int = 10, + diff: int = 3000, + qsc: float = -20.0, + max_accept: int = 1000000, + prefilter_mode: int = 0, + s: float = 8, + db_load_mode: int = 2, + threads: int = 32, +): + """Run mmseqs with a local colabfold database set + + db1: uniprot db (UniRef30) + db2: Template (unused by default) + db3: metagenomic db (colabfold_envdb_202108 or bfd_mgy_colabfold, the former is preferred) + """ + if filter: + # 0.1 was not used in benchmarks due to POSIX shell bug in line above + # EXPAND_EVAL=0.1 + align_eval = 10 + qsc = 0.8 + max_accept = 100000 + + used_dbs = [uniref_db] + if use_templates: + used_dbs.append(template_db) + if use_env: + used_dbs.append(metagenomic_db) + + for db in used_dbs: + if not dbbase.joinpath(f"{db}.dbtype").is_file(): + raise FileNotFoundError(f"Database {db} does not exist") + if ( + ( + not dbbase.joinpath(f"{db}.idx").is_file() + and not dbbase.joinpath(f"{db}.idx.index").is_file() + ) + or os.environ.get("MMSEQS_IGNORE_INDEX", False) + ): + logger.info("Search does not use index") + db_load_mode = 0 + dbSuffix1 = "_seq" + dbSuffix2 = "_aln" + dbSuffix3 = "" + else: + dbSuffix1 = ".idx" + dbSuffix2 = ".idx" + dbSuffix3 = ".idx" + + # fmt: off + # @formatter:off + search_param = ["--num-iterations", "3", "--db-load-mode", str(db_load_mode), "-a", "-e", "0.1", "--max-seqs", "10000"] + search_param += 
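`get_queries` accepts a FASTA/CSV/a3m file or a directory of them and flags a job as a complex whenever a sequence uses `:` to separate chains. A usage sketch; the import path assumes the repository root is on `sys.path` and that the module's dependencies (e.g. pandas) are installed:

```python
from pathlib import Path
from scripts.colabfold_search import get_queries   # import path is an assumption

tmp = Path("example_queries")
tmp.mkdir(exist_ok=True)
(tmp / "monomer.fasta").write_text(">monomer\nMKLKQVADKL\n")
(tmp / "dimer.fasta").write_text(">dimer\nMKLKQVADKL:MKLKQVADKLEE\n")

queries, is_complex = get_queries(tmp)
for jobname, seqs, _a3m in queries:
    print(jobname, seqs)
print("complex mode:", is_complex)   # True, triggered by the ':'-separated entry
```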
["--prefilter-mode", str(prefilter_mode)] + if s is not None: + search_param += ["-s", "{:.1f}".format(s)] + else: + search_param += ["--k-score", "'seq:96,prof:80'"] + + filter_param = ["--filter-msa", str(filter), "--filter-min-enable", "1000", "--diff", str(diff), "--qid", "0.0,0.2,0.4,0.6,0.8,1.0", "--qsc", "0", "--max-seq-id", "0.95",] + expand_param = ["--expansion-mode", "0", "-e", str(expand_eval), "--expand-filter-clusters", str(filter), "--max-seq-id", "0.95",] + + run_mmseqs(mmseqs, ["search", base.joinpath("qdb"), dbbase.joinpath(uniref_db), base.joinpath("res"), base.joinpath("tmp"), "--threads", str(threads)] + search_param) + run_mmseqs(mmseqs, ["mvdb", base.joinpath("tmp/latest/profile_1"), base.joinpath("prof_res")]) + run_mmseqs(mmseqs, ["lndb", base.joinpath("qdb_h"), base.joinpath("prof_res_h")]) + run_mmseqs(mmseqs, ["expandaln", base.joinpath("qdb"), dbbase.joinpath(f"{uniref_db}{dbSuffix1}"), base.joinpath("res"), dbbase.joinpath(f"{uniref_db}{dbSuffix2}"), base.joinpath("res_exp"), "--db-load-mode", str(db_load_mode), "--threads", str(threads)] + expand_param) + run_mmseqs(mmseqs, ["align", base.joinpath("prof_res"), dbbase.joinpath(f"{uniref_db}{dbSuffix1}"), base.joinpath("res_exp"), base.joinpath("res_exp_realign"), "--db-load-mode", str(db_load_mode), "-e", str(align_eval), "--max-accept", str(max_accept), "--threads", str(threads), "--alt-ali", "10", "-a"]) + run_mmseqs(mmseqs, ["filterresult", base.joinpath("qdb"), dbbase.joinpath(f"{uniref_db}{dbSuffix1}"), + base.joinpath("res_exp_realign"), base.joinpath("res_exp_realign_filter"), "--db-load-mode", + str(db_load_mode), "--qid", "0", "--qsc", str(qsc), "--diff", "0", "--threads", + str(threads), "--max-seq-id", "1.0", "--filter-min-enable", "100"]) + run_mmseqs(mmseqs, ["result2msa", base.joinpath("qdb"), dbbase.joinpath(f"{uniref_db}{dbSuffix1}"), + base.joinpath("res_exp_realign_filter"), base.joinpath("uniref.a3m"), "--msa-format-mode", + "6", "--db-load-mode", str(db_load_mode), "--threads", str(threads)] + filter_param) + subprocess.run([mmseqs] + ["rmdb", base.joinpath("res_exp_realign")]) + subprocess.run([mmseqs] + ["rmdb", base.joinpath("res_exp")]) + subprocess.run([mmseqs] + ["rmdb", base.joinpath("res")]) + subprocess.run([mmseqs] + ["rmdb", base.joinpath("res_exp_realign_filter")]) + + if use_env: + run_mmseqs(mmseqs, ["search", base.joinpath("prof_res"), dbbase.joinpath(metagenomic_db), base.joinpath("res_env"), + base.joinpath("tmp3"), "--threads", str(threads)] + search_param) + run_mmseqs(mmseqs, ["expandaln", base.joinpath("prof_res"), dbbase.joinpath(f"{metagenomic_db}{dbSuffix1}"), base.joinpath("res_env"), + dbbase.joinpath(f"{metagenomic_db}{dbSuffix2}"), base.joinpath("res_env_exp"), "-e", str(expand_eval), + "--expansion-mode", "0", "--db-load-mode", str(db_load_mode), "--threads", str(threads)]) + run_mmseqs(mmseqs, ["align", base.joinpath("tmp3/latest/profile_1"), dbbase.joinpath(f"{metagenomic_db}{dbSuffix1}"), + base.joinpath("res_env_exp"), base.joinpath("res_env_exp_realign"), "--db-load-mode", + str(db_load_mode), "-e", str(align_eval), "--max-accept", str(max_accept), "--threads", + str(threads), "--alt-ali", "10", "-a"]) + run_mmseqs(mmseqs, ["filterresult", base.joinpath("qdb"), dbbase.joinpath(f"{metagenomic_db}{dbSuffix1}"), + base.joinpath("res_env_exp_realign"), base.joinpath("res_env_exp_realign_filter"), + "--db-load-mode", str(db_load_mode), "--qid", "0", "--qsc", str(qsc), "--diff", "0", + "--max-seq-id", "1.0", "--threads", str(threads), "--filter-min-enable", 
"100"]) + run_mmseqs(mmseqs, ["result2msa", base.joinpath("qdb"), dbbase.joinpath(f"{metagenomic_db}{dbSuffix1}"), + base.joinpath("res_env_exp_realign_filter"), + base.joinpath("bfd.mgnify30.metaeuk30.smag30.a3m"), "--msa-format-mode", "6", + "--db-load-mode", str(db_load_mode), "--threads", str(threads)] + filter_param) + + run_mmseqs(mmseqs, ["rmdb", base.joinpath("res_env_exp_realign_filter")]) + run_mmseqs(mmseqs, ["rmdb", base.joinpath("res_env_exp_realign")]) + run_mmseqs(mmseqs, ["rmdb", base.joinpath("res_env_exp")]) + run_mmseqs(mmseqs, ["rmdb", base.joinpath("res_env")]) + + run_mmseqs(mmseqs, ["mergedbs", base.joinpath("qdb"), base.joinpath("final.a3m"), base.joinpath("uniref.a3m"), base.joinpath("bfd.mgnify30.metaeuk30.smag30.a3m")]) + run_mmseqs(mmseqs, ["rmdb", base.joinpath("bfd.mgnify30.metaeuk30.smag30.a3m")]) + else: + run_mmseqs(mmseqs, ["mvdb", base.joinpath("uniref.a3m"), base.joinpath("final.a3m")]) + + if use_templates: + run_mmseqs(mmseqs, ["search", base.joinpath("prof_res"), dbbase.joinpath(template_db), base.joinpath("res_pdb"), + base.joinpath("tmp2"), "--db-load-mode", str(db_load_mode), "--threads", str(threads), "-s", "7.5", "-a", "-e", "0.1", "--prefilter-mode", str(prefilter_mode)]) + run_mmseqs(mmseqs, ["convertalis", base.joinpath("prof_res"), dbbase.joinpath(f"{template_db}{dbSuffix3}"), base.joinpath("res_pdb"), + base.joinpath(f"{template_db}"), "--format-output", + "query,target,fident,alnlen,mismatch,gapopen,qstart,qend,tstart,tend,evalue,bits,cigar", + "--db-output", "1", + "--db-load-mode", str(db_load_mode), "--threads", str(threads)]) + run_mmseqs(mmseqs, ["unpackdb", base.joinpath(f"{template_db}"), base.joinpath("."), "--unpack-name-mode", "0", "--unpack-suffix", ".m8"]) + run_mmseqs(mmseqs, ["rmdb", base.joinpath("res_pdb")]) + run_mmseqs(mmseqs, ["rmdb", base.joinpath(f"{template_db}")]) + + run_mmseqs(mmseqs, ["unpackdb", base.joinpath("final.a3m"), base.joinpath("."), "--unpack-name-mode", "0", "--unpack-suffix", ".a3m"]) + run_mmseqs(mmseqs, ["rmdb", base.joinpath("final.a3m")]) + run_mmseqs(mmseqs, ["rmdb", base.joinpath("uniref.a3m")]) + run_mmseqs(mmseqs, ["rmdb", base.joinpath("res")]) + # @formatter:on + # fmt: on + + for file in base.glob("prof_res*"): + file.unlink() + shutil.rmtree(base.joinpath("tmp")) + if use_templates: + shutil.rmtree(base.joinpath("tmp2")) + if use_env: + shutil.rmtree(base.joinpath("tmp3")) + + +def mmseqs_search_pair( + dbbase: Path, + base: Path, + uniref_db: Path = Path("uniref30_2302_db"), + spire_db: Path = Path("spire_ctg10_2401_db"), + mmseqs: Path = Path("mmseqs"), + pair_env: bool = True, + prefilter_mode: int = 0, + s: float = 8, + threads: int = 64, + db_load_mode: int = 2, + pairing_strategy: int = 0, +): + if not dbbase.joinpath(f"{uniref_db}.dbtype").is_file(): + raise FileNotFoundError(f"Database {uniref_db} does not exist") + if ( + ( + not dbbase.joinpath(f"{uniref_db}.idx").is_file() + and not dbbase.joinpath(f"{uniref_db}.idx.index").is_file() + ) + or os.environ.get("MMSEQS_IGNORE_INDEX", False) + ): + logger.info("Search does not use index") + db_load_mode = 0 + dbSuffix1 = "_seq" + dbSuffix2 = "_aln" + else: + dbSuffix1 = ".idx" + dbSuffix2 = ".idx" + + if pair_env: + db = spire_db + output = ".env.paired.a3m" + else: + db = uniref_db + output = ".paired.a3m" + + # fmt: off + # @formatter:off + search_param = ["--num-iterations", "3", "--db-load-mode", str(db_load_mode), "-a", "-e", "0.1", "--max-seqs", "10000",] + search_param += ["--prefilter-mode", str(prefilter_mode)] + if s is not 
None: + search_param += ["-s", "{:.1f}".format(s)] + else: + search_param += ["--k-score", "'seq:96,prof:80'"] + expand_param = ["--expansion-mode", "0", "-e", "inf", "--expand-filter-clusters", "0", "--max-seq-id", "0.95",] + run_mmseqs(mmseqs, ["search", base.joinpath("qdb"), dbbase.joinpath(db), base.joinpath("res"), base.joinpath("tmp"), "--threads", str(threads),] + search_param,) + run_mmseqs(mmseqs, ["expandaln", base.joinpath("qdb"), dbbase.joinpath(f"{db}{dbSuffix1}"), base.joinpath("res"), dbbase.joinpath(f"{db}{dbSuffix2}"), base.joinpath("res_exp"), "--db-load-mode", str(db_load_mode), "--threads", str(threads),] + expand_param,) + run_mmseqs(mmseqs, ["align", base.joinpath("qdb"), dbbase.joinpath(f"{db}{dbSuffix1}"), base.joinpath("res_exp"), base.joinpath("res_exp_realign"), "--db-load-mode", str(db_load_mode), "-e", "0.001", "--max-accept", "1000000", "--threads", str(threads), "-c", "0.5", "--cov-mode", "1",],) + run_mmseqs(mmseqs, ["pairaln", base.joinpath("qdb"), dbbase.joinpath(f"{db}"), base.joinpath("res_exp_realign"), base.joinpath("res_exp_realign_pair"), "--db-load-mode", str(db_load_mode), "--pairing-mode", str(pairing_strategy), "--pairing-dummy-mode", "0", "--threads", str(threads), ],) + run_mmseqs(mmseqs, ["align", base.joinpath("qdb"), dbbase.joinpath(f"{db}{dbSuffix1}"), base.joinpath("res_exp_realign_pair"), base.joinpath("res_exp_realign_pair_bt"), "--db-load-mode", str(db_load_mode), "-e", "inf", "-a", "--threads", str(threads), ],) + run_mmseqs(mmseqs, ["pairaln", base.joinpath("qdb"), dbbase.joinpath(f"{db}"), base.joinpath("res_exp_realign_pair_bt"), base.joinpath("res_final"), "--db-load-mode", str(db_load_mode), "--pairing-mode", str(pairing_strategy), "--pairing-dummy-mode", "1", "--threads", str(threads),],) + run_mmseqs(mmseqs, ["result2msa", base.joinpath("qdb"), dbbase.joinpath(f"{db}{dbSuffix1}"), base.joinpath("res_final"), base.joinpath("pair.a3m"), "--db-load-mode", str(db_load_mode), "--msa-format-mode", "5", "--threads", str(threads),],) + run_mmseqs(mmseqs, ["unpackdb", base.joinpath("pair.a3m"), base.joinpath("."), "--unpack-name-mode", "0", "--unpack-suffix", output,],) + run_mmseqs(mmseqs, ["rmdb", base.joinpath("res")]) + run_mmseqs(mmseqs, ["rmdb", base.joinpath("res_exp")]) + run_mmseqs(mmseqs, ["rmdb", base.joinpath("res_exp_realign")]) + run_mmseqs(mmseqs, ["rmdb", base.joinpath("res_exp_realign_pair")]) + run_mmseqs(mmseqs, ["rmdb", base.joinpath("res_exp_realign_pair_bt")]) + run_mmseqs(mmseqs, ["rmdb", base.joinpath("res_final")]) + run_mmseqs(mmseqs, ["rmdb", base.joinpath("pair.a3m")]) + shutil.rmtree(base.joinpath("tmp")) + # @formatter:on + # fmt: on + + +def main(): + parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter) + parser.add_argument( + "query", + type=Path, + help="fasta files with the queries.", + ) + parser.add_argument( + "dbbase", + type=Path, + help="The path to the database and indices you downloaded and created with setup_databases.sh", + ) + parser.add_argument( + "base", type=Path, help="Directory for the results (and intermediate files)" + ) + parser.add_argument( + "--prefilter-mode", + type=int, + default=0, + choices=[0, 1, 2], + help="Prefiltering algorithm to use: 0: k-mer (high-mem), 1: ungapped (high-cpu), 2: exhaustive (no prefilter, very slow). See wiki for more details: https://github.com/sokrypton/ColabFold/wiki#colabfold_search", + ) + parser.add_argument( + "-s", + type=float, + default=None, + help="MMseqs2 sensitivity. 
Lowering this will result in a much faster search but possibly sparser MSAs. By default, the k-mer threshold is directly set to the same one of the server, which corresponds to a sensitivity of ~8.", + ) + # dbs are uniref, templates and environmental + # We normally don't use templates + parser.add_argument( + "--db1", type=Path, default=Path("uniref30_2302_db"), help="UniRef database" + ) + parser.add_argument("--db2", type=Path, default=Path(""), help="Templates database") + parser.add_argument( + "--db3", + type=Path, + default=Path("colabfold_envdb_202108_db"), + help="Environmental database", + ) + parser.add_argument("--db4", type=Path, default=Path("spire_ctg10_2401_db"), help="Environmental pairing database") + + # poor man's boolean arguments + parser.add_argument( + "--use-env", type=int, default=1, choices=[0, 1], help="Use --db3" + ) + parser.add_argument( + "--use-env-pairing", type=int, default=0, choices=[0, 1], help="Use --db4" + ) + parser.add_argument( + "--use-templates", type=int, default=0, choices=[0, 1], help="Use --db2" + ) + parser.add_argument( + "--filter", + type=int, + default=1, + choices=[0, 1], + help="Filter the MSA by pre-defined align_eval, qsc, max_accept", + ) + + # mmseqs params + parser.add_argument( + "--mmseqs", + type=Path, + default=Path("mmseqs"), + help="Location of the mmseqs binary.", + ) + parser.add_argument( + "--expand-eval", + type=float, + default=math.inf, + help="e-val threshold for 'expandaln'.", + ) + parser.add_argument( + "--align-eval", type=int, default=10, help="e-val threshold for 'align'." + ) + parser.add_argument( + "--diff", + type=int, + default=3000, + help="filterresult - Keep at least this many seqs in each MSA block.", + ) + parser.add_argument( + "--qsc", + type=float, + default=-20.0, + help="filterresult - reduce diversity of output MSAs using min score thresh.", + ) + parser.add_argument( + "--max-accept", + type=int, + default=1000000, + help="align - Maximum accepted alignments before alignment calculation for a query is stopped.", + ) + parser.add_argument( + "--pairing_strategy", type=int, default=0, help="pairaln - Pairing strategy." + ) + parser.add_argument( + "--db-load-mode", + type=int, + default=0, + help="Database preload mode 0: auto, 1: fread, 2: mmap, 3: mmap+touch", + ) + parser.add_argument( + "--threads", type=int, default=64, help="Number of threads to use." 
+ ) + args = parser.parse_args() + + logging.basicConfig(level=logging.INFO) + + queries, is_complex = get_queries(args.query, None) + + queries_unique = [] + for job_number, (raw_jobname, query_sequences, a3m_lines) in enumerate(queries): + # remove duplicates before searching + query_sequences = ( + [query_sequences] if isinstance(query_sequences, str) else query_sequences + ) + query_seqs_unique = [] + for x in query_sequences: + if x not in query_seqs_unique: + query_seqs_unique.append(x) + query_seqs_cardinality = [0] * len(query_seqs_unique) + for seq in query_sequences: + seq_idx = query_seqs_unique.index(seq) + query_seqs_cardinality[seq_idx] += 1 + + queries_unique.append([raw_jobname, query_seqs_unique, query_seqs_cardinality]) + + args.base.mkdir(exist_ok=True, parents=True) + query_file = args.base.joinpath("query.fas") + with query_file.open("w") as f: + for job_number, ( + raw_jobname, + query_sequences, + query_seqs_cardinality, + ) in enumerate(queries_unique): + for j, seq in enumerate(query_sequences): + # The header of first sequence set as 101 + query_seq_headername = 101 + j + f.write(f">{query_seq_headername}\n{seq}\n") + + run_mmseqs( + args.mmseqs, + ["createdb", query_file, args.base.joinpath("qdb"), "--shuffle", "0"], + ) + with args.base.joinpath("qdb.lookup").open("w") as f: + id = 0 + file_number = 0 + for job_number, ( + raw_jobname, + query_sequences, + query_seqs_cardinality, + ) in enumerate(queries_unique): + for seq in query_sequences: + raw_jobname_first = raw_jobname.split()[0] + f.write(f"{id}\t{raw_jobname_first}\t{file_number}\n") + id += 1 + file_number += 1 + + mmseqs_search_monomer( + mmseqs=args.mmseqs, + dbbase=args.dbbase, + base=args.base, + uniref_db=args.db1, + template_db=args.db2, + metagenomic_db=args.db3, + use_env=args.use_env, + use_templates=args.use_templates, + filter=args.filter, + expand_eval=args.expand_eval, + align_eval=args.align_eval, + diff=args.diff, + qsc=args.qsc, + max_accept=args.max_accept, + prefilter_mode=args.prefilter_mode, + s=args.s, + db_load_mode=args.db_load_mode, + threads=args.threads, + ) + if is_complex is True: + mmseqs_search_pair( + mmseqs=args.mmseqs, + dbbase=args.dbbase, + base=args.base, + uniref_db=args.db1, + prefilter_mode=args.prefilter_mode, + s=args.s, + db_load_mode=args.db_load_mode, + threads=args.threads, + pairing_strategy=args.pairing_strategy, + pair_env=False, + ) + if args.use_env_pairing: + mmseqs_search_pair( + mmseqs=args.mmseqs, + dbbase=args.dbbase, + base=args.base, + uniref_db=args.db1, + spire_db=args.db4, + prefilter_mode=args.prefilter_mode, + s=args.s, + db_load_mode=args.db_load_mode, + threads=args.threads, + pairing_strategy=args.pairing_strategy, + pair_env=True, + ) + + id = 0 + for job_number, ( + raw_jobname, + query_sequences, + query_seqs_cardinality, + ) in enumerate(queries_unique): + unpaired_msa = [] + paired_msa = None + if len(query_seqs_cardinality) > 1: + paired_msa = [] + for seq in query_sequences: + with args.base.joinpath(f"{id}.a3m").open("r") as f: + unpaired_msa.append(f.read()) + args.base.joinpath(f"{id}.a3m").unlink() + + if args.use_env_pairing: + with open(args.base.joinpath(f"{id}.paired.a3m"), 'a') as file_pair: + with open(args.base.joinpath(f"{id}.env.paired.a3m"), 'r') as file_pair_env: + while chunk := file_pair_env.read(10 * 1024 * 1024): + file_pair.write(chunk) + args.base.joinpath(f"{id}.env.paired.a3m").unlink() + + if len(query_seqs_cardinality) > 1: + with args.base.joinpath(f"{id}.paired.a3m").open("r") as f: + 
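Before writing `query.fas`, `main()` collapses repeated chains of a complex into a list of unique sequences plus a cardinality vector, so an A:B:A target searches A only once while remembering that it occurs twice. A standalone sketch of that bookkeeping:

```python
query_sequences = ["MKLKQVADKL", "MKLKQVADKLEE", "MKLKQVADKL"]   # chains A:B:A

query_seqs_unique = []
for s in query_sequences:
    if s not in query_seqs_unique:
        query_seqs_unique.append(s)

query_seqs_cardinality = [0] * len(query_seqs_unique)
for s in query_sequences:
    query_seqs_cardinality[query_seqs_unique.index(s)] += 1

print(query_seqs_unique)        # ['MKLKQVADKL', 'MKLKQVADKLEE']
print(query_seqs_cardinality)   # [2, 1]
```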
paired_msa.append(f.read()) + args.base.joinpath(f"{id}.paired.a3m").unlink() + id += 1 + msa = msa_to_str( + unpaired_msa, paired_msa, query_sequences, query_seqs_cardinality + ) + args.base.joinpath(f"{job_number}.a3m").write_text(msa) + + # rename a3m files + for job_number, (raw_jobname, query_sequences, query_seqs_cardinality) in enumerate(queries_unique): + os.rename( + args.base.joinpath(f"{job_number}.a3m"), + args.base.joinpath(f"{safe_filename(raw_jobname)}.a3m"), + ) + + # rename m8 files + if args.use_templates: + id = 0 + for raw_jobname, query_sequences, query_seqs_cardinality in queries_unique: + with args.base.joinpath(f"{safe_filename(raw_jobname)}_{args.db2}.m8").open( + "w" + ) as f: + for _ in range(len(query_seqs_cardinality)): + with args.base.joinpath(f"{id}.m8").open("r") as g: + f.write(g.read()) + os.remove(args.base.joinpath(f"{id}.m8")) + id += 1 + + query_file.unlink() + run_mmseqs(args.mmseqs, ["rmdb", args.base.joinpath("qdb")]) + run_mmseqs(args.mmseqs, ["rmdb", args.base.joinpath("qdb_h")]) + + +if __name__ == "__main__": + main() diff --git a/scripts/colabfold_search.sh b/scripts/colabfold_search.sh index f3b609de7..f95578d72 100755 --- a/scripts/colabfold_search.sh +++ b/scripts/colabfold_search.sh @@ -2,17 +2,18 @@ # Copied from colabfold.mmseqs.com MMSEQS="$1" -QUERY="$2" -DBBASE="$3" -BASE="$4" -DB1="$5" -DB2="$6" -DB3="$7" -USE_ENV="${8:-1}" -USE_TEMPLATES="${9:-0}" -FILTER="${10:-1}" -INDEX=${11:-1} -DB_LOAD_MODE="${12:-2}" +MMSEQS_THREADS="$2" +QUERY="$3" +DBBASE="$4" +BASE="$5" +DB1="$6" +DB2="$7" +DB3="$8" +USE_ENV="${9:-1}" +USE_TEMPLATES="${10:-0}" +FILTER="${11:-1}" +INDEX=${12:-1} +DB_LOAD_MODE="${13:-2}" EXPAND_EVAL=inf ALIGN_EVAL=10 DIFF=3000 @@ -41,28 +42,28 @@ FILTER_PARAM="--filter-msa ${FILTER} --filter-min-enable 1000 --diff ${DIFF} --q EXPAND_PARAM="--expansion-mode 0 -e ${EXPAND_EVAL} --expand-filter-clusters ${FILTER} --max-seq-id 0.95" mkdir -p "${BASE}" "${MMSEQS}" createdb "${QUERY}" "${BASE}/qdb" -"${MMSEQS}" search "${BASE}/qdb" "${DBBASE}/${DB1}" "${BASE}/res" "${BASE}/tmp" $SEARCH_PARAM -"${MMSEQS}" expandaln "${BASE}/qdb" "${DBBASE}/${DB1}${SEQ}" "${BASE}/res" "${DBBASE}/${DB1}${ALN}" "${BASE}/res_exp" --db-load-mode ${DB_LOAD_MODE} ${EXPAND_PARAM} +"${MMSEQS}" search "${BASE}/qdb" "${DBBASE}/${DB1}" "${BASE}/res" "${BASE}/tmp" $SEARCH_PARAM --threads ${MMSEQS_THREADS} +"${MMSEQS}" expandaln "${BASE}/qdb" "${DBBASE}/${DB1}${SEQ}" "${BASE}/res" "${DBBASE}/${DB1}${ALN}" "${BASE}/res_exp" --db-load-mode ${DB_LOAD_MODE} ${EXPAND_PARAM} --threads ${MMSEQS_THREADS} "${MMSEQS}" mvdb "${BASE}/tmp/latest/profile_1" "${BASE}/prof_res" "${MMSEQS}" lndb "${BASE}/qdb_h" "${BASE}/prof_res_h" -"${MMSEQS}" align "${BASE}/prof_res" "${DBBASE}/${DB1}${SEQ}" "${BASE}/res_exp" "${BASE}/res_exp_realign" --db-load-mode ${DB_LOAD_MODE} -e ${ALIGN_EVAL} --max-accept ${MAX_ACCEPT} --alt-ali 10 -a -"${MMSEQS}" filterresult "${BASE}/qdb" "${DBBASE}/${DB1}${SEQ}" "${BASE}/res_exp_realign" "${BASE}/res_exp_realign_filter" --db-load-mode ${DB_LOAD_MODE} --qid 0 --qsc $QSC --diff 0 --max-seq-id 1.0 --filter-min-enable 100 -"${MMSEQS}" result2msa "${BASE}/qdb" "${DBBASE}/${DB1}${SEQ}" "${BASE}/res_exp_realign_filter" "${BASE}/uniref.a3m" --msa-format-mode 6 --db-load-mode ${DB_LOAD_MODE} ${FILTER_PARAM} +"${MMSEQS}" align "${BASE}/prof_res" "${DBBASE}/${DB1}${SEQ}" "${BASE}/res_exp" "${BASE}/res_exp_realign" --db-load-mode ${DB_LOAD_MODE} -e ${ALIGN_EVAL} --max-accept ${MAX_ACCEPT} --alt-ali 10 -a --threads ${MMSEQS_THREADS} +"${MMSEQS}" filterresult 
"${BASE}/qdb" "${DBBASE}/${DB1}${SEQ}" "${BASE}/res_exp_realign" "${BASE}/res_exp_realign_filter" --db-load-mode ${DB_LOAD_MODE} --qid 0 --qsc $QSC --diff 0 --max-seq-id 1.0 --filter-min-enable 100 --threads ${MMSEQS_THREADS} +"${MMSEQS}" result2msa "${BASE}/qdb" "${DBBASE}/${DB1}${SEQ}" "${BASE}/res_exp_realign_filter" "${BASE}/uniref.a3m" --msa-format-mode 6 --db-load-mode ${DB_LOAD_MODE} ${FILTER_PARAM} --threads ${MMSEQS_THREADS} "${MMSEQS}" rmdb "${BASE}/res_exp_realign" "${MMSEQS}" rmdb "${BASE}/res_exp" "${MMSEQS}" rmdb "${BASE}/res" "${MMSEQS}" rmdb "${BASE}/res_exp_realign_filter" if [ "${USE_TEMPLATES}" = "1" ]; then - "${MMSEQS}" search "${BASE}/prof_res" "${DBBASE}/${DB2}" "${BASE}/res_pdb" "${BASE}/tmp" --db-load-mode ${DB_LOAD_MODE} -s 7.5 -a -e 0.1 - "${MMSEQS}" convertalis "${BASE}/prof_res" "${DBBASE}/${DB2}${IDX}" "${BASE}/res_pdb" "${BASE}/${DB2}.m8" --format-output query,target,fident,alnlen,mismatch,gapopen,qstart,qend,tstart,tend,evalue,bits,cigar --db-load-mode ${DB_LOAD_MODE} + "${MMSEQS}" search "${BASE}/prof_res" "${DBBASE}/${DB2}" "${BASE}/res_pdb" "${BASE}/tmp" --db-load-mode ${DB_LOAD_MODE} -s 7.5 -a -e 0.1 --threads ${MMSEQS_THREADS} + "${MMSEQS}" convertalis "${BASE}/prof_res" "${DBBASE}/${DB2}${IDX}" "${BASE}/res_pdb" "${BASE}/${DB2}.m8" --format-output query,target,fident,alnlen,mismatch,gapopen,qstart,qend,tstart,tend,evalue,bits,cigar --db-load-mode ${DB_LOAD_MODE} --threads ${MMSEQS_THREADS} "${MMSEQS}" rmdb "${BASE}/res_pdb" fi if [ "${USE_ENV}" = "1" ]; then - "${MMSEQS}" search "${BASE}/prof_res" "${DBBASE}/${DB3}" "${BASE}/res_env" "${BASE}/tmp" $SEARCH_PARAM - "${MMSEQS}" expandaln "${BASE}/prof_res" "${DBBASE}/${DB3}${SEQ}" "${BASE}/res_env" "${DBBASE}/${DB3}${ALN}" "${BASE}/res_env_exp" -e ${EXPAND_EVAL} --expansion-mode 0 --db-load-mode ${DB_LOAD_MODE} - "${MMSEQS}" align "${BASE}/tmp/latest/profile_1" "${DBBASE}/${DB3}${SEQ}" "${BASE}/res_env_exp" "${BASE}/res_env_exp_realign" --db-load-mode ${DB_LOAD_MODE} -e ${ALIGN_EVAL} --max-accept ${MAX_ACCEPT} --alt-ali 10 -a - "${MMSEQS}" filterresult "${BASE}/qdb" "${DBBASE}/${DB3}${SEQ}" "${BASE}/res_env_exp_realign" "${BASE}/res_env_exp_realign_filter" --db-load-mode ${DB_LOAD_MODE} --qid 0 --qsc $QSC --diff 0 --max-seq-id 1.0 --filter-min-enable 100 - "${MMSEQS}" result2msa "${BASE}/qdb" "${DBBASE}/${DB3}${SEQ}" "${BASE}/res_env_exp_realign_filter" "${BASE}/bfd.mgnify30.metaeuk30.smag30.a3m" --msa-format-mode 6 --db-load-mode ${DB_LOAD_MODE} ${FILTER_PARAM} + "${MMSEQS}" search "${BASE}/prof_res" "${DBBASE}/${DB3}" "${BASE}/res_env" "${BASE}/tmp" $SEARCH_PARAM --threads ${MMSEQS_THREADS} + "${MMSEQS}" expandaln "${BASE}/prof_res" "${DBBASE}/${DB3}${SEQ}" "${BASE}/res_env" "${DBBASE}/${DB3}${ALN}" "${BASE}/res_env_exp" -e ${EXPAND_EVAL} --expansion-mode 0 --db-load-mode ${DB_LOAD_MODE} --threads ${MMSEQS_THREADS} + "${MMSEQS}" align "${BASE}/tmp/latest/profile_1" "${DBBASE}/${DB3}${SEQ}" "${BASE}/res_env_exp" "${BASE}/res_env_exp_realign" --db-load-mode ${DB_LOAD_MODE} -e ${ALIGN_EVAL} --max-accept ${MAX_ACCEPT} --alt-ali 10 -a --threads ${MMSEQS_THREADS} + "${MMSEQS}" filterresult "${BASE}/qdb" "${DBBASE}/${DB3}${SEQ}" "${BASE}/res_env_exp_realign" "${BASE}/res_env_exp_realign_filter" --db-load-mode ${DB_LOAD_MODE} --qid 0 --qsc $QSC --diff 0 --max-seq-id 1.0 --filter-min-enable 100 --threads ${MMSEQS_THREADS} + "${MMSEQS}" result2msa "${BASE}/qdb" "${DBBASE}/${DB3}${SEQ}" "${BASE}/res_env_exp_realign_filter" "${BASE}/bfd.mgnify30.metaeuk30.smag30.a3m" --msa-format-mode 6 --db-load-mode 
${DB_LOAD_MODE} ${FILTER_PARAM} --threads ${MMSEQS_THREADS} "${MMSEQS}" rmdb "${BASE}/res_env_exp_realign_filter" "${MMSEQS}" rmdb "${BASE}/res_env_exp_realign" "${MMSEQS}" rmdb "${BASE}/res_env_exp" diff --git a/scripts/generate_coverage_plot.py b/scripts/generate_coverage_plot.py new file mode 100644 index 000000000..fbc8428b0 --- /dev/null +++ b/scripts/generate_coverage_plot.py @@ -0,0 +1,98 @@ +# taken from: https://github.com/sokrypton/ColabFold/blob/main/colabfold/plot.py +# and https://github.com/sokrypton/ColabFold/blob/main/colabfold/batch.py +import argparse +import os +from pathlib import Path +import pickle as pkl +import numpy as np +from matplotlib import pyplot as plt + + +def plot_msa_v2(feature_dict, sort_lines=True, dpi=100): + seq = feature_dict["msa"][0] + if "asym_id" in feature_dict: + Ls = [0] + k = feature_dict["asym_id"][0] + for i in feature_dict["asym_id"]: + if i == k: + Ls[-1] += 1 + else: + Ls.append(1) + k = i + else: + Ls = [len(seq)] + Ln = np.cumsum([0] + Ls) + + try: + N = feature_dict["num_alignments"][0] + except: + N = feature_dict["num_alignments"] + + msa = feature_dict["msa"][:N] + gap = msa != 21 + qid = msa == seq + gapid = np.stack([gap[:, Ln[i]:Ln[i + 1]].max(-1) for i in range(len(Ls))], -1) + lines = [] + Nn = [] + for g in np.unique(gapid, axis=0): + i = np.where((gapid == g).all(axis=-1)) + qid_ = qid[i] + gap_ = gap[i] + seqid = np.stack([qid_[:, Ln[i]:Ln[i + 1]].mean(-1) for i in range(len(Ls))], -1).sum(-1) / (g.sum(-1) + 1e-8) + non_gaps = gap_.astype(float) + non_gaps[non_gaps == 0] = np.nan + if sort_lines: + lines_ = non_gaps[seqid.argsort()] * seqid[seqid.argsort(), None] + else: + lines_ = non_gaps[::-1] * seqid[::-1, None] + Nn.append(len(lines_)) + lines.append(lines_) + + Nn = np.cumsum(np.append(0, Nn)) + lines = np.concatenate(lines, 0) + plt.figure(figsize=(8, 5), dpi=dpi) + plt.title("Sequence coverage") + plt.imshow(lines, + interpolation='nearest', aspect='auto', + cmap="rainbow_r", vmin=0, vmax=1, origin='lower', + extent=(0, lines.shape[1], 0, lines.shape[0])) + for i in Ln[1:-1]: + plt.plot([i, i], [0, lines.shape[0]], color="black") + for j in Nn[1:-1]: + plt.plot([0, lines.shape[1]], [j, j], color="black") + + plt.plot((np.isnan(lines) == False).sum(0), color='black') + plt.xlim(0, lines.shape[1]) + plt.ylim(0, lines.shape[0]) + plt.colorbar(label="Sequence identity to query") + plt.xlabel("Positions") + plt.ylabel("Sequences") + return plt + + +def generate_coverage(fd_pkl, output_dir, name, dpi=500): + feature_dict_pkl = [] + with (open("{}".format(fd_pkl), "rb")) as openfile: + while True: + try: + feature_dict_pkl.append(pkl.load(openfile)) + except EOFError: + break + feature_dict = feature_dict_pkl[0] + msa_plot = plot_msa_v2(feature_dict, sort_lines=True, dpi=dpi) + coverage_png = os.path.join(output_dir,f"{name}_coverage.png") + msa_plot.savefig(str(coverage_png), bbox_inches='tight') + msa_plot.close() + # result_files.append(coverage_png) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument('--input_pkl', dest='feature_dict_pkl', required=True) + parser.add_argument('--output_dir', dest='output_dir', required=True) + parser.add_argument('--basename', dest='basename', required=True) + args = parser.parse_args() + + # generate_coverage(args.input_pkl, args.output_dir, args.basename) + print("gen coverage plot") + generate_coverage(args.feature_dict_pkl, args.output_dir, args.basename) diff --git a/scripts/generate_pae_plddt_plot.py b/scripts/generate_pae_plddt_plot.py new 
file mode 100644 index 000000000..762a2f7ed --- /dev/null +++ b/scripts/generate_pae_plddt_plot.py @@ -0,0 +1,262 @@ +# taken from: https://colab.research.google.com/github/mattarnoldbio/alphapickle/blob/main/AlphaPickle.ipynb#scrollTo=jQUP8Ab3RN7s +import argparse +import sys +import pickle as pkl +#from zipfile import Path +import numpy as np +import pandas as pd + +from matplotlib import pyplot as plt, colors as cols, cm as cm, rcParams, font_manager +from mpl_toolkits.axes_grid1 import ImageGrid +from matplotlib.table import table +from matplotlib.gridspec import GridSpec +import json +from sys import exit +import os +from Bio import PDB as pdb +from Bio import SeqIO +import io +import json +from json import encoder + +encoder.FLOAT_REPR = lambda o: format(o, '.2f') + +# plot size, in inches. +plot_size = 16 + +plot_increment = "200" # @param[10,25,50,100,250,500] +plot_increment = int(plot_increment) + + +# Define class for AlphaFold metadata file and class methods +class AlphaFoldMetaData(object): + def __init__(self, name, PathToFile, FastaSequence=None, ranking=None): + # Define attributes + self.name = name + self.PathToFile = PathToFile + self.FastaSequence = FastaSequence + self.saving_filename = name + self.saving_pathname = self.PathToFile.split(self.saving_filename)[0] + if ranking: + self.saving_filename = "ranked_{}".format(ranking) + + +class AlphaFoldPickle(AlphaFoldMetaData): + + def __init__(self, name, PathToFile, FastaSequence=None, ranking=None): + super().__init__(name, PathToFile, FastaSequence, ranking) # Define attributes + if ranking: + self.saving_filename = "ranked_{}".format(ranking) + self.data = [] + self.PAE = None + + # Extract pickled data + with (open("{}".format(self.PathToFile), "rb")) as openfile: + while True: + try: + self.data.append(pkl.load(openfile)) + except EOFError: + break + + # Try statement accounts for data run using non-pTM models, with no PAE output + try: + self.PAE = self.data[0]['predicted_aligned_error'].round(2) + except: + print("PAE model data not present. 
To access this performance metric, run AlphaFold" + "using pTM-enabled models.") + + # Define pLDDT + self.pLDDT = self.data[0]['plddt'].round(2) + self.max_pae = self.data[0]['max_predicted_aligned_error'] + self.ptm = self.data[0]['ptm_score'] + self.iptm = self.data[0].get('iptm_score') + + def save_to_json(self): + # save pkl to json format as colabfold + colab_data = {} + colab_data['plddt'] = list(np.around(np.array(self.pLDDT.tolist()), 2)) + colab_data['pae'] = list(np.around(np.array(self.PAE.tolist()), 2)) + colab_data['max_pae'] = self.max_pae + colab_data['ptm'] = self.ptm + colab_data['iptm'] = self.iptm + + class NumpyEncoder(json.JSONEncoder): + def default(self, obj): + if isinstance(obj, np.ndarray): + return obj.tolist() + return json.JSONEncoder.default(self, obj) + + with open('{}/{}.json'.format(self.saving_pathname, self.saving_filename), "w") as outfile: + outfile.write(json.dumps(colab_data, cls=NumpyEncoder)) + + +def plot_pLDDT(outdir, name, model1, model2, model3, fasta, size_in_inches=3.5, axis_label_increment=100): + m1_x = list(range(0, len(model1.pLDDT), 1)) + m1_y = list(model1.pLDDT) + m2_x = list(range(0, len(model2.pLDDT), 1)) + m2_y = list(model2.pLDDT) + m3_x = list(range(0, len(model3.pLDDT), 1)) + m3_y = list(model3.pLDDT) + + plt.figure(figsize=(size_in_inches, (size_in_inches / 2))) + ticks = np.arange(0, len(model1.pLDDT), axis_label_increment) + plt.xticks(ticks) + plt.yticks() + plt.title(name, size=20, fontweight="bold") + plt.xlabel("Residue index", size=16, fontweight="bold") + plt.ylabel("Predicted LDDT", size=16, fontweight="bold") + plt.plot(m1_x, m1_y, '-b', label='model1') + plt.plot(m2_x, m2_y, '-m', label='model2') + plt.plot(m3_x, m3_y, '-g', label='model3') + + def get_multimer_len(f): + all_len = [] + with open(f) as handle: + for record in SeqIO.parse(handle, "fasta"): + all_len.append(len(record.seq)) + return all_len + + all_len = get_multimer_len(fasta) + cumul_l = 0 + for l in all_len: + cumul_l += l + plt.vlines(x=cumul_l, ymin=0, ymax=100, colors='k', linestyles='--') + + plt.legend(loc='lower right') + plt.savefig('{}/{}_pLDDT.png'.format(outdir, name), dpi=300) + + +def plot_paE(outdir, name, model1, model2, model3, fasta, interface_df, size_in_inches=3.5, axis_label_increment=200): + + # data = [ + # [0.742, 376, 64, 83, 92, 2, 4, 8, 6.0], + # [0.742, 348, 69, 86, 92, 2, 3, 6, 6.0], + # [0.018, 3, 54, 58, 63, 14, 15, 15, 5.7] + # ] + # + # columns = ( + # 'pdockq', 'ncontacts', 'plddt_min', 'plddt_avg', 'plddt_max', 'pae_min', 'pae_avg', 'pae_max', 'distance_avg') + # + # df = pd.DataFrame( + # data, + # columns=list(columns) + # ) + + + def draw_subplot(name, ax, model, fasta, display_scale=False): + ticks = np.arange(0, model.PAE[1].size, axis_label_increment) + img_ax = ax.imshow(model.PAE, cmap="bwr") + ax.set_xticks(ticks) + ax.set_yticks(ticks) + ax.set_title(name, size=20, fontweight="bold") + ax.set_xlabel("Residue index", size=16, fontweight="bold") + ax.set_ylabel("Residue index", size=16, fontweight="bold") + + def get_multimer_len(f): + all_len = [] + with open(f) as handle: + for record in SeqIO.parse(handle, "fasta"): + all_len.append(len(record.seq)) + return all_len + + all_len = get_multimer_len(fasta) + cumul_l = 0 + for l in all_len: + cumul_l += l + ax.axvline(x=cumul_l, color='k', linewidth=4) + ax.axhline(y=cumul_l, color='k', linewidth=4) + return img_ax + + nrows = 1 + height_ratios = [1] + if interface_df is not None: + nrows = 2 + height_ratios = [1, 2] + + fig = plt.figure(figsize=(12, 10), 
layout="constrained") + gs1 = GridSpec(nrows, 4, figure=fig, width_ratios=[1,1,1,0.1], height_ratios=height_ratios) + + models = [model1, model2, model3] + + ax1 = fig.add_subplot(gs1[0, 0]) + im1 = draw_subplot(f'model1', ax1, models[0], fasta) + + ax2 = fig.add_subplot(gs1[0, 1]) + im2 = draw_subplot(f'model2', ax2, models[1], fasta) + + ax3 = fig.add_subplot(gs1[0, 2]) + im3 = draw_subplot(f'model3', ax3, models[2], fasta) + + ax4 = fig.add_subplot(gs1[0, 3]) + mesh = ax4.pcolormesh(models[2].PAE, cmap="bwr") + scale = fig.colorbar(mesh, ax4, label="Predicted error (Å)") + scale.set_label(label="Predicted error (Å)", size=14, fontweight="bold") + + if interface_df is not None: + ax5 = fig.add_subplot(gs1[1, :]) + ax5.axis('off') + ax5.axis('tight') + rows = ['model %d' % x for x in (1, 2, 3)] + tbl = ax5.table( + cellText=interface_df.values[:,2:], + rowLabels=rows, + colLabels=list(interface_df.columns)[2:], + loc="upper center") + tbl.auto_set_font_size(False) + tbl.set_fontsize(14) + tbl.auto_set_column_width([0, 1, 2, 3, 4, 5, 6, 7, 8]) + + # Save plot + plt.savefig('{}/{}_PAE.png'.format(outdir, name), dpi=300) + + +def generate_plots(fasta, pkl1, pkl2, pkl3, outdir, name, interface): + model1_results = AlphaFoldPickle(name, pkl1) + model2_results = AlphaFoldPickle(name, pkl2) + model3_results = AlphaFoldPickle(name, pkl3) + + + + print("Generating plddt plot") + plot_pLDDT(outdir, name, model1_results, model2_results, model3_results, fasta, size_in_inches=plot_size, + axis_label_increment=plot_increment) + + print("Generating PAE plot") + df = None + if interface is None: + print("No interface file provided, will not output interface table") + elif os.path.exists(interface): + df = pd.read_csv(interface, sep=",") + else: + print(f"Unable to create pandas dataframe with provided interface file {interface}") + + plot_paE(outdir, name, model1_results, model2_results, model3_results, fasta, df, size_in_inches=plot_size, + axis_label_increment=plot_increment) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument('--fasta', dest='fasta', required=True) + parser.add_argument('--model1_pkl', dest='model1_pkl', required=True) + parser.add_argument('--model2_pkl', dest='model2_pkl', required=True) + parser.add_argument('--model3_pkl', dest='model3_pkl', required=True) + parser.add_argument('--output_dir', dest='output_dir', required=True) + parser.add_argument('--basename', dest='basename', required=True) + parser.add_argument('--interface', dest='interface', required=False) + args = parser.parse_args() + + + # def get_multimer_len(f): + # all_len = [] + # with open(f) as handle: + # for record in SeqIO.parse(handle, "fasta"): + # all_len.append(len(record.seq)) + # return all_len + # + # + # all_len = get_multimer_len(args.fasta) + + generate_plots(args.fasta, args.model1_pkl, args.model2_pkl, args.model3_pkl, args.output_dir, + args.basename, args.interface) + # generate_plddt_plot(args.fasta, args.model1_pkl, args.model2_pkl, args.model3_pkl, args.output_dir, args.basename) + # generate_pae_plot(args.fasta, args.model1_pkl, args.model2_pkl, args.model3_pkl, args.output_dir, args.basename) diff --git a/scripts/generate_pae_plot.py b/scripts/generate_pae_plot.py new file mode 100644 index 000000000..c83427a0a --- /dev/null +++ b/scripts/generate_pae_plot.py @@ -0,0 +1,163 @@ +# taken from: https://colab.research.google.com/github/mattarnoldbio/alphapickle/blob/main/AlphaPickle.ipynb#scrollTo=jQUP8Ab3RN7s +import argparse +import sys +import pickle as 
pkl +import numpy as np +import pandas as pd + +from matplotlib import pyplot as plt, colors as cols, cm as cm, rcParams, font_manager +from mpl_toolkits.axes_grid1 import ImageGrid +import json +from sys import exit +import os +from Bio import SeqIO +import io +import json +from json import encoder + +encoder.FLOAT_REPR = lambda o: format(o, '.2f') + +# plot size, in inches. +plot_size = 16 + +# @markdown Input value to increment plot axes by (this may need finetuning based on output) +plot_increment = "200" # @param[10,25,50,100,250,500] +plot_increment = int(plot_increment) + + +# Define class for AlphaFold metadata file and class methods +class AlphaFoldMetaData(object): + def __init__(self, name, PathToFile, FastaSequence=None, ranking=None): + # Define attributes + self.name = name + self.PathToFile = PathToFile + self.FastaSequence = FastaSequence + self.saving_filename = name + self.saving_pathname = self.PathToFile.split(self.saving_filename)[0] + if ranking: + self.saving_filename = "ranked_{}".format(ranking) + + +class AlphaFoldPickle(AlphaFoldMetaData): + + def __init__(self, name, PathToFile, FastaSequence=None, ranking=None): + super().__init__(name, PathToFile, FastaSequence, ranking) # Define attributes + if ranking: + self.saving_filename = "ranked_{}".format(ranking) + self.data = [] + self.PAE = None + + # Extract pickled data + with (open("{}".format(self.PathToFile), "rb")) as openfile: + while True: + try: + self.data.append(pkl.load(openfile)) + except EOFError: + break + + # Try statement accounts for data run using non-pTM models, with no PAE output + try: + self.PAE = self.data[0]['predicted_aligned_error'].round(2) + except: + print("PAE model data not present. To access this performance metric, run AlphaFold" + "using pTM-enabled models.") + + # Define pLDDT + self.pLDDT = self.data[0]['plddt'].round(2) + self.max_pae = self.data[0]['max_predicted_aligned_error'] + self.ptm = self.data[0]['ptm_score'] + self.iptm = self.data[0]['iptm_score'] + + def save_to_json(self): + # save pkl to json format as colabfold + colab_data = {} + colab_data['plddt'] = list(np.around(np.array(self.pLDDT.tolist()), 2)) + colab_data['pae'] = list(np.around(np.array(self.PAE.tolist()), 2)) + colab_data['max_pae'] = self.max_pae + colab_data['ptm'] = self.ptm + colab_data['iptm'] = self.iptm + + class NumpyEncoder(json.JSONEncoder): + def default(self, obj): + if isinstance(obj, np.ndarray): + return obj.tolist() + return json.JSONEncoder.default(self, obj) + + with open('{}/{}.json'.format(self.saving_pathname, self.saving_filename), "w") as outfile: + outfile.write(json.dumps(colab_data, cls=NumpyEncoder)) + + +def plot_paE(outdir, name, model1, model2, model3, prot1len, size_in_inches=3.5, axis_label_increment=200): + def draw_subplot(name, ax, model, prot1len, display_scale=False): + ticks = np.arange(0, model.PAE[1].size, axis_label_increment) + img_ax = ax.imshow(model.PAE, cmap="bwr") + ax.set_xticks(ticks) + ax.set_yticks(ticks) + ax.set_title(name, size=20, fontweight="bold") + ax.set_xlabel("Residue index", size=16, fontweight="bold") + ax.set_ylabel("Residue index", size=16, fontweight="bold") + ax.axvline(x=prot1len, color='k', linewidth=4) + ax.axhline(y=prot1len, color='k', linewidth=4) + return img_ax + + fig = plt.figure(figsize=(size_in_inches, size_in_inches)) + + grid = ImageGrid(fig, 111, # as in plt.subplot(111) + nrows_ncols=(1, 3), + axes_pad=0.15, + share_all=False, + cbar_location="right", + cbar_mode="single", + cbar_size="7%", + cbar_pad=0.15, + ) + + 
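+    # ImageGrid with cbar_mode="single" allocates one shared colour-bar axis for
+    # the whole grid (exposed as ax.cax on each grid axes), so the three per-model
+    # PAE heatmaps drawn in the loop below can share a single
+    # "Predicted error (Å)" scale instead of carrying three separate colour bars.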
models = [model1, model2, model3] + + cnt = 1 + for ax, model in zip(grid, models): + im = draw_subplot(f'model{cnt}', ax, model, prot1len) + cnt += 1 + + scale = ax.cax.colorbar(im, label="Predicted error (Å)") + scale.set_label(label="Predicted error (Å)", size=14, fontweight="bold") + # Save plot + plt.savefig('{}/{}_PAE.png'.format(outdir, name), dpi=300) + + +def generate_pae_plot(fasta, pkl1, pkl2, pkl3, outdir, name): + model1_results = AlphaFoldPickle(name, pkl1) + model1_results.saving_pathname = outdir + model1_results.saving_filename = name + + model2_results = AlphaFoldPickle(name, pkl2) + model2_results.saving_pathname = outdir + model2_results.saving_filename = name + + model3_results = AlphaFoldPickle(name, pkl3) + model3_results.saving_pathname = outdir + model3_results.saving_filename = name + + def get_multimer_prot1_len(f): + with open(f) as handle: + for record in SeqIO.parse(handle, "fasta"): + return len(record.seq) + + prot1len = get_multimer_prot1_len(fasta) + # results.write_pLDDT_file() + print("Plotting pLDDT for {}".format(name)) + plot_paE(outdir, name, model1_results, model2_results, model3_results, prot1len, size_in_inches=plot_size, + axis_label_increment=plot_increment) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument('--fasta', dest='fasta', required=True) + parser.add_argument('--model1_pkl', dest='model1_pkl', required=True) + parser.add_argument('--model2_pkl', dest='model2_pkl', required=True) + parser.add_argument('--model3_pkl', dest='model3_pkl', required=True) + parser.add_argument('--output_dir', dest='output_dir', required=True) + parser.add_argument('--basename', dest='basename', required=True) + args = parser.parse_args() + + generate_pae_plot(args.fasta, args.model1_pkl, args.model2_pkl, args.model3_pkl, args.output_dir, args.basename) diff --git a/scripts/generate_plddt_plot.py b/scripts/generate_plddt_plot.py new file mode 100644 index 000000000..6b6a49f4d --- /dev/null +++ b/scripts/generate_plddt_plot.py @@ -0,0 +1,155 @@ +# taken from: https://colab.research.google.com/github/mattarnoldbio/alphapickle/blob/main/AlphaPickle.ipynb#scrollTo=jQUP8Ab3RN7s +import argparse +import sys +import pickle as pkl +import numpy as np +import pandas as pd + +from matplotlib import pyplot as plt, colors as cols, cm as cm, rcParams, font_manager +import json +from sys import exit +import os +from Bio import SeqIO +import io +import json +from json import encoder + +encoder.FLOAT_REPR = lambda o: format(o, '.2f') + +# plot size, in inches. 
+plot_size = 16 + +# @markdown Input value to increment plot axes by (this may need finetuning based on output) +plot_increment = "50" # @param[10,25,50,100,250,500] +plot_increment = int(plot_increment) + + +# Define class for AlphaFold metadata file and class methods +class AlphaFoldMetaData(object): + def __init__(self, name, PathToFile, FastaSequence=None, ranking=None): + # Define attributes + self.name = name + self.PathToFile = PathToFile + self.FastaSequence = FastaSequence + self.saving_filename = name + self.saving_pathname = self.PathToFile.split(self.saving_filename)[0] + if ranking: + self.saving_filename = "ranked_{}".format(ranking) + + +class AlphaFoldPickle(AlphaFoldMetaData): + + def __init__(self, name, PathToFile, FastaSequence=None, ranking=None): + super().__init__(name, PathToFile, FastaSequence, ranking) # Define attributes + if ranking: + self.saving_filename = "ranked_{}".format(ranking) + self.data = [] + self.PAE = None + + # Extract pickled data + with (open("{}".format(self.PathToFile), "rb")) as openfile: + while True: + try: + self.data.append(pkl.load(openfile)) + except EOFError: + break + + # Try statement accounts for data run using non-pTM models, with no PAE output + try: + self.PAE = self.data[0]['predicted_aligned_error'].round(2) + except: + print("PAE model data not present. To access this performance metric, run AlphaFold" + "using pTM-enabled models.") + + # Define pLDDT + self.pLDDT = self.data[0]['plddt'].round(2) + self.max_pae = self.data[0]['max_predicted_aligned_error'] + self.ptm = self.data[0]['ptm_score'] + self.iptm = self.data[0]['iptm_score'] + + def save_to_json(self): + # save pkl to json format as colabfold + colab_data = {} + colab_data['plddt'] = list(np.around(np.array(self.pLDDT.tolist()), 2)) + colab_data['pae'] = list(np.around(np.array(self.PAE.tolist()), 2)) + colab_data['max_pae'] = self.max_pae + colab_data['ptm'] = self.ptm + colab_data['iptm'] = self.iptm + + class NumpyEncoder(json.JSONEncoder): + def default(self, obj): + if isinstance(obj, np.ndarray): + return obj.tolist() + return json.JSONEncoder.default(self, obj) + + with open('{}/{}.json'.format(self.saving_pathname, self.saving_filename), "w") as outfile: + outfile.write(json.dumps(colab_data, cls=NumpyEncoder)) + + +def plot_pLDDT(outdir, name, model1, model2, model3, prot1len, size_in_inches=3.5, axis_label_increment=100): + m1_x = list(range(0, len(model1.pLDDT), 1)) + m1_y = list(model1.pLDDT) + m2_x = list(range(0, len(model2.pLDDT), 1)) + m2_y = list(model2.pLDDT) + m3_x = list(range(0, len(model3.pLDDT), 1)) + m3_y = list(model3.pLDDT) + + plt.figure(figsize=(size_in_inches, (size_in_inches / 2))) + ticks = np.arange(0, len(model1.pLDDT), axis_label_increment) + plt.xticks(ticks) + plt.yticks() + plt.title(name, size=20, fontweight="bold") + plt.xlabel("Residue index", size=16, fontweight="bold") + plt.ylabel("Predicted LDDT", size=16, fontweight="bold") + plt.plot(m1_x, m1_y, '-b', label='model1') + plt.plot(m2_x, m2_y, '-m', label='model2') + plt.plot(m3_x, m3_y, '-g', label='model3') + + plt.vlines(x=prot1len, ymin=0, ymax=100, colors='k', linestyles='--') + + plt.legend(loc='lower right') + plt.savefig('{}/{}_pLDDT.png'.format(outdir, name), dpi=300) + + +def generate_plddt_plot(fasta, pkl1, pkl2, pkl3, outdir, name): + model1_results = AlphaFoldPickle(name, pkl1) + model1_results.saving_pathname = outdir + model1_results.saving_filename = name + print("Saving model1 in json format") + model1_results.save_to_json() + + model2_results = 
AlphaFoldPickle(name, pkl2) + model2_results.saving_pathname = outdir + model2_results.saving_filename = name + print("Saving model2 in json format") + model2_results.save_to_json() + + model3_results = AlphaFoldPickle(name, pkl3) + model3_results.saving_pathname = outdir + model3_results.saving_filename = name + print("Saving model3 in json format") + model3_results.save_to_json() + + def get_multimer_prot1_len(f): + with open(f) as handle: + for record in SeqIO.parse(handle, "fasta"): + return len(record.seq) + + prot1len = get_multimer_prot1_len(fasta) + # results.write_pLDDT_file() + print("Plotting pLDDT for {}".format(name)) + plot_pLDDT(outdir, name, model1_results, model2_results, model3_results, prot1len, size_in_inches=plot_size, + axis_label_increment=plot_increment) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument('--fasta', dest='fasta', required=True) + parser.add_argument('--model1_pkl', dest='model1_pkl', required=True) + parser.add_argument('--model2_pkl', dest='model2_pkl', required=True) + parser.add_argument('--model3_pkl', dest='model3_pkl', required=True) + parser.add_argument('--output_dir', dest='output_dir', required=True) + parser.add_argument('--basename', dest='basename', required=True) + args = parser.parse_args() + + generate_plddt_plot(args.fasta, args.model1_pkl, args.model2_pkl, args.model3_pkl, args.output_dir, args.basename) diff --git a/scripts/pickle_to_json.py b/scripts/pickle_to_json.py new file mode 100644 index 000000000..b920fb70c --- /dev/null +++ b/scripts/pickle_to_json.py @@ -0,0 +1,110 @@ +# taken from: https://colab.research.google.com/github/mattarnoldbio/alphapickle/blob/main/AlphaPickle.ipynb#scrollTo=jQUP8Ab3RN7s +import argparse +import sys +import pickle as pkl +#from zipfile import Path +import numpy as np +import pandas as pd + +from matplotlib import pyplot as plt, colors as cols, cm as cm, rcParams, font_manager +from mpl_toolkits.axes_grid1 import ImageGrid +from matplotlib.table import table +from matplotlib.gridspec import GridSpec +import json +from sys import exit +import os +from Bio import PDB as pdb +from Bio import SeqIO +import io +import json +from json import encoder + +encoder.FLOAT_REPR = lambda o: format(o, '.2f') + +# plot size, in inches. +plot_size = 16 + +plot_increment = "200" # @param[10,25,50,100,250,500] +plot_increment = int(plot_increment) + + +# Define class for AlphaFold metadata file and class methods +class AlphaFoldMetaData(object): + def __init__(self, name, PathToFile, FastaSequence=None, ranking=None): + # Define attributes + self.name = name + self.PathToFile = PathToFile + self.FastaSequence = FastaSequence + self.saving_filename = name + self.saving_pathname = self.PathToFile.split(self.saving_filename)[0] + if ranking: + self.saving_filename = "ranked_{}".format(ranking) + + +class AlphaFoldPickle(AlphaFoldMetaData): + + def __init__(self, name, PathToFile, FastaSequence=None, ranking=None): + super().__init__(name, PathToFile, FastaSequence, ranking) # Define attributes + if ranking: + self.saving_filename = "ranked_{}".format(ranking) + self.data = [] + self.PAE = None + + # Extract pickled data + with (open("{}".format(self.PathToFile), "rb")) as openfile: + while True: + try: + self.data.append(pkl.load(openfile)) + except EOFError: + break + + # Try statement accounts for data run using non-pTM models, with no PAE output + try: + self.PAE = self.data[0]['predicted_aligned_error'].round(2) + except: + print("PAE model data not present. 
To access this performance metric, run AlphaFold" + "using pTM-enabled models.") + + # Define pLDDT + self.pLDDT = self.data[0]['plddt'].round(2) + self.max_pae = self.data[0]['max_predicted_aligned_error'] + self.ptm = self.data[0]['ptm_score'] + self.iptm = self.data[0].get('iptm_score') + + def save_to_json(self): + # save pkl to json format as colabfold + colab_data = {} + colab_data['plddt'] = list(np.around(np.array(self.pLDDT.tolist()), 2)) + colab_data['pae'] = list(np.around(np.array(self.PAE.tolist()), 2)) + colab_data['max_pae'] = self.max_pae + colab_data['ptm'] = self.ptm + colab_data['iptm'] = self.iptm + + class NumpyEncoder(json.JSONEncoder): + def default(self, obj): + if isinstance(obj, np.ndarray): + return obj.tolist() + return json.JSONEncoder.default(self, obj) + + with open('{}/{}.json'.format(self.saving_pathname, self.saving_filename), "w") as outfile: + outfile.write(json.dumps(colab_data, cls=NumpyEncoder)) + + +def generate_json(pkl1, outdir, name, model_nbr): + model1_results = AlphaFoldPickle(name, pkl1) + model1_results.saving_pathname = outdir + # "${NAME}_model_${model}_multimer_v3_relaxed" + model1_results.saving_filename = f"{name}_model_{model_nbr}_multimer_v3_relaxed" + print(f"Saving model{model_nbr} in json format") + model1_results.save_to_json() + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument('--model_pkl', dest='model_pkl', required=True) + parser.add_argument('--output_dir', dest='output_dir', required=True) + parser.add_argument('--basename', dest='basename', required=True) + parser.add_argument('--model_nbr', dest='model_nbr', required=True) + args = parser.parse_args() + + generate_json(args.model_pkl, args.output_dir, args.basename, args.model_nbr) diff --git a/scripts/precompute_alignments.py b/scripts/precompute_alignments.py index 1bef41a99..defc51047 100644 --- a/scripts/precompute_alignments.py +++ b/scripts/precompute_alignments.py @@ -22,45 +22,56 @@ def run_seq_group_alignments(seq_groups, alignment_runner, args): dirs = set(os.listdir(args.output_dir)) - for seq, names in seq_groups: - first_name = names[0] - alignment_dir = os.path.join(args.output_dir, first_name) + # print(f"seq_groups: {seq_groups}") + print(f"dirs: {dirs}") + for seq, name in seq_groups: + # first_name = names[0] + print(f"name: {name}") + # print(f"seq: {seq}") + # print(f"args.output_dir: {args.output_dir}") + alignment_dir = os.path.join(args.output_dir, name) - try: - os.makedirs(alignment_dir) - except Exception as e: - logging.warning(f"Failed to create directory for {first_name} with exception {e}...") - continue + # try: + # os.makedirs(alignment_dir) + # except Exception as e: + # logging.warning(f"Failed to create directory for {first_name} with exception {e}...") + # continue + os.makedirs(alignment_dir, exist_ok=True) fd, fasta_path = tempfile.mkstemp(suffix=".fasta") + # print(f"fd: {fd}") + # print(f"fasta_path: {fasta_path}") with os.fdopen(fd, 'w') as fp: fp.write(f'>query\n{seq}') try: + print(f"running alignement fasta_path: {fasta_path}") + print(f"running alignement alignment_dir: {alignment_dir}") alignment_runner.run( fasta_path, alignment_dir ) except Exception as e: logging.warning(e) - logging.warning(f"Failed to run alignments for {first_name}. Skipping...") + logging.warning(f"Failed to run alignments for {name}. 
Skipping...") os.remove(fasta_path) os.rmdir(alignment_dir) continue os.remove(fasta_path) - for name in names[1:]: - if(name in dirs): - logging.warning( - f'{name} has already been processed. Skipping...' - ) - continue + # for name in names[1:]: + # if(name in dirs): + # logging.warning( + # f'{name} has already been processed. Skipping...' + # ) + # continue - cp_dir = os.path.join(args.output_dir, name) - os.makedirs(cp_dir, exist_ok=True) - - for f in os.listdir(alignment_dir): - copyfile(os.path.join(alignment_dir, f), os.path.join(cp_dir, f)) + # cp_dir = os.path.join(args.output_dir, name) + # os.makedirs(cp_dir, exist_ok=True) + # + # for f in os.listdir(alignment_dir): + # print(f"copying align results to: {cp_dir}") + # copyfile(os.path.join(alignment_dir, f), os.path.join(cp_dir, f)) def parse_and_align(files, alignment_runner, args): @@ -88,15 +99,21 @@ def parse_and_align(files, alignment_runner, args): elif(f.endswith('.fasta') or f.endswith('.fa')): with open(path, 'r') as fp: fasta_str = fp.read() - input_seqs, _ = parse_fasta(fasta_str) - if len(input_seqs) != 1: - msg = f'More than one input_sequence found in {f}' - if(args.raise_errors): - raise ValueError(msg) - else: - logging.warning(msg) - input_sequence = input_seqs[0] - seq_group_dict[input_sequence] = [file_id] + #input_seqs, _ = parse_fasta(fasta_str) + input_seqs, input_tags = parse_fasta(fasta_str) + # print(f"input_seqs: {input_seqs}") + # print(f"input_tags: {input_tags}") + # if len(input_seqs) != 1: + # msg = f'More than one input_sequence found in {f}' + # if(args.raise_errors): + # raise ValueError(msg) + # else: + # logging.warning(msg) + for index in range(len(input_seqs)): + seq_group_dict[input_seqs[index]] = input_tags[index] + # for input_sequence, input_tag in parse_fasta(fasta_str): + # # input_sequence = input_seqs[0] + # seq_group_dict[input_sequence] = [input_tag] elif(f.endswith('.core')): with open(path, 'r') as fp: core_str = fp.read() @@ -110,6 +127,7 @@ def parse_and_align(files, alignment_runner, args): else: continue + # print(f"seq_group_dict: {seq_group_dict}") seq_group_tuples = [(k,v) for k,v in seq_group_dict.items()] run_seq_group_alignments(seq_group_tuples, alignment_runner, args) diff --git a/scripts/precompute_alignments_mmseqs.py b/scripts/precompute_alignments_mmseqs.py index 2c564e41f..f1b509126 100644 --- a/scripts/precompute_alignments_mmseqs.py +++ b/scripts/precompute_alignments_mmseqs.py @@ -37,13 +37,23 @@ def main(args): lines = [l.strip() for l in f.readlines()] names = lines[::2] - seqs = lines[1::2] + seqs = lines[1::2] + + print(f"name: {names}") + print(f"seqs: {seqs}") if(args.fasta_chunk_size is None): chunk_size = len(seqs) else: chunk_size = args.fasta_chunk_size + print(f"chunk_size: {chunk_size}") + if (args.threads is None): + threads = 1 + else: + threads = args.threads + print(f"threads: {threads}") + # Make the output directory Path(args.output_dir).mkdir(parents=True, exist_ok=True) @@ -51,6 +61,7 @@ def main(args): s = 0 while(s < len(seqs)): e = s + chunk_size + print(f"running chunk: {s} - {e}") chunk_fasta = [el for tup in zip(names[s:e], seqs[s:e]) for el in tup] s = e @@ -66,6 +77,7 @@ def main(args): cmd = [ "scripts/colabfold_search.sh", args.mmseqs_binary_path, + f"{threads}", chunk_fasta_path, args.mmseqs_db_dir, args.output_dir, @@ -100,7 +112,7 @@ def main(args): hhsearch_pdb70_runner = hhsearch.HHSearch( - binary_path=args.hhsearch_binary_path, databases=[args.pdb70] + binary_path=args.hhsearch_binary_path, databases=[args.pdb70], 
n_cpu=threads ) @@ -146,6 +158,10 @@ def main(args): "mmseqs_binary_path", type=str, help="Path to mmseqs binary" ) + parser.add_argument( + "--threads", type=int, default=1, + help="""Number of threads MMseqs2 should use (default: 1)""" + ) parser.add_argument( "--hhsearch_binary_path", type=str, default=None, help="""Path to hhsearch binary (for template search). In future @@ -171,5 +187,5 @@ def main(args): raise ValueError( "pdb70 must be specified along with hhsearch_binary_path" ) - + print("Starting alignment pipeline") main(args)
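
The new pickle_to_json.py and the plotting scripts added above all share one core pattern: open the AlphaFold result pickle, round the confidence arrays, and serialise them to a ColabFold-style JSON file through a small NumpyEncoder. The sketch below condenses that pattern into a single function; it is illustrative only. The function names pickle_to_confidence_json and _as_float and the example file paths are hypothetical, while the pickle keys ('plddt', 'predicted_aligned_error', 'max_predicted_aligned_error', 'ptm_score', 'iptm_score') are the ones the scripts in this patch read.

import json
import pickle

import numpy as np


class NumpyEncoder(json.JSONEncoder):
    """Serialise numpy arrays as plain lists so json.dumps can handle them."""
    def default(self, obj):
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        return json.JSONEncoder.default(self, obj)


def _as_float(value):
    # Numpy scalars are not JSON-serialisable; cast them, but keep None as-is.
    return None if value is None else float(value)


def pickle_to_confidence_json(pkl_path, json_path):
    # Load the result dictionary stored in the model pickle.
    with open(pkl_path, "rb") as fh:
        data = pickle.load(fh)

    payload = {
        "plddt": np.round(data["plddt"], 2),
        "max_pae": _as_float(data.get("max_predicted_aligned_error")),
        "ptm": _as_float(data.get("ptm_score")),
        "iptm": _as_float(data.get("iptm_score")),
    }
    # Non-pTM models do not produce a PAE matrix, so only add it when present.
    if "predicted_aligned_error" in data:
        payload["pae"] = np.round(data["predicted_aligned_error"], 2)

    with open(json_path, "w") as out:
        out.write(json.dumps(payload, cls=NumpyEncoder))


if __name__ == "__main__":
    # Hypothetical file names, for illustration only.
    pickle_to_confidence_json("result_model_1_multimer_v3.pkl",
                              "model_1_confidence.json")

A script such as pickle_to_json.py is then little more than argparse glue around this function, with the output filename matched to the relaxed-model naming convention used elsewhere in the pipeline.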