150 changes: 150 additions & 0 deletions ml_peg/analysis/conformers/OpenFF_Tors/analyse_OpenFF_Tors.py
@@ -0,0 +1,150 @@
"""
Analyse the OpenFF-Tors benchmark dataset for torsional angles.

The Journal of Physical Chemistry B 2024 128 (32), 7888-7902.
DOI: 10.1021/acs.jpcb.4c03167.
"""

from __future__ import annotations

from pathlib import Path

from ase import units
from ase.io import read, write
import pytest

from ml_peg.analysis.utils.decorators import build_table, plot_parity
from ml_peg.analysis.utils.utils import build_d3_name_map, load_metrics_config, mae
from ml_peg.app import APP_ROOT
from ml_peg.calcs import CALCS_ROOT
from ml_peg.models.get_models import load_models
from ml_peg.models.models import current_models

MODELS = load_models(current_models)
D3_MODEL_NAMES = build_d3_name_map(MODELS)

CALC_PATH = CALCS_ROOT / "conformers" / "OpenFF_Tors" / "outputs"
OUT_PATH = APP_ROOT / "data" / "conformers" / "OpenFF_Tors"

METRICS_CONFIG_PATH = Path(__file__).with_name("metrics.yml")
DEFAULT_THRESHOLDS, DEFAULT_TOOLTIPS, DEFAULT_WEIGHTS = load_metrics_config(
METRICS_CONFIG_PATH
)

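# Convert energies from eV to kcal/mol using ASE unit definitions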
EV_TO_KCAL = units.mol / units.kcal


def labels() -> list[str]:
"""
Get list of system names.

Returns
-------
    list[str]
List of all system names.
"""
for model_name in MODELS:
labels_list = [
path.stem for path in sorted((CALC_PATH / model_name).glob("*.xyz"))
]
break
return labels_list


@pytest.fixture
@plot_parity(
filename=OUT_PATH / "figure_openff_tors.json",
title="Energies",
x_label="Predicted energy / kcal/mol",
y_label="Reference energy / kcal/mol",
hoverdata={
"Labels": labels(),
},
)
def conformer_energies() -> dict[str, list]:
"""
Get conformer energies for all systems.

Returns
-------
dict[str, list]
        Dictionary of all reference and predicted relative conformer energies.
"""
results = {"ref": []} | {mlip: [] for mlip in MODELS}
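    # Reference energies are the same for every model, so store them only once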
ref_stored = False

for model_name in MODELS:
for label in labels():
atoms = read(CALC_PATH / model_name / f"{label}.xyz")

results[model_name].append(atoms.info["model_rel_energy"] * EV_TO_KCAL)
if not ref_stored:
results["ref"].append(atoms.info["ref_rel_energy"] * EV_TO_KCAL)

# Write structures for app
structs_dir = OUT_PATH / model_name
structs_dir.mkdir(parents=True, exist_ok=True)
write(structs_dir / f"{label}.xyz", atoms)
ref_stored = True
return results


@pytest.fixture
def get_mae(conformer_energies: dict[str, list]) -> dict[str, float]:
"""
Get mean absolute error for conformer energies.

Parameters
----------
conformer_energies
Dictionary of reference and predicted conformer energies.

Returns
-------
dict[str, float]
        Dictionary of mean absolute errors in predicted conformer energies for all models.
"""
results = {}
for model_name in MODELS:
results[model_name] = mae(
conformer_energies["ref"], conformer_energies[model_name]
)
return results


@pytest.fixture
@build_table(
filename=OUT_PATH / "openff_tors_metrics_table.json",
metric_tooltips=DEFAULT_TOOLTIPS,
thresholds=DEFAULT_THRESHOLDS,
mlip_name_map=D3_MODEL_NAMES,
)
def metrics(get_mae: dict[str, float]) -> dict[str, dict]:
"""
Get all metrics.

Parameters
----------
get_mae
Mean absolute errors for all models.

Returns
-------
dict[str, dict]
Metric names and values for all models.
"""
return {
"MAE": get_mae,
}


def test_openff_tors(metrics: dict[str, dict]) -> None:
"""
Run OpenFF-Tors test.

Parameters
----------
metrics
        Dictionary of benchmark metric names and values for each model.
"""
return
7 changes: 7 additions & 0 deletions ml_peg/analysis/conformers/OpenFF_Tors/metrics.yml
@@ -0,0 +1,7 @@
metrics:
  MAE:
    good: 0.0
    bad: 20.0
    unit: kcal/mol
    tooltip: Mean Absolute Error for all systems
    level_of_theory: CCSD(T)
90 changes: 90 additions & 0 deletions ml_peg/app/conformers/OpenFF_Tors/app_OpenFF_Tors.py
@@ -0,0 +1,90 @@
"""Run OpenFF-Tors app."""

from __future__ import annotations

from dash import Dash
from dash.html import Div

from ml_peg.app import APP_ROOT
from ml_peg.app.base_app import BaseApp
from ml_peg.app.utils.build_callbacks import (
plot_from_table_column,
struct_from_scatter,
)
from ml_peg.app.utils.load import read_plot
from ml_peg.models.get_models import get_model_names
from ml_peg.models.models import current_models

MODELS = get_model_names(current_models)
BENCHMARK_NAME = "OpenFF-Tors"
DOCS_URL = (
"https://ddmms.github.io/ml-peg/user_guide/benchmarks/conformers.html#openff-tors"
)
DATA_PATH = APP_ROOT / "data" / "conformers" / "OpenFF_Tors"


class OpenFFTorsApp(BaseApp):
"""OpenFF-Tors benchmark app layout and callbacks."""

def register_callbacks(self) -> None:
"""Register callbacks to app."""
scatter = read_plot(
DATA_PATH / "figure_openff_tors.json",
id=f"{BENCHMARK_NAME}-figure",
)

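        # Conformer geometries are shared across models, so display structures from the first model's outputs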
model_dir = DATA_PATH / MODELS[0]
if model_dir.exists():
labels = sorted([f.stem for f in model_dir.glob("*.xyz")])
structs = [
f"assets/conformers/OpenFF_Tors/{MODELS[0]}/{label}.xyz"
for label in labels
]
else:
structs = []

plot_from_table_column(
table_id=self.table_id,
plot_id=f"{BENCHMARK_NAME}-figure-placeholder",
column_to_plot={"MAE": scatter},
)

struct_from_scatter(
scatter_id=f"{BENCHMARK_NAME}-figure",
struct_id=f"{BENCHMARK_NAME}-struct-placeholder",
structs=structs,
mode="struct",
)


def get_app() -> OpenFFTorsApp:
"""
Get OpenFF-Tors benchmark app layout and callback registration.

Returns
-------
OpenFFTorsApp
Benchmark layout and callback registration.
"""
return OpenFFTorsApp(
name=BENCHMARK_NAME,
description=(
"Performance in predicting torsional energy profiles for the "
"OpenFF-Tors torsional profiles benchmark. "
"Reference data from CCSD(T) calculations."
),
docs_url=DOCS_URL,
table_path=DATA_PATH / "openff_tors_metrics_table.json",
extra_components=[
Div(id=f"{BENCHMARK_NAME}-figure-placeholder"),
Div(id=f"{BENCHMARK_NAME}-struct-placeholder"),
],
)


if __name__ == "__main__":
full_app = Dash(__name__, assets_folder=DATA_PATH.parent.parent)
benchmark_app = get_app()
full_app.layout = benchmark_app.layout
benchmark_app.register_callbacks()
full_app.run(port=8068, debug=True)
125 changes: 125 additions & 0 deletions ml_peg/calcs/conformers/OpenFF_Tors/calc_OpenFF_Tors.py
@@ -0,0 +1,125 @@
"""
Calculate the OpenFF-Tors benchmark dataset for torsional angles.

The Journal of Physical Chemistry B 2024 128 (32), 7888-7902.
DOI: 10.1021/acs.jpcb.4c03167.
"""

from __future__ import annotations

import json
from pathlib import Path

from ase import Atoms, units
from ase.io import write
import mlipx
from mlipx.abc import NodeWithCalculator
import numpy as np
from rdkit import Chem
from tqdm import tqdm
import zntrack

from ml_peg.calcs.utils.utils import chdir, download_s3_data
from ml_peg.models.get_models import load_models
from ml_peg.models.models import current_models

MODELS = load_models(current_models)

OUT_PATH = Path(__file__).parent / "outputs"


class OpenFFTorsBenchmark(zntrack.Node):
"""Compute the benchmark."""

model: NodeWithCalculator = zntrack.deps()
model_name: str = zntrack.params()

def run(self) -> None:
"""Run the benchmark."""
data_path = (
download_s3_data(
filename="OpenFF-Tors.zip",
key="inputs/conformers/OpenFF-Tors/OpenFF-Tors.zip",
)
/ "OpenFF-Tors"
)
# Read in data and attach calculator
calc = self.model.get_calculator()
# Add D3 calculator for this test
calc = self.model.add_d3_calculator(calc)
with open(data_path / "MP2_heavy-aug-cc-pVTZ_torsiondrive_data.json") as file:
data = json.load(file)

for molecule_id, conf in tqdm(data.items()):
charge = int(conf["metadata"]["mol_charge"])
spin = int(conf["metadata"]["mol_multiplicity"])
smiles = conf["metadata"]["mapped_smiles"]
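            # Keep explicit hydrogens when parsing the mapped SMILES so atoms match the stored geometries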
params = Chem.SmilesParserParams()
params.removeHs = False
mol = Chem.MolFromSmiles(smiles, params)
symbols = [atom.GetSymbol() for atom in mol.GetAtoms()]
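            # molAtomMapNumber values in the mapped SMILES are 1-based; map them to RDKit atom indices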
atom_map = {
atom.GetIntProp("molAtomMapNumber"): idx
for idx, atom in enumerate(mol.GetAtoms())
if atom.HasProp("molAtomMapNumber")
}
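            # Reorder element symbols to follow the atom-map ordering used by the stored geometries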
remapped_symbols = [
symbols[atom_map[i]] for i in range(1, len(symbols) + 1)
]

for i, (ref_energy, positions) in enumerate(
zip(conf["final_energies"], conf["final_geometries"], strict=True)
):
label = f"{molecule_id}_{i}"
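                # Geometries are stored in Bohr; convert to Angstrom for ASE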
atoms = Atoms(
symbols=remapped_symbols, positions=np.array(positions) * units.Bohr
)
atoms.info["charge"] = charge
atoms.info["spin"] = spin
atoms.calc = calc

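                # Use the first conformer as the zero-energy reference for both reference and model energies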
if i == 0:
e_ref_zero_conf = ref_energy * units.Hartree
e_model_zero_conf = atoms.get_potential_energy()
else:
atoms.info["ref_rel_energy"] = (
ref_energy * units.Hartree - e_ref_zero_conf
)
atoms.info["model_rel_energy"] = (
atoms.get_potential_energy() - e_model_zero_conf
)
write_dir = OUT_PATH / self.model_name
write_dir.mkdir(parents=True, exist_ok=True)
write(write_dir / f"{label}.xyz", atoms)


def build_project(repro: bool = False) -> None:
"""
Build mlipx project.

Parameters
----------
repro
Whether to call dvc repro -f after building.
"""
project = mlipx.Project()
benchmark_node_dict = {}

for model_name, model in MODELS.items():
with project.group(model_name):
benchmark = OpenFFTorsBenchmark(
model=model,
model_name=model_name,
)
benchmark_node_dict[model_name] = benchmark

if repro:
with chdir(Path(__file__).parent):
project.repro(build=True, force=True)
else:
project.build()


def test_openff_tors() -> None:
"""Run OpenFF-Tors benchmark via pytest."""
build_project(repro=True)
3 changes: 3 additions & 0 deletions ml_peg/calcs/conformers/openff_tors/.dvc/.gitignore
@@ -0,0 +1,3 @@
/config.local
/tmp
/cache
Empty file.
3 changes: 3 additions & 0 deletions ml_peg/calcs/conformers/openff_tors/.dvcignore
@@ -0,0 +1,3 @@
# Add patterns of files dvc should ignore, which could improve
# the performance. Learn more at
# https://dvc.org/doc/user-guide/dvcignore