7 changes: 7 additions & 0 deletions ml_peg/analysis/conformers/37Conf8/metrics.yml
@@ -0,0 +1,7 @@
metrics:
MAE:
good: 0.0
bad: 20.0
unit: kcal/mol
tooltip: Mean Absolute Error for all systems
level_of_theory: DLPNO-CCSD(T)/cc-pVTZ
146 changes: 146 additions & 0 deletions ml_peg/analysis/conformers/37Conf8/analyse_37Conf8.py
@@ -0,0 +1,146 @@
"""
Analyse the 37Conf8 conformer energy benchmark.

Dataset reference: 10.1002/cphc.201801063.
"""

from __future__ import annotations

from pathlib import Path

from ase import units
from ase.io import read, write
import pytest

from ml_peg.analysis.utils.decorators import build_table, plot_parity
from ml_peg.analysis.utils.utils import build_d3_name_map, load_metrics_config, mae
from ml_peg.app import APP_ROOT
from ml_peg.calcs import CALCS_ROOT
from ml_peg.models.get_models import load_models
from ml_peg.models.models import current_models

MODELS = load_models(current_models)
D3_MODEL_NAMES = build_d3_name_map(MODELS)

EV_TO_KCAL = units.mol / units.kcal
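# units.mol / units.kcal is approximately 23.06, i.e. the eV -> kcal/mol conversion factor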
CALC_PATH = CALCS_ROOT / "conformers" / "37Conf8" / "outputs"
OUT_PATH = APP_ROOT / "data" / "conformers" / "37Conf8"

METRICS_CONFIG_PATH = Path(__file__).with_name("metrics.yml")
DEFAULT_THRESHOLDS, DEFAULT_TOOLTIPS, DEFAULT_WEIGHTS = load_metrics_config(
METRICS_CONFIG_PATH
)
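# Sketch of the expected mapping from metrics.yml (assuming a per-metric layout):
#   DEFAULT_THRESHOLDS -> {"MAE": {"good": 0.0, "bad": 20.0}}
#   DEFAULT_TOOLTIPS   -> {"MAE": "Mean Absolute Error for all systems"}
#   DEFAULT_WEIGHTS    -> optional per-metric weights (none set for this benchmark)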


def labels() -> list[str]:
    """
    Get list of system names.

    Returns
    -------
    list[str]
        List of all system names.
    """
    # All models share the same systems, so take labels from the first model's outputs
    model_name = next(iter(MODELS))
    return [path.stem for path in sorted((CALC_PATH / model_name).glob("*"))]


@pytest.fixture
@plot_parity(
filename=OUT_PATH / "figure_37conf8.json",
title="Energies",
x_label="Predicted energy / kcal/mol",
y_label="Reference energy / kcal/mol",
hoverdata={
"Labels": labels(),
},
)
def conformer_energies() -> dict[str, list]:
"""
    Get reference and predicted conformer energies for all systems.

Returns
-------
dict[str, list]
Dictionary of all reference and predicted energies.
"""
results = {"ref": []} | {mlip: [] for mlip in MODELS}
ref_stored = False

for model_name in MODELS:
for label in labels():
atoms = read(CALC_PATH / model_name / f"{label}.xyz")
results[model_name].append(atoms.info["model_rel_energy"] * EV_TO_KCAL)

if not ref_stored:
results["ref"].append(atoms.info["ref_energy"] * EV_TO_KCAL)

# Write structures for app
structs_dir = OUT_PATH / model_name
structs_dir.mkdir(parents=True, exist_ok=True)
write(structs_dir / f"{label}.xyz", atoms)
ref_stored = True
return results


@pytest.fixture
def get_mae(conformer_energies: dict[str, list]) -> dict[str, float]:
"""
Get mean absolute error for conformer energies.

Parameters
----------
conformer_energies
Dictionary of reference and predicted conformer energies.

Returns
-------
dict[str, float]
        Dictionary of mean absolute errors in predicted conformer energies
        for all models.
"""
results = {}
for model_name in MODELS:
results[model_name] = mae(
conformer_energies["ref"], conformer_energies[model_name]
)
return results


@pytest.fixture
@build_table(
filename=OUT_PATH / "37conf8_metrics_table.json",
metric_tooltips=DEFAULT_TOOLTIPS,
thresholds=DEFAULT_THRESHOLDS,
mlip_name_map=D3_MODEL_NAMES,
)
def metrics(get_mae: dict[str, float]) -> dict[str, dict]:
"""
Get all metrics.

Parameters
----------
get_mae
Mean absolute errors for all models.

Returns
-------
dict[str, dict]
Metric names and values for all models.
"""
return {
"MAE": get_mae,
}


def test_37conf8(metrics: dict[str, dict]) -> None:
"""
    Run 37Conf8 conformer energies test.

Parameters
----------
metrics
        Benchmark metric names and dictionary of values for each model.
"""
return
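# Running this module with pytest drives the fixture chain
# conformer_energies -> get_mae -> metrics; the plot_parity and build_table
# decorators are expected to write figure_37conf8.json and
# 37conf8_metrics_table.json under OUT_PATH for the app to load.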
91 changes: 91 additions & 0 deletions ml_peg/app/conformers/37Conf8/app_37Conf8.py
@@ -0,0 +1,91 @@
"""Run 37Conf8 app."""

from __future__ import annotations

from dash import Dash
from dash.html import Div

from ml_peg.app import APP_ROOT
from ml_peg.app.base_app import BaseApp
from ml_peg.app.utils.build_callbacks import (
plot_from_table_column,
struct_from_scatter,
)
from ml_peg.app.utils.load import read_plot
from ml_peg.models.get_models import get_model_names
from ml_peg.models.models import current_models

MODELS = get_model_names(current_models)
BENCHMARK_NAME = "37Conf8"
DOCS_URL = (
"https://ddmms.github.io/ml-peg/user_guide/benchmarks/conformers.html#37conf8"
)
DATA_PATH = APP_ROOT / "data" / "conformers" / "37Conf8"


class ThirtySevenConf8App(BaseApp):
"""37Conf8 benchmark app layout and callbacks."""

def register_callbacks(self) -> None:
"""Register callbacks to app."""
scatter = read_plot(
DATA_PATH / "figure_37conf8.json",
id=f"{BENCHMARK_NAME}-figure",
)

model_dir = DATA_PATH / MODELS[0]
if model_dir.exists():
labels = sorted([f.stem for f in model_dir.glob("*.xyz")])
structs = [
f"assets/conformers/37Conf8/{MODELS[0]}/{label}.xyz" for label in labels
]
else:
structs = []
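        # Sketch of the intended wiring (assumed behaviour of these helpers):
        # selecting the MAE column shows the parity plot, and clicking a scatter
        # point loads the corresponding structure from the app assets.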

plot_from_table_column(
table_id=self.table_id,
plot_id=f"{BENCHMARK_NAME}-figure-placeholder",
column_to_plot={"MAE": scatter},
)

struct_from_scatter(
scatter_id=f"{BENCHMARK_NAME}-figure",
struct_id=f"{BENCHMARK_NAME}-struct-placeholder",
structs=structs,
mode="struct",
)


def get_app() -> ThirtySevenConf8App:
"""
Get 37Conf8 benchmark app layout and callback registration.

Returns
-------
    ThirtySevenConf8App
Benchmark layout and callback registration.
"""
return ThirtySevenConf8App(
name=BENCHMARK_NAME,
description=(
"Performance in predicting relative conformer energies "
"of 37 organic molecules representing pharmaceuticals, drugs, catalysts, "
"synthetic precursors, and industry-related chemicals (37 neutral "
"molecules, 8 conformers each). "
"Reference data from DLPNO-CCSD(T) calculations."
),
docs_url=DOCS_URL,
table_path=DATA_PATH / "37conf8_metrics_table.json",
extra_components=[
Div(id=f"{BENCHMARK_NAME}-figure-placeholder"),
Div(id=f"{BENCHMARK_NAME}-struct-placeholder"),
],
)


if __name__ == "__main__":
full_app = Dash(__name__, assets_folder=DATA_PATH.parent.parent)
benchmark_app = get_app()
full_app.layout = benchmark_app.layout
benchmark_app.register_callbacks()
full_app.run(port=8062, debug=True)
2 changes: 2 additions & 0 deletions ml_peg/app/conformers/conformers.yml
@@ -0,0 +1,2 @@
title: Conformers
description:
107 changes: 107 additions & 0 deletions ml_peg/calcs/conformers/37Conf8/calc_37Conf8.py
@@ -0,0 +1,107 @@
"""
Compute model relative conformer energies for the 37Conf8 dataset.

Dataset reference: 10.1002/cphc.201801063.
"""

from __future__ import annotations

from pathlib import Path

from ase import units
from ase.io import read, write
import mlipx
from mlipx.abc import NodeWithCalculator
import pandas as pd
from tqdm import tqdm
import zntrack

from ml_peg.calcs.utils.utils import chdir, download_s3_data
from ml_peg.models.get_models import load_models
from ml_peg.models.models import current_models

MODELS = load_models(current_models)

KCAL_TO_EV = units.kcal / units.mol
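# units.kcal / units.mol is approximately 0.0434; reference energies in the
# 37Conf8 spreadsheet are given in kcal/mol and are converted to eV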

OUT_PATH = Path(__file__).parent / "outputs"


class Benchmark37Conf8(zntrack.Node):
"""Benchmark the 37Conf8 dataset."""

model: NodeWithCalculator = zntrack.deps()
model_name: str = zntrack.params()

    def run(self) -> None:
        """Run the 37Conf8 benchmark."""
data_path = (
download_s3_data(
filename="37CONF8.zip",
key="inputs/conformers/37Conf8/37Conf8.zip",
)
/ "37CONF8"
)

df = pd.read_excel(
data_path / "37Conf8_data.xlsx", sheet_name="Rel_Energy_SP", header=2
)
calc = self.model.get_calculator()
# Add D3 calculator for this test
calc = self.model.add_d3_calculator(calc)

write_dir = OUT_PATH / self.model_name
write_dir.mkdir(parents=True, exist_ok=True)

for i in tqdm(range(len(df) - 3)):
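            # Each data row holds the molecule name (col 0), conformer index
            # (col 1) and the reference relative energy in kcal/mol (col 2);
            # the final three rows are assumed to be non-data footer rows.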
molecule_name = df.iloc[i][0].strip()
conf_id = int(df.iloc[i][1])
label = f"{molecule_name}_{conf_id}"
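            # Conformer 1 of each molecule is the reference: its model energy is
            # stored, and relative energies of the remaining conformers are taken
            # with respect to it.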
if conf_id == 1:
zero_conf = read(data_path / "PBEPBE-D3" / f"{label}_PBEPBE-D3.xyz")
zero_conf.info["charge"] = 0
zero_conf.info["spin"] = 1
zero_conf.calc = calc
e_model_zero_conf = zero_conf.get_potential_energy()
else:
atoms = read(data_path / "PBEPBE-D3" / f"{label}_PBEPBE-D3.xyz")
atoms.info["charge"] = 0
atoms.info["spin"] = 1
atoms.calc = calc
atoms.info["model_rel_energy"] = (
atoms.get_potential_energy() - e_model_zero_conf
)
atoms.info["ref_energy"] = float(df.iloc[i][2]) * KCAL_TO_EV
write(write_dir / f"{label}.xyz", atoms)


def build_project(repro: bool = False) -> None:
"""
Build mlipx project.

Parameters
----------
repro
Whether to call dvc repro -f after building.
"""
project = mlipx.Project()
benchmark_node_dict = {}

for model_name, model in MODELS.items():
with project.group(model_name):
benchmark = Benchmark37Conf8(
model=model,
model_name=model_name,
)
benchmark_node_dict[model_name] = benchmark

if repro:
with chdir(Path(__file__).parent):
project.repro(build=True, force=True)
else:
project.build()


def test_37conf8_conformer_energies() -> None:
"""Run 37Conf8 benchmark via pytest."""
build_project(repro=True)
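# Running this module via pytest builds the mlipx project for every model and,
# with repro=True, runs a forced dvc repro to execute the benchmark nodes and
# populate OUT_PATH with per-conformer .xyz outputs.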
3 changes: 3 additions & 0 deletions ml_peg/calcs/conformers/37Conf8/.dvc/.gitignore
@@ -0,0 +1,3 @@
/config.local
/tmp
/cache
Empty file.
3 changes: 3 additions & 0 deletions ml_peg/calcs/conformers/37Conf8/.dvcignore
@@ -0,0 +1,3 @@
# Add patterns of files dvc should ignore, which could improve
# the performance. Learn more at
# https://dvc.org/doc/user-guide/dvcignore
1 change: 1 addition & 0 deletions pyproject.toml
@@ -34,6 +34,7 @@ dependencies = [
"matcalc",
"matminer",
"MDAnalysis",
"openpyxl",
]

[project.optional-dependencies]