ml_peg/analysis/conformers/Glucose205/analyse_Glucose205.py (+148, -0)
@@ -0,0 +1,148 @@
"""
Analyse the glucose conformer energy dataset.

Journal of Chemical Theory and Computation,
2016 12 (12), 6157-6168.
DOI: 10.1021/acs.jctc.6b00876
"""

from __future__ import annotations

from pathlib import Path

from ase import units
from ase.io import read, write
import pytest

from ml_peg.analysis.utils.decorators import build_table, plot_parity
from ml_peg.analysis.utils.utils import build_d3_name_map, load_metrics_config, mae
from ml_peg.app import APP_ROOT
from ml_peg.calcs import CALCS_ROOT
from ml_peg.models.get_models import load_models
from ml_peg.models.models import current_models

MODELS = load_models(current_models)
D3_MODEL_NAMES = build_d3_name_map(MODELS)

EV_TO_KCAL = units.mol / units.kcal
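# (ASE energies are in eV; units.mol / units.kcal is the eV -> kcal/mol
# conversion factor, roughly 23.06 kcal/mol per eV.)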
CALC_PATH = CALCS_ROOT / "conformers" / "Glucose205" / "outputs"
OUT_PATH = APP_ROOT / "data" / "conformers" / "Glucose205"

METRICS_CONFIG_PATH = Path(__file__).with_name("metrics.yml")
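# Thresholds, tooltips, and weights for the metrics table, as configured in
# metrics.yml; DEFAULT_WEIGHTS is loaded but not used in this analysis.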
DEFAULT_THRESHOLDS, DEFAULT_TOOLTIPS, DEFAULT_WEIGHTS = load_metrics_config(
    METRICS_CONFIG_PATH
)


def labels() -> list[str]:
    """
    Get list of system names.

    Returns
    -------
    list[str]
        List of all system names.
    """
    # All models share the same set of systems, so use the first model's outputs
    model_name = next(iter(MODELS))
    return [path.stem for path in sorted((CALC_PATH / model_name).glob("*"))]


@pytest.fixture
@plot_parity(
    filename=OUT_PATH / "figure_glucose205.json",
    title="Energies",
    x_label="Predicted energy / kcal/mol",
    y_label="Reference energy / kcal/mol",
    hoverdata={
        "Labels": labels(),
    },
)
def conformer_energies() -> dict[str, list]:
    """
    Get conformer energies for all systems.

    Returns
    -------
    dict[str, list]
        Dictionary of all reference and predicted conformer energies.
    """
    results = {"ref": []} | {mlip: [] for mlip in MODELS}
    ref_stored = False

    for model_name in MODELS:
        for label in labels():
            atoms = read(CALC_PATH / model_name / f"{label}.xyz")

            results[model_name].append(atoms.info["model_rel_energy"] * EV_TO_KCAL)
            # Reference energies are identical across models, so store them once
            if not ref_stored:
                results["ref"].append(atoms.info["ref_energy"] * EV_TO_KCAL)

            # Write structures for app
            structs_dir = OUT_PATH / model_name
            structs_dir.mkdir(parents=True, exist_ok=True)
            write(structs_dir / f"{label}.xyz", atoms)
        ref_stored = True
    return results


@pytest.fixture
def get_mae(conformer_energies) -> dict[str, float]:
    """
    Get mean absolute error for conformer energies.

    Parameters
    ----------
    conformer_energies
        Dictionary of reference and predicted conformer energies.

    Returns
    -------
    dict[str, float]
        Dictionary of mean absolute errors in predicted conformer energies for
        all models.
    """
    results = {}
    for model_name in MODELS:
        results[model_name] = mae(
            conformer_energies["ref"], conformer_energies[model_name]
        )
    return results
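# Here ``mae`` is assumed to compute the standard mean absolute error, e.g.
# mae([0.0, 1.0], [0.5, 2.0]) == 0.75.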


@pytest.fixture
@build_table(
    filename=OUT_PATH / "glucose205_metrics_table.json",
    metric_tooltips=DEFAULT_TOOLTIPS,
    thresholds=DEFAULT_THRESHOLDS,
    mlip_name_map=D3_MODEL_NAMES,
)
def metrics(get_mae: dict[str, float]) -> dict[str, dict]:
    """
    Get all metrics.

    Parameters
    ----------
    get_mae
        Mean absolute errors for all models.

    Returns
    -------
    dict[str, dict]
        Metric names and values for all models.
    """
    return {
        "MAE": get_mae,
    }


def test_glucose205(metrics: dict[str, dict]) -> None:
    """
    Run the Glucose205 test.

    Parameters
    ----------
    metrics
        Benchmark metric names and dictionaries of values for each model.
    """
    # The fixtures and their decorators compute the metrics and write the plot
    # and table files; the test only needs to request ``metrics``.
    return
ml_peg/analysis/conformers/Glucose205/metrics.yml (+7, -0)
@@ -0,0 +1,7 @@
metrics:
  MAE:
    good: 0.0
    bad: 20.0
    unit: kcal/mol
    tooltip: Mean Absolute Error for all systems
    level_of_theory: CCSD(T)
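# good/bad are the threshold values read by load_metrics_config and passed to
# build_table in analyse_Glucose205.py (presumably to colour-scale the MAE column).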
ml_peg/app/conformers/Glucose205/app_Glucose205.py (+90, -0)
@@ -0,0 +1,90 @@
"""Run Glucose205 app."""

from __future__ import annotations

from dash import Dash
from dash.html import Div

from ml_peg.app import APP_ROOT
from ml_peg.app.base_app import BaseApp
from ml_peg.app.utils.build_callbacks import (
    plot_from_table_column,
    struct_from_scatter,
)
from ml_peg.app.utils.load import read_plot
from ml_peg.models.get_models import get_model_names
from ml_peg.models.models import current_models

MODELS = get_model_names(current_models)
BENCHMARK_NAME = "Glucose205"
DOCS_URL = (
    "https://ddmms.github.io/ml-peg/user_guide/benchmarks/conformers.html#glucose205"
)
DATA_PATH = APP_ROOT / "data" / "conformers" / "Glucose205"
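# Assets (plot JSON, metrics table JSON, and per-model .xyz structures) under
# DATA_PATH are produced by ml_peg/analysis/conformers/Glucose205/analyse_Glucose205.py.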


class Glucose205App(BaseApp):
    """Glucose205 benchmark app layout and callbacks."""

    def register_callbacks(self) -> None:
        """Register callbacks to app."""
        scatter = read_plot(
            DATA_PATH / "figure_glucose205.json",
            id=f"{BENCHMARK_NAME}-figure",
        )

        # Structures are written out per model by the analysis script;
        # the app displays the first model's copies
        model_dir = DATA_PATH / MODELS[0]
        if model_dir.exists():
            labels = sorted(f.stem for f in model_dir.glob("*.xyz"))
            structs = [
                f"assets/conformers/Glucose205/{MODELS[0]}/{label}.xyz"
                for label in labels
            ]
        else:
            structs = []

        # Link the MAE column of the metrics table to the parity plot
        plot_from_table_column(
            table_id=self.table_id,
            plot_id=f"{BENCHMARK_NAME}-figure-placeholder",
            column_to_plot={"MAE": scatter},
        )

        # Link scatter points to the corresponding conformer structures
        struct_from_scatter(
            scatter_id=f"{BENCHMARK_NAME}-figure",
            struct_id=f"{BENCHMARK_NAME}-struct-placeholder",
            structs=structs,
            mode="struct",
        )


def get_app() -> Glucose205App:
    """
    Get Glucose205 benchmark app layout and callback registration.

    Returns
    -------
    Glucose205App
        Benchmark layout and callback registration.
    """
    return Glucose205App(
        name=BENCHMARK_NAME,
        description=(
            "Performance in predicting relative conformer energies for "
            "205 glucose structures. "
            "Reference data from DLPNO-CCSD(T) calculations."
        ),
        docs_url=DOCS_URL,
        table_path=DATA_PATH / "glucose205_metrics_table.json",
        extra_components=[
            Div(id=f"{BENCHMARK_NAME}-figure-placeholder"),
            Div(id=f"{BENCHMARK_NAME}-struct-placeholder"),
        ],
    )


if __name__ == "__main__":
    # Serve this benchmark standalone for local debugging, then open
    # http://127.0.0.1:8064 in a browser
    full_app = Dash(__name__, assets_folder=DATA_PATH.parent.parent)
    benchmark_app = get_app()
    full_app.layout = benchmark_app.layout
    benchmark_app.register_callbacks()
    full_app.run(port=8064, debug=True)