Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
26 commits
Select commit Hold shift + click to select a range
cbe7ced
start of removing pypower
corentinravoux Mar 4, 2026
850e6e7
Integrate meshing on the data_vector module
corentinravoux Mar 4, 2026
823bb00
adding broken-alpha and gamma terms + renaming the main SN class
corentinravoux Mar 11, 2026
09364c3
Adding velocity meshing and the start of GW meshing
corentinravoux Mar 11, 2026
8c7ab24
Implementing Multivariate kernel density estimation
corentinravoux Mar 12, 2026
1c133f3
start folder for comparison likelihoods
corentinravoux Mar 12, 2026
af59607
deleting replaced gridding module
corentinravoux Mar 12, 2026
6173cb4
Potential fix for pull request finding
corentinravoux Mar 16, 2026
e7f0c43
Potential fix for pull request finding
corentinravoux Mar 16, 2026
da64c3f
Potential fix for pull request finding
corentinravoux Mar 16, 2026
93581e4
autogenerated doc with copilot
corentinravoux Mar 17, 2026
c0a7aac
small unrelated change
corentinravoux Mar 18, 2026
c10e0e0
avoid a warning for counts with no randoms
corentinravoux Mar 18, 2026
fe0d7ae
Merge pull request #104 from corentinravoux/main
corentinravoux Mar 18, 2026
266ff7f
small unrelated change
corentinravoux Mar 18, 2026
dcf083b
test removing cosmoprimo dependencies
corentinravoux Mar 18, 2026
38f9b69
fix
corentinravoux Mar 18, 2026
e34bde9
fix
corentinravoux Mar 18, 2026
32b437d
going back to the original implementation
corentinravoux Mar 18, 2026
b3d8948
fix of likelihood class for covariance, start comparison likelihood (…
corentinravoux Mar 18, 2026
eb00434
fixes + notebook addition
corentinravoux Mar 19, 2026
76d858d
removing last mandatory dependency to cosmoprimo
corentinravoux Mar 19, 2026
ee6e91d
updating doc
corentinravoux Mar 19, 2026
66b4092
start sampling mesh algo
corentinravoux Mar 20, 2026
8d0e542
Initial commit of existing flip package
corentinravoux Apr 1, 2026
3bf1212
Improving gridding gw
corentinravoux Apr 1, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 2 additions & 4 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -13,14 +13,12 @@ git clone https://github.com/corentinravoux/flip.git
cd flip
pip install .
```
For now, the package requires you to install manually cosmoprimo: pip install git+https://github.com/cosmodesi/cosmoprimo


## Required packages

Mandatory: numpy, scipy, matplotlib, [cosmoprimo](https://github.com/adematti/cosmoprimo), iminuit, emcee, sympy
Mandatory: numpy, scipy, matplotlib, iminuit, emcee, sympy

Optional: classy, pyccl, pypower, GPy, tensorflow
Optional: classy, pyccl, pmesh, GPy, tensorflow, [cosmoprimo](https://github.com/adematti/cosmoprimo)

## Examples

Expand Down
Empty file added flip/comparison/__init__.py
Empty file.
242 changes: 242 additions & 0 deletions flip/comparison/likelihood.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,242 @@
import abc
from functools import partial

from flip.data_vector.basic import DensMesh
from flip.utils import create_log, prior_sum, return_prior

from .._config import __use_jax__

if __use_jax__:
try:
import jax.numpy as jnp
from jax import grad, jit

jax_installed = True

except ImportError:
import numpy as jnp

jax_installed = False
else:

import numpy as jnp

jax_installed = False


# CR - Very first idea of the likelihood class

log = create_log()


_available_field_expressions = ["interpolation", "remesh"]


def express_field_on_grid(
    scaling_catalog,
    parameter_values_dict,
    fixed_grid,
    method,
    **kwargs,
):
    """Express a catalog's field on a fixed grid.

    Args:
        scaling_catalog: Catalog whose field is re-expressed. Must be a
            ``DensMesh`` for the "interpolation" method; for "remesh" it is
            indexed with string keys (presumably a dict-like catalog —
            TODO confirm against callers).
        parameter_values_dict (dict): Parameter values; the "remesh" method
            reads ``"H_0"`` to rescale comoving distances.
        fixed_grid: Target grid coordinates (used by the "interpolation" method).
        method (str): One of ``_available_field_expressions``.
        **kwargs: Options for "remesh": ``rcom_max``, ``grid_size``,
            ``grid_type`` (default "sphere"), ``kind`` (default "ngc");
            also forwarded to ``DensMesh.init_from_catalog``.

    Returns:
        DensMesh: The field expressed on the requested grid.

    Raises:
        ValueError: If ``method`` is unknown, or "interpolation" is requested
            for a catalog that is not a ``DensMesh``.
    """
    if method not in _available_field_expressions:
        raise ValueError(
            f"Method {method} not recognized. Available methods: {_available_field_expressions}"
        )

    if method == "interpolation":
        # Bug fix: the previous warning fired when the catalog WAS a DensMesh
        # while claiming it "is a dict" — message contradicted the condition.
        # Use a guard clause instead.
        if not isinstance(scaling_catalog, DensMesh):
            raise ValueError(
                "Interpolation method only implemented for DensMesh catalogs."
            )

        # CR - not implemented yet - need to add interpolation method to DensMesh class
        scaled_mesh = scaling_catalog.interpolate(
            parameter_values_dict,
            fixed_grid,
        )
    elif method == "remesh":
        # NOTE(review): this mutates the caller's catalog in place — confirm
        # callers do not reuse ``scaling_catalog`` with the original distances.
        scaling_catalog["rcom_zobs"] = (
            parameter_values_dict["H_0"] / 100
        ) * scaling_catalog["rcom_zobs"]

        rcom_max = kwargs.get("rcom_max", None)
        grid_size = kwargs.get("grid_size", None)
        grid_type = kwargs.get("grid_type", "sphere")
        kind = kwargs.get("kind", "ngc")

        scaled_mesh = DensMesh.init_from_catalog(
            scaling_catalog,
            rcom_max,
            grid_size,
            grid_type,
            kind,
            **kwargs,
        )

    return scaled_mesh


class BaseLikelihood(abc.ABC):
    """Abstract base class for comparison likelihoods.

    Subclasses implement `_init_likelihood`, returning the callable likelihood
    and (optionally) its gradient.
    """

    # Defaults merged under any user-supplied properties.
    _default_likelihood_properties = {
        "use_jit": False,
        "use_gradient": False,
    }

    def __init__(
        self,
        parameter_names=None,
        likelihood_properties=None,
    ):
        """Initialize common likelihood state.

        Args:
            parameter_names (list[str] | None): Parameter order used to map
                parameter vectors to named values.
            likelihood_properties (dict | None): Overrides merged over
                `_default_likelihood_properties`. Defaults to `None` (instead
                of a shared mutable `{}` default).
        """
        self.parameter_names = parameter_names

        self.likelihood_properties = {
            **self._default_likelihood_properties,
            **(likelihood_properties or {}),
        }
        self.prior = self.initialize_prior()

        self.likelihood_call, self.likelihood_grad = self._init_likelihood()

    def __call__(self, parameter_values):
        """Evaluate likelihood at parameter values.

        Args:
            parameter_values (array-like): Parameter vector aligned with `parameter_names`.

        Returns:
            float: Likelihood value, sign controlled by `negative_log_likelihood`.
        """
        return self.likelihood_call(parameter_values)

    @abc.abstractmethod
    def _init_likelihood(self, *args):
        """Initialize likelihood and optional gradient.

        Returns:
            tuple[Callable, Callable|None]: `(likelihood_call, likelihood_grad)`.
        """
        likelihood_fun = None
        likelihood_grad = None
        return likelihood_fun, likelihood_grad

    def initialize_prior(self):
        """Build prior function from likelihood properties.

        Returns:
            Callable: Prior function mapping a parameter dict to the log-prior;
            a constant-zero (flat) prior when no "prior" property is given.

        Raises:
            ValueError: If an unsupported prior type is requested
                (raised by `return_prior`).
        """
        if "prior" not in self.likelihood_properties:
            # Flat prior: contributes nothing to the likelihood value.
            return lambda parameter_values_dict: 0

        prior_dict = self.likelihood_properties["prior"]
        priors = [
            return_prior(parameter_name, prior_properties)
            for parameter_name, prior_properties in prior_dict.items()
        ]
        return partial(prior_sum, priors)


class GaussianFieldComparisonLikelihood(BaseLikelihood):
    """Chi-square comparison between a scaled catalog field and a fixed base field.

    Attributes:
        basefield: Reference field; its ``data["x"/"y"/"z"]`` define the fixed
            comparison grid and its ``["density"]`` the reference values.
        scaling_catalog: Catalog re-expressed on the fixed grid at each call.
        covariance_basefield: Provides ``["density_error"]`` for the base field.
    """

    def __init__(
        self,
        basefield=None,
        scaling_catalog=None,
        parameter_names=None,
        covariance_basefield=None,
        likelihood_properties=None,
    ):
        """Initialize the comparison likelihood.

        Args:
            basefield: Reference field defining the fixed grid.
            scaling_catalog: Catalog to re-express on the grid.
            parameter_names (list[str]): Names matching the parameter vector order.
            covariance_basefield: Error field for the base field.
            likelihood_properties (dict | None): Must provide "scaling_method",
                "scaling_kwargs" and "negative_log_likelihood" in addition to
                the base-class defaults. Defaults to `None` (avoids a shared
                mutable `{}` default).
        """
        super(GaussianFieldComparisonLikelihood, self).__init__(
            parameter_names=parameter_names,
            likelihood_properties=likelihood_properties or {},
        )
        # Assigned after super().__init__(): _init_likelihood() only builds
        # closures, which read these attributes at call time, not at init time.
        self.basefield = basefield
        self.scaling_catalog = scaling_catalog
        self.covariance_basefield = covariance_basefield

    def _init_likelihood(self):
        """Build callable likelihood and optional gradient for the Gaussian model.

        Returns:
            tuple[Callable, Callable|None]: `(likelihood_call, likelihood_grad)`;
            the gradient is None when jax is unavailable.
        """
        use_jit = self.likelihood_properties["use_jit"]

        # `and`, not bitwise `&`: short-circuits and is the idiomatic boolean op.
        if jax_installed and use_jit:
            prior = jit(self.prior)
        else:
            prior = self.prior

        def likelihood_evaluation(parameter_values):
            """Evaluate the chi-square likelihood for given parameters.

            Args:
                parameter_values (array-like): Parameter vector aligned to names.

            Returns:
                float: Likelihood value (sign depends on `negative_log_likelihood`).
            """
            parameter_values_dict = dict(zip(self.parameter_names, parameter_values))

            # Fixed comparison grid built from the base field coordinates.
            fixed_grid = jnp.stack(
                jnp.meshgrid(
                    self.basefield.data["x"],
                    self.basefield.data["y"],
                    self.basefield.data["z"],
                    indexing="ij",
                ),
                axis=-1,
            )

            scaled_field = express_field_on_grid(
                self.scaling_catalog,
                parameter_values_dict,
                fixed_grid,
                method=self.likelihood_properties["scaling_method"],
                **self.likelihood_properties["scaling_kwargs"],
            )

            # Diagonal Gaussian chi-square with errors added in quadrature.
            # Bug fix: use the local `scaled_field` — the previous
            # `self.scaled_field` attribute was never set and raised
            # AttributeError on the first evaluation.
            likelihood = jnp.sum(
                (scaled_field["density"] - self.basefield["density"]) ** 2
                / (
                    scaled_field["density_error"] ** 2
                    + self.covariance_basefield["density_error"] ** 2
                )
            )

            likelihood_value = likelihood + prior(parameter_values_dict)

            # Required property: flips the sign for use with minimizers.
            if self.likelihood_properties["negative_log_likelihood"]:
                likelihood_value *= -1
            return likelihood_value

        if jax_installed:
            likelihood_grad = grad(likelihood_evaluation)
            if use_jit:
                likelihood_evaluation = jit(likelihood_evaluation)
                likelihood_grad = jit(likelihood_grad)
        else:
            likelihood_grad = None
        return likelihood_evaluation, likelihood_grad
6 changes: 2 additions & 4 deletions flip/covariance/analytical/lai22/generator.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,13 +2,13 @@
import multiprocessing as mp
from functools import partial

import cosmoprimo
import numpy as np
from scipy import integrate
from scipy.interpolate import interp1d
from scipy.special import factorial, spherical_jn

from flip.covariance import cov_utils
from flip.covariance.hankel import PowerToCorrelation


def compute_correlation_coefficient_simple_integration(p, q, ell, r, k, pk):
Expand Down Expand Up @@ -41,7 +41,6 @@ def compute_correlation_coefficient_hankel(
):
"""Compute correlation coefficient using FFTLog Hankel transform.

Uses cosmoprimo's ``PowerToCorrelation`` to accelerate the computation.
Falls back to direct integration for small ``r`` values where the Hankel
result may be unreliable, controlled by ``hankel_overhead_coefficient``.

Expand All @@ -58,8 +57,7 @@ def compute_correlation_coefficient_hankel(
Array of correlation coefficient values at separations ``r``.
"""
integrand = k ** (2 * (p + q)) * pk
Hankel = cosmoprimo.fftlog.PowerToCorrelation(k, ell=ell, q=0, complex=False)
Hankel.set_fft_engine("numpy")
Hankel = PowerToCorrelation(k, ell=ell, q=0, complex=False)
r_hankel, xi_hankel = Hankel(integrand)
mask = r < np.min(r_hankel) * hankel_overhead_coefficient
output = np.empty_like(r)
Expand Down
1 change: 0 additions & 1 deletion flip/covariance/contraction.py
Original file line number Diff line number Diff line change
Expand Up @@ -94,7 +94,6 @@ def init_from_flip(
coordinate_type=coordinate_type,
additional_parameters_values=additional_parameters_values,
basis_definition=basis_definition,
covariance_prefactor_dict=covariance_prefactor_dict,
**kwargs,
)

Expand Down
10 changes: 4 additions & 6 deletions flip/covariance/generator.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,14 +2,14 @@
import multiprocessing as mp
from functools import partial

import cosmoprimo
import mpmath
import numpy as np
from scipy import integrate
from scipy.signal import savgol_filter
from scipy.special import spherical_jn

from flip.covariance import cov_utils
from flip.covariance.hankel import PowerToCorrelation
from flip.utils import create_log

log = create_log()
Expand Down Expand Up @@ -65,11 +65,10 @@ def correlation_integration(ell, r, k, integrand):

def correlation_hankel(ell, r, k, integrand, hankel_overhead_coefficient=2, kmin=None):
"""
The correlation_hankel function is a wrapper for the cosmoprimo.fftlog.PowerToCorrelation function,
The correlation_hankel function is a wrapper for the PowerToCorrelation function,
which computes the correlation function from power spectrum using FFTLog (Hamilton 2000).
The PowerToCorrelation class takes in an array of k values and an array of P(k) values, and returns
an array of r values and an array of xi(r) values. The PowerToCorrelation class has two methods: set_fft_engine()
and __call__(). The set_fft_engine() method sets which fft engine to use; it can be either &quot;n
an array of r values and an array of xi(r) values.

Args:
l: Determine the parity of the integrand, and therefore whether to add a 1j term
Expand All @@ -84,8 +83,7 @@ def correlation_hankel(ell, r, k, integrand, hankel_overhead_coefficient=2, kmin
Note:
If l is odd, count a 1j term in the integrand, without the need for adding it
"""
Hankel = cosmoprimo.fftlog.PowerToCorrelation(k, ell=ell, q=0, complex=False)
Hankel.set_fft_engine("numpy")
Hankel = PowerToCorrelation(k, ell=ell, q=0, complex=False)
r_hankel, xi_hankel = Hankel(integrand)
mask = r < np.min(r_hankel) * hankel_overhead_coefficient
if np.any(r > np.max(r_hankel)):
Expand Down
Loading
Loading