diff --git a/cupix/likelihood/CAMB_model.py b/cupix/likelihood/CAMB_model.py deleted file mode 100644 index 168d9a5..0000000 --- a/cupix/likelihood/CAMB_model.py +++ /dev/null @@ -1,132 +0,0 @@ -import numpy as np -import copy -import camb -from lace.cosmo import camb_cosmo -from lace.cosmo import fit_linP -from cupix.likelihood import likelihood_parameter -import types - - -class CAMBModel(object): - """Interface between CAMB object and Theory""" - - def __init__(self, zs, cosmo=None): - """Setup from CAMB object and list of redshifts""" - - # list of redshifts at which we evaluate linear power - self.zs = zs - - # setup CAMB cosmology object - if cosmo is None: - self.cosmo = camb_cosmo.get_cosmology() - else: - self.cosmo = cosmo - - # cache CAMB results when computed - self.cached_camb_results = None - # cache wavenumbers and linear power (at zs) when computed - self.cached_linP_Mpc = None - self.cached_linP_params = None - - def get_camb_results(self): - """Check if we have called CAMB.get_results yet, to save time. - It returns a CAMB.results object.""" - - if self.cached_camb_results is None: - print("Calling get_camb_results") - self.cached_camb_results = camb_cosmo.get_camb_results( - self.cosmo, zs=self.zs, camb_kmax_Mpc=200 #, fast_camb=True - ) - - return self.cached_camb_results - - def get_linP_Mpc(self): - """Check if we have already computed linP_Mpc, to save time. - It returns (k_Mpc, zs, linP_Mpc).""" - - if self.cached_linP_Mpc is None: - camb_results = self.get_camb_results() - self.cached_linP_Mpc = camb_cosmo.get_linP_Mpc( - pars=self.cosmo, zs=self.zs, camb_results=camb_results - ) - - return self.cached_linP_Mpc - - - def get_linP_Mpc_params(self, kp_Mpc): - """Get linear power parameters to call emulator, at each z. 
- Amplitude, slope and running around pivot point kp_Mpc.""" - - ## Get the P(k) at each z - k_Mpc, z, pk_Mpc = self.get_linP_Mpc() - # specify wavenumber range to fit - kmin_Mpc = 0.5 * kp_Mpc - kmax_Mpc = 2.0 * kp_Mpc - - linP_params = [] - ## Fit the emulator call params - for pk_z in pk_Mpc: - linP_Mpc = fit_linP.fit_polynomial( - kmin_Mpc / kp_Mpc, - kmax_Mpc / kp_Mpc, - k_Mpc / kp_Mpc, - pk_z, - deg=2, - ) - # translate the polynomial to our parameters - ln_A_p = linP_Mpc[0] - Delta2_p = np.exp(ln_A_p) * kp_Mpc**3 / (2 * np.pi**2) - n_p = linP_Mpc[1] - # note that the curvature is alpha/2 - alpha_p = 2.0 * linP_Mpc[2] - linP_z = {"Delta2_p": Delta2_p, "n_p": n_p, "alpha_p": alpha_p} - linP_params.append(linP_z) - - return linP_params - - - def get_dkms_dMpc(self): - """Return M(z)=H(z)/(1+z) for each z""" - dkms_of_zs = [] - for z in self.zs: - dkms_of_zs.append(camb_cosmo.dkms_dMpc(self.cosmo, z)) - - return dkms_of_zs - - def get_dAA_dMpc(self): - """Return conversion factor from inv cMpc to inv Angstroms for each z""" - dAA_of_zs = [] - for z in self.zs: - dAA_of_zs.append(camb_cosmo.dAA_dMpc(self.cosmo, z, 1215.67)) - - return dAA_of_zs - - def get_ddeg_dMpc(self): - """Return conversion factor from Mpc to deg for each z""" - ddeg_of_zs = [] - for z in self.zs: - ddeg_of_zs.append(camb_cosmo.ddeg_dMpc(self.cosmo, z)) - - return ddeg_of_zs - - def get_new_model(self, zs, like_params): - """For an arbitrary list of like_params, return a new CAMBModel""" - - # store a dictionary with parameters set to input values - camb_param_dict = {} - - # loop over list of likelihood parameters own by this object - for mypar in self.get_likelihood_parameters(): - # loop over input parameters - for inpar in like_params: - if inpar.name == mypar.name: - camb_param_dict[inpar.name] = inpar.value - continue - - # set cosmology object (use fiducial for parameters not provided) - new_cosmo = camb_cosmo.get_cosmology_from_dictionary( - camb_param_dict, cosmo_fid=self.cosmo - ) 
- - return CAMBModel(zs=zs, cosmo=new_cosmo) - diff --git a/cupix/likelihood/cosmologies.py b/cupix/likelihood/cosmologies.py deleted file mode 100644 index 6e6b97b..0000000 --- a/cupix/likelihood/cosmologies.py +++ /dev/null @@ -1,164 +0,0 @@ -import os -import numpy as np - -import lace -from lace.cosmo import camb_cosmo - - -def get_cosmology_from_label(cosmo_label="default"): - if cosmo_label == "default": - return camb_cosmo.get_cosmology() - elif cosmo_label == "low_omch2": - return camb_cosmo.get_cosmology(omch2=0.11) - elif cosmo_label == "high_omch2": - return camb_cosmo.get_cosmology(omch2=0.13) - elif cosmo_label == "omch2_0115": - return camb_cosmo.get_cosmology(omch2=0.115) - elif cosmo_label == "omch2_0125": - return camb_cosmo.get_cosmology(omch2=0.125) - elif cosmo_label == "mnu_03": - return camb_cosmo.get_cosmology(mnu=0.3) - elif cosmo_label == "mnu_06": - return camb_cosmo.get_cosmology(mnu=0.6) - elif cosmo_label == "SHOES": - return camb_cosmo.get_cosmology(H0=73.0) - else: - raise ValueError("implement cosmo_label " + cosmo_label) - - -def set_cosmo( - cosmo_label="mpg_central", - return_all=False, - nyx_version="models_Nyx_Mar2025_with_CGAN_val_3axes", -): - """Set fiducial cosmology - - Parameters - ---------- - cosmo_label : str - - Returns - ------- - cosmo : object - """ - if (cosmo_label[:3] == "mpg") | (cosmo_label[:3] == "nyx"): - if cosmo_label[:3] == "mpg": - repo = os.path.dirname(lace.__path__[0]) + "/" - fname = repo + ("data/sim_suites/Australia20/mpg_emu_cosmo.npy") - get_cosmo = camb_cosmo.get_cosmology_from_dictionary - elif cosmo_label[:3] == "nyx": - fname = ( - os.environ["NYX_PATH"] + "nyx_emu_cosmo_" + nyx_version + ".npy" - ) - get_cosmo = camb_cosmo.get_Nyx_cosmology - - try: - data_cosmo = np.load(fname, allow_pickle=True).item() - except: - raise ValueError(f"{fname} not found") - - if cosmo_label in data_cosmo.keys(): - # print(data_cosmo[cosmo_label]["cosmo_params"]) - cosmo = 
get_cosmo(data_cosmo[cosmo_label]["cosmo_params"]) - else: - raise ValueError(f"Cosmo not found in {fname} for {cosmo_label}") - - elif cosmo_label == "Planck18": - cosmo = camb_cosmo.get_cosmology( - H0=67.66, - mnu=0.0, - omch2=0.119, - ombh2=0.0224, - omk=0.0, - As=2.105e-09, - ns=0.9665, - nrun=0.0, - pivot_scalar=0.05, - w=-1, - ) - elif cosmo_label == "Planck18_nyx": - cosmo = camb_cosmo.get_cosmology( - H0=67.66, - mnu=0.0, - omch2=0.119, - ombh2=0.0224, - omk=0.0, - As=2.24e-09, - ns=0.937, - nrun=0.0, - pivot_scalar=0.05, - w=-1, - ) - elif cosmo_label == "Planck18_mpg": - cosmo = camb_cosmo.get_cosmology( - H0=67.0, - mnu=0.0, - omch2=0.119, - ombh2=0.022, - omk=0.0, - As=2.26e-09, - ns=0.982, - nrun=0.0, - pivot_scalar=0.05, - w=-1, - ) - elif cosmo_label == "Planck18_h74": - cosmo = camb_cosmo.get_cosmology( - H0=74, - mnu=0.0, - omch2=0.119, - ombh2=0.0224, - omk=0.0, - As=2.105e-09, - ns=0.9665, - nrun=0.0, - pivot_scalar=0.05, - w=-1, - ) - elif (cosmo_label == "ACCEL2_6144_160") | (cosmo_label == "accel2"): - # https://arxiv.org/pdf/2407.04473 - # Planck15 ΛCDM Planck TT,TE,EE+lowP (approx...) 
- Omegam = 0.31 - Omegab = 0.0487 - h = 0.675 - omch2 = (Omegam - Omegab) * h**2 - ombh2 = Omegab * h**2 - cosmo = camb_cosmo.get_cosmology( - H0=h * 100, - mnu=0.0, - omch2=omch2, - ombh2=ombh2, - omk=0.0, - As=np.exp(3.094) / 1e10, # Planck15 ΛCDM Planck TT,TE,EE+lowP - ns=0.96, - nrun=0.0, - pivot_scalar=0.05, - w=-1, - ) - elif (cosmo_label == "Sherwood_2048_40") | (cosmo_label == "sherwood"): - # https://academic.oup.com/mnras/article/464/1/897/2236089 - # Planck13 ΛCDM Planck+WP+highL+BAO - Omegam = 0.308 - Omegab = 0.0482 - h = 0.678 - omch2 = (Omegam - Omegab) * h**2 - ombh2 = Omegab * h**2 - cosmo = camb_cosmo.get_cosmology( - H0=h * 100, - mnu=0.0, - omch2=omch2, - ombh2=ombh2, - omk=0.0, - As=np.exp(3.0973) / 1e10, # Planck13 ΛCDM Planck+WP+highL+BAO - ns=0.961, - nrun=0.0, - pivot_scalar=0.05, - w=-1, - ) - else: - raise ValueError(f"cosmo_label {cosmo_label} not implemented") - - if return_all: - return data_cosmo - else: - return cosmo diff --git a/cupix/likelihood/fitter.py b/cupix/likelihood/fitter.py index d5c56ba..989dc16 100644 --- a/cupix/likelihood/fitter.py +++ b/cupix/likelihood/fitter.py @@ -10,7 +10,6 @@ # our own modules import cupix, lace -from lace.cosmo import camb_cosmo from cupix.utils.utils import create_print_function, purge_chains diff --git a/cupix/likelihood/forestflow_emu.py b/cupix/likelihood/forestflow_emu.py index 8d5d143..c67a292 100644 --- a/cupix/likelihood/forestflow_emu.py +++ b/cupix/likelihood/forestflow_emu.py @@ -14,7 +14,6 @@ class FF_emulator(): def __init__( self, z, - cosmo_param_dict, Nrealizations=3000 ): @@ -27,7 +26,6 @@ def __init__( "kF_Mpc" ] self.z = z - self.cosmo_param_dict = cosmo_param_dict self.emulator_label = "forestflow_emu" # self.kp_Mpc = kp_Mpc # self.kmax_Mpc = 5 # from Forestflow paper plots, could revisit @@ -103,4 +101,4 @@ def emulate_P3D_params(self, emu_call, zs): arinyo_coeffs[key] = emu_call[key] return arinyo_coeffs - \ No newline at end of file + diff --git 
a/cupix/likelihood/likelihood.py b/cupix/likelihood/likelihood.py index a22bce4..670e98b 100644 --- a/cupix/likelihood/likelihood.py +++ b/cupix/likelihood/likelihood.py @@ -182,7 +182,9 @@ def get_log_like( log_like += np.sum(log_like_all) end = time.time() - print(f"Log-likelihood computed in {end - start:.2f} seconds") + if self.verbose: + print(f"Log-likelihood computed in {end - start:.2f} seconds") + if np.any(np.isnan(log_like)): return null_out @@ -239,7 +241,8 @@ def minus_log_prob(self, like_params, freepar_names=[]): log_prob = self.log_prob(like_params, freepar_names) if not np.isfinite(log_prob): print("Non-finite value detected:", like_params, log_prob) - print("log prob is", log_prob) + if self.verbose: + print("log prob is", log_prob) return -1.0 * log_prob # def maximise_posterior( diff --git a/cupix/likelihood/likelihood_parameter.py b/cupix/likelihood/likelihood_parameter.py index 97f2fa6..24c9cdc 100644 --- a/cupix/likelihood/likelihood_parameter.py +++ b/cupix/likelihood/likelihood_parameter.py @@ -83,6 +83,8 @@ def get_new_parameter(self, value_in_cube): def likeparam_from_dict(params_dict): # takes a dictionary of parameter names and values, and returns a list of LikelihoodParameter objects + if params_dict is None: + return [] like_params = [] for name, value in params_dict.items(): like_params.append(LikelihoodParameter(name=name, min_value=-1000, max_value=1000, value=value)) @@ -91,6 +93,8 @@ def likeparam_from_dict(params_dict): def dict_from_likeparam(like_params): # takes a list of LikelihoodParameter objects, and returns a dictionary of parameter names and values + if like_params is None: + return {} params_dict = {} for param in like_params: params_dict[param.name] = param.value diff --git a/cupix/likelihood/lya_theory.py b/cupix/likelihood/lya_theory.py index 95b7272..878610e 100644 --- a/cupix/likelihood/lya_theory.py +++ b/cupix/likelihood/lya_theory.py @@ -1,8 +1,7 @@ import numpy as np import copy -from lace.cosmo import 
camb_cosmo +from lace.cosmo import cosmology from cupix.likelihood.forestflow_emu import FF_emulator -from cupix.likelihood import CAMB_model from cupix.likelihood.lyaP3D import LyaP3D from cupix.likelihood.likelihood_parameter import likeparam_from_dict, LikelihoodParameter, dict_from_likeparam, format_like_params_dict import sys @@ -19,7 +18,7 @@ class Theory(object): def __init__( self, zs, - cosmo_dict=None, + fid_cosmo=None, default_lya_theory='best_fit_arinyo_from_p1d', p3d_label='arinyo', emulator_label=None, @@ -29,7 +28,7 @@ def __init__( """Setup object to compute predictions for the 1D power spectrum. Inputs: - zs: redshifts that will be evaluated - - cosmo: dictionary of cosmology parameters used for unit conversions, setting the linear power spectrum, etc + - fid_cosmo: fiducial Cosmology object, used for unit conversions, setting the linear power spectrum, etc - default_lya_theory: string tag to specify which default set of likelihood parameters to use. Options are 'best_fit_arinyo_from_p1d' to use the best-fit Arinyo coefficients from the p1d paper, 'best_fit_igm_from_p1d' to use the best-fit IGM parameters from the p1d paper, and 'best_fit_arinyo_from_colore' to use the best-fit Arinyo coefficients from Laura's CF fits. - p3d_label: string tag to specify which P3D model to use. Current option is 'arinyo' to use the Arinyo model. - emulator_label: string tag to specify which emulator to use for predicting the Arinyo coefficients. Current option is 'forestflow_emu' to use the forestflow emulator. 
@@ -39,7 +38,10 @@ def __init__( self.zs = zs # specify pivot point used in compressed parameters self.k_unit = k_unit - self.set_cosmo(cosmo_dict) + if fid_cosmo is None: + self.fid_cosmo = cosmology.Cosmology() + else: + self.fid_cosmo = fid_cosmo self._load_P3D_model(p3d_label=p3d_label) self.default_lya_theory = default_lya_theory self.set_default_param_values(tag=default_lya_theory) @@ -47,117 +49,40 @@ def __init__( self.igm_par_names = ["Delta2_p", "n_p", "mF", "gamma", "sigT_Mpc", "kF_Mpc", "lambda_P"] if emulator_label == "forestflow_emu": - self.emulator = FF_emulator(zs, self.cosmo_dict) + self.emulator = FF_emulator(zs) elif emulator_label is None: assert "igm" not in self.default_lya_theory, "If default_lya_theory includes 'igm', an emulator must be specified to predict the Arinyo parameters. Please specify an emulator_label, for example 'forestflow_emu'." else: raise ValueError("Emulator label not recognized. Current only option is 'forestflow_emu'.") - def set_cosmo_dict(self, cosmo_dict_input): - # Make a dictionary with some default values, if none are passed - # Replace any passed by user-specified inputs) - omnuh2 = 0.0006 - mnu = omnuh2 * 93.14 - H0 = 67.36 - omch2 = 0.12 - ombh2 = 0.02237 - As = 2.1e-9 - ns = 0.9649 - nrun = 0.0 - w = -1.0 - omk = 0 - cosmo_dict = { - 'H0': H0, - 'omch2': omch2, - 'ombh2': ombh2, - 'mnu': mnu, - 'omk': omk, - 'As': As, - 'ns': ns, - 'nrun': nrun, - 'w': w - } - if cosmo_dict_input is not None: - for key in cosmo_dict_input: - if key in cosmo_dict: - cosmo_dict[key] = cosmo_dict_input[key] # update default value with user-provided value - else: - print(f"Warning: {key} not recognized as a cosmological parameter, ignoring it.") - self.cosmo_dict = cosmo_dict - - def set_cosmo( - self, - cosmo_dict - ): - """Setup initial cosmology""" - # first, set the comology dictionary - self.set_cosmo_dict(cosmo_dict) - laceCosmo = camb_cosmo.get_cosmology_from_dictionary(self.cosmo_dict) - self.cosmo = {} - self.cosmo["zs"] 
= self.zs - self.cosmo["cosmo"] = CAMB_model.CAMBModel( - zs=self.zs, - cosmo=laceCosmo, - ) - self.cosmo["dkms_dMpc_zs"] = self.cosmo["cosmo"].get_dkms_dMpc() - self.cosmo["dAA_dMpc_zs"] = self.cosmo["cosmo"].get_dAA_dMpc() - self.cosmo["ddeg_dMpc_zs"] = self.cosmo["cosmo"].get_ddeg_dMpc() - - - def fixed_background(self, like_params): - """Check if any of the input likelihood parameters would change - the background expansion of the initial cosmology""" - if like_params is None: - return True - # look for parameters that would change background - for par in like_params: - if par.name in ["ombh2", "omch2", "H0", "mnu", "cosmomc_theta"]: - return False - - return True - - def get_unit_conversions(self, zs, like_params, return_blob=False): + def get_unit_conversions(self, zs, like_params): """return conversion from Mpc to km/s or Mpc to inv Angstroms, and from Mpc to deg for transverse separations""" - - # compute linear power parameters at all redshifts, and H(z) / (1+z) - if self.fixed_background(like_params): + + # convert list of LikelihoodParameters to dictionary + params_dict = dict_from_likeparam(like_params) + print('params_dict', params_dict) + + if self.fid_cosmo.same_background(cosmo_params=params_dict): # use background and transfer functions from initial cosmology + cosmo = self.fid_cosmo if self.verbose: print("recycle transfer function") - dkms_dMpc_zs = [] - ddeg_dMpc_zs = [] - dAA_dMpc_zs = [] - for z in zs: - _ = np.argwhere(self.cosmo["zs"] == z)[0, 0] - dkms_dMpc_zs.append(self.cosmo["dkms_dMpc_zs"][_]) - ddeg_dMpc_zs.append(self.cosmo["ddeg_dMpc_zs"][_]) - dAA_dMpc_zs.append(self.cosmo["dAA_dMpc_zs"][_]) - dkms_dMpc_zs = np.array(dkms_dMpc_zs) - ddeg_dMpc_zs = np.array(ddeg_dMpc_zs) - dAA_dMpc_zs = np.array(dAA_dMpc_zs) - if return_blob: - blob = self.get_blob_fixed_background(like_params) else: - # setup a new CAMB_model from like_params - if self.verbose: - print("create new CAMB_model") - camb_model = self.cosmo["cosmo"].get_new_model(zs, 
like_params) - dkms_dMpc_zs = camb_model.get_dkms_dMpc() - dAA_dMpc_zs = camb_model.get_dAA_dMpc() - ddeg_dMpc_zs = camb_model.get_ddeg_dMpc() - if return_blob: - blob = self.get_blob(camb_model=camb_model) + cosmo = cosmology.Cosmology(cosmo_params_dict=params_dict) + + dkms_dMpc_zs = np.array([cosmo.get_dkms_dMpc(z) for z in zs]) + ddeg_dMpc_zs = np.array([cosmo.get_ddeg_dMpc(z) for z in zs]) + dAA_dMpc_zs = np.array([cosmo.get_dAA_dMpc(z) for z in zs]) + if self.k_unit == 'ikms': return_conv_k_of_zs = dkms_dMpc_zs else: return_conv_k_of_zs = dAA_dMpc_zs - if return_blob: - return return_conv_k_of_zs, ddeg_dMpc_zs, blob - else: - return return_conv_k_of_zs, ddeg_dMpc_zs - + return return_conv_k_of_zs, ddeg_dMpc_zs + + def get_emulator_calls( self, iz_choice, theory_inputs ): @@ -169,11 +94,17 @@ def get_emulator_calls( if 'T0' in key: sigT_kms = thermal_broadening_kms(theory_inputs[key]) z_int = int(key.split('_')[-1]) - sigT_Mpc = sigT_kms / self.cosmo["dkms_dMpc_zs"][z_int] + print('WARNING: check this code (Andreu)') + z=self.zs[z_int] + # I don't this can assume fiducial cosmo... + sigT_Mpc = sigT_kms / self.fid_cosmo.get_dkms_dMpc(z=z) theory_inputs[f'sigT_Mpc_{z_int}'] = sigT_Mpc elif 'kF_kms' in key: z_int = int(key.split('_')[-1]) - kF_Mpc = theory_inputs[key] / self.cosmo["dkms_dMpc_zs"][z_int] + print('WARNING: check this code (Andreu)') + z=self.zs[z_int] + # I don't this can assume fiducial cosmo... + kF_Mpc = theory_inputs[key] / self.fid_cosmo.get_dkms_dMpc(z=z) theory_inputs[f'kF_Mpc_{z_int}'] = kF_Mpc # later, when varying cosmology, can add here the conversions for Delta* and n* to Deltap and np @@ -249,9 +180,7 @@ def get_px_AA( ): """Emulate Px in velocity units, for all redshift bins, as a function of input likelihood parameters. - theta_arcmin is a list of theta bins. 
- It might also return a covariance from the emulator, - or a blob with extra information for the fitter.""" + theta_arcmin is a list of theta bins.""" if verbose is None: verbose = self.verbose @@ -415,7 +344,7 @@ def _load_P3D_model(self, p3d_label='arinyo'): """ if p3d_label == 'arinyo': from forestflow.model_p3d_arinyo import ArinyoModel - arinyo = ArinyoModel(cosmo=self.cosmo_dict) # set model + arinyo = ArinyoModel(fid_cosmo=self.fid_cosmo) else: sys.exit("Error: no P3D model specified. Please choose a valid P3D model. Current option are 'arinyo'.") @@ -442,4 +371,4 @@ def get_param(self, param_name, iz_choice=None): else: return self.default_param_dict[param_name] - \ No newline at end of file + diff --git a/cupix/likelihood/model_contaminants.py b/cupix/likelihood/model_contaminants.py index 90cd386..fd9bc78 100644 --- a/cupix/likelihood/model_contaminants.py +++ b/cupix/likelihood/model_contaminants.py @@ -1,345 +1,125 @@ +import copy import numpy as np -from cupix.nuisance import ( - metal_model_class, - metal_metal_model_class, - hcd_model_McDonald2005, - hcd_model_Rogers2017, - hcd_model_class, - hcd_model_rogers_class, - si_mult, - si_add, - SN_model, - AGN_model, -) - - -class Contaminants(object): - """Contains all IGM models""" - - def __init__( - self, - free_param_names=None, - metal_models=None, - hcd_model=None, - sn_model=None, - agn_model=None, - pars_cont=None, - ic_correction=None, - ): - self.pars_cont = pars_cont - self.ic_correction = ic_correction - # setup metal models - self.metal_models = {} - if "flat_priors" in pars_cont: - flat_priors = pars_cont["flat_priors"] - else: - flat_priors = None - if "Gauss_priors" in pars_cont: - Gauss_priors = pars_cont["Gauss_priors"] - else: - Gauss_priors = None - - if "z_max" in pars_cont: - z_max = pars_cont["z_max"] - else: - z_max = None - - prop_coeffs = {} - fid_vals = {} - for key in pars_cont: - fid_vals[key] = pars_cont[key] - for key2 in ["otype", "ztype", "znodes"]: - key3 = key + "_" + 
key2 - if key3 in pars_cont: - prop_coeffs[key3] = pars_cont[key3] - else: - if key3.endswith("otype"): - prop_coeffs[key3] = "exp" - elif key3.endswith("ztype"): - prop_coeffs[key3] = "pivot" - - # if joint_model: - self.metal_add = ["Si_add"] #"CIVa_CIVb", "MgIIa_MgIIb", - - # setup metal models - key = "Si_mult" - try: - print("Models already set up") - self.metal_models[key] = metal_models[key] - except: - print("Setting up Si_mult model") - self.metal_models[key] = si_mult.SiMult( - free_param_names=free_param_names, - fid_vals=fid_vals, - prop_coeffs=prop_coeffs, - z_max=z_max, - flat_priors=flat_priors, - Gauss_priors=Gauss_priors, - ) - - key = "Si_add" - try: - self.metal_models[key] = metal_models[key] - except: - self.metal_models[key] = si_add.SiAdd( - free_param_names=free_param_names, - fid_vals=fid_vals, - prop_coeffs=prop_coeffs, - z_max=z_max, - flat_priors=flat_priors, - Gauss_priors=Gauss_priors, - ) - - # setup HCD model - if hcd_model: - self.hcd_model = hcd_model - else: - if pars_cont["hcd_model_type"] == "Rogers2017": - self.hcd_model = hcd_model_Rogers2017.HCD_Model_Rogers2017( - free_param_names=free_param_names, - fid_A_damp=pars_cont["A_damp1"], - fid_A_scale=pars_cont["A_scale1"], - ) - elif pars_cont["hcd_model_type"] == "McDonald2005": - self.hcd_model = hcd_model_McDonald2005.HCD_Model_McDonald2005( - free_param_names=free_param_names, - fid_A_damp=pars_cont["A_damp1"], - ) - elif pars_cont["hcd_model_type"] == "new": - self.hcd_model = hcd_model_class.HCD_Model( - free_param_names=free_param_names, - fid_vals=pars_cont, - flat_priors=flat_priors, - Gauss_priors=Gauss_priors, - ) - elif pars_cont["hcd_model_type"] == "new_rogers": - self.hcd_model = hcd_model_rogers_class.HCD_Model_Rogers( - free_param_names=free_param_names, - fid_vals=fid_vals, - prop_coeffs=prop_coeffs, - flat_priors=flat_priors, - Gauss_priors=Gauss_priors, - ) - else: - raise ValueError( - "hcd_model_type must be one of 'Rogers2017', 'McDonald2005', 'new', or 
'new2'" - ) - - # setup SN model - if sn_model: - self.sn_model = sn_model - else: - self.sn_model = SN_model.SN_Model( - free_param_names=free_param_names, - fid_value=pars_cont["SN"], - ) - - # setup AGN model - if agn_model: - self.agn_model = sn_model - else: - self.agn_model = AGN_model.AGN_Model( - free_param_names=free_param_names, - fid_value=pars_cont["AGN"], - ) - - # def get_dict_cont(self): - # dict_out = {} - - # # maximum number of parameters - # for ii in range(2): - # for metal_line in self.args.metal_lines: - # flag = "f_" + metal_line + "_" + str(ii) - # dict_out[flag] = self.fid_metals[flag][-1 - ii] - # flag = "s_" + metal_line + "_" + str(ii) - # dict_out[flag] = self.fid_metals[flag][-1 - ii] - # dict_out["ln_A_damp_" + str(ii)] = self.fid_A_damp[-1 - ii] - # dict_out["ln_A_scale_" + str(ii)] = self.fid_A_scale[-1 - ii] - # dict_out["ln_SN_" + str(ii)] = self.fid_SN[-1 - ii] - # dict_out["ln_AGN_" + str(ii)] = self.fid_AGN[-1 - ii] - # dict_out["ic_correction"] = self.args.ic_correction - - # return dict_out - - def get_contamination( - self, z, k_kms, mF, M_of_z, like_params=[], remove=None - ): - # include multiplicative metal contamination - - if len(z) == 1: - cont_mul_metals = np.ones_like(k_kms) - cont_add_metals = np.zeros_like(k_kms) - else: - cont_mul_metals = [] - cont_add_metals = [] - for iz in range(len(z)): - cont_mul_metals.append(np.ones_like(k_kms[iz])) - cont_add_metals.append(np.zeros_like(k_kms[iz])) - print("midway model_contaminants.py") - print(self.metal_models) - for model_name in self.metal_models: - cont = self.metal_models[model_name].get_contamination( - z=z, - k_kms=k_kms, - mF=mF, - like_params=like_params, - remove=remove, - ) - print("further model_contaminants.py") - if len(z) == 1: - if model_name in self.metal_add: - cont_add_metals += cont - else: - cont_mul_metals *= cont - else: - for iz in range(len(z)): - if model_name in self.metal_add: - if type(cont) != int: - cont_add_metals[iz] += cont[iz] - else: 
- cont_add_metals[iz] += cont - else: - if type(cont) != int: - cont_mul_metals[iz] *= cont[iz] - else: - cont_mul_metals[iz] *= cont - print("late model_contaminants.py") - # include HCD contamination - cont_HCD = self.hcd_model.get_contamination( - z=z, - k_kms=k_kms, - like_params=like_params, - ) - - # include SN contamination - # if len(z) != 1: - # k_Mpc = [] - # for iz in range(len(z)): - # k_Mpc.append(k_kms[iz] * M_of_z[iz]) - # else: - # k_Mpc = [k_kms[0] * M_of_z[0]] - # cont_SN = self.sn_model.get_contamination( - # z=z, - # k_Mpc=k_Mpc, - # like_params=like_params, - # ) - cont_SN = 1 - - # include AGN contamination - # cont_AGN = self.agn_model.get_contamination( - # z=z, - # k_kms=k_kms, - # like_params=like_params, - # ) - # if np.any(cont_AGN < 0): - # cont_AGN = 1 - cont_AGN = 1 - - if self.ic_correction: - IC_corr = ref_nyx_ic_correction(k_kms, z) - else: - IC_corr = 1 - - if len(z) == 1: - mult_cont_total = ( - cont_mul_metals * cont_HCD * cont_SN * cont_AGN * IC_corr - ) - add_cont_total = cont_add_metals - else: - mult_cont_total = [] - add_cont_total = [] - if type(cont_mul_metals) == int: - _cont_mul_metals = np.ones_like(z) - else: - _cont_mul_metals = cont_mul_metals - - if type(cont_add_metals) == int: - _cont_add_metals = np.zeros_like(z) - else: - _cont_add_metals = cont_add_metals - - if type(cont_HCD) == int: - _cont_HCD = np.ones_like(z) - else: - _cont_HCD = cont_HCD - - if type(cont_SN) == int: - _cont_SN = np.ones_like(z) - else: - _cont_SN = cont_SN - - if type(cont_AGN) == int: - _cont_AGN = np.ones_like(z) - else: - _cont_AGN = cont_AGN - - if type(IC_corr) == int: - _IC_corr = np.ones_like(z) - else: - _IC_corr = IC_corr - - for iz in range(len(z)): - mult_cont_total.append( - _cont_mul_metals[iz] - * _cont_HCD[iz] - * _cont_SN[iz] - * _cont_AGN[iz] - * _IC_corr[iz] - ) - add_cont_total.append(_cont_add_metals[iz]) - print("end model_contaminants.py") - return mult_cont_total, add_cont_total - - -def 
ref_nyx_ic_correction(k_kms, z): - # This is the function fitted from the comparison of two Nyx runs, - # one with 2lpt (single fluid) IC and the other one with monofonic (2 fluid) - # - The high k points and z evolution are well determined - # - Low k term: quite uncertain, due to cosmic variance - ic_corr_z = np.array([0.15261529, -2.30600644, 2.61877894]) - ic_corr_k = 0.003669741766936781 - if len(z) == 1: - ancorIC = (ic_corr_z[0] * z**2 + ic_corr_z[1] * z + ic_corr_z[2]) * ( - 1 - np.exp(-k_kms / ic_corr_k) - ) - corICs = 1 / (1 - ancorIC / 100) - else: - corICs = [] - for iz in range(len(z)): - ancorIC = ( - ic_corr_z[0] * z[iz] ** 2 + ic_corr_z[1] * z[iz] + ic_corr_z[2] - ) * (1 - np.exp(-k_kms[iz] / ic_corr_k)) - corICs.append(1 / (1 - ancorIC / 100)) - - return corICs - - -# def nyx_ic_correction(k_kms, z): -# # This is the function fitted from the comparison of two Nyx runs, -# # one with 2lpt (single fluid) IC and the other one with monofonic (2 fluid) -# # - The high k points and z evolution are well determined -# # - Low k term: quite uncertain, due to cosmic variance -# coeff0 = np.poly1d(np.array([0.00067032, -0.00626953, 0.0073908])) -# coeff1 = np.poly1d(np.array([0.00767315, -0.04693207, 0.07151469])) -# cfit = np.zeros(2) -# cfit[0] = coeff0(z) -# cfit[1] = coeff1(z) - -# rfit = np.poly1d(cfit) -# # multiplicative correction -# ic_corr = 10 ** rfit(np.log10(k_kms)) - -# return ic_corr - - -# def nuisance_nyx_ic_correction(P0, k, z, Aic, Bic): -# ic_corr_k = 0.003669741766936781 -# correction = (Aic + Bic * (z - 3)) * (1 - np.exp(-k / ic_corr_k)) # in % -# return P0 / (1 - 0.01 * correction) - - -# def prior_nyx_ic_correction(): -# # The central values are the result of a 1st order polynomial fit from the -# # 2nd order function given in _ref_nyx_ic_correction(P0, k, z) -# return {"Aic": (-2.9, 1.0), "Bic": (-1.4, 0.5)} +# our modules below + + +class ContaminantsModel(object): + """Help the theory with contaminants (HCDs, metals, sky, 
continuum...)""" + + def __init__(self, z, config={'verbose':False}): + """Create object from a dictionary""" + self.z_lya = z + self.lr_metal = 1206.52 # SiIII for now + self.setup_from_config(config) + return + + + def setup_from_config(self, config): + """Setup from a dictionary""" + self.verbose = config.get('verbose', False) + if self.verbose: print('ContaminantsModel::setup_from_config') + + # set meaningful default values for all parameters + self.default_hcd_params = self.get_default_hcd_params(config) + self.default_metal_params = self.get_default_metal_params(config) + self.default_sky_params = self.get_default_sky_params(config) + self.default_continuum_params = self.get_default_continuum_params(config) + + # read the function xi_noise(theta) needed for the sky contamination + self.xi_noise = self.read_xi_noise(config) + + return + + + def get_default_hcd_params(self, config): + # here we should get the default values based on the config and z + # L_H in Mpc, not Mpc/h + hcd_params = {'b_H': -0.02, 'beta_H': 0.5, 'L_H_Mpc': 5.0/0.67} + return hcd_params + + + def get_default_metal_params(self, config): + # here we should get the default values based on the config and z + metal_params = {'b_X': -0.005, 'beta_X': 0.5} + return metal_params + + + def get_default_sky_params(self, config): + # here we should get the default values based on the config and z + a_noise = 4e-4 + # b_noise here in Mpc, not Mpc/h + Delta_rp = 4 / 0.67 + sky_params = {'b_noise_Mpc': a_noise * Delta_rp} + return sky_params + + + def get_default_continuum_params(self, config): + # here we should get the default values based on the config and z + continuum_params = {'kC_Mpc': 0.02 * 0.67, 'pC': 1} + return continuum_params + + + def get_hcd_params(self, params): + """Get the complete list of HCD parameters from defaults and input""" + + hcd_params = copy.deepcopy(self.default_hcd_params) + for key in hcd_params: + if key in params: + hcd_params[key] = params[key] + + return hcd_params 
+ + + def get_metal_params(self, params): + """Get the complete list of metal parameters from defaults and input""" + + metal_params = copy.deepcopy(self.default_metal_params) + for key in metal_params: + if key in params: + metal_params[key] = params[key] + + return metal_params + + + def get_sky_params(self, params): + """Get the complete list of sky parameters from defaults and input""" + + sky_params = copy.deepcopy(self.default_sky_params) + for key in sky_params: + if key in params: + sky_params[key] = params[key] + + return sky_params + + + def get_continuum_params(self, params): + """Get the complete list of continuum parameters from defaults and input""" + + continuum_params = copy.deepcopy(self.default_continuum_params) + for key in continuum_params: + if key in params: + continuum_params[key] = params[key] + + return continuum_params + + + def read_xi_noise(self, config): + """Read function xi_noise(theta) for sky contamination""" + + from scipy.interpolate import interp1d + from astropy.table import Table + import cupix + + cupixpath = cupix.__path__[0].rsplit('/', 1)[0] + fname=cupixpath+"/data/desi_instrument/desi-instrument-syst-for-forest-auto-correlation_arcmin.csv" + syst_table = Table.read(fname) + syst_interp = interp1d(syst_table["theta_arc"], syst_table["xi_noise"], kind='linear') + + return syst_interp + + + def get_xi_noise(self, theta_arc): + """Evaluate xi_noise at theta_arc""" + + return self.xi_noise(theta_arc) diff --git a/cupix/likelihood/model_igm.py b/cupix/likelihood/model_igm.py deleted file mode 100644 index b969429..0000000 --- a/cupix/likelihood/model_igm.py +++ /dev/null @@ -1,365 +0,0 @@ -import os -import lace -import numpy as np -from cupix.nuisance.mean_flux_class import MeanFlux -from cupix.nuisance.pressure_class import Pressure -from cupix.nuisance.thermal_class import Thermal -from cupix.utils.utils import is_number_string - - -class IGM(object): - """Contains all IGM models""" - - def __init__( - self, - 
free_param_names=None, - pars_igm=None, - F_model=None, - T_model=None, - P_model=None, - ): - # set simulation from which we get fiducial IGM history - for key in ["mF", "T", "kF"]: - lab = "label_" + key - if lab in pars_igm: - setattr(self, "fid_sim_igm_" + key, pars_igm[lab]) - else: - setattr(self, "fid_sim_igm_" + key, "mpg_central") - - if "Gauss_priors" in pars_igm: - Gauss_priors = pars_igm["Gauss_priors"] - else: - Gauss_priors = None - - prop_coeffs = {} - fid_vals = {} - for key in ["tau_eff", "gamma", "sigT_kms", "kF_kms"]: - if key in pars_igm: - fid_vals[key] = pars_igm[key] - for key2 in ["otype", "ztype", "znodes"]: - key3 = key + "_" + key2 - if key3 in pars_igm: - prop_coeffs[key3] = pars_igm[key3] - else: - if key3 == "tau_eff_otype": - prop_coeffs[key3] = "exp" - else: - if key3.endswith("otype"): - prop_coeffs[key3] = "const" - elif key3.endswith("ztype"): - prop_coeffs[key3] = "pivot" - - if "priors" in pars_igm: - type_priors = pars_igm["priors"] - else: - type_priors = "hc" - - fid_igm = self.get_igm( - sim_igm_mF=self.fid_sim_igm_mF, - sim_igm_T=self.fid_sim_igm_T, - sim_igm_kF=self.fid_sim_igm_kF, - ) - - self.set_priors(fid_igm, prop_coeffs, type_priors=type_priors) - - self.models = { - "F_model": F_model, - "T_model": T_model, - "P_model": P_model, - } - - for key in self.models: - if self.models[key] is None: - if key == "F_model": - model = MeanFlux - elif key == "T_model": - model = Thermal - elif key == "P_model": - model = Pressure - - self.models[key] = model( - free_param_names=free_param_names, - fid_igm=fid_igm, - fid_vals=fid_vals, - prop_coeffs=prop_coeffs, - flat_priors=self.priors, - Gauss_priors=Gauss_priors, - ) - - def set_fid_igm(self, zs): - self.fid_igm = {} - self.fid_igm["z"] = zs - for key in self.models: - for key2 in self.models[key].list_coeffs: - if key2 == "tau_eff": - self.fid_igm[key] = self.models[key].get_tau_eff(zs) - elif key2 == "gamma": - self.fid_igm[key] = self.models[key].get_gamma(zs) - elif key2 
== "sigT_kms": - self.fid_igm[key] = self.models[key].get_sigT_kms(zs) - elif key2 == "kF_kms": - self.fid_igm[key] = self.models[key].get_kF_kms(zs) - - def get_igm(self, sim_igm_mF=None, sim_igm_T=None, sim_igm_kF=None): - """Load IGM history""" - - repo = os.path.dirname(lace.__path__[0]) - fname = os.path.join( - repo, "data", "sim_suites", "Australia20", "IGM_histories.npy" - ) - try: - self.igm_hist_mpg = np.load(fname, allow_pickle=True).item() - except: - raise ValueError( - fname - + " not found. You can produce it using LaCE" - + r" script save_mpg_IGM.py" - ) - - # fname = os.path.join(os.environ["NYX_PATH"], "IGM_histories.npy") - # try: - # self.igm_hist_nyx = np.load(fname, allow_pickle=True).item() - # except: - # raise ValueError( - # fname - # + " not found. You can produce it using LaCE" - # + r" script save_nyx_IGM.py" - # ) - - sim_igms = [sim_igm_mF, sim_igm_T, sim_igm_kF] - - igms_return = {} - for ii, sim_igm in enumerate(sim_igms): # only keep MPG histories for now. 
- if sim_igm[:3] == "mpg": - igm_hist = self.igm_hist_mpg - else: - ValueError("sim_igm must be 'mpg'") - - if sim_igm not in igm_hist: - igm_return = igm_hist[sim_igm + "_0"] - else: - igm_return = igm_hist[sim_igm] - - if ii == 0: - igms_return["tau_eff_z"] = igm_return["z"] - igms_return["tau_eff"] = igm_return["tau_eff"] - igms_return["mF"] = igm_return["mF"] - igms_return["F_suite"] = sim_igm - elif ii == 1: - igms_return["sigT_kms_z"] = igm_return["z"] - igms_return["sigT_kms"] = igm_return["sigT_kms"] - igms_return["sigT_Mpc"] = igm_return["sigT_Mpc"] - igms_return["gamma_z"] = igm_return["z"] - igms_return["gamma"] = igm_return["gamma"] - igms_return["T_suite"] = sim_igm - elif ii == 2: - igms_return["kF_kms_z"] = igm_return["z"] - igms_return["kF_kms"] = igm_return["kF_kms"] - igms_return["kF_Mpc"] = igm_return["kF_Mpc"] - igms_return["P_suite"] = sim_igm - - # important for nyx simulations, not all have kF - # if so, we assign the values for nyx_central - if np.sum(igm_return["kF_kms"] != 0) == 0: - igms_return["kF_kms_z"] = igm_hist["nyx_central"]["z"] - igms_return["kF_Mpc"] = igm_hist["nyx_central"]["kF_Mpc"] - igms_return["kF_kms"] = igm_hist["nyx_central"]["kF_kms"] - igms_return["P_suite"] = "nyx_central" - - return igms_return - - def set_priors(self, fid_igm, prop_coeffs, type_priors="hc", z_pivot=3): - """Set priors for all IGM models - - This is only important for giving the minimizer and the sampler a uniform - prior that it is not too broad. 
The metric below takes care of the real priors - """ - - if type_priors == "hc": - percent = 95 - elif type_priors == "data": - percent = 68 - else: - raise ValueError("type_priors must be 'hc' or 'data'") - - self.priors = {} - for par in fid_igm: - if (par == "val_scaling") | ( - par.endswith("_z") | par.endswith("_suite") - ): - continue - - if (par == "mF") | (par == "tau_eff"): - z = fid_igm["tau_eff_z"] - otype = prop_coeffs["tau_eff_otype"] - emu_suite = fid_igm["F_suite"] - elif (par == "kF_Mpc") | (par == "kF_kms"): - z = fid_igm["kF_kms_z"] - otype = prop_coeffs["kF_kms_otype"] - emu_suite = fid_igm["P_suite"] - elif par == "gamma": - z = fid_igm["sigT_kms_z"] - elif (par == "sigT_Mpc") | (par == "sigT_kms"): - z = fid_igm["sigT_kms_z"] - otype = prop_coeffs["sigT_kms_otype"] - emu_suite = fid_igm["T_suite"] - - if emu_suite.startswith("mpg"): - all_igm = self.igm_hist_mpg - elif emu_suite.startswith("nyx"): - all_igm = self.igm_hist_nyx - else: - ValueError("sim_igm must be 'mpg' or 'nyx'") - - res_div = np.zeros((len(all_igm), 2)) - for ii, sim in enumerate(all_igm): - if (sim in ["accel2"]) | (np.char.isnumeric(sim[-1]) == False): - continue - - string_split = sim.split("_") - sim_label = string_split[0] + "_" + string_split[1] - if is_number_string(sim_label[-1]) == False: - continue - - try: - _ = np.argwhere( - np.isfinite(fid_igm[par]) - & (fid_igm[par] != 0) - & (all_igm[sim][par] != 0) - )[:, 0] - except: - continue - if len(_) == 0: - continue - - res_div[ii, 0] = np.max(all_igm[sim][par][_] / fid_igm[par][_]) - res_div[ii, 1] = np.min(all_igm[sim][par][_] / fid_igm[par][_]) - - _ = np.argwhere( - np.isfinite(res_div[:, 0]) - & (res_div[:, 0] != 0) - & (np.abs(res_div[:, 0]) != 1) - )[:, 0] - if len(_) == 0: - print("no good points for ", par) - self.priors[par] = [[-1, 1], [-1, 1]] - continue - - if otype == "exp": - y0_max = np.abs( - np.log(np.percentile(np.abs(res_div[_, 0]), percent)) - ) - elif otype == "const": - y0_max = 
np.percentile(res_div[_, 0], percent) - else: - raise ValueError("otype must be 'exp' or 'const'", par) - - _ = np.argwhere( - np.isfinite(res_div[:, 1]) - & (res_div[:, 1] != 0) - & (np.abs(res_div[:, 1]) != 1) - )[:, 0] - if len(_) == 0: - print("no good points for ", par) - self.priors[par] = [[-1, 1], [-1, 1]] - continue - - if otype == "exp": - y0_min = np.abs( - np.log(np.percentile(1 / np.abs(res_div[_, 1]), percent)) - ) - elif otype == "const": - y0_min = np.percentile(res_div[_, 1], 100 - percent) - - y0_cen = 0.5 * (y0_max + y0_min) - if otype == "exp": - y1 = y0_cen / np.log((1 + z.max()) / (1 + z_pivot)) - elif otype == "const": - y1 = y0_cen / ((1 + z.max()) / (1 + z_pivot)) - - self.priors[par] = [ - [-y1 * 2, y1 * 2], - [-y0_min * 1.05, y0_max * 1.05], - ] - - # def set_metric(self, emu_igm_params, tol_factor=95): - # # get all individual points separately - - # all_points = {} - # for par in emu_igm_params: - # if par not in ["Delta2_p", "n_p", "alpha_p"]: - # all_points[par] = [] - - # for key in self.all_igm: - # if key[4].isdigit(): - # # distance between tau scalings for mpg is too small - # if (key[:3] == "mpg") and (key[-1] != "0"): - # continue - # for par in all_points: - # ind_use = np.argwhere(self.all_igm[key][par] != 0)[:, 0] - # all_points[par].append(self.all_igm[key][par][ind_use]) - - # for key in all_points: - # all_points[key] = np.concatenate(all_points[key]) - - # # compute the maximum distance between training points - # min_dist = {} - - # # get closest point to each IGM point - # for key in all_points: - # npoints = all_points[key].shape[0] - # min_dist[key] = np.zeros(npoints) - # for ii in range(npoints): - # dist = np.abs(all_points[key][ii] - all_points[key]) - # _ = dist != 0 - # min_dist[key][ii] = dist[_].min() - - # # get most distant of closest points - # max_dist = {} - # for key in min_dist: - # max_dist[key] = min_dist[key].max() - - # # define function to get normalizer distance from new points - # def 
metric_par(p0): - # dist = ( - # ((p0["mF"] - all_points["mF"]) / max_dist["mF"]) ** 2 - # + ( - # (p0["sigT_Mpc"] - all_points["sigT_Mpc"]) - # / max_dist["sigT_Mpc"] - # ) - # ** 2 - # + ((p0["gamma"] - all_points["gamma"]) / max_dist["gamma"]) ** 2 - # + ((p0["kF_Mpc"] - all_points["kF_Mpc"]) / max_dist["kF_Mpc"]) - # ** 2 - # ) - # return np.sqrt(dist) - - # # find maximum normalized distance between training points - # dist_norm = np.zeros(npoints) - - # for ii in range(npoints): - # p0 = {} - # for key in all_points: - # p0[key] = all_points[key][ii] - # res = metric_par(p0) - # _ = res != 0 - # dist_norm[ii] = res[_].min() - - # # max_dist_norm = dist_norm.max() * tol_factor - # max_dist_norm = np.percentile(dist_norm, tol_factor) - - # def metric_par(p0): - # dist = ( - # ((p0["mF"] - all_points["mF"]) / max_dist["mF"]) ** 2 - # + ( - # (p0["sigT_Mpc"] - all_points["sigT_Mpc"]) - # / max_dist["sigT_Mpc"] - # ) - # ** 2 - # + ((p0["gamma"] - all_points["gamma"]) / max_dist["gamma"]) ** 2 - # + ((p0["kF_Mpc"] - all_points["kF_Mpc"]) / max_dist["kF_Mpc"]) - # ** 2 - # ) - # return np.sqrt(dist.min()) / max_dist_norm - - # return metric_par diff --git a/cupix/likelihood/model_lya.py b/cupix/likelihood/model_lya.py new file mode 100644 index 0000000..fc99162 --- /dev/null +++ b/cupix/likelihood/model_lya.py @@ -0,0 +1,156 @@ +import copy +import numpy as np + +# our modules below +from lace.cosmo.thermal_broadening import thermal_broadening_kms +import forestflow +from forestflow.P3D_cINN import P3DEmulator + + + +class LyaModel(object): + """Help the theory with pure / clean Lya P3D. + It can work with IGM parameters, or directly with Lya parameters. 
+ """ + + def __init__(self, z, config={'verbose':False}): + """Create object from dictionary""" + self.z = z + self.lr_lya = 1215.67 # could be read from elsewhere + self._setup_from_config(config) + return + + + def _setup_from_config(self, config): + """Setup from a dictionary""" + + self.verbose = config.get('verbose', False) + if self.verbose: print('LyaModel::setup_from_config') + + # setup default values for parameters + self.default_lya_model = config.get('default_lya_model', 'best_fit_arinyo_from_p1d') + if 'igm' in self.default_lya_model: + # default values of mF, T0, gamma and kF_kms + self.default_igm_params = self.get_default_igm_params(config) + self.default_lya_params = None + # setup emulator + emulator_label = config.get('emulator_label', 'forest_mpg') + Nrealizations = config.get('Nrealizations', 3000) + self.emulator = self.get_emulator(emulator_label, Nrealizations) + else: + # default values of Lya params (bias, beta, arinyo) + self.default_lya_params = self.get_default_lya_params(config) + self.default_igm_params = None + self.emulator = None + + return + + + def get_default_igm_params(self, config): + # here we should get the default values based on default_lya_model string and z + print('FINISH get_default_igm_params') + igm_params = {'mF': 0.8, 'T0': 1e4, 'gamma': 1.6, 'kF_kms': 0.1} + print('use config dictorionary to update values') + return igm_params + + + def get_default_lya_params(self, config): + if self.verbose: print('LyaModel::get_default_lya_params') + # here we should get the default values based on default_lya_model string and z + print('FINISH get_default_lya_params') + lya_params = {'bias': -0.12, 'beta': 1.5} + lya_params['q1'] = 0.5 + lya_params['q2'] = 0.0 + lya_params['av'] = 0.3 + lya_params['bv'] = 1.5 + # these are in Mpc units + lya_params['kv_Mpc'] = 0.2 + lya_params['kp_Mpc'] = 10.0 + # update parameters if present in config + for par in lya_params: + if par in config: + lya_params[par] = config[par] + + return 
lya_params + + + def get_emulator(self, emulator_label, Nrealizations): + """Setup the ForestFlow emulator""" + + if emulator_label == "forest_mpg": + path_program = forestflow.__path__[0][:-10] + emulator = P3DEmulator( + model_path=path_program+"/data/emulator_models/forest_mpg", #new_emu + Nrealizations=Nrealizations + ) + else: + raise ValueError("implement emulator_label", emulator_label) + + return emulator + + + def get_lya_params(self, cosmo, params): + """Get the complete list of lya parameters (bias, beta, arinyo) + from defaults and input params, potentially using the emulator""" + + # check whether you are working with IGM parameters (and emulator) + if self.default_igm_params is not None: + assert self.emulator is not None, "need emulator for IGM params" + # updated IGM params + igm_params = copy.deepcopy(self.default_igm_params) + for key in igm_params: + if key in params: + igm_params[key] = params[key] + # get Lya params from IGM params with emulator + lya_params = self.emulate_lya_params(cosmo, igm_params) + # here we could look for Lya params also in input params... 
not sure + + else: + lya_params = copy.deepcopy(self.default_lya_params) + # update Lya params + for key in lya_params: + if key in params: + lya_params[key] = params[key] + + return lya_params + + + def emulate_lya_params(self, cosmo, igm_params): + """Use emulator to translate IGM params and cosmo to Lya params""" + + # emu params include igm and cosmo params + emu_params = {} + emu_params['mF'] = igm_params['mF'] + emu_params['gamma'] = igm_params['gamma'] + + # these igm params are not in the correct units for the emulator + dkms_dMpc = cosmo.get_dkms_dMpc(self.z) + sigT_kms = thermal_broadening_kms(igm_params['T0']) + emu_params['sigT_Mpc'] = sigT_kms / dkms_dMpc + emu_params['kF_Mpc'] = igm_params['kF_kms'] * dkms_dMpc + + # amplitude and slope of linear power at kp = 0.7 1/Mpc + #kp_Mpc = self.emulator.kp_Mpc + kp_Mpc = 0.7 + linP_params = cosmo.get_linP_Mpc_params(z=self.z, kp_Mpc=kp_Mpc) + emu_params['Delta2_p'] = linP_params['Delta2_p'] + emu_params['n_p'] = linP_params['n_p'] + + # use emulator to estimate Lya params + lya_params = self.emulator.predict_Arinyos(emu_params=emu_params) + if self.verbose: + print('igm params', igm_params) + print('emu params', emu_params) + print('lya params', lya_params) + + # we use slightly different names in cupix + lya_params['kp_Mpc'] = lya_params.pop('kp') + kvav = lya_params.pop('kvav') + av = lya_params['av'] + # kvav = kv^av + lya_params['kv_Mpc'] = np.exp( np.log(kvav) / av ) + if self.verbose: + print('updated lya params', lya_params) + + return lya_params + diff --git a/cupix/likelihood/model_systematics.py b/cupix/likelihood/model_systematics.py deleted file mode 100644 index 58583e1..0000000 --- a/cupix/likelihood/model_systematics.py +++ /dev/null @@ -1,59 +0,0 @@ -import numpy as np - -from cupix.nuisance.resolution_model import Resolution_Model -from cupix.nuisance.resolution_model_chunks import Resolution_Model_Chunks - - -class Systematics(object): - """Contains all IGM models""" - - def __init__( - 
self, free_param_names=None, resolution_model=None, pars_syst=None - ): - if "Gauss_priors" in pars_syst: - Gauss_priors = pars_syst["Gauss_priors"] - else: - Gauss_priors = None - # setup Resolution model - if resolution_model: - self.resolution_model = resolution_model - else: - if pars_syst["res_model_type"] == "pivot": - self.resolution_model = Resolution_Model( - free_param_names=free_param_names, - fid_R_coeff=pars_syst["R_coeff"], - Gauss_priors=Gauss_priors, - ) - elif pars_syst["res_model_type"] == "chunks": - self.resolution_model = Resolution_Model_Chunks( - free_param_names=free_param_names, - Gauss_priors=Gauss_priors, - ) - else: - raise ValueError( - "resolution_model_type must be 'pivot' or 'chunks'" - ) - - # def get_dict_cont(self): - # dict_out = {} - - # if self.args.fid_syst["res_model_type"] == "pivot": - # for ii in range(len(self.args.fid_syst["R_coeff"])): - # dict_out["R_coeff_" + str(ii)] = self.args.fid_syst["R_coeff"][ - # -1 - ii - # ] - # else: - # for ii in range(self.resolution_model.get_Nparam()): - # dict_out["R_coeff_" + str(ii)] = 0 - - # return dict_out - - def get_contamination(self, z, k_kms, like_params=[]): - # include multiplicative resolution correction - cont_resolution = self.resolution_model.get_contamination( - z=z, k_kms=k_kms, like_params=like_params - ) - - # print("cont_resolution", cont_resolution) - - return cont_resolution diff --git a/cupix/likelihood/new_likelihood.py b/cupix/likelihood/new_likelihood.py new file mode 100644 index 0000000..d1c9e7a --- /dev/null +++ b/cupix/likelihood/new_likelihood.py @@ -0,0 +1,115 @@ +import numpy as np +import math +from cupix.likelihood.window_and_rebin import convolve_window, rebin_theta + +class Likelihood(object): + """Likelihood class, holds data, theory, and knows about parameters""" + + def __init__( + self, + data, + theory, + iz, + verbose=False, + ): + """Setup likelihood from theory and data. 
Options: + - data (required) is the data to model + - theory (required) instance of lya_theory + - iz is the index of the redshift bin to use + """ + + self.verbose = verbose + self.data = data + self.iz = iz + self.theory = theory + assert self.data.z[iz] == self.theory.z, "inconsistent redshifts" + if self.verbose: + print("Likelihood will evaluate redshift bin", self.iz, "corresponds to z =", self.theory.z) + + + def get_convolved_px(self, params={}): + # array with discrete k values (inverse Angstroms) + k_m = self.data.k_m[self.iz] + + # array with centers of the original theta bins (in arcmin) + theta_a = (self.data.theta_min_a_arcmin + self.data.theta_max_a_arcmin)/2. + + # evaluate theory at these values (no rebinning, no convolution) + Px_Zam = self.theory.get_px_obs(theta_arc=theta_a, k_AA=k_m, params=params) + + # number of rebinned k_M and theta_A bins + N_M = len(self.data.k_M_edges[self.iz]) - 1 + N_A = len(self.data.theta_max_A_arcmin) + if self.verbose: + print('Working with {} k bins, and {} theta bins'.format(N_M, N_A)) + + # window convolution + # loop through the large-theta bins from the data + Px_ZAM_all = [] + for it_A in range(N_A): + # get the small-theta indices in this coarse-theta bin + ind_in_theta = self.data.B_A_a.astype(bool)[it_A,:] # generally this could be different with redshift; assume it's not for now + theta_a_inds = np.where(ind_in_theta)[0] + # collect Px and V from each narrow bin + Px_ZaM_all = np.zeros((len(theta_a_inds), N_M)) + V_ZaM_all = np.zeros((len(theta_a_inds), N_M)) + for save_index, a in enumerate(theta_a_inds): + # retrieve the window matrix for this small-theta bin + if self.data.U_ZaMn is not None: + U_ZaMn = self.data.U_ZaMn[self.iz, a] + # print("U_ZaMn shape", U_ZaMn.shape) + # print("Px_ZaM shape to be convolved", Px_Zam[a].T.shape) + Px_ZaM = convolve_window(U_ZaMn, Px_Zam[a].T) + Px_ZaM_all[save_index,:] = Px_ZaM + V_ZaM = self.data.V_ZaM[self.iz, a] + V_ZaM_all[save_index,:] = V_ZaM + # rebin in theta 
+ Px_ZAM = rebin_theta(V_ZaM_all, Px_ZaM_all) + Px_ZAM_all.append(Px_ZAM) + + return np.asarray(Px_ZAM_all) + + + def get_chi2(self, params={}, return_info=False): + log_like, info = self._compute_log_like(params) + chi2 = -2.0 * log_like + if return_info: + return chi2, info + else: + return chi2 + + + def get_log_like(self, params={}, return_info=False): + log_like, info = self._compute_log_like(params=params) + if return_info: + return log_like, info + else: + return log_like + + + def _compute_log_like(self, params={}): + + # get model prediction, including convolution + model_px = self.get_convolved_px(params=params) + if self.verbose: + print('shape model_px', model_px.shape) + N_A, N_M = model_px.shape + + # compute log like contributions from each theta bin + log_like_all = np.zeros(N_A) + for it_A in range(N_A): + det_cov = np.linalg.det(self.data.cov_ZAM[self.iz, it_A,:,:]) + if det_cov == 0: + print("Det(cov) appears to be 0: could be a singular covariance matrix!") + # compute chi2 for this theta bin + icov_ZAM = np.linalg.inv(self.data.cov_ZAM[self.iz, it_A,:,:]) + data_A = self.data.Px_ZAM[self.iz, it_A,:] + diff = np.squeeze(data_A - model_px[it_A]) + chi2 = np.dot(np.dot(np.squeeze(icov_ZAM), diff), diff) + log_like_all[it_A] = -0.5*chi2 + + log_like = np.sum(log_like_all) + info = {'log_like_all': log_like_all} + + return log_like, info + diff --git a/cupix/likelihood/new_minimizer.py b/cupix/likelihood/new_minimizer.py new file mode 100644 index 0000000..dfa83fe --- /dev/null +++ b/cupix/likelihood/new_minimizer.py @@ -0,0 +1,311 @@ +import numpy as np +import matplotlib.pyplot as plt +from iminuit import Minuit +import copy +from cupix.likelihood.likelihood_parameter import par_index, LikelihoodParameter, like_parameter_by_name + + +class IminuitMinimizer(object): + """Wrapper around an iminuit minimizer for Lyman alpha likelihood""" + + def __init__(self, like, like_params, free_param_names, error=0.02, verbose=False): + """Setup minimizer from 
likelihood.""" + + self.verbose = verbose + self.like = like + self.free_param_names = free_param_names + self.like_params = like_params + self.out_like_params = None + free_params = [] + for par in self.free_param_names: + for lpar in self.like_params: + if lpar.name == par: + free_params.append(lpar) + assert len(self.free_param_names)==len(free_params), "Couldn't find all desired free parameters in like_params" + if self.verbose: + print("Free params are", free_params) + + # set initial values for the parameters + ini_values = np.full(len(self.free_param_names), 0.5) + for i,par in enumerate(free_params): + if par.ini_value is not None: + ini_values[i] = par.get_value_in_cube(par.ini_value) + if self.verbose: + print("Setting ini_values (in cube) to:", ini_values) + + # iminuit class itself + self.minimizer = Minuit(self.minus_log_prob_interface, ini_values, name=self.free_param_names) + + # parameter limits + for i,parname in enumerate(self.free_param_names): + self.minimizer.limits[parname] = (0.0, 1.0) + if self.verbose: + print("Set the iMinuit params to:\n", self.minimizer.params) + + # set errordef=0.5 if using log-likelihood + self.minimizer.errordef = 0.5 + + # error only used to set initial parameter step + self.minimizer.errors = error + + + def minus_log_prob_interface(self, values): + # a log like function that ingests the likelihood log_like function + # but instead of taking parameter dictionaries or LikelihoodParamter objects, + # takes a list of parameter values in the unit cube, and uses like.free_params information to transform them to physical values before passing to the likelihood log_like function + # this will be the function that is passed to the minimizer, and will be called by the minimizer with different parameter values in the unit cube + + Np = len(self.free_param_names) + assert len(values) == Np, "Inconsistent number of free parameters" + + # dictionary of parameters that will be passed to the likelihood class + params = {} + for 
ip in range(Np): + name = self.free_param_names[ip] + par = self.like_params[ip] + par_value = par.value_from_cube(values[ip]) + params[name] = par_value + if self.verbose: + print('params', params) + + # ask likelihood to evalute log-like + log_like = self.like.get_log_like(params=params) + + # here we could add priors eventually + minus_log_prob = - log_like - 0.0 + + return minus_log_prob + + + def minimize(self, compute_hesse=True): + """Run migrad optimizer, and optionally compute Hessian matrix""" + + if self.verbose: + print("will run migrad") + self.minimizer.print_level = 0 + self.minimizer.migrad() + + if compute_hesse: + if self.verbose: + print("will compute Hessian matrix") + self.minimizer.hesse() + + return + + + def plot_best_fit(self, multiply_by_k=True, every_other_theta=False, show=True, theorylabel=None, datalabel=None, plot_fname=None, ylim=None, xlim=None, ylim2=None, title=None, residual_to_theory=False): + """Plot best-fit P1D vs data.""" + + # get best-fit values from minimizer (should check that it was run) + best_fit_values = np.array(self.minimizer.values) + + like_params_to_plot = copy.deepcopy(self.like_params) + for i, lp in enumerate(like_params_to_plot): + if lp.name in self.free_param_names: + + + index = self.free_param_names.index(lp.name) + lp.value = lp.value_from_cube(best_fit_values[index]) + if self.verbose: + print("best-fit value for", lp.name, "is", lp.value) + + self.like.plot_px( + like_params=like_params_to_plot, + every_other_theta=every_other_theta, + multiply_by_k=multiply_by_k, + xlim=xlim, + ylim=ylim, + show=show, + theorylabel=theorylabel, + datalabel=datalabel, + plot_fname=plot_fname, + ylim2=ylim2, + title=title, + residual_to_theory=residual_to_theory + ) + + return + + + def best_fit_value(self, pname, return_hesse=False): + """Return best-fit value for pname parameter (assuming it was run). 
+        - return_hesse: set to true to return also Gaussian error"""
+
+        # get best-fit values from minimizer (in unit cube)
+        cube_values = np.array(self.minimizer.values)
+        if self.verbose:
+            print("cube values =", cube_values)
+
+        # get index for this parameter, and normalize value
+        ipar = self.free_param_names.index(pname)
+        par = like_parameter_by_name(self.like_params, pname)
+        par_value = par.value_from_cube(cube_values[ipar])
+
+        # check if you were asked for errors as well
+        if return_hesse:
+            cube_errors = self.minimizer.errors
+            # rescale cube-space error to physical units of the parameter
+            par_error = cube_errors[ipar] * (par.max_value - par.min_value)
+            return par_value, par_error
+        else:
+            return par_value
+
+
+    def set_bestfit_like_params(self):
+        """Store best-fit values (from the minimizer) as a list of
+        LikelihoodParameter objects in self.out_like_params."""
+        print("Assuming minimizer already run.")
+        cube_values = np.array(self.minimizer.values)
+        # list of LikelihoodParameter objects that will get passed through
+        # the likelihood functions
+        like_params_temp = []
+        for i, val in enumerate(cube_values):
+            parname = self.free_param_names[i]
+            par_i = par_index(self.like_params, parname)
+            like_param = self.like_params[par_i]
+            # copy over all but update the current values
+            like_params_temp.append(
+                LikelihoodParameter(
+                    name=like_param.name,
+                    min_value=like_param.min_value,
+                    max_value=like_param.max_value,
+                    ini_value=like_param.ini_value,
+                    value=like_param.value_from_cube(val)
+                ))
+        self.out_like_params = like_params_temp
+
+
+    def fit_probability(self):
+        """Return fit probability of the best-fit model (runs
+        set_bestfit_like_params first if needed)."""
+        if self.out_like_params is None:
+            self.set_bestfit_like_params()
+        return self.like.fit_probability(self.out_like_params, n_free_p=len(self.free_param_names))
+
+
+    def chi2(self):
+        """Return chi2 of the best-fit model (runs
+        set_bestfit_like_params first if needed)."""
+        if self.out_like_params is None:
+            self.set_bestfit_like_params()
+        return self.like.get_chi2(self.out_like_params)
+
+
+    def plot_ellipses(self, pname_x, pname_y, nsig=2, cube_values=False, true_vals=None, true_val_label="true value", xrange=None, yrange=None):
+        """Plot Gaussian contours for parameters (pname_x,pname_y)
+        - nsig: number of sigma contours to plot
+        - cube_values: if True, will use unit cube values."""
+
+        from matplotlib.patches import Ellipse
+        from numpy import linalg as LA
+
+        # figure out order of parameters in free parameters list
+        ix = self.free_param_names.index(pname_x)
+        iy = self.free_param_names.index(pname_y)
+
+        # find out best-fit values, errors and covariance for parameters
+        val_x = self.minimizer.values[ix]
+        val_y = self.minimizer.values[iy]
+        sig_x = self.minimizer.errors[ix]
+        sig_y = self.minimizer.errors[iy]
+        # correlation coefficient between the two parameters
+        r = self.minimizer.covariance[ix, iy] / sig_x / sig_y
+
+        # rescale from cube values (unless asked not to)
+        if not cube_values:
+            par_x = like_parameter_by_name(self.like_params, pname_x)
+            val_x = par_x.value_from_cube(val_x)
+            sig_x = sig_x * (par_x.max_value - par_x.min_value)
+            par_y = like_parameter_by_name(self.like_params, pname_y)
+            val_y = par_y.value_from_cube(val_y)
+            sig_y = sig_y * (par_y.max_value - par_y.min_value)
+
+        # shape of ellipse from eigenvalue decomposition of covariance
+        w, v = LA.eig(
+            np.array(
+                [
+                    [sig_x**2, sig_x * sig_y * r],
+                    [sig_x * sig_y * r, sig_y**2],
+                ]
+            )
+        )
+
+        # semi-major and semi-minor axis of ellipse
+        a = np.sqrt(w[0])
+        b = np.sqrt(w[1])
+        print("a, b", a, b)
+        # figure out inclination angle of ellipse
+        alpha = np.arccos(v[0, 0])
+        if v[1, 0] < 0:
+            alpha = -alpha
+        # compute angle in degrees (expected by matplotlib)
+        alpha_deg = alpha * 180 / np.pi
+
+        # make plot
+        fig = plt.subplot(111)
+        for isig in range(1, nsig + 1):
+            ell = Ellipse(
+                (val_x, val_y), 2 * isig * a, 2 * isig * b, angle=alpha_deg
+            )
+            ell.set_alpha(0.6 / isig)
+            fig.add_artist(ell)
+        # plot a marker at the central value
+        plt.plot(val_x, val_y, "ro", label="best fit")
+        if true_vals is not None:
+            plt.axvline(true_vals[pname_x], color='grey', linestyle='--', label=true_val_label)
+            plt.axhline(true_vals[pname_y], color='grey', linestyle='--')
+
+        plt.xlabel(pname_x)
+        plt.ylabel(pname_y)
+        # FIX: compare to None with "is", not "=="
+        if xrange is None or yrange is None:
+            if true_vals is None:
+                plt.xlim(val_x - (nsig + 1) * sig_x, val_x + (nsig + 1) * sig_x)
+                plt.ylim(val_y - (nsig + 1) * sig_y, val_y + (nsig + 1) * sig_y)
+            else:
+                # make sure the true values are inside the plotted range
+                minx = min(val_x - (nsig + 1) * sig_x, true_vals[pname_x] - .1 * abs(true_vals[pname_x]))
+                maxx = max(val_x + (nsig + 1) * sig_x, true_vals[pname_x] + .1 * abs(true_vals[pname_x]))
+                miny = min(val_y - (nsig + 1) * sig_y, true_vals[pname_y] - .1 * abs(true_vals[pname_y]))
+                maxy = max(val_y + (nsig + 1) * sig_y, true_vals[pname_y] + .1 * abs(true_vals[pname_y]))
+                plt.ylim([miny, maxy])
+                plt.xlim([minx, maxx])
+        else:
+            plt.ylim(yrange)
+            plt.xlim(xrange)
+        plt.axvline(like_parameter_by_name(self.like_params, pname_x).ini_value, color='orange', linestyle='dotted', label='ini value')
+        plt.axhline(like_parameter_by_name(self.like_params, pname_y).ini_value, color='orange', linestyle='dotted')
+        plt.legend()
+
+
+    def results_dict_2par(self):
+        """Return dictionary with best-fit results and covariance matrix.
+        Assumes at least two free parameters (as the name suggests)."""
+        results_dict = {}
+        for parname in self.like.free_param_names:
+            bestfit, err = self.best_fit_value(parname, return_hesse=True)
+            results_dict[parname] = bestfit
+            results_dict[parname+'_err'] = err
+        covariance = self.minimizer.covariance
+        results_dict['cov'] = covariance
+
+        ## save the following for the sake of plotting ellipses:
+        ix = self.like.index_by_name(self.like.free_param_names[0])
+        iy = self.like.index_by_name(self.like.free_param_names[1])
+
+        # find out best-fit values, errors and covariance for parameters
+        sig_x = self.minimizer.errors[ix]
+        sig_y = self.minimizer.errors[iy]
+        r = self.minimizer.covariance[ix, iy] / sig_x / sig_y
+        results_dict['r'] = r
+        # FIX: was parname[0]/parname[1], i.e. the first two *characters* of
+        # the last looped parameter name; store the two parameter names instead
+        results_dict['par_x'] = self.like.free_param_names[0]
+        results_dict['par_y'] = self.like.free_param_names[1]
+        prob = self.fit_probability()
+        results_dict['prob'] = prob
+        # FIX: was self.get_chi2(), which is not defined on this class
+        chi2 = self.chi2()
+        results_dict['chi2'] = chi2
+        return results_dict
+
+def save_analysis_npz(results, filename="analysis_results.npz"):
+    """
+    results: list or dict of per-analysis dictionaries
+    """
+    out = {}
+
+    if isinstance(results, list):
+        for i, r in
enumerate(results):
+            out[f'analysis-{i}'] = r
+    else:  # dict
+        for k, r in results.items():
+            out[str(k)] = r
+
+    # Save each dict as an object array.
+    # FIX: allow_pickle is not a np.savez parameter — it was being captured by
+    # **kwds and written out as a spurious array named "allow_pickle".
+    # The flag belongs on the reader side: np.load(filename, allow_pickle=True).
+    np.savez(filename, **out)
+
diff --git a/cupix/likelihood/theory.py b/cupix/likelihood/theory.py
new file mode 100644
index 0000000..e4f60f6
--- /dev/null
+++ b/cupix/likelihood/theory.py
@@ -0,0 +1,533 @@
+import copy
+import numpy as np
+
+# our modules below
+from lace.cosmo import cosmology, rescale_cosmology
+from cupix.likelihood.likelihood_parameter import dict_from_likeparam
+from cupix.likelihood.model_lya import LyaModel
+from cupix.likelihood.model_contaminants import ContaminantsModel
+
+
+
+class Theory(object):
+    """Likelihood will ask this object for Px predictions"""
+
+    def __init__(self, z, fid_cosmo=None, config=None):
+        """Setup object to compute predictions for the cross power spectrum Px.
+        Inputs:
+        - z: redshift of the Lya bin
+        - fid_cosmo: fiducial Cosmology object, used for unit conversions, setting the linear power spectrum, etc
+        - config: dictionary with other settings.
+        """
+
+        # FIX: avoid mutable default argument; behavior unchanged for callers
+        if config is None:
+            config = {'verbose': False}
+
+        self.verbose = config.get('verbose', False)
+        self.z = z
+        # this is only needed temporarily while working with old likelihood
+        self.zs = z
+
+        # this could also be specified from the config
+        if fid_cosmo is None:
+            self.fid_cosmo = cosmology.Cosmology()
+        else:
+            self.fid_cosmo = fid_cosmo
+
+        # setup LyaModel to compute clean P3D at z
+        self.lya_model = LyaModel(z, config)
+
+        # setup model for systematics / contaminants at z
+        self.cont_model = ContaminantsModel(z, config)
+
+        # whether to model the different contaminants
+        self.include_hcd = config.get('include_hcd', False)
+        self.include_metal = config.get('include_metal', False)
+        self.include_sky = config.get('include_sky', False)
+        self.include_continuum = config.get('include_continuum', False)
+
+        return
+
+
+    def get_cosmology(self, cosmo=None, params={}):
+        """If cosmo is not None, return cosmo.
+ If cosmo is None, use params to compute cosmology""" + if cosmo is None: + # figure out whether we can recycle the fiducial cosmology + if self.fid_cosmo.same_background(cosmo_params=params): + cosmo = rescale_cosmology.RescaledCosmology(fid_cosmo=self.fid_cosmo, + new_params_dict=params) + if self.verbose: print("recycle transfer function") + else: + if self.verbose: print("create new CAMB cosmology") + cosmo = cosmology.Cosmology(cosmo_params_dict=params) + else: + if self.verbose: print('cosmology already provided') + + return cosmo + + + # currently needed by the old likelihood class, keeping the format as is + def get_px_AA(self, + k_AA, + theta_arcmin, + zs=None, + like_params={}, + return_arinyo_coeffs=False, + verbose=None + ): + if verbose is None: + verbose = self.verbose + if verbose: + print('inside Theory::get_px_AA') + + # convert list of LikelihoodParameters to dictionary + params_dict = dict_from_likeparam(like_params) + if verbose: + print('params_dict', params_dict) + + assert zs == self.z, "Input redshift does not match one in theory" + + if self.verbose: + print('Theta bins (arcmin)', theta_arcmin) + + return self.get_px_obs(theta_arc=theta_arcmin, k_AA=k_AA, + params=params_dict) + + + def get_px_obs(self, theta_arc, k_AA, cosmo=None, params={}): + + # figure out the cosmology to use + cosmo = self.get_cosmology(cosmo=cosmo, params=params) + + if self.include_hcd: + # ask for Px (Lya + HCD) + px_obs = self.get_px_lya_hcd_obs(theta_arc, k_AA, cosmo, params) + else: + # ask for Lya Px + px_obs = self.get_px_lya_obs(theta_arc, k_AA, cosmo, params) + + if self.include_metal: + # compute metals here (silicon auto, and silicon x lya) + px_metal_auto = self.get_px_metal_auto_obs(theta_arc, k_AA, cosmo, params) + px_metal_cross = self.get_px_metal_cross_obs(theta_arc, k_AA, cosmo, params) + px_obs += px_metal_auto + px_metal_cross + + if self.include_sky: + # compute contamination from sky residuals + px_sky = self.get_px_sky_obs(theta_arc, k_AA, 
cosmo, params) + px_obs += px_sky + + if self.include_continuum: + # compute multiplicative correction due to continuum fitting + cont_distortion = self.get_continuum_distortion_obs(k_AA, cosmo, params) + px_obs *= cont_distortion + + return px_obs + + + def get_px_lya_obs(self, theta_arc, k_AA, cosmo=None, params={}): + + # figure out the cosmology to use + cosmo = self.get_cosmology(cosmo=cosmo, params=params) + + # unit conversions to Mpc (where theory lives) + darc_dMpc = cosmo.get_darc_dMpc(self.z) + lr_lya = self.lya_model.lr_lya + dAA_dMpc = cosmo.get_dAA_dMpc(self.z, lambda_rest_AA=lr_lya) + rt_Mpc = theta_arc / darc_dMpc + kp_Mpc = k_AA * dAA_dMpc + + # compute Px in Mpc + Px_Mpc = self.get_px_lya_Mpc(rt_Mpc, kp_Mpc, cosmo, params) + + # back to inverse Angstroms + Px_AA = Px_Mpc * dAA_dMpc + + return Px_AA + + + def get_px_lya_hcd_obs(self, theta_arc, k_AA, cosmo=None, params={}): + + # figure out the cosmology to use + cosmo = self.get_cosmology(cosmo=cosmo, params=params) + + # unit conversions to Mpc (where theory lives) + darc_dMpc = cosmo.get_darc_dMpc(self.z) + lr_lya = self.lya_model.lr_lya + dAA_dMpc = cosmo.get_dAA_dMpc(self.z, lambda_rest_AA=lr_lya) + rt_Mpc = theta_arc / darc_dMpc + kp_Mpc = k_AA * dAA_dMpc + + # compute Px in Mpc + Px_Mpc = self.get_px_lya_hcd_Mpc(rt_Mpc, kp_Mpc, cosmo, params) + + # back to inverse Angstroms + Px_AA = Px_Mpc * dAA_dMpc + + return Px_AA + + + def get_z_metal_auto(self): + """Given Lya z bin, compute redshift to evaluated Silicon auto""" + + # Lya redshift to use + z_lya = self.z + lr_lya = self.lya_model.lr_lya + lr_metal = self.cont_model.lr_metal + z_metal = (1+z_lya) * lr_lya / lr_metal - 1 + if self.verbose and False: + print('z_lya =',z_lya) + print('z_metal =',z_metal) + + return z_metal + + + def get_z_metal_cross(self): + """Given Lya z bin, compute redshift to evaluated Silicon x Lya""" + + # Lya redshift to use + z_lya = self.z + lr_lya = self.lya_model.lr_lya + lr_metal = self.cont_model.lr_metal + 
z_metal = (1+z_lya) * lr_lya / lr_metal - 1 + z_cross = np.sqrt((1+z_lya)*(1+z_metal)) - 1 + if self.verbose and False: + print('z_lya =',z_lya) + print('z_metal =',z_metal) + print('z_cross =',z_cross) + + return z_cross + + + def get_px_metal_auto_obs(self, theta_arc, k_AA, cosmo=None, params={}): + + # figure out the cosmology to use + cosmo = self.get_cosmology(cosmo=cosmo, params=params) + + # redshift to use (not the same as the Lya z) + z_metal_auto = self.get_z_metal_auto() + + # unit conversions to Mpc (where theory lives) + darc_dMpc = cosmo.get_darc_dMpc(z_metal_auto) + lr_metal = self.cont_model.lr_metal + dAA_dMpc = cosmo.get_dAA_dMpc(z_metal_auto, lambda_rest_AA=lr_metal) + rt_Mpc = theta_arc / darc_dMpc + kp_Mpc = k_AA * dAA_dMpc + + # compute Px in Mpc + Px_Mpc = self.get_px_metal_auto_Mpc(rt_Mpc, kp_Mpc, cosmo, params) + + # back to inverse Angstroms + Px_AA = Px_Mpc * dAA_dMpc + + return Px_AA + + + def get_px_metal_cross_obs(self, theta_arc, k_AA, cosmo=None, params={}): + + # figure out the cosmology to use + cosmo = self.get_cosmology(cosmo=cosmo, params=params) + + # redshift to use (not the same as the Lya z) + z_metal_cross = self.get_z_metal_cross() + + # unit conversions to Mpc (where theory lives) + darc_dMpc = cosmo.get_darc_dMpc(z_metal_cross) + lr_metal = self.cont_model.lr_metal + lr_lya = self.lya_model.lr_lya + lr_cross = np.sqrt(lr_metal*lr_lya) + dAA_dMpc = cosmo.get_dAA_dMpc(z_metal_cross, lambda_rest_AA=lr_cross) + rt_Mpc = theta_arc / darc_dMpc + kp_Mpc = k_AA * dAA_dMpc + + # compute Px in Mpc + Px_Mpc = self.get_px_metal_cross_Mpc(rt_Mpc, kp_Mpc, cosmo, params) + + # back to inverse Angstroms + Px_AA = Px_Mpc * dAA_dMpc + + return Px_AA + + + def _compute_px_from_p3d(self, rt_Mpc, kp_Mpc, p3d_func_kmu, kt_Mpc_max): + + # trying to recycle existing Px functionality in ForestFlow + from forestflow import pcross + from forestflow.model_p3d_arinyo import coordinates + @coordinates("k_mu") + def dummy_p3d_func_kmu(dummy, k, mu, 
ari_pp=None, new_cosmo_params=None): + return p3d_func_kmu(k, mu) + return pcross.Px_Mpc_detailed( + z=123456789, + kpar_iMpc=kp_Mpc, + rperp_Mpc=rt_Mpc, + p3d_fun_Mpc=dummy_p3d_func_kmu, + p3d_params={'dummy':123456789}, + max_k_for_p3d=kt_Mpc_max) + + + def _compute_lya_hcd_biases(self, kpar, lya_params, hcd_params): + """Compute scale-dependent bias for Lya + HCD""" + + b_a = lya_params['bias'] + beta_a = lya_params['beta'] + b_H = hcd_params['b_H'] + beta_H = hcd_params['beta_H'] + L_H_Mpc = hcd_params['L_H_Mpc'] + # F_hcd(kpar) + F_H = np.exp(-kpar*L_H_Mpc) + # total (scale-dependent) bias + b_T = b_a + b_H * F_H + b_beta_T = b_a * beta_a + b_H * beta_H * F_H + beta_T = b_beta_T / b_T + + return b_T, beta_T + + + def _compute_DNL_Arinyo(self, k, mu, linP, lya_params): + """Compute small-scales correction from Arinyo 2015""" + + # Model the small-scale correction (D_NL in Arinyo-i-Prats 2015) + delta2 = (1 / (2 * np.pi**2)) * k**3 * linP + nonlin = delta2 * (lya_params["q1"] + lya_params["q2"] * delta2) + vel = (k / lya_params["kv_Mpc"]) ** lya_params["av"] * mu ** lya_params["bv"] + press = (k / lya_params["kp_Mpc"]) ** 2 + D_NL = np.exp(nonlin * (1 - vel) - press) + + return D_NL + + + def get_p3d_lya_Mpc(self, k, mu, cosmo=None, params={}): + cosmo = self.get_cosmology(cosmo=cosmo, params=params) + linP = cosmo.get_linP_Mpc(self.z, k) + lya_params = self.lya_model.get_lya_params(cosmo, params) + bias = lya_params['bias'] + beta = lya_params['beta'] + # large-scales power + p3d = bias**2 * (1 + beta * mu**2)**2 * linP + # DNL correction from Arinyo + DNL = self._compute_DNL_Arinyo(k, mu, linP, lya_params) + return p3d * DNL + + + def get_px_lya_Mpc(self, rt_Mpc, kp_Mpc, cosmo=None, params={}): + cosmo = self.get_cosmology(cosmo=cosmo, params=params) + # function to be passed to compute Px + def p3d_func(k, mu): + return self.get_p3d_lya_Mpc(k, mu, cosmo, params) + + # maximum kt_Mpc to use (power should be 0 past that) + # could ask CAMB object, but pressure 
is doing this job for you + # kt_Mpc_max = 5 * lya_params['kp_Mpc'] + kt_Mpc_max = 200.0 + + return self._compute_px_from_p3d(rt_Mpc, kp_Mpc, p3d_func, kt_Mpc_max) + + + def get_p3d_lya_hcd_Mpc(self, k, mu, cosmo=None, params={}): + + # figure out cosmology to use from input + cosmo = self.get_cosmology(cosmo=cosmo, params=params) + linP = cosmo.get_linP_Mpc(self.z, k) + + # get the complete list of lya parameters (bias, beta, arinyo) + # from defaults and input params, potentially using the emulator + lya_params = self.lya_model.get_lya_params(cosmo, params) + # get the complete list of hcd parameters (b_H, beta_H, L_H_Mpc) + hcd_params = self.cont_model.get_hcd_params(params) + + # scale-dependent bias for Lya + HCD + kpar = k*mu + b_T, beta_T = self._compute_lya_hcd_biases(kpar, lya_params, hcd_params) + # large-scales power + p3d = b_T**2 * (1 + beta_T * mu**2)**2 * linP + # DNL correction from Arinyo (probably should not apply to HCD...) + DNL = self._compute_DNL_Arinyo(k, mu, linP, lya_params) + return p3d * DNL + + + def get_px_lya_hcd_Mpc(self, rt_Mpc, kp_Mpc, cosmo=None, params={}): + cosmo = self.get_cosmology(cosmo=cosmo, params=params) + # function to be passed to compute Px + def p3d_func(k, mu): + return self.get_p3d_lya_hcd_Mpc(k, mu, cosmo, params) + + # maximum kt_Mpc to use (power should be 0 past that) + # could ask CAMB object, but pressure is doing this job for you + # kt_Mpc_max = 5 * lya_params['kp_Mpc'] + kt_Mpc_max = 200.0 + + return self._compute_px_from_p3d(rt_Mpc, kp_Mpc, p3d_func, kt_Mpc_max) + + + def get_p3d_metal_auto_Mpc(self, k, mu, cosmo=None, params={}): + # evaluate linP at different z than Lya + z_metal_auto = self.get_z_metal_auto() + + # figure out cosmology to use from input + cosmo = self.get_cosmology(cosmo=cosmo, params=params) + linP = cosmo.get_linP_Mpc(z_metal_auto, k) + + # get the complete list of metal parameters (b_X, beta_X) + metal_params = self.cont_model.get_metal_params(params) + bias = metal_params['b_X'] + 
beta = metal_params['beta_X'] + + # large-scales power + p3d = bias**2 * (1 + beta * mu**2)**2 * linP + + # suppress power on small scales (help with Hankl) + kp_Mpc = 10.0 + smooth = np.exp(-(k/kp_Mpc)**2) + + return p3d * smooth + + + def get_px_metal_auto_Mpc(self, rt_Mpc, kp_Mpc, cosmo=None, params={}): + cosmo = self.get_cosmology(cosmo=cosmo, params=params) + # function to be passed to compute Px + def p3d_func(k, mu): + return self.get_p3d_metal_auto_Mpc(k, mu, cosmo, params) + + # maximum kt_Mpc to use (power should be 0 past that) + # could ask CAMB object, but pressure is doing this job for you + # kt_Mpc_max = 5 * lya_params['kp_Mpc'] + kt_Mpc_max = 200.0 + + return self._compute_px_from_p3d(rt_Mpc, kp_Mpc, p3d_func, kt_Mpc_max) + + + def get_p3d_metal_cross_Mpc(self, k, mu, cosmo=None, params={}): + # evaluate linP at different z than Lya + z_cross = self.get_z_metal_cross() + + # figure out cosmology to use from input + cosmo = self.get_cosmology(cosmo=cosmo, params=params) + linP = cosmo.get_linP_Mpc(z_cross, k) + + # get the complete list of lya parameters (bias, beta, arinyo) + # from defaults and input params, potentially using the emulator + lya_params = self.lya_model.get_lya_params(cosmo, params) + b_a = lya_params['bias'] + beta_a = lya_params['beta'] + + # get the complete list of metal parameters (b_X, beta_X) + metal_params = self.cont_model.get_metal_params(params) + b_X = metal_params['b_X'] + beta_X = metal_params['beta_X'] + + # large-scales power + p3d = b_a * b_X * (1 + beta_a * mu**2) * (1 + beta_X * mu**2) * linP + + # suppress power on small scales (help with Hankl) + kp_Mpc = 10.0 + smooth = np.exp(-(k/kp_Mpc)**2) + + # scale of silicon oscillations (in log lambda) + lr_lya = self.lya_model.lr_lya + lr_metal = self.cont_model.lr_metal + dX_loglam = np.log(lr_lya / lr_metal) + # scale in observed Angstroms + lr_cross = np.sqrt(lr_lya * lr_metal) + dX_AA = dX_loglam * (1 + z_cross) * lr_cross + # translate to comoving Mpc + drp_Mpc = 
dX_AA / cosmo.get_dAA_dMpc(z_cross, lambda_rest_AA=lr_cross) + + # compute oscillations + kpar = k * mu + wiggles = 2 * np.cos(kpar * drp_Mpc) + + return p3d * smooth * wiggles + + + def get_px_metal_cross_Mpc(self, rt_Mpc, kp_Mpc, cosmo=None, params={}): + cosmo = self.get_cosmology(cosmo=cosmo, params=params) + # function to be passed to compute Px + def p3d_func(k, mu): + return self.get_p3d_metal_cross_Mpc(k, mu, cosmo, params) + + # maximum kt_Mpc to use (power should be 0 past that) + # could ask CAMB object, but pressure is doing this job for you + # kt_Mpc_max = 5 * lya_params['kp_Mpc'] + kt_Mpc_max = 200.0 + + return self._compute_px_from_p3d(rt_Mpc, kp_Mpc, p3d_func, kt_Mpc_max) + + + def get_px_sky_obs(self, theta_arc, k_AA, cosmo=None, params={}): + + # figure out the cosmology to use + cosmo = self.get_cosmology(cosmo=cosmo, params=params) + + # unit conversions to Mpc (where theory lives) + darc_dMpc = cosmo.get_darc_dMpc(self.z) + lr_lya = self.lya_model.lr_lya + dAA_dMpc = cosmo.get_dAA_dMpc(self.z, lambda_rest_AA=lr_lya) + rt_Mpc = theta_arc / darc_dMpc + kp_Mpc = k_AA * dAA_dMpc + + # compute Px in Mpc + Px_Mpc = self.get_px_sky_Mpc(rt_Mpc, kp_Mpc, cosmo, params) + + # back to inverse Angstroms + Px_AA = Px_Mpc * dAA_dMpc + + return Px_AA + + + def get_px_sky_Mpc(self, rt_Mpc, kp_Mpc, cosmo=None, params={}): + + # figure out the cosmology to use + cosmo = self.get_cosmology(cosmo=cosmo, params=params) + + # unit conversions (should be fast) + darc_dMpc = cosmo.get_darc_dMpc(self.z) + lr_lya = self.lya_model.lr_lya + dAA_dMpc = cosmo.get_dAA_dMpc(self.z, lambda_rest_AA=lr_lya) + + # 1 at theta=0, 0 at large angular separations + theta_arc = rt_Mpc * darc_dMpc + xi_noise = self.cont_model.get_xi_noise(theta_arc) + + # get b_noise parameter (using default and input params) + sky_params = self.cont_model.get_sky_params(params) + b_noise_Mpc = sky_params['b_noise_Mpc'] + + # pixel width in Angstroms + x_AA = 0.8 + x_Mpc = x_AA / dAA_dMpc + + # pixel 
smoothing + x = x_Mpc * kp_Mpc / 2.0 + smooth = (np.sin(x)/x)**2 + + Px_sky = b_noise_Mpc * np.outer(xi_noise, smooth) + + return Px_sky + + + def get_continuum_distortion_obs(self, k_AA, cosmo=None, params={}): + """ compute multiplicative correction due to continuum fitting """ + + # figure out the cosmology to use + cosmo = self.get_cosmology(cosmo=cosmo, params=params) + + # unit conversions to Mpc (where theory lives) + lr_lya = self.lya_model.lr_lya + dAA_dMpc = cosmo.get_dAA_dMpc(self.z, lambda_rest_AA=lr_lya) + kp_Mpc = k_AA * dAA_dMpc + + cont_dist = self.get_continuum_distortion_Mpc(kp_Mpc, params) + + return cont_dist + + + def get_continuum_distortion_Mpc(self, kp_Mpc, params={}): + + # get continuum parameters (using default and input params) + cont_params = self.cont_model.get_continuum_params(params) + kC_Mpc = cont_params['kC_Mpc'] + pC = cont_params['pC'] + + # for now let's use the first model from Blomqvist et al. (2015) + cont_dist = np.tanh( (kp_Mpc/kC_Mpc)**pC ) + + return cont_dist diff --git a/data/desi_instrument/README b/data/desi_instrument/README new file mode 100644 index 0000000..7d6dfe8 --- /dev/null +++ b/data/desi_instrument/README @@ -0,0 +1,5 @@ +python script copied from Vega +https://github.com/andreicuceu/vega/blob/master/vega/models/instrumental_systematics/write_desi_instrumental_syst_table.py + +and modified to write file as a function of arcmin (and not Mpc) + diff --git a/data/desi_instrument/desi-instrument-syst-for-forest-auto-correlation_arcmin.csv b/data/desi_instrument/desi-instrument-syst-for-forest-auto-correlation_arcmin.csv new file mode 100644 index 0000000..3ea222e --- /dev/null +++ b/data/desi_instrument/desi-instrument-syst-for-forest-auto-correlation_arcmin.csv @@ -0,0 +1,189 @@ +theta_arc,xi_noise +0.0,1.0 +0.25,0.9430541524331544 +0.75,0.8291624572994634 +1.25,0.8148507055199238 +1.75,0.8007899614809224 +2.25,0.7886063043893279 +2.75,0.7782817383737457 +3.25,0.7659961719215296 +3.75,0.7543114848558524 
+4.25,0.7423004377650868 +4.75,0.7312318824402114 +5.25,0.7193318154051938 +5.75,0.7084102179093835 +6.25,0.6981060835970171 +6.75,0.6871252712192887 +7.25,0.6756714126191794 +7.75,0.6647271335171937 +8.25,0.6554786957180218 +8.75,0.6446919282028334 +9.25,0.6343277380304918 +9.75,0.624289510965899 +10.25,0.6138634530224713 +10.75,0.6040438776659863 +11.25,0.5948166127887007 +11.75,0.5842664190753988 +12.25,0.5749229269695726 +12.75,0.5648791984288503 +13.25,0.5551872551231092 +13.75,0.5457284534689503 +14.25,0.5362488322470446 +14.75,0.526077672176818 +15.25,0.5166456170165049 +15.75,0.5074017026581206 +16.25,0.4982318269716744 +16.75,0.48836984828298385 +17.25,0.4798160658642523 +17.75,0.4702878915806655 +18.25,0.46083539944163976 +18.75,0.4519147074371322 +19.25,0.44316641939048207 +19.75,0.43392466087011344 +20.25,0.42510919747029413 +20.75,0.416555904638947 +21.25,0.40798536002083235 +21.75,0.4001021993571381 +22.25,0.3917549633250179 +22.75,0.38312881862757997 +23.25,0.37530451247595886 +23.75,0.3676503028499662 +24.25,0.35964512741068594 +24.75,0.35186168809327106 +25.25,0.3442797802853807 +25.75,0.3363307413661187 +26.25,0.32876980592409766 +26.75,0.3214451875326811 +27.25,0.3138803442225297 +27.75,0.3065740559617272 +28.25,0.29910385892313895 +28.75,0.29191626154858075 +29.25,0.28442495375814936 +29.75,0.2775945880827468 +30.25,0.2701273851814644 +30.75,0.2629248136123268 +31.25,0.2560561700280676 +31.75,0.24918531505034314 +32.25,0.2421989427042098 +32.75,0.23553470176476024 +33.25,0.22888911226089959 +33.75,0.22223947555040277 +34.25,0.21570266646216918 +34.75,0.20958064756680042 +35.25,0.20340913402239563 +35.75,0.19751421168785946 +36.25,0.19157459583621858 +36.75,0.18568334317204885 +37.25,0.1800109044596199 +37.75,0.17428116953416653 +38.25,0.16882693376966823 +38.75,0.1632104398084371 +39.25,0.15768718985436592 +39.75,0.15238114924486162 +40.25,0.14700889464619743 +40.75,0.14223278930229954 +41.25,0.13709268091557356 +41.75,0.1323325360589422 
+42.25,0.12744851366030724 +42.75,0.12289952462031505 +43.25,0.11828183510148921 +43.75,0.11376519651676192 +44.25,0.1095546474664413 +44.75,0.10537902092177619 +45.25,0.10136834488133067 +45.75,0.09733494962633489 +46.25,0.09351263307310259 +46.75,0.08968345441750371 +47.25,0.08634117321728987 +47.75,0.08300979115032166 +48.25,0.07981719377415263 +48.75,0.07691348016844045 +49.25,0.0739874640065028 +49.75,0.07126393564933535 +50.25,0.06860486116534291 +50.75,0.06610849621211763 +51.25,0.06369932538364018 +51.75,0.06134868398506905 +52.25,0.059054258725087454 +52.75,0.056895996694436415 +53.25,0.05479657248082659 +53.75,0.0527882857800508 +54.25,0.050892265960504925 +54.75,0.04894490145830076 +55.25,0.047002252301861676 +55.75,0.04524248769264657 +56.25,0.04353777560112471 +56.75,0.041890648645597585 +57.25,0.04040167469046348 +57.75,0.03878037940285704 +58.25,0.03740890397998633 +58.75,0.036068950907067056 +59.25,0.034645519357215444 +59.75,0.03343144158692556 +60.25,0.032173025182882266 +60.75,0.030830330603039274 +61.25,0.02970708583269846 +61.75,0.028595913770474376 +62.25,0.02745823974371075 +62.75,0.026432574700966924 +63.25,0.025349750361789047 +63.75,0.02430436427396433 +64.25,0.02334122332688233 +64.75,0.02235978099809882 +65.25,0.021400716664087802 +65.75,0.020475169304865037 +66.25,0.019553160770052723 +66.75,0.01867959867378177 +67.25,0.017841624477972905 +67.75,0.017048137108946974 +68.25,0.01625467055077828 +68.75,0.01553095789890756 +69.25,0.01484622760379181 +69.75,0.014127837740247774 +70.25,0.013446957029596059 +70.75,0.012760986260018113 +71.25,0.012070961020865216 +71.75,0.011458826446050835 +72.25,0.01089740147180404 +72.75,0.010314716128004754 +73.25,0.009787159610880558 +73.75,0.009272421400391225 +74.25,0.008772657397217045 +74.75,0.008247161770490105 +75.25,0.0078010346440978325 +75.75,0.007315202787974822 +76.25,0.006895649076360807 +76.75,0.006492925304105266 +77.25,0.006064114066846198 +77.75,0.005674671458520899 
+78.25,0.00528017247712117 +78.75,0.004922979114824132 +79.25,0.004582989280714853 +79.75,0.004251562816450614 +80.25,0.003953789490151516 +80.75,0.0036655459710326138 +81.25,0.003373801136586056 +81.75,0.0030702718768942415 +82.25,0.00278048623326092 +82.75,0.002483957541925536 +83.25,0.0022254095886735776 +83.75,0.0019814225097715328 +84.25,0.0017663040486050573 +84.75,0.001547746575339054 +85.25,0.001378535090575608 +85.75,0.0012115811309192559 +86.25,0.0010540710441360839 +86.75,0.0009028916960751647 +87.25,0.0007469606825149893 +87.75,0.0005983812095087938 +88.25,0.00046207814043160275 +88.75,0.0003415315546664492 +89.25,0.00024347887366064493 +89.75,0.0001549172451630915 +90.25,8.704997710758878e-05 +90.75,4.1606717737394076e-05 +91.25,1.3239669244509862e-05 +91.75,2.4096179311547585e-06 +92.25,2.2644639414475294e-07 +92.75,0.0 +1000.0,0.0 diff --git a/data/desi_instrument/write_desi_instrumental_syst_table_arcmin.py b/data/desi_instrument/write_desi_instrumental_syst_table_arcmin.py new file mode 100755 index 0000000..14cbf6a --- /dev/null +++ b/data/desi_instrument/write_desi_instrumental_syst_table_arcmin.py @@ -0,0 +1,60 @@ +#!/usr/bin/env python +import numpy as np +from astropy.table import Table +from vega.utils import find_file + + + +def main(): + path = "instrumental_systematics/desi-positioners.csv" + file = find_file(path) + print(f"Reading {file}") + positioner_table = Table.read(file) + + xp = positioner_table["FOCAL_PLANE_X_DEG"] + yp = positioner_table["FOCAL_PLANE_Y_DEG"] + rpatrol = positioner_table["PATROL_RADIUS_DEG"] + + print("Compute randoms...") + nr = 50000 + x = np.random.uniform(size=nr) * (np.max(xp + rpatrol)) + y = np.random.uniform(size=nr) * (np.max(yp + rpatrol)) + ok = np.repeat(False, nr) + for xxp, yyp, rrp in zip(xp, yp, rpatrol) : + ok |= ((x - xxp)**2 + (y - yyp)**2) < rrp**2 + x = x[ok] + y = y[ok] + + print("Compute correlation...") + #deg2mpc = comoving_distance * np.pi / 180. 
+ bins = np.linspace(0, 200, 401) + nbins = bins.size - 1 + h0 = np.zeros(nbins) + for xx, yy in zip(x, y): + d_deg = np.sqrt((xx - x)**2 + (yy - y)**2) + d_arcmin = d_deg * 60.0 + t, _ = np.histogram(d_arcmin, bins=bins) + h0 += t + ok = (h0 > 0) + rt = (bins[:-1] + (bins[1] - bins[0]) / 2) + rt = rt[ok] + xi = h0[ok] / rt # number of random pairs scales as rt + + # add a value at 0, last measured bin + 1 step, and 1000 Mpc to avoid extrapolations + xi_at_0 = (xi[0] - xi[1]) / (rt[0] - rt[1]) * (0 - rt[0]) + xi[0] # linearly extrapolated to r=0 + rt = np.append(0, rt) + xi = np.append(xi_at_0, xi) + rt = np.append(rt, [rt[-1] + bins[1] - bins[0], 1000.]) + xi = np.append(xi, [0, 0]) + xi /= np.max(xi) # norm + + t = Table() + t["theta_arc"] = rt + t["xi_noise"] = xi + filename = "desi-instrument-syst-for-forest-auto-correlation_arcmin.csv" + t.write(filename, overwrite=True) + print("wrote ", filename) + + +if __name__ == '__main__': + main() diff --git a/notebooks/test/compare_px.py b/notebooks/test/compare_px.py new file mode 100644 index 0000000..20a7595 --- /dev/null +++ b/notebooks/test/compare_px.py @@ -0,0 +1,292 @@ +# --- +# jupyter: +# jupytext: +# formats: ipynb,py:percent +# text_representation: +# extension: .py +# format_name: percent +# format_version: '1.3' +# jupytext_version: 1.16.4 +# kernelspec: +# display_name: cupix +# language: python +# name: cupix +# --- + +# %% [markdown] +# # Copied from ForestFlow, expanded: Tutorial for how to calculate $P_\times$ + +# %% [markdown] +# +# Reproduce results with the new Px functions in cupix + +# %% +import numpy as np +from scipy import special +import numpy as np +import os +import sys +import matplotlib.pyplot as plt +import matplotlib as mpl +from matplotlib import rcParams +# %load_ext autoreload +# %autoreload 2 + + +# %% +rcParams["mathtext.fontset"] = "stix" +rcParams["font.family"] = "STIXGeneral" +# import P3D theory +from lace.cosmo import cosmology +from forestflow.model_p3d_arinyo import 
ArinyoModel +from cupix.likelihood import theory +from forestflow.pcross import Px_Mpc, Px_Mpc_detailed + +# %% [markdown] +# First, choose a redshift and $k$ range. Initialize an instance of the Arinyo class for this redshift given cosmology calculations from Camb. + +# %% +zs = np.array([2, 2.5]) # set target redshift + + +# %% +cosmo_params = { + "H0": 67.66, + "mnu": 0, + "omch2": 0.119, + "ombh2": 0.0224, + "omk": 0, + 'As': 2.105e-09, + 'ns': 0.9665, + "nrun": 0.0, + "pivot_scalar": 0.05, + "w": -1.0, +} +cosmo = cosmology.Cosmology(cosmo_params_dict=cosmo_params) +model_Arinyo = ArinyoModel(fid_cosmo=cosmo) +model_Arinyo.default_params + +# %% [markdown] +# ## Plot the 3D power spectrum + +# %% +nn_k = 200 # number of k bins +nn_mu = 10 # number of mu bins +k = np.logspace(-4, 2, nn_k) +mu = np.linspace(0, 1, nn_mu) +k2d = np.tile(k[:, np.newaxis], nn_mu) # k grid for P3D +mu2d = np.tile(mu[:, np.newaxis], nn_k).T # mu grid for P3D + +kpar = np.logspace(-3, 1, nn_k) # kpar for P1D + +plin = model_Arinyo.linP_Mpc(zs[0], k) # get linear power spectrum at target z +p3d = model_Arinyo.P3D_Mpc_k_mu( + zs[0], k2d, mu2d, ari_pp=model_Arinyo.default_params +) # get P3D at target z +p1d = model_Arinyo.P1D_Mpc( + zs[0], kpar, ari_pp=model_Arinyo.default_params +) # get P1D at target z + +# %% +for ii in range(p3d.shape[1]): + plt.loglog( + k, p3d[:, ii] / plin, label=r"$<\mu>=$" + str(np.round(mu[ii], 2)) + ) +plt.xlabel(r"$k$ [Mpc]") +plt.ylabel(r"$P/P_{\rm lin}$") +plt.xlim([10**-1, 10**2]) +plt.ylim([10**-3, 1]) +plt.legend() + +# %% +iz=0 +z=zs[iz] +new_theory = theory.Theory(z=z, fid_cosmo=cosmo, config={'verbose':True}) + +# %% +# we use slightly different conventions +new_params = {} +for par in ['bias', 'beta', 'q1', 'q2', 'av', 'bv']: + new_params[par] = model_Arinyo.default_params[par] +new_params['kp_Mpc'] = model_Arinyo.default_params['kp'] +new_params['kv_Mpc'] = np.exp(np.log(model_Arinyo.default_params['kvav'])/model_Arinyo.default_params['av']) + +# %% 
+new_p3d = new_theory.get_p3d_lya_Mpc(k=k2d, mu=mu2d, params=new_params) + +# %% +for ii in [-1, 0]: + plt.loglog(k, p3d[:, ii] / plin, label=r"$<\mu>=$" + str(np.round(mu[ii], 2))) + plt.loglog(k, new_p3d[:, ii] / plin, ls=':') +plt.xlabel(r"$k$ [Mpc]") +plt.ylabel(r"$P/P_{\rm lin}$") +plt.xlim([10**-1, 40]) +plt.ylim([10**-3, 1]) +plt.legend() + +# %% +test_p3d = new_theory.get_p3d_lya_hcd_Mpc(k=k2d, mu=mu2d, params=new_params) + +# %% +for ii in [-1, 0]: + plt.loglog(k, new_p3d[:, ii] / plin, label=r"$<\mu>=$" + str(np.round(mu[ii], 2))) + plt.loglog(k, test_p3d[:, ii] / plin, ls=':') +plt.xlabel(r"$k$ [Mpc]") +plt.ylabel(r"$P/P_{\rm lin}$") +plt.xlim([10**-3, 40]) +plt.ylim([10**-3, 1]) +plt.legend() +plt.title('Impact of HCDs to the Lya P3D') + +# %% +for ii in [-1, 0]: + plt.semilogx(k, test_p3d[:, ii] / new_p3d[:, ii], label=r"$<\mu>=$" + str(np.round(mu[ii], 2))) +plt.xlabel(r"$k$ [Mpc]") +plt.ylabel(r"$P_{\rm Lya + HCD}/P_{\rm Lya}(k, \mu)$") +plt.xlim([10**-4, 40]) +#plt.ylim([10**-3, 1]) +plt.legend() +plt.title('Impact of HCDs to the Lya P3D') + +# %% +for ii in [-1, 0]: + plt.loglog(k, (test_p3d[:, ii] - new_p3d[:, ii]) / new_p3d[:, ii], label=r"$<\mu>=$" + str(np.round(mu[ii], 2))) +plt.xlabel(r"$k$ [Mpc]") +plt.ylabel(r"$(P_{\rm Lya + HCD} - P_{\rm Lya})/P_{\rm Lya}(k, \mu)$") +plt.xlim([10**-4, 40]) +plt.ylim([10**-2, 1]) +plt.legend() +plt.title('Impact of HCDs to the Lya P3D') + +# %% [markdown] +# ### Now test Px (in comoving Mpc) + +# %% +rperp = np.logspace(-2,2,100) # use the same rperp for each z. We could also input this as a list of [rperp, rperp] for each z. 
+ +# %% +# we can compute Px from within the Arinyo class using default parameters, +Px_Mpc_1 = model_Arinyo.Px_Mpc(z=z, kpar_iMpc = kpar, rperp_Mpc = rperp, ari_pp=model_Arinyo.default_params) + +# we could have also done it outside of the class with the function Px_Mpc: +Px_Mpc_2 = Px_Mpc(z, kpar, rperp, model_Arinyo.P3D_Mpc_k_mu, p3d_params=model_Arinyo.default_params) +print("Detailed method is equal to previous method:", np.allclose(Px_Mpc_1, Px_Mpc_2, atol=1e-15)) + +# %% +Px_Mpc_3 = new_theory.get_px_lya_Mpc(rt_Mpc=rperp, kp_Mpc=kpar, params=new_params) + +# %% +print("New method:", np.allclose(Px_Mpc_1, Px_Mpc_3, atol=1e-15)) + +# %% [markdown] +# ### Play with contaminants (comoving Mpc) + +# %% +px_lya = new_theory.get_px_lya_Mpc(rt_Mpc=rperp, kp_Mpc=kpar, params=new_params) + +# %% +px_lya_hcd = new_theory.get_px_lya_hcd_Mpc(rt_Mpc=rperp, kp_Mpc=kpar, params=new_params) + +# %% +for irt in [0, 50, 70]: + plt.semilogx(kpar, px_lya[irt], label='rt = {:.3f} Mpc'.format(rperp[irt])) + plt.semilogx(kpar, px_lya_hcd[irt], ls=':') +plt.legend() +plt.ylabel(r'$P_\times(r_\perp, k_\parallel)$ [Mpc]') +plt.xlabel(r'$k_\parallel$ [1/Mpc]') +plt.title('Impact of HCD contamination') + +# %% +px_metal_auto = new_theory.get_px_metal_auto_Mpc(rt_Mpc=rperp, kp_Mpc=kpar, params=new_params) +px_metal_cross = new_theory.get_px_metal_cross_Mpc(rt_Mpc=rperp, kp_Mpc=kpar, params=new_params) + +# %% +for irt in [0, 50, 70]: + plt.semilogx(kpar, px_lya[irt], label='rt = {:.3f} Mpc'.format(rperp[irt])) + plt.semilogx(kpar, px_lya[irt] + px_metal_auto[irt], ls=':') + plt.semilogx(kpar, px_lya[irt] + px_metal_cross[irt], ls='--') +plt.legend() +plt.ylabel(r'$P_\times(r_\perp, k_\parallel)$ [Mpc]') +plt.xlabel(r'$k_\parallel$ [1/Mpc]') +plt.title('Impact of metal contamination') + +# %% +px_sky = new_theory.get_px_sky_Mpc(rt_Mpc=rperp, kp_Mpc=kpar, params=new_params) + +# %% +for irt in [0, 50, 70]: + plt.semilogx(kpar, px_lya[irt], label='rt = {:.3f} Mpc'.format(rperp[irt])) + 
plt.semilogx(kpar, px_lya[irt] + 10*px_sky[irt], ls=':') +plt.legend() +plt.ylabel(r'$P_\times(r_\perp, k_\parallel)$ [Mpc]') +plt.xlabel(r'$k_\parallel$ [1/Mpc]') +plt.title('Impact of correlated sky residuals (x10)') + +# %% +cont_dist = new_theory.get_continuum_distortion_Mpc(kp_Mpc=kpar, params=new_params) + +# %% +for irt in [0, 50, 70]: + plt.semilogx(kpar, px_lya[irt], label='rt = {:.3f} Mpc'.format(rperp[irt])) + plt.semilogx(kpar, px_lya[irt] * cont_dist, ls=':') +plt.legend() +plt.ylabel(r'$P_\times(r_\perp, k_\parallel)$ [Mpc]') +plt.xlabel(r'$k_\parallel$ [1/Mpc]') +plt.title('Impact of continuum distortion') + +# %% [markdown] +# ### Now test Px (in observing units) + +# %% +theta_arc = 5.0 +k_AA = np.linspace(0.01,1.0, 1000) + +# %% +px_lya = new_theory.get_px_lya_obs(theta_arc=theta_arc, k_AA=k_AA, params=new_params) + +# %% +px_lya_hcd = new_theory.get_px_lya_hcd_obs(theta_arc=theta_arc, k_AA=k_AA, cosmo=cosmo, params=new_params) + +# %% +px_metal_auto = new_theory.get_px_metal_auto_obs(theta_arc=theta_arc, k_AA=k_AA, params=new_params) + +# %% +px_metal_cross = new_theory.get_px_metal_cross_obs(theta_arc=theta_arc, k_AA=k_AA, params=new_params) + +# %% +px_sky = new_theory.get_px_sky_obs(theta_arc=theta_arc, k_AA=k_AA, params=new_params) +if px_sky.shape[0]==1: + px_sky = px_sky.squeeze() + +# %% +cont_dist = new_theory.get_continuum_distortion_obs(k_AA=k_AA, params=new_params) + +# %% +plt.semilogx(k_AA, px_lya, label='Lya') +plt.semilogx(k_AA, px_lya_hcd, label='Lya + HCD') +plt.semilogx(k_AA, px_lya + px_sky, label='Lya + sky') +plt.semilogx(k_AA, px_lya_hcd + px_metal_auto + px_metal_cross, label='Lya + HCD + metals') +plt.semilogx(k_AA, px_lya * cont_dist, label='Lya (continuum fitted)') + +plt.legend() +plt.ylabel(r'$P_\times(\theta, q)$ [AA]') +plt.xlabel(r'q [1/AA]') +plt.title(r'Impact of contamination at z = {:.2f}, $\theta={:.2f}$ arcmin'.format(zs[0], theta_arc)) + +# %% +plt.semilogx(k_AA, px_lya_hcd / px_lya, label='Lya + HCD') 
+plt.semilogx(k_AA, (px_lya + px_sky) / px_lya, label='Lya + sky') +plt.semilogx(k_AA, (px_lya_hcd + px_metal_auto + px_metal_cross) / px_lya, label='Lya + HCD + metals') +plt.semilogx(k_AA, cont_dist, label='Continuum distortion') +plt.axhline(y=1, ls=':', color='gray') +plt.legend() +plt.ylim([0.9,1.3]) +plt.ylabel(r'$P_\times(\theta, q) ~/ ~ P_\times^{\alpha}(\theta, q) $') +plt.xlabel(r'q [1/AA]') +plt.title(r'Impact of contamination at z = {:.2f}, $\theta={:.2f}$ arcmin'.format(zs[0], theta_arc)) +plt.savefig('px_cont.png') + +# %% +a=3 + +# %% diff --git a/notebooks/test/example_user_interface_notebook.py b/notebooks/test/example_user_interface_notebook.py index 7893fb8..e8e636a 100644 --- a/notebooks/test/example_user_interface_notebook.py +++ b/notebooks/test/example_user_interface_notebook.py @@ -6,42 +6,39 @@ # extension: .py # format_name: percent # format_version: '1.3' -# jupytext_version: 1.19.1 +# jupytext_version: 1.16.4 # kernelspec: -# display_name: Python 3 +# display_name: cupix # language: python -# name: python3 +# name: cupix # --- # %% import numpy as np -from cupix.likelihood.likelihood import Likelihood -from cupix.likelihood.lya_theory import Theory -from cupix.likelihood.forestflow_emu import FF_emulator -from lace.cosmo import camb_cosmo import matplotlib.pyplot as plt -from cupix.likelihood.likelihood_parameter import LikelihoodParameter -from cupix.px_data.data_DESI_DR2 import DESI_DR2 import h5py as h5 -import cupix -import pandas as pd -cupixpath = cupix.__path__[0].rsplit('/', 1)[0] # %load_ext autoreload # %autoreload 2 +# %% +from lace.cosmo import cosmology +from cupix.px_data.data_DESI_DR2 import DESI_DR2 +from cupix.likelihood.likelihood_parameter import LikelihoodParameter +from cupix.likelihood.likelihood import Likelihood +from cupix.likelihood.lya_theory import Theory + # %% [markdown] # ### Step 1: Load some data # %% data_file = 
"../../data/px_measurements/forecast/forecast_ffcentral_real_binned_out_px-zbins_4-thetabins_9_w_res_noiseless_z0.hdf5" - data = DESI_DR2(data_file, kM_max_cut_AA=1, km_max_cut_AA=1.2) # kM_max_cut determines max of the widely-binned k; km_max_cut cuts the finely-binned k that goes into the window matrix # %% # native binning (no rebinning) Nz, Nt_a, Nk_M, Nk_m = data.U_ZaMn.shape -print(f"native binning: Nz={Nz}, Nt_a={Nt_a}, Nk_M={Nk_M}, Nk_m={Nk_m}") +print(f"native binning: Nz={Nz}, Nt_a={Nt_a}, Nk_m={Nk_m}") # rebinned values Nz, Nt_A, Nk_M = data.Px_ZAM.shape print(f"rebinned values: Nz={Nz}, Nt_A={Nt_A}, Nk_M={Nk_M}") @@ -80,17 +77,19 @@ def plot_theta_bins(data, k_M, iz, it_M): # %% z = data.z print("Input data file has redshift at,", z) -cosmo = {'H0': 67} +cosmo_dict = {'H0': 67} +cosmo = cosmology.Cosmology(cosmo_params_dict=cosmo_dict) # default_theory options are: # 'best_fit_arinyo_from_p1d': best fit to the DESI DR1 P1D data from Chaves+2026 # 'best_fit_igm_from_p1d': same but for IGM parameters # 'best_fit_arinyo_from_colore': best fit to xi from colore mocks. 
Only works for z=2.2, 2.4, 2.6, 2.8 -theory_p1d = Theory(z, cosmo_dict=cosmo, default_lya_theory='best_fit_igm_from_p1d', p3d_label='arinyo', emulator_label='forestflow_emu', k_unit='iAA', verbose=True) +theory_p1d = Theory(z, fid_cosmo=cosmo, default_lya_theory='best_fit_igm_from_p1d', p3d_label='arinyo', emulator_label='forestflow_emu', k_unit='iAA', verbose=True) # theory_colore = Theory(z, bkgd_cosmo=cosmo, default_lya_theory='best_fit_arinyo_from_colore', p3d_label='arinyo', emulator_label='forestflow_emu', k_unit='iAA', verbose=True) # %% # check full cosmo dictionary to see what other default parameters were used -theory_p1d.cosmo_dict +#theory_p1d.fid_cosmo.print_info() +theory_p1d.fid_cosmo.get_background_params() # %% theory_p1d.default_param_dict @@ -312,17 +311,19 @@ def plot_theta_bins(data, k_M, iz, it_M): # %% param_mode = 'arinyo' # enter the type of parameters you want truth_params = {} +true_cosmo_dict = {} with h5.File(data_file) as f: for cosmo_par in f['cosmo_params'].attrs.keys(): - cosmo[cosmo_par] = f['cosmo_params'].attrs[cosmo_par] + true_cosmo_dict[cosmo_par] = f['cosmo_params'].attrs[cosmo_par] if param_mode=='igm': for like_par in f['like_params'].attrs.keys(): truth_params[like_par] = f['like_params'].attrs[like_par] elif param_mode=='arinyo': for arinyo_par in f['arinyo_pars'].attrs.keys(): truth_params[arinyo_par] = f['arinyo_pars'].attrs[arinyo_par] -print("Passing forecast cosmology to theory object:", cosmo) -theory = Theory(zs, cosmo_dict=cosmo, default_lya_theory='best_fit_arinyo_from_p1d', emulator_label="forestflow_emu", verbose=True) +print("Passing forecast cosmology to theory object:", true_cosmo_dict) +true_cosmo = cosmology.Cosmology(cosmo_params_dict=true_cosmo_dict) +theory = Theory(zs, fid_cosmo=true_cosmo, default_lya_theory='best_fit_arinyo_from_p1d', emulator_label="forestflow_emu", verbose=True) # check the parameters print("Truth params are:") for p, val in truth_params.items(): @@ -338,7 +339,6 @@ def 
plot_theta_bins(data, k_M, iz, it_M): # %% like_params = [] - arinyo_par_names_iz = [par+"_0" for par in theory.arinyo_par_names] def update_likepar_list(like_params, par_names): for par in par_names: diff --git a/notebooks/test/likelihood_evals.py b/notebooks/test/likelihood_evals.py index a175b4f..f9fdc10 100644 --- a/notebooks/test/likelihood_evals.py +++ b/notebooks/test/likelihood_evals.py @@ -6,11 +6,11 @@ # extension: .py # format_name: percent # format_version: '1.3' -# jupytext_version: 1.19.1 +# jupytext_version: 1.16.4 # kernelspec: -# display_name: Python 3 +# display_name: cupix # language: python -# name: python3 +# name: cupix # --- # %% [markdown] @@ -19,9 +19,9 @@ # %% import numpy as np from cupix.likelihood.likelihood import Likelihood -from cupix.likelihood.lya_theory import set_theory +from cupix.likelihood.lya_theory import Theory from cupix.likelihood.forestflow_emu import FF_emulator -from lace.cosmo import camb_cosmo +from lace.cosmo import cosmology import matplotlib.pyplot as plt from cupix.likelihood.likelihood_parameter import LikelihoodParameter, par_index, dict_from_likeparam from cupix.px_data.data_DESI_DR2 import DESI_DR2 @@ -41,14 +41,8 @@ # mode = 'arinyo' # if the parameters you want to test are Arinyo params # %% -forecast_file = f"{cupixpath}/data/px_measurements/forecast//forecast_ffcentral_igm_real_binned_out_px-zbins_4-thetabins_9_w_res_noiseless.hdf5" -# forecast_file = f"../../data/px_measurements/forecast/forecast_ffcentral_{mode}_real_binned_out_px-zbins_4-thetabins_9_w_res_noiseless.hdf5" -forecast = DESI_DR2(forecast_file, kmax_cut_AA=1) -# get default theory from forecast -with h5.File(forecast_file, 'r') as f: - default_theory_label = f['metadata'].attrs['true_lya_theory'] -print(f"Default theory label: {default_theory_label}") - +forecast_file = f"../../data/px_measurements/forecast/forecast_ffcentral_igm_real_binned_out_px-zbins_4-thetabins_9_w_res_noiseless.hdf5" +forecast = DESI_DR2(forecast_file, 
kM_max_cut_AA=1, km_max_cut_AA=1.2) # %% [markdown] # ### Step 2: Load the emulator with Nrealizations = 1000 @@ -67,7 +61,7 @@ nrun = 0.0 w = -1.0 omk = 0 -cosmo = { +cosmo_dict = { 'H0': H0, 'omch2': omch2, 'ombh2': ombh2, @@ -78,8 +72,11 @@ 'nrun': nrun, 'w': w } +fid_cosmo = cosmology.Cosmology(cosmo_params_dict=cosmo_dict) +#fid_cosmo = cosmology.Cosmology(label='Planck18') -theory = set_theory(z, bkgd_cosmo=cosmo, default_theory=default_theory_label, p3d_label='arinyo', emulator_label='forestflow_emu', k_unit='iAA', verbose=True) +theory = Theory(z, fid_cosmo=fid_cosmo, default_lya_theory='best_fit_igm_from_p1d', + p3d_label='arinyo', emulator_label='forestflow_emu', k_unit='iAA', verbose=True) # %% [markdown] @@ -116,9 +113,8 @@ # %% # make sure theory matches forecast data at central value -theta_A_index = 5 -like_params = None # only use the default cosmo+IGM theory, not like_params as it contains the evaluated Arinyo params -Px_convolved = like.get_convolved_Px_AA(theta_A=theta_A_index, like_params=like_params)[0] +theta_A_index = 8 +Px_convolved = like.get_convolved_Px_AA(theta_A=theta_A_index, like_params=like_params)[0] # %% plt.plot(forecast.k_M_centers_AA, forecast.Px_ZAM[iz_choice, theta_A_index]) @@ -176,7 +172,7 @@ best = par_vals[i] print(f"New best fit found with chi2={chi2_i} at mF={best}") best_chi2 = chi2_i - + # %% par_i = par_index(like_params, par_to_test) @@ -193,3 +189,9 @@ plt.legend() # %% + +# %% + +# %% + +# %% diff --git a/notebooks/test/test_likelihood.py b/notebooks/test/test_likelihood.py new file mode 100644 index 0000000..17afc7d --- /dev/null +++ b/notebooks/test/test_likelihood.py @@ -0,0 +1,209 @@ +# --- +# jupyter: +# jupytext: +# formats: ipynb,py:percent +# text_representation: +# extension: .py +# format_name: percent +# format_version: '1.3' +# jupytext_version: 1.16.4 +# kernelspec: +# display_name: cupix +# language: python +# name: cupix +# --- + +# %% [markdown] +# # Setup the theory and likelihood class, and play 
with it + +# %% +import numpy as np +import matplotlib.pyplot as plt +import h5py as h5 +# %load_ext autoreload +# %autoreload 2 + +# %% +from lace.cosmo import cosmology +from cupix.likelihood.theory import Theory +from cupix.px_data.data_DESI_DR2 import DESI_DR2 + + +# %% [markdown] +# ### Setup data + +# %% +#data_file = "../../data/px_measurements/forecast/forecast_ffcentral_real_binned_out_px-zbins_4-thetabins_9_w_res_noiseless_z0.hdf5" +data_file = "../../data/px_measurements/forecast/test.hdf5" +data = DESI_DR2(data_file, kM_max_cut_AA=1, km_max_cut_AA=1.2) + +# %% +# native binning (no rebinning) +Nz, Nt_a, Nk_M, Nk_m = data.U_ZaMn.shape +print(f"native binning: Nz={Nz}, Nt_a={Nt_a}, Nk_m={Nk_m}") +# rebinned values +Nz, Nt_A, Nk_M = data.Px_ZAM.shape +print(f"rebinned values: Nz={Nz}, Nt_A={Nt_A}, Nk_M={Nk_M}") + +# %% [markdown] +# ### Set up theory + +# %% +cosmo_params = {"H0": 67.66} +cosmo = cosmology.Cosmology(cosmo_params_dict=cosmo_params) + +# %% +iz=0 +z=data.z[iz] +theory = Theory(z=z, fid_cosmo=cosmo, config={'verbose':True}) + +# %% [markdown] +# ### Set up old likelihood + +# %% +# first with the old code +from cupix.likelihood import likelihood +old_like = likelihood.Likelihood(data, theory, z=z, verbose=True) + +# %% +# get the convolved Px for a chosen theta bin +it_A = 5 +theta_bin_choice = data.theta_centers_arcmin[it_A] +print("Getting the convolved Px for theta bin {} arcmin".format(theta_bin_choice)) +Px_convolved = old_like.get_convolved_Px_AA(theta_A=it_A) + +# %% +# plot the convolved Px. 
Always has shape Nt_A, Nk_M so we need to specify the theta bin index or just squeeze the result +plt.plot(data.k_M_centers_AA, np.squeeze(Px_convolved), label='convolved Px') +# without convolution, it would have been: +Px_model = theory.get_px_AA(k_AA=data.k_M_centers_AA, theta_arcmin=theta_bin_choice, zs=[z]) +# Px_model always has shape [Nz, Nt_A, Nk_M], so we need to specify the redshift and theta bin indices or just squeeze the result +plt.plot(data.k_M_centers_AA, np.squeeze(Px_model), label='unconvolved Px') +plt.legend() +plt.xlabel(r'$k_\parallel$ (1/Ang)') +plt.ylabel('Px [Ang]') + + +# %% [markdown] +# ### Set up new likelihood + +# %% +# now with the new likelihood class +from cupix.likelihood import new_likelihood +new_like = new_likelihood.Likelihood(data=data, theory=theory, iz=iz, verbose=True) + +# %% +model_px=new_like.get_convolved_px(params={}) + +# %% +# compare old and new likelihoods +k_M = data.k_M_centers_AA +it_A = 5 +new_px = model_px[it_A] +old_px = old_like.get_convolved_Px_AA(theta_A=it_A)[0] +plt.plot(k_M, old_px, label='old like') +plt.plot(k_M, new_px, ls=':', label='new like') +plt.legend() +plt.xlabel('k [1/A]') +plt.ylabel('Px [A]') + +# %% +# compare both likelihoods +old_chi2=old_like.get_chi2() +new_chi2=new_like.get_chi2() +print(old_chi2, new_chi2) + +# %% +# chi2 scan for bias +N=20 +bias_arr = np.linspace(-0.120, -0.115, N) +chi2_arr = np.zeros(N) +new_like.verbose=False +new_like.theory.verbose=False +for i in range(N): + chi2_arr[i] = new_like.get_chi2(params={'bias':bias_arr[i]}) +plt.plot(bias_arr, chi2_arr) +plt.axhline(y=0, ls=':') +plt.xlabel('bias') +plt.ylabel('chi2'); + +# %% +# chi2 scan for ns +N=20 +ns_arr = np.linspace(0.90, 1.0, N) +chi2_arr = np.zeros(N) +new_like.verbose=False +new_like.theory.verbose=False +for i in range(N): + chi2_arr[i] = new_like.get_chi2(params={'ns':ns_arr[i]}) +plt.plot(ns_arr, chi2_arr) +plt.axhline(y=0, ls=':') +plt.xlabel('n_s') +plt.ylabel('chi2'); + +# %% [markdown] +# ### 
Time the new likelihood + +# %% [markdown] +# First with defaut params + +# %% +# %timeit chi2=new_like.get_chi2() + +# %% [markdown] +# Now with rescaled cosmology + +# %% +# %timeit chi2=new_like.get_chi2(params={'ns':0.95}) + +# %% [markdown] +# Now with new CAMB calls + +# %% +# %timeit chi2=new_like.get_chi2(params={'ombh2':0.022}) + +# %% [markdown] +# ### Try to reproduce exactly the theory used in the forecast + +# %% +f = h5.File(data_file) +dict(f) + +# %% +lya_params = { + 'bias': f['arinyo_pars'].attrs['bias_0'], + 'beta': f['arinyo_pars'].attrs['beta_0'], + 'av': f['arinyo_pars'].attrs['av_0'], + 'bv': f['arinyo_pars'].attrs['bv_0'], + 'kp_Mpc': f['arinyo_pars'].attrs['kp_0'], + 'q1': f['arinyo_pars'].attrs['q1_0'], + 'q2': f['arinyo_pars'].attrs['q2_0'], + 'kv_Mpc': np.exp( np.log(f['arinyo_pars'].attrs['kvav_0']) / f['arinyo_pars'].attrs['av_0'] ) +} + +# %% +cosmo_params = {} +for key in f['cosmo_params'].attrs.keys(): + cosmo_params[key] = f['cosmo_params'].attrs[key] +print(cosmo_params) + +# %% +params = cosmo_params | lya_params +print(params.keys()) + +# %% +new_like.verbose=True +new_like.theory.verbose=True +new_chi2=new_like.get_chi2(params=params) + +# %% +from cupix.likelihood.likelihood_parameter import likeparam_from_dict +like_params = likeparam_from_dict(params) +old_like.verbose=True +old_like.theory.verbose=True +old_chi2=old_like.get_chi2(like_params=like_params) + +# %% +print(old_chi2, new_chi2) + +# %% diff --git a/notebooks/test/test_new_minimizer.py b/notebooks/test/test_new_minimizer.py new file mode 100644 index 0000000..3832ffd --- /dev/null +++ b/notebooks/test/test_new_minimizer.py @@ -0,0 +1,146 @@ +# --- +# jupyter: +# jupytext: +# formats: ipynb,py:percent +# text_representation: +# extension: .py +# format_name: percent +# format_version: '1.3' +# jupytext_version: 1.16.4 +# kernelspec: +# display_name: cupix +# language: python +# name: cupix +# --- + +# %% [markdown] +# # Example use of the Minuit minimizer (new 
likelihood, new theory) + +# %% +import numpy as np +import matplotlib.pyplot as plt +import h5py as h5 +# %load_ext autoreload +# %autoreload 2 + +# %% +from lace.cosmo import cosmology +from cupix.px_data.data_DESI_DR2 import DESI_DR2 +from cupix.likelihood.likelihood_parameter import LikelihoodParameter, like_parameter_by_name +#from cupix.likelihood.likelihood import Likelihood +from cupix.likelihood.new_likelihood import Likelihood +from cupix.likelihood.theory import Theory +#from cupix.likelihood.iminuit_minimizer import IminuitMinimizer +from cupix.likelihood.new_minimizer import IminuitMinimizer +import cupix +cupixpath = cupix.__path__[0].rsplit('/', 1)[0] + +# %% [markdown] +# ### Step 1: Import a noiseless forecast + +# %% +forecast_file = f"{cupixpath}/data/px_measurements/forecast/forecast_ffcentral_real_binned_out_px-zbins_4-thetabins_9_w_res_noiseless_z0.hdf5" +forecast = DESI_DR2(forecast_file, kM_max_cut_AA=1, km_max_cut_AA=1.2) +iz = 0 +z = forecast.z[iz] + +# %% +true_cosmo_params = {} +with h5.File(forecast_file) as f: + for key in f['cosmo_params'].attrs.keys(): + true_cosmo_params[key] = f['cosmo_params'].attrs[key] +print(true_cosmo_params) + +# %% +# translate these to our Lya params +true_lya_params = {} +with h5.File(forecast_file) as f: + true_lya_params['bias'], = -1.0 * f['arinyo_pars'].attrs['bias_0'], + true_lya_params['beta'], = f['arinyo_pars'].attrs['beta_0'], + true_lya_params['av'], = f['arinyo_pars'].attrs['av_0'], + true_lya_params['bv'], = f['arinyo_pars'].attrs['bv_0'], + true_lya_params['kp_Mpc'], = f['arinyo_pars'].attrs['kp_0'], + true_lya_params['q1'], = f['arinyo_pars'].attrs['q1_0'], + true_lya_params['q2'], = f['arinyo_pars'].attrs['q2_0'], + true_lya_params['kv_Mpc'] = np.exp( np.log(f['arinyo_pars'].attrs['kvav_0']) / f['arinyo_pars'].attrs['av_0'] ) +print(true_lya_params) + +# %% +# use the true cosmology as fiducial +cosmo = cosmology.Cosmology(cosmo_params_dict=true_cosmo_params) + +# %% +# use the true Lya 
parameters (Arinyo / bias / beta) +config = true_lya_params | {'verbose': True} +# make your life a bit harder by changing a bit the value of beta +wrong_beta = True +if wrong_beta: + config['beta'] = 1.02*config['beta'] +print(config) +theory = Theory(z=z, fid_cosmo=cosmo, config=config) + +# %% +like = Likelihood(data=forecast, theory=theory, iz=iz, verbose=True) +#like = Likelihood(forecast, theory, z=z, verbose=False) + +# %% +# set the likelihood parameters as the Arinyo params with some fiducial values +free_param_names=['bias', 'q1'] +like_params = [] +like_params.append(LikelihoodParameter( + name='bias', + min_value=-.13, + max_value=-.1, + ini_value=-.11, + value =-.11 + )) + +like_params.append(LikelihoodParameter( + name='q1', + min_value=0, + max_value=1, + ini_value=0.3, + value =0.3 + )) +for par in like_params: + print(par.name) + + +# %% +mini = IminuitMinimizer(like, like_params, free_param_names, verbose=True) + +# %% +mini.verbose=False +mini.like.verbose=False +mini.like.theory.verbose=False +mini.minimize() + +# %% +#mini.plot_best_fit(multiply_by_k=True, every_other_theta=False, xlim=[-.01, .4], datalabel="Mock Data", show=True) + +# %% +mini.best_fit_value("q1", return_hesse=True), mini.best_fit_value("bias", return_hesse=True) + +# %% +#prob = mini.fit_probability() +#print("Probability of fit", prob) +#chi2 = mini.chi2() +#print("chi2 of fit", chi2) + + +# %% +true_lya_params + +# %% +mini.set_bestfit_like_params() +for par in mini.out_like_params: + print(par.name, par.value) + +# %% +mini.plot_ellipses("bias", "q1", nsig=3, cube_values=False, + true_vals={'bias':true_lya_params['bias'], 'q1':true_lya_params['q1']}, + xrange=[-.12, -.11], yrange=[.2,.4]) + +# %% + +# %% diff --git a/notebooks/test/test_old_minimizer.py b/notebooks/test/test_old_minimizer.py new file mode 100644 index 0000000..b21e7cf --- /dev/null +++ b/notebooks/test/test_old_minimizer.py @@ -0,0 +1,143 @@ +# --- +# jupyter: +# jupytext: +# formats: ipynb,py:percent +# 
text_representation: +# extension: .py +# format_name: percent +# format_version: '1.3' +# jupytext_version: 1.16.4 +# kernelspec: +# display_name: cupix +# language: python +# name: cupix +# --- + +# %% [markdown] +# # Example use of the Minuit minimizer (old likelihood, new theory) + +# %% +import numpy as np +import matplotlib.pyplot as plt +import h5py as h5 +# %load_ext autoreload +# %autoreload 2 + +# %% +from lace.cosmo import cosmology +from cupix.px_data.data_DESI_DR2 import DESI_DR2 +from cupix.likelihood.likelihood_parameter import LikelihoodParameter, like_parameter_by_name +from cupix.likelihood.likelihood import Likelihood +from cupix.likelihood.theory import Theory +from cupix.likelihood.iminuit_minimizer import IminuitMinimizer +import cupix +cupixpath = cupix.__path__[0].rsplit('/', 1)[0] + +# %% [markdown] +# ### Step 1: Import a noiseless forecast + +# %% +forecast_file = f"{cupixpath}/data/px_measurements/forecast/forecast_ffcentral_real_binned_out_px-zbins_4-thetabins_9_w_res_noiseless_z0.hdf5" +forecast = DESI_DR2(forecast_file, kM_max_cut_AA=1, km_max_cut_AA=1.2) +iz = 0 +z = forecast.z[iz] + +# %% +true_cosmo_params = {} +with h5.File(forecast_file) as f: + for key in f['cosmo_params'].attrs.keys(): + true_cosmo_params[key] = f['cosmo_params'].attrs[key] +print(true_cosmo_params) + +# %% +# translate these to our Lya params +true_lya_params = {} +with h5.File(forecast_file) as f: + true_lya_params['bias'], = -1.0 * f['arinyo_pars'].attrs['bias_0'], + true_lya_params['beta'], = f['arinyo_pars'].attrs['beta_0'], + true_lya_params['av'], = f['arinyo_pars'].attrs['av_0'], + true_lya_params['bv'], = f['arinyo_pars'].attrs['bv_0'], + true_lya_params['kp_Mpc'], = f['arinyo_pars'].attrs['kp_0'], + true_lya_params['q1'], = f['arinyo_pars'].attrs['q1_0'], + true_lya_params['q2'], = f['arinyo_pars'].attrs['q2_0'], + true_lya_params['kv_Mpc'] = np.exp( np.log(f['arinyo_pars'].attrs['kvav_0']) / f['arinyo_pars'].attrs['av_0'] ) +print(true_lya_params) + 
+# %% +# use the true cosmology as fiducial +cosmo = cosmology.Cosmology(cosmo_params_dict=true_cosmo_params) + +# %% +# use the true Lya parameters (Arinyo / bias / beta) +config = true_lya_params | {'verbose': True} +# make your life a bit harder by changing a bit the value of beta +wrong_beta = True +if wrong_beta: + config['beta'] = 1.02*config['beta'] +print(config) +theory = Theory(z=z, fid_cosmo=cosmo, config=config) + +# %% +#like = Likelihood(data=forecast, theory=theory, iz=iz_choice, verbose=True) +like = Likelihood(forecast, theory, z=z, verbose=False) + +# %% +# set the likelihood parameters as the Arinyo params with some fiducial values +free_param_names=['bias', 'q1'] +like_params = [] +like_params.append(LikelihoodParameter( + name='bias', + min_value=-.13, + max_value=-.1, + ini_value=-.11, + value =-.11 + )) + +like_params.append(LikelihoodParameter( + name='q1', + min_value=0, + max_value=1, + ini_value=0.3, + value =0.3 + )) +for par in like_params: + print(par.name) + + +# %% +mini = IminuitMinimizer(like, like_params, free_param_names, verbose=True) + +# %% +mini.verbose=False +mini.like.verbose=False +mini.like.theory.verbose=False +mini.minimize() + +# %% +mini.plot_best_fit(multiply_by_k=True, every_other_theta=False, xlim=[-.01, .4], datalabel="Mock Data", show=True) + +# %% +mini.best_fit_value("q1", return_hesse=True), mini.best_fit_value("bias", return_hesse=True) + +# %% +prob = mini.fit_probability() +print("Probability of fit", prob) +chi2 = mini.chi2() +print("chi2 of fit", chi2) + + +# %% +true_lya_params + +# %% +for par in mini.out_like_params: + print(par.name, par.value) + +# %% +mini.plot_ellipses("bias", "q1", nsig=3, cube_values=False, + true_vals={'bias':true_lya_params['bias'], 'q1':true_lya_params['q1']}, + xrange=[-.12, -.11], yrange=[.2,.4]) + +# %% + +# %% diff --git a/notebooks/test/test_theory.py b/notebooks/test/test_theory.py new file mode 100644 index 0000000..5aeb738 --- /dev/null +++ 
b/notebooks/test/test_theory.py @@ -0,0 +1,160 @@ +# --- +# jupyter: +# jupytext: +# formats: ipynb,py:percent +# text_representation: +# extension: .py +# format_name: percent +# format_version: '1.3' +# jupytext_version: 1.16.4 +# kernelspec: +# display_name: cupix +# language: python +# name: cupix +# --- + +# %% [markdown] +# # Set up new theory object and play with some plots + +# %% +import numpy as np +import matplotlib.pyplot as plt +# %load_ext autoreload +# %autoreload 2 + +# %% +from lace.cosmo import cosmology +from cupix.likelihood.theory import Theory + +# %% +cosmo_params = {"H0": 67.66} +cosmo = cosmology.Cosmology(cosmo_params_dict=cosmo_params) + +# %% +z=2.25 +theory = Theory(z=z, fid_cosmo=cosmo, config={'verbose':True}) + +# %% +# numpy array of kpar values (in inverse AA) +kp_AA = np.linspace(0.01, 2.0, 1000) +# numpy array of theta values (in arcmin) +theta_arc = np.linspace(0.1, 60.0, 100) +# get a 2D array prediction +px_obs = theory.get_px_lya_obs(theta_arc=theta_arc, k_AA=kp_AA, cosmo=cosmo, params={}) + +# %% +# plot the prediction for a couple of theta values +for it_A in [10, 20, 40]: + label = "theta = {:.2f}' ".format(theta_arc[it_A]) + plt.plot(kp_AA, px_obs[it_A], label=label) +plt.xlim([0,1]) +plt.xlabel('k [1/A]') +plt.ylabel('Px [A]') +plt.legend(); + +# %% [markdown] +# ### Now make predictions for different parameter values + +# %% +theory.lya_model.default_lya_params + +# %% +# this can be a list of likelihood parameters or a dictionary +params = {'bias': -0.12, 'beta': 1.6, 'q1': .3} +px_obs = theory.get_px_lya_obs(theta_arc=theta_arc, k_AA=kp_AA, cosmo=cosmo, params=params) + +# %% +for beta in [0.0, 1.0, 2.0]: + px_obs = theory.get_px_lya_obs(theta_arc=theta_arc, k_AA=kp_AA, cosmo=cosmo, params={'beta':beta}) + for it_A in [5]: + label = 'theta = {:.2f} arcmin, beta={:.2f}'.format(theta_arc[it_A], beta) + plt.plot(kp_AA, px_obs[it_A], label=label) +plt.legend() +plt.xlabel(r'$k_\parallel$ (1/Ang)') +plt.ylabel('Px [Ang]') 
+ +# %% +for q1 in [0.0, 0.5, 0.9]: + px_obs = theory.get_px_lya_obs(theta_arc=theta_arc, k_AA=kp_AA, cosmo=cosmo, params={'q1':q1}) + + for it_A in [5]: + label = 'theta = {:.2f} arcmin, q1={:.2f}'.format(theta_arc[it_A], q1) + plt.plot(kp_AA, px_obs[it_A], label=label) +plt.legend() +plt.xlabel(r'$k_\parallel$ (1/Ang)') +plt.ylabel('Px [Ang]') + +# %% [markdown] +# ### Vary cosmology and check that everything works + +# %% +N=5 +nss = np.linspace(0.94, 0.98, N) +k = np.logspace(-4, 2, 100) + + +# %% +def plot_linP_ratios(): + plins = [] + for i in range(N): + cosmo = theory.get_cosmology(cosmo=None, params={'ns':nss[i]}) + print('new cosmo', cosmo.new_params) + plins.append(cosmo.get_linP_Mpc(theory.z, k)) + iref=2 + plin_ref=plins[iref] + for i in range(N): + plt.semilogx(k, plins[i] / plins[iref], label=r'$n_s={:.3f}$'.format(nss[i])) + plt.xlabel('k [1/Mpc]') + plt.ylabel(r'ratio of $P_L (k)$') + plt.legend() + plt.ylim([0.95,1.05]) + + +# %% +plot_linP_ratios() + + +# %% +def plot_p3d_ratios(mu): + p3ds = [] + for i in range(N): + p3ds.append(theory.get_p3d_lya_Mpc(k, mu, params={'ns':nss[i]})) + iref=2 + p3d_ref=p3ds[iref] + for i in range(N): + plt.semilogx(k, p3ds[i] / p3ds[iref], label=r'$n_s={:.3f}$'.format(nss[i])) + plt.xlabel('k [1/Mpc]') + plt.ylabel(r'ratio of $P_{{3D}} (k, \mu={:.1f})$'.format(mu)) + plt.legend() + plt.ylim([0.8,1.2]) + + +# %% +plot_p3d_ratios(mu=0.0) + +# %% +plot_p3d_ratios(mu=1.0) + + +# %% +def plot_px_ratios(rt=10.0): + pxs = [] + for i in range(N): + pxs.append(theory.get_px_lya_Mpc(rt_Mpc=rt, kp_Mpc=k, params={'ns':nss[i]})) + iref=2 + px_ref=pxs[iref] + for i in range(N): + plt.semilogx(k, pxs[i] / pxs[iref], label=r'$n_s={:.3f}$'.format(nss[i])) + plt.xlabel('k [1/Mpc]') + plt.ylabel(r'ratio of $P_{{3D}} (k, \mu={:.1f})$'.format(rt)) + plt.legend() + plt.ylim([0.8,1.2]) + + +# %% +plot_px_ratios(rt=1.0) + +# %% +plot_px_ratios(rt=20.0) + +# %%