diff --git a/.github/workflows/build_check.yml b/.github/workflows/build_check.yml index a87a0a0bc..0949a3d07 100644 --- a/.github/workflows/build_check.yml +++ b/.github/workflows/build_check.yml @@ -41,7 +41,7 @@ jobs: - name: Analysing the code with pylint run: | pip install pylint - pylint clmm + pylint clmm --ignored-classes=astropy.units - name: Run the unit tests run: | pip install pytest pytest-cov diff --git a/clmm/__init__.py b/clmm/__init__.py index b5e90d638..3499c132b 100644 --- a/clmm/__init__.py +++ b/clmm/__init__.py @@ -26,4 +26,4 @@ ) from . import support -__version__ = "1.11.1" +__version__ = "1.12.0" diff --git a/clmm/cosmology/ccl.py b/clmm/cosmology/ccl.py index def7c9927..e54e8c439 100644 --- a/clmm/cosmology/ccl.py +++ b/clmm/cosmology/ccl.py @@ -76,29 +76,29 @@ def _get_param(self, key): return value def _get_Omega_m(self, z): - a = self.get_a_from_z(z) + a = self._get_a_from_z(z) return ccl.omega_x(self.be_cosmo, a, "matter") def _get_E2(self, z): - a = self.get_a_from_z(z) + a = self._get_a_from_z(z) return (ccl.h_over_h0(self.be_cosmo, a)) ** 2 def _get_E2Omega_m(self, z): - a = self.get_a_from_z(z) + a = self._get_a_from_z(z) return ccl.omega_x(self.be_cosmo, a, "matter") * (ccl.h_over_h0(self.be_cosmo, a)) ** 2 def _get_rho_m(self, z): # total matter density in physical units [Msun/Mpc3] - a = self.get_a_from_z(z) + a = self._get_a_from_z(z) return ccl.rho_x(self.be_cosmo, a, "matter", is_comoving=False) def _get_rho_c(self, z): - a = self.get_a_from_z(z) + a = self._get_a_from_z(z) return ccl.rho_x(self.be_cosmo, a, "critical", is_comoving=False) def _eval_da_z1z2_core(self, z1, z2): - a1 = np.atleast_1d(self.get_a_from_z(z1)) - a2 = np.atleast_1d(self.get_a_from_z(z2)) + a1 = np.atleast_1d(self._get_a_from_z(z1)) + a2 = np.atleast_1d(self._get_a_from_z(z2)) if len(a1) == 1 and len(a2) != 1: a1 = np.full_like(a2, a1) elif len(a2) == 1 and len(a1) != 1: @@ -110,10 +110,10 @@ def _eval_da_z1z2_core(self, z1, z2): return res def _eval_sigma_crit_core(self, z_len, z_src): - a_len = self.get_a_from_z(z_len) - a_src = self.get_a_from_z(z_src) + a_len = self._get_a_from_z(z_len) + a_src = self._get_a_from_z(z_src) return self.be_cosmo.sigma_critical(a_lens=a_len, a_source=a_src) * self.cor_factor def _eval_linear_matter_powerspectrum(self, k_vals, redshift): - return ccl.linear_matter_power(self.be_cosmo, k_vals, self.get_a_from_z(redshift)) + return ccl.linear_matter_power(self.be_cosmo, k_vals, self._get_a_from_z(redshift)) diff --git a/clmm/cosmology/parent_class.py b/clmm/cosmology/parent_class.py index 7bbef4d9f..e970b4245 100644 --- a/clmm/cosmology/parent_class.py +++ b/clmm/cosmology/parent_class.py @@ -1,4 +1,5 @@ """@file parent_class.py +CLMMCosmology abstract class """ # CLMM Cosmology object abstract superclass import numpy as np @@ -41,11 +42,91 @@ def __setitem__(self, key, val): else: raise TypeError(f"key input must be str, not {type(key)}") + # 1. 
Functions to be implemented by children classes + def _init_from_cosmo(self, be_cosmo): + raise NotImplementedError + + def _init_from_params(self, **kwargs): + raise NotImplementedError + + def _set_param(self, key, value): + raise NotImplementedError + + def _get_param(self, key): + raise NotImplementedError + + def _get_Omega_m(self, z): + raise NotImplementedError + + def _get_E2(self, z): + raise NotImplementedError + + def _get_E2Omega_m(self, z): + raise NotImplementedError + + def _get_rho_c(self, z): + raise NotImplementedError + + def _eval_da_z1z2_core(self, z1, z2): + raise NotImplementedError + + def _eval_sigma_crit_core(self, z_len, z_src): + raise NotImplementedError + + def _eval_linear_matter_powerspectrum(self, k_vals, redshift): + raise NotImplementedError + + # 2. Functions that can be used by all subclasses + + def _get_rho_m(self, z): + rhocrit_cd2018 = (3.0e16 * const.PC_TO_METER.value) / ( + 8.0 * np.pi * const.GNEWT.value * const.SOLAR_MASS.value + ) + return rhocrit_cd2018 * (z + 1) ** 3 * self["Omega_m0"] * self["h"] ** 2 + + def _eval_da_z1z2(self, z1, z2): + warning_msg = "\nSome values of z2 are lower than z1." + "\nda = np.nan for those." + return compute_for_good_redshifts( + self._eval_da_z1z2_core, z1, z2, np.nan, warning_message=warning_msg + ) + + def _eval_da(self, z): + return self._eval_da_z1z2(0.0, z) + + def _get_a_from_z(self, z): + z = np.array(z) + return 1.0 / (1.0 + z) + + def _get_z_from_a(self, a): + a = np.array(a) + return (1.0 / a) - 1.0 + + def _eval_sigma_crit(self, z_len, z_src): + warning_msg = ( + "\nSome source redshifts are lower than the cluster redshift." + + "\nSigma_crit = np.inf for those galaxies." + ) + return compute_for_good_redshifts( + self._eval_sigma_crit_core, + z_len, + z_src, + np.inf, + z1_arg_name="z_len", + z2_arg_name="z_src", + warning_message=warning_msg, + ) + + # 3. Wrapper functions for input validation + + def get_desc(self): """ - To be filled in child classes + Returns the Cosmology description. """ - raise NotImplementedError + return ( + f"{type(self).__name__}(H0={self['H0']}, Omega_dm0={self['Omega_dm0']}, " + f"Omega_b0={self['Omega_b0']}, Omega_k0={self['Omega_k0']})" + ) def init_from_params(self, H0=67.66, Omega_b0=0.049, Omega_dm0=0.262, Omega_k0=0.0): """Set the cosmology from parameters @@ -68,33 +149,6 @@ def init_from_params(self, H0=67.66, Omega_b0=0.049, Omega_dm0=0.262, Omega_k0=0 validate_argument(locals(), "Omega_k0", float, argmin=0, eqmin=True) self._init_from_params(H0=H0, Omega_b0=Omega_b0, Omega_dm0=Omega_dm0, Omega_k0=Omega_k0) - def _init_from_params(self, **kwargs): - """ - To be filled in child classes - """ - raise NotImplementedError - - def _set_param(self, key, value): - """ - To be filled in child classes - """ - raise NotImplementedError - - def _get_param(self, key): - """ - To be filled in child classes - """ - raise NotImplementedError - - def get_desc(self): - """ - Returns the Cosmology description. 
- """ - return ( - f"{type(self).__name__}(H0={self['H0']}, Omega_dm0={self['Omega_dm0']}, " - f"Omega_b0={self['Omega_b0']}, Omega_k0={self['Omega_k0']})" - ) - def set_be_cosmo(self, be_cosmo=None, H0=67.66, Omega_b0=0.049, Omega_dm0=0.262, Omega_k0=0.0): """Set the cosmology @@ -134,9 +188,6 @@ def get_Omega_m(self, z): validate_argument(locals(), "z", "float_array", argmin=0, eqmin=True) return self._get_Omega_m(z=z) - def _get_Omega_m(self, z): - raise NotImplementedError - def get_E2(self, z): r"""Gets the value of the hubble parameter (normalized at 0) @@ -159,9 +210,6 @@ def get_E2(self, z): validate_argument(locals(), "z", "float_array", argmin=0, eqmin=True) return self._get_E2(z=z) - def _get_E2(self, z): - raise NotImplementedError - def get_E2Omega_m(self, z): r"""Gets the value of the dimensionless matter density times the Hubble parameter squared (normalized at 0) @@ -187,9 +235,6 @@ def get_E2Omega_m(self, z): validate_argument(locals(), "z", "float_array", argmin=0, eqmin=True) return self._get_E2Omega_m(z=z) - def _get_E2Omega_m(self, z): - raise NotImplementedError - def get_rho_m(self, z): r"""Gets physical matter density at a given redshift. @@ -207,12 +252,6 @@ def get_rho_m(self, z): validate_argument(locals(), "z", "float_array", argmin=0, eqmin=True) return self._get_rho_m(z=z) - def _get_rho_m(self, z): - rhocrit_cd2018 = (3.0e16 * const.PC_TO_METER.value) / ( - 8.0 * np.pi * const.GNEWT.value * const.SOLAR_MASS.value - ) - return rhocrit_cd2018 * (z + 1) ** 3 * self["Omega_m0"] * self["h"] ** 2 - def get_rho_c(self, z): r"""Gets physical critical density at a given redshift. @@ -230,9 +269,6 @@ def get_rho_c(self, z): validate_argument(locals(), "z", "float_array", argmin=0, eqmin=True) return self._get_rho_c(z=z) - def _get_rho_c(self, z): - raise NotImplementedError - def eval_da_z1z2(self, z1, z2): r"""Computes the angular diameter distance between z1 and z2 @@ -260,15 +296,6 @@ def eval_da_z1z2(self, z1, z2): validate_argument(locals(), "z2", "float_array", argmin=0, eqmin=True) return self._eval_da_z1z2(z1=z1, z2=z2) - def _eval_da_z1z2(self, z1, z2): - warning_msg = "\nSome values of z2 are lower than z1." + "\nda = np.nan for those." - return compute_for_good_redshifts( - self._eval_da_z1z2_core, z1, z2, np.nan, warning_message=warning_msg - ) - - def _eval_da_z1z2_core(self, z1, z2): - raise NotImplementedError - def eval_da(self, z): r"""Computes the angular diameter distance between 0.0 and z @@ -284,14 +311,10 @@ def eval_da(self, z): ------- float, numpy.ndarray Angular diameter distance in units :math:`M\!pc` - - Notes - ----- - Describe the vectorization. 
""" if self.validate_input: validate_argument(locals(), "z", "float_array", argmin=0, eqmin=True) - return self.eval_da_z1z2(0.0, z) + return self._eval_da(z) def eval_da_a1a2(self, a1, a2=1.0): r"""This is a function to calculate the angular diameter distance @@ -340,7 +363,7 @@ def eval_da_a1a2(self, a1, a2=1.0): ) z1 = self.get_z_from_a(a2) z2 = self.get_z_from_a(a1) - return self.eval_da_z1z2(z1, z2) + return self._eval_da_z1z2(z1, z2) def get_a_from_z(self, z): """Convert redshift to scale factor @@ -357,8 +380,7 @@ def get_a_from_z(self, z): """ if self.validate_input: validate_argument(locals(), "z", "float_array", argmin=0, eqmin=True) - z = np.array(z) - return 1.0 / (1.0 + z) + return self._get_a_from_z(z) def get_z_from_a(self, a): """Convert scale factor to redshift @@ -377,8 +399,7 @@ def get_z_from_a(self, a): validate_argument( locals(), "a", "float_array", argmin=0, eqmin=True, argmax=1, eqmax=True ) - a = np.array(a) - return (1.0 / a) - 1.0 + return self._get_z_from_a(a) def rad2mpc(self, dist1, redshift): r"""Convert between radians and Mpc using the small angle approximation @@ -446,24 +467,6 @@ def eval_sigma_crit(self, z_len, z_src): validate_argument(locals(), "z_src", "float_array", argmin=0, eqmin=True) return self._eval_sigma_crit(z_len=z_len, z_src=z_src) - def _eval_sigma_crit(self, z_len, z_src): - warning_msg = ( - "\nSome source redshifts are lower than the cluster redshift." - + "\nSigma_crit = np.inf for those galaxies." - ) - return compute_for_good_redshifts( - self._eval_sigma_crit_core, - z_len, - z_src, - np.inf, - z1_arg_name="z_len", - z2_arg_name="z_src", - warning_message=warning_msg, - ) - - def _eval_sigma_crit_core(self, z_len, z_src): - raise NotImplementedError - def eval_linear_matter_powerspectrum(self, k_vals, redshift): r"""Computes the linear matter power spectrum @@ -483,6 +486,3 @@ def eval_linear_matter_powerspectrum(self, k_vals, redshift): validate_argument(locals(), "k_vals", "float_array", argmin=0) validate_argument(locals(), "redshift", float, argmin=0, eqmin=True) return self._eval_linear_matter_powerspectrum(k_vals, redshift) - - def _eval_linear_matter_powerspectrum(self, k_vals, redshift): - raise NotImplementedError diff --git a/clmm/theory/func_layer.py b/clmm/theory/func_layer.py index ac43d69bd..f2b465411 100644 --- a/clmm/theory/func_layer.py +++ b/clmm/theory/func_layer.py @@ -539,7 +539,6 @@ def compute_tangential_shear( massdef="mean", alpha_ein=None, z_src_info="discrete", - beta_kwargs=None, verbose=False, validate_input=True, ): @@ -614,17 +613,6 @@ def compute_tangential_shear( \langle \beta_s^2 \rangle = \left\langle \left(\frac{D_{LS}} {D_S}\frac{D_\infty}{D_{L,\infty}}\right)^2 \right\rangle - beta_kwargs: None, dict - Extra arguments for the `compute_beta_s_mean, compute_beta_s_square_mean` functions. - Only used if `z_src_info='distribution'`. Possible keys are: - - * 'zmin' (None, float) : Minimum redshift to be set as the source of the galaxy - when performing the sum. (default=None) - * 'zmax' (float) : Maximum redshift to be set as the source of the galaxy - when performing the sum. (default=10.0) - * 'delta_z_cut' (float) : Redshift cut so that `zmin` = `z_cl` + `delta_z_cut`. - `delta_z_cut` is ignored if `z_min` is already provided. (default=0.1) - verbose : bool, optional If True, the Einasto slope (alpha_ein) is printed out. Only availble for the NC and CCL backends. 
@@ -657,7 +645,6 @@ def compute_tangential_shear( z_cluster, z_src, z_src_info=z_src_info, - beta_kwargs=beta_kwargs, verbose=verbose, ) @@ -677,7 +664,6 @@ def compute_convergence( massdef="mean", alpha_ein=None, z_src_info="discrete", - beta_kwargs=None, verbose=False, validate_input=True, ): @@ -752,17 +738,6 @@ def compute_convergence( \langle \beta_s^2 \rangle = \left\langle \left(\frac{D_{LS}} {D_S}\frac{D_\infty}{D_{L,\infty}}\right)^2 \right\rangle - beta_kwargs: None, dict - Extra arguments for the `compute_beta_s_mean, compute_beta_s_square_mean` functions. - Only used if `z_src_info='distribution'`. Possible keys are: - - * 'zmin' (None, float) : Minimum redshift to be set as the source of the galaxy - when performing the sum. (default=None) - * 'zmax' (float) : Maximum redshift to be set as the source of the galaxy - when performing the sum. (default=10.0) - * 'delta_z_cut' (float) : Redshift cut so that `zmin` = `z_cl` + `delta_z_cut`. - `delta_z_cut` is ignored if `z_min` is already provided. (default=0.1) - verbose : bool, optional If True, the Einasto slope (alpha_ein) is printed out. Only availble for the NC and CCL backends. @@ -791,7 +766,6 @@ def compute_convergence( z_cluster, z_src, z_src_info=z_src_info, - beta_kwargs=beta_kwargs, verbose=verbose, ) @@ -811,7 +785,7 @@ def compute_reduced_tangential_shear( massdef="mean", z_src_info="discrete", approx=None, - beta_kwargs=None, + integ_kwargs=None, alpha_ein=None, validate_input=True, verbose=False, @@ -863,10 +837,11 @@ def compute_reduced_tangential_shear( * 'discrete' (default) : The redshift of sources is provided by `z_src`. It can be individual redshifts for each source galaxy when `z_src` is an array - or all sources are at the same redshift when `z_src` is a float. + or all sources are at the same redshift when `z_src` is a float + (Used for `approx=None`). * 'distribution' : A redshift distribution function is provided by `z_src`. - `z_src` must be a one dimensional function. + `z_src` must be a one dimensional function (Used when `approx=None`). * 'beta' : The averaged lensing efficiency is provided by `z_src`. `z_src` must be a tuple containing @@ -898,9 +873,8 @@ def compute_reduced_tangential_shear( {\int_{z_{min}}^{z_{max}} N(z)\text{d}z} * 'order1' : Same approach as in Weighing the Giants - III (equation 6 in - Applegate et al. 2014; https://arxiv.org/abs/1208.0605). `z_src_info` must be - either 'beta', or 'distribution' (that will be used to compute - :math:`\langle \beta_s \rangle`) + Applegate et al. 2014; https://arxiv.org/abs/1208.0605). + `z_src_info` must be 'beta': .. math:: g_t\approx\frac{\left<\beta_s\right>\gamma_{\infty}} @@ -909,8 +883,7 @@ def compute_reduced_tangential_shear( * 'order2' : Same approach as in Cluster Mass Calibration at High Redshift (equation 12 in Schrabback et al. 2017; https://arxiv.org/abs/1611.03866). - `z_src_info` must be either 'beta', or 'distribution' (that will be used - to compute :math:`\langle \beta_s \rangle` and :math:`\langle \beta_s^2 \rangle`) + `z_src_info` must be 'beta': .. math:: g_t\approx\frac{\left<\beta_s\right>\gamma_{\infty}} @@ -918,9 +891,9 @@ def compute_reduced_tangential_shear( \left(1+\left(\frac{\left<\beta_s^2\right>} {\left<\beta_s\right>^2}-1\right)\left<\beta_s\right>\kappa_{\infty}\right) - beta_kwargs: None, dict - Extra arguments for the `compute_beta_s_mean, compute_beta_s_square_mean` functions. - Only used if `z_src_info='distribution'`. 
Possible keys are: + integ_kwargs: None, dict + Extra arguments for the redshift integration (when + `approx=None, z_src_info='distribution'`). Possible keys are: * 'zmin' (None, float) : Minimum redshift to be set as the source of the galaxy when performing the sum. (default=None) @@ -962,7 +935,7 @@ def compute_reduced_tangential_shear( z_src, z_src_info=z_src_info, approx=approx, - beta_kwargs=beta_kwargs, + integ_kwargs=integ_kwargs, verbose=verbose, ) @@ -983,7 +956,7 @@ def compute_magnification( alpha_ein=None, z_src_info="discrete", approx=None, - beta_kwargs=None, + integ_kwargs=None, verbose=False, validate_input=True, ): @@ -1034,10 +1007,11 @@ def compute_magnification( * 'discrete' (default) : The redshift of sources is provided by `z_src`. It can be individual redshifts for each source galaxy when `z_src` is an array - or all sources are at the same redshift when `z_src` is a float. + or all sources are at the same redshift when `z_src` is a float + (Used for `approx=None`). * 'distribution' : A redshift distribution function is provided by `z_src`. - `z_src` must be a one dimensional function. + `z_src` must be a one dimensional function (Used when `approx=None`). * 'beta' : The averaged lensing efficiency is provided by `z_src`. `z_src` must be a tuple containing @@ -1071,26 +1045,24 @@ def compute_magnification( {\int_{z_{min}}^{z_{max}} N(z)\text{d}z} * 'order1' : Uses the weak lensing approximation of the magnification with up to - first-order terms in :math:`\kappa_{\infty}` or :math:`\gamma_{\infty}`. - `z_src_info` must be either 'beta', or 'distribution' (that will be used to - compute :math:`\langle \beta_s \rangle`) + first-order terms in :math:`\kappa_{\infty}` or :math:`\gamma_{\infty}` + (`z_src_info` must be 'beta'): .. math:: \mu \approx 1 + 2 \left<\beta_s\right>\kappa_{\infty} * 'order2' : Uses the weak lensing approximation of the magnification with up to - second-order terms in :math:`\kappa_{\infty}` or :math:`\gamma_{\infty}`. - `z_src_info` must be either 'beta', or 'distribution' (that will be used to - compute :math:`\langle \beta_s \rangle` and :math:`\langle \beta_s^2 \rangle`) + second-order terms in :math:`\kappa_{\infty}` or :math:`\gamma_{\infty}` + (`z_src_info` must be 'beta'): .. math:: \mu \approx 1 + 2 \left<\beta_s\right>\kappa_{\infty} + 3 \left<\beta_s^2\right>\kappa_{\infty}^2 + \left<\beta_s^2\right>\gamma_{\infty}^2 - beta_kwargs: None, dict - Extra arguments for the `compute_beta_s_mean, compute_beta_s_square_mean` functions. - Only used if `z_src_info='distribution'`. Possible keys are: + integ_kwargs: None, dict + Extra arguments for the redshift integration (when + `approx=None, z_src_info='distribution'`). Possible keys are: * 'zmin' (None, float) : Minimum redshift to be set as the source of the galaxy when performing the sum. (default=None) @@ -1128,7 +1100,7 @@ def compute_magnification( z_src, z_src_info=z_src_info, approx=approx, - beta_kwargs=beta_kwargs, + integ_kwargs=integ_kwargs, verbose=verbose, ) @@ -1150,7 +1122,7 @@ def compute_magnification_bias( alpha_ein=None, z_src_info="discrete", approx=None, - beta_kwargs=None, + integ_kwargs=None, verbose=False, validate_input=True, ): @@ -1217,10 +1189,11 @@ def compute_magnification_bias( * 'discrete' (default) : The redshift of sources is provided by `z_src`. It can be individual redshifts for each source galaxy when `z_src` is an array - or all sources are at the same redshift when `z_src` is a float. 
+ or all sources are at the same redshift when `z_src` is a float + (Used for `approx=None`). * 'distribution' : A redshift distribution function is provided by `z_src`. - `z_src` must be a one dimensional function. + `z_src` must be a one dimensional function (Used when `approx=None`). * 'beta' : The averaged lensing efficiency is provided by `z_src`. `z_src` must be a tuple containing @@ -1255,18 +1228,16 @@ def compute_magnification_bias( {\int_{z_{min}}^{z_{max}} N(z)\text{d}z} * 'order1' : Uses the weak lensing approximation of the magnification bias with up - to first-order terms in :math:`\kappa_{\infty}` or :math:`\gamma_{\infty}`. - `z_src_info` must be either 'beta', or 'distribution' (that will be used to - compute :math:`\langle \beta_s \rangle`) + to first-order terms in :math:`\kappa_{\infty}` or :math:`\gamma_{\infty}` + (`z_src_info` must be 'beta'): .. math:: \mu^{\alpha-1} \approx 1 + \left(\alpha-1\right)\left(2 \left<\beta_s\right>\kappa_{\infty}\right) * 'order2' : Uses the weak lensing approximation of the magnification bias with up - to second-order terms in :math:`\kappa_{\infty}` or :math:`\gamma_{\infty}`. - `z_src_info` must be either 'beta', or 'distribution' (that will be used to - compute :math:`\langle \beta_s \rangle` and :math:`\langle \beta_s^2 \rangle`) + to second-order terms in :math:`\kappa_{\infty}` or :math:`\gamma_{\infty}` + `z_src_info` must be 'beta': .. math:: \mu^{\alpha-1} \approx @@ -1277,9 +1248,9 @@ def compute_magnification_bias( &+ \left(2\alpha-1\right)\left(\alpha-1\right) \left(\left<\beta_s^2\right>\kappa_{\infty}^2\right) - beta_kwargs: None, dict - Extra arguments for the `compute_beta_s_mean, compute_beta_s_square_mean` functions. - Only used if `z_src_info='distribution'`. Possible keys are: + integ_kwargs: None, dict + Extra arguments for the redshift integration (when + `approx=None, z_src_info='distribution'`). Possible keys are: * 'zmin' (None, float) : Minimum redshift to be set as the source of the galaxy when performing the sum. (default=None) @@ -1314,7 +1285,7 @@ def compute_magnification_bias( alpha, z_src_info=z_src_info, approx=approx, - beta_kwargs=beta_kwargs, + integ_kwargs=integ_kwargs, verbose=verbose, ) diff --git a/clmm/theory/parent_class.py b/clmm/theory/parent_class.py index d27b8f7c6..b9d4fabfa 100644 --- a/clmm/theory/parent_class.py +++ b/clmm/theory/parent_class.py @@ -20,8 +20,6 @@ ) from ..utils import ( validate_argument, - compute_beta_s_mean, - compute_beta_s_square_mean, compute_beta_s_func, ) from ..redshift import ( @@ -66,6 +64,10 @@ class CLMModeling: The value used as infinite redshift """ # pylint: disable=too-many-instance-attributes + # The disable below is added to avoid a pylint error where it thinks CLMMCosmlogy + # has duplicates since both have many NotImplementedError functions + # description of bug at https://github.com/pylint-dev/pylint/issues/7213 + # pylint: disable=duplicate-code def __init__(self, validate_input=True, z_inf=1000): self.backend = None @@ -705,140 +707,7 @@ def eval_surface_density_2h( r_proj, z_cl, halobias, logkbounds, ksteps, loglbounds, lsteps ) - def _get_beta_s_mean(self, z_cl, z_src, z_src_info="discrete", beta_kwargs=None): - r"""Get mean value of the geometric lensing efficicency ratio from typical class function. - - Parameters - ---------- - z_cl : float - Galaxy cluster redshift - z_src : array_like, float, function - Information on the background source galaxy redshift(s). Value required depends on - `z_src_info` (see below). 
- z_src_info : str, optional - Type of redshift information provided by the `z_src` argument. - The following supported options are: - - * 'discrete' (default) : The redshift of sources is provided by `z_src`. - It can be individual redshifts for each source galaxy when `z_src` is an - arrayor all sources are at the same redshift when `z_src` is a float. - - * 'distribution' : A redshift distribution function is provided by `z_src`. - `z_src` must be a one dimentional function. - - * 'beta' : The averaged lensing efficiency is provided by `z_src`. - `z_src` must be a tuple containing - ( :math:`\langle \beta_s \rangle, \langle \beta_s^2 \rangle`), - the lensing efficiency and square of the lensing efficiency averaged over - the galaxy redshift distribution repectively. - - .. math:: - \langle \beta_s \rangle = \left\langle \frac{D_{LS}}{D_S}\frac{D_\infty} - {D_{L,\infty}}\right\rangle - - .. math:: - \langle \beta_s^2 \rangle = \left\langle \left(\frac{D_{LS}} - {D_S}\frac{D_\infty}{D_{L,\infty}}\right)^2 \right\rangle - - beta_kwargs: None, dict - Extra arguments for the `compute_beta_s_mean, compute_beta_s_square_mean` functions. - Only used if `z_src_info='distribution'`. Possible keys are: - - * 'zmin' (None, float) : Minimum redshift to be set as the source of the galaxy - when performing the sum. (default=None) - * 'zmax' (float) : Maximum redshift to be set as the source of the galaxy - when performing the sum. (default=10.0) - * 'delta_z_cut' (float) : Redshift cut so that `zmin` = `z_cl` + `delta_z_cut`. - `delta_z_cut` is ignored if `z_min` is already provided. (default=0.1) - - Returns - ------- - array_like, float - The averaged lensing efficiency. - """ - if z_src_info == "beta": - # z_src (tuple) is (beta_s_mean, beta_s_square_mean) - beta_s_mean = z_src[0] - elif z_src_info == "distribution": - # z_src (function) if PDZ - beta_kwargs = {} if beta_kwargs is None else beta_kwargs - beta_s_mean = compute_beta_s_mean( - z_cl, self.z_inf, self.cosmo, z_distrib_func=z_src, **beta_kwargs - ) - return beta_s_mean - - def _get_beta_s_square_mean(self, z_cl, z_src, z_src_info="discrete", beta_kwargs=None): - r"""Get mean value of the square geometric lensing efficicency ratio from typical class - function. - - Parameters - ---------- - z_cl : float - Galaxy cluster redshift - z_src : array_like, float, function - Information on the background source galaxy redshift(s). Value required depends on - `z_src_info` (see below). - z_src_info : str, optional - Type of redshift information provided by the `z_src` argument. - The following supported options are: - - * 'discrete' (default) : The redshift of sources is provided by `z_src`. - It can be individual redshifts for each source galaxy when `z_src` is an - arrayor all sources are at the same redshift when `z_src` is a float. - - * 'distribution' : A redshift distribution function is provided by `z_src`. - `z_src` must be a one dimentional function. - - * 'beta' : The averaged lensing efficiency is provided by `z_src`. - `z_src` must be a tuple containing - ( :math:`\langle \beta_s \rangle, \langle \beta_s^2 \rangle`), - the lensing efficiency and square of the lensing efficiency averaged over - the galaxy redshift distribution repectively. - - .. math:: - \langle \beta_s \rangle = \left\langle \frac{D_{LS}}{D_S}\frac{D_\infty} - {D_{L,\infty}}\right\rangle - - .. 
math:: - \langle \beta_s^2 \rangle = \left\langle \left(\frac{D_{LS}} - {D_S}\frac{D_\infty}{D_{L,\infty}}\right)^2 \right\rangle - - beta_kwargs: None, dict - Extra arguments for the `compute_beta_s_mean, compute_beta_s_square_mean` functions. - Only used if `z_src_info='distribution'`. Possible keys are: - - * 'zmin' (None, float) : Minimum redshift to be set as the source of the galaxy - when performing the sum. (default=None) - * 'zmax' (float) : Maximum redshift to be set as the source of the galaxy - when performing the sum. (default=10.0) - * 'delta_z_cut' (float) : Redshift cut so that `zmin` = `z_cl` + `delta_z_cut`. - `delta_z_cut` is ignored if `z_min` is already provided. (default=0.1) - - Returns - ------- - array_like, float - The square averaged lensing efficiency. - """ - if z_src_info == "beta": - # z_src (tuple) is (beta_s_mean, beta_s_square_mean) - beta_s_square_mean = z_src[1] - elif z_src_info == "distribution": - # z_src (function) if PDZ - beta_kwargs = {} if beta_kwargs is None else beta_kwargs - beta_s_square_mean = compute_beta_s_square_mean( - z_cl, self.z_inf, self.cosmo, z_distrib_func=z_src, **beta_kwargs - ) - return beta_s_square_mean - - def eval_tangential_shear( - self, - r_proj, - z_cl, - z_src, - z_src_info="discrete", - beta_kwargs=None, - verbose=False, - ): + def eval_tangential_shear(self, r_proj, z_cl, z_src, z_src_info="discrete", verbose=False): r"""Computes the tangential shear Parameters @@ -856,10 +725,7 @@ def eval_tangential_shear( * 'discrete' (default) : The redshift of sources is provided by `z_src`. It can be individual redshifts for each source galaxy when `z_src` is an - arrayor all sources are at the same redshift when `z_src` is a float. - - * 'distribution' : A redshift distribution function is provided by `z_src`. - `z_src` must be a one dimentional function. + array or all sources are at the same redshift when `z_src` is a float. * 'beta' : The averaged lensing efficiency is provided by `z_src`. `z_src` must be a tuple containing @@ -875,17 +741,6 @@ def eval_tangential_shear( \langle \beta_s^2 \rangle = \left\langle \left(\frac{D_{LS}} {D_S}\frac{D_\infty}{D_{L,\infty}}\right)^2 \right\rangle - beta_kwargs: None, dict - Extra arguments for the `compute_beta_s_mean, compute_beta_s_square_mean` functions. - Only used if `z_src_info='distribution'`. Possible keys are: - - * 'zmin' (None, float) : Minimum redshift to be set as the source of the galaxy - when performing the sum. (default=None) - * 'zmax' (float) : Maximum redshift to be set as the source of the galaxy - when performing the sum. (default=10.0) - * 'delta_z_cut' (float) : Redshift cut so that `zmin` = `z_cl` + `delta_z_cut`. - `delta_z_cut` is ignored if `z_min` is already provided. (default=0.1) - verbose : bool, optional If True, the Einasto slope (alpha_ein) is printed out. Only availble for the NC and CCL backends. @@ -895,6 +750,7 @@ def eval_tangential_shear( numpy.ndarray, float tangential shear """ + if self.validate_input: validate_argument(locals(), "r_proj", "float_array", argmin=0) validate_argument(locals(), "z_cl", float, argmin=0) @@ -903,7 +759,7 @@ def eval_tangential_shear( if self.halo_profile_model == "einasto" and verbose: print(f"Einasto alpha = {self._get_einasto_alpha(z_cl=z_cl)}") - + gammat = None if z_src_info == "discrete": warning_msg = ( "\nSome source redshifts are lower than the cluster redshift." 
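With the `_get_beta_s_mean`/`_get_beta_s_square_mean` helpers dropped from `CLMModeling`, the averaged efficiencies come directly from the renamed utilities (see the `clmm/utils/beta_lens.py` changes further down). A short sketch with placeholder cluster and source values, showing the distribution-based and weight-based routes:

import numpy as np
import clmm
from clmm.utils import (
    compute_beta_s_mean_from_distribution,
    compute_beta_s_mean_from_weights,
)

cosmo = clmm.Cosmology(H0=70.0, Omega_dm0=0.27, Omega_b0=0.045, Omega_k0=0.0)
z_cl, z_inf = 0.3, 1000.0  # illustrative cluster redshift and "infinite" source redshift

# Route 1: integrate over an analytic N(z); defaults to the Chang et al. (2013)
# distribution when z_distrib_func is None, with the integration capped at zmax
beta_s_from_nz = compute_beta_s_mean_from_distribution(z_cl, z_inf, cosmo, zmax=3.0)

# Route 2: average over a discrete source catalogue, optionally with shape weights
z_src = np.array([0.8, 1.1, 1.5, 2.0])    # placeholder source redshifts
weights = np.array([1.0, 0.8, 1.2, 0.5])  # placeholder shape weights
beta_s_from_cat = compute_beta_s_mean_from_weights(
    z_src, z_cl, z_inf, cosmo, shape_weights=weights
)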
@@ -919,30 +775,16 @@ def eval_tangential_shear( "z_src", r_proj, ) - elif z_src_info in ("distribution", "beta"): - beta_s_mean = self._get_beta_s_mean( - z_cl, z_src, z_src_info=z_src_info, beta_kwargs=beta_kwargs - ) - + elif z_src_info == "beta": + beta_s_mean = z_src[0] gammat_inf = self._eval_tangential_shear_core( r_proj=r_proj, z_cl=z_cl, z_src=self.z_inf ) - gammat = beta_s_mean * gammat_inf - else: - raise ValueError(f"Unsupported z_src_info (='{z_src_info}')") return gammat - def eval_convergence( - self, - r_proj, - z_cl, - z_src, - z_src_info="discrete", - beta_kwargs=None, - verbose=False, - ): + def eval_convergence(self, r_proj, z_cl, z_src, z_src_info="discrete", verbose=False): r"""Computes the mass convergence .. math:: @@ -970,9 +812,6 @@ def eval_convergence( It can be individual redshifts for each source galaxy when `z_src` is an array or all sources are at the same redshift when `z_src` is a float. - * 'distribution' : A redshift distribution function is provided by `z_src`. - `z_src` must be a one dimentional function. - * 'beta' : The averaged lensing efficiency is provided by `z_src`. `z_src` must be a tuple containing ( :math:`\langle \beta_s \rangle, \langle \beta_s^2 \rangle`), @@ -987,17 +826,6 @@ def eval_convergence( \langle \beta_s^2 \rangle = \left\langle \left(\frac{D_{LS}} {D_S}\frac{D_\infty}{D_{L,\infty}}\right)^2 \right\rangle - beta_kwargs: None, dict - Extra arguments for the `compute_beta_s_mean, compute_beta_s_square_mean` functions. - Only used if `z_src_info='distribution'`. Possible keys are: - - * 'zmin' (None, float) : Minimum redshift to be set as the source of the galaxy - when performing the sum. (default=None) - * 'zmax' (float) : Maximum redshift to be set as the source of the galaxy - when performing the sum. (default=10.0) - * 'delta_z_cut' (float) : Redshift cut so that `zmin` = `z_cl` + `delta_z_cut`. - `delta_z_cut` is ignored if `z_min` is already provided. (default=0.1) - verbose : bool, optional If True, the Einasto slope (alpha_ein) is printed out. Only availble for the NC and CCL backends. @@ -1031,16 +859,10 @@ def eval_convergence( "z_src", r_proj, ) - elif z_src_info in ("distribution", "beta"): - beta_s_mean = self._get_beta_s_mean( - z_cl, z_src, z_src_info=z_src_info, beta_kwargs=beta_kwargs - ) - + elif z_src_info == "beta": + beta_s_mean = z_src[0] kappa_inf = self._eval_convergence_core(r_proj=r_proj, z_cl=z_cl, z_src=self.z_inf) - kappa = beta_s_mean * kappa_inf - else: - raise ValueError(f"Unsupported z_src_info (='{z_src_info}')") return kappa @@ -1058,7 +880,8 @@ def _pdz_weighted_avg(self, core, pdz_func, r_proj, z_cl, integ_kwargs=None): z_cl : float Galaxy cluster redshift integ_kwargs: None, dict - Extra arguments for the redshift integration. Possible keys are: + Extra arguments for the redshift integration (when + `approx=None, z_src_info='distribution'`). Possible keys are: * 'zmin' (None, float) : Minimum redshift to be set as the source of the galaxy when performing the sum. 
(default=None) @@ -1101,6 +924,7 @@ def __integrand__(z, radius): return pdz_func(z) * core(tfunc(z, radius), kfunc(z, radius)) _integ_kwargs = {"zmax": 10.0, "delta_z_cut": 0.1} + _integ_kwargs.update({} if integ_kwargs is None else integ_kwargs) zmax = _integ_kwargs["zmax"] @@ -1117,7 +941,7 @@ def eval_reduced_tangential_shear( z_src, z_src_info="discrete", approx=None, - beta_kwargs=None, + integ_kwargs=None, verbose=False, ): r"""Computes the reduced tangential shear @@ -1140,10 +964,11 @@ def eval_reduced_tangential_shear( * 'discrete' (default) : The redshift of sources is provided by `z_src`. It can be individual redshifts for each source galaxy when `z_src` is an - array or all sources are at the same redshift when `z_src` is a float. + array or all sources are at the same redshift when `z_src` is a float + (Used for `approx=None`). * 'distribution' : A redshift distribution function is provided by `z_src`. - `z_src` must be a one dimentional function. + `z_src` must be a one dimentional function (Used when `approx=None`). * 'beta' : The averaged lensing efficiency is provided by `z_src`. `z_src` must be a tuple containing @@ -1176,8 +1001,7 @@ def eval_reduced_tangential_shear( * 'order1' : Same approach as in Weighing the Giants - III (equation 6 in Applegate et al. 2014; https://arxiv.org/abs/1208.0605). `z_src_info` must be - either 'beta', or 'distribution' (that will be used to compute - :math:`\langle \beta_s \rangle`) + 'beta': .. math:: g_t\approx\frac{\left<\beta_s\right>\gamma_{\infty}} @@ -1186,8 +1010,7 @@ def eval_reduced_tangential_shear( * 'order2' : Same approach as in Cluster Mass Calibration at High Redshift (equation 12 in Schrabback et al. 2017; https://arxiv.org/abs/1611.03866). - `z_src_info` must be either 'beta', or 'distribution' (that will be used - to compute :math:`\langle \beta_s \rangle, \langle \beta_s^2 \rangle`) + `z_src_info` must be 'beta': .. math:: g_t\approx\frac{\left<\beta_s\right>\gamma_{\infty}} @@ -1195,9 +1018,9 @@ def eval_reduced_tangential_shear( \left(1+\left(\frac{\left<\beta_s^2\right>} {\left<\beta_s\right>^2}-1\right)\left<\beta_s\right>\kappa_{\infty}\right) - beta_kwargs: None, dict - Extra arguments for the `compute_beta_s_mean, compute_beta_s_square_mean` functions. - Only used if `z_src_info='distribution'`. Possible keys are: + integ_kwargs: None, dict + Extra arguments for the redshift integration (when + `approx=None, z_src_info='distribution'`). Possible keys are: * 'zmin' (None, float) : Minimum redshift to be set as the source of the galaxy when performing the sum. (default=None) @@ -1224,6 +1047,7 @@ def eval_reduced_tangential_shear( validate_argument(locals(), "z_cl", float, argmin=0) validate_argument(locals(), "z_src_info", str) validate_argument(locals(), "approx", str, none_ok=True) + self._validate_approx_z_src_info(locals()) self._validate_z_src(locals()) if self.halo_profile_model == "einasto" and verbose: @@ -1236,7 +1060,7 @@ def eval_reduced_tangential_shear( z_src, r_proj, z_cl, - integ_kwargs=beta_kwargs, + integ_kwargs=integ_kwargs, ) elif z_src_info == "discrete": warning_msg = ( @@ -1253,16 +1077,8 @@ def eval_reduced_tangential_shear( "z_src", r_proj, ) - else: - raise ValueError( - "approx=None requires z_src_info='discrete' or 'distribution'," - f"z_src_info='{z_src_info}' was provided." 
- ) - elif approx in ("order1", "order2"): - beta_s_mean = self._get_beta_s_mean( - z_cl, z_src, z_src_info=z_src_info, beta_kwargs=beta_kwargs - ) + beta_s_mean = z_src[0] gammat_inf = self._eval_tangential_shear_core(r_proj, z_cl, z_src=self.z_inf) kappa_inf = self._eval_convergence_core(r_proj, z_cl, z_src=self.z_inf) @@ -1270,18 +1086,13 @@ def eval_reduced_tangential_shear( gt = beta_s_mean * gammat_inf / (1.0 - beta_s_mean * kappa_inf) if approx == "order2": - beta_s_square_mean = self._get_beta_s_square_mean( - z_cl, z_src, z_src_info=z_src_info, beta_kwargs=beta_kwargs - ) - + beta_s_square_mean = z_src[1] gt *= ( 1.0 + (beta_s_square_mean / (beta_s_mean * beta_s_mean) - 1.0) * beta_s_mean * kappa_inf ) - else: - raise ValueError(f"Unsupported approx (='{approx}')") return gt @@ -1292,8 +1103,8 @@ def eval_magnification( z_src, z_src_info="discrete", approx=None, - beta_kwargs=None, verbose=False, + integ_kwargs=None, ): r"""Computes the magnification @@ -1315,10 +1126,11 @@ def eval_magnification( * 'discrete' (default) : The redshift of sources is provided by `z_src`. It can be individual redshifts for each source galaxy when `z_src` is an - array or all sources are at the same redshift when `z_src` is a float. + array or all sources are at the same redshift when `z_src` is a float + (Used for `approx=None`). * 'distribution' : A redshift distribution function is provided by `z_src`. - `z_src` must be a one dimentional function. + `z_src` must be a one dimentional function (Used when `approx=None`). * 'beta' : The averaged lensing efficiency is provided by `z_src`. `z_src` must be a tuple containing @@ -1352,26 +1164,24 @@ def eval_magnification( {\int_{z_{min}}^{z_{max}} N(z)\text{d}z} * 'order1' : Uses the weak lensing approximation of the magnification with up to - first-order terms in :math:`\kappa_{\infty}` or :math:`\gamma_{\infty}`. - `z_src_info` must be either 'beta', or 'distribution' (that will be used to - compute :math:`\langle \beta_s \rangle`) + first-order terms in :math:`\kappa_{\infty}` or :math:`\gamma_{\infty}` + (`z_src_info` must be 'beta'): .. math:: \mu \approx 1 + 2 \left<\beta_s\right>\kappa_{\infty} * 'order2' : Uses the weak lensing approximation of the magnification with up to - second-order terms in :math:`\kappa_{\infty}` or :math:`\gamma_{\infty}`. - `z_src_info` must be either 'beta', or 'distribution' (that will be used to - compute :math:`\langle \beta_s \rangle`) + second-order terms in :math:`\kappa_{\infty}` or :math:`\gamma_{\infty}` + (`z_src_info` must be 'beta'): .. math:: \mu \approx 1 + 2 \left<\beta_s\right>\kappa_{\infty} + 3 \left<\beta_s^2\right>\kappa_{\infty}^2 + \left<\beta_s^2\right>\gamma_{\infty}^2 - beta_kwargs: None, dict - Extra arguments for the `compute_beta_s_mean, compute_beta_s_square_mean` functions. - Only used if `z_src_info='distribution'`. Possible keys are: + integ_kwargs: None, dict + Extra arguments for the redshift integration (when + `approx=None, z_src_info='distribution'`). Possible keys are: * 'zmin' (None, float) : Minimum redshift to be set as the source of the galaxy when performing the sum. 
(default=None) @@ -1395,6 +1205,7 @@ def eval_magnification( validate_argument(locals(), "z_cl", float, argmin=0) validate_argument(locals(), "z_src_info", str) validate_argument(locals(), "approx", str, none_ok=True) + self._validate_approx_z_src_info(locals()) self._validate_z_src(locals()) if self.halo_profile_model == "einasto" and verbose: @@ -1407,7 +1218,7 @@ def eval_magnification( z_src, r_proj, z_cl, - integ_kwargs=beta_kwargs, + integ_kwargs=integ_kwargs, ) elif z_src_info == "discrete": warning_msg = ( @@ -1424,16 +1235,8 @@ def eval_magnification( "z_src", r_proj, ) - else: - raise ValueError( - "approx=None requires z_src_info='discrete' or 'distribution'," - f"z_src_info='{z_src_info}' was provided." - ) - elif approx in ("order1", "order2"): - beta_s_mean = self._get_beta_s_mean( - z_cl, z_src, z_src_info=z_src_info, beta_kwargs=beta_kwargs - ) + beta_s_mean = z_src[0] kappa_inf = self._eval_convergence_core(r_proj, z_cl, z_src=self.z_inf) gammat_inf = self._eval_tangential_shear_core(r_proj, z_cl, z_src=self.z_inf) @@ -1441,14 +1244,10 @@ def eval_magnification( mu = 1 + 2 * beta_s_mean * kappa_inf if approx == "order2": - beta_s_square_mean = self._get_beta_s_square_mean( - z_cl, z_src, z_src_info=z_src_info, beta_kwargs=beta_kwargs - ) + beta_s_square_mean = z_src[1] # Taylor expansion with up to second-order terms mu += 3 * beta_s_square_mean * kappa_inf**2 + beta_s_square_mean * gammat_inf**2 - else: - raise ValueError(f"Unsupported approx (='{approx}')") return mu def eval_magnification_bias( @@ -1459,7 +1258,7 @@ def eval_magnification_bias( alpha, z_src_info="discrete", approx=None, - beta_kwargs=None, + integ_kwargs=None, verbose=False, ): r"""Computes the magnification bias @@ -1484,10 +1283,11 @@ def eval_magnification_bias( * 'discrete' (default) : The redshift of sources is provided by `z_src`. It can be individual redshifts for each source galaxy when `z_src` is an - array or all sources are at the same redshift when `z_src` is a float. + array or all sources are at the same redshift when `z_src` is a float + (Used for `approx=None`). * 'distribution' : A redshift distribution function is provided by `z_src`. - `z_src` must be a one dimentional function. + `z_src` must be a one dimentional function (Used when `approx=None`). * 'beta' : The averaged lensing efficiency is provided by `z_src`. `z_src` must be a tuple containing @@ -1522,18 +1322,16 @@ def eval_magnification_bias( {\int_{z_{min}}^{z_{max}} N(z)\text{d}z} * 'order1' : Uses the weak lensing approximation of the magnification bias with up - to first-order terms in :math:`\kappa_{\infty}` or :math:`\gamma_{\infty}`. - `z_src_info` must be either 'beta', or 'distribution' (that will be used to - compute :math:`\langle \beta_s \rangle`) + to first-order terms in :math:`\kappa_{\infty}` or :math:`\gamma_{\infty}` + (`z_src_info` must be 'beta'): .. math:: \mu^{\alpha-1} \approx 1 + \left(\alpha-1\right)\left(2 \left<\beta_s\right>\kappa_{\infty}\right) * 'order2' : Uses the weak lensing approximation of the magnification bias with up - to second-order terms in :math:`\kappa_{\infty}` or :math:`\gamma_{\infty}`. - `z_src_info` must be either 'beta', or 'distribution' (that will be used to - compute :math:`\langle \beta_s \rangle`) + to second-order terms in :math:`\kappa_{\infty}` or :math:`\gamma_{\infty}` + (`z_src_info` must be 'beta'): .. 
math:: \mu^{\alpha-1} \approx @@ -1544,9 +1342,9 @@ def eval_magnification_bias( &+ \left(2\alpha-1\right)\left(\alpha-1\right) \left(\left<\beta_s^2\right>\kappa_{\infty}^2\right) - beta_kwargs: None, dict - Extra arguments for the `compute_beta_s_mean, compute_beta_s_square_mean` functions. - Only used if `z_src_info='distribution'`. Possible keys are: + integ_kwargs: None, dict + Extra arguments for the redshift integration (when + `approx=None, z_src_info='distribution'`). Possible keys are: * 'zmin' (None, float) : Minimum redshift to be set as the source of the galaxy when performing the sum. (default=None) @@ -1571,6 +1369,7 @@ def eval_magnification_bias( validate_argument(locals(), "z_src_info", str) validate_argument(locals(), "alpha", "float_array") validate_argument(locals(), "approx", str, none_ok=True) + self._validate_approx_z_src_info(locals()) self._validate_z_src(locals()) if self.halo_profile_model == "einasto" and verbose: @@ -1584,7 +1383,7 @@ def eval_magnification_bias( z_src, r_proj, z_cl, - integ_kwargs=beta_kwargs, + integ_kwargs=integ_kwargs, ) elif z_src_info == "discrete": warning_msg = ( @@ -1602,16 +1401,9 @@ def eval_magnification_bias( r_proj, alpha=alpha, ) - else: - raise ValueError( - "approx=None requires z_src_info='discrete' or 'distribution'," - f"z_src_info='{z_src_info}' was provided." - ) elif approx in ("order1", "order2"): - beta_s_mean = self._get_beta_s_mean( - z_cl, z_src, z_src_info=z_src_info, beta_kwargs=beta_kwargs - ) + beta_s_mean = z_src[0] kappa_inf = self._eval_convergence_core(r_proj, z_cl, z_src=self.z_inf) gammat_inf = self._eval_tangential_shear_core(r_proj, z_cl, z_src=self.z_inf) @@ -1619,17 +1411,12 @@ def eval_magnification_bias( mu_bias = 1 + (alpha - 1) * (2 * beta_s_mean * kappa_inf) if approx == "order2": - beta_s_square_mean = self._get_beta_s_square_mean( - z_cl, z_src, z_src_info=z_src_info, beta_kwargs=beta_kwargs - ) + beta_s_square_mean = z_src[1] # Taylor expansion with up to second-order terms mu_bias += (alpha - 1) * (beta_s_square_mean * gammat_inf**2) + ( 2 * alpha - 1 ) * (alpha - 1) * beta_s_square_mean * kappa_inf**2 - else: - raise ValueError(f"Unsupported approx (='{approx}')") - return mu_bias def eval_rdelta(self, z_cl): @@ -1764,8 +1551,6 @@ def _validate_z_src(self, loc_dict): * z_src_info='beta' : z_src must be a tuple containing ( :math:`\langle \beta_s \rangle, \langle \beta_s^2 \rangle`). - Also, if approx is provided and not None, z_src_info must be 'distribution' or 'beta'. - Parameters ---------- locals_dict: dict @@ -1775,12 +1560,12 @@ def _validate_z_src(self, loc_dict): validate_argument(loc_dict, "z_src", "float_array", argmin=0) elif loc_dict["z_src_info"] == "distribution": validate_argument(loc_dict, "z_src", "function", none_ok=False) - beta_kwargs = {} if loc_dict["beta_kwargs"] is None else loc_dict["beta_kwargs"] + integ_kwargs = {} if loc_dict["integ_kwargs"] is None else loc_dict["integ_kwargs"] _def_keys = ["zmin", "zmax", "delta_z_cut"] - if any(key not in _def_keys for key in beta_kwargs): + if any(key not in _def_keys for key in integ_kwargs): raise KeyError( - f"beta_kwargs must contain only {_def_keys} keys, " - f" {beta_kwargs.keys()} provided." + f"integ_kwargs must contain only {_def_keys} keys, " + f" {integ_kwargs.keys()} provided." 
) elif loc_dict["z_src_info"] == "beta": validate_argument(loc_dict, "z_src", "array") @@ -1790,12 +1575,34 @@ def _validate_z_src(self, loc_dict): } validate_argument(beta_info, "beta_s_mean", "float_array") validate_argument(beta_info, "beta_s_square_mean", "float_array") - if loc_dict.get("approx") and loc_dict["z_src_info"] not in ( - "distribution", - "beta", - ): - approx, z_src_info = loc_dict["approx"], loc_dict["z_src_info"] - raise ValueError( - f"approx='{approx}' requires z_src_info='distribution' or 'beta', " - f"z_src_info='{z_src_info}' was provided." - ) + else: + raise ValueError(f"Unsupported z_src_info (='{loc_dict['z_src_info']}')") + + def _validate_approx_z_src_info(self, loc_dict): + r"""Validation for compatility between approx and z_src_info. The conditions are: + + * approx=None: z_src_info must be 'discrete' or 'distribution' + * approx='order1' or 'order2': z_src_info must be 'beta' + * approx=other: raises error + + Parameters + ---------- + locals_dict: dict + Should be the call locals() + """ + # check compatility between approx and z_src_info + z_src_info, approx = loc_dict["z_src_info"], loc_dict["approx"] + if approx is None: + if z_src_info not in ("discrete", "distribution"): + raise ValueError( + "approx=None requires z_src_info='discrete' or 'distribution'," + f" z_src_info='{z_src_info}' was provided." + ) + elif approx in ("order1", "order2"): + if z_src_info != "beta": + raise ValueError( + f"approx='{approx}' requires z_src_info='beta', " + f"z_src_info='{z_src_info}' was provided." + ) + else: + raise ValueError(f"Unsupported approx (='{approx}')") diff --git a/clmm/utils/__init__.py b/clmm/utils/__init__.py index cf15420ee..ae10a484f 100644 --- a/clmm/utils/__init__.py +++ b/clmm/utils/__init__.py @@ -4,9 +4,10 @@ compute_beta, compute_beta_s, compute_beta_s_func, - compute_beta_mean, - compute_beta_s_mean, - compute_beta_s_square_mean, + compute_beta_s_mean_from_distribution, + compute_beta_s_square_mean_from_distribution, + compute_beta_s_mean_from_weights, + compute_beta_s_square_mean_from_weights, ) from .boost import ( diff --git a/clmm/utils/beta_lens.py b/clmm/utils/beta_lens.py index d61f1dce8..26ce410f7 100644 --- a/clmm/utils/beta_lens.py +++ b/clmm/utils/beta_lens.py @@ -15,7 +15,7 @@ def compute_beta(z_src, z_cl, cosmo): Parameters ---------- - z_src: float + z_src : float, array_like Source galaxy redshift z_cl: float Galaxy cluster redshift @@ -24,11 +24,14 @@ def compute_beta(z_src, z_cl, cosmo): Returns ------- - float + float, array Geometric lensing efficicency """ - beta = np.heaviside(z_src - z_cl, 0) * cosmo.eval_da_z1z2(z_cl, z_src) / cosmo.eval_da(z_src) - return beta + # pylint: disable-msg=protected-access + _z_src = np.array(z_src) + return ( + np.heaviside(_z_src - z_cl, 0) * cosmo._eval_da_z1z2(z_cl, _z_src) / cosmo._eval_da(_z_src) + ) def compute_beta_s(z_src, z_cl, z_inf, cosmo): @@ -39,7 +42,7 @@ def compute_beta_s(z_src, z_cl, z_inf, cosmo): Parameters ---------- - z_src: float + z_src : float, array_like Source galaxy redshift z_cl: float Galaxy cluster redshift @@ -50,7 +53,7 @@ def compute_beta_s(z_src, z_cl, z_inf, cosmo): Returns ------- - float + numpy array Geometric lensing efficicency ratio """ beta_s = compute_beta(z_src, z_cl, cosmo) / compute_beta(z_inf, z_cl, cosmo) @@ -66,8 +69,9 @@ def compute_beta_s_func(z_src, z_cl, z_inf, cosmo, func, *args, **kwargs): Parameters ---------- - z_src: float - Source galaxy redshift + z_src : array_like, float, function + Information on the background source galaxy 
redshift(s). Value required depends on + `z_src_info` (see below). z_cl: float Galaxy cluster redshift z_inf: float @@ -83,25 +87,29 @@ def compute_beta_s_func(z_src, z_cl, z_inf, cosmo, func, *args, **kwargs): Returns ------- - float - Geometric lensing efficicency ratio + numpy array + Geometric lensing efficicency ratio for each source """ beta_s = compute_beta(z_src, z_cl, cosmo) / compute_beta(z_inf, z_cl, cosmo) beta_s_func = beta_s * func(*args, **kwargs) return beta_s_func -def compute_beta_mean(z_cl, cosmo, zmax=10.0, delta_z_cut=0.1, zmin=None, z_distrib_func=None): +def compute_beta_s_mean_from_distribution( + z_cl, z_inf, cosmo, zmax=10.0, delta_z_cut=0.1, zmin=None, z_distrib_func=None +): r"""Mean value of the geometric lensing efficicency .. math:: - \left<\beta\right> = \frac{\int_{z = z_{min}}^{z_{max}}\beta(z)N(z)} + \left<\beta_s\right> = \frac{\int_{z = z_{min}}^{z_{max}}\beta_s(z)N(z)} {\int_{z = z_{min}}^{z_{max}}N(z)} Parameters ---------- z_cl: float Galaxy cluster redshift + z_inf: float + Redshift at infinity cosmo: clmm.Cosmology CLMM Cosmology object zmax: float, optional @@ -124,8 +132,8 @@ def compute_beta_mean(z_cl, cosmo, zmax=10.0, delta_z_cut=0.1, zmin=None, z_dist if z_distrib_func is None: z_distrib_func = zdist.chang2013 - def integrand(z_i, z_cl=z_cl, cosmo=cosmo): - return compute_beta(z_i, z_cl, cosmo) * z_distrib_func(z_i) + def integrand(z_i): + return compute_beta_s(z_i, z_cl, z_inf, cosmo) * z_distrib_func(z_i) if zmin is None: zmin = z_cl + delta_z_cut @@ -133,13 +141,13 @@ def integrand(z_i, z_cl=z_cl, cosmo=cosmo): return quad(integrand, zmin, zmax)[0] / quad(z_distrib_func, zmin, zmax)[0] -def compute_beta_s_mean( +def compute_beta_s_square_mean_from_distribution( z_cl, z_inf, cosmo, zmax=10.0, delta_z_cut=0.1, zmin=None, z_distrib_func=None ): - r"""Mean value of the geometric lensing efficicency ratio + r"""Mean square value of the geometric lensing efficicency ratio .. math:: - \left<\beta_s\right> =\frac{\int_{z = z_{min}}^{z_{max}}\beta_s(z)N(z)} + \left<\beta_s^2\right> =\frac{\int_{z = z_{min}}^{z_{max}}\beta_s^2(z)N(z)} {\int_{z = z_{min}}^{z_{max}}N(z)} Parameters @@ -151,27 +159,26 @@ def compute_beta_s_mean( cosmo: clmm.Cosmology CLMM Cosmology object zmax: float - Maximum redshift to be set as the source of the galaxy when performing the sum. - Default: 10 - delta_z_cut: float, optional - Redshift interval to be summed with :math:`z_{cl}` to return :math:`z_{min}`. - This feature is not used if :math:`z_{min}` is provided by the user. Default: 0.1 + Minimum redshift to be set as the source of the galaxy\ + when performing the sum. + delta_z_cut: float + Redshift interval to be summed with $z_cl$ to return\ + $zmin$. This feature is not used if $z_min$ is provided by the user. zmin: float, None, optional Minimum redshift to be set as the source of the galaxy when performing the sum. Default: None z_distrib_func: one-parameter function, optional Redshift distribution function. Default is Chang et al (2013) distribution function. - Returns ------- float - Mean value of the geometric lensing efficicency ratio + Mean square value of the geometric lensing efficicency ratio. 
""" if z_distrib_func is None: z_distrib_func = zdist.chang2013 - def integrand(z_i, z_cl=z_cl, z_inf=z_inf, cosmo=cosmo): - return compute_beta_s(z_i, z_cl, z_inf, cosmo) * z_distrib_func(z_i) + def integrand(z_i): + return compute_beta_s(z_i, z_cl, z_inf, cosmo) ** 2 * z_distrib_func(z_i) if zmin is None: zmin = z_cl + delta_z_cut @@ -179,47 +186,77 @@ def integrand(z_i, z_cl=z_cl, z_inf=z_inf, cosmo=cosmo): return quad(integrand, zmin, zmax)[0] / quad(z_distrib_func, zmin, zmax)[0] -def compute_beta_s_square_mean( - z_cl, z_inf, cosmo, zmax=10.0, delta_z_cut=0.1, zmin=None, z_distrib_func=None -): - r"""Mean square value of the geometric lensing efficiency ratio +def compute_beta_s_mean_from_weights(z_src, z_cl, z_inf, cosmo, shape_weights): + r"""Mean square value of the geometric lensing efficicency ratio .. math:: - \left<\beta_s^2\right> =\frac{\int_{z = z_{min}}^{z_{max}}\beta_s^2(z)N(z)} - {\int_{z = z_{min}}^{z_{max}}N(z)} + \left<\beta_s\right> =\frac{\sum_i \beta_s(z_i)w_i} + {\sum_i w_i} Parameters ---------- + z_src: float, array_like + Invididual source galaxies redshift. z_cl: float - Galaxy cluster redshift + Galaxy cluster redshift. z_inf: float - Redshift at infinity + Redshift at infinity. cosmo: clmm.Cosmology CLMM Cosmology object - zmax: float - Maximum redshift to be set as the source of the galaxy when performing the sum. - Default: 10 - delta_z_cut: float, optional - Redshift interval to be summed with :math:`z_{cl}` to return :math:`z_{min}`. - This feature is not used if :math:`z_{min}` is provided by the user. Default: 0.1 - zmin: float, None, optional - Minimum redshift to be set as the source of the galaxy when performing the sum. - Default: None - z_distrib_func: one-parameter function, optional - Redshift distribution function. Default is Chang et al (2013) distribution function. + shape_weights: float, array_like + Individual source galaxies shape weights.\ + If not None, the function uses Eq.(13) from\ + https://arxiv.org/pdf/1611.03866.pdf with evenly distributed\ + weights summing to one. Returns ------- float - Mean square value of the geometric lensing efficicency ratio. + Mean value of the geometric lensing efficicency ratio. """ - if z_distrib_func is None: - z_distrib_func = zdist.chang2013 - - def integrand(z_i, z_cl=z_cl, z_inf=z_inf, cosmo=cosmo): - return compute_beta_s(z_i, z_cl, z_inf, cosmo) ** 2 * z_distrib_func(z_i) + _z_src = np.array(z_src) + if shape_weights is None: + _shape_weights = np.ones_like(_z_src) + else: + _shape_weights = np.array(shape_weights) + beta_s = compute_beta_s(_z_src, z_cl, z_inf, cosmo) + return (_shape_weights * beta_s).sum() / _shape_weights.sum() + + +def compute_beta_s_square_mean_from_weights( + z_src, + z_cl, + z_inf, + cosmo, + shape_weights, +): + r"""Mean square value of the geometric lensing efficicency ratio - if zmin is None: - zmin = z_cl + delta_z_cut + .. math:: + \left<\beta_s^2\right> =\frac{\sum_i \beta_s^2(z_i)w_i} + {\sum_i w_i} - return quad(integrand, zmin, zmax)[0] / quad(z_distrib_func, zmin, zmax)[0] + Parameters + ---------- + z_src: float, array_like + Invididual source galaxies redshift. + z_cl: float + Galaxy cluster redshift. + z_inf: float + Redshift at infinity. + cosmo: clmm.Cosmology + CLMM Cosmology object + shape_weights: float, array_like + Individual source galaxies shape weights. + Returns + ------- + float + Mean square value of the geometric lensing efficicency ratio. 
+ """ + _z_src = np.array(z_src) + if shape_weights is None: + _shape_weights = np.ones_like(_z_src) + else: + _shape_weights = np.array(shape_weights) + beta_s = compute_beta_s(_z_src, z_cl, z_inf, cosmo) + return (_shape_weights * beta_s**2).sum() / _shape_weights.sum() diff --git a/examples/demo_theory_functionality.ipynb b/examples/demo_theory_functionality.ipynb index 31d720432..ce22a18b8 100644 --- a/examples/demo_theory_functionality.ipynb +++ b/examples/demo_theory_functionality.ipynb @@ -285,16 +285,29 @@ "metadata": {}, "outputs": [], "source": [ + "# Compute first beta\n", + "beta_kwargs = {\n", + " \"z_cl\": z_cl,\n", + " \"z_inf\": 10.0,\n", + " \"cosmo\": cosmo,\n", + " #'zmax' :zsrc_max,\n", + " #'delta_z_cut': delta_z_cut,\n", + " #'zmin': None,\n", + " \"z_distrib_func\": z_distrib_func,\n", + "}\n", + "beta_s_mean = clmm.utils.compute_beta_s_mean_from_distribution(**beta_kwargs)\n", + "beta_s_square_mean = clmm.utils.compute_beta_s_square_mean_from_distribution(**beta_kwargs)\n", + "\n", "gt_z = m.compute_reduced_tangential_shear(\n", " r3d,\n", " mdelta=cluster_mass,\n", " cdelta=cluster_concentration,\n", " z_cluster=z_cl,\n", - " z_src=z_distrib_func,\n", + " z_src=[beta_s_mean, beta_s_square_mean],\n", " cosmo=cosmo,\n", " delta_mdef=mass_Delta,\n", " halo_profile_model=density_profile_parametrization,\n", - " z_src_info=\"distribution\",\n", + " z_src_info=\"beta\",\n", " approx=\"order2\",\n", ")" ] @@ -496,7 +509,7 @@ " cosmo=cosmo,\n", " halo_profile_model=\"einasto\",\n", " alpha_ein=0.17,\n", - " use_projected_quad=True, # use quad_vec\n", + " use_projected_quad=True, # use quad_vec\n", " verbose=True,\n", ")\n", "\n", @@ -518,7 +531,7 @@ " cosmo=cosmo,\n", " halo_profile_model=\"einasto\",\n", " alpha_ein=0.17,\n", - " use_projected_quad=False, # default\n", + " use_projected_quad=False, # default\n", " verbose=True,\n", ")\n", "\n", diff --git a/examples/demo_theory_functionality_diff_z_types.ipynb b/examples/demo_theory_functionality_diff_z_types.ipynb index 61f3e275e..04e6bfafa 100644 --- a/examples/demo_theory_functionality_diff_z_types.ipynb +++ b/examples/demo_theory_functionality_diff_z_types.ipynb @@ -31,7 +31,7 @@ "import os\n", "\n", "## Uncomment the following line if you want to use a specific modeling backend among 'ct' (cluster-toolkit), 'ccl' (CCL) or 'nc' (Numcosmo). 
Default is 'ccl'\n", - "# os.environ['CLMM_MODELING_BACKEND'] = 'nc'" + "#os.environ['CLMM_MODELING_BACKEND'] = 'nc'" ] }, { @@ -216,7 +216,7 @@ "source": [ "z_inf = 1000\n", "\n", - "beta_s_mean = clmm.utils.compute_beta_s_mean(\n", + "beta_s_mean = clmm.utils.compute_beta_s_mean_from_distribution(\n", " cluster_z,\n", " z_inf,\n", " cosmo,\n", @@ -225,7 +225,7 @@ " zmin=None,\n", " z_distrib_func=model_z_distrib_dict[\"func\"],\n", ")\n", - "beta_s_square_mean = clmm.utils.compute_beta_s_square_mean(\n", + "beta_s_square_mean = clmm.utils.compute_beta_s_square_mean_from_distribution(\n", " cluster_z,\n", " z_inf,\n", " cosmo,\n", @@ -239,6 +239,40 @@ "display(Math(beta_sq_label(beta_s_square_mean)))" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "It is also possible to compute $\\langle\\beta_s\\rangle$ and $\\langle\\beta_s^2\\rangle$ using galaxy shape weights:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "z_inf = 1000\n", + "\n", + "beta_s_mean_wts = clmm.utils.compute_beta_s_mean_from_weights(\n", + " source_catalog['z'],\n", + " cluster_z,\n", + " z_inf,\n", + " cosmo,\n", + " shape_weights=None,\n", + ")\n", + "beta_s_square_mean_wts = clmm.utils.compute_beta_s_square_mean_from_weights(\n", + " source_catalog['z'],\n", + " cluster_z,\n", + " z_inf,\n", + " cosmo,\n", + " shape_weights=None,\n", + ")\n", + "\n", + "display(Math(beta_label(beta_s_mean_wts)))\n", + "display(Math(beta_sq_label(beta_s_square_mean_wts)))" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -430,7 +464,6 @@ " delta_mdef=500,\n", " massdef=\"critical\",\n", " z_src_info=\"distribution\",\n", - " beta_kwargs={\"zmax\": zsrc_max},\n", " approx=None,\n", ")" ] @@ -439,65 +472,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "#### Cases 3 : Redshift distribution and approximation" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "If we want a faster approach we can use an approximation for the reduced shear, using 1 or 2 order of Taylor expansion for the expression of the reduced shear." 
-    ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "%%time\n",
-    "gt_distribution_order1 = clmm.theory.compute_reduced_tangential_shear(\n",
-    "    rr,\n",
-    "    cluster_m,\n",
-    "    concentration,\n",
-    "    cluster_z,\n",
-    "    model_z_distrib_dict[\"func\"],\n",
-    "    cosmo,\n",
-    "    delta_mdef=500,\n",
-    "    massdef=\"critical\",\n",
-    "    z_src_info=\"distribution\",\n",
-    "    beta_kwargs={\"zmax\": zsrc_max},\n",
-    "    approx=\"order1\",\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "%%time\n",
-    "gt_distribution_order2 = clmm.theory.compute_reduced_tangential_shear(\n",
-    "    rr,\n",
-    "    cluster_m,\n",
-    "    concentration,\n",
-    "    cluster_z,\n",
-    "    model_z_distrib_dict[\"func\"],\n",
-    "    cosmo,\n",
-    "    delta_mdef=500,\n",
-    "    massdef=\"critical\",\n",
-    "    z_src_info=\"distribution\",\n",
-    "    beta_kwargs={\"zmax\": zsrc_max},\n",
-    "    approx=\"order2\",\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### Cases 4 : Mean lensing efficiencies and approximation"
+    "#### Cases 3 : Mean lensing efficiencies and approximation"
   ]
  },
  {
@@ -505,7 +480,7 @@
   "metadata": {},
   "source": [
    "Finally, we can also model the reduced shear if the only information we have about the source redshift distribution is through the mean lensing efficiency parameters $\langle\beta_s\rangle$ and $\langle\beta_s^2\rangle$. \\\n",
-    "In this case, we need to use an approximation for the formula. This is the fastest approach."
+    "In this case, we need to use an approximation for the formula. This is the fastest approach. Bear in mind that the user has to pre-compute the beta parameters and pass them as arguments to the function."
   ]
  },
  {
@@ -525,8 +500,7 @@
    "    delta_mdef=500,\n",
    "    massdef=\"critical\",\n",
    "    z_src_info=\"beta\",\n",
-    "    beta_kwargs={\"zmax\": zsrc_max},\n",
-    "    approx=\"order1\",\n",
+    "    approx=\"order1\"\n",
    ")"
   ]
  },
@@ -547,8 +521,7 @@
    "    delta_mdef=500,\n",
    "    massdef=\"critical\",\n",
    "    z_src_info=\"beta\",\n",
-    "    beta_kwargs={\"zmax\": zsrc_max},\n",
-    "    approx=\"order2\",\n",
+    "    approx=\"order2\"\n",
    ")"
   ]
  },
@@ -556,7 +529,7 @@
   "cell_type": "markdown",
   "metadata": {},
   "source": [
-    "## 3 - Comparison of the four cases for the reduced tangnetial shear"
+    "## 3 - Comparison of the three cases for the reduced tangential shear"
   ]
  },
  {
@@ -600,8 +573,6 @@
    "    base=(gt_discrete, \"k.-\", dict(label=\"discrete\")),\n",
    "    others=(\n",
    "        (gt_distribution_no_approx, \"r.-\", dict(label=\"distribution, no approx\")),\n",
-    "        (gt_distribution_order1, \"ro-\", dict(label=\"distribution, order 1 approx\")),\n",
-    "        (gt_distribution_order2, \"rd-\", dict(label=\"distribution, order 2 approx\")),\n",
    "        (gt_beta_1, \"b--\", dict(label=\"beta, order 1 approx\")),\n",
    "        (gt_beta_2, \"bx-\", dict(label=\"beta, order 2 approx\")),\n",
    "    ),\n",
@@ -616,7 +587,7 @@
   "source": [
    "All modeled profiles give similar results. They do not correspond to the profile computed from the data in the inner part, because of different ways of constructing the profiles (taking the average radial point and reduced shear value in a bin or computing the average expected reduced shear at a given radius).\n",
    "\n",
-    "The profiles computed using an approximation for the reduced shear formula are lower by a few percents, especially in the inner region. The profiles computed from a redshift distribution or a known source redshift differ at the subpercent level."
+ "The profiles computed using an approximation for the reduced shear formula are lower by a few percents, especially in the inner region. The profile computed from a redshift distribution differs at the subpercent level." ] }, { @@ -630,7 +601,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Here we will just compute models for cases 1, 2 and 4. For the shear and convergence there is no need for an approximated formula. " + "Here we will just compute models for cases 1, 2 and 3. For the shear and convergence there is no need for an approximated formula. " ] }, { @@ -651,25 +622,13 @@ " cosmo,\n", " delta_mdef=500,\n", " massdef=\"critical\",\n", - " z_src_info=\"discrete\",\n", + " z_src_info=\"discrete\"\n", " )\n", " )\n", " for _r in rr\n", " ]\n", ")\n", "\n", - "gammat_distribution = clmm.theory.compute_tangential_shear(\n", - " rr,\n", - " cluster_m,\n", - " concentration,\n", - " cluster_z,\n", - " model_z_distrib_dict[\"func\"],\n", - " cosmo,\n", - " delta_mdef=500,\n", - " massdef=\"critical\",\n", - " z_src_info=\"distribution\",\n", - " beta_kwargs={\"zmax\": zsrc_max},\n", - ")\n", "\n", "gammat_beta = clmm.theory.compute_tangential_shear(\n", " rr,\n", @@ -680,8 +639,7 @@ " cosmo,\n", " delta_mdef=500,\n", " massdef=\"critical\",\n", - " z_src_info=\"beta\",\n", - " beta_kwargs={\"zmax\": zsrc_max},\n", + " z_src_info=\"beta\"\n", ")" ] }, @@ -695,7 +653,6 @@ " rr,\n", " base=(gammat_discrete, \"k.-\", dict(label=\"discrete\")),\n", " others=(\n", - " (gammat_distribution, \"rx-\", dict(label=\"distribution, no approx\")),\n", " (gammat_beta, \"b--\", dict(label=\"beta, no approx\")),\n", " ),\n", " ylabel=\"$\\gamma_t$\",\n", @@ -727,18 +684,6 @@ " ]\n", ")\n", "\n", - "kappa_distribution = clmm.theory.compute_convergence(\n", - " rr,\n", - " cluster_m,\n", - " concentration,\n", - " cluster_z,\n", - " model_z_distrib_dict[\"func\"],\n", - " cosmo,\n", - " delta_mdef=500,\n", - " massdef=\"critical\",\n", - " z_src_info=\"distribution\",\n", - " beta_kwargs={\"zmax\": zsrc_max},\n", - ")\n", "\n", "kappa_beta = clmm.theory.compute_convergence(\n", " rr,\n", @@ -749,8 +694,7 @@ " cosmo,\n", " delta_mdef=500,\n", " massdef=\"critical\",\n", - " z_src_info=\"beta\",\n", - " beta_kwargs={\"zmax\": zsrc_max},\n", + " z_src_info=\"beta\"\n", ")" ] }, @@ -764,7 +708,6 @@ " rr,\n", " base=(kappa_discrete, \"k.-\", dict(label=\"discrete\")),\n", " others=(\n", - " (kappa_distribution, \"rx-\", dict(label=\"distribution, no approx\")),\n", " (kappa_beta, \"b--\", dict(label=\"beta, no approx\")),\n", " ),\n", " ylabel=\"$\\kappa_t$\",\n", @@ -796,19 +739,6 @@ " ]\n", ")\n", "\n", - "mu_distribution = clmm.theory.compute_magnification(\n", - " rr,\n", - " cluster_m,\n", - " concentration,\n", - " cluster_z,\n", - " model_z_distrib_dict[\"func\"],\n", - " cosmo,\n", - " delta_mdef=500,\n", - " massdef=\"critical\",\n", - " z_src_info=\"distribution\",\n", - " approx=None,\n", - " beta_kwargs={\"zmax\": zsrc_max},\n", - ")\n", "\n", "mu_beta_1 = clmm.theory.compute_magnification(\n", " rr,\n", @@ -820,8 +750,7 @@ " delta_mdef=500,\n", " massdef=\"critical\",\n", " z_src_info=\"beta\",\n", - " approx=\"order1\",\n", - " beta_kwargs={\"zmax\": zsrc_max},\n", + " approx=\"order1\"\n", ")\n", "\n", "mu_beta_2 = clmm.theory.compute_magnification(\n", @@ -834,8 +763,7 @@ " delta_mdef=500,\n", " massdef=\"critical\",\n", " z_src_info=\"beta\",\n", - " approx=\"order2\",\n", - " beta_kwargs={\"zmax\": zsrc_max},\n", + " approx=\"order2\"\n", ")" ] }, @@ -849,7 +777,6 @@ " rr,\n", " 
base=(mu_discrete, \"k.-\", dict(label=\"discrete\")),\n", " others=(\n", - " (mu_distribution, \"rs-\", dict(label=\"distribution, no approx\")),\n", " (mu_beta_1, \"bx-\", dict(label=\"beta, order 1 approx\")),\n", " (mu_beta_2, \"b--\", dict(label=\"beta, order 2 approx\")),\n", " ),\n", @@ -885,20 +812,6 @@ " ]\n", ")\n", "\n", - "mu_bias_distribution = clmm.theory.compute_magnification_bias(\n", - " rr,\n", - " alpha,\n", - " cluster_m,\n", - " concentration,\n", - " cluster_z,\n", - " model_z_distrib_dict[\"func\"],\n", - " cosmo,\n", - " delta_mdef=500,\n", - " massdef=\"critical\",\n", - " z_src_info=\"distribution\",\n", - " approx=None,\n", - " beta_kwargs={\"zmax\": zsrc_max},\n", - ")\n", "\n", "mu_bias_beta_1 = clmm.theory.compute_magnification_bias(\n", " rr,\n", @@ -911,8 +824,7 @@ " delta_mdef=500,\n", " massdef=\"critical\",\n", " z_src_info=\"beta\",\n", - " approx=\"order1\",\n", - " beta_kwargs={\"zmax\": zsrc_max},\n", + " approx=\"order1\"\n", ")\n", "\n", "mu_bias_beta_2 = clmm.theory.compute_magnification_bias(\n", @@ -926,8 +838,7 @@ " delta_mdef=500,\n", " massdef=\"critical\",\n", " z_src_info=\"beta\",\n", - " approx=\"order2\",\n", - " beta_kwargs={\"zmax\": zsrc_max},\n", + " approx=\"order2\"\n", ")" ] }, @@ -941,7 +852,6 @@ " rr,\n", " base=(mu_bias_discrete, \"k.-\", dict(label=\"discrete\")),\n", " others=(\n", - " (mu_bias_distribution, \"rs-\", dict(label=\"distribution, no approx\")),\n", " (mu_bias_beta_1, \"bx-\", dict(label=\"beta, order 1 approx\")),\n", " (mu_bias_beta_2, \"b--\", dict(label=\"beta, order 2 approx\")),\n", " ),\n", @@ -974,7 +884,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.6" + "version": "3.11.0" } }, "nbformat": 4, diff --git a/examples/demo_theory_functionality_oo.ipynb b/examples/demo_theory_functionality_oo.ipynb index 2274307df..b0d773f0b 100644 --- a/examples/demo_theory_functionality_oo.ipynb +++ b/examples/demo_theory_functionality_oo.ipynb @@ -132,8 +132,19 @@ "\n", "gt = moo.eval_reduced_tangential_shear(r3d, z_cl, z_src)\n", "# Lensing quantities assuming sources follow a given redshift distribution.\n", + "\n", + "# Compute first beta\n", + "beta_kwargs = {\n", + " \"z_cl\": z_cl,\n", + " \"z_inf\": 10.0,\n", + " \"cosmo\": cosmo,\n", + " \"z_distrib_func\": z_distrib_func,\n", + "}\n", + "beta_s_mean = clmm.utils.compute_beta_s_mean_from_distribution(**beta_kwargs)\n", + "beta_s_square_mean = clmm.utils.compute_beta_s_square_mean_from_distribution(**beta_kwargs)\n", + "\n", "gt_z = moo.eval_reduced_tangential_shear(\n", - " r3d, z_cl, z_distrib_func, z_src_info=\"distribution\", approx=\"order2\"\n", + " r3d, z_cl, [beta_s_mean, beta_s_square_mean], z_src_info=\"beta\", approx=\"order2\"\n", ")\n", "\n", "mu = moo.eval_magnification(r3d, z_cl, z_src)\n", diff --git a/examples/mass_fitting/Example3_Fit_Halo_Mass_to_Shear_Catalog.ipynb b/examples/mass_fitting/Example3_Fit_Halo_Mass_to_Shear_Catalog.ipynb index 0cbce9bb9..6c9c07b41 100644 --- a/examples/mass_fitting/Example3_Fit_Halo_Mass_to_Shear_Catalog.ipynb +++ b/examples/mass_fitting/Example3_Fit_Halo_Mass_to_Shear_Catalog.ipynb @@ -444,46 +444,45 @@ "# The default mass is M200m; use massdef='critical' for M200c.\n", "\n", "\n", - "def model_reduced_tangential_shear_applegate14(logm, catalog, profile):\n", - " z_values = catalog.galcat[\"z\"]\n", - " z_cl = cluster_z\n", + "def _model_reduced_tangential_shear(logm, catalog, profile, approx):\n", + " beta_kwargs = {\n", + " \"z_cl\": cluster_z,\n", + " 
\"z_inf\": 10.0,\n", + " \"cosmo\": cosmo,\n", + " #'zmax' :zsrc_max,\n", + " #'delta_z_cut': delta_z_cut,\n", + " #'zmin': None,\n", + " # We provide the redshift distribution (default: Chang et al. 2013) for calculating the beta_s statistics\n", + " \"z_distrib_func\": clmm.redshift.distributions.chang2013,\n", + " }\n", + " beta_s_mean = clmm.utils.compute_beta_s_mean_from_distribution(**beta_kwargs)\n", + " beta_s_square_mean = clmm.utils.compute_beta_s_square_mean_from_distribution(**beta_kwargs)\n", + "\n", " gt_model = clmm.compute_reduced_tangential_shear(\n", " r_proj=profile[\"radius\"], # Radial component of the profile\n", " mdelta=10**logm, # Mass of the cluster [M_sun]\n", " cdelta=concentration, # Concentration of the cluster\n", " z_cluster=cluster_z, # Redshift of the cluster\n", - " z_src=clmm.redshift.distributions.chang2013, # We provide the redshift distribution (default: Chang et al. 2013) for calculating the beta_s statistics\n", + " z_src=[beta_s_mean, beta_s_square_mean],\n", " cosmo=cosmo,\n", " delta_mdef=200,\n", " # massdef='critical',\n", " halo_profile_model=\"nfw\",\n", - " z_src_info=\"distribution\",\n", - " approx=\"order1\",\n", - " # beta_s_mean=None, # beta_s_mean and beta_s_square_mean can be provided (default: None)\n", + " z_src_info=\"beta\",\n", + " approx=approx,\n", + " # beta_s_mean=None,\n", " # beta_s_square_mean=None\n", " )\n", " return gt_model\n", "\n", "\n", - "# Similarly, we also consider the method from Schrabback et al. (2018), called 'order12' in CLMM.\n", + "def model_reduced_tangential_shear_applegate14(logm, catalog, profile):\n", + " return _model_reduced_tangential_shear(logm, catalog, profile, approx=\"order1\")\n", + "\n", + "\n", + "# Similarly, we also consider the method from Schrabback et al. (2018), called 'order2' in CLMM.\n", "def model_reduced_tangential_shear_schrabback18(logm, catalog, profile):\n", - " z_values = catalog.galcat[\"z\"]\n", - " gt_model = clmm.compute_reduced_tangential_shear(\n", - " r_proj=profile[\"radius\"], # Radial component of the profile\n", - " mdelta=10**logm, # Mass of the cluster [M_sun]\n", - " cdelta=concentration, # Concentration of the cluster\n", - " z_cluster=cluster_z, # Redshift of the cluster\n", - " z_src=clmm.redshift.distributions.chang2013, # We provide the redshift distribution (default: Chang et al. 
2013) for calculating the beta_s statistics\n", - " cosmo=cosmo,\n", - " delta_mdef=200,\n", - " # massdef='critical',\n", - " halo_profile_model=\"nfw\",\n", - " z_src_info=\"distribution\",\n", - " approx=\"order2\",\n", - " # beta_s_mean=None,\n", - " # beta_s_square_mean=None\n", - " )\n", - " return gt_model" + " return _model_reduced_tangential_shear(logm, catalog, profile, approx=\"order2\")" ] }, { @@ -551,25 +550,25 @@ " r,\n", " gt_model_ideal_zdistrib,\n", " \"-b\",\n", - " label=r\"model w/ zdistrib, $M_{input}$ = %.2e Msun\" % cluster_m,\n", + " label=r\"w/ zdistrib, $M_{input}$ = %.2e Msun\" % cluster_m,\n", ")\n", "plt.loglog(\n", " r,\n", " gt_model_ideal_singlez,\n", " \"-y\",\n", - " label=r\"model w/o zdistrib, $M_{input}$ = %.2e Msun\" % cluster_m,\n", + " label=r\"w/o zdistrib, $M_{input}$ = %.2e Msun\" % cluster_m,\n", ")\n", "plt.loglog(\n", " r,\n", " gt_model_ideal_applegate14,\n", " \":r\",\n", - " label=r\"model applegate14, $M_{input}$ = %.2e Msun\" % cluster_m,\n", + " label=r\"applegate14, $M_{input}$ = %.2e Msun\" % cluster_m,\n", ")\n", "plt.loglog(\n", " r,\n", " gt_model_ideal_schrabback18,\n", " \":g\",\n", - " label=r\"model schrabback18, $M_{input}$ = %.2e Msun\" % cluster_m,\n", + " label=r\"schrabback18, $M_{input}$ = %.2e Msun\" % cluster_m,\n", ")\n", "\n", "plt.xlabel(\"r [Mpc]\", fontsize=20)\n", @@ -594,25 +593,25 @@ " r,\n", " gt_model_noisy_zdistrib,\n", " \"-b\",\n", - " label=r\"model w/ zdistrib, $M_{input}$ = %.2e Msun\" % cluster_m,\n", + " label=r\"w/ zdistrib, $M_{input}$ = %.2e Msun\" % cluster_m,\n", ")\n", "plt.loglog(\n", " r,\n", " gt_model_noisy_singlez,\n", " \"-y\",\n", - " label=r\"model w/o zdistrib, $M_{input}$ = %.2e Msun\" % cluster_m,\n", + " label=r\"w/o zdistrib, $M_{input}$ = %.2e Msun\" % cluster_m,\n", ")\n", "plt.loglog(\n", " r,\n", " gt_model_noisy_applegate14,\n", " \":r\",\n", - " label=r\"model applegate14, $M_{input}$ = %.2e Msun\" % cluster_m,\n", + " label=r\"applegate14, $M_{input}$ = %.2e Msun\" % cluster_m,\n", ")\n", "plt.loglog(\n", " r,\n", " gt_model_noisy_schrabback18,\n", " \":g\",\n", - " label=r\"model schrabback18, $M_{input}$ = %.2e Msun\" % cluster_m,\n", + " label=r\"schrabback18, $M_{input}$ = %.2e Msun\" % cluster_m,\n", ")\n", "\n", "plt.xlabel(\"r [Mpc]\", fontsize=20)\n", @@ -764,36 +763,32 @@ "source": [ "print(f\"The input mass = {cluster_m:.2e} Msun\\n\")\n", "\n", + "_prt_mfit = lambda mass, mass_err: f\"{mass*1e-14:.2f} +/- {mass_err*1e-14:.2f} x 10^14 Msun\"\n", + "\n", "print(\"Without accounting for the redshift distribution in the model\\n\")\n", - "print(\n", - " f\"Best fit mass for ideal data = {m_est_ideal_singlez:.2e} +/- {m_est_err_ideal_singlez:.2e} Msun\"\n", - ")\n", - "print(\n", - " f\"Best fit mass for noisy data = {m_est_noisy_singlez:.2e} +/- {m_est_err_noisy_singlez:.2e} Msun\\n\"\n", - ")\n", + "print(f\"Best fit mass for ideal data = {_prt_mfit(m_est_ideal_singlez, m_est_err_ideal_singlez)}\")\n", + "print(f\"Best fit mass for noisy data = {_prt_mfit(m_est_noisy_singlez, m_est_err_noisy_singlez)}\\n\")\n", "\n", "print(\"Accounting for the redshift distribution in the model\\n\")\n", + "print(f\"Best fit mass for ideal data = {_prt_mfit(m_est_ideal_zdistrib, m_est_err_ideal_zdistrib)}\")\n", "print(\n", - " f\"Best fit mass for ideal data = {m_est_ideal_zdistrib:.2e} +/- {m_est_err_ideal_zdistrib:.2e} Msun\"\n", - ")\n", - "print(\n", - " f\"Best fit mass for noisy data = {m_est_noisy_zdistrib:.2e} +/- {m_est_err_noisy_zdistrib:.2e} Msun\\n\"\n", + " f\"Best fit 
mass for noisy data = {_prt_mfit(m_est_noisy_zdistrib, m_est_err_noisy_zdistrib)}\\n\"\n", ")\n", "\n", "print(\"Using applegate14 (Applegate et al. 2014)\\n\")\n", "print(\n", - " f\"Best fit mass for ideal data = {m_est_ideal_applegate14:.2e} +/- {m_est_err_ideal_applegate14:.2e} Msun\"\n", + " f\"Best fit mass for ideal data = {_prt_mfit(m_est_ideal_applegate14, m_est_err_ideal_applegate14)}\"\n", ")\n", "print(\n", - " f\"Best fit mass for noisy data = {m_est_noisy_applegate14:.2e} +/- {m_est_err_noisy_applegate14:.2e} Msun\\n\"\n", + " f\"Best fit mass for noisy data = {_prt_mfit(m_est_noisy_applegate14, m_est_err_noisy_applegate14)}\\n\"\n", ")\n", "\n", "print(\"Using schrabback18 (Schrabback et al. 2018)\\n\")\n", "print(\n", - " f\"Best fit mass for ideal data = {m_est_ideal_schrabback18:.2e} +/- {m_est_err_ideal_schrabback18:.2e} Msun\"\n", + " f\"Best fit mass for ideal data = {_prt_mfit(m_est_ideal_schrabback18, m_est_err_ideal_schrabback18)}\"\n", ")\n", "print(\n", - " f\"Best fit mass for noisy data = {m_est_noisy_schrabback18:.2e} +/- {m_est_err_noisy_schrabback18:.2e} Msun\\n\"\n", + " f\"Best fit mass for noisy data = {_prt_mfit(m_est_noisy_schrabback18, m_est_err_noisy_schrabback18)}\\n\"\n", ")" ] }, @@ -864,6 +859,10 @@ "metadata": {}, "outputs": [], "source": [ + "_prt_mfit_lab = (\n", + " lambda mass, mass_err: rf\"${mass*1e-14:.2f} (\\pm {mass_err*1e-14:.2f}) \\times 10^{{14}}$ $M_\\odot$\"\n", + ")\n", + "\n", "plt.figure(figsize=(20, 10))\n", "plt.subplot(1, 2, 1)\n", "plt.title(r\"tangential shear $g_t$ (ideal data)\", fontsize=20)\n", @@ -874,31 +873,31 @@ " c=\"k\",\n", " linestyle=\"\",\n", " marker=\"o\",\n", - " label=r\"ideal data, $M_{input}$ = %.1e Msun\" % cluster_m,\n", + " label=rf\"ideal data, $M_{{input}}$ = ${cluster_m*1e-14:.2f} \\times 10^{{14}}$ $M_\\odot$\",\n", ")\n", "plt.loglog(\n", " r,\n", " gt_est_ideal_zdistrib,\n", " \"-b\",\n", - " label=rf\"model w/ zdistrib, M_fit = {m_est_ideal_zdistrib:.2e} $\\pm$ {m_est_err_ideal_zdistrib:.2e} Msun\",\n", + " label=rf\"w/ zdistrib, M_fit = {_prt_mfit_lab(m_est_ideal_zdistrib, m_est_err_ideal_zdistrib)}\",\n", ")\n", "plt.loglog(\n", " r,\n", " gt_est_ideal_applegate14,\n", " \":r\",\n", - " label=rf\"model applegate14, M_fit = {m_est_ideal_applegate14:.2e} $\\pm$ {m_est_err_ideal_applegate14:.2e} Msun\",\n", + " label=rf\"applegate14, M_fit = {_prt_mfit_lab(m_est_ideal_applegate14, m_est_err_ideal_applegate14)}\",\n", ")\n", "plt.loglog(\n", " r,\n", " gt_est_ideal_schrabback18,\n", " \":g\",\n", - " label=rf\"model schrabback18, M_fit = {m_est_ideal_schrabback18:.2e} $\\pm$ {m_est_err_ideal_schrabback18:.2e} Msun\",\n", + " label=rf\"schrabback18, M_fit = {_prt_mfit_lab(m_est_ideal_schrabback18, m_est_err_ideal_schrabback18)}\",\n", ")\n", "plt.loglog(\n", " r,\n", " gt_est_ideal_singlez,\n", " \"-y\",\n", - " label=rf\"model w/o zdistrib, M_fit = {m_est_ideal_singlez:.2e} $\\pm$ {m_est_err_ideal_singlez:.2e} Msun\",\n", + " label=rf\"w/o zdistrib, M_fit = {_prt_mfit_lab(m_est_ideal_singlez, m_est_err_ideal_singlez)}\",\n", ")\n", "\n", "plt.xlabel(\"r [Mpc]\", fontsize=20)\n", @@ -916,32 +915,32 @@ " c=\"k\",\n", " linestyle=\"\",\n", " marker=\"o\",\n", - " label=r\"noisy data, $M_{input}$ = %.1e Msun\" % cluster_m,\n", + " label=rf\"noisy data, $M_{{input}}$ = ${cluster_m*1e-14:.2f} \\times 10^{{14}}$ $M_\\odot$\",\n", ")\n", "# plt.loglog(r,gt_model_noisy,'-r', label='model, $M_{input}$ = %.3e Msun' % cluster_m)\n", "plt.loglog(\n", " r,\n", " gt_est_noisy_zdistrib,\n", " \"-b\",\n", - " 
label=rf\"model w/ zdistrib, M_fit = {m_est_noisy_zdistrib:.2e} $\\pm$ {m_est_err_noisy_zdistrib:.2e} Msun\",\n", + " label=rf\"w/ zdistrib, M_fit = {_prt_mfit_lab(m_est_noisy_zdistrib, m_est_err_noisy_zdistrib)}\",\n", ")\n", "plt.loglog(\n", " r,\n", " gt_est_noisy_applegate14,\n", " \":r\",\n", - " label=rf\"model applegate14, M_fit = {m_est_noisy_applegate14:.2e} $\\pm$ {m_est_err_noisy_applegate14:.2e} Msun\",\n", + " label=rf\"applegate14, M_fit = {_prt_mfit_lab(m_est_noisy_applegate14, m_est_err_noisy_applegate14)}\",\n", ")\n", "plt.loglog(\n", " r,\n", " gt_est_noisy_schrabback18,\n", " \":g\",\n", - " label=rf\"model schrabback18, M_fit = {m_est_noisy_schrabback18:.2e} $\\pm$ {m_est_err_noisy_schrabback18:.2e} Msun\",\n", + " label=rf\"schrabback18, M_fit = {_prt_mfit_lab(m_est_noisy_schrabback18, m_est_err_noisy_schrabback18)}\",\n", ")\n", "plt.loglog(\n", " r,\n", " gt_est_noisy_singlez,\n", " \"-y\",\n", - " label=rf\"model w/o zdistrib, M_fit = {m_est_noisy_singlez:.2e} $\\pm$ {m_est_err_noisy_singlez:.2e} Msun\",\n", + " label=rf\"w/o zdistrib, M_fit = {_prt_mfit_lab(m_est_noisy_singlez, m_est_err_noisy_singlez)}\",\n", ")\n", "\n", "plt.xlabel(\"r [Mpc]\", fontsize=20)\n", @@ -974,7 +973,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.6" + "version": "3.11.5" } }, "nbformat": 4, diff --git a/tests/test_theory.py b/tests/test_theory.py index ad8562b33..a17ab661d 100644 --- a/tests/test_theory.py +++ b/tests/test_theory.py @@ -7,7 +7,7 @@ from clmm.constants import Constants as clc from clmm.galaxycluster import GalaxyCluster from clmm import GCData -from clmm.utils import compute_beta_s_square_mean, compute_beta_s_mean +from clmm.utils import compute_beta_s_square_mean_from_distribution, compute_beta_s_mean_from_distribution, compute_beta_s_func from clmm.redshift.distributions import chang2013, desc_srd TOLERANCE = {"rtol": 1.0e-8} @@ -547,13 +547,11 @@ def test_shear_convergence_unittests(modeling_data, profile_init): cfg_inf = load_validation_config() # compute some values - cfg_inf["GAMMA_PARAMS"]["z_src"] = 1000.0 - beta_s_mean = compute_beta_s_mean( - cfg_inf["GAMMA_PARAMS"]["z_cluster"], cfg_inf["GAMMA_PARAMS"]["z_src"], cosmo - ) - beta_s_square_mean = compute_beta_s_square_mean( - cfg_inf["GAMMA_PARAMS"]["z_cluster"], cfg_inf["GAMMA_PARAMS"]["z_src"], cosmo - ) + cfg_inf['GAMMA_PARAMS']['z_src'] = 1000. 
+ beta_s_mean = compute_beta_s_mean_from_distribution( + cfg_inf['GAMMA_PARAMS']['z_cluster'], cfg_inf['GAMMA_PARAMS']['z_src'], cosmo) + beta_s_square_mean = compute_beta_s_square_mean_from_distribution( + cfg_inf['GAMMA_PARAMS']['z_cluster'], cfg_inf['GAMMA_PARAMS']['z_src'], cosmo) gammat_inf = theo.compute_tangential_shear(cosmo=cosmo, **cfg_inf["GAMMA_PARAMS"]) kappa_inf = theo.compute_convergence(cosmo=cosmo, **cfg_inf["GAMMA_PARAMS"]) @@ -573,104 +571,10 @@ def test_shear_convergence_unittests(modeling_data, profile_init): cosmo=cosmo, **cfg_inf["GAMMA_PARAMS"], alpha=alpha ) cfg_inf["GAMMA_PARAMS"]["r_proj"] = r_proj - # tangential shear - assert_allclose( - theo.compute_tangential_shear(cosmo=cosmo, **cfg_inf["GAMMA_PARAMS"]), - beta_s_mean * gammat_inf, - 1.0e-10, - ) - - # convergence - assert_allclose( - theo.compute_convergence(cosmo=cosmo, **cfg_inf["GAMMA_PARAMS"]), - beta_s_mean * kappa_inf, - 1.0e-10, - ) - + # tangential shear and convergence cannot use z_src_info="distribution" # reduced tangential shear - cfg_inf["GAMMA_PARAMS"]["approx"] = "order1" - assert_allclose( - theo.compute_reduced_tangential_shear(cosmo=cosmo, **cfg_inf["GAMMA_PARAMS"]), - beta_s_mean * gammat_inf / (1.0 - beta_s_mean * kappa_inf), - 1.0e-10, - ) - assert_allclose( - theo.compute_reduced_tangential_shear(cosmo=cosmo, **cfg_inf["GAMMA_PARAMS"])[-5:], - gt, - 3.0e-6, - ) - - cfg_inf["GAMMA_PARAMS"]["approx"] = "order2" - assert_allclose( - theo.compute_reduced_tangential_shear(cosmo=cosmo, **cfg_inf["GAMMA_PARAMS"]), - ( - 1.0 - + (beta_s_square_mean / (beta_s_mean * beta_s_mean) - 1.0) * beta_s_mean * kappa_inf - ) - * (beta_s_mean * gammat_inf / (1.0 - beta_s_mean * kappa_inf)), - 1.0e-10, - ) - assert_allclose( - theo.compute_reduced_tangential_shear(cosmo=cosmo, **cfg_inf["GAMMA_PARAMS"])[-5:], - gt, - 2.0e-6, - ) - - # magnification - cfg_inf["GAMMA_PARAMS"]["approx"] = "order1" - assert_allclose( - theo.compute_magnification(cosmo=cosmo, **cfg_inf["GAMMA_PARAMS"]), - 1 + 2 * beta_s_mean * kappa_inf, - 1.0e-10, - ) - assert_allclose( - theo.compute_magnification(cosmo=cosmo, **cfg_inf["GAMMA_PARAMS"])[-5:], mu, 2.0e-8 - ) - cfg_inf["GAMMA_PARAMS"]["approx"] = "order2" - assert_allclose( - theo.compute_magnification(cosmo=cosmo, **cfg_inf["GAMMA_PARAMS"]), - 1 - + 2 * beta_s_mean * kappa_inf - + beta_s_square_mean * gammat_inf**2 - + 3 * beta_s_square_mean * kappa_inf**2, - 1.0e-10, - ) - assert_allclose( - theo.compute_magnification(cosmo=cosmo, **cfg_inf["GAMMA_PARAMS"])[-5:], mu, 1.0e-10 - ) - - # magnification bias - cfg_inf["GAMMA_PARAMS"]["approx"] = "order1" - assert_allclose( - theo.compute_magnification_bias(cosmo=cosmo, **cfg_inf["GAMMA_PARAMS"], alpha=alpha), - 1 + (alpha - 1) * (2 * beta_s_mean * kappa_inf), - 1.0e-10, - ) - assert_allclose( - theo.compute_magnification_bias(cosmo=cosmo, **cfg_inf["GAMMA_PARAMS"], alpha=alpha)[ - -5: - ], - mu_bias, - 4.0e-8, - ) - cfg_inf["GAMMA_PARAMS"]["approx"] = "order2" - assert_allclose( - theo.compute_magnification_bias(cosmo=cosmo, **cfg_inf["GAMMA_PARAMS"], alpha=alpha), - 1 - + (alpha - 1) * (2 * beta_s_mean * kappa_inf + beta_s_square_mean * gammat_inf**2) - + (2 * alpha - 1) * (alpha - 1) * beta_s_square_mean * kappa_inf**2, - 1.0e-10, - ) - assert_allclose( - theo.compute_magnification_bias(cosmo=cosmo, **cfg_inf["GAMMA_PARAMS"], alpha=alpha)[ - -5: - ], - mu_bias, - 1.0e-10, - ) # test errors and also prepare for the next round of tests - del cfg_inf["GAMMA_PARAMS"]["approx"] # test ValueError from unsupported approx assert_raises( 
ValueError, @@ -694,34 +598,20 @@ def test_shear_convergence_unittests(modeling_data, profile_init): alpha=alpha, approx="notvalid" ) - # test KeyError from invalid key in beta_kwargs - assert_raises( - KeyError, - theo.compute_tangential_shear, - cosmo=cosmo, - **cfg_inf["GAMMA_PARAMS"], - beta_kwargs={"notavalidkey": 0.0} - ) - assert_raises( - KeyError, - theo.compute_convergence, - cosmo=cosmo, - **cfg_inf["GAMMA_PARAMS"], - beta_kwargs={"notavalidkey": 0.0} - ) + # test KeyError from invalid key in integ_kwargs assert_raises( KeyError, theo.compute_reduced_tangential_shear, cosmo=cosmo, **cfg_inf["GAMMA_PARAMS"], - beta_kwargs={"notavalidkey": 0.0} + integ_kwargs={"notavalidkey": 0.0} ) assert_raises( KeyError, theo.compute_magnification, cosmo=cosmo, **cfg_inf["GAMMA_PARAMS"], - beta_kwargs={"notavalidkey": 0.0} + integ_kwargs={"notavalidkey": 0.0} ) assert_raises( KeyError, @@ -729,7 +619,7 @@ def test_shear_convergence_unittests(modeling_data, profile_init): cosmo=cosmo, **cfg_inf["GAMMA_PARAMS"], alpha=alpha, - beta_kwargs={"notavalidkey": 0.0} + integ_kwargs={"notavalidkey": 0.0} ) # test ValueError from unsupported z_src_info cfg_inf["GAMMA_PARAMS"]["z_src_info"] = "notvalid" diff --git a/tests/test_utils.py b/tests/test_utils.py index 9a7f17d07..0b2db3591 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -535,46 +535,83 @@ def test_validate_argument(): ) -def test_beta_functions(): +def test_beta_functions(modeling_data): z_cl = 1.0 - z_s = 2.4 + z_src = [2.4, 2.1] + shape_weights = [4.6, 6.4] z_inf = 1000.0 zmax = 15.0 nsteps = 1000 zmin = z_cl + 0.1 z_int = np.linspace(zmin, zmax, nsteps) cosmo = clmm.Cosmology(H0=70.0, Omega_dm0=0.27 - 0.045, Omega_b0=0.045, Omega_k0=0.0) - beta_test = np.heaviside(z_s - z_cl, 0) * cosmo.eval_da_z1z2(z_cl, z_s) / cosmo.eval_da(z_s) - beta_s_test = utils.compute_beta(z_s, z_cl, cosmo) / utils.compute_beta(z_inf, z_cl, cosmo) + beta_test = [ + np.heaviside(z_s - z_cl, 0) * cosmo.eval_da_z1z2(z_cl, z_s) / cosmo.eval_da(z_s) + for z_s in z_src + ] + beta_s_test = utils.compute_beta(z_src, z_cl, cosmo) / utils.compute_beta(z_inf, z_cl, cosmo) - assert_allclose(utils.compute_beta(z_s, z_cl, cosmo), beta_test, **TOLERANCE) - assert_allclose(utils.compute_beta_s(z_s, z_cl, z_inf, cosmo), beta_s_test, **TOLERANCE) + assert_allclose(utils.compute_beta(z_src, z_cl, cosmo), beta_test, **TOLERANCE) + assert_allclose(utils.compute_beta_s(z_src, z_cl, z_inf, cosmo), beta_s_test, **TOLERANCE) + + # beta mean from distributions for model in (None, zdist.chang2013, zdist.desc_srd): # None defaults to chang2013 for compute_beta* functions - test1 = utils.compute_beta_mean(z_cl, cosmo, zmax, z_distrib_func=model) - test2 = utils.compute_beta_s_mean(z_cl, z_inf, cosmo, zmax, z_distrib_func=model) - test3 = utils.compute_beta_s_square_mean(z_cl, z_inf, cosmo, zmax, z_distrib_func=model) - if model is None: model = zdist.chang2013 - def integrand1(z_i, z_cl=z_cl, cosmo=cosmo): - return utils.compute_beta(z_i, z_cl, cosmo) * model(z_i) - - def integrand2(z_i, z_inf=z_inf, z_cl=z_cl, cosmo=cosmo): + def integrand1(z_i, z_inf=z_inf, z_cl=z_cl, cosmo=cosmo): return utils.compute_beta_s(z_i, z_cl, z_inf, cosmo) * model(z_i) - def integrand3(z_i, z_inf=z_inf, z_cl=z_cl, cosmo=cosmo): + def integrand2(z_i, z_inf=z_inf, z_cl=z_cl, cosmo=cosmo): return utils.compute_beta_s(z_i, z_cl, z_inf, cosmo) ** 2 * model(z_i) assert_allclose( - test1, quad(integrand1, zmin, zmax)[0] / quad(model, zmin, zmax)[0], **TOLERANCE - ) - assert_allclose( - test2, quad(integrand2, 
zmin, zmax)[0] / quad(model, zmin, zmax)[0], **TOLERANCE + utils.compute_beta_s_mean_from_distribution( + z_cl, z_inf, cosmo, zmax, z_distrib_func=model + ), + quad(integrand1, zmin, zmax)[0] / quad(model, zmin, zmax)[0], + **TOLERANCE ) assert_allclose( - test3, quad(integrand3, zmin, zmax)[0] / quad(model, zmin, zmax)[0], **TOLERANCE + utils.compute_beta_s_square_mean_from_distribution( + z_cl, z_inf, cosmo, zmax, z_distrib_func=model + ), + quad(integrand2, zmin, zmax)[0] / quad(model, zmin, zmax)[0], + **TOLERANCE ) + + # beta mean from weights + + assert_allclose( + utils.compute_beta_s_mean_from_weights(z_src, z_cl, z_inf, cosmo, shape_weights), + np.sum( + shape_weights * utils.compute_beta_s(z_src, z_cl, z_inf, cosmo) / np.sum(shape_weights) + ), + **TOLERANCE + ) + assert_allclose( + utils.compute_beta_s_square_mean_from_weights(z_src, z_cl, z_inf, cosmo, shape_weights), + np.sum( + shape_weights + * utils.compute_beta_s(z_src, z_cl, z_inf, cosmo) ** 2 + / np.sum(shape_weights) + ), + **TOLERANCE + ) + + no_weights = [1, 1] + assert_allclose( + utils.compute_beta_s_mean_from_weights(z_src, z_cl, z_inf, cosmo, None), + np.sum(no_weights * utils.compute_beta_s(z_src, z_cl, z_inf, cosmo) / np.sum(no_weights)), + **TOLERANCE + ) + assert_allclose( + utils.compute_beta_s_square_mean_from_weights(z_src, z_cl, z_inf, cosmo, None), + np.sum( + no_weights * utils.compute_beta_s(z_src, z_cl, z_inf, cosmo) ** 2 / np.sum(no_weights) + ), + **TOLERANCE + )
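
A minimal usage sketch of the new weight-based helpers added in clmm/utils (the redshifts, weights, and cosmology values below are hypothetical, chosen only to exercise the API and cross-check the weighted-mean definition given in the docstrings):

import numpy as np
import clmm

# Hypothetical cosmology (same parameter names as in the unit tests) and inputs.
cosmo = clmm.Cosmology(H0=70.0, Omega_dm0=0.27 - 0.045, Omega_b0=0.045, Omega_k0=0.0)
z_cl, z_inf = 0.3, 1000.0
z_src = np.array([0.8, 1.1, 1.5])      # hypothetical source redshifts
weights = np.array([2.0, 1.0, 0.5])    # hypothetical shape weights

beta_s_mean = clmm.utils.compute_beta_s_mean_from_weights(z_src, z_cl, z_inf, cosmo, weights)
beta_s_sq_mean = clmm.utils.compute_beta_s_square_mean_from_weights(z_src, z_cl, z_inf, cosmo, weights)

# Cross-check against the weighted means defined in the docstrings:
# <beta_s> = sum(w_i * beta_s_i) / sum(w_i), <beta_s^2> = sum(w_i * beta_s_i^2) / sum(w_i)
beta_s = clmm.utils.compute_beta_s(z_src, z_cl, z_inf, cosmo)
assert np.isclose(beta_s_mean, (weights * beta_s).sum() / weights.sum())
assert np.isclose(beta_s_sq_mean, (weights * beta_s**2).sum() / weights.sum())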
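The notebooks updated above now pre-compute the lensing efficiency statistics and pass them through z_src_info="beta" instead of a redshift distribution. A short sketch of that calling pattern, mirroring the modified demo cells (cluster mass, concentration, and radii below are hypothetical placeholders):

import clmm
from clmm.redshift.distributions import chang2013

cosmo = clmm.Cosmology(H0=70.0, Omega_dm0=0.27 - 0.045, Omega_b0=0.045, Omega_k0=0.0)
z_cl = 0.3  # hypothetical cluster redshift

# Beta statistics from an assumed source redshift distribution (Chang et al. 2013).
beta_s_mean = clmm.utils.compute_beta_s_mean_from_distribution(
    z_cl=z_cl, z_inf=10.0, cosmo=cosmo, z_distrib_func=chang2013
)
beta_s_square_mean = clmm.utils.compute_beta_s_square_mean_from_distribution(
    z_cl=z_cl, z_inf=10.0, cosmo=cosmo, z_distrib_func=chang2013
)

# Reduced tangential shear with pre-computed beta statistics and the order-2 approximation.
gt = clmm.compute_reduced_tangential_shear(
    r_proj=[0.3, 1.0, 3.0],  # hypothetical radii [Mpc]
    mdelta=1.0e14,           # hypothetical cluster mass [Msun]
    cdelta=4.0,              # hypothetical concentration
    z_cluster=z_cl,
    z_src=[beta_s_mean, beta_s_square_mean],
    cosmo=cosmo,
    delta_mdef=200,
    halo_profile_model="nfw",
    z_src_info="beta",
    approx="order2",
)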
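For reference, the closed forms behind approx="order1" and approx="order2" can be read off the assertions removed from test_theory.py above; a small sketch transcribing them (gammat_inf and kappa_inf are the tangential shear and convergence evaluated for a source at z_inf):

def gt_order1(beta_s_mean, gammat_inf, kappa_inf):
    # First-order approximation (Applegate et al. 2014 style), as in the removed assertion.
    return beta_s_mean * gammat_inf / (1.0 - beta_s_mean * kappa_inf)


def gt_order2(beta_s_mean, beta_s_square_mean, gammat_inf, kappa_inf):
    # Second-order correction (Schrabback et al. 2018 style) applied on top of order 1.
    correction = 1.0 + (beta_s_square_mean / beta_s_mean**2 - 1.0) * beta_s_mean * kappa_inf
    return correction * gt_order1(beta_s_mean, gammat_inf, kappa_inf)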