diff --git a/project/data_analysis/python/paper_plots/B_modes/B_modes_utils.py b/project/data_analysis/python/paper_plots/B_modes/B_modes_utils.py
new file mode 100644
index 00000000..1720d1f7
--- /dev/null
+++ b/project/data_analysis/python/paper_plots/B_modes/B_modes_utils.py
@@ -0,0 +1,178 @@
+"""
+Utility functions shared by the B-mode scripts: model vector construction,
+"pointing" matrices for the maximum likelihood combination of spectra, and the
+multipole cuts used before and after unblinding.
+"""
+
+from pspy import so_dict, pspy_utils
+from pspipe_utils import covariance
+import numpy as np
+import pylab as plt
+import sys, os
+
+
+def get_model_vecs(l_th, ps_dict, fg_dict, lmax,
+                   binning_file, spec_name_list, spectra):
+
+    vec_cmb, vec_dust = [], []
+    for spec in spectra:
+        lb, ps_b = pspy_utils.naive_binning(l_th, ps_dict[spec], binning_file, lmax)
+        for spec_name in spec_name_list:
+            na, nb = spec_name.split("x")
+            if spec in ["EB", "BE"]:
+                dust = l_th * 0
+            else:
+                dust = fg_dict[spec.lower(), "dust", na, nb]
+
+            lb, dust_b = pspy_utils.naive_binning(l_th, dust, binning_file, lmax)
+
+            if (spec == "ET" or spec == "BT" or spec == "BE") and (na == nb): continue
+            vec_dust = np.append(vec_dust, dust_b)
+            vec_cmb = np.append(vec_cmb, ps_b)
+
+    return vec_dust, vec_cmb
+
+def get_P_mat_BB(vec_size, bin_out_dict, bin_scheme_edge, fig_name=None):
+    """
+    Very naive "pointing" matrix for the maximum likelihood combination of the spectra.
+    It could be vectorized, but writing the loops was trivial and the code is sufficiently fast.
+    """
+    P_mat = np.zeros((vec_size, len(bin_scheme_edge)))
+    index1 = 0
+    y_ticks, y_name = [], []
+    for my_spec in bin_out_dict.keys():
+        s_name, _ = my_spec
+        id_spec, lb_spec = bin_out_dict[my_spec]
+        # for plotting
+        y_ticks += [index1]
+        y_name += [s_name]
+
+        for jj in lb_spec:
+            for index2, my_bin in enumerate(bin_scheme_edge):
+                bin_min, bin_max = my_bin
+                if (bin_min <= jj) & (jj <= bin_max):
+                    P_mat[index1, index2] = 1
+            index1 += 1
+
+    if fig_name is not None:
+        plt.figure(figsize=(12, 12))
+        plt.imshow(P_mat)
+        plt.yticks(y_ticks, y_name)
+        plt.xticks(np.arange(len(bin_scheme_edge)), bin_scheme_edge, rotation=90)
+        plt.gca().set_aspect(0.02)
+        plt.tight_layout()
+        plt.savefig(f"{fig_name}", dpi=300, bbox_inches="tight")
+        plt.clf()
+        plt.close()
+
+    return P_mat
+
+
+def get_fg_sub_ML_solution_BB(lb, vec_BB, vec_dust_BB, i_cov_BB, bin_out_dict, bin_scheme_edge, fig_name=None):
+
+    P_mat_BB = get_P_mat_BB(len(vec_BB), bin_out_dict, bin_scheme_edge, fig_name=fig_name)
+
+    cov_ML_BB = covariance.get_max_likelihood_cov(P_mat_BB,
+                                                  i_cov_BB,
+                                                  force_sim = True,
+                                                  check_pos_def = True)
+
+    vec_ML_BB = covariance.max_likelihood_spectra(cov_ML_BB,
+                                                  i_cov_BB,
+                                                  P_mat_BB,
+                                                  vec_BB - vec_dust_BB)
+
+    lb_all = []
+    for my_spec in bin_out_dict.keys():
+        s_name, spectrum = my_spec
+        id_spec, lb_spec = bin_out_dict[my_spec]
+        lb_all = np.append(lb_all, lb_spec)
+
+    lb_ML_BB = covariance.max_likelihood_spectra(cov_ML_BB,
+                                                 i_cov_BB,
+                                                 P_mat_BB,
+                                                 lb_all)
+
+    return lb_ML_BB, vec_ML_BB, cov_ML_BB
+
+
+def get_ml_bins(bin_out_dict, bin_mean):
+
+    """
+    Find the bin range of the maximum likelihood combination of the data:
+    we go through all spectra in the list and take as the minimum bin the minimum bin
+    of all spectra that enter the combination, and as the maximum bin the maximum bin
+    of all spectra that enter the combination.
+    """
+
+    min_lb_list, max_lb_list = [], []
+    for my_spec in bin_out_dict.keys():
+        id_spec, lb_spec = bin_out_dict[my_spec]
+        min_lb_list += [np.min(lb_spec)]
+        max_lb_list += [np.max(lb_spec)]
+
+    min_lb_comb = np.min(min_lb_list, axis=0)
+    max_lb_comb = np.max(max_lb_list, axis=0)
+
+    ml_id = np.where((bin_mean >= min_lb_comb) & (bin_mean <= max_lb_comb))
+    ml_lb = bin_mean[ml_id]
+
+    return ml_lb
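+
+# Illustrative sketch (not part of the pipeline): the helpers above implement a
+# generalized least squares combination, x_ML = (P^T C^-1 P)^-1 P^T C^-1 x,
+# where each row of P maps one bin of the stacked data vector to one combined
+# bin. The toy numbers below are hypothetical and only document the convention,
+# using the same covariance.get_max_likelihood_cov / max_likelihood_spectra
+# calls as above:
+#
+#     P = np.array([[1., 0.], [1., 0.], [0., 1.]])  # 3 data bins -> 2 combined bins
+#     i_cov = np.linalg.inv(np.diag([1., 4., 1.]))  # inverse data covariance
+#     x = np.array([1., 2., 3.])                    # stacked data vector
+#     cov_ml = covariance.get_max_likelihood_cov(P, i_cov, force_sim=True, check_pos_def=True)
+#     x_ml = covariance.max_likelihood_spectra(cov_ml, i_cov, P, x)
+#     # x_ml[0] is the inverse-variance average of the first two bins, x_ml[1] = x[2]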
+
+
+def get_P_mat(vec_size, lb_ml, bin_out_dict, fig_name=None):
+
+    """
+    Very naive "pointing" matrix for the maximum likelihood combination of the spectra.
+    It could be vectorized, but writing the loops was trivial and the code is sufficiently fast.
+    """
+
+    P_mat = np.zeros((vec_size, len(lb_ml)))
+
+    index1 = 0
+    y_ticks, y_name = [], []
+
+    for my_spec in bin_out_dict.keys():
+        s_name, _ = my_spec
+        id_spec, lb_spec = bin_out_dict[my_spec]
+
+        # for plotting
+        y_ticks += [index1]
+        y_name += [s_name]
+
+        for jj in lb_spec:
+            for index2, ii in enumerate(lb_ml):
+                if ii == jj:
+                    P_mat[index1, index2] = 1
+            index1 += 1
+
+    if fig_name is not None:
+        plt.figure(figsize=(12,8))
+        plt.imshow(P_mat, aspect="auto")
+        plt.yticks(y_ticks, y_name)
+        plt.xticks(np.arange(len(lb_ml))[::2], lb_ml[::2], rotation=90)
+        plt.tight_layout()
+        plt.savefig(fig_name, dpi=300, bbox_inches="tight")
+        plt.clf()
+        plt.close()
+
+    return P_mat
+
+
+def get_spectra_cuts(cut, lmax):
+
+    if cut == "pre_unblinding":
+        spectra_cuts = {
+            "dr6_pa4_f220": dict(T=[975, lmax], P=[lmax, lmax]),
+            "dr6_pa5_f150": dict(T=[475, lmax], P=[475, lmax]),
+            "dr6_pa6_f150": dict(T=[475, lmax], P=[475, lmax]),
+            "dr6_pa5_f090": dict(T=[475, lmax], P=[475, lmax]),
+            "dr6_pa6_f090": dict(T=[475, lmax], P=[475, lmax])}
+    elif cut == "post_unblinding":
+        spectra_cuts = {
+            "dr6_pa4_f220": dict(T=[975, lmax], P=[lmax, lmax]),
+            "dr6_pa5_f150": dict(T=[775, lmax], P=[775, lmax]),
+            "dr6_pa6_f150": dict(T=[575, lmax], P=[575, lmax]),
+            "dr6_pa5_f090": dict(T=[975, lmax], P=[975, lmax]),
+            "dr6_pa6_f090": dict(T=[975, lmax], P=[975, lmax])}
+    else:
+        raise ValueError(f"unknown cut: {cut}")
+
+    return spectra_cuts
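+
+# Illustrative sketch (not part of the pipeline): the cut dictionaries above are
+# meant to be passed to pspipe_utils.covariance.get_indices, as done in the
+# analysis scripts; T and P give the [lmin, lmax] range kept for temperature and
+# polarization, so P=[lmax, lmax] removes a map set from polarization entirely:
+#
+#     spectra_cuts = get_spectra_cuts("pre_unblinding", lmax)
+#     bin_out_dict, indices = covariance.get_indices(bin_lo, bin_hi, lb,
+#                                                    spec_name_list,
+#                                                    spectra_cuts=spectra_cuts,
+#                                                    spectra_order=spectra,
+#                                                    selected_spectra=["BB"],
+#                                                    only_TT_map_set=["dr6_pa4_f220"])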
diff --git a/project/data_analysis/python/paper_plots/B_modes/EB_summary.py b/project/data_analysis/python/paper_plots/B_modes/EB_summary.py
new file mode 100644
index 00000000..5a66fabb
--- /dev/null
+++ b/project/data_analysis/python/paper_plots/B_modes/EB_summary.py
@@ -0,0 +1,263 @@
+"""
+For this to work you need to have created the two folders results_EB_optimal and
+results_EB_uniform with results_plot_pol_angle.py, for both the uniform and the
+optimal weighting analyses.
+Similarly you need to have created combined_spectra_paper_optimal and the
+corresponding simulations.
+"""
+
+import numpy as np
+import pylab as plt
+import pickle
+from matplotlib import rcParams
+from pspy import so_spectra, pspy_utils, so_cov
+from pspipe_utils import pol_angle
+import scipy.stats as ss
+
+
+binning_file = "BIN_ACTPOL_50_4_SC_large_bin_at_low_ell"
+lmax = 8500
+
+
+rcParams["xtick.labelsize"] = 16
+rcParams["ytick.labelsize"] = 16
+rcParams["axes.labelsize"] = 20
+rcParams["axes.titlesize"] = 20
+
+latex_par_name = {}
+latex_par_name["alpha_pa5_f090"] = r"$\alpha_{pa5 f090}$"
+latex_par_name["alpha_pa6_f090"] = r"$\alpha_{pa6 f090}$"
+latex_par_name["alpha_pa5_f150"] = r"$\alpha_{pa5 f150}$"
+latex_par_name["alpha_pa6_f150"] = r"$\alpha_{pa6 f150}$"
+latex_par_name["beta_pa5"] = r"\beta_{\rm pa5}"
+latex_par_name["beta_pa6"] = r"\beta_{\rm pa6}"
+latex_par_name["beta_ACT"] = r"\beta_{\rm ACT}"
+latex_par_name["beta_ACT+komatsu"] = r"\beta_{\rm ACT + Komatsu}"
+latex_par_name["beta_komatsu"] = r"\beta_{\rm Komatsu}"
+
+cut_list = ["post_unblinding", "pre_unblinding"]
+name_list = ["baseline multipole cut", "extended multipole cut"]
+weight_list = ["uniform", "optimal"]
+
+angle = {}
+for my_w in weight_list:
+    for my_cut in cut_list:
+        with open(f"results_EB_{my_w}/angle_{my_cut}.pkl", "rb") as fp:
+            angle[my_w, my_cut] = pickle.load(fp)
+
+
+alpha_list = ["alpha_pa5_f090", "alpha_pa5_f150", "alpha_pa6_f090", "alpha_pa6_f150"]
+color_list = ["red", "orange", "blue", "lightblue"]
+
+plt.figure(figsize=(12,6))
+for count, alpha in enumerate(alpha_list):
+    count_all = 0
+    for shift2, (name, my_cut) in enumerate(zip(name_list, cut_list)):
+        for shift1, my_w in enumerate(weight_list):
+
+            print(f"{alpha} {name} ({my_w} weighting)", angle[my_w, my_cut][alpha, "mean"], angle[my_w, my_cut][alpha, "std"])
+            sh1 = shift1 * 0.1
+            sh2 = shift2 * 0.2
+            plt.errorbar(count + sh1 + sh2,
+                         angle[my_w, my_cut][alpha, "mean"],
+                         angle[my_w, my_cut][alpha, "std"],
+                         label=f"{name} ({my_w} weighting)",
+                         color=color_list[count_all],
+                         fmt="o")
+
+            count_all += 1
+    if count == 0:
+        plt.legend(fontsize=12)
+
+xticks_list = ["pa5 f090", "pa5 f150", "pa6 f090", "pa6 f150"]
+plt.xticks([0.15, 1.15, 2.15, 3.15], xticks_list, rotation=90, fontsize=22)
+plt.ylabel(r"$\alpha$", fontsize=32)
+plt.tight_layout()
+plt.savefig("all_alpha.png")
+plt.clf()
+plt.close()
+
+
+beta_list = ["beta_pa5", "beta_pa6", "beta_ACT", "beta_ACT+komatsu"]
+plt.figure(figsize=(12,8))
+
+for count, beta in enumerate(beta_list):
+    count_all = 0
+    for shift2, (name, my_cut) in enumerate(zip(name_list, cut_list)):
+        for shift1, my_w in enumerate(weight_list):
+
+            print(f"{beta} {name} ({my_w} weighting)", angle[my_w, my_cut][beta, "mean"], angle[my_w, my_cut][beta, "std"])
+
+            sh1 = shift1 * 0.1
+            sh2 = shift2 * 0.2
+            plt.errorbar(count + sh1 + sh2,
+                         angle[my_w, my_cut][beta, "mean"],
+                         angle[my_w, my_cut][beta, "std"],
+                         label=f"{name} ({my_w} weighting)",
+                         color=color_list[count_all],
+                         fmt="o")
+
+            count_all += 1
+
+    if count == 0:
+        plt.legend(fontsize=12)
+
+xticks_list = ["pa5", "pa6", "ACT", "ACT+Planck"]
+plt.xticks([0.15, 1.15, 2.15, 3.15], xticks_list, rotation=90, fontsize=22)
+plt.ylabel(r"$\beta$", fontsize=32)
+plt.tight_layout()
+plt.savefig("all_beta.png")
+plt.clf()
+plt.close()
+
+
+beta_ACT = angle["optimal", "post_unblinding"]["beta_ACT", "mean"]
+print(beta_ACT)
+
+
+spectra = ["TT", "TE", "TB", "ET", "BT", "EE", "EB", "BE", "BB"]
+
+for spec in ["EB", 
"TB"]: + l, ps_th = so_spectra.read_ps("cmb.dat", spectra=spectra) + l, psth_rot = pol_angle.rot_theory_spectrum(l, ps_th, beta_ACT, beta_ACT) + + lb, ps_th_b = pspy_utils.naive_binning(l, psth_rot[spec], binning_file, lmax) + + lb_dat, ps, error = np.loadtxt(f"combined_spectra_paper_optimal/Dl_all_{spec}.dat", unpack=True) + + id = np.where(lb>=lb_dat[0]) + + lb, ps_th_b = lb[id], ps_th_b[id] + + cov = np.load(f"combined_spectra_paper_optimal/cov_all_{spec}.npy") + + chi2 = (ps - ps_th_b) @ np.linalg.inv(cov) @ (ps - ps_th_b) + chi2_null = (ps) @ np.linalg.inv(cov) @ (ps) + + pte = 1 - ss.chi2(len(lb)).cdf(chi2) + pte_null = 1 - ss.chi2(len(lb)).cdf(chi2_null) + + plt.figure(figsize=(12,6)) + plt.errorbar(l, ps_th["TT"]*0, label=f"pte (null): {pte_null*100:.5f} %", color="gray") + plt.errorbar(l, psth_rot[spec], label=f"pte (model): {pte*100:.2f} %", color="red") + plt.errorbar(lb_dat, ps, error, fmt="o", color="royalblue") + plt.xlabel(r"$\ell$", fontsize=25) + plt.ylabel(r"$D^{%s}_{\ell}$" % spec, fontsize=25) + plt.legend(fontsize=14) + if spec == "EB": + plt.ylim(-0.3,0.6) + if spec == "TB": + plt.ylim(-4,6) + + plt.xlim(0,3500) + plt.savefig(f"{spec}.png") + plt.clf() + plt.close() + +for spec in ["EB", "TB"]: + l, ps_th = so_spectra.read_ps("cmb.dat", spectra=spectra) + l, psth_rot = pol_angle.rot_theory_spectrum(l, ps_th, beta_ACT, beta_ACT) + + for count, freq_pair in enumerate(["90x90", "90x150", "150x150"]): + lb, ps_th_b = pspy_utils.naive_binning(l, psth_rot[spec], binning_file, lmax) + lb_dat, ps, error = np.loadtxt(f"combined_spectra_paper_optimal/Dl_{freq_pair}_{spec}.dat", unpack=True) + id = np.where(lb>=lb_dat[0]) + + lb, ps_th_b = lb[id], ps_th_b[id] + + cov = np.load(f"combined_spectra_paper_optimal/cov_{freq_pair}_{spec}.npy") + + chi2 = (ps - ps_th_b) @ np.linalg.inv(cov) @ (ps - ps_th_b) + chi2_null = (ps) @ np.linalg.inv(cov) @ (ps) + + pte = 1 - ss.chi2(len(lb)).cdf(chi2) + pte_null = 1 - ss.chi2(len(lb)).cdf(chi2_null) + + plt.figure(figsize=(12,6)) + plt.errorbar(l, ps_th["TT"]*0, label=f"pte (null): {pte_null*100:.5f} %", color="gray") + plt.errorbar(l, psth_rot[spec], label=f"pte (model): {pte*100:.2f} %", color="red") + plt.errorbar(lb_dat, ps, error, fmt=".") + + plt.xlabel(r"$\ell$", fontsize=25) + plt.ylabel(r"$D^{%s}_{\ell}$ (%s)" % (spec, freq_pair), fontsize=25) + + plt.legend(fontsize=14) + if spec == "EB": + plt.ylim(-0.3,0.6) + if spec == "TB": + plt.ylim(-4,6) + + plt.show() + + +for spec in ["EB", "TB"]: + + lb_90x90, ps_90x90, error_90x90 = np.loadtxt(f"combined_spectra_paper_optimal/Dl_90x90_{spec}.dat", unpack=True) + lb_150x150, ps_150x150, error_150x150 = np.loadtxt(f"combined_spectra_paper_optimal/Dl_150x150_{spec}.dat", unpack=True) + lb_90x150, ps_90x150, error_90x150 = np.loadtxt(f"combined_spectra_paper_optimal/Dl_90x150_{spec}.dat", unpack=True) + + id = np.where(lb_150x150>= lb_90x90[0]) + lb_150x150, ps_150x150, error_150x150 = lb_150x150[id], ps_150x150[id], error_150x150[id] + + diff_data_150x150_90x90 = ps_150x150 - ps_90x90 + diff_data_150x150_90x150 = ps_150x150 - ps_90x150 + + diff_150x150_90x90_list = [] + diff_150x150_90x150_list = [] + + for iii in range(399): + lb_90x90, ps_sim_90x90, error_sim_90x90 = np.loadtxt(f"combined_sim_spectra_optimal/Dl_90x90_{spec}_{iii:05d}.dat", unpack=True) + lb_90x150, ps_sim_90x150, error_sim_90x150 = np.loadtxt(f"combined_sim_spectra_optimal/Dl_90x150_{spec}_{iii:05d}.dat", unpack=True) + lb_150x150, ps_sim_150x150, error_sim_150x150 = 
np.loadtxt(f"combined_sim_spectra_optimal/Dl_150x150_{spec}_{iii:05d}.dat", unpack=True) + id = np.where(lb_150x150>= lb_90x90[0]) + + lb_150x150, ps_sim_150x150, error_sim_150x150 = lb_150x150[id], ps_sim_150x150[id], error_sim_150x150[id] + + diff_150x150_90x90_list += [ps_sim_150x150 - ps_sim_90x90] + diff_150x150_90x150_list += [ps_sim_150x150 - ps_sim_90x150] + + error_mc_150x150_90x90 = np.std(diff_150x150_90x90_list, axis=0) + error_mc_150x150_90x150 = np.std(diff_150x150_90x150_list, axis=0) + + cov_mc_150x150_90x90 = np.cov(diff_150x150_90x90_list, rowvar=False) + cov_mc_150x150_90x150 = np.cov(diff_150x150_90x150_list, rowvar=False) + + + corr_mc_150x150_90x90 = so_cov.cov2corr(cov_mc_150x150_90x90, remove_diag=True) + corr_mc_150x150_90x150 = so_cov.cov2corr(cov_mc_150x150_90x150, remove_diag=True) + + plt.imshow(corr_mc_150x150_90x90) + plt.colorbar() + plt.show() + + plt.imshow(corr_mc_150x150_90x150) + plt.colorbar() + plt.show() + + + chi2_150x150_90x90 = np.sum((diff_data_150x150_90x90) ** 2 / error_mc_150x150_90x90 ** 2) + chi2_150x150_90x150 = np.sum((diff_data_150x150_90x150) ** 2 / error_mc_150x150_90x150 ** 2) + + pte_150x150_90x90 = 1 - ss.chi2(len(lb_150x150)).cdf(chi2_150x150_90x90) + pte_150x150_90x150 = 1 - ss.chi2(len(lb_150x150)).cdf(chi2_150x150_90x150) + + plt.figure(figsize=(16,6)) + plt.title("Frequency null", fontsize=24) + plt.errorbar(lb_150x150-10, diff_data_150x150_90x90, error_mc_150x150_90x90, fmt="o", label=f"pte 150x150 - 90x90: {pte_150x150_90x90*100:.2f} %") + plt.errorbar(lb_150x150+10, diff_data_150x150_90x150, error_mc_150x150_90x150, fmt="o", label=f"pte 150x150 - 90x150: {pte_150x150_90x150*100:.2f} %") + plt.plot(lb_150x150, lb_150x150*0) + if spec == "EB": + plt.ylim(-2,2) + if spec == "TB": + plt.ylim(-5,5) + plt.xlabel(r"$\ell$", fontsize=25) + plt.ylabel(r"$D^{%s}_{\ell}$" % spec, fontsize=25) + + plt.legend(fontsize=16) + plt.savefig(f"freq_null_{spec}.png") + plt.clf() + plt.close() diff --git a/project/data_analysis/python/paper_plots/B_modes/mc_BB_likelihood.py b/project/data_analysis/python/paper_plots/B_modes/mc_BB_likelihood.py new file mode 100644 index 00000000..77b47846 --- /dev/null +++ b/project/data_analysis/python/paper_plots/B_modes/mc_BB_likelihood.py @@ -0,0 +1,214 @@ +""" +This script compute the amplitude of the BB power spectra of ACT DR6 simulation +python mc_BB_likelihood.py global_dr6_v4.dict +""" + +from pspy import so_dict, pspy_utils, so_spectra, so_cov +from pspipe_utils import covariance, pspipe_list, log, best_fits, external_data +import numpy as np +import pylab as plt +import sys, os +import scipy.stats as ss +from getdist.mcsamples import loadMCSamples +import getdist.plots as gdplt +from cobaya.run import run +from matplotlib import rcParams +import matplotlib.ticker as ticker +import B_modes_utils + +rcParams["xtick.labelsize"] = 16 +rcParams["ytick.labelsize"] = 16 +rcParams["axes.labelsize"] = 20 +rcParams["axes.titlesize"] = 20 + +def loglike(a_BB_cmb, a_BB_dust): + """ compute the loglike according to the parameters """ + + theory = a_BB_cmb * vec_cmb_BB + a_BB_dust * vec_dust_BB_template + residual = vec_BB - theory + chi2 = residual @ i_cov_BB @ residual + + return -0.5 * chi2 + +def mcmc(mean_dust, std_dust, Rminus1_stop, Rminus1_cl_stop): + + """ the MCMC on the BB amplitude, mean_dust and std_dust are priors from Planck BB 353 GHz """ + + info = {"likelihood": {"BB": loglike}, + "params": { + "a_BB_cmb": {"prior": {"min": 0., "max": 2}, "latex": r"A_{BB}^{CMB}"}, + "a_BB_dust": {"prior": 
{"dist": "norm", "loc": mean_dust,"scale": std_dust}, "latex": r"A_{BB}^{dust}"}}, + "sampler": {"mcmc": {"max_tries": 10**6, "Rminus1_stop": Rminus1_stop, "Rminus1_cl_stop": Rminus1_cl_stop}}, + "resume": False, + "output": f"{BB_dir}/mcmc/chain", "force": True} + + updated_info, sampler = run(info) + + samples = sampler.products(to_getdist=True, skip_samples=0.5)["sample"] + + return samples + +d = so_dict.so_dict() +d.read_from_file(sys.argv[1]) +log = log.get_logger(**d) + +cut = "pre_unblinding" +Rminus1_stop = 0.03 +Rminus1_cl_stop = 0.03 + +cov_dir = "covariances" +sim_spec_dir = "sim_spectra" + +BB_dir = f"results_BB_MC" + +pspy_utils.create_directory(f"{BB_dir}/mcmc") + +surveys = d["surveys"] +type = d["type"] +lmax = d["lmax"] +binning_file = d["binning_file"] +cosmo_params = d["cosmo_params"] +accuracy_params = d["accuracy_params"] + +fg_norm = d["fg_norm"] +fg_params = d["fg_params"] +fg_components = d["fg_components"] +do_bandpass_integration = d["do_bandpass_integration"] +spectra = ["TT", "TE", "TB", "ET", "BT", "EE", "EB", "BE", "BB"] + +bin_lo, bin_hi, lb, bin_size = pspy_utils.read_binning_file(binning_file, lmax) +spec_name_list, nu_tag_list = pspipe_list.get_spec_name_list(d, delimiter="_", return_nu_tag=True) + + +passbands = {} +if do_bandpass_integration: + log.info("Doing bandpass integration") + +narrays, sv_list, ar_list = pspipe_list.get_arrays_list(d) +for sv, ar in zip(sv_list, ar_list): + + freq_info = d[f"freq_info_{sv}_{ar}"] + if do_bandpass_integration: + nu_ghz, pb = np.loadtxt(freq_info["passband"]).T + else: + nu_ghz, pb = np.array([freq_info["freq_tag"]]), np.array([1.]) + passbands[f"{sv}_{ar}"] = [nu_ghz, pb] + +l_th, ps_dict = pspy_utils.ps_from_params(cosmo_params, type, lmax + 500, **accuracy_params) +fg_dict = best_fits.get_foreground_dict(l_th, passbands, fg_components, fg_params, fg_norm) + +################################################################################################ + +spectra_cuts = B_modes_utils.get_spectra_cuts(cut, lmax) + +if cut == "pre_unblinding": + # list of new [bin_low, bin_max] for the final BB spectra + bin_scheme_edge = [[500, 750.5], [751, 1050.5], [1051, 1350.5], [1351, 1600.5], [1601, 2275.5], [2276, 9000]] +if cut == "post_unblinding": + bin_scheme_edge = [[500, 1200], [1201, 1500], [1501, 2000], [2001, 9000]] + + +only_TT_map_set = ["dr6_pa4_f220"] + +bin_out_dict, indices = covariance.get_indices(bin_lo, + bin_hi, + lb, + spec_name_list, + spectra_cuts=spectra_cuts, + spectra_order=spectra, + selected_spectra=["BB"], + only_TT_map_set=only_TT_map_set) + +my_spectra = bin_out_dict.keys() + +################################################################################################ +cov_xar = np.load(f"{cov_dir}/x_ar_final_cov_data.npy") +cov_BB = cov_xar[np.ix_(indices, indices)] +i_cov_BB = np.linalg.inv(cov_BB) +corr_BB = so_cov.cov2corr(cov_BB, remove_diag=True) + +plt.figure(figsize=(12,8)) +plt.imshow(corr_BB) +plt.colorbar() +plt.savefig(f"{BB_dir}/full_correlation_{cut}.png", dpi=300, bbox_inches="tight") +plt.clf() +plt.close() + + +vec_dust, vec_cmb = B_modes_utils.get_model_vecs(l_th, ps_dict, fg_dict, lmax, binning_file, spec_name_list, spectra) +vec_dust_BB = vec_dust[indices] +vec_cmb_BB = vec_cmb[indices] +vec_dust_BB_template = vec_dust_BB / fg_params["a_gbb"] + +mean_dust, std_dust = fg_params["a_gbb"], 0.0084 #prior on data gal amplitude + + + +vec_ml_list = [] +chi2_list, a_cmb_list, a_dust_list = [], [], [] +test_sampling = True +n_sims = 100 +for iii in range(n_sims): + 
+    print(iii)
+    vec_xar = covariance.read_x_ar_spectra_vec(sim_spec_dir,
+                                               spec_name_list,
+                                               f"cross_{iii:05d}",
+                                               spectra_order=spectra,
+                                               type="Dl")
+
+    vec_BB = vec_xar[indices]
+
+    lb_ml_BB, vec_ml_BB, cov_ml_BB = B_modes_utils.get_fg_sub_ML_solution_BB(lb,
+                                                                             vec_BB,
+                                                                             vec_dust_BB,
+                                                                             i_cov_BB,
+                                                                             bin_out_dict,
+                                                                             bin_scheme_edge)
+    std_ml_BB = np.sqrt(cov_ml_BB.diagonal())
+
+    vec_ml_list += [vec_ml_BB]
+
+    if test_sampling:
+        samples = mcmc(mean_dust, std_dust, Rminus1_stop=Rminus1_stop, Rminus1_cl_stop=Rminus1_cl_stop)
+        chi2 = -2 * loglike(samples.mean("a_BB_cmb"), samples.mean("a_BB_dust"))
+        ndof = len(vec_BB)
+        print(f"chi2: {chi2}, ndof: {ndof}")
+
+        chi2_list += [chi2]
+        a_cmb_list += [samples.mean("a_BB_cmb")]
+        a_dust_list += [samples.mean("a_BB_dust")]
+
+
+mean_vec, std_vec = np.mean(vec_ml_list, axis=0), np.std(vec_ml_list, axis=0)
+cov_mc = np.cov(vec_ml_list, rowvar=False)
+
+plt.figure(figsize=(12,8))
+plt.plot(lb_ml_BB, np.sqrt(cov_ml_BB.diagonal()))
+plt.plot(lb_ml_BB, std_vec)
+plt.savefig(f"{BB_dir}/error_comp_{cut}.png", dpi=300, bbox_inches="tight")
+plt.clf()
+plt.close()
+
+plt.figure(figsize=(12,8))
+plt.imshow(so_cov.cov2corr(cov_mc, remove_diag=True))
+plt.colorbar()
+plt.savefig(f"{BB_dir}/mc_correlation_{cut}.png", dpi=300, bbox_inches="tight")
+plt.clf()
+plt.close()
+
+plt.figure(figsize=(12,8))
+plt.ylim(-0.1, 0.25)
+plt.xlim(0, 4000)
+plt.plot(l_th, ps_dict["BB"])
+plt.errorbar(lb_ml_BB, mean_vec, std_vec)
+plt.ylabel(r"$D_{\ell}^{BB}$", fontsize=22)
+plt.xlabel(r"$\ell$", fontsize=22)
+plt.savefig(f"{BB_dir}/combined_BB_sim_{cut}.png", dpi=300, bbox_inches="tight")
+plt.clf()
+plt.close()
+
+if test_sampling:
+    mean, std = np.mean(a_cmb_list, axis=0), np.std(a_cmb_list, axis=0)
+    print("mean", mean, "std", std, "std MC", np.sqrt(samples.cov(["a_BB_cmb"])[0, 0]), "error on mean", std / np.sqrt(n_sims))
+    mean_chi2 = np.mean(chi2_list)
+    print("mean chi2", mean_chi2)
diff --git a/project/data_analysis/python/paper_plots/B_modes/results_BB_likelihood.py b/project/data_analysis/python/paper_plots/B_modes/results_BB_likelihood.py
new file mode 100644
index 00000000..e01c49d3
--- /dev/null
+++ b/project/data_analysis/python/paper_plots/B_modes/results_BB_likelihood.py
@@ -0,0 +1,242 @@
+"""
+This script computes the amplitude of the BB power spectrum of ACT DR6, using a dust prior from Planck 353 GHz.
+python results_BB_likelihood.py post_likelihood_PACT.dict
+"""
+
+from pspy import so_dict, pspy_utils, so_spectra, so_cov
+from pspipe_utils import covariance, pspipe_list, log, best_fits, external_data
+import numpy as np
+import pylab as plt
+import sys, os
+import scipy.stats as ss
+from getdist.mcsamples import loadMCSamples
+import getdist.plots as gdplt
+from cobaya.run import run
+from matplotlib import rcParams
+import matplotlib.ticker as ticker
+import B_modes_utils
+
+rcParams["xtick.labelsize"] = 16
+rcParams["ytick.labelsize"] = 16
+rcParams["axes.labelsize"] = 20
+rcParams["axes.titlesize"] = 20
+
+def loglike(a_BB_cmb, a_BB_dust):
+    """ compute the loglike for the given amplitude parameters """
+
+    theory = a_BB_cmb * vec_cmb_BB + a_BB_dust * vec_dust_BB_template
+    residual = vec_BB - theory
+    chi2 = residual @ i_cov_BB @ residual
+
+    return -0.5 * chi2
+
+def mcmc(mean_dust, std_dust, Rminus1_stop, Rminus1_cl_stop):
+
+    """ run the MCMC on the BB amplitudes; mean_dust and std_dust are priors from Planck BB 353 GHz """
+
+    info = {"likelihood": {"BB": loglike},
+            "params": {
+                "a_BB_cmb": {"prior": {"min": 0., "max": 2}, "latex": r"A_{BB}^{CMB}"},
+                "a_BB_dust": 
{"prior": {"dist": "norm", "loc": mean_dust,"scale": std_dust}, "latex": r"A_{BB}^{dust}"}}, + "sampler": {"mcmc": {"max_tries": 10**6, "Rminus1_stop": Rminus1_stop, "Rminus1_cl_stop": Rminus1_cl_stop}}, + "resume": False, + "output": f"{BB_dir}/mcmc/chain", "force": True} + + updated_info, sampler = run(info) + + samples = sampler.products(to_getdist=True, skip_samples=0.5)["sample"] + + return samples + +d = so_dict.so_dict() +d.read_from_file(sys.argv[1]) +log = log.get_logger(**d) + +cut = "post_unblinding" +Rminus1_stop = 0.01 +Rminus1_cl_stop = 0.02 + +cov_dir = "covariances" +spec_dir = "spectra_leak_corr_ab_corr" +sim_spec_dir = "sim_spectra" +tag = d["best_fit_tag"] + +BB_dir = f"results_BB{tag}" + +pspy_utils.create_directory(f"{BB_dir}/mcmc") + +surveys = d["surveys"] +type = d["type"] +lmax = d["lmax"] +binning_file = d["binning_file"] +cosmo_params = d["cosmo_params"] +accuracy_params = d["accuracy_params"] + +fg_norm = d["fg_norm"] +fg_params = d["fg_params"] +fg_components = d["fg_components"] +do_bandpass_integration = d["do_bandpass_integration"] +spectra = ["TT", "TE", "TB", "ET", "BT", "EE", "EB", "BE", "BB"] + +bin_lo, bin_hi, lb, bin_size = pspy_utils.read_binning_file(binning_file, lmax) +spec_name_list, nu_tag_list = pspipe_list.get_spec_name_list(d, delimiter="_", return_nu_tag=True) + + +passbands = {} +if do_bandpass_integration: + log.info("Doing bandpass integration") + +narrays, sv_list, ar_list = pspipe_list.get_arrays_list(d) +for sv, ar in zip(sv_list, ar_list): + + freq_info = d[f"freq_info_{sv}_{ar}"] + if do_bandpass_integration: + nu_ghz, pb = np.loadtxt(freq_info["passband"]).T + else: + nu_ghz, pb = np.array([freq_info["freq_tag"]]), np.array([1.]) + passbands[f"{sv}_{ar}"] = [nu_ghz, pb] + +l_th, ps_dict = pspy_utils.ps_from_params(cosmo_params, type, lmax + 500, **accuracy_params) +fg_dict = best_fits.get_foreground_dict(l_th, passbands, fg_components, fg_params, fg_norm) + +################################################################################################ + +spectra_cuts = B_modes_utils.get_spectra_cuts(cut, lmax) + +if cut == "pre_unblinding": + # list of new [bin_low, bin_max] for the final BB spectra + bin_scheme_edge = [[500, 750.5], [751, 1050.5], [1051, 1350.5], [1351, 1600.5], [1601, 2275.5], [2276, 9000]] +if cut == "post_unblinding": + bin_scheme_edge = [[500, 1200.5], [1201, 1500.5], [1501, 2000.5], [2001, 9000]] + # bin_scheme_edge = [[500, 750.5], [751, 1050.5], [1051, 1350.5], [1351, 1600.5], [1601, 2275.5], [2276, 9000]] + + +only_TT_map_set = ["dr6_pa4_f220"] + +bin_out_dict, indices = covariance.get_indices(bin_lo, + bin_hi, + lb, + spec_name_list, + spectra_cuts=spectra_cuts, + spectra_order=spectra, + selected_spectra=["BB"], + only_TT_map_set=only_TT_map_set) + +my_spectra = bin_out_dict.keys() + +################################################################################################ +cov_xar = np.load(f"{cov_dir}/x_ar_final_cov_data.npy") +cov_BB = cov_xar[np.ix_(indices, indices)] +i_cov_BB = np.linalg.inv(cov_BB) +corr_BB = so_cov.cov2corr(cov_BB, remove_diag=True) + +plt.figure(figsize=(12,8)) +plt.imshow(corr_BB) +plt.colorbar() +plt.savefig(f"{BB_dir}/full_correlation_{cut}.png", dpi=300, bbox_inches="tight") +plt.clf() +plt.close() + + +vec_dust, vec_cmb = B_modes_utils.get_model_vecs(l_th, ps_dict, fg_dict, lmax, binning_file, spec_name_list, spectra) +vec_dust_BB = vec_dust[indices] +vec_cmb_BB = vec_cmb[indices] +vec_dust_BB_template = vec_dust_BB / fg_params["a_gbb"] + +vec_xar = 
covariance.read_x_ar_spectra_vec(spec_dir,
+                                           spec_name_list,
+                                           "cross",
+                                           spectra_order=spectra,
+                                           type="Dl")
+
+vec_BB = vec_xar[indices]
+
+mean_dust, std_dust = fg_params["a_gbb"], 0.0084  # prior on the data galactic dust amplitude
+
+samples = mcmc(mean_dust, std_dust, Rminus1_stop=Rminus1_stop, Rminus1_cl_stop=Rminus1_cl_stop)
+gdplot = gdplt.get_subplot_plotter()
+gdplot.triangle_plot(samples, ["a_BB_cmb", "a_BB_dust"], filled=True, title_limit=1)
+plt.savefig(f"{BB_dir}/posterior_BB_{cut}.png", dpi=300, bbox_inches="tight")
+plt.clf()
+plt.close()
+
+chi2 = -2 * loglike(samples.mean("a_BB_cmb"), samples.mean("a_BB_dust"))
+ndof = len(vec_BB)
+print(f"chi2: {chi2}, ndof: {ndof}")
+
+lb_ml_BB, vec_ml_BB, cov_ml_BB = B_modes_utils.get_fg_sub_ML_solution_BB(lb,
+                                                                         vec_BB,
+                                                                         vec_dust_BB,
+                                                                         i_cov_BB,
+                                                                         bin_out_dict,
+                                                                         bin_scheme_edge,
+                                                                         fig_name=f"{BB_dir}/P_mat_all_BB.png")
+std_ml_BB = np.sqrt(cov_ml_BB.diagonal())
+
+plt.figure(figsize=(12,8))
+plt.imshow(so_cov.cov2corr(cov_ml_BB, remove_diag=True))
+plt.colorbar()
+plt.savefig(f"{BB_dir}/ml_correlation_{cut}.png", dpi=300, bbox_inches="tight")
+plt.clf()
+plt.close()
+
+### Now do the plotting
+
+add_BK = True
+add_sptpol = True
+
+fac_ell = -1.
+
+fig, ax = plt.subplots(figsize=(14,8))
+ax.plot(l_th, ps_dict["BB"] * l_th ** fac_ell, color="gray")
+ax.set_xlim(30, 4000)
+if fac_ell == -1:
+    ax.set_ylim(-2 * 10**-5, 2.1 * 10**-4)
+if fac_ell == 0:
+    ax.set_ylim(-0.05, 0.25)
+
+ax.errorbar(lb_ml_BB, vec_ml_BB * lb_ml_BB ** fac_ell, std_ml_BB * lb_ml_BB ** fac_ell, fmt="o", color="royalblue", label="ACT")
+if add_BK:
+    ax.semilogx()
+    lb_bicep, Db_bicep, err_bicep = external_data.get_bicep_BB_spectrum()
+    ax.errorbar(lb_bicep, Db_bicep * lb_bicep ** fac_ell, err_bicep * lb_bicep ** fac_ell, fmt="o", color="red", label="BICEP/Keck (2021)")
+if add_sptpol:
+    lb_sptpol, Db_sptpol, err_sptpol = external_data.get_sptpol_BB_spectrum()
+    ax.errorbar(lb_sptpol, Db_sptpol * lb_sptpol ** fac_ell, err_sptpol * lb_sptpol ** fac_ell, fmt="o", color="orange", label="SPTpol (2020)")
+
+ax.plot(l_th, ps_dict["BB"] * 0, linestyle="--", color="black")
+ax.legend(fontsize=22)
+ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%0.0e'))
+
+plt.ylabel(r"$\ell^{-1} D_{\ell}^{BB}$", fontsize=35)
+plt.xlabel(r"$\ell$", fontsize=35)
+plt.tight_layout()
+plt.savefig(f"{BB_dir}/combined_BB_ellfac{fac_ell}_{cut}.png", dpi=300, bbox_inches="tight")
+plt.clf()
+plt.close()
+
+# also plot individual spectra for the data
+for my_spec in bin_out_dict.keys():
+    s_name, spectrum = my_spec
+    id_spec, lb_spec = bin_out_dict[my_spec]
+    lb_, Db = so_spectra.read_ps(f"{spec_dir}/Dl_{s_name}_cross.dat", spectra=spectra)
+
+    plt.figure(figsize=(12,8))
+    plt.title(f"{my_spec}, min={np.min(lb_spec)}, max={np.max(lb_spec)}")
+    plt.plot(lb_, Db[spectrum], label="original spectrum")
+    plt.errorbar(lb_spec, vec_BB[id_spec], np.sqrt(cov_BB[np.ix_(id_spec, id_spec)].diagonal()), fmt=".", label="selected spectrum")
+    plt.plot(lb_spec, vec_dust_BB[id_spec] + vec_cmb_BB[id_spec], "--", color="gray", alpha=0.3, label="theory")
+    plt.ylim(-1, 1)
+    plt.legend()
+    plt.savefig(f"{BB_dir}/{spectrum}_{s_name}.png", bbox_inches="tight")
+    plt.clf()
+    plt.close()
+
+    res = vec_BB[id_spec] - vec_dust_BB[id_spec] - vec_cmb_BB[id_spec]
+    chi2 = res @ np.linalg.inv(cov_BB[np.ix_(id_spec, id_spec)]) @ res
+    ndof = len(lb_spec) - 1
+    pte = 1 - ss.chi2(ndof).cdf(chi2)
+    print(my_spec, f"chi2: {chi2}, ndof: {ndof}, pte: {pte}")
diff --git a/project/data_analysis/python/paper_plots/results_EB_likelihood.py b/project/data_analysis/python/paper_plots/B_modes/results_EB_likelihood.py
similarity 
index 53% rename from project/data_analysis/python/paper_plots/results_EB_likelihood.py rename to project/data_analysis/python/paper_plots/B_modes/results_EB_likelihood.py index 14431c49..b34e24f9 100644 --- a/project/data_analysis/python/paper_plots/results_EB_likelihood.py +++ b/project/data_analysis/python/paper_plots/B_modes/results_EB_likelihood.py @@ -9,8 +9,10 @@ import sys, os import scipy.stats as ss from cobaya.run import run -from getdist import plots +from getdist import plots, loadMCSamples, MCSamples import matplotlib as mpl +from matplotlib import cm +import B_modes_utils def get_vec_th_EB(alpha_pa5_f090, alpha_pa5_f150, alpha_pa6_f090, alpha_pa6_f150): @@ -19,10 +21,11 @@ def get_vec_th_EB(alpha_pa5_f090, alpha_pa5_f150, alpha_pa6_f090, alpha_pa6_f150 alpha["dr6_pa5_f150"] = alpha_pa5_f150 alpha["dr6_pa6_f090"] = alpha_pa6_f090 alpha["dr6_pa6_f150"] = alpha_pa6_f150 - vec_th_EB = [] for spec in my_spectra: spec_name, mode = spec + id_spec, lb_spec = bin_out_dict[spec] + id = np.where(lb >= lb_spec[0]) n1, n2 = spec_name.split("x") _, psth_rot = pol_angle.rot_theory_spectrum(lb, psth_b, alpha[n1], alpha[n2]) @@ -31,18 +34,23 @@ def get_vec_th_EB(alpha_pa5_f090, alpha_pa5_f150, alpha_pa6_f090, alpha_pa6_f150 return vec_th_EB def loglike(alpha_pa5_f090, alpha_pa5_f150, alpha_pa6_f090, alpha_pa6_f150): + vec_th_EB = get_vec_th_EB(alpha_pa5_f090, alpha_pa5_f150, alpha_pa6_f090, alpha_pa6_f150) res = vec_EB - vec_th_EB chi2 = res @ i_cov @ res return -0.5 * chi2 - + d = so_dict.so_dict() d.read_from_file(sys.argv[1]) +cut = "pre_unblinding" +Rminus1_stop = 0.01 +Rminus1_cl_stop = 0.02 + cov_dir = "covariances" -spec_dir = "spectra_leak_corr_ab_corr_cal" -result_dir = "plots/pol_angle" +spec_dir = "spectra_leak_corr_ab_corr" +result_dir = "results_EB" bestfit_dir = "best_fits" pspy_utils.create_directory(result_dir) @@ -52,102 +60,95 @@ def loglike(alpha_pa5_f090, alpha_pa5_f150, alpha_pa6_f090, alpha_pa6_f150): lmax = d["lmax"] binning_file = d["binning_file"] spectra = ["TT", "TE", "TB", "ET", "BT", "EE", "EB", "BE", "BB"] -lmin = 475 bin_lo, bin_hi, lb, bin_size = pspy_utils.read_binning_file(binning_file, lmax) -id = np.where(bin_lo > lmin) - spec_name_list, nu_tag_list = pspipe_list.get_spec_name_list(d, delimiter="_", return_nu_tag=True) - -vec_xar = covariance.read_x_ar_spectra_vec(spec_dir, - spec_name_list, - "cross", - spectra_order = spectra, - type="Dl") - -cov_xar = np.load(f"{cov_dir}/x_ar_final_cov_data.npy") - - ################################################################################################ # Start at l=500, remove pa4_f220 pol and only include EB/BE -spectra_cuts = { - "dr6_pa4_f220": dict(T=[lmax, lmax], P=[lmax, lmax]), - "dr6_pa5_f150": dict(T=[lmin, lmax], P=[lmin, lmax]), - "dr6_pa6_f150": dict(T=[lmin, lmax], P=[lmin, lmax]), - "dr6_pa5_f090": dict(T=[lmin, lmax], P=[lmin, lmax]), - "dr6_pa6_f090": dict(T=[lmin, lmax], P=[lmin, lmax]), -} +spectra_cuts = B_modes_utils.get_spectra_cuts(cut, lmax) + +only_TT_map_set = ["dr6_pa4_f220"] bin_out_dict, indices = covariance.get_indices(bin_lo, bin_hi, lb, spec_name_list, spectra_cuts=spectra_cuts, spectra_order=spectra, - selected_spectra=["EB", "BE"]) - -my_spectra = bin_out_dict.keys() + selected_spectra=["EB", "BE"], + only_TT_map_set=only_TT_map_set) +my_spectra = bin_out_dict.keys() ################################################################################################ -# Select and plot the corresponding correlation matrix +cov_xar = np.load(f"{cov_dir}/x_ar_final_cov_data.npy") 
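+# the stacked cross-array spectra vector follows the same ordering as the
+# covariance, so both can be restricted to the EB/BE block with the indices
+# returned by get_indices above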
+vec_xar = covariance.read_x_ar_spectra_vec(spec_dir, + spec_name_list, + "cross", + spectra_order = spectra, + type="Dl") + cov_EB = cov_xar[np.ix_(indices, indices)] vec_EB = vec_xar[indices] i_cov = np.linalg.inv(cov_EB) +corr_EB = so_cov.cov2corr(cov_EB, remove_diag=True) -n_spec = len(my_spectra) -n_bins = len(lb[id]) -name_list = [] -for my_spec in my_spectra: - name, spectrum = my_spec +tick_start = 0 +tick_loc_list, name_list = [], [] +for spec in my_spectra: + name, spectrum = spec + id_spec, lb_spec = bin_out_dict[spec] + tick_loc_list += [len(lb_spec)/2 + tick_start] + tick_start += len(lb_spec) name = name.replace("dr6_", "") name_list += [f"{spectrum} {name}"] # plot the EB/BE correlation matrix plt.figure(figsize=(16, 16)) -plt.imshow(so_cov.cov2corr(cov_EB)) -plt.xticks(ticks=np.arange(n_spec) * n_bins + n_bins/2, labels = name_list, rotation=90, fontsize=20) -plt.yticks(ticks=np.arange(n_spec) * n_bins + n_bins/2, labels = name_list, fontsize=20) +plt.imshow(corr_EB) +plt.xticks(ticks=tick_loc_list, labels = name_list, rotation=90, fontsize=20) +plt.yticks(ticks=tick_loc_list, labels = name_list, fontsize=20) plt.colorbar() plt.tight_layout() plt.savefig(f"{result_dir}/correlation_EB.png") plt.clf() plt.close() - ################################################################################################ # Run a MCMC chain and plot the posterior distribution lth, psth = so_spectra.read_ps(bestfit_dir + f"/cmb.dat", spectra=spectra) -lb_, psth_b = so_spectra.bin_spectra(lth, - psth, - d["binning_file"], - d["lmax"], - type="Cl", - spectra=spectra) +_, psth_b = so_spectra.bin_spectra(lth, + psth, + d["binning_file"], + d["lmax"], + type="Cl", + spectra=spectra) + +sample = True roots = ["mcmc"] params = ["alpha_pa5_f090", "alpha_pa5_f150", "alpha_pa6_f090", "alpha_pa6_f150"] -info = {} -info["likelihood"] = { "my_like": loglike} -info["params"] = { "alpha_pa5_f090": { "prior": { "min": -0.5, "max": 0.5}, "ref": 0, "proposal": 0.005, "latex": r"\alpha_{pa5 f090}"}, - "alpha_pa5_f150": { "prior": { "min": -0.5, "max": 0.5}, "ref": 0, "proposal": 0.005, "latex": r"\alpha_{pa5 f150}"}, - "alpha_pa6_f090": { "prior": { "min": -0.5, "max": 0.5}, "ref": 0, "proposal": 0.005, "latex": r"\alpha_{pa6 f090}"}, - "alpha_pa6_f150": { "prior": { "min": -0.5, "max": 0.5}, "ref": 0, "proposal": 0.005, "latex": r"\alpha_{pa6 f150}"}} -info["sampler"] = { "mcmc": { "max_tries": 1e6, "Rminus1_stop": 0.01, "Rminus1_cl_stop": 0.01}} - -info["output"] = f"{result_dir}/chains/{roots[0]}" -info["force"] = True -info["debug"] = False -updated_info, sampler = run(info) - - +if sample: + info = {} + info["likelihood"] = { "my_like": loglike} + info["params"] = { "alpha_pa5_f090": { "prior": { "min": -0.5, "max": 0.5}, "ref": 0, "proposal": 0.005, "latex": r"\alpha_{pa5 f090}"}, + "alpha_pa5_f150": { "prior": { "min": -0.5, "max": 0.5}, "ref": 0, "proposal": 0.005, "latex": r"\alpha_{pa5 f150}"}, + "alpha_pa6_f090": { "prior": { "min": -0.5, "max": 0.5}, "ref": 0, "proposal": 0.005, "latex": r"\alpha_{pa6 f090}"}, + "alpha_pa6_f150": { "prior": { "min": -0.5, "max": 0.5}, "ref": 0, "proposal": 0.005, "latex": r"\alpha_{pa6 f150}"}} + info["sampler"] = {"mcmc": { "max_tries": 1e6, "Rminus1_stop": Rminus1_stop, "Rminus1_cl_stop": Rminus1_cl_stop}} + info["output"] = f"{result_dir}/chains_{cut}/{roots[0]}" + info["force"] = True + info["debug"] = False + updated_info, sampler = run(info) + +burnin = 0.5 g = plots.get_subplot_plotter( - chain_dir=os.path.join(os.getcwd(), f"{result_dir}/chains"), - 
analysis_settings={"ignore_rows": 0.5}, + chain_dir=os.path.join(os.getcwd(), f"{result_dir}/chains_{cut}"), + analysis_settings={"ignore_rows": burnin}, ) kwargs = dict(colors=["k"], lws=[1]) g.triangle_plot(roots, params, **kwargs, diag1d_kwargs=kwargs) @@ -156,39 +157,32 @@ def loglike(alpha_pa5_f090, alpha_pa5_f150, alpha_pa6_f090, alpha_pa6_f150): with mpl.rc_context(rc={"text.usetex": True}): table = g.sample_analyser.mcsamples[roots[0]].getTable(limit=1, paramList=params) kwargs = dict(size=15, ha="right") - g.subplots[0, 0].text(1.2, 0, table.tableTex().replace("\n", ""), **kwargs) + g.subplots[0, 0].text(1.2, 0.2, table.tableTex().replace("\n", ""), **kwargs) -plt.savefig(f"{result_dir}/pol_angles_chain_results.pdf") +plt.savefig(f"{result_dir}/pol_angles_chain_results_{cut}.pdf") plt.clf() plt.close() -samples = sampler.products(to_getdist=True, skip_samples=0.5)["sample"] -min_chi2 = np.min(samples.getParams().chi2) +samples = loadMCSamples(f"{result_dir}/chains_{cut}/{roots[0]}", settings={"ignore_rows": burnin}) -ndof = len(vec_EB) - 4 -pte = 1 - ss.chi2(ndof).cdf(min_chi2) -print(f"min chi2 = {min_chi2}, pte = {pte}") - - - -################################################################################################ -# now plot all EB power spectrum pre and post correction using the mean posterior angles - -mean = {} -for par_name in params: - mean[par_name] = samples.mean(par_name) nbin_tot = len(vec_EB) bin = np.arange(nbin_tot) error_EB = np.sqrt(np.diagonal(cov_EB)) -vec_EB_corr = vec_EB - get_vec_th_EB(mean["alpha_pa5_f090"], - mean["alpha_pa5_f150"], - mean["alpha_pa6_f090"], - mean["alpha_pa6_f150"]) +vec_EB_corr = vec_EB - get_vec_th_EB(samples.mean("alpha_pa5_f090"), + samples.mean("alpha_pa5_f150"), + samples.mean("alpha_pa6_f090"), + samples.mean("alpha_pa6_f150")) + chi2_precorr = vec_EB @ i_cov @ vec_EB chi2_postcorr = vec_EB_corr @ i_cov @ vec_EB_corr +min_chi2 = np.min(samples.getParams().chi2) +ndof = len(vec_EB) - 4 +pte = 1 - ss.chi2(ndof).cdf(min_chi2) +print(f"min chi2 = {min_chi2}, pte = {pte}") + PTE_precorr = 1 - ss.chi2(nbin_tot).cdf(chi2_precorr) PTE_postcorr = 1 - ss.chi2(nbin_tot - 4).cdf(chi2_postcorr) @@ -197,9 +191,41 @@ def loglike(alpha_pa5_f090, alpha_pa5_f150, alpha_pa6_f090, alpha_pa6_f150): plt.plot(bin, vec_EB / error_EB, label=r"pre-correction $\chi^{2}$=%.02f, $n_{DoF}$ = %d, PTE = %.04f" % (chi2_precorr, nbin_tot, PTE_precorr), color="gray") plt.plot(bin, vec_EB_corr / error_EB, label=r"post-correction $\chi^{2}$=%.02f, $n_{DoF}$ = %d, PTE = %.04f" % (chi2_postcorr, nbin_tot - 4, PTE_postcorr), color="orange", alpha=0.6) plt.plot(bin, bin*0, "--", color="gray") -plt.xticks(ticks=np.arange(n_spec) * n_bins + n_bins/2, labels = name_list, rotation=90, fontsize=20) +plt.xticks(ticks=tick_loc_list, labels = name_list, rotation=90, fontsize=20) plt.legend(fontsize=18) plt.tight_layout() -plt.savefig(f"{result_dir}/vec_EB.png") +plt.savefig(f"{result_dir}/vec_EB_{cut}.png") +plt.clf() +plt.close() + + +lb_ml = B_modes_utils.get_ml_bins(bin_out_dict, lb) +P_mat = B_modes_utils.get_P_mat(len(vec_EB), lb_ml, bin_out_dict, fig_name=f"{result_dir}/P_mat_all_EB.png") + +cov_ml = covariance.get_max_likelihood_cov(P_mat, + i_cov, + force_sim = True, + check_pos_def = True) + +vec_ml_corr = covariance.max_likelihood_spectra(cov_ml, + i_cov, + P_mat, + vec_EB_corr) + + +i_cov_ml = np.linalg.inv(cov_ml) +error_ml = np.sqrt(cov_ml.diagonal()) + +chi2_ml = vec_ml_corr @ i_cov_ml @ vec_ml_corr +PTE_ml = 1 - ss.chi2(len(lb_ml)).cdf(chi2_ml) + 
+plt.figure(figsize=(12,8))
+plt.ylabel(r"$D_{\ell}^{EB}$", fontsize=22)
+plt.xlabel(r"$\ell$", fontsize=22)
+plt.plot(lb_ml, lb_ml * 0)
+plt.errorbar(lb_ml, vec_ml_corr, error_ml, fmt=".", label=f"p-value {PTE_ml:.3f}")
+plt.legend()
+plt.savefig(f"{result_dir}/vec_ml_{cut}.png")
 plt.clf()
 plt.close()
+
diff --git a/project/data_analysis/python/paper_plots/results_combined_TB.py b/project/data_analysis/python/paper_plots/B_modes/results_combined_TB.py
similarity index 100%
rename from project/data_analysis/python/paper_plots/results_combined_TB.py
rename to project/data_analysis/python/paper_plots/B_modes/results_combined_TB.py
diff --git a/project/data_analysis/python/paper_plots/B_modes/results_plot_pol_angle.py b/project/data_analysis/python/paper_plots/B_modes/results_plot_pol_angle.py
new file mode 100644
index 00000000..50505dc6
--- /dev/null
+++ b/project/data_analysis/python/paper_plots/B_modes/results_plot_pol_angle.py
@@ -0,0 +1,178 @@
+"""
+This script produces a number of plots of the polarization angles.
+"""
+
+from pspy import so_dict, pspy_utils, so_spectra, so_cov
+from pspipe_utils import covariance, pspipe_list, pol_angle
+import numpy as np
+import pylab as plt
+import sys, os
+from cobaya.run import run
+from getdist import plots, loadMCSamples, MCSamples
+import matplotlib as mpl
+from matplotlib import cm
+import scipy.stats as stats
+from matplotlib import rcParams
+import pickle
+
+
+rcParams["xtick.labelsize"] = 16
+rcParams["ytick.labelsize"] = 16
+rcParams["axes.labelsize"] = 20
+rcParams["axes.titlesize"] = 20
+
+def gaussian(mean, std):
+    x = np.linspace(-1, 1, 1000)
+    gauss = stats.norm.pdf(x, mean, std)
+    gauss /= np.max(gauss)
+    return x, gauss
+
+d = so_dict.so_dict()
+d.read_from_file(sys.argv[1])
+
+result_dir = "results_EB"
+roots = ["mcmc"]
+params = ["alpha_pa5_f090", "alpha_pa5_f150", "alpha_pa6_f090", "alpha_pa6_f150"]
+
+syst_error = 0.1
+cut = "post_unblinding"
+burnin = 0.5
+samples = loadMCSamples(f"{result_dir}/chains_{cut}/{roots[0]}", settings={"ignore_rows": burnin})
+
+
+latex_par_name = {}
+latex_par_name["alpha_pa5_f090"] = r"\alpha_{pa5 f090}"
+latex_par_name["alpha_pa6_f090"] = r"\alpha_{pa6 f090}"
+latex_par_name["alpha_pa5_f150"] = r"\alpha_{pa5 f150}"
+latex_par_name["alpha_pa6_f150"] = r"\alpha_{pa6 f150}"
+latex_par_name["beta_pa5"] = r"\beta_{\rm pa5}"
+latex_par_name["beta_pa6"] = r"\beta_{\rm pa6}"
+latex_par_name["beta_ACT"] = r"\beta_{\rm ACT}"
+latex_par_name["beta_ACT+komatsu"] = r"\beta_{\rm ACT + Komatsu}"
+latex_par_name["beta_komatsu"] = r"\beta_{\rm Komatsu}"
+
+
+mean, std = {}, {}
+all_angles = {}
+
+plt.figure(figsize=(14,8))
+mean_ml_alpha = 0
+cov_ml_alpha = 0
+for par_name in params:
+    mean[par_name] = samples.mean(par_name)
+    std[par_name] = samples.std(par_name)
+    print(par_name, mean[par_name], std[par_name])
+    x, gauss = gaussian(mean[par_name], std[par_name])
+    plt.plot(x, gauss, label=f"${latex_par_name[par_name]}$ = {mean[par_name]:.3f} $\\pm$ {std[par_name]:.3f}")
+
+    cov_ml_alpha += 1 / std[par_name] ** 2
+    mean_ml_alpha += mean[par_name] / std[par_name] ** 2
+
+    all_angles[par_name, "mean"] = 
mean[par_name] + all_angles[par_name, "std"] = std[par_name] + +plt.xlim(-0.3, 0.8) +plt.xlabel(r"$\alpha$", fontsize=25) +plt.legend(fontsize=15) +plt.tight_layout() +plt.savefig(f"{result_dir}/alpha_ACT_{cut}.png") +plt.clf() +plt.close() + +cov_ml_alpha = 1 / cov_ml_alpha +mean_ml_alpha = cov_ml_alpha * mean_ml_alpha +std_ml_alpha = np.sqrt(cov_ml_alpha) + +all_angles["alpha_all", "mean"] = mean_ml_alpha +all_angles["alpha_all", "std"] = std_ml_alpha + + +cov_90 = 1 / (1 / std["alpha_pa5_f090"] ** 2 + 1 / std["alpha_pa6_f090"] ** 2) +mean_90 = cov_90 * (mean["alpha_pa5_f090"] / std["alpha_pa5_f090"] ** 2 + mean["alpha_pa6_f090"] / std["alpha_pa6_f090"] ** 2) +std_90 = np.sqrt(cov_90) + + +cov_150 = 1 / (1 / std["alpha_pa5_f150"] ** 2 + 1 / std["alpha_pa6_f150"] ** 2) +mean_150 = cov_150 * (mean["alpha_pa5_f150"] / std["alpha_pa5_f150"] ** 2 + mean["alpha_pa6_f150"] / std["alpha_pa6_f150"] ** 2) +std_150 = np.sqrt(cov_150) + + +all_angles["alpha_90", "mean"] = mean_90 +all_angles["alpha_90", "std"] = std_90 + +all_angles["alpha_150", "mean"] = mean_150 +all_angles["alpha_150", "std"] = std_150 + + + + +for c1, par_name1 in enumerate(params): + for c2, par_name2 in enumerate(params): + if c1 >= c2: continue + diff = (mean[par_name1] - mean[par_name2])/np.sqrt(std[par_name1] ** 2 + std[par_name2] ** 2) + print(f"{par_name1} - {par_name2}, {diff} sigma") + + +plt.figure(figsize=(14,8)) +mean_comb, std_comb = {}, {} +for det_waf in ["pa5", "pa6"]: + cov_comb = 1 / (1 / std[f"alpha_{det_waf}_f090"] ** 2 + 1 / std[f"alpha_{det_waf}_f150"] ** 2 ) + mean_comb[det_waf] = cov_comb * ( mean[f"alpha_{det_waf}_f090"] / std[f"alpha_{det_waf}_f090"] ** 2 + mean[f"alpha_{det_waf}_f150"] / std[f"alpha_{det_waf}_f150"] ** 2) + std_comb[det_waf] = np.sqrt(cov_comb) + print(det_waf, mean_comb[det_waf], std_comb[det_waf]) + std_comb[det_waf] = np.sqrt(std_comb[det_waf] ** 2 + syst_error ** 2) # add sys error + x, gauss = gaussian(mean_comb[det_waf], std_comb[det_waf] ) + + plt.plot(x, gauss, label=f"${latex_par_name[f'beta_{det_waf}']}$ = {mean_comb[det_waf]:.3f} $\pm$ {std_comb[det_waf]:.3f}", linestyle="--", alpha=0.5) + + all_angles[f"beta_{det_waf}", "mean"] = mean_comb[det_waf] + all_angles[f"beta_{det_waf}", "std"] = std_comb[det_waf] + + + +cov_ACT = 1 / ( 1 / std_comb["pa5"] ** 2 + 1 / std_comb["pa6"] ** 2 ) +mean_ACT = cov_ACT * (mean_comb["pa5"] / std_comb["pa5"] ** 2 + mean_comb["pa6"] / std_comb["pa6"] ** 2) +std_ACT = np.sqrt(cov_ACT) + + +x, gauss = gaussian(mean_ACT, std_ACT) +plt.plot(x, gauss, label=f"${latex_par_name[f'beta_ACT']}$ = {mean_ACT:.3f} $\pm$ {std_ACT:.3f}", color="gray") + +mean_komatsu = 0.342 # https://arxiv.org/abs/2205.13962 +std_komatsu = 0.094 + +cov_ACT_komatsu = 1 / ( 1 / std_ACT ** 2 + 1 / std_komatsu ** 2 ) +mean_ACT_komatsu = cov_ACT_komatsu * (mean_ACT / std_ACT ** 2 + mean_komatsu / std_komatsu ** 2) +std_ACT_komatsu = np.sqrt(cov_ACT_komatsu) + + +x, gauss = gaussian(mean_komatsu, std_komatsu) +plt.plot(x, gauss, label=f"${latex_par_name[f'beta_komatsu']}$ = {mean_komatsu:.3f} $\pm$ {std_komatsu:.3f}") + +x, gauss = gaussian(mean_ACT_komatsu, std_ACT_komatsu) +plt.plot(x, gauss, label=f"${latex_par_name[f'beta_ACT+komatsu']}$ = {mean_ACT_komatsu:.3f} $\pm$ {std_ACT_komatsu:.3f}") +plt.xlim(-0.3, 0.8) +plt.legend(fontsize=15) +plt.xlabel(r"$\beta$", fontsize=25) +plt.tight_layout() +plt.savefig(f"{result_dir}/beta_{cut}.png") +plt.clf() +plt.close() + + +all_angles[f"beta_ACT", "mean"] = mean_ACT +all_angles[f"beta_ACT", "std"] = std_ACT 
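+
+# the combinations above are standard inverse-variance averages: for two
+# independent estimates (m1, s1) and (m2, s2),
+#     s_comb**2 = 1 / (1 / s1**2 + 1 / s2**2)
+#     m_comb = s_comb**2 * (m1 / s1**2 + m2 / s2**2)
+# which is what is applied to beta_ACT and the Komatsu measurement above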
+all_angles[f"beta_komatsu", "mean"] = mean_komatsu +all_angles[f"beta_komatsu", "std"] = std_komatsu +all_angles[f"beta_ACT+komatsu", "mean"] = mean_ACT_komatsu +all_angles[f"beta_ACT+komatsu", "std"] = std_ACT_komatsu + + +with open(f"{result_dir}/angle_{cut}.pkl", "wb") as fp: + pickle.dump(all_angles, fp) + + diff --git a/project/data_analysis/python/paper_plots/results_BB_likelihood.py b/project/data_analysis/python/paper_plots/results_BB_likelihood.py deleted file mode 100644 index bf00a79c..00000000 --- a/project/data_analysis/python/paper_plots/results_BB_likelihood.py +++ /dev/null @@ -1,437 +0,0 @@ -""" -This script compute the amplitude of the BB power spectra of ACT DR6 using a dust prior from Planck 353 -""" - -from pspy import so_dict, pspy_utils, so_spectra, so_cov -from pspipe_utils import covariance, pspipe_list, log, best_fits, external_data -import numpy as np -import pylab as plt -import sys, os -import scipy.stats as ss -from getdist.mcsamples import loadMCSamples -import getdist.plots as gdplt -from cobaya.run import run -import scipy.stats as ss -from matplotlib import rcParams - - -rcParams["xtick.labelsize"] = 16 -rcParams["ytick.labelsize"] = 16 -rcParams["axes.labelsize"] = 20 -rcParams["axes.titlesize"] = 20 - - -def get_and_select_data_vec(spec_dir, - spec_tag, - spec_name_list, - spectra, - type, - indices): - """ get the selected data vector """ - - vec_xar = covariance.read_x_ar_spectra_vec(spec_dir, - spec_name_list, - spec_tag, - spectra_order = spectra, - type=type) - - sub_vec = vec_xar[indices] - - return sub_vec - -def get_and_select_model_vecs(cosmo_params, - accuracy_params, - fg_components, - fg_params, - fg_norm, - passbands, - type, - lmax, - binning_file, - spec_name_list, - spectra, - indices): - - """ get the selected model vector for CMB and fg """ - - log.info(f"Computing CMB spectra with cosmological parameters: {cosmo_params}") - l_th, ps_dict = pspy_utils.ps_from_params(cosmo_params, type, lmax + 500, **accuracy_params) - - log.info("Getting foregrounds contribution") - - fg_dict = best_fits.get_foreground_dict(l_th, passbands, fg_components, fg_params, fg_norm) - - vec_cmb, vec_fg = [], [] - plt.figure(figsize=(12,10)) - for spec in spectra: - lb, ps_b = pspy_utils.naive_binning(l_th, ps_dict[spec], binning_file, lmax) - - if spec == "BB": - plt.plot(l_th, ps_dict["BB"], color="gray") - - for spec_name in spec_name_list: - na, nb = spec_name.split("x") - fg = fg_dict["bb", "all", na, nb] - lb, fg_b = pspy_utils.naive_binning(l_th, fg, binning_file, lmax) - if (spec == "ET" or spec == "BT" or spec == "BE") & (na == nb): continue - vec_fg = np.append(vec_fg, fg_b) - vec_cmb = np.append(vec_cmb, ps_b) - - if (spec == "BB") & ("pa4" not in spec_name): - plt.plot(l_th, fg, label = spec_name) - plt.ylim(0, 0.15) - plt.xlim(0, 4000) - plt.ylabel(r"$D_{\ell}^{BB}$", fontsize=22) - plt.xlabel(r"$\ell$", fontsize=22) - plt.legend(fontsize=12) - plt.savefig(f"{BB_dir}/foreground.png", bbox_inches="tight") - plt.clf() - plt.close() - - - vec_fg_BB = vec_fg[indices] - vec_cmb_BB = vec_cmb[indices] - - return vec_fg_BB, vec_cmb_BB - -def get_ML_solution(vec_data_BB, vec_fg_BB, i_cov_BB, n_spec, n_bins, meta_bin_scheme): - """ this project all BB spectra into one with a sparser binning scheme defined by meta_bin_scheme""" - - def get_P_mat(n_spec, n_bins, meta_bin_scheme): - - new_n_bins = len(meta_bin_scheme) - sub_P_mat = np.zeros((n_bins, new_n_bins)) - for i in range(new_n_bins): - if i == 0: - not_zero_loc = np.arange(meta_bin_scheme[i]) - else: - 
not_zero_loc = np.arange(meta_bin_scheme[i]) + np.max(not_zero_loc) + 1 - sub_P_mat[not_zero_loc, i] = 1 - - full_P_mat = np.zeros((n_bins * n_spec, new_n_bins)) - for i_spec in range(n_spec): - full_P_mat[i_spec * n_bins: (i_spec + 1) * n_bins, :] = sub_P_mat - - return full_P_mat - - full_P_mat = get_P_mat(n_spec, n_bins, meta_bin_scheme) - - plt.figure(figsize=(12, 12)) - plt.imshow(full_P_mat) - plt.gca().set_aspect(0.02) - plt.savefig(f"{BB_dir}/P_mat.png", dpi=300, bbox_inches="tight") - plt.clf() - plt.close() - - cov_ML_BB = covariance.get_max_likelihood_cov(full_P_mat, - i_cov_BB, - force_sim = True, - check_pos_def = True) - - plt.figure(figsize=(12,8)) - plt.imshow(so_cov.cov2corr(cov_ML_BB, remove_diag=True)) - plt.colorbar() - plt.savefig(f"{BB_dir}/correlation.png", dpi=300, bbox_inches="tight") - plt.clf() - plt.close() - - vec_ML_BB = covariance.max_likelihood_spectra(cov_ML_BB, - i_cov_BB, - full_P_mat, - vec_data_BB - vec_fg_BB) - - lb_vec = np.tile(lb, n_spec) - lb_ML_BB = covariance.max_likelihood_spectra(cov_ML_BB, - i_cov_BB, - full_P_mat, - lb_vec) - - return lb_ML_BB, vec_ML_BB, cov_ML_BB - -def loglike(a_BB_cmb, a_BB_dust): - """ compute the loglike according to the parameters """ - - theory = a_BB_cmb * vec_cmb_BB + a_BB_dust * vec_fg_BB_template - residual = vec_data_BB - theory - chi2 = residual @ i_cov_BB @ residual - - return -0.5 * chi2 - -def mcmc(mean_dust, std_dust, Rminus1_stop, Rminus1_cl_stop): - - """ the MCMC on the BB amplitude, mean_dust and std_dust are priors from Planck BB 353 GHz """ - - info = {"likelihood": {"BB": loglike}, - "params": { - "a_BB_cmb": {"prior": {"min": 0., "max": 2}, "latex": r"A_{BB}^{CMB}"}, - "a_BB_dust": {"prior": {"dist": "norm", "loc": mean_dust,"scale": std_dust}, "latex": r"A_{BB}^{dust}"}}, - "sampler": {"mcmc": {"max_tries": 10**6, "Rminus1_stop": Rminus1_stop, "Rminus1_cl_stop": Rminus1_cl_stop}}, - "resume": False, - "output": f"{BB_dir}/mcmc/chain", "force": True} - - updated_info, sampler = run(info) - - samples = sampler.products(to_getdist=True, skip_samples=0.5)["sample"] - - return samples - - -d = so_dict.so_dict() -d.read_from_file(sys.argv[1]) -log = log.get_logger(**d) - -cov_dir = "covariances" -spec_dir = "spectra_leak_corr" -sim_spec_dir = "sim_spectra" -BB_dir = "results_BB" - -pspy_utils.create_directory(f"{BB_dir}/mcmc") - - -surveys = d["surveys"] -type = d["type"] -lmax = d["lmax"] -binning_file = d["binning_file"] -cosmo_params = d["cosmo_params"] -accuracy_params = d["accuracy_params"] - - -fg_norm = d["fg_norm"] -fg_params = d["fg_params"] -fg_components = d["fg_components"] -do_bandpass_integration = d["do_bandpass_integration"] -spectra = ["TT", "TE", "TB", "ET", "BT", "EE", "EB", "BE", "BB"] -lmin = 475 - -l_th, ps_dict = pspy_utils.ps_from_params(cosmo_params, type, lmax + 500, **accuracy_params) - -bin_lo, bin_hi, lb, bin_size = pspy_utils.read_binning_file(binning_file, lmax) -spec_name_list, nu_tag_list = pspipe_list.get_spec_name_list(d, delimiter="_", return_nu_tag=True) - - -passbands = {} -if do_bandpass_integration: - log.info("Doing bandpass integration") - -narrays, sv_list, ar_list = pspipe_list.get_arrays_list(d) -for sv, ar in zip(sv_list, ar_list): - - freq_info = d[f"freq_info_{sv}_{ar}"] - if do_bandpass_integration: - nu_ghz, pb = np.loadtxt(freq_info["passband"]).T - else: - nu_ghz, pb = np.array([freq_info["freq_tag"]]), np.array([1.]) - passbands[f"{sv}_{ar}"] = [nu_ghz, pb] - 
-################################################################################################ -# Start at l=500, remove pa4_f220 pol and only include TB/BT - -selected_spectra = ["BB"] -spectra_cuts = { - "dr6_pa4_f220": dict(T=[lmax, lmax], P=[lmax, lmax]), - "dr6_pa5_f150": dict(T=[lmin, lmax], P=[lmin, lmax]), - "dr6_pa6_f150": dict(T=[lmin, lmax], P=[lmin, lmax]), - "dr6_pa5_f090": dict(T=[lmin, lmax], P=[lmin, lmax]), - "dr6_pa6_f090": dict(T=[lmin, lmax], P=[lmin, lmax]), -} - -only_TT_map_set = ["dr6_pa4_f220"] - -bin_out_dict, indices = covariance.get_indices(bin_lo, - bin_hi, - lb, - spec_name_list, - spectra_cuts=spectra_cuts, - spectra_order=spectra, - selected_spectra=selected_spectra, - only_TT_map_set=only_TT_map_set) - -my_spectra = bin_out_dict.keys() -################################################################################################ - -n_spec = len(my_spectra) - -id = np.where(bin_lo>lmin) -bin_lo, bin_hi, lb, bin_size = bin_lo[id], bin_hi[id], lb[id], bin_size[id] -n_bins = len(bin_hi) -print(n_bins) - - -cov_xar = np.load(f"{cov_dir}/x_ar_final_cov_data.npy") - -cov_BB = cov_xar[np.ix_(indices, indices)] - -plt.figure(figsize=(12,8)) -plt.imshow(so_cov.cov2corr(cov_BB, remove_diag=True)) -plt.colorbar() -plt.savefig(f"{BB_dir}/full_correlation.png", dpi=300, bbox_inches="tight") -plt.clf() -plt.close() - -i_cov_BB = np.linalg.inv(cov_BB) - -vec_fg_BB, vec_cmb_BB = get_and_select_model_vecs(cosmo_params, - accuracy_params, - fg_components, - fg_params, - fg_norm, - passbands, - type, - lmax, - binning_file, - spec_name_list, - spectra, - indices) - -# create a fg template that A_BB_dust multiply -vec_fg_BB_template = vec_fg_BB / fg_params["a_gbb"] - -# we will rebin according the this scheme -#(the entry of the list correspond to the number of old bin - -meta_bin_scheme = [3, 3, 3, 3, 3, 4, 4, 11, 15] -print(np.sum(meta_bin_scheme), n_bins) -assert(np.sum(meta_bin_scheme) == n_bins) - -sim = False -if sim == True: - n_sims = 100 - mean_dust, std_dust = 0.114, 0.0084 #prior on sim gal amplitude - - a_BB_cmb_list, vec_ml_BB_list = [], [] - cov_mc = 0 - - for iii in range(n_sims): - vec_data_BB = get_and_select_data_vec(sim_spec_dir, f"cross_{iii:05d}", spec_name_list, spectra, type, indices) - samples = mcmc(mean_dust, std_dust, Rminus1_stop=0.03, Rminus1_cl_stop=0.03) - - a_BB_cmb_list += [samples.mean("a_BB_cmb")] - - lb_ml_BB, vec_ml_BB, cov_ml_BB = get_ML_solution(vec_data_BB, vec_fg_BB, i_cov_BB, n_spec, n_bins, meta_bin_scheme) - vec_ml_BB_list += [vec_ml_BB] - - if iii == 0: - gdplot = gdplt.get_subplot_plotter() - gdplot.triangle_plot(samples, ["a_BB_cmb", "a_BB_dust"], filled=True, title_limit=1) - plt.savefig(f"{BB_dir}/posterior_sim_BB.png", dpi=300, bbox_inches="tight") - plt.clf() - plt.close() - - - cov_mc += np.outer(vec_ml_BB, vec_ml_BB) - - mean, std = np.mean(a_BB_cmb_list, axis=0), np.std(a_BB_cmb_list, axis=0) - mean_vec, std_vec = np.mean(vec_ml_BB_list, axis=0), np.std(vec_ml_BB_list, axis=0) - - print("mean", mean, "std", std, "std MC", np.sqrt(samples.cov(["a_BB_cmb"])[0, 0]), std/np.sqrt(n_sims)) - - - cov_mc = cov_mc/n_sims - np.outer(mean_vec, mean_vec) - - plt.figure(figsize=(12,8)) - plt.plot(lb_ml_BB, np.sqrt(cov_ml_BB.diagonal())) - plt.plot(lb_ml_BB, std_vec) - plt.savefig(f"{BB_dir}/error_comp.png", dpi=300, bbox_inches="tight") - plt.clf() - plt.close() - - plt.figure(figsize=(12,8)) - plt.imshow(so_cov.cov2corr(cov_mc, remove_diag=True)) - plt.colorbar() - plt.savefig(f"{BB_dir}/mc_correlation.png", dpi=300, 
bbox_inches="tight") - plt.clf() - plt.close() - - plt.figure(figsize=(12,8)) - plt.ylim(-0.1, 0.25) - plt.xlim(0, 4000) - plt.plot(l_th, ps_dict["BB"]) - plt.errorbar(lb_ml_BB, mean_vec, std_vec) - plt.ylabel(r"$D_{\ell}^{BB}$", fontsize=22) - plt.xlabel(r"$\ell$", fontsize=22) - plt.savefig(f"{BB_dir}/combined_BB_sim.png", dpi=300, bbox_inches="tight") - plt.clf() - plt.close() - -else: - mean_dust, std_dust = 0.113, 0.0084 #prior on data gal amplitude - - vec_data_BB = get_and_select_data_vec(spec_dir, f"cross", spec_name_list, spectra, type, indices) - samples = mcmc(mean_dust, std_dust, Rminus1_stop=0.01, Rminus1_cl_stop=0.01) - gdplot = gdplt.get_subplot_plotter() - gdplot.triangle_plot(samples, ["a_BB_cmb", "a_BB_dust"], filled=True, title_limit=1) - plt.savefig(f"{BB_dir}/posterior_BB.png", dpi=300, bbox_inches="tight") - plt.clf() - plt.close() - - - chi2 = -2 * loglike(samples.mean("a_BB_cmb"), samples.mean("a_BB_dust")) - ndof = n_spec * n_bins - 1 - pte = 1 - ss.chi2(ndof).cdf(chi2) - print(f"chi2: {chi2}, ndof: {ndof}, pte: {pte}") - - lb_ml_BB, vec_ml_BB, cov_ml_BB = get_ML_solution(vec_data_BB, vec_fg_BB, i_cov_BB, n_spec, n_bins, meta_bin_scheme) - - - add_BK = True - add_sptpol = True - - plt.figure(figsize=(12,8)) - plt.plot(l_th, ps_dict["BB"], color="gray") - plt.ylim(-0.1, 0.25) - plt.xlim(30, 4000) - plt.errorbar(lb_ml_BB, vec_ml_BB, np.sqrt(np.diagonal(cov_ml_BB)), fmt="o", color="red", label="ACT DR6") - if add_BK: - plt.semilogx() - lb_bicep, Db_bicep, err_bicep = external_data.get_bicep_BB_spectrum() - plt.errorbar(lb_bicep, Db_bicep, err_bicep, fmt="o", color="blue", label="BICEP/Keck (2021)") - if add_sptpol: - lb_sptpol, Db_sptpol, err_sptpol = external_data.get_sptpol_BB_spectrum() - plt.errorbar(lb_sptpol, Db_sptpol, err_sptpol, fmt="o", color="orange", label="SPTpol (2020)") - - - plt.plot(l_th, ps_dict["BB"] * 0, linestyle="--", color="black") - plt.legend(fontsize=16) - plt.ylabel(r"$D_{\ell}^{BB}$", fontsize=22) - plt.xlabel(r"$\ell$", fontsize=22) - plt.savefig(f"{BB_dir}/combined_BB.png", dpi=300, bbox_inches="tight") - plt.clf() - plt.close() - - - plt.figure(figsize=(12,8)) - plt.plot(l_th, ps_dict["EE"] * 1/100, label="1% EE", linestyle="--", alpha=0.5) - plt.plot(l_th, ps_dict["BB"], label="BB", color="gray") - plt.ylim(-0.1, 0.5) - plt.xlim(0, 4000) - plt.errorbar(lb_ml_BB, vec_ml_BB, np.sqrt(np.diagonal(cov_ml_BB)), fmt="o", label="BB ACT DR6", color="red") - plt.plot(l_th, ps_dict["BB"] * 0, linestyle="--", color="black") - plt.legend(fontsize=16) - plt.ylabel(r"$D_{\ell}$", fontsize=22) - plt.xlabel(r"$\ell$", fontsize=22) - plt.savefig(f"{BB_dir}/combined_BB_with_EE.png", dpi=300, bbox_inches="tight") - plt.clf() - plt.close() - - # also plot individual spectra for the data - for my_spec in bin_out_dict.keys(): - s_name, spectrum = my_spec - id, lb = bin_out_dict[my_spec] - lb_, Db = so_spectra.read_ps(f"{spec_dir}/Dl_{s_name}_cross.dat", spectra=spectra) - - plt.figure(figsize=(12,8)) - plt.title(f"{my_spec}, min={np.min(lb)}, max={np.max(lb)}") - plt.plot(lb_, Db[spectrum], label="original spectrum") - plt.errorbar(lb, vec_data_BB[id], np.sqrt(cov_BB[np.ix_(id,id)].diagonal()), fmt=".", label="selected spectrum") - plt.plot(lb, vec_fg_BB[id] + vec_cmb_BB[id], "--", color="gray",alpha=0.3, label="theory") - plt.ylim(-1, 1) - plt.legend() - plt.savefig(f"{BB_dir}/{spectrum}_{s_name}.png", bbox_inches="tight") - plt.clf() - plt.close() - - res = vec_data_BB[id] - vec_fg_BB[id] - vec_cmb_BB[id] - chi2 = res @ np.linalg.inv(cov_BB[np.ix_(id, id)]) 
@ res - ndof = len(lb) - 1 - pte = 1 - ss.chi2(ndof).cdf(chi2) - print(my_spec, f"chi2: {chi2}, ndof: {ndof}, pte: {pte}")