Problem with camb.correlations.cl2corr function
Posted: March 07 2025
Hello everyone, I'm having some problems getting the correlation function of the $C_\ell$ power spectrum. I'm plotting the theoretical best fit for the TT power spectrum together with the result of running CAMB with the same cosmological parameters, and I'm not getting the exact same curve, as you can see in the image (second row).
The problem is that, despite having the same power spectrum, I don't get the same correlation function.

Here is my code:
To be able to run the code you will need to download the following data from PLA:
- The unbinned TT power spectrum
- The full grid of cosmological parameters (or at least a chain containing .minimum and .minimum.theory_cl files)
Code:
wget -O COM_PowerSpect_CMB-TT-full_R3.01.txt "http://pla.esac.esa.int/pla/aio/product-action?COSMOLOGY.FILE_ID=COM_PowerSpect_CMB-TT-full_R3.01.txt"
Code:
wget -O COM_CosmoParams_fullGrid_R3.01.zip "http://pla.esac.esa.int/pla/aio/product-action?COSMOLOGY.FILE_ID=COM_CosmoParams_fullGrid_R3.01.zip"
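After downloading, the grid archive has to be extracted; the code below assumes the chain directories end up under COM_CosmoParams_fullGrid_R3.01/base/ (adjust the paths if your layout differs):

Code:
unzip COM_CosmoParams_fullGrid_R3.01.zip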
Code:
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import camb
import camb.correlations
import numpy as np
import pandas as pd
from scipy.integrate import simpson
from scipy.stats import chisquare

unbin_cl = pd.read_csv('maps/COM_PowerSpect_CMB-TT-full_R3.01.txt', sep=',')
unbin_cl.columns = ['ell', 'D_ell', '-dD_ell', '+dD_ell']
unbin_cl.head()

def solidang(cor, theta):
    # Integrate the correlation function over the solid angle:
    # 2*pi * int xi(theta) sin(theta) dtheta, with theta given in degrees
    cor = np.asarray(cor)
    if cor.ndim > 1:  # accept both the (n, 4) cl2corr output and a 1D array
        cor = cor[:, 0]
    theta_rad = np.radians(theta)
    order = np.argsort(theta_rad)  # theta decreases with xvals, so sort first
    integrand = cor[order] * np.sin(theta_rad[order])
    integral = simpson(integrand, x=theta_rad[order])
    return round(2 * np.pi * integral, 3)

def clcorr(cls, c=False, lmin=0):
    # cl2corr expects an (lmax+1, 4) array of l(l+1)C_l/2pi ordered
    # TT, EE, BB, TE and starting at L=0, so inputs that start at
    # L=lmin > 0 are zero-padded at the front
    xvals = np.linspace(-0.999, 0.999, 1000)
    theta = np.arccos(xvals) * 180 / np.pi
    if c:
        clstot = np.asarray(cls)
    else:
        # TT-only input: replicate the spectrum into all four columns
        clstot = np.tile(np.asarray(cls).reshape(-1, 1), 4)
    if lmin > 0:
        clstot = np.vstack([np.zeros((lmin, clstot.shape[1])), clstot])
    corrs = camb.correlations.cl2corr(clstot, xvals)
    inte = solidang(corrs, theta)
    return corrs[:, 0], theta, inte

def clandcorrplotbase(file, expdata=unbin_cl):
    data = pd.read_csv(f'COM_CosmoParams_fullGrid_R3.01/base/{file}/base_{file}.minimum.theory_cl',
                       sep=r'\s+', skiprows=1, header=None)
    data.columns = ["L", "TT", "TE", "EE", "BB", "PP"]
    # Parameters to read from the .minimum file
    params_to_keep = {"omegabh2", "omegach2", "H0", "tau", "omegak", "logA", "ns",
                      "mnu", "w", "wa", "nnu", "yhe", "Alens", "nrun"}
    # Read the file, skipping the LaTeX labels at the end of each row
    df = pd.read_csv(f'COM_CosmoParams_fullGrid_R3.01/base/{file}/base_{file}.minimum',
                     sep=r"\s+", header=None, usecols=[1, 2], names=["value", "param"])
    df_filtered = df[df["param"].isin(params_to_keep)]
    dic = dict(zip(df_filtered['param'], df_filtered['value'].astype(float)))
    pars = camb.set_params(ombh2=dic['omegabh2'], omch2=dic['omegach2'], H0=dic['H0'],
                           omk=dic['omegak'], YHe=dic['yhe'], nnu=dic['nnu'],
                           nrun=dic['nrun'], Alens=dic['Alens'], ns=dic['ns'],
                           As=np.exp(dic['logA'])*1e-10, w=dic['w'], wa=dic['wa'],
                           mnu=dic['mnu'], tau=dic['tau'])
    resu = camb.get_results(pars)
    cls = resu.get_cmb_power_spectra(pars, CMB_unit='muK', lmax=max(data['L']))
    totCL = cls['total']
    n = len(data['L'])
    ell = expdata['ell'][:n]
    D_ell = expdata['D_ell'][:n]
    errors = (expdata['-dD_ell'][:n], expdata['+dD_ell'][:n])

    fig, axes = plt.subplots(6, 2, sharex='col',
                             gridspec_kw={'height_ratios': [3, 1, 3, 1, 3, 1]})
    fig.subplots_adjust(hspace=0)
    fig.set_size_inches(60, 15)

    # --- Planck data vs best-fit theory ---
    # Normalize expected values to the observed sum before the chi-square test
    expected_th = data['TT'] * (D_ell.sum() / data['TT'].sum())
    mask = expected_th != 0
    D_ell_filtered = D_ell[mask]
    expected_th_filtered = expected_th[mask]
    expected_th_filtered *= D_ell_filtered.sum() / expected_th_filtered.sum()
    chi_th, _ = chisquare(D_ell_filtered, expected_th_filtered)
    axes[0,0].scatter(ell, data['TT'], marker="+", c='black', s=5, label='Best fit')
    axes[0,0].errorbar(ell, D_ell, yerr=errors, fmt='o', color='red',
                       ecolor=colors.to_rgba('blue', 0.2), elinewidth=1, capsize=2,
                       markersize=2,
                       label=rf"Planck data $\chi_n^2={chi_th/mask.sum():.3f}$")
    axes[0,0].set_ylabel(r'$D_\ell^{TT} \, [\mu K^2]$')
    # Residuals
    delta_D_ell = np.array(D_ell) - np.array(data['TT'])
    axes[1,0].errorbar(data['L'], delta_D_ell, yerr=errors, fmt='o', color='green',
                       ecolor=colors.to_rgba('blue', 0.2), elinewidth=1, capsize=2,
                       markersize=2, label="Residuals")
    axes[1,0].set_ylabel(r'$\Delta D_\ell^{TT}$')
    axes[0,0].title.set_text("Power Spectrum of TT from Planck Archive Unbinned")
    # The theory_cl file starts at L=2, and cl2corr wants TT, EE, BB, TE order
    tcor, theta, _ = clcorr(data[['TT', 'EE', 'BB', 'TE']], True, lmin=2)
    expcor, theta, _ = clcorr(D_ell, lmin=2)
    axes[0,1].scatter(theta, tcor, marker="+", c='black', s=2, label='Derived from best fit')
    axes[0,1].scatter(theta, expcor, marker="o", c='red', s=2, label='Derived from Planck data')
    axes[0,1].title.set_text("Correlation function")
    delta_corr = np.array(expcor) - np.array(tcor)
    axes[1,1].scatter(theta, delta_corr, marker="^", c='green', s=2, label="Residuals")
    axes[1,1].set_ylabel(r'$\Delta \xi (\theta)$')
    axes[0,1].set_ylabel(r'$\xi (\theta)$')

    # --- Best-fit theory vs CAMB ---
    axes[2,0].scatter(ell, data['TT'], marker="+", c='black', s=5, label='Best fit')
    axes[2,0].scatter(ell, totCL[2:, 0], marker="o", c='red', s=5, label='CAMB results')
    axes[2,0].set_ylabel(r'$D_\ell^{TT} \, [\mu K^2]$')
    # Residuals
    delta_D_ell = np.array(totCL[2:, 0]) - np.array(data['TT'])
    axes[3,0].errorbar(data['L'], delta_D_ell, yerr=errors, fmt='o', color='green',
                       ecolor=colors.to_rgba('blue', 0.2), elinewidth=1, capsize=2,
                       markersize=2, label="Residuals")
    axes[3,0].set_ylabel(r'$\Delta D_\ell^{TT}$')
    axes[3,0].set_xlabel(r'$\ell$')
    tcor, theta, inte_bf = clcorr(data[['TT', 'EE', 'BB', 'TE']], True, lmin=2)
    cambcor, theta, inte_camb = clcorr(totCL, True)  # CAMB output already starts at L=0
    axes[2,1].scatter(theta, tcor, marker="+", c='black', s=2, label='Derived from best fit')
    axes[2,1].scatter(theta, cambcor, marker="o", c='red', s=2, label='Derived from CAMB')
    delta_corr = np.array(cambcor) - np.array(tcor)
    axes[3,1].scatter(theta, delta_corr, marker="^", c='green', s=2, label="Residuals")
    axes[3,1].set_ylabel(r'$\Delta \xi (\theta)$')
    axes[2,1].set_ylabel(r'$\xi (\theta)$')

    # --- CAMB vs Planck data ---
    expected_camb = totCL[2:, 0] * (D_ell.sum() / totCL[2:, 0].sum())
    mask2 = expected_camb != 0
    D_ell_filtered = D_ell[mask2]
    expected_camb_filtered = expected_camb[mask2]
    expected_camb_filtered *= D_ell_filtered.sum() / expected_camb_filtered.sum()
    chi_camb, _ = chisquare(D_ell_filtered, expected_camb_filtered)
    print(f"Excluding {len(D_ell) - mask.sum()} data points")
    print(f"Excluding {len(D_ell) - mask2.sum()} data points")
    axes[4,0].scatter(ell, totCL[2:, 0], marker="o", c='black', s=5, label='CAMB results')
    axes[4,0].errorbar(ell, D_ell, yerr=errors, fmt='o', color='red',
                       ecolor=colors.to_rgba('blue', 0.2), elinewidth=1, capsize=2,
                       markersize=2,
                       label=rf"Planck data $\chi_n^2={chi_camb/mask2.sum():.3f}$")
    axes[4,0].set_ylabel(r'$D_\ell^{TT} \, [\mu K^2]$')
    # Residuals
    delta_D_ell = np.array(D_ell) - np.array(totCL[2:, 0])
    axes[5,0].errorbar(data['L'], delta_D_ell, yerr=errors, fmt='o', color='green',
                       ecolor=colors.to_rgba('blue', 0.2), elinewidth=1, capsize=2,
                       markersize=2, label="Residuals")
    axes[5,0].set_ylabel(r'$\Delta D_\ell^{TT}$')
    axes[5,0].set_xlabel(r'$\ell$')
    expcor, theta, inte_exp = clcorr(D_ell, lmin=2)
    cambcor, theta, _ = clcorr(totCL, True)
    axes[4,1].scatter(theta, expcor, marker="+", c='red', s=2, label="Derived from Planck data")
    axes[4,1].scatter(theta, cambcor, marker="o", c='black', s=2, label='Derived from CAMB')
    delta_corr = np.array(expcor) - np.array(cambcor)
    axes[5,1].scatter(theta, delta_corr, marker="^", c='green', s=2, label="Residuals")
    axes[5,1].set_xlabel(r'$\theta [^\circ]$')
    axes[5,1].set_ylabel(r'$\Delta \xi (\theta)$')

    fig.text(0.51, 0.82, "Experimental VS Best fit\n" + r"$\int \xi(\theta)_{exp}d\Omega =$" + f"{inte_exp}",
             ha='center', fontsize=12, fontweight='bold')
    fig.text(0.51, 0.52, "CAMB VS Best fit\n" + r"$\int \xi(\theta)_{bf}d\Omega =$" + f"{inte_bf}",
             ha='center', fontsize=12, fontweight='bold')
    fig.text(0.51, 0.22, "Experimental VS CAMB\n" + r"$\int \xi(\theta)_{CAMB}d\Omega =$" + f"{inte_camb}",
             ha='center', fontsize=12, fontweight='bold')
    title = f"Comparison of Planck's results for the chain {file}\n"
    for i in params_to_keep:
        title += f"{i}: {dic[i]} "
    fig.suptitle(title)
    for ax in axes[:, 0]:
        ax.axvline(x=20, color='black', linestyle='--', linewidth=0.5)
        ax.set_yscale('linear')
        ax.grid(True, which='both', linestyle='--', linewidth=0.5)
        ax.legend(loc='upper right')
        ax.set_xscale('log')
    for ax in axes[:, 1]:
        ax.set_xscale('linear')
        ax.set_xlim(0, max(theta) + 0.5)
        ax.set_yscale('linear')
        ax.grid(True, which='both', linestyle='--', linewidth=0.5)
        ax.legend(loc='upper right')
    plt.savefig(f"Comp_Graphs/base/{file}.png")
    plt.close()
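For completeness, this is how I drive the function for a single chain; the chain name is just an example (any chain directory under base/ containing .minimum and .minimum.theory_cl files should work), and the output directory has to exist:

Code:
import os
os.makedirs("Comp_Graphs/base", exist_ok=True)  # savefig target must exist
clandcorrplotbase('plikHM_TTTEEE_lowl_lowE')    # example chain name from the grid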
Thanks for reading this far!

EDIT: I have written my own correlation function, which is slower, but at least I can understand what it's doing, and compared the two:
Code:
from scipy.special import eval_legendre
import numpy as np

def clcorr(dls, c=False):
    # Direct Legendre sum: xi(x) = (1/4pi) * sum_l (2l+1) C_l P_l(x),
    # recovering C_l from D_l = l(l+1)C_l/2pi. The input starts at l=2.
    xvals = np.linspace(-0.999, 0.999, 1000)
    theta = np.arccos(xvals) * 180 / np.pi
    cls = np.asarray(dls)[:, 0] if c else np.asarray(dls)
    lmax = len(cls)
    ell = np.arange(2, lmax + 2)
    cl = cls * 2 * np.pi / (ell * (ell + 1))  # D_l -> C_l
    corr = np.zeros_like(xvals)
    for deg, c_l in zip(ell, cl):
        corr += (2 * deg + 1) * c_l * eval_legendre(deg, xvals)
    corrs = corr / (4 * np.pi)
    inte = solidang(corrs, theta)
    return corrs, theta, inte

# Data loading with safety checks
ell = unbin_cl['ell'][:2500]      # truncate to the first 2500 multipoles
D_ell = unbin_cl['D_ell'][:2500]
xvals = np.linspace(-0.999, 0.999, 1000)  # avoid values too close to +-1

def clcorrcamb(cls, c=False):
    import camb.correlations
    xvals = np.linspace(-0.999, 0.999, 1000)
    theta = np.arccos(xvals) * 180 / np.pi
    if c:
        clstot = np.asarray(cls)
    else:
        # TT-only input starting at l=2: replicate into the four columns and
        # zero-pad the L=0,1 rows, since cl2corr expects the array to start at L=0
        clstot = np.tile(np.asarray(cls).reshape(-1, 1), 4)
        clstot = np.vstack([np.zeros((2, 4)), clstot])
    corrs = camb.correlations.cl2corr(clstot, xvals)
    inte = solidang(corrs[:, 0], theta)
    return corrs[:, 0], theta, inte

# Plot both versions, masking any non-finite values
result1, _, _ = clcorr(D_ell)
valid_mask1 = np.isfinite(result1)
result2, _, _ = clcorrcamb(D_ell)
valid_mask2 = np.isfinite(result2)
plt.plot(np.arccos(xvals[valid_mask1]) * 180 / np.pi, result1[valid_mask1], label="Own")
plt.plot(np.arccos(xvals[valid_mask2]) * 180 / np.pi, result2[valid_mask2], label="CAMB")
plt.legend()
plt.show()
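A faster, equivalent way to do the same sum is numpy's Legendre-series evaluator. This is a minimal sketch under the same convention as my loop above (input D_l starting at l=2); it should agree with the explicit loop while running much faster:

Code:
import numpy as np
from numpy.polynomial.legendre import legval

def clcorr_fast(dls):
    # Same sum as the loop version: xi(x) = sum_l [(2l+1)/(4pi)] C_l P_l(x),
    # evaluated as a single Legendre series with legval
    dls = np.asarray(dls)
    xvals = np.linspace(-0.999, 0.999, 1000)
    ell = np.arange(2, len(dls) + 2)
    cl = dls * 2 * np.pi / (ell * (ell + 1))       # D_l -> C_l
    coeffs = np.zeros(len(dls) + 2)                # zero coefficients for l=0,1
    coeffs[2:] = (2 * ell + 1) / (4 * np.pi) * cl  # series coefficients
    return legval(xvals, coeffs)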

Edit 2:
I think the differences in the correlation function arise from small differences in the $C_\ell$ that are enlarged at the high multipoles, since Legendre polynomials of high order take on really large values, so the correlation function ends up amplifying the differences in the $C_\ell$.
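As a quick toy check of this idea (illustrative numbers only, not my real spectra): perturb a flat set of D_l by 0.1% above l=1000 and push both through cl2corr to see how much the correlation function reacts:

Code:
import numpy as np
import camb.correlations

lmax = 2500
# Toy D_l = l(l+1)C_l/2pi spectrum, flat at 1 muK^2 above l=2 (purely illustrative)
dls = np.zeros((lmax + 1, 4))
dls[2:, 0] = 1.0
# Perturb the TT spectrum by 0.1% above l=1000
dls_pert = dls.copy()
dls_pert[1000:, 0] *= 1.001

xvals = np.linspace(-0.999, 0.999, 1000)
xi = camb.correlations.cl2corr(dls, xvals)[:, 0]
xi_pert = camb.correlations.cl2corr(dls_pert, xvals)[:, 0]
# Compare the change in xi to the overall scale of xi
print("max change in xi relative to max|xi|:",
      np.max(np.abs(xi_pert - xi)) / np.max(np.abs(xi)))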