How to adapt emcee to work with an odeint numerical solution

I'm trying to constrain a function using MCMC methods (emcee) without having an analytical form of this function. I'm using odeint to obtain the function numerically. Everything runs, but the MCMC sampler fails to constrain my model. Could someone help? Here is the code:

import numpy as np
import matplotlib.pyplot as plt
import emcee
import corner
import scipy.integrate as integrate

import scipy.special as special

# Observational Hubble-parameter data: redshifts z, measured H(z) [km/s/Mpc],
# and their 1-sigma uncertainties (one entry per redshift).
z=np.array([0.070, 0.090, 0.120, 0.170, 0.1791, 0.1993, 0.200, 0.24, 0.270, 0.280, 0.30, 0.31, 0.34, 0.35, 0.3519, 0.36, 0.38, 0.3802, 0.400, 0.40, 0.4004, 0.4247, 0.43, 0.44, 0.44, 0.4497, 0.4783, 0.480, 0.48, 0.51, 0.52, 0.56, 0.57, 0.57, 0.59, 0.593, 0.60, 0.61, 0.64, 0.6797, 0.73, 0.7812, 0.8754, 0.880, 0.900, 1.037, 1.300, 1.363, 1.430, 1.530, 1.750, 1.965, 2.30, 2.33, 2.34, 2.36])
Hobs=np.array([69, 69, 68.6, 83, 75, 75, 72.9, 79.69, 77, 88.8, 81.7, 78.18, 83.8, 82.7, 83, 79.94, 81.5, 83, 95, 82.04, 77, 87.1, 86.45, 82.6, 84.81, 92.8, 80, 97, 87.79, 90.4, 94.35, 93.34, 87.6, 96.8, 98.48, 104, 87.9, 97.3, 98.82, 92, 97.3, 105, 125, 90, 117, 154, 168, 160, 177, 140, 202, 186.5, 224, 224, 222, 226])
sigma=np.array([19.6, 12, 26.2, 8, 4, 5, 29.6, 2.99, 14, 36.6, 6.22, 4.74, 3.66, 9.1, 14, 3.38, 1.9, 13.5, 17, 02.03, 10.2, 11.2, 3.97, 7.8, 1.83, 12.9, 99, 62, 02.03, 1.9, 2.64, 2.3, 7.8, 3.4, 3.18, 13, 6.1, 2.1, 2.98, 8, 7.0, 12, 17, 40, 23, 20, 17, 33.6, 18, 14, 40, 50.4, 8.6, 8, 8.5, 9.3])
#Hubble parameter
from scipy.integrate import odeint
# Hubble constant used as the odeint initial condition [km/s/Mpc].
H0=63.2

# Fiducial ("true") model parameters used to seed the optimizer below.
alpha_true=-0.0166
n_true=0.974
gamma_true=1.36
zeta0_true=0.65
zeta1_true=0

# Scale-factor grid corresponding to the data redshifts (decreasing in a).
# NOTE(review): odeint will apply the initial condition H0 at a_true[0]
# (i.e. at z=0.07), not at a=1 (z=0) — confirm this is intended.
a_true = 1/(1+z)

def model(H,a,alpha,n,gamma,zeta0,zeta1):
    """Right-hand side for odeint: the derivative of the Hubble rate H with
    respect to the integration variable a (the scale factor), for model
    parameters (alpha, n, gamma, zeta0, zeta1).

    NOTE(review): despite the local name ``dHdz``, this is a derivative with
    respect to ``a`` — the variable odeint integrates over — not z.
    """
    # Single closed-form expression, kept verbatim: the ordering/signs of the
    # 2**.. and 3**.. factors are delicate. (H**2)**n keeps the power
    # well-defined for fractional n.
    dHdz=-((2**(-1 - n)*3**(1 - n)*H*(6*zeta0*H + 6*zeta1*H**2 - 6**n*alpha*gamma*(H**2)**n + 2**(1 + n)*3**n*n*alpha*gamma*(H**2)**n))/((a)*n*alpha*(H**2)**n))
    return dHdz


def Hz_th(theta, a):
    """Numerical H(a) for parameters theta = (alpha, n, gamma, zeta0, zeta1).

    Integrates ``model`` from the initial condition H0 over the scale-factor
    grid ``a`` and returns a 1-D array of length len(a).

    Bug fix: odeint returns an array of shape (len(a), 1). The previous
    version returned that 2-D array, so ``Hz_th(...) - Hobs`` broadcast to an
    (N, N) matrix and the chi-squared summed N*N spurious residuals. The
    solution column is extracted here so the result is 1-D.
    """
    alpha, n, gamma, zeta0, zeta1 = theta
    sol = odeint(model, H0, a, args=(alpha, n, gamma, zeta0, zeta1))
    return sol[:, 0]

def chi_squared_H(theta, z):
    """Chi-squared of the model H(z) against the (Hobs, sigma) data.

    Bug fixes:
    - The previous version called ``Hz_th(theta, a)`` with an undefined
      global ``a`` (only ``a_true`` exists), raising a NameError at runtime;
      the scale-factor grid is now computed from ``z`` directly.
    - odeint's (N, 1) output is flattened with ``np.ravel`` so the residual
      against the 1-D ``Hobs`` does not broadcast to an (N, N) matrix.
    """
    a = 1.0 / (1.0 + z)
    Hth = np.ravel(Hz_th(theta, a))
    return np.sum(((Hth - Hobs) ** 2) / sigma ** 2)

def lnlike(theta, z, obs, sigma):
    """Gaussian log-likelihood: -chi^2 / 2.

    Bug fix: the previous ``np.log(np.exp(-chi2/2))`` underflows —
    ``np.exp`` returns 0.0 once chi2 exceeds roughly 1400, so the log
    becomes -inf everywhere away from the best fit. That flattens the
    likelihood surface and prevents both the optimizer and the emcee
    walkers from constraining the model. Returning -chi2/2 directly is
    mathematically identical and numerically stable.

    NOTE(review): ``obs`` and ``sigma`` are accepted for interface
    compatibility but, as in the original, the module-level Hobs/sigma are
    what ``chi_squared_H`` actually uses.
    """
    return -0.5 * chi_squared_H(theta, z)

def lnprior(theta):
    """Flat (top-hat) prior: 0.0 inside the box below, -inf outside."""
    alpha, n, gamma, zeta0, zeta1 = theta
    inside = (
        -0.1 < alpha < -0.01
        and 0.97 < n < 2
        and 1 < gamma < 2
        and 0 < zeta0 < 0.75
        and 0 < zeta1 < 0.75
    )
    return 0.0 if inside else -np.inf

def log_probability(theta, z, Hobs, sigma):
    """Log-posterior for emcee: prior plus likelihood, -inf off-support."""
    lp = lnprior(theta)
    # Guard: outside the prior box the posterior is zero — skip the
    # (expensive) ODE-based likelihood entirely.
    if np.isfinite(lp):
        return lp + lnlike(theta, z, Hobs, sigma)
    return -np.inf

import scipy.optimize as op
# Maximum-likelihood warm start: minimize the negative log-likelihood from
# the fiducial parameter values.
nll = lambda *args: -lnlike(*args)
result = op.minimize(nll, [alpha_true,n_true,gamma_true,zeta0_true,zeta1_true], args=(z, Hobs, sigma))

alpha_ml,n_ml,gamma_ml,zeta0_ml,zeta1_ml= result["x"]

# 50 walkers in the 5-dimensional parameter space, initialized in a tiny
# Gaussian ball (1e-4 scatter) around the best-fit point.
ndim, nwalkers =5,50
from tqdm import tqdm
pos = [result["x"] + 1e-4*np.random.randn(ndim) for i in range(nwalkers)]


sampler = emcee.EnsembleSampler(
    nwalkers, ndim, log_probability, args=(z, Hobs, sigma))

# Run 1000 steps per walker.
sampler.run_mcmc(pos, 1000, progress=True);

# NOTE(review): discard=1 drops only a single burn-in step; several hundred
# steps (or a multiple of the autocorrelation time) would be more typical.
flat_samples = sampler.get_chain(discard=1, flat=True)

import corner

# Corner plot of the flattened posterior samples with 1/2/3-sigma contours.
corner.corner(flat_samples, labels=[r"$a$", "$n$", "$gamma$", "$zeta0$", "$zeta1$"],levels=(1 - np.exp(-0.5), 1 - np.exp(-2), 1 - np.exp(-9 / 2.)),quantiles_kwargs={"color":"black"},
                    show_titles=True, title_kwargs={"fontsize": 14},plot_datapoints=False,smooth=0.9,    plot_density=False,
    fill_contours=True,
    max_n_ticks=5,truth_color='black',color='tab:red',title_fmt=".4",hist_kwargs={"color":"black"})
plt.tight_layout()
plt.savefig('corner1.pdf',bbox_inches='tight')

I tested the priors; they are fine, and the true values are very close to the actual best fit.



Sources

This article follows the attribution requirements of Stack Overflow and is licensed under CC BY-SA 3.0.

Source: Stack Overflow

Solution Source