"""Probit model estimated by maximum likelihood.

A latent-variable DGP is simulated, the two slope coefficients are
estimated by minimising the negative log-likelihood with Nelder-Mead
(scipy.optimize.fmin), and standard errors are computed from the outer
product of gradients (OPG / BHHH) estimator.
"""
from numpy import log, array, sqrt, column_stack, diag
from numpy.linalg import inv
from scipy.stats import norm, uniform
from scipy.optimize import fmin

# ---------- DGP: latent y* = x + z + e, observed y = 1{y* > 0} ----------
n = 100
x = norm.rvs(size=(n, 1))
z = uniform.rvs(size=(n, 1))
ys = x + z + norm.rvs(size=(n, 1))
y = ys > 0  # boolean (n, 1); behaves as 0/1 in arithmetic
# ---------- DGP ends ----------


def lik(theta, x=x, z=z, y=y):
    """Negative log-likelihood of the probit model.

    Parameters
    ----------
    theta : sequence of two floats
        Slope coefficients (b1 on x, b2 on z).
    x, z : (n, 1) arrays
        Regressors (default: the simulated data above).
    y : (n, 1) boolean array
        Observed binary outcome.

    Returns
    -------
    float
        The negative log-likelihood, suitable for a minimiser.
    """
    b1, b2 = theta[0], theta[1]
    xb = x * b1 + z * b2
    # logcdf is numerically stable where log(cdf(.)) would underflow to
    # -inf for large |xb|; note 1 - cdf(v) == cdf(-v) by symmetry.
    loglik = norm.logcdf(xb) * y + norm.logcdf(-xb) * (1 - y)
    return -loglik.sum()


def glik(theta, x=x, z=z, y=y):
    """Score matrix (n x 2) of the probit log-likelihood at *theta*.

    Row i holds d log L_i / d(b1, b2).  The y == 0 term divides by
    1 - cdf (written cdf(-xb)), the correct derivative of log(1 - cdf);
    the original code wrongly divided by cdf there.
    """
    b1, b2 = theta[0], theta[1]
    xb = x * b1 + z * b2
    xmat = column_stack((x, z))  # (n, 2) design matrix, one row per obs
    f1 = (xmat * norm.pdf(xb) / norm.cdf(xb)) * y
    f2 = (-xmat * norm.pdf(xb) / norm.cdf(-xb)) * (1 - y)
    return f1 + f2


# ---------- Estimation ----------
b0 = [0.5, 0.5]              # starting values
thetaml = fmin(lik, b0)      # Nelder-Mead minimisation of -logL
gR = glik(thetaml)           # score matrix at the optimum
varcov = inv(gR.T @ gR)      # OPG / BHHH variance-covariance estimate
SE = sqrt(diag(varcov))      # standard errors
print('Coefficients', thetaml)
print('Std. Errors', SE)