# NLS ESTIMATION BY MINIMIZING RSS
from __future__ import division
import numpy as np
from scipy.optimize import fmin

np.random.seed(1234)

# DGP: y = b1 + b2*x^b3 + eps with true beta = (2, 2, 2)
n = 100
x = np.random.uniform(size=(n, 1))
eps = np.random.standard_normal(size=(n, 1))
beta = [2, 2, 2]
y = beta[0] + beta[1]*x**beta[2] + eps
# DGP ENDS

# ESTIMATION
# Starting values
beta0 = np.array([3, 3, 3])

# FUNCTIONS
# RSS for the nonlinear model
def ssr(b):
    e = y - b[0] - b[1]*x**b[2]
    return np.dot(e.T, e).item()   # return a scalar, as fmin expects

# Variance-covariance matrix of the NLS estimator
def vcov(bnls):
    k = len(bnls)
    e = y - bnls[0] - bnls[1]*x**bnls[2]   # residuals (note bnls[1], not bnls[2])
    sig2hat = np.dot(e.T, e).item()/(n - k)
    # First derivatives of the regression function wrt b1, b2, b3
    xd1 = np.ones((n, 1))
    xd2 = x**bnls[2]
    xd3 = (bnls[1]*x**bnls[2])*np.log(x)
    xdm = np.hstack([xd1, xd2, xd3])
    return sig2hat*np.linalg.inv(np.dot(xdm.T, xdm))

# Minimize the RSS (fmin is the Nelder-Mead simplex)
bnls = fmin(ssr, beta0)
# Standard errors
serr = np.sqrt(np.diag(vcov(bnls)))

# OUTPUT
print('''
==========================
GNR Estimation Results
model : y = b1 + b2*x^b3 + eps
==========================
''')
bnlst = ['beta1', 'beta2', 'beta3']
print('Param.', 'Coef.', '  SERR')
print(bnlst[0], "%.4f" % bnls[0], "(%.4f)" % serr[0])
print(bnlst[1], "%.4f" % bnls[1], "(%.4f)" % serr[1])
print(bnlst[2], "%.4f" % bnls[2], "(%.4f)" % serr[2])
print('''
===========================
''')

# The numerical gradient below is not used above, but it can be
# activated if analytical derivatives are hard to obtain.
def gradp(f, beta):
    h = 1e-7
    h0 = np.array([h/2, 0, 0])
    h1 = np.array([0, h/2, 0])
    h2 = np.array([0, 0, h/2])
    d0 = (f(beta + h0) - f(beta - h0))/h   # central difference wrt b1
    d1 = (f(beta + h1) - f(beta - h1))/h   # central difference wrt b2
    d2 = (f(beta + h2) - f(beta - h2))/h   # central difference wrt b3
    return np.hstack([d0, d1, d2])

# Regression function (right-hand side of the model)
def reg(b):
    rhs = b[0] + b[1]*x**b[2]   # '+' on the second term, not '-'
    return rhs
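
# EXAMPLE (sketch, not part of the original script): gradp() applied to
# reg() gives the numerical (n x k) Jacobian of the regression function,
# which can stand in for the analytical derivatives used in vcov().
# vcov_num is a hypothetical helper name introduced only for illustration.
def vcov_num(bnls):
    k = len(bnls)
    e = y - reg(bnls)                        # NLS residuals
    sig2hat = np.dot(e.T, e).item()/(n - k)
    xdm = gradp(reg, bnls)                   # numerical Jacobian via central differences
    return sig2hat*np.linalg.inv(np.dot(xdm.T, xdm))

# serr_num = np.sqrt(np.diag(vcov_num(bnls)))   # should closely match serr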