#################### Generate data ----
# True parameters -- these are good things to twiddle.
MU <- 5
SIGMA.2 <- 10
set.seed(1234321)
dat <- rnorm(100, mean = MU, sd = sqrt(SIGMA.2))

#################### Things that are going to be reused ----
# One row per estimator; each method fills in its row as it runs.
estimates <- matrix(nrow = 4, ncol = 2)
rownames(estimates) <- c("True", "MLE", "MH", "Gibbs")
colnames(estimates) <- c("Mean", "Variance")
estimates[1, ] <- c(MU, SIGMA.2)

#' Gaussian log-likelihood of data `x` at pars = c(mu, sigma^2).
#' @param pars numeric(2): mean and variance.
#' @param x numeric vector of observations.
#' @param neg if TRUE, return the negative log-likelihood (for minimizers).
#' @return scalar (negative) log-likelihood.
log.like <- function(pars, x, neg = FALSE) {
  mu <- pars[1]
  s2 <- pars[2]
  # Work on the log scale directly (dnorm(log = TRUE)) instead of
  # exponentiating densities and re-taking logs -- avoids underflow.
  ll <- sum(dnorm(x, mean = mu, sd = sqrt(s2), log = TRUE))
  if (neg) -ll else ll
}

#################### MLE ----
start.vals <- c(0, 4)
out <- nlminb(start.vals, log.like,
              gradient = NULL, hessian = NULL,
              x = dat, neg = TRUE, lower = c(-Inf, 0))
estimates[2, ] <- out$par

#################### Metropolis-Hastings ----
library(MASS)

cov.mat <- diag(1, 2)  # proposal covariance (identity, scaled by tune.par)
tune.par <- .5
n.iter <- 2000
burn <- 1000
chain <- matrix(NA, n.iter, 2)
start.vals <- c(2, 4)  # note: these aren't particularly good starting values
chain[1, ] <- start.vals
# Acceptance record: 0 = reject, 1 = accept (ratio > 1), 2 = accept (coin flip).
ar <- numeric(length = n.iter - 1)
old.ll <- log.like(start.vals, dat)

for (i in 1:(n.iter - 1)) {
  # Draw a candidate. NOTE: this rejection loop is a cheat -- the better way
  # would be a proposal that can't produce a negative second component
  # (the implied proposal is truncated, so it isn't exactly symmetric).
  proposal <- c(-1, -1)
  while (proposal[2] < 0) {
    proposal <- mvrnorm(1, mu = chain[i, ], Sigma = tune.par * cov.mat)
  }
  # Likelihood ratio on the log scale: exp(ll.new - ll.old), NOT
  # exp(ll.new)/exp(ll.old) -- the latter underflows for even modest n.
  r1 <- exp(log.like(proposal, dat) - old.ll)
  # Ratio of the proposal densities is (treated as) 1: proposal is symmetric.
  if (!is.finite(r1)) {
    stop("non-finite acceptance ratio at iteration ", i, call. = FALSE)
  }
  if (r1 > 1) {
    ar[i] <- 1
  } else if (rbinom(1, 1, r1) == 1) {
    ar[i] <- 2
  } else {
    ar[i] <- 0
  }
  if (ar[i] > 0) {
    chain[i + 1, ] <- proposal
  } else {
    chain[i + 1, ] <- chain[i, ]
  }
  old.ll <- log.like(chain[i + 1, ], dat)
}

#pdf("/home/bd/Dropbox/Documents/talks/meetup_bayes/talk/conv_mh.pdf")
plot(chain, type = "l", xlab = "mu", ylab = "sigma^2")
points(x = estimates[1, 1], y = estimates[1, 2], col = "red", cex = 5, pch = 19)
chain <- chain[-(1:burn), ]
estimates[3, ] <- colMeans(chain)
#dev.off()

#################### Gibbs ----
library(MCMCpack)

n.iter <- 2000
burn <- 1000
chain <- matrix(NA, n.iter, 2)
start.vals <- c(0, 2)
chain[1, ] <- start.vals

# Prior for the mean: Normal with center mu.0 and spread (variance) omega.0.
mu.0 <- 1
omega.0 <- 1
# Prior for the variance: parameters of an inverse gamma -- NOT a normal.
nu.0 <- 5
sigma.0 <- 1

n <- length(dat)  # loop-invariant; hoisted out of the sampler
for (i in 2:n.iter) {
  # Sample mu | s2, data -- conditioning on the previous draw of the variance.
  s2 <- chain[i - 1, 2]
  # BUG FIX: the prior precision in the denominator is 1/omega.0 (spread),
  # not 1/mu.0 (center); the original only worked because mu.0 == omega.0.
  center <- (mu.0 / omega.0 + n * mean(dat) / s2) / (1 / omega.0 + n / s2)
  spread <- 1 / (1 / omega.0 + n / s2)
  mu <- rnorm(1, mean = center, sd = sqrt(spread))
  # Sample sigma^2 | mu, data -- conditioning on the fresh draw of the mean.
  nu.tmp <- nu.0 + n / 2
  sigma.tmp <- sigma.0 + .5 * sum((dat - mu)^2)
  s2 <- rinvgamma(1, shape = nu.tmp, scale = sigma.tmp)
  chain[i, ] <- c(mu, s2)
}

#pdf("/home/bd/Dropbox/Documents/talks/meetup_bayes/talk/conv_gibbs.pdf")
par(mfrow = c(1, 2))
plot(chain[, 1], type = "l", ylab = "mu", xlab = "iteration")
abline(h = estimates[1, 1], col = "red")
plot(chain[, 2], type = "l", ylab = "sigma^2", xlab = "iteration")
abline(h = estimates[1, 2], col = "red")
chain <- chain[-(1:burn), ]
estimates[4, ] <- colMeans(chain)
#dev.off()

#pdf("/home/bd/Dropbox/Documents/talks/meetup_bayes/talk/prior_posterior.pdf")
# Compare the prior, the likelihood (profiled at the true variance), and the
# posterior for mu, each normalized over the plotting grid so they share a
# common scale, then mark each curve's (weighted) mean with a vertical line.
x <- seq(-50, 50, length.out = 10000)
y1 <- dnorm(x, mean = mu.0, sd = sqrt(omega.0))  # prior
y1 <- y1 / sum(y1)
y2 <- dnorm(x, mean = estimates[4, 1], sd = sqrt(estimates[4, 2]))  # posterior
y2 <- y2 / sum(y2)
# Likelihood in mu with the variance held at its true value. Preallocated via
# vapply instead of growing y3 element-by-element in a 1:length(x) loop.
y3 <- vapply(x, function(m) log.like(c(m, SIGMA.2), dat), numeric(1))
# Shift by the max before exponentiating: identical after normalization, but
# immune to every term underflowing to zero for larger datasets.
y3 <- exp(y3 - max(y3))
y3 <- y3 / sum(y3)
plot(x, y1, type = "l", lwd = 3, xlab = "Mu", ylab = "density", yaxt = "n",
     ylim = c(0, .025), col = "red", xlim = c(0, 6))
lines(x, y2, lwd = 3, col = "black")
lines(x, y3, lwd = 3, col = "blue")
legend("topleft", c("Prior", "Likelihood", "Posterior"),
       col = c("red", "blue", "black"), lwd = 3, bty = "n")
abline(v = weighted.mean(x, y1), col = "red", lty = 2, lwd = 3)
abline(v = weighted.mean(x, y2), col = "black", lty = 1, lwd = 3)
abline(v = weighted.mean(x, y3), col = "blue", lty = 2, lwd = 3)
#dev.off()