@misc{walsh2004,
  author       = {Walsh, B.},
  title        = {Markov Chain Monte Carlo and Gibbs Sampling},
  howpublished = {Lecture Notes for EEB 581, version 26},
  month        = {April},
  year         = {2004},
  url          = {http://nitro.biosci.arizona.edu/courses/EEB581-2004/handouts/Gibbs.pdf},
  abstract     = {A major limitation to more widespread implementation of Bayesian approaches is that obtaining the posterior distribution often requires the integration of high-dimensional functions. This can be computationally very difficult, but several approaches short of direct integration have been proposed (reviewed by Smith 1991, Evans and Swartz 1995, Tanner 1996). We focus here on Markov Chain Monte Carlo (MCMC) methods, which attempt to simulate direct draws from some complex distribution of interest. MCMC approaches are so named because one uses the previous sample values to randomly generate the next sample value, generating a Markov chain (as the transition probabilities between sample values are a function only of the most recent sample value). The realization in the early 1990s (Gelfand and Smith 1990) that one particular MCMC method, the Gibbs sampler, is very widely applicable to a broad class of Bayesian problems has sparked a major increase in the application of Bayesian analysis, and this interest is likely to continue expanding for some time to come. MCMC methods have their roots in the Metropolis algorithm (Metropolis and Ulam 1949, Metropolis et al. 1953), an attempt by physicists to compute complex integrals by expressing them as expectations for some distribution and then estimating this expectation by drawing samples from that distribution. The Gibbs sampler (Geman and Geman 1984) has its origins in image processing. It is thus somewhat ironic that the powerful machinery of MCMC methods had essentially no impact on the field of statistics until rather recently. Excellent (and detailed) treatments of MCMC methods are found in Tanner (1996) and Chapter two of Draper (2000). Additional references are given in the particular sections below.},
  note         = {Introduction to MCMC, with Gibbs sampling treated as a special case. See also Casella and George (1992), "Explaining the Gibbs Sampler", The American Statistician 46, for another introduction to the Gibbs sampler, and Tanner, M. A. (1996), Tools for Statistical Inference, 3rd ed., Springer-Verlag, New York, for details on the Gibbs stopper (a convergence criterion).}
}