ripleyusersMCMCpack/src/wrapped_generator.h0000644000176000001440000001437412140061657016343 0ustar ripleyusers/* * Scythe Statistical Library * Copyright (C) 2000-2002 Andrew D. Martin and Kevin M. Quinn; * 2002-present Andrew D. Martin, Kevin M. Quinn, and Daniel * Pemstein. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * under the terms of the GNU General Public License as published by * Free Software Foundation; either version 2 of the License, or (at * your option) any later version. See the text files COPYING * and LICENSE, distributed with this source code, for further * information. * -------------------------------------------------------------------- * scythestat/rng/wrapped_generator.h * * Provides a class definition that allows users to adapt non-Scythe * pseudo-random number generators to Scythe's rng interface. * Specifically, wraps any functor that generators uniform variates on * (0, 1). * */ /*! \file wrapped_generator.h * \brief Adaptor for non-Scythe quasi-random number generators. * * This file contains the wrapped_generator class, a class that * extends Scythe's base random number generation class (scythe::rng) * by allowing an arbitrary random uniform number generator to act as * the engine for random number generation in Scythe. */ #ifndef SCYTHE_WRAPPED_GENERATOR_H #define SCYTHE_WRAPPED_GENERATOR_H #ifdef SCYTHE_COMPILE_DIRECT #include "rng.h" #else #include "scythestat/rng.h" #endif namespace scythe { /*! \brief Adaptor for non-Scythe quasi-random number generators. * * This class defines a wrapper for arbitrary random uniform number * generators, allowing them to act as the underlying engine for * random number generation in Scythe. Specifically, any function * object that overloads the function call operator to return * random uniform deviates on the interval (0, 1). * * The wrapped_generator class extends Scythe's basic random number * generating class, scythe::rng, implementing the interface that it * defines. * * \see rng * \see lecuyer * */ template class wrapped_generator: public rng > { public: /*! \brief Default constructor * * This constructor wraps the provided random uniform number * generating function object, creating an object suitable for * random number generation in Scythe. Note that the function * object is passed by reference and is not copied on * construction. * * \param e A function object that returns uniform random * numbers on (0,1) when invoked. * * \see wrapped_generator(const wrapped_generator& wg) */ wrapped_generator (ENGINE& e) : rng > (), engine (e) {} /*! \brief Copy constructor * * This constructor makes a copy of an existing * wrapped_generator object, duplicating its seed and current * state exactly. Note that this will create a copy of the * underlying function object using the function objects copy * construction semantics. * * \param wg An existing wrapped_generator object. * * \see wrapped_generator(ENGINE& e) */ wrapped_generator(const wrapped_generator& wg) : rng > (), engine (wg.engine) {} /*! \brief Generate a random uniform variate on (0, 1). * * This routine returns a random double precision floating point * number from the uniform distribution on the interval (0, * 1). This method overloads the pure virtual method of the * same name in the rng base class. 
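     *
     * As a minimal illustration (not part of the library itself), any
     * functor returning doubles on (0, 1) can serve as the engine; the
     * type rand_uniform below is hypothetical:
     *
     * \code
     * #include <cstdlib>
     *
     * struct rand_uniform {
     *   double operator() () {
     *     // map std::rand() into the open interval (0, 1)
     *     return (std::rand() + 1.0) / (RAND_MAX + 2.0);
     *   }
     * };
     *
     * rand_uniform engine;
     * scythe::wrapped_generator<rand_uniform> wg (engine);
     * double u = wg.runif();                  // one deviate on (0, 1)
     * scythe::Matrix<> U = wg.runif(10, 2);   // a 10 x 2 matrix of deviates
     * \endcode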
* * \see runif(unsigned int, unsigned int) * \see rng */ inline double runif() { return engine(); } /* We have to override the overloaded forms of runif because * overloading the no-arg runif() hides the base class * definition; C++ stops looking once it finds the above. */ /*! \brief Generate a Matrix of random uniform variates. * * This routine returns a Matrix of double precision random * uniform variates. on the interval (0, 1). This method * overloads the virtual method of the same name in the rng base * class. * * This is the general template version of this method and * is called through explicit template instantiation. * * \param rows The number of rows in the returned Matrix. * \param cols The number of columns in the returned Matrix. * * \see runif() * \see rng * * \note We are forced to override this overloaded method * because the 1-arg version of runif() hides the base class's * definition of this method from the compiler, although it * probably should not. */ template inline Matrix runif(unsigned int rows, unsigned int cols) { return rng >::runif(rows, cols); } /*! \brief Generate a Matrix of random uniform variates. * * This routine returns a Matrix of double precision random * uniform variates on the interval (0, 1). This method * overloads the virtual method of the same name in the rng base * class. * * This is the default template version of this method and * is called through implicit template instantiation. * * \param rows The number of rows in the returned Matrix. * \param cols The number of columns in the returned Matrix. * * \see runif() * \see rng * * \note We are forced to override this overloaded method * because the 1-arg version of runif() hides the base class's * definition of this method from the compiler, although it * probably should not. */ Matrix runif (unsigned int rows, unsigned int cols) { return rng >::runif(rows, cols); } protected: ENGINE& engine; // The wrapped runif engine }; } // end namespace scythe #endif /* SCYTHE_WRAPPED_GENERATOR_H */ MCMCpack/src/stat.h0000644000176000001440000004415312140061657013604 0ustar ripleyusers/* * Scythe Statistical Library Copyright (C) 2000-2002 Andrew D. Martin * and Kevin M. Quinn; 2002-present Andrew D. Martin, Kevin M. Quinn, * and Daniel Pemstein. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify under the terms of the GNU General Public License as * published by Free Software Foundation; either version 2 of the * License, or (at your option) any later version. See the text files * COPYING and LICENSE, distributed with this source code, for further * information. * -------------------------------------------------------------------- * scythestat/stat.h * */ /*! * \file stat.h * \brief Definitions for functions that perform common * statistical operations on Scythe Matrix objects. * * \note As is the case throughout the library, we provide both * general and default template definitions of the Matrix-returning * functions in this file, explicitly providing documentation for only * the general template versions. */ #ifndef SCYTHE_STAT_H #define SCYTHE_STAT_H #ifdef SCYTHE_COMPILE_DIRECT #include "matrix.h" #include "algorithm.h" #include "error.h" #else #include "scythestat/matrix.h" #include "scythestat/algorithm.h" #include "scythestat/error.h" #endif #include #include namespace scythe { namespace { typedef unsigned int uint; } /* A macro for defining column versions of a function. 
That is, * when expanded, this macro produces general and default template * functions that compute function NAME on each column in a matrix and * return a row vector with the results. We use this to generate * column versions of every function in this header file. */ #define SCYTHE_STATMETH_COL(NAME) \ template \ Matrix \ NAME ## c (const Matrix& A) \ { \ Matrix res (1, A.cols(), false); \ \ for (uint j = 0; j < A.cols(); ++j) \ res[j] = NAME(A(_, j)); \ \ return res; \ } \ \ template \ Matrix \ NAME ## c (const Matrix& A) \ { \ return NAME ## c(A); \ } /* Calculate the sum of a Matrix */ /*! * \brief Calculate the sum of a Matrix * * This function calculates the sum of a matrix by adding each element * in turn. * * \param A The matrix to be summed. * * \see prod(const Matrix &A) * \see sumc(const Matrix &A) * \see prodc(const Matrix &A) */ template T sum (const Matrix &A) { return (std::accumulate(A.begin_f(), A.end_f(), (T) 0)); } /* Calculate the sum of each column in a Matrix */ /*! * \brief Calculate the sum of each column in a Matrix * * This function calculates the sum of each column in a matrix by * consecutively adding elements in a single column, looping through all * columns, and returning the results. * * \param A The matrix to be summed. * * \see prod(const Matrix &A) * \see sum(const Matrix &A) * \see prodc(const Matrix &A) */ SCYTHE_STATMETH_COL(sum) /* Calculate the product of a Matrix */ /*! * \brief Calculate the product of a Matrix * * This function calculates the product of a matrix by beginning with the * first element of a matrix, and consecutively multiplying each entry. * * \param A The matrix to be multiplied. * * \see sumc(const Matrix &A) * \see sum(const Matrix &A) * \see prodc(const Matrix &A) */ template T prod (const Matrix &A) { return std::accumulate(A.begin_f(), A.end_f(), (T) 1, std::multiplies ()); } /* Calculate the product of each column of a matrix */ /*! * \brief Calculate the product of each column of a Matrix * * This function calculates the product of each column of a matrix by * multiplying all elements of a single column, looping through all columns, * and returning the results. * * \param A The matrix to be multiplied. * * \see sumc(const Matrix &A) * \see sum(const Matrix &A) * \see prod(const Matrix &A) */ SCYTHE_STATMETH_COL(prod) /* Calculate the mean of a Matrix */ /*! * \brief Calculate the mean of a Matrix * * This function calculates the mean of a matrix by summing all elements of * the matrix, and dividing by the total number of elements in the matrix. * * \param A The matrix to be averaged. * * \see sum(const Matrix &A) * \see meanc(const Matrix &A) * \see median(const Matrix &A) * \see mode(const Matrix &A) * \see variance(const Matrix &A) */ template T mean (const Matrix &A) { return (std::accumulate(A.begin_f(), A.end_f(), (T) 0) / A.size()); } /* Calculate the mean of each column of a Matrix */ /*! * \brief Calculate the mean of each column of a Matrix * * This function calculates the mean of each column of a matrix by summing * all elements of a column in the matrix, divding by the total number of * elements in the column, and looping over every column in the matrix. * * \param A The matrix to be averaged. * * \see sumc(const Matrix &A) * \see mean(const Matrix &A) * \see medianc(const Matrix &A) * \see modec(const Matrix &A) * \see variancec(const Matrix &A) */ SCYTHE_STATMETH_COL(mean) /* Calculate the median of a matrix. 
Uses a sort but I'll implement * the randomized alg when I figure out how to generalize it to * even-length lists */ /*! * \brief Calculate the median of a Matrix * * This function calculates the median of a matrix by first sorting the elements * of the matrix, and then finding the middle element. * * \param A The matrix whose median is of interest. * * \see medianc(const Matrix &A) * \see mean(const Matrix &A) * \see mode(const Matrix &A) */ template T median (const Matrix &A) { Matrix temp(A); uint n = temp.size(); sort(temp.begin(), temp.end()); if (n % 2 == 0) return ((temp[n / 2] + temp[n / 2 - 1]) / 2); else return temp[(uint) ::floor(n / 2.0)]; } /* Calculate the median of each column of a matrix */ /*! * \brief Calculate the median of each column a Matrix * * This function calculates the median of each column of a matrix by first * sorting the elements and locating the middle in a single column, and then * looping over all columns. * * \param A The matrix whose medians are of interest. * * \see median(const Matrix &A) * \see meanc(const Matrix &A) * \see modec(const Matrix &A) */ SCYTHE_STATMETH_COL(median) /* Calculate the mode of a matrix */ /*! * \brief Calculate the mode of a Matrix * * This function calculates the mode of a matrix by determining which value of * the matrix occurs with the highest frequency. * * \param A The matrix whose mode is of interest. * * \see modec(const Matrix &A) * \see mean(const Matrix &A) * \see median(const Matrix &A) */ template T mode (const Matrix &A) { Matrix temp(A); sort(temp.begin(), temp.end()); T last = temp[0]; uint cnt = 1; T cur_max = temp[0]; uint max_cnt = 1; for (uint i = 1; i < temp.size(); ++i) { if (last == temp[i]) { ++cnt; } else { last = temp[i]; cnt = 1; } if (cnt > max_cnt) { max_cnt = cnt; cur_max = temp[i]; } } return cur_max; } /*! * \brief Calculate the mode of the columns of a Matrix * * This function calculates the mode of the columns of a matrix by * determining which value in a single column of the matrix occurs * most frequently, and then looping over all columns. * * \param A The matrix whose modes are of interest. * * \see mode(const Matrix &A) * \see meanc(const Matrix &A) * \see medianc(const Matrix &A) */ SCYTHE_STATMETH_COL(mode) /* Calculate the variance of a Matrix */ /* A functor that encapsulates a single variance calculation step. * Also used by skew and kurtosis. */ namespace { template struct var_step : std::binary_function { T constant_; T2 divisor_; T exponent_; var_step (T c, T2 d, T e) : constant_ (c), divisor_ (d), exponent_ (e) {} T operator() (T last, T x) const { return (last + std::pow(constant_ - x, exponent_) / divisor_); } }; } /*! * \brief Calculate the variance of a Matrix * * This function calculates the variance of a matrix. * * \param A The matrix whose variance is of interest. * * \see var(cons Matrix &A, T mu) * \see varc(const Matrix &A) * \see sd(const Matrix &A) * \see mean(const Matrix &A) */ template T var (const Matrix &A) { return var(A, mean(A)); } /* Calculate the variances of each column of a Matrix. */ /*! * \brief Calculate the variance of each column of a Matrix * * This function calculates the variance of each column of a matrix. * * \param A The matrix whose variances are of interest. * * \see var(const Matrix &A) * \see var(cons Matrix &A, T mu) * \see sdc(const Matrix &A) * \see meanc(const Matrix &A) */ SCYTHE_STATMETH_COL(var) /*! * \brief Calculate the variance of a Matrix * * This function calculates the variance of a matrix when the mean is * already known. 
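   *
   * For example (an illustrative sketch, not part of the library), the
   * sample variance of the values 1, 2, 3 with a precomputed mean:
   *
   * \code
   * scythe::Matrix<> A(3, 1, false);
   * A(0) = 1.0; A(1) = 2.0; A(2) = 3.0;
   * double mu = scythe::mean(A);      // 2.0
   * double s2 = scythe::var(A, mu);   // (1 + 0 + 1) / (3 - 1) = 1.0
   * double s  = scythe::sd(A, mu);    // 1.0
   * \endcode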
* * \param A The matrix whose variance is of interest. * \param mu The mean of the values in the matrix. * * \see var(cons Matrix &A) * \see varc(const Matrix &A) * \see sd(const Matrix &A) * \see mean(const Matrix &A) */ template T var (const Matrix &A, T mu) { return std::accumulate(A.begin_f(), A.end_f(), (T) 0, var_step (mu, A.size() - 1, 2)); } /* Calculate the standard deviation of a Matrix (not std cause of namespace std:: */ /*! * \brief Calculate the standard deviation of a Matrix * * This function calculates the standard deviation of a matrix by * taking the square root of the matrix's variance. * * \param A The matrix whose standard deviation is of interest. * * \see sd(const Matrix &A) * \see variance(const Matrix &A) */ template T sd (const Matrix &A) { return std::sqrt(var(A)); } /* Calculate the standard deviation of each column of a Matrix */ /*! * \brief Calculate the standard deviation of each column of a Matrix * * This function calculates the standard deviation of each column of a matrix by * taking the square root of each column's variance. * * \param A The matrix whose standard deviations are of interest. * * \see sd(const Matrix &A) * \see variancec(const Matrix &A) */ SCYTHE_STATMETH_COL(sd) /*! * \brief Calculate the standard deviation of a Matrix * * This function calculates the standard deviation of a matrix * when the matrix's mean is already known. * * \param A The matrix whose standard deviation is of interest. * \param mu The matrix mean. * * \see sd(const Matrix &A) * \see variance(const Matrix &A) */ template T sd (const Matrix &A, T mu) { return std::sqrt(var(A, mu)); } /* Calculate the skew of a Matrix */ /*! * \brief Calculate the skew of a Matrix * * This function calculates the skew of a matrix. * * \param A The matrix whose skew is of interest. * * \see skewc(const Matrix &A) * \see kurtosis(const Matrix &A) */ template T skew (const Matrix &A) { T mu = mean(A); T sde = sd(A, mu); return std::accumulate(A.begin_f(), A.end_f(), (T) 0, var_step (mu, A.size() * std::pow(sde, 3), 3)); } /* Calculate the skew of each column of a Matrix. */ /*! * \brief Calculate the skew of each column of a Matrix * * This function calculates the skew of each column of a matrix. * * \param A The matrix whose skews are of interest. * * \see skew(const Matrix &A) * \see kurtosisc(const Matrix &A) */ SCYTHE_STATMETH_COL(skew) /* Calculate the kurtosis of a Matrix */ /*! * \brief Calculate the kurtosis of a Matrix * * This function calculates the kurtosis of a matrix. * * \param A The matrix whose kurtosis is of interest. * * \see skew(const Matrix &A) * \see kurtosisc(const Matrix &A) */ template T kurtosis (const Matrix &A) { T mu = mean(A); T sde = sd(A, mu); return (std::accumulate(A.begin_f(), A.end_f(), (T) 0, var_step (mu, A.size() * std::pow(sde, 4), 4)) - 3); } /* Calculate the kurtosis of each column of a Matrix. */ /*! * \brief Calculate the kurtosis of each column of a Matrix * * This function calculates the kurtosis of each column of a matrix. * * \param A The matrix whose kurtoses are of interest. * * \see skewc(const Matrix &A) * \see kurtosis(const Matrix &A) */ SCYTHE_STATMETH_COL(kurtosis) /* Calculates the maximum element in a Matrix */ /*! * \brief Calculate the maximum element in a Matrix * * This function identifies the maximum element in a matrix. * * \param A The matrix whose maximum element is of interest. 
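   *
   * For example (an illustrative sketch, not part of the library):
   *
   * \code
   * scythe::Matrix<> A(2, 2, false);
   * A(0,0) = 4.0;  A(0,1) = 1.0;
   * A(1,0) = 2.0;  A(1,1) = 7.0;
   * double m = scythe::max(A);               // 7.0
   * scythe::Matrix<> cm = scythe::maxc(A);   // 1 x 2 row vector (4, 7)
   * unsigned int i = scythe::maxind(A);      // 3: position of 7 in column-major storage order
   * \endcode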
* * \see min(const Matrix &A) * \see maxc (const Matrix &A) */ template T max (const Matrix &A) { return *(max_element(A.begin_f(), A.end_f())); } /*! * \brief Calculate the maximum of each column of a Matrix * * This function identifies the maximum of each column in a matrix. * * \param A The matrix whose maximae are of interest. * * \see max(const Matrix &A) * \see minc(const Matrix &A) */ SCYTHE_STATMETH_COL(max) /* Calculates the minimum element in a Matrix */ /*! * \brief Calculate the maximum element in a Matrix * * This function identifies the maximum element in a matrix. * * \param A The matrix whose maximum element is of interest. * * \see max(const Matrix &A) * \see minc(const Matrix &A) */ template T min (const Matrix &A) { return *(min_element(A.begin_f(), A.end_f())); } /*! * \brief Calculate the minimum of each column of a Matrix * * This function identifies the minimum of each column in a matrix. * * \param A The matrix whose minimae are of interest. * * \see min(const Matrix &A) * \see maxc(const Matrix &A) */ SCYTHE_STATMETH_COL(min) /* Find the index of the max element */ /*! * \brief Calculate the index of the maximum element in a Matrix * * This function identifies the index of the maximum element in a matrix. * * \param A The matrix whose maximum element indices are of interest. * * \see minind(const Matrix &A) * \see max(const Matrix &A) * \see maxindc(const Matrix &A) */ template unsigned int maxind (const Matrix &A) { return (max_element(A.begin_f(), A.end_f())).get_index(); } /*! * \brief Calculate the index of the maximum for each column of a Matrix * * This function identifies the index of the maximum for each column of a Matrix. * * \param A The matrix whose maximum indices are of interest. * * \see maxc(const Matrix &A) * \see minindc(const Matrix &A) */ SCYTHE_STATMETH_COL(maxind) /* Find the index of the min element */ /*! * \brief Calculate the index of the minimum element in a Matrix * * This function identifies the index of the minimum element in a matrix. * * \param A The matrix whose minimum element indices are of interest. * * \see maxind(const Matrix &A) * \see min(const Matrix &A) * \see minindc(const Matrix &A) */ template unsigned int minind (const Matrix &A) { return (min_element(A.begin_f(), A.end_f())).get_index(); } /*! * \brief Calculate the index of the minimum for each column of a Matrix * * This function identifies the index of the minimum for each column of a Matrix. * * \param A The matrix whose minimum indices are of interest. * * \see minc(const Matrix &A) * \see maxindc(const Matrix &A) */ SCYTHE_STATMETH_COL(minind) } // end namespace scythe #endif /* SCYTHE_STAT_H */ MCMCpack/src/SSVSquantreg.cc0000644000176000001440000001761612140061657015340 0ustar ripleyusers// SSVSquantreg.cc is a function that uses stochastic search variable selection // to select promising models at a pre-specified quantile. // // The initial version of this file was generated by the // auto.Scythe.call() function in the MCMCpack R package // written by: // // Andrew D. Martin // Dept. of Political Science // Washington University in St. Louis // admartin@wustl.edu // // Kevin M. Quinn // Dept. of Government // Harvard University // kevin_quinn@harvard.edu // // This software is distributed under the terms of the GNU GENERAL // PUBLIC LICENSE Version 2, June 1991. See the package LICENSE // file for more information. // // Copyright (C) 2009 Andrew D. Martin and Kevin M. 
Quinn // // This file was initially generated on June 1 2009 // // The function was rewritten by: // // Craig Reed // Department of Mathematical Sciences // Brunel University // craig.reed@brunel.ac.uk #ifndef SSVSQUANTREG_CC #define SSVSQUANTREG_CC #include "MCMCrng.h" #include "MCMCfcds.h" #include "matrix.h" #include "distributions.h" #include "stat.h" #include "la.h" #include "ide.h" #include "smath.h" #include "rng.h" #include // needed to use Rprintf() #include // needed to allow user interrupts using namespace std; using namespace scythe; struct COV_TRIAL_PREP{ Matrix<> C; Matrix<> U; double logdetminhalf; }; // Helper function to prepare for the sequence of draws from the covariate indicators static inline COV_TRIAL_PREP QR_SSVS_covariate_trials_prep(const Matrix<>& X_gamma, const Matrix<>& Y, const Matrix& gamma, const Matrix<>& weights, const Matrix<>& lambda, double tau, unsigned int n_cov, unsigned int q){ const unsigned int n_obs=Y.rows(); Matrix<> U(Y); // obtain cholesky decomposition of (Xtilde, utilde) if (tau!=0.5){ U -= (1.0-2.0*tau)*weights; } Matrix<> XU(U.rows(),n_cov+1,false); if (n_cov == 0){ XU = U; } else{ XU = cbind(X_gamma,U); } Matrix<> XUtwXU(n_cov+1,n_cov+1,false); double temp_xu = 0.0; //Calculate XUtwXU for (unsigned int i=0; i lambda_gamma = selif(lambda,gamma(q,0,gamma.rows()-1,0)); for (unsigned int j=q; j C = cholesky(XUtwXU); double logdetminhalf = 0.0; // Work out -1/2 log(det(C'C)) for (unsigned int r=0; r reference which it * fills with the posterior of the indicator variables. */ template void SSVSquantreg_impl (rng& stream, double tau, Matrix<>& Y, const Matrix<>& X, unsigned int q, double pi0a0, double pi0b0, unsigned int burnin, unsigned int mcmc, unsigned int thin, unsigned int verbose, Matrix<>& result) { // define constants unsigned int n_obs = X.rows(); unsigned int k = X.cols(); double tau_onemintau = tau*(1.0-tau); const unsigned int tot_iter = burnin + mcmc; //total iterations const unsigned int nstore = mcmc / thin; // number of draws to store // storage matrices Matrix gamma(k,1,true,true); Matrix<> gamma_matrix (k, nstore); Matrix<> beta(k,1,true,0.0); Matrix<> beta_matrix(k, nstore); COV_TRIAL trial; COV_TRIAL_PREP trial_prep; unsigned int n_cov = k; // Matrices that will change dimension Matrix<> *X_gamma = new Matrix<>(X); Matrix<> *C = new Matrix<>(n_cov+1,n_cov+1,false); Matrix<> *beta_gamma = new Matrix<>(n_cov,1,false); // Matrix with same dimensions Matrix<> U(n_obs,1,false); // Gibbs sampler unsigned int count = 0; unsigned int col_index = q; // Initial value of pi0 double pi0 = stream.rbeta(pi0a0,pi0b0); // Initial values for lambda Matrix<> lambda = stream.rgamma(n_cov-q,1,0.5,0.5); // Initial values for weights Matrix<> weights = stream.rexp(n_obs,1,tau_onemintau); for (unsigned int iter = 0; iter < tot_iter; ++iter) { trial_prep = QR_SSVS_covariate_trials_prep(*X_gamma, Y, gamma, weights, lambda, tau, n_cov, q); C = new Matrix<>(trial_prep.C); double logdetminhalf = trial_prep.logdetminhalf; U = trial_prep.U; // updating the indicator variables corresponding to each covariate for (unsigned int j=q; j (trial.Cnew); delete X_gamma; X_gamma = new Matrix<> (t(selif(t(X),gamma))); ++n_cov; } } else{ trial = QR_SSVS_covariate_trials_draw_present(*C,col_index,n_obs,pi0,lambda(j-q),logdetminhalf,stream); gamma(j) = trial.newtrial; logdetminhalf = trial.logdetminhalf; if (trial.newtrial == false){ delete C; C = new Matrix<> (trial.Cnew); delete X_gamma; X_gamma = new Matrix<> (t(selif(t(X),gamma))); --n_cov; } } col_index = q; } // 
end covariate trials loop if (n_cov == 0){ // Support for null model weights = ALaplaceIGaussregress_weights_draw (fabs(Y), stream); lambda = stream.rexp(k-q,1,0.5); } else{ beta_gamma = new Matrix<> (QR_SSVS_beta_draw (*C, stream)); if (q != 0){ beta(0,0,q-1,0) = (*beta_gamma)(0,0,q-1,0); } for (unsigned int j=q; j e = gaxpy(*X_gamma, (-1*(*beta_gamma)), Y); Matrix<> abse = fabs(e); weights = ALaplaceIGaussregress_weights_draw (abse, stream); lambda = QR_SSVS_lambda_draw(*beta_gamma, gamma, k, q, stream); delete beta_gamma; } delete C; pi0 = QR_SSVS_pi0_draw(n_cov-q, k-q, pi0a0, pi0b0, stream); // store draws in storage matrices if (iter >= burnin && (iter % thin == 0)) { gamma_matrix(_, count) = gamma; beta_matrix(_, count) = beta; ++count; } // print output to stdout if(verbose > 0 && iter % verbose == 0) { Rprintf("\n\nSSVSquantreg iteration %i of %i \n", (iter+1), tot_iter); Rprintf("gamma = \n"); for (unsigned int r=0; r(k,1,true,0.0); R_CheckUserInterrupt(); // allow user interrupts } // end MCMC loop delete X_gamma; result = cbind(t(gamma_matrix),t(beta_matrix)); } // end SSVSquantreg_impl extern "C" { void SSVSquantreg(double *sampledata, const int *samplerow, const int *samplecol, const double *tau, const double *Ydata, const int *Yrow, const int *Ycol, const double *Xdata, const int *Xrow, const int *Xcol, const int *q, const int *burnin, const int *mcmc, const int *thin, const int *uselecuyer, const int *seedarray, const int *lecuyerstream, const int *verbose, const double *pi0a0, const double *pi0b0) { // pull together Matrix objects Matrix<> Y(*Yrow, *Ycol, Ydata); Matrix<> X(*Xrow, *Xcol, Xdata); Matrix<> storagematrix; MCMCPACK_PASSRNG2MODEL(SSVSquantreg_impl, *tau, Y, X, *q, *pi0a0, *pi0b0, *burnin, *mcmc, *thin, *verbose, storagematrix); const unsigned int size = *samplerow * *samplecol; for (unsigned int h = 0; h < size; ++h){ sampledata[h] = storagematrix(h); } } } #endif MCMCpack/src/smath.h0000644000176000001440000006521112140061657013743 0ustar ripleyusers/* * Scythe Statistical Library Copyright (C) 2000-2002 Andrew D. Martin * and Kevin M. Quinn; 2002-present Andrew D. Martin, Kevin M. Quinn, * and Daniel Pemstein. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify under the terms of the GNU General Public License as * published by Free Software Foundation; either version 2 of the * License, or (at your option) any later version. See the text files * COPYING and LICENSE, distributed with this source code, for further * information. * -------------------------------------------------------------------- * scythestat/smath.h * */ /*! * \file smath.h * \brief Definitions for functions that perform common mathematical * operations on every element of a Matrix. * * \note As is the case throughout the library, we provide both * general and default template definitions of the Matrix-returning * functions in this file, explicitly providing documentation for only * the general template versions. As is also often the case, Doxygen * does not always correctly add the default template definition to * the function list below; there is always a default template * definition available for every function. 
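 *
 * As a small illustration (not itself part of the library), these
 * element-wise transformations operate directly on Matrix objects and
 * can be composed freely:
 *
 * \code
 * scythe::Matrix<> A(2, 2, false);
 * A(0,0) = 1.0;  A(0,1) = 4.0;
 * A(1,0) = 9.0;  A(1,1) = 16.0;
 * scythe::Matrix<> R = scythe::sqrt(A);      // element-wise square roots
 * scythe::Matrix<> L = scythe::log(R);       // natural log of each element
 * scythe::Matrix<> P = scythe::pow(R, 2.0);  // squares each element, recovering A
 * \endcode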
* */ #ifndef SCYTHE_MATH_H #define SCYTHE_MATH_H #ifdef SCYTHE_COMPILE_DIRECT #include "matrix.h" #include "algorithm.h" #include "error.h" #else #include "scythestat/matrix.h" #include "scythestat/algorithm.h" #include "scythestat/error.h" #endif #include #include #include namespace scythe { namespace { typedef unsigned int uint; } /* Almost every function in this file follows one of the two patterns * described by these macros. The first macro handles single-argument * functions. The second handles two-matrix-argument functions (or * scalar-matrix, matrix-scalar. The second macro also permits * cross-type operations (these are limited only by the capabilities * of the underlying functions). */ #define SCYTHE_MATH_OP(NAME, OP) \ template \ Matrix \ NAME (const Matrix& A) \ { \ Matrix res(A.rows(), A.cols(), false); \ std::transform(A.begin_f(), A.end_f(), res.begin_f(), (T (*) (T))OP); \ return res; \ } \ \ template \ Matrix \ NAME (const Matrix& A) \ { \ return NAME(A); \ } #define SCYTHE_MATH_OP_2ARG(NAME, OP) \ template \ Matrix \ NAME (const Matrix& A, const Matrix& B) \ { \ SCYTHE_CHECK_10 (A.size() != 1 && B.size() != 1 && \ A.size() != B.size(), scythe_conformation_error, \ "Matrices with dimensions (" << A.rows() \ << ", " << A.cols() \ << ") and (" << B.rows() << ", " << B.cols() \ << ") are not conformable"); \ \ Matrix res; \ \ if (A.size() == 1) { \ res.resize2Match(B); \ std::transform(B.template begin_f(), B.template end_f(),\ res.begin_f(), std::bind1st(std::ptr_fun((T (*) (T, S))OP), A(0))); \ } else if (B.size() == 1) { \ res.resize2Match(A); \ std::transform(A.template begin_f(), A.template end_f(),\ res.begin_f(), std::bind2nd(std::ptr_fun((T (*) (T, S))OP), B(0))); \ } else { \ res.resize2Match(A); \ std::transform(A.template begin_f(), A.template end_f(),\ B.template begin_f(), res.begin_f(), (T (*) (T, S))OP); \ } \ \ return res; \ } \ \ template \ Matrix \ NAME (const Matrix& A, const Matrix& B) \ { \ return NAME(A, B); \ } \ \ template \ Matrix \ NAME (const Matrix& A, S b) \ { \ return NAME(A, Matrix(b)); \ } \ \ template \ Matrix \ NAME (const Matrix& A, S b) \ { \ return NAME(A, Matrix(b)); \ } \ \ template \ Matrix \ NAME (T a, const Matrix& B) \ { \ return NAME(Matrix(a), B); \ } \ \ template \ Matrix \ NAME (T a, const Matrix& B) \ { \ return NAME(Matrix(a), B); \ } /* calc the inverse cosine of each element of a Matrix */ /*! * \brief Calculate the inverse cosine of each element of a Matrix * * This function calculates the inverse cosine of each element in a Matrix * * \param A The matrix whose inverse cosines are of interest. * * \see tan() * \see tanh() * \see sin() * \see sinh() * \see cos() * \see cosh() * \see acosh() * \see asin() * \see asinh() * \see atan() * \see atanh() * \see atan2() */ SCYTHE_MATH_OP(acos, ::acos) /* calc the inverse hyperbolic cosine of each element of a Matrix */ /*! * \brief Calculate the inverse hyperbolic cosine of each element of a Matrix * * This function calculates the inverse hyperbolic cosine of each element * in a Matrix * * \param A The matrix whose inverse hyperbolic cosines are of interest. * * \see tan() * \see tanh() * \see sin() * \see sinh() * \see cos() * \see cosh() * \see acos() * \see asin() * \see asinh() * \see atan() * \see atanh() * \see atan2() */ SCYTHE_MATH_OP(acosh, ::acosh) /* calc the inverse sine of each element of a Matrix */ /*! 
* \brief Calculate the inverse sine of each element of a Matrix * * This function calculates the inverse sine of each element * in a Matrix * * \param A The matrix whose inverse sines are of interest. * * \see tan() * \see tanh() * \see sin() * \see sinh() * \see cos() * \see cosh() * \see acos() * \see acosh() * \see asinh() * \see atan() * \see atanh() * \see atan2() */ SCYTHE_MATH_OP(asin, ::asin) /* calc the inverse hyperbolic sine of each element of a Matrix */ /*! * \brief Calculate the inverse hyperbolic sine of each element of a Matrix * * This function calculates the inverse hyperbolic sine of each element * in a Matrix * * \param A The matrix whose inverse hyperbolic sines are of interest. * * \see tan() * \see tanh() * \see sin() * \see sinh() * \see cos() * \see cosh() * \see acos() * \see acosh() * \see asin() * \see atan() * \see atanh() * \see atan2() */ SCYTHE_MATH_OP(asinh, ::asinh) /* calc the inverse tangent of each element of a Matrix */ /*! * \brief Calculate the inverse tangent of each element of a Matrix * * This function calculates the inverse tangent of each element * in a Matrix * * \param A The matrix whose inverse tangents are of interest. * * \see tan() * \see tanh() * \see sin() * \see sinh() * \see cos() * \see cosh() * \see acos() * \see acosh() * \see asin() * \see asin() * \see atanh() * \see atan2() */ SCYTHE_MATH_OP(atan, ::atan) /* calc the inverse hyperbolic tangent of each element of a Matrix */ /*! * \brief Calculate the inverse hyperbolic tangent of each element of a Matrix * * This function calculates the inverse hyperbolic tangent of each element * in a Matrix * * \param A The matrix whose inverse hyperbolic tangents are of interest. * * \see tan() * \see tanh() * \see sin() * \see sinh() * \see cos() * \see cosh() * \see acos() * \see acosh() * \see asin() * \see asinh() * \see atan() * \see atan2() */ SCYTHE_MATH_OP(atanh, ::atanh) /* calc the angle whose tangent is y/x */ /*! * \brief Calculate the angle whose tangent is y/x * * This function calculates the angle whose tangent is y/x, given two * matrices A and B (where y is the ith element of A, and x is the jth element * of matrix B). * * \param A The matrix of y values * \param B The matrix of x values * * \see tan() * \see tanh() * \see sin() * \see sinh() * \see cos() * \see cosh() * \see acos() * \see acosh() * \see asin() * \see asinh() * \see atan() * \see atanh() */ SCYTHE_MATH_OP_2ARG(atan2, ::atan2) /* calc the cube root of each element of a Matrix */ /*! * \brief Calculate the cube root of each element of a Matrix * * This function calculates the cube root of each element * in a Matrix * * \param A The matrix whose cube roots are of interest. * * \see sqrt() */ SCYTHE_MATH_OP(cbrt, ::cbrt) /* calc the ceil of each element of a Matrix */ /*! * \brief Calculate the ceiling of each element of a Matrix * * This function calculates the ceiling of each element * in a Matrix * * \param A The matrix whose ceilings are of interest. * * \see floor() */ SCYTHE_MATH_OP(ceil, ::ceil) /* create a matrix containing the absval of the first input and the * sign of the second */ /*! * \brief Create a matrix containing the absolute value of the first input * and the sign of the second input * * This function creates a matrix containing the absolute value of the first * input, a matrix called A, and the sign of the second input, matrix B. * * \param A The matrix whose absolute values will comprise the resultant matrix. 
* \param B The matrix whose signs will comprise the resultant matrix */ SCYTHE_MATH_OP_2ARG(copysign, ::copysign) /* calc the cosine of each element of a Matrix */ /*! * \brief Calculate the cosine of each element of a Matrix * * This function calculates the cosine of each element in a Matrix * * \param A The matrix whose cosines are of interest. * * \see tan() * \see tanh() * \see sin() * \see sinh() * \see cosh() * \see acos() * \see acosh() * \see asin() * \see asinh() * \see atan() * \see atanh() * \see atan2() */ SCYTHE_MATH_OP(cos, ::cos) /* calc the hyperbolic cosine of each element of a Matrix */ /*! * \brief Calculate the hyperbolic cosine of each element of a Matrix * * This function calculates the hyperbolic cosine of each element in a Matrix * * \param A The matrix whose hyperbolic cosines are of interest. * * \see tan() * \see tanh() * \see sin() * \see sinh() * \see cos() * \see acos() * \see acosh() * \see asin() * \see asinh() * \see atan() * \see atanh() * \see atan2() */ SCYTHE_MATH_OP(cosh, ::cosh) /* calc the error function of each element of a Matrix */ /*! * \brief Calculate the error function of each element of a Matrix * * This function calculates the error function of each element in a Matrix * * \param A The matrix whose error functions are of interest. * * \see erfc() */ SCYTHE_MATH_OP(erf, ::erf) /* calc the complementary error function of each element of a Matrix */ /*! * \brief Calculate the complementary error function of each element of a Matrix * * This function calculates the complemenatry error function of each * element in a Matrix * * \param A The matrix whose complementary error functions are of interest. * * \see erf() */ SCYTHE_MATH_OP(erfc, ::erfc) /* calc the vaue e^x of each element of a Matrix */ /*! * \brief Calculate the value e^x for each element of a Matrix * * This function calculates the value e^x for each element of a matrix, where * x is the ith element of the matrix A * * \param A The matrix whose elements are to be exponentiated. * * \see expm1() */ SCYTHE_MATH_OP(exp, ::exp) /* calc the exponent - 1 of each element of a Matrix */ /*! * \brief Calculate the value e^(x-1) for each element of a Matrix * * This function calculates the value e^(x-1) for each element of a matrix, where * x is the ith element of the matrix A * * \param A The matrix whose elements are to be exponentiated. * * \see exp() */ SCYTHE_MATH_OP(expm1, ::expm1) /* calc the absval of each element of a Matrix */ /*! * \brief Calculate the absolute value of each element of a Matrix * * This function calculates the absolute value of each element in a Matrix * * \param A The matrix whose absolute values are to be taken. */ SCYTHE_MATH_OP(fabs, (T (*) (T))::fabs) /* calc the floor of each element of a Matrix */ /*! * \brief Calculate the floor of each element of a Matrix * * This function calculates the floor of each element * in a Matrix * * \param A The matrix whose floors are of interest. * * \see ceil() */ SCYTHE_MATH_OP(floor, ::floor) /* calc the remainder of the division of each matrix element */ /*! * \brief Calculate the remainder of the division of each matrix element * * This function calculates the remainder when the elements of Matrix A are * divided by the elements of Matrix B. * * \param A The matrix to serve as dividend * \param B the matrix to serve as divisor */ SCYTHE_MATH_OP_2ARG(fmod, ::fmod) /* calc the fractional val of input and return exponents in int * matrix reference */ /*! 
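   * \brief Calculate the normalized fraction and exponent of each element of a Matrix
   *
   * This function decomposes each element x of the matrix A into a
   * fraction whose magnitude lies in [0.5, 1) (or zero) and an integral
   * power of two, such that x = fraction * 2^exponent, by applying the
   * C library routine frexp element-wise. The fractions are returned
   * in a new Matrix and the exponents are written into \a ex, which
   * must have the same number of elements as \a A.
   *
   * \param A The matrix whose elements are to be decomposed.
   * \param ex A matrix that receives the integer exponents.
   *
   * \see ldexp()
   * \see modf()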
*/ template Matrix frexp (const Matrix& A, Matrix& ex) { SCYTHE_CHECK_10(A.size() != ex.size(), scythe_conformation_error, "The input matrix sizes do not match"); Matrix res(A.rows(), A.cols()); typename Matrix::const_forward_iterator it; typename Matrix::forward_iterator rit = res.begin_f(); typename Matrix::const_forward_iterator it2 = ex.begin_f(); for (it = A.begin_f(); it != A.end_f(); ++it) { *rit = ::frexp(*it, &(*it2)); ++it2; ++rit; } return res; } template Matrix frexp (Matrix& A, Matrix& ex) { return frexp(A,ex); } /* calc the euclidean distance between the two inputs */ /*! * \brief Calculate the euclidean distance between two inputs * * This function calculates the euclidean distance between the elements of Matrix * A and the elements of Matrix B. * * \param A Input matrix * \param B Input matrix */ SCYTHE_MATH_OP_2ARG(hypot, ::hypot) /* return (int) logb */ SCYTHE_MATH_OP(ilogb, ::ilogb) /* compute the bessel func of the first kind of the order 0 */ /*! * \brief Compute the Bessel function of the first kind of the order 0 * * This function computes the Bessel function of the first kind of order 0 * for each element in the input matrix, A. * * \param A Matrix for which the Bessel function is of interest * * \see j1() * \see jn() * \see y0() * \see y1() * \see yn() */ SCYTHE_MATH_OP(j0, ::j0) /* compute the bessel func of the first kind of the order 1 */ /*! * \brief Compute the Bessel function of the first kind of the order 1 * * This function computes the Bessel function of the first kind of order 1 * for each element in the input matrix, A. * * \param A Matrix for which the Bessel function is of interest * * \see j0() * \see jn() * \see y0() * \see y1() * \see yn() */ SCYTHE_MATH_OP(j1, ::j1) /* compute the bessel func of the first kind of the order n * TODO: This definition causes the compiler to issue some warnings. * Fix */ /*! * \brief Compute the Bessel function of the first kind of the order n * * This function computes the Bessel function of the first kind of order n * for each element in the input matrix, A. * * \param n Order of the Bessel function * \param A Matrix for which the Bessel function is of interest * * \see j0() * \see j1() * \see y0() * \see y1() * \see yn() */ SCYTHE_MATH_OP_2ARG(jn, ::jn) /* calc x * 2 ^ex */ /*! * \brief Compute x * 2^ex * * This function computes the value of x * 2^ex, where x is the ith element of * the input matrix A, and ex is the desired value of the exponent. * * \param A Matrix whose elements are to be multiplied * \param ex Matrix of powers to which 2 will be raised. */ SCYTHE_MATH_OP_2ARG(ldexp, ::ldexp) /* compute the natural log of the absval of gamma function */ /*! * \brief Compute the natural log of the absolute value of the gamma function * * This function computes the absolute value of the Gamma Function, evaluated at * each element of the input matrix A. * * \param A Matrix whose elements will serve as inputs for the Gamma Function * * \see log() */ SCYTHE_MATH_OP(lgamma, ::lgamma) /* calc the natural log of each element of a Matrix */ /*! * \brief Compute the natural log of each element of a Matrix * * This function computes the natural log of each element in a matrix, A. * * \param A Matrix whose natural logs are of interest * * \see log10() * \see log1p() * \see logb() */ SCYTHE_MATH_OP(log, (T (*)(T))::log) /* calc the base-10 log of each element of a Matrix */ /*! * \brief Compute the log base 10 of each element of a Matrix * * This function computes the log base 10 of each element in a matrix, A. 
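   *
   * For example (an illustrative sketch, not part of the library),
   * log10 applied to the values 1, 10, 100 returns 0, 1, 2:
   *
   * \code
   * scythe::Matrix<> A(3, 1, false);
   * A(0) = 1.0; A(1) = 10.0; A(2) = 100.0;
   * scythe::Matrix<> L = scythe::log10(A);  // contains 0, 1, 2
   * \endcode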
* * \param A Matrix whose logs are of interest * * \see log() * \see log1p() * \see logb() */ SCYTHE_MATH_OP(log10, ::log10) /* calc the natural log of 1 + each element of a Matrix */ /*! * \brief Compute the natural log of 1 + each element of a Matrix * * This function computes the natural log of 1 + each element of a Matrix. * * \param A Matrix whose logs are of interest * * \see log() * \see log10() * \see logb() */ SCYTHE_MATH_OP(log1p, ::log1p) /* calc the logb of each element of a Matrix */ /*! * \brief Compute the logb each element of a Matrix * * This function computes the log base b of each element of a Matrix. * * \param A Matrix whose logs are of interest * * \see log() * \see log10() * \see log1p() */ SCYTHE_MATH_OP(logb, ::logb) /* x = frac + i, return matrix of frac and place i in 2nd matrix */ template Matrix modf (const Matrix& A, Matrix& ipart) { SCYTHE_CHECK_10(A.size() != ipart.size(), scythe_conformation_error, "The input matrix sizes do not match"); Matrix res(A.rows(), A.cols()); typename Matrix::const_forward_iterator it; typename Matrix::forward_iterator rit = res.begin_f(); typename Matrix::const_forward_iterator it2 = ipart.begin_f(); for (it = A.begin_f(); it != A.end_f(); ++it) { *rit = ::modf(*it, &(*it2)); ++it2; ++rit; } return res; } template Matrix modf (Matrix& A, Matrix& ipart) { return modf(A,ipart); } /* calc x^ex of each element of a Matrix */ /*! * \brief Compute x^ex for each element of a matrix * * This function computes x^ex, where x is the ith element of the matrix A, * and ex is the desired exponent. * * \param A Matrix to be exponentiated * \param ex Desired exponent */ SCYTHE_MATH_OP_2ARG(pow, ::pow) /* calc rem == x - n * y */ SCYTHE_MATH_OP_2ARG(remainder, ::remainder) /* return x rounded to nearest int */ /*! * \brief Return x rounded to the nearest integer * * This function returns x, where x is the ith element of the Matrix A, * rounded to the nearest integer. * * \param A Matrix whose elements are to be rounded */ SCYTHE_MATH_OP(rint, ::rint) /* returns x * FLT_RADIX^ex */ SCYTHE_MATH_OP_2ARG(scalbn, ::scalbn) /* calc the sine of x */ /*! * \brief Calculate the sine of each element of a Matrix * * This function calculates the sine of each element in a Matrix * * \param A The matrix whose sines are of interest. * * \see tan() * \see tanh() * \see sinh() * \see cos() * \see cosh() * \see acos() * \see acosh() * \see asin() * \see asinh() * \see atan() * \see atanh() * \see atan2() */ SCYTHE_MATH_OP(sin, ::sin) /* calc the hyperbolic sine of x */ /*! * \brief Calculate the hyperbolic sine of each element of a Matrix * * This function calculates the hyperbolic sine of each element in a Matrix * * \param A The matrix whose hyperbolic sines are of interest. * * \see tan() * \see tanh() * \see sin() * \see cos() * \see cosh() * \see acos() * \see acosh() * \see asin() * \see asinh() * \see atan() * \see atanh() * \see atan2() */ SCYTHE_MATH_OP(sinh, ::sinh) /* calc the sqrt of x */ /*! * \brief Calculate the square root of each element in a matrix * * This function calculates the square root of each element in a Matrix * * \param A The matrix whose roots are of interest. * * \see cbrt() */ SCYTHE_MATH_OP(sqrt, (T (*)(T))::sqrt) /* calc the tangent of x */ /*! * \brief Calculate the tangent of each element of a Matrix * * This function calculates the tangent of each element in a Matrix * * \param A The matrix whose tangents are of interest. 
* * \see sinh() * \see tanh() * \see sin() * \see cos() * \see cosh() * \see acos() * \see acosh() * \see asin() * \see asinh() * \see atan() * \see atanh() * \see atan2() */ SCYTHE_MATH_OP(tan, ::tan) /* calc the hyperbolic tangent of x */ /*! * \brief Calculate the hyperbolic tangent of each element of a Matrix * * This function calculates the hyperbolic tangent of each element in a Matrix * * \param A The matrix whose hyperbolic tangents are of interest. * * \see sinh() * \see tan() * \see sin() * \see cos() * \see cosh() * \see acos() * \see acosh() * \see asin() * \see asinh() * \see atan() * \see atanh() * \see atan2() */ SCYTHE_MATH_OP(tanh, ::tanh) /* bessel function of the second kind of order 0*/ /*! * \brief Compute the Bessel function of the second kind of order 0 * * This function computes the Bessel function of the second kind of order 0 * for each element in the input matrix, A. * * \param A Matrix for which the Bessel function is of interest * * \see j0() * \see j1() * \see jn() * \see y1() * \see yn() */ SCYTHE_MATH_OP(y0, ::y0) /* bessel function of the second kind of order 1*/ /*! * \brief Compute the Bessel function of the second kind of order 1 * * This function computes the Bessel function of the second kind of order 1 * for each element in the input matrix, A. * * \param A Matrix for which the Bessel function is of interest * * \see j0() * \see j1() * \see jn() * \see y0() * \see yn() */ SCYTHE_MATH_OP(y1, ::y1) /* bessel function of the second kind of order n * TODO: This definition causes the compiler to issue some warnings. * Fix */ /*! * \brief Compute the Bessel function of the second kind of order n * * This function computes the Bessel function of the second kind of order n * for each element in the input matrix, A. * * \param n Order of the Bessel function * \param A Matrix for which the Bessel function is of interest * * \see j0() * \see j1() * \see jn() * \see y0() * \see y1() */ SCYTHE_MATH_OP_2ARG(yn, ::yn) } // end namespace scythe #endif /* SCYTHE_MATH_H */ MCMCpack/src/rtmvnorm.h0000644000176000001440000002364312140061657014516 0ustar ripleyusers/* * Scythe Statistical Library Copyright (C) 2000-2002 Andrew D. Martin * and Kevin M. Quinn; 2002-present Andrew D. Martin, Kevin M. Quinn, * and Daniel Pemstein. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify under the terms of the GNU General Public License as * published by Free Software Foundation; either version 2 of the * License, or (at your option) any later version. See the text files * COPYING and LICENSE, distributed with this source code, for further * information. * -------------------------------------------------------------------- * scythestat/rng/rtmvnorm.h * */ /*! * \file rng/rtmvnorm.h * * \brief A truncated multivariate normal random number generator. * * This file provides the class definition for the rtmvnorm class, a * functor that generates random variates from truncated multivariate * normal distributions. * */ #ifndef SCYTHE_RTMVNORM_H #define SCYTHE_RTMVNORM_H #include #include #ifdef SCYTHE_COMPILE_DIRECT #include "matrix.h" #include "rng.h" #include "error.h" #include "algorithm.h" #include "ide.h" #else #include "scythestat/matrix.h" #include "scythestat/rng.h" #include "scythestat/error.h" #include "scythestat/algorithm.h" #include "scythestat/ide.h" #endif namespace scythe { /* Truncated Multivariate Normal Distribution by Gibbs sampling * (Geweke 1991). 
This is a functor that allows one to * initialize---and optionally burn in---a sampler for a given * truncated multivariate normal distribution on construction * and then make (optionally thinned) draws with calls to the () * operator. * */ /*! \brief Truncated multivariate normal distribution random number * generator. * * This class is a functor that allows one to initialize, and * optionally burn in, a Gibbs sampler (Geweke 1991) for a given * truncated multivariate normal distribution on construction and * then make optionally thinned draws from the distribution with * calls to the () operator. */ template class rtmvnorm { public: /*! \brief Standard constructor. * * This method constructs a functor capable of generating * linearly constrained variates of the form: \f$x \sim * N_n(\mu, \Sigma), a \le Dx \le b\f$. That is, it generates * an object capable of simulating random variables from an * n-variate normal distribution defined by \a mu * (\f$\mu\f$) and \a sigma (\f$\Sigma\f$) subject to fewer * than \f$n\f$ linear constraints, defined by the Matrix \a D * and the bounds vectors \a a and \a b. * * The user may pass optional burn in and thinning * parameters to the constructor. The \a burnin parameter * indicates the number of draws that the sampler should * initially make and throw out on construction. The \a thin * parameter controls the behavior of the functor's () * operator. A thinning parameter of 1 indicates that each * call to operator()() should return the random variate * generated by one iteration of the Gibbs sampler, while a * value of 2 indicates that the sampler should throw every * other variate out, a value of 3 causes operator()() to * iterate the sampler three times before returning, and so on. * * Finally, this constructor inverts \a D before proceeding. * If you have pre-inverted \a D, you can set the \a * preinvertedD flag to true and the functor will not redo the * operation. This helps optimize common cases; for example, * when \a D is simply the identity matrix (and thus equal to * its own inverse), there is no need to compute the inverse. * * \param mu An n x 1 vector of means. \param sigma An n x n * variance-covariance matrix. \param D An n x n linear * constraint definition matrix; should be of rank n. \param a * An n x 1 lower bound vector (may contain infinity or * negative infinity). \param b An n x 1 upper bound vector (may * contain infinity or negative infinity). \param generator * Reference to an rng object \param burnin Optional burnin * parameter; default value is 0. \param thin Optional thinning * parameter; default value is 1. \param preinvertedD Optional * flag with default value of false; if set to true, functor * will not invert \a D. * * \throw scythe_dimension_error (Level 1) * \throw scythe_conformation_error (Level 1) * \throw scythe_invalid_arg (Level 1) * * \see operator()() * \see rng */ template rtmvnorm (const Matrix& mu, const Matrix& sigma, const Matrix& D, const Matrix& a, const Matrix& b, rng& generator, unsigned int burnin = 0, unsigned int thin = 1, bool preinvertedD = false) : mu_ (mu), C_ (mu.rows(), mu.rows(), false), h_ (mu.rows(), 1, false), z_ (mu.rows(), 1, true, 0), generator_ (generator), n_ (mu.rows()), thin_ (thin), iter_ (0) { SCYTHE_CHECK_10(thin == 0, scythe_invalid_arg, "thin must be >= 1"); SCYTHE_CHECK_10(! mu.isColVector(), scythe_dimension_error, "mu not column vector"); SCYTHE_CHECK_10(! sigma.isSquare(), scythe_dimension_error, "sigma not square"); SCYTHE_CHECK_10(! 
D.isSquare(), scythe_dimension_error, "D not square"); SCYTHE_CHECK_10(! a.isColVector(), scythe_dimension_error, "a not column vector"); SCYTHE_CHECK_10(! b.isColVector(), scythe_dimension_error, "b not column vector"); SCYTHE_CHECK_10(sigma.rows() != n_ || D.rows() != n_ || a.rows() != n_ || b.rows() != n_, scythe_conformation_error, "mu, sigma, D, a, and b not conformable"); // TODO will D * sigma * t(D) always be positive definite, // allowing us to use the faster invpd? if (preinvertedD) Dinv_ = D; else Dinv_ = inv(D); Matrix<> Tinv = inv(D * sigma * t(D)); alpha_ = a - D * mu; beta_ = b - D * mu; // Check truncation bounds if (SCYTHE_DEBUG > 0) { for (unsigned int i = 0; i < n_; ++i) { SCYTHE_CHECK(alpha_(i) >= beta_(i), scythe_invalid_arg, "Truncation bound " << i << " not logically consistent"); } } // Precompute some stuff (see Geweke 1991 pg 7). for (unsigned int i = 0; i < n_; ++i) { C_(i, _) = -(1 / Tinv(i, i)) % Tinv(i, _); C_(i, i) = 0; // not really clever but probably too clever h_(i) = std::sqrt(1 / Tinv(i, i)); SCYTHE_CHECK_30(std::isnan(h_(i)), scythe_invalid_arg, "sigma is not positive definite"); } // Do burnin for (unsigned int i = 0; i < burnin; ++i) sample (); } /*! \brief Generate random variates. * * Iterates the Gibbs sampler and returns a Matrix containing a * single draw from the truncated multivariate random number * generator encapsulated by the instantiated object. Thinning * of sampler draws is specified at construction. * * \see rtmvnorm() */ template Matrix operator() () { do { sample (); } while (iter_ % thin_ != 0); return (mu_ + Dinv_ * z_); } /*! \brief Generate random variates. * * Default template. See general template for details. * * \see operator()(). */ Matrix operator() () { return operator()(); } protected: /* Does one step of the Gibbs sampler (see Geweke 1991 p 6) */ void sample () { double czsum; double above; double below; for (unsigned int i = 0; i < n_; ++i) { // Calculate sum_{j \ne i} c_{ij} z_{j} czsum = 0; for (unsigned int j = 0; j < n_; ++j) { if (i == j) continue; czsum += C_(i, j) * z_(j); } // Calc truncation of conditional univariate std normal below = (alpha_(i) - czsum) / h_(i); above = (beta_(i) - czsum) / h_(i); // Draw random variate z_i z_(i) = h_(i); if (above == std::numeric_limits::infinity()){ if (below == -std::numeric_limits::infinity()) z_(i) *= generator_.rnorm(0, 1); // untruncated else z_(i) *= generator_.rtbnorm_combo(0, 1, below); } else if (below == -std::numeric_limits::infinity()) z_(i) *= generator_.rtanorm_combo(0, 1, above); else z_(i) *= generator_.rtnorm_combo(0, 1, below, above); z_(i) += czsum; } ++iter_; } /* Instance variables */ // Various reused computation matrices with names from // Geweke 1991. Matrix<> mu_; Matrix<> Dinv_; Matrix<> C_; Matrix<> alpha_; Matrix<> beta_; Matrix<> h_; Matrix<> z_; // The current draw of the posterior rng& generator_; // Refernce to random number generator unsigned int n_; // The dimension of the distribution unsigned int thin_; // thinning parameter unsigned int iter_; // The current post-burnin iteration }; } // end namespace scythe #endif MCMCpack/src/rng.h0000644000176000001440000014412212140061657013414 0ustar ripleyusers/* * Scythe Statistical Library Copyright (C) 2000-2002 Andrew D. Martin * and Kevin M. Quinn; 2002-present Andrew D. Martin, Kevin M. Quinn, * and Daniel Pemstein. All Rights Reserved. 
* * This program is free software; you can redistribute it and/or * modify under the terms of the GNU General Public License as * published by Free Software Foundation; either version 2 of the * License, or (at your option) any later version. See the text files * COPYING and LICENSE, distributed with this source code, for further * information. * -------------------------------------------------------------------- * scythestat/rng.h * * The code for many of the RNGs defined in this file and implemented * in rng.cc is based on that in the R project, version 1.6.0-1.7.1. * This code is available under the terms of the GNU GPL. Original * copyright: * * Copyright (C) 1998 Ross Ihaka * Copyright (C) 2000-2002 The R Development Core Team * Copyright (C) 2003 The R Foundation */ /*! * \file rng.h * * \brief The definition of the random number generator base class. * */ /* Doxygen doesn't deal well with the macros that we use to make * matrix versions of rngs easy to define. */ #ifndef SCYTHE_RNG_H #define SCYTHE_RNG_H #include #include #ifdef HAVE_IEEEFP_H #include #endif #ifdef SCYTHE_COMPILE_DIRECT #include "matrix.h" #include "error.h" #include "algorithm.h" #include "distributions.h" #include "ide.h" #include "la.h" #else #include "scythestat/matrix.h" #include "scythestat/error.h" #include "scythestat/algorithm.h" #include "scythestat/distributions.h" #include "scythestat/ide.h" #include "scythestat/la.h" #endif namespace scythe { /* Shorthand for the matrix versions of the various distributions' * random number generators. */ #define SCYTHE_RNGMETH_MATRIX(NAME, RTYPE, ARGNAMES, ...) \ template \ Matrix \ NAME (unsigned int rows, unsigned int cols, __VA_ARGS__) \ { \ Matrix ret(rows, cols, false); \ typename Matrix::forward_iterator it; \ typename Matrix::forward_iterator last \ = ret.end_f(); \ for (it = ret.begin_f(); it != last; ++it) \ *it = NAME (ARGNAMES); \ SCYTHE_VIEW_RETURN(RTYPE, O, S, ret) \ } \ \ Matrix \ NAME (unsigned int rows, unsigned int cols, __VA_ARGS__) \ { \ return NAME (rows, cols, ARGNAMES); \ } /*! \brief Random number generator. * * This class provides objects capable of generating random numbers * from a variety of probability distributions. This * abstract class forms the foundation of random number generation in * Scythe. Specific random number generators should extend this class * and implement the virtual void function runif(); this function * should take no arguments and return uniformly distributed random * numbers on the interval (0, 1). The rng class provides no * interface for seed-setting or initialization, allowing for maximal * flexibility in underlying implementation. This class does provide * implementations of functions that return random numbers from a wide * variety of commonly (and not-so-commonly) used distributions, by * manipulating the uniform variates returned by runif(). See * rng/mersenne.h and rng/lecuyer.h for the rng implementations * offered by Scythe. * * Each univariate distribution is represented by three overloaded * versions of the same method. The first is a simple method * returning a single value. The remaining method versions return * Matrix values and are equivalent to calling the single-valued * method multiple times to fill a Matrix object. They each take * two arguments describing the number of rows and columns in the * returned Matrix object and as many subsequent arguments as is * necessary to describe the distribution. 
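*
* For instance (an illustrative sketch, not part of the original
* documentation; it assumes the scythe::mersenne generator from
* rng/mersenne.h):
* \code
* scythe::mersenne myrng;
* double u = myrng();                                  // one uniform on (0, 1)
* double g = myrng.rgamma(2.0, 1.0);                   // one gamma(shape, rate) draw
* scythe::Matrix<> G = myrng.rgamma(5, 5, 2.0, 1.0);   // 5 x 5 matrix of such draws
* \endcode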
As is the case * throughout the library, the Matrix-returning versions of the * method include both a general and default template. We * explicitly document only the single-valued versions of the * univariate methods. For matrix-valued distributions we provide * only a single method per distribution. * * \note Doxygen incorrectly parses the macros we use to * automatically generate the Matrix returning versions of the * various univariate methods in this class. Whenever you see the * macro variable __VA_ARGS__ in the public member function list * below, simply substitute in the arguments in the explicitly * documented single-valued version of the method. * */ template class rng { public: /* This declaration allows users to treat rng objects like * functors that generate random uniform numbers. This can be * quite convenient. */ /*! \brief Generate uniformly distributed random variates. * * This operator acts as an alias for runif() and generates * pseudo-random variates from the uniform distribution on the * interval (0, 1). We include this operator to allow rng * objects to behave as function objects. */ double operator() () { return runif(); } /* Returns random uniform numbers on (0, 1). This function must * be implemented by extending classes */ /*! \brief Generate uniformly distributed random variates. * * This method generates pseudo-random variates from the * uniform distribution on the interval (0, 1). * * This function is pure virtual and is implemented by * extending concrete classes, like scythe::mersenne and * scythe::lecuyer. */ double runif () { return as_derived().runif(); } /* No point in declaring these virtual because we have to * override them anyway because C++ isn't too bright. Also, it * is illegal to make template methods virtual */ template Matrix runif(unsigned int rows, unsigned int cols) { Matrix ret(rows, cols, false); typename Matrix::forward_iterator it; typename Matrix::forward_iterator last=ret.end_f(); for (it = ret.begin_f(); it != last; ++it) *it = runif(); return ret; } Matrix runif(unsigned int rows, unsigned int cols) { return runif(rows, cols); } /*! \brief Generate a beta distributed random variate. * * This function returns a pseudo-random variate drawn from the * beta distribution described by the shape parameters \a a and * \a b. * * \param alpha The first positive beta shape parameter. * \param beta the second positive beta shape parameter. * * \see pbeta(double x, double a, double b) * \see dbeta(double x, double a, double b) * \see betafn(double a, double b) * \see lnbetafn(double a, double b) * * \throw scythe_invalid_arg (Level 1) */ double rbeta (double alpha, double beta) { double report; double xalpha, xbeta; // Check for allowable parameters SCYTHE_CHECK_10(alpha <= 0, scythe_invalid_arg, "alpha <= 0"); SCYTHE_CHECK_10(beta <= 0, scythe_invalid_arg, "beta <= 0"); xalpha = rchisq (2 * alpha); xbeta = rchisq (2 * beta); report = xalpha / (xalpha + xbeta); return (report); } SCYTHE_RNGMETH_MATRIX(rbeta, double, SCYTHE_ARGSET(alpha, beta), double alpha, double beta); /*! \brief Generate a non-central hypergeometric disributed * random variate. * * This function returns a pseudo-random variate drawn from the * non-centrial hypergeometric distribution described by the * number of positive outcomes \a m1, the two group size * parameters \a n1 and \a n2, and the odds ratio \a psi. * * \param m1 The number of positive outcomes in both groups. * \param n1 The size of group one. * \param n2 The size of group two. 
* \param psi The odds ratio * \param delta The precision. * * \throw scythe_convergence_error (Level 0) */ double rnchypgeom(double m1, double n1, double n2, double psi, double delta) { // Calculate mode of mass function double a = psi - 1; double b = -1 * ((n1+m1+2)*psi + n2 - m1); double c = psi * (n1+1) * (m1+1); double q = -0.5 * ( b + sgn(b) * std::sqrt(std::pow(b,2) - 4*a*c)); double root1 = c/q; double root2 = q/a; double el = std::max(0.0, m1-n2); double u = std::min(n1,m1); double mode = std::floor(root1); int exactcheck = 0; if (u(u+1); double *fvec = new double[size]; fvec[static_cast(mode)] = 1.0; double s; // compute the mass function at y if (delta <= 0 || exactcheck==1){ //exact evaluation // sum from mode to u double f = 1.0; s = 1.0; for (double i=(mode+1); i<=u; ++i){ double r = ((n1-i+1)*(m1-i+1))/(i*(n2-m1+i)) * psi; f = f*r; s += f; fvec[static_cast(i)] = f; } // sum from mode to el f = 1.0; for (double i=(mode-1); i>=el; --i){ double r = ((n1-i)*(m1-i))/((i+1)*(n2-m1+i+1)) * psi; f = f/r; s += f; fvec[static_cast(i)] = f; } } else { // approximation double epsilon = delta/10.0; // sum from mode to ustar double f = 1.0; s = 1.0; double i = mode+1; double r; do { if (i>u) break; r = ((n1-i+1)*(m1-i+1))/(i*(n2-m1+i)) * psi; f = f*r; s += f; fvec[static_cast(i)] = f; ++i; } while(f>=epsilon || r>=5.0/6.0); // sum from mode to elstar f = 1.0; i = mode-1; do { if (i(i)] = f; --i; } while(f>=epsilon || r <=6.0/5.0); } double udraw = runif(); double psum = fvec[static_cast(mode)]/s; if (udraw<=psum) return mode; double lower = mode-1; double upper = mode+1; do{ double fl; double fu; if (lower >= el) fl = fvec[static_cast(lower)]; else fl = 0.0; if (upper <= u) fu = fvec[static_cast(upper)]; else fu = 0.0; if (fl > fu) { psum += fl/s; if (udraw<=psum) return lower; --lower; } else { psum += fu/s; if (udraw<=psum) return upper; ++upper; } } while(udraw>psum); delete [] fvec; SCYTHE_THROW(scythe_convergence_error, "Algorithm did not converge"); } SCYTHE_RNGMETH_MATRIX(rnchypgeom, double, SCYTHE_ARGSET(m1, n1, n2, psi, delta), double m1, double n1, double n2, double psi, double delta); /*! \brief Generate a Bernoulli distributed random variate. * * This function returns a pseudo-random variate drawn from the * Bernoulli distribution with probability of success \a p. * * \param p The probability of success on a trial. * * \throw scythe_invalid_arg (Level 1) */ unsigned int rbern (double p) { unsigned int report; double unif; // Check for allowable paramters SCYTHE_CHECK_10(p < 0 || p > 1, scythe_invalid_arg, "p parameter not in[0,1]"); unif = runif (); if (unif < p) report = 1; else report = 0; return (report); } SCYTHE_RNGMETH_MATRIX(rbern, unsigned int, p, double p); /*! \brief Generate a binomial distributed random variate. * * This function returns a pseudo-random variate drawn from the * binomial distribution with \a n trials and \p probability of * success on each trial. * * \param n The number of trials. * \param p The probability of success on each trial. 
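*
* For example (an added illustration; it assumes a scythe::mersenne
* generator named myrng):
* \code
* unsigned int successes = myrng.rbinom(10, 0.3);  // 10 trials, p = 0.3
* \endcode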
* * \see pbinom(double x, unsigned int n, double p) * \see dbinom(double x, unsigned int n, double p) * * \throw scythe_invalid_arg (Level 1) */ unsigned int rbinom (unsigned int n, double p) { unsigned int report; unsigned int count = 0; double hold; // Check for allowable parameters SCYTHE_CHECK_10(n == 0, scythe_invalid_arg, "n == 0"); SCYTHE_CHECK_10(p < 0 || p > 1, scythe_invalid_arg, "p not in [0,1]"); // Loop and count successes for (unsigned int i = 0; i < n; i++) { hold = runif (); if (hold < p) ++count; } report = count; return (report); } SCYTHE_RNGMETH_MATRIX(rbinom, unsigned int, SCYTHE_ARGSET(n, p), unsigned int n, double p); /*! \brief Generate a \f$\chi^2\f$ distributed random variate. * * This function returns a pseudo-random variate drawn from the * \f$\chi^2\f$ distribution with \a df degrees of freedom. * * \param df The degrees of freedom. * * \see pchisq(double x, double df) * \see dchisq(double x, double df) * * \throw scythe_invalid_arg (Level 1) */ double rchisq (double df) { double report; // Check for allowable parameter SCYTHE_CHECK_10(df <= 0, scythe_invalid_arg, "Degrees of freedom <= 0"); // Return Gamma(nu/2, 1/2) variate report = rgamma (df / 2, .5); return (report); } SCYTHE_RNGMETH_MATRIX(rchisq, double, df, double df); /*! \brief Generate an exponentially distributed random variate. * * This function returns a pseudo-random variate drawn from the * exponential distribution described by the inverse scale * parameter \a invscale. * * \param invscale The inverse scale parameter. * * \see pexp(double x, double scale) * \see dexp(double x, double scale) * * \throw scythe_invalid_arg (Level 1) */ double rexp (double invscale) { double report; // Check for allowable parameter SCYTHE_CHECK_10(invscale <= 0, scythe_invalid_arg, "Inverse scale parameter <= 0"); report = -std::log (runif ()) / invscale; return (report); } SCYTHE_RNGMETH_MATRIX(rexp, double, invscale, double invscale); /*! \brief Generate an F distributed random variate. * * This function returns a pseudo-random variate drawn from the * F distribution with degrees of freedom \a df1 and \a df2. * * \param df1 The positive degrees of freedom for the * \f$\chi^2\f$ variate in the numerator of the F statistic. * \param df2 The positive degrees of freedom for the * \f$\chi^2\f$ variate in the denominator of the F statistic. * * \see pf(double x, double df1, double df2) * \see df(double x, double df1, double df2) * * \throw scythe_invalid_arg (Level 1) */ double rf (double df1, double df2) { SCYTHE_CHECK_10(df1 <= 0 || df2 <= 0, scythe_invalid_arg, "n1 or n2 <= 0"); return ((rchisq(df1) / df1) / (rchisq(df2) / df2)); } SCYTHE_RNGMETH_MATRIX(rf, double, SCYTHE_ARGSET(df1, df2), double df1, double df2); /*! \brief Generate a gamma distributed random variate. * * This function returns a pseudo-random variate drawn from the * gamma distribution with a given \a shape and \a rate. * * \param shape The strictly positive shape of the distribution. * \param rate The inverse of the strictly positive scale of the distribution. That is, 1 / scale.
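*
* For example (an added illustration; it assumes a scythe::mersenne
* generator named myrng), a gamma variate with shape 3 and scale 2 is
* drawn by passing rate = 1/2:
* \code
* double g = myrng.rgamma(3.0, 0.5);  // shape = 3, rate = 0.5 (scale = 2)
* \endcode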
* * \see pgamma(double x, double shape, double scale) * \see dgamma(double x, double shape, double scale) * \see gammafn(double x) * \see lngammafn(double x) * * \throw scythe_invalid_arg (Level 1) */ double rgamma (double shape, double rate) { double report; // Check for allowable parameters SCYTHE_CHECK_10(shape <= 0, scythe_invalid_arg, "shape <= 0"); SCYTHE_CHECK_10(rate <= 0, scythe_invalid_arg, "rate <= 0"); if (shape > 1) report = rgamma1 (shape) / rate; else if (shape == 1) report = -std::log (runif ()) / rate; else report = rgamma1 (shape + 1) * std::pow (runif (), 1 / shape) / rate; return (report); } SCYTHE_RNGMETH_MATRIX(rgamma, double, SCYTHE_ARGSET(shape, rate), double shape, double rate); /*! \brief Generate a logistically distributed random variate. * * This function returns a pseudo-random variate drawn from the * logistic distribution described by the given \a location and * \a scale variables. * * \param location The location of the distribution. * \param scale The scale of the distribution. * * \see plogis(double x, double location, double scale) * \see dlogis(double x, double location, double scale) * * \throw scythe_invalid_arg (Level 1) */ double rlogis (double location, double scale) { double report; double unif; // Check for allowable parameters SCYTHE_CHECK_10(scale <= 0, scythe_invalid_arg, "scale <= 0"); unif = runif (); report = location + scale * std::log (unif / (1 - unif)); return (report); } SCYTHE_RNGMETH_MATRIX(rlogis, double, SCYTHE_ARGSET(location, scale), double location, double scale); /*! \brief Generate a log-normally distributed random variate. * * This function returns a pseudo-random variate drawn from the * log-normal distribution with given logged mean and standard * deviation. * * \param logmean The logged mean of the distribution. * \param logsd The strictly positive logged standard deviation * of the distribution. * * \see plnorm(double x, double logmean, double logsd) * \see dlnorm(double x, double logmean, double logsd) * * \throw scythe_invalid_arg (Level 1) */ double rlnorm (double logmean, double logsd) { SCYTHE_CHECK_10(logsd < 0.0, scythe_invalid_arg, "standard deviation < 0"); return std::exp(rnorm(logmean, logsd)); } SCYTHE_RNGMETH_MATRIX(rlnorm, double, SCYTHE_ARGSET(logmean, logsd), double logmean, double logsd); /*! \brief Generate a negative binomial distributed random * variate. * * This function returns a pseudo-random variate drawn from the * negative binomial distribution with given dispersion * parameter and probability of success on each trial. * * \param n The strictly positive target number of successful * trials (dispersion parameter). * \param p The probability of success on each trial. * * \see pnbinom(unsigned int x, double n, double p) * \see dnbinom(unsigned int x, double n, double p) * * \throw scythe_invalid_arg (Level 1) */ unsigned int rnbinom (double n, double p) { SCYTHE_CHECK_10(n == 0 || p <= 0 || p > 1, scythe_invalid_arg, "n == 0, p <= 0, or p > 1"); return rpois(rgamma(n, (1 - p) / p)); } SCYTHE_RNGMETH_MATRIX(rnbinom, unsigned int, SCYTHE_ARGSET(n, p), double n, double p); /*! \brief Generate a normally distributed random variate. * * This function returns a pseudo-random variate drawn from the * normal distribution with given \a mean and standard * deviation \a sd. * * \param mean The mean of the distribution. * \param sd The standard deviation of the distribution.
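*
* For example (an added illustration; it assumes a scythe::mersenne
* generator named myrng), both parameters have default values, so a
* standard normal draw takes no arguments:
* \code
* double z = myrng.rnorm();          // N(0, 1)
* double x = myrng.rnorm(1.5, 2.0);  // mean 1.5, standard deviation 2
* \endcode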
* * \see pnorm(double x, double mean, double sd) * \see dnorm(double x, double mean, double sd) * * \throw scythe_invalid_arg (Level 1) */ double rnorm (double mean = 0, double sd = 1) { SCYTHE_CHECK_10(sd <= 0, scythe_invalid_arg, "Negative standard deviation"); return (mean + rnorm1 () * sd); } SCYTHE_RNGMETH_MATRIX(rnorm, double, SCYTHE_ARGSET(mean, sd), double mean, double sd); /*! \brief Generate a Poisson distributed random variate. * * This function returns a pseudo-random variate drawn from the * Poisson distribution with expected number of occurrences \a * lambda. * * \param lambda The strictly positive expected number of * occurrences. * * \see ppois(double x, double lambda) * \see dpois(double x, double lambda) * * \throw scythe_invalid_arg (Level 1) */ unsigned int rpois(double lambda) { SCYTHE_CHECK_10(lambda <= 0, scythe_invalid_arg, "lambda <= 0"); unsigned int n; if (lambda < 33) { double cutoff = std::exp(-lambda); n = -1; double t = 1.0; do { ++n; t *= runif(); } while (t > cutoff); } else { bool accept = false; double c = 0.767 - 3.36/lambda; double beta = M_PI/std::sqrt(3*lambda); double alpha = lambda*beta; double k = std::log(c) - lambda - std::log(beta); while (! accept){ double u1 = runif(); double x = (alpha - std::log((1-u1)/u1))/beta; while (x <= -0.5){ u1 = runif(); x = (alpha - std::log((1-u1)/u1))/beta; } n = static_cast(x + 0.5); double u2 = runif(); double lhs = alpha - beta*x + std::log(u2/std::pow(1+std::exp(alpha-beta*x),2)); double rhs = k + n*std::log(lambda) - lnfactorial(n); if (lhs <= rhs) accept = true; } } return n; } SCYTHE_RNGMETH_MATRIX(rpois, unsigned int, lambda, double lambda); /* There is a naming issue here, with respect to the p- and d- * functions in distributions. This is really analagous to rt1- * and dt1- XXX Clear up. Also, we should probably have a * random number generator for both versions of the student t. */ /*! \brief Generate a Student t distributed random variate. * * This function returns a pseudo-random variate drawn from the * Student's t distribution with given mean \a mu, variance \a * sigma2, and degrees of freedom \a nu * * \param mu The mean of the distribution. * \param sigma2 The variance of the distribution. * \param nu The degrees of freedom of the distribution. * * \see dt1(double x, double mu, double sigma2, double nu) * * \throw scythe_invalid_arg (Level 1) */ double rt (double mu, double sigma2, double nu) { double report; double x, z; // Check for allowable paramters SCYTHE_CHECK_10(sigma2 <= 0, scythe_invalid_arg, "Variance parameter sigma2 <= 0"); SCYTHE_CHECK_10(nu <= 0, scythe_invalid_arg, "D.O.F parameter nu <= 0"); z = rnorm1 (); x = rchisq (nu); report = mu + std::sqrt (sigma2) * z * std::sqrt (nu) / std::sqrt (x); return (report); } SCYTHE_RNGMETH_MATRIX(rt1, double, SCYTHE_ARGSET(mu, sigma2, nu), double mu, double sigma2, double nu); /*! \brief Generate a Weibull distributed random variate. * * This function returns a pseudo-random variate drawn from the * Weibull distribution with given \a shape and \a scale. * * \param shape The strictly positive shape of the distribution. * \param scale The strictly positive scale of the distribution. 
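*
* For example (an added illustration; it assumes a scythe::mersenne
* generator named myrng):
* \code
* double w = myrng.rweibull(1.5, 2.0);  // shape = 1.5, scale = 2
* \endcode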
* * \see pweibull(double x, double shape, double scale) * \see dweibull(double x, double shape, double scale) * * \throw scythe_invalid_arg (Level 1) */ double rweibull (double shape, double scale) { SCYTHE_CHECK_10(shape <= 0 || scale <= 0, scythe_invalid_arg, "shape or scale <= 0"); return scale * std::pow(-std::log(runif()), 1.0 / shape); } SCYTHE_RNGMETH_MATRIX(rweibull, double, SCYTHE_ARGSET(shape, scale), double shape, double scale); /*! \brief Generate an inverse \f$\chi^2\f$ distributed random * variate. * * This function returns a pseudo-random variate drawn from the * inverse \f$\chi^2\f$ distribution with \a nu degress of * freedom. * * \param nu The degrees of freedom. * * \see rchisq(double df) * * \throw scythe_invalid_arg (Level 1) */ double richisq (double nu) { double report; // Check for allowable parameter SCYTHE_CHECK_10(nu <= 0, scythe_invalid_arg, "Degrees of freedom <= 0"); // Return Inverse-Gamma(nu/2, 1/2) variate report = rigamma (nu / 2, .5); return (report); } SCYTHE_RNGMETH_MATRIX(richisq, double, nu, double nu); /*! \brief Generate an inverse gamma distributed random variate. * * This function returns a pseudo-random variate drawn from the * inverse gamma distribution with given \a shape and \a scale. * * \param shape The strictly positive shape of the distribution. * \param scale The strictly positive scale of the distribution. * * \see rgamma(double alpha, double beta) * * \throw scythe_invalid_arg (Level 1) */ double rigamma (double alpha, double beta) { double report; // Check for allowable parameters SCYTHE_CHECK_10(alpha <= 0, scythe_invalid_arg, "alpha <= 0"); SCYTHE_CHECK_10(beta <= 0, scythe_invalid_arg, "beta <= 0"); // Return reciprocal of gamma variate report = std::pow (rgamma (alpha, beta), -1); return (report); } SCYTHE_RNGMETH_MATRIX(rigamma, double, SCYTHE_ARGSET(alpha, beta), double alpha, double beta); /* Truncated Distributions */ /*! \brief Generate a truncated normally distributed random * variate. * * This function returns a pseudo-random variate drawn from the * normal distribution with given \a mean and \a variance, * truncated both above and below. It uses the inverse CDF * method. * * \param mean The mean of the distribution. * \param variance The variance of the distribution. * \param below The lower truncation point of the distribution. * \param above The upper truncation point of the distribution. 
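*
* A usage sketch (an added illustration; it assumes a scythe::mersenne
* generator named myrng). One-sided truncation can be expressed by
* passing an infinite bound, as the rtbnorm_combo generator below does
* internally:
* \code
* double inf = std::numeric_limits<double>::infinity();
* double d1 = myrng.rtnorm(0.0, 1.0, -1.0, 1.0);  // N(0, 1) restricted to [-1, 1]
* double d2 = myrng.rtnorm(0.0, 1.0, 0.0, inf);   // half-normal, truncated below at 0
* \endcode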
* * \see rtnorm_combo(double mean, double variance, double below, double above) * \see rtbnorm_slice(double mean, double variance, double below, unsigned int iter = 10) * \see rtanorm_slice(double mean, double variance, double above, unsigned int iter = 10) * \see rtbnorm_combo(double mean, double variance, double below, unsigned int iter = 10) * \see rtanorm_combo(double mean, double variance, double above, unsigned int iter = 10) * \see rnorm(double x, double mean, double sd) * * \throw scythe_invalid_arg (Level 1) */ double rtnorm(double mean, double variance, double below, double above) { SCYTHE_CHECK_10(below >= above, scythe_invalid_arg, "Truncation bound not logically consistent"); SCYTHE_CHECK_10(variance <= 0, scythe_invalid_arg, "Variance <= 0"); double sd = std::sqrt(variance); double FA = 0.0; double FB = 0.0; if ((std::fabs((above-mean)/sd) < 8.2) && (std::fabs((below-mean)/sd) < 8.2)){ FA = pnorm1((above-mean)/sd, true, false); FB = pnorm1((below-mean)/sd, true, false); } if ((((above-mean)/sd) < 8.2) && (((below-mean)/sd) <= -8.2) ){ FA = pnorm1((above-mean)/sd, true, false); FB = 0.0; } if ( (((above-mean)/sd) >= 8.2) && (((below-mean)/sd) > -8.2) ){ FA = 1.0; FB = pnorm1((below-mean)/sd, true, false); } if ( (((above-mean)/sd) >= 8.2) && (((below-mean)/sd) <= -8.2)){ FA = 1.0; FB = 0.0; } double term = runif()*(FA-FB)+FB; if (term < 5.6e-17) term = 5.6e-17; if (term > (1 - 5.6e-17)) term = 1 - 5.6e-17; double draw = mean + sd * qnorm1(term); if (draw > above) draw = above; if (draw < below) draw = below; return draw; } SCYTHE_RNGMETH_MATRIX(rtnorm, double, SCYTHE_ARGSET(mean, variance, above, below), double mean, double variance, double above, double below); /*! \brief Generate a truncated normally distributed random * variate. * * This function returns a pseudo-random variate drawn from the * normal distribution with given \a mean and \a variance, * truncated both above and below. It uses a combination of * rejection sampling (when \a below <= mean <= \a above) * sampling method of Robert and Casella (1999), pp. 288-289 * (when \a meam < \a below or \a mean > \a above). * * \param mean The mean of the distribution. * \param variance The variance of the distribution. * \param below The lower truncation point of the distribution. * \param above The upper truncation point of the distribution. 
* * \see rtnorm(double mean, double variance, double below, double above) * \see rtbnorm_slice(double mean, double variance, double below, unsigned int iter = 10) * \see rtanorm_slice(double mean, double variance, double above, unsigned int iter = 10) * \see rtbnorm_combo(double mean, double variance, double below, unsigned int iter = 10) * \see rtanorm_combo(double mean, double variance, double above, unsigned int iter = 10) * \see rnorm(double x, double mean, double sd) * * \throw scythe_invalid_arg (Level 1) */ double rtnorm_combo(double mean, double variance, double below, double above) { SCYTHE_CHECK_10(below >= above, scythe_invalid_arg, "Truncation bound not logically consistent"); SCYTHE_CHECK_10(variance <= 0, scythe_invalid_arg, "Variance <= 0"); double sd = std::sqrt(variance); if ((((above-mean)/sd > 0.5) && ((mean-below)/sd > 0.5)) || (((above-mean)/sd > 2.0) && ((below-mean)/sd < 0.25)) || (((mean-below)/sd > 2.0) && ((above-mean)/sd > -0.25))) { double x = rnorm(mean, sd); while ((x > above) || (x < below)) x = rnorm(mean,sd); return x; } else { // use the inverse cdf method double FA = 0.0; double FB = 0.0; if ((std::fabs((above-mean)/sd) < 8.2) && (std::fabs((below-mean)/sd) < 8.2)){ FA = pnorm1((above-mean)/sd, true, false); FB = pnorm1((below-mean)/sd, true, false); } if ((((above-mean)/sd) < 8.2) && (((below-mean)/sd) <= -8.2) ){ FA = pnorm1((above-mean)/sd, true, false); FB = 0.0; } if ( (((above-mean)/sd) >= 8.2) && (((below-mean)/sd) > -8.2) ){ FA = 1.0; FB = pnorm1((below-mean)/sd, true, false); } if ( (((above-mean)/sd) >= 8.2) && (((below-mean)/sd) <= -8.2)){ FA = 1.0; FB = 0.0; } double term = runif()*(FA-FB)+FB; if (term < 5.6e-17) term = 5.6e-17; if (term > (1 - 5.6e-17)) term = 1 - 5.6e-17; double x = mean + sd * qnorm1(term); if (x > above) x = above; if (x < below) x = below; return x; } } SCYTHE_RNGMETH_MATRIX(rtnorm_combo, double, SCYTHE_ARGSET(mean, variance, above, below), double mean, double variance, double above, double below); /*! \brief Generate a normally distributed random variate, * truncated below. * * This function returns a pseudo-random variate drawn from the * normal distribution with given \a mean and \a variance, * truncated below. It uses the slice sampling method of * Robert and Casella (1999), pp. 288-289. * * \param mean The mean of the distribution. * \param variance The variance of the distribution. * \param below The lower truncation point of the distribution. * \param iter The number of iterations to use. 
* * \see rtnorm(double mean, double variance, double below, double above) * \see rtnorm_combo(double mean, double variance, double below, double above) * \see rtanorm_slice(double mean, double variance, double above, unsigned int iter = 10) * \see rtbnorm_combo(double mean, double variance, double below, unsigned int iter = 10) * \see rtanorm_combo(double mean, double variance, double above, unsigned int iter = 10) * \see rnorm(double x, double mean, double sd) * * \throw scythe_invalid_arg (Level 1) */ double rtbnorm_slice (double mean, double variance, double below, unsigned int iter = 10) { SCYTHE_CHECK_10(below < mean, scythe_invalid_arg, "Truncation point < mean"); SCYTHE_CHECK_10(variance <= 0, scythe_invalid_arg, "Variance <= 0"); double z = 0; double x = below + .00001; for (unsigned int i=0; i mean, scythe_invalid_arg, "Truncation point > mean"); SCYTHE_CHECK_10(variance <= 0, scythe_invalid_arg, "Variance <= 0"); double below = -1*above; double newmu = -1*mean; double z = 0; double x = below + .00001; for (unsigned int i=0; i= \a below) and the slice * sampling method of Robert and Casella (1999), pp. 288-289 * (when \a mean < \a below). * * \param mean The mean of the distribution. * \param variance The variance of the distribution. * \param below The lower truncation point of the distribution. * \param iter The number of iterations to run the slice * sampler. * * \see rtnorm(double mean, double variance, double below, double above) * \see rtnorm_combo(double mean, double variance, double below, double above) * \see rtbnorm_slice(double mean, double variance, double below, unsigned int iter = 10) * \see rtanorm_slice(double mean, double variance, double above, unsigned int iter = 10) * \see rtanorm_combo(double mean, double variance, double above, unsigned int iter = 10) * \see rnorm(double x, double mean, double sd) * * \throw scythe_invalid_arg (Level 1) */ double rtbnorm_combo (double mean, double variance, double below, unsigned int iter = 10) { SCYTHE_CHECK_10(variance <= 0, scythe_invalid_arg, "Variance <= 0"); double s = std::sqrt(variance); // do rejection sampling and return value //if (m >= below){ if ((mean/s - below/s ) > -0.5){ double x = rnorm(mean, s); while (x < below) x = rnorm(mean,s); return x; } else if ((mean/s - below/s ) > -5.0 ){ // use the inverse cdf method double above = std::numeric_limits::infinity(); double x = rtnorm(mean, variance, below, above); return x; } else { // do slice sampling and return value double z = 0; double x = below + .00001; for (unsigned int i=0; i \a * above). * * \param mean The mean of the distribution. * \param variance The variance of the distribution. * \param above The upper truncation point of the distribution. * \param iter The number of iterations to run the slice sampler. 
* * \see rtnorm(double mean, double variance, double below, double above) * \see rtnorm_combo(double mean, double variance, double below, double above) * \see rtbnorm_slice(double mean, double variance, double below, unsigned int iter = 10) * \see rtanorm_slice(double mean, double variance, double above, unsigned int iter = 10) * \see rtbnorm_combo(double mean, double variance, double below, unsigned int iter = 10) * \see rnorm(double x, double mean, double sd) * * \throw scythe_invalid_arg (Level 1) */ double rtanorm_combo (double mean, double variance, double above, const unsigned int iter = 10) { SCYTHE_CHECK_10(variance <= 0, scythe_invalid_arg, "Variance <= 0"); double s = std::sqrt(variance); // do rejection sampling and return value if ((mean/s - above/s ) < 0.5){ double x = rnorm(mean, s); while (x > above) x = rnorm(mean,s); return x; } else if ((mean/s - above/s ) < 5.0 ){ // use the inverse cdf method double below = -std::numeric_limits::infinity(); double x = rtnorm(mean, variance, below, above); return x; } else { // do slice sampling and return value double below = -1*above; double newmu = -1*mean; double z = 0; double x = below + .00001; for (unsigned int i=0; i Matrix rwish(unsigned int v, const Matrix &Sigma) { SCYTHE_CHECK_10(! Sigma.isSquare(), scythe_dimension_error, "Sigma not square"); SCYTHE_CHECK_10(v < Sigma.rows(), scythe_invalid_arg, "v < Sigma.rows()"); Matrix A(Sigma.rows(), Sigma.rows()); Matrix C = cholesky(Sigma); Matrix alpha; for (unsigned int i = 0; i < v; ++i) { alpha = C * rnorm(Sigma.rows(), 1, 0, 1); A += (alpha * (t(alpha))); } return A; } /*! \brief Generate a Dirichlet distributed random variate Matrix. * * This function returns a pseudo-random matrix-valued variate * drawn from the Dirichlet disribution described by the vector * \a alpha. * * \param alpha A vector of non-negative reals. * * \throw scythe_invalid_arg (Level 1) * \throw scythe_dimension_error (Level 1) */ template Matrix rdirich(const Matrix& alpha) { // Check for allowable parameters SCYTHE_CHECK_10(std::min(alpha) <= 0, scythe_invalid_arg, "alpha has elements < 0"); SCYTHE_CHECK_10(! alpha.isColVector(), scythe_dimension_error, "alpha not column vector"); Matrix y(alpha.rows(), 1); double ysum = 0; // We would use std::transform here but rgamma is a function // and wouldn't get inlined. const_matrix_forward_iterator ait; const_matrix_forward_iterator alast = alpha.template end_f(); typename Matrix::forward_iterator yit = y.begin_f(); for (ait = alpha.begin_f(); ait != alast; ++ait) { *yit = rgamma(*ait, 1); ysum += *yit; ++yit; } y /= ysum; return y; } /*! \brief Generate a multivariate normal distributed random * variate Matrix. * * This function returns a pseudo-random matrix-valued variate * drawn from the multivariate normal disribution with means \mu * and variance-covariance matrix \a sigma. * * \param mu A vector containing the distribution means. * \param sigma The distribution variance-covariance matrix. * * \throw scythe_invalid_arg (Level 1) * \throw scythe_dimension_error (Level 1) */ template Matrix rmvnorm(const Matrix& mu, const Matrix& sigma) { unsigned int dim = mu.rows(); SCYTHE_CHECK_10(! mu.isColVector(), scythe_dimension_error, "mu not column vector"); SCYTHE_CHECK_10(! sigma.isSquare(), scythe_dimension_error, "sigma not square"); SCYTHE_CHECK_10(sigma.rows() != dim, scythe_conformation_error, "mu and sigma not conformable"); return(mu + cholesky(sigma) * rnorm(dim, 1, 0, 1)); } /*! \brief Generate a multivariate Student t distributed random * variate Matrix. 
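*
* A usage sketch for the matrix-valued generators (an added
* illustration; it assumes a scythe::mersenne generator named myrng
* and the default concrete Matrix<> type):
* \code
* scythe::Matrix<> mu(3, 1, true, 0.0);               // zero mean vector
* scythe::Matrix<> sigma(3, 3, true, 0.0);            // identity covariance
* for (unsigned int i = 0; i < 3; ++i) sigma(i, i) = 1.0;
* scythe::Matrix<> mvn = myrng.rmvnorm(mu, sigma);    // one multivariate normal draw
* scythe::Matrix<> mvt = myrng.rmvt(sigma, 5.0);      // one multivariate t draw, nu = 5
* \endcode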
* * This function returns a pseudo-random matrix-valued variate * drawn from the multivariate Student t disribution with * and variance-covariance matrix \a sigma, and degrees of * freedom \a nu * * \param sigma The distribution variance-covariance matrix. * \param nu The strictly positive degrees of freedom. * * \throw scythe_invalid_arg (Level 1) * \throw scythe_dimension_error (Level 1) */ template Matrix rmvt (const Matrix& sigma, double nu) { Matrix result; SCYTHE_CHECK_10(nu <= 0, scythe_invalid_arg, "D.O.F parameter nu <= 0"); result = rmvnorm(Matrix(sigma.rows(), 1, true, 0), sigma); result /= std::sqrt(rchisq(nu) / nu); return result; } protected: /* Default (and only) constructor */ /*! \brief Default constructor * * Instantiate a random number generator */ rng() : rnorm_count_ (1) // Initialize the normal counter {} /* For Barton and Nackman trick. */ RNGTYPE& as_derived() { return static_cast(*this); } /* Generate Standard Normal variates */ /* These instance variables were static in the old * implementation. Making them instance variables provides * thread safety, as long as two threads don't access the same * rng at the same time w/out precautions. Fixes possible * previous issues with lecuyer. See the similar approach in * rgamma1 below. */ int rnorm_count_; double x2_; double rnorm1 () { double nu1, nu2, rsquared, sqrt_term; if (rnorm_count_ == 1){ // odd numbered passses do { nu1 = -1 +2*runif(); nu2 = -1 +2*runif(); rsquared = ::pow(nu1,2) + ::pow(nu2,2); } while (rsquared >= 1 || rsquared == 0.0); sqrt_term = std::sqrt(-2*std::log(rsquared)/rsquared); x2_ = nu2*sqrt_term; rnorm_count_ = 2; return nu1*sqrt_term; } else { // even numbered passes rnorm_count_ = 1; return x2_; } } /* Generate standard gamma variates */ double accept_; double rgamma1 (double alpha) { int test; double u, v, w, x, y, z, b, c; // Check for allowable parameters SCYTHE_CHECK_10(alpha <= 1, scythe_invalid_arg, "alpha <= 1"); // Implement Best's (1978) simulator b = alpha - 1; c = 3 * alpha - 0.75; test = 0; while (test == 0) { u = runif (); v = runif (); w = u * (1 - u); y = std::sqrt (c / w) * (u - .5); x = b + y; if (x > 0) { z = 64 * std::pow (v, 2) * std::pow (w, 3); if (z <= (1 - (2 * std::pow (y, 2) / x))) { test = 1; accept_ = x; } else if ((2 * (b * std::log (x / b) - y)) >= ::log (z)) { test = 1; accept_ = x; } else { test = 0; } } } return (accept_); } }; } // end namespace scythe #endif /* RNG_H */ MCMCpack/src/optimize.h0000644000176000001440000007704312140061657014475 0ustar ripleyusers/* * Scythe Statistical Library Copyright (C) 2000-2002 Andrew D. Martin * and Kevin M. Quinn; 2002-present Andrew D. Martin, Kevin M. Quinn, * and Daniel Pemstein. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify under the terms of the GNU General Public License as * published by Free Software Foundation; either version 2 of the * License, or (at your option) any later version. See the text files * COPYING and LICENSE, distributed with this source code, for further * information. * -------------------------------------------------------------------- * scythestat/optimize.h * */ /*! * \file optimize.h * \brief Definitions of functions for doing numerical optimization * and related operations. * * This file contains a number of functions that are useful for * numerical optimization and maximum likelihood estimation. In * addition, it contains some basic facilities for evaluating definite * integrals. 
* * As is the case across Scythe, we provide both general and default * template definitions for the functions in this file that return * Matrix objects. The general definitions allow the user to * customize the matrix_order and matrix_style of the returned Matrix, * while the default versions return concrete matrices of the same * matrix_order as the first (or only) Matrix argument to the * function. In cases where we supply these two types of definitions, * we explicitly document only the general version, although the * default definition will typically appear in the function list * below. * * \note * Doxygen has some difficulty dealing with overloaded templates. * Under certain circumstances it does not correctly process the * definitions of default templates. In these cases, the definition * for the default template will not even appear in the function list. * We provide default templates for all of the Matrix-returning * functions in this file. * */ #ifndef SCYTHE_OPTIMIZE_H #define SCYTHE_OPTIMIZE_H #ifdef SCYTHE_COMPILE_DIRECT #include "matrix.h" #include "algorithm.h" #include "error.h" #include "rng.h" #include "distributions.h" #include "la.h" #include "ide.h" #include "smath.h" #include "stat.h" #else #include "scythestat/matrix.h" #include "scythestat/algorithm.h" #include "scythestat/error.h" #include "scythestat/rng.h" #include "scythestat/distributions.h" #include "scythestat/la.h" #include "scythestat/ide.h" #include "scythestat/smath.h" #include "scythestat/stat.h" #endif /* We want to use an anonymous namespace to make the following consts * and functions local to this file, but mingw doesn't play nice with * anonymous namespaces so we do things differently when using the * cross-compiler. */ #ifdef __MINGW32__ #define SCYTHE_MINGW32_STATIC static #else #define SCYTHE_MINGW32_STATIC #endif namespace scythe { #ifndef __MINGW32__ namespace { #endif /* Functions (private to this file) that do very little... */ template SCYTHE_MINGW32_STATIC T donothing (const Matrix& x) { return (T) 0.0; } template SCYTHE_MINGW32_STATIC T donothing (T& x) { return (T) 0.0; } #ifndef __MINGW32__ } #endif /* Return the machine epsilon * Notes: Algorithm taken from Sedgewick, Robert. 1992. Algorithms * in C++. Addison Wesley. pg. 561 */ /*! \brief Compute the machine epsilon. * * The epsilon function returns the machine epsilon: the smallest * number that, when summed with 1, produces a value greater than * one. */ template T epsilon() { T eps, del, neweps; del = (T) 0.5; eps = (T) 0.0; neweps = (T) 1.0; while ( del > 0 ) { if ( 1 + neweps > 1 ) { /* Then the value might be too large */ eps = neweps; /* ...save the current value... */ neweps -= del; /* ...and decrement a bit */ } else { /* Then the value is too small */ neweps += del; /* ...so increment it */ } del *= 0.5; /* Reduce the adjustment by half */ } return eps; } /*! \brief Calculate the definite integral of a function from a to b. * * This function calculates the definite integral of a univariate * function on the interval \f$[a,b]\f$. * * \param fun The function (or functor) whose definite integral is * to be calculated. This function should both take and return a * single argument of type T. * \param a The starting value of the interval. * \param b The ending value of the interval. * \param N The number of subintervals to calculate. Increasing * this number will improve the accuracy of the estimate but will * also increase run-time. 
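*
* A brief sketch (an added illustration; the functor name is
* hypothetical):
* \code
* struct Square {
*   double operator() (double x) const { return x * x; }
* };
*
* double I = scythe::intsimp(Square(), 0.0, 1.0, 100);  // approximately 1/3
* \endcode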
* * \throw scythe_invalid_arg (Level 1) * * \see adaptsimp(FUNCTOR fun, T a, T b, unsigned int& N, double tol = 1e-5) * \note * Users will typically wish to implement \a fun in terms of a * functor. Using a functor provides a generic way in which to * evaluate functions with more than one parameter. Furthermore, * although one can pass a function pointer to this routine, * the compiler cannot inline and fully optimize code * referenced by function pointers. */ template T intsimp (FUNCTOR fun, T a, T b, unsigned int N) { SCYTHE_CHECK_10(a > b, scythe_invalid_arg, "Lower limit larger than upper"); T I = (T) 0; T w = (b - a) / N; for (unsigned int i = 1; i <= N; ++i) I += w * (fun(a +(i - 1) *w) + 4 * fun(a - w / 2 + i * w) + fun(a + i * w)) / 6; return I; } /*! \brief Calculate the definite integral of a function from a to b. * * This function calculates the definite integral of a univariate * function on the interval \f$[a,b]\f$. * * \param fun The function (or functor) whose definite integral is * to be calculated. This function should both take and return a * single argument of type T. * \param a The starting value of the interval. * \param b The ending value of the interval. * \param N The number of subintervals to calculate. Increasing * this number will improve the accuracy of the estimate but will * also increase run-time. * \param tol The accuracy required. Both accuracy and run-time * decrease as this number increases. * * \throw scythe_invalid_arg (Level 1) * * \see intsimp(FUNCTOR fun, T a, T b, unsigned int& N) * * \note * Users will typically wish to implement \a fun in terms of a * functor. Using a functor provides a generic way in which to * evaluate functions with more than one parameter. Furthermore, * although one can pass a function pointer to this routine, * the compiler cannot inline and fully optimize code * referenced by function pointers. */ template T adaptsimp(FUNCTOR fun, T a, T b, unsigned int N, double tol = 1e-5) { SCYTHE_CHECK_10(a > b, scythe_invalid_arg, "Lower limit larger than upper"); T I = intsimp(fun, a, b, N); if (std::fabs(I - intsimp(fun, a, b, N / 2)) > tol) return adaptsimp(fun, a, (a + b) / 2, N, tol) + adaptsimp(fun, (a + b) / 2, b, N, tol); return I; } /*! \brief Calculate gradient of a function using a forward * difference formula. * * This function numerically calculates the gradient of a * vector-valued function at \a theta using a forward difference * formula. * * \param fun The function to calculate the gradient of. This * function should both take and return a single Matrix (vector) of * type T. * \param theta The column vector of values at which to calculate * the gradient of the function. * * \see gradfdifls(FUNCTOR fun, T alpha, const Matrix& theta, const Matrix& p) * \see jacfdif(FUNCTOR fun, const Matrix& theta) * \see hesscdif(FUNCTOR fun, const Matrix& theta) * * \throw scythe_dimension_error (Level 1) * * \note * Users will typically wish to implement \a fun in terms of a * functor. Using a functor provides a generic way in which to * evaluate functions with more than one parameter. Furthermore, * although one can pass a function pointer to this routine, * the compiler cannot inline and fully optimize code * referenced by function pointers. */ template Matrix gradfdif (FUNCTOR fun, const Matrix& theta) { SCYTHE_CHECK_10(! 
theta.isColVector(), scythe_dimension_error, "Theta not column vector"); unsigned int k = theta.size(); T h = std::sqrt(epsilon()); h = std::sqrt(h); Matrix grad(k, 1); Matrix e; Matrix temp; for (unsigned int i = 0; i < k; ++i) { e = Matrix(k, 1); e[i] = h; temp = theta + e; donothing(temp); // XXX I don't understand this e = temp - theta; grad[i] = (fun(theta + e) - fun(theta)) / e[i]; } return grad; } // Default template version template Matrix gradfdif (FUNCTOR fun, const Matrix& theta) { return gradfdif(fun, theta); } /*! \brief Calculate the first derivative of the function using * a forward difference formula. * * This function numerically calculates the first derivative of a * function with respect to \a alpha at \f$theta + alpha \cdot p\f$ * using a forward difference formula. This function is primarily * useful for linesearches. * * \param fun The function to calculate the first derivative of. * This function should take a single Matrix argument and return * a value of type T. * \param alpha Double the step length. * \param theta A Matrix (vector) of parameter values at which to * calculate the gradient. * \param p A direction vector. * * \see gradfdif(FUNCTOR fun, const Matrix& theta) * \see jacfdif(FUNCTOR fun, const Matrix& theta) * \see hesscdif(FUNCTOR fun, const Matrix& theta) * * \throw scythe_dimension_error (Level 1) * * \note * Users will typically wish to implement \a fun in terms of a * functor. Using a functor provides a generic way in which to * evaluate functions with more than one parameter. Furthermore, * although one can pass a function pointer to this routine, * the compiler cannot inline and fully optimize code * referenced by function pointers. */ template T gradfdifls (FUNCTOR fun, T alpha, const Matrix& theta, const Matrix& p) { SCYTHE_CHECK_10(! theta.isColVector(), scythe_dimension_error, "Theta not column vector"); SCYTHE_CHECK_10(! p.isColVector(), scythe_dimension_error, "p not column vector"); unsigned int k = theta.size(); T h = std::sqrt(epsilon()); h = std::sqrt(h); //T h = std::sqrt(2.2e-16); T deriv; for (unsigned int i = 0; i < k; ++i) { T temp = alpha + h; donothing(temp); T e = temp - alpha; deriv = (fun(theta + (alpha + e) * p) - fun(theta + alpha * p)) / e; } return deriv; } /*! \brief Calculate the Jacobian of a function using a forward * difference formula. * * This function numerically calculates the Jacobian of a * vector-valued function using a forward difference formula. * * \param fun The function to calculate the Jacobian of. This * function should both take and return a Matrix (vector) of type * T. * \param theta The column vector of parameter values at which to * take the Jacobian of \a fun. * * \see gradfdif(FUNCTOR fun, const Matrix& theta) * \see gradfdifls(FUNCTOR fun, T alpha, const Matrix& theta, const Matrix& p) * \see hesscdif(FUNCTOR fun, const Matrix& theta) * * \throw scythe_dimension_error (Level 1) * * \note * Users will typically wish to implement \a fun in terms of a * functor. Using a functor provides a generic way in which to * evaluate functions with more than one parameter. Furthermore, * although one can pass a function pointer to this routine, * the compiler cannot inline and fully optimize code * referenced by function pointers. */ template Matrix jacfdif (FUNCTOR fun, const Matrix& theta) { SCYTHE_CHECK_10(! 
theta.isColVector(), scythe_dimension_error, "Theta not column vector"); Matrix fval = fun(theta); unsigned int k = theta.rows(); unsigned int n = fval.rows(); T h = std::sqrt(epsilon()); //2.2e-16 h = std::sqrt(h); Matrix J(n,k); Matrix e; Matrix temp; Matrix fthetae; Matrix ftheta; for (int i = 0; i < k; ++i) { e = Matrix(k,1); e[i] = h; temp = theta + e; donothing(temp); /// XXX ?? e = temp - theta; fthetae = fun(theta + e); ftheta = fun(theta); for (unsigned int j = 0; j < n; ++j) { J(j,i) = (fthetae[j] - ftheta[j]) / e[i]; } } return J; } // default template template Matrix jacfdif (FUNCTOR fun, const Matrix& theta) { return jacfdif(fun, theta); } /*! \brief Calculate the Hessian of a function using a central * difference formula. * * This function numerically calculates the Hessian of a * vector-valued function using a central difference formula. * * \param fun The function to calculate the Hessian of. This * function should take a Matrix (vector) of type T and return a * single value of type T. * \param theta The column vector of parameter values at which to * calculate the Hessian. * * \see gradfdif(FUNCTOR fun, const Matrix& theta) * \see gradfdifls(FUNCTOR fun, T alpha, const Matrix& theta, const Matrix& p) * \see jacfdif(FUNCTOR fun, const Matrix& theta) * * \throw scythe_dimension_error * * \note * Users will typically wish to implement \a fun in terms of a * functor. Using a functor provides a generic way in which to * evaluate functions with more than one parameter. Furthermore, * although one can pass a function pointer to this routine, * the compiler cannot inline and fully optimize code * referenced by function pointers. */ template Matrix hesscdif (FUNCTOR fun, const Matrix& theta) { SCYTHE_CHECK_10(! theta.isColVector(), scythe_dimension_error, "Theta not column vector"); T fval = fun(theta); //std::cout << std::endl; //std::cout << "hesscdif theta = " << theta << "\n"; //std::cout << "hesscdif fun(theta) = " << fval << std::endl; unsigned int k = theta.rows(); // stepsize CAREFUL -- THIS IS MACHINE SPECIFIC !!!! T h2 = std::sqrt(epsilon()); //T h2 = (T) 1e-10; T h = std::sqrt(h2); Matrix H(k,k); //std::cout << "h2 = " << h2 << " h = " << h << std::endl; Matrix ei; Matrix ej; Matrix temp; for (unsigned int i = 0; i < k; ++i) { ei = Matrix(k, 1); ei[i] = h; temp = theta + ei; donothing(temp); // XXX Again, I'm baffled ei = temp - theta; for (unsigned int j = 0; j < k; ++j){ ej = Matrix(k,1); ej[j] = h; temp = theta + ej; donothing(temp); // XXX and again ej = temp - theta; if (i == j) { H(i,i) = ( -fun(theta + 2.0 * ei) + 16.0 * fun(theta + ei) - 30.0 * fval + 16.0 * fun(theta - ei) - fun(theta - 2.0 * ei)) / (12.0 * h2); } else { H(i,j) = ( fun(theta + ei + ej) - fun(theta + ei - ej) - fun(theta - ei + ej) + fun(theta - ei - ej)) / (4.0 * h2); } } } //std::cout << "end of hesscdif, H = " << H << "\n"; return H; } // default template template Matrix hesscdif (FUNCTOR fun, const Matrix& theta) { return hesscdif(fun, theta); } /*! \brief Find the step length that minimizes an implied 1-dimensional function. * * This function performs a line search to find the step length * that approximately minimizes an implied one dimensional * function. * * \param fun The function to minimize. This function should take * one Matrix (vector) argument of type T and return a single value * of type T. * \param theta A column vector of parameter values that anchor the * 1-dimensional function. * \param p A direction vector that creates the 1-dimensional * function. 
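*
* A sketch of the functor argument expected here and throughout this
* file (an added illustration; the functor and variable names are
* hypothetical):
* \code
* struct SumOfSquares {
*   double operator() (const scythe::Matrix<>& theta) const {
*     return (scythe::t(theta) * theta)[0];
*   }
* };
*
* scythe::Matrix<> theta(2, 1, true, 1.0);
* scythe::Matrix<> grad = scythe::gradfdif(SumOfSquares(), theta);   // forward-difference gradient
* scythe::Matrix<> hess = scythe::hesscdif(SumOfSquares(), theta);   // central-difference Hessian
* \endcode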
* * \see linesearch2(FUNCTOR fun, const Matrix& theta, const Matrix& p, rng& runif) * \see zoom(FUNCTOR fun, T alpha_lo, T alpha_hi, const Matrix& theta, const Matrix& p) * \see BFGS(FUNCTOR fun, const Matrix& theta, rng& runif, unsigned int maxit, T tolerance, bool trace = false) * * \throw scythe_dimension_error (Level 1) * * \note * Users will typically wish to implement \a fun in terms of a * functor. Using a functor provides a generic way in which to * evaluate functions with more than one parameter. Furthermore, * although one can pass a function pointer to this routine, * the compiler cannot inline and fully optimize code * referenced by function pointers. */ template T linesearch1 (FUNCTOR fun, const Matrix& theta, const Matrix& p) { SCYTHE_CHECK_10(! theta.isColVector(), scythe_dimension_error, "Theta not column vector"); SCYTHE_CHECK_10(! p.isColVector(), scythe_dimension_error, "p not column vector"); T alpha_bar = (T) 1.0; T rho = (T) 0.9; T c = (T) 0.5; T alpha = alpha_bar; Matrix fgrad = gradfdif(fun, theta); while (fun(theta + alpha * p) > (fun(theta) + c * alpha * t(fgrad) * p)[0]) { alpha = rho * alpha; } return alpha; } /*! \brief Find the step length that minimizes an implied 1-dimensional function. * * This function performs a line search to find the step length * that approximately minimizes an implied one dimensional * function. * * \param fun The function to minimize. This function should take * one Matrix (vector) argument of type T and return a single value * of type T. * \param theta A column vector of parameter values that anchor the * 1-dimensional function. * \param p A direction vector that creates the 1-dimensional * function. * \param runif A random uniform number generator function object * (an object that returns a random uniform variate on (0,1) when * its () operator is invoked). * * \see linesearch1(FUNCTOR fun, const Matrix& theta, const Matrix& p) * \see zoom(FUNCTOR fun, T alpha_lo, T alpha_hi, const Matrix& theta, const Matrix& p) * \see BFGS(FUNCTOR fun, const Matrix& theta, rng& runif, unsigned int maxit, T tolerance, bool trace = false) * * \throw scythe_dimension_error (Level 1) * * \note * Users will typically wish to implement \a fun in terms of a * functor. Using a functor provides a generic way in which to * evaluate functions with more than one parameter. Furthermore, * although one can pass a function pointer to this routine, * the compiler cannot inline and fully optimize code * referenced by function pointers. */ template T linesearch2 (FUNCTOR fun, const Matrix& theta, const Matrix& p, rng& runif) { SCYTHE_CHECK_10(! theta.isColVector(), scythe_dimension_error, "Theta not column vector"); SCYTHE_CHECK_10(! 
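// ------------------------------------------------------------------
// Illustrative sketch for linesearch1() above (added for exposition;
// not part of the original source, hence commented out). One steepest
// descent step using the backtracking (Armijo) step length; the
// functor sketch_quadratic is the hypothetical one from earlier notes.
//
//   scythe::Matrix<double> theta(2, 1);                    // (0, 0)'
//   scythe::Matrix<double> p =
//     -1.0 * scythe::gradfdif(sketch_quadratic(), theta);  // descent direction
//   double alpha = scythe::linesearch1(sketch_quadratic(), theta, p);
//   theta = theta + alpha * p;  // alpha satisfies the sufficient-decrease
//                               // condition f(theta + alpha p) <=
//                               // f(theta) + c alpha grad'p used above
// ------------------------------------------------------------------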
p.isColVector(), scythe_dimension_error, "p not column vector"); T alpha_last = (T) 0.0; T alpha_cur = (T) 1.0; T alpha_max = (T) 10.0; T c1 = (T) 1e-4; T c2 = (T) 0.5; unsigned int max_iter = 50; T fgradalpha0 = gradfdifls(fun, (T) 0, theta, p); for (unsigned int i = 0; i < max_iter; ++i) { T phi_cur = fun(theta + alpha_cur * p); T phi_last = fun(theta + alpha_last * p); if ((phi_cur > (fun(theta) + c1 * alpha_cur * fgradalpha0)) || ((phi_cur >= phi_last) && (i > 0))) { T alphastar = zoom(fun, alpha_last, alpha_cur, theta, p); return alphastar; } T fgradalpha_cur = gradfdifls(fun, alpha_cur, theta, p); if (std::fabs(fgradalpha_cur) <= -1 * c2 * fgradalpha0) return alpha_cur; if ( fgradalpha_cur >= (T) 0.0) { T alphastar = zoom(fun, alpha_cur, alpha_last, theta, p); return alphastar; } alpha_last = alpha_cur; // runif stuff below is probably not correc KQ 12/08/2006 // I think it should work now DBP 01/02/2007 alpha_cur = runif() * (alpha_max - alpha_cur) + alpha_cur; } return 0.001; } /*! \brief Find minimum of a function once bracketed. * * This function finds the minimum of a function, once bracketed. * * \param fun The function to minimize. This function should take * one Matrix (vector) argument of type T and return a single value * of type T. * \param alpha_lo The lower bracket. * \param alpha_hi The upper bracket. * \param theta A column vector of parameter values that anchor the * 1-dimensional function. * \param p A direction vector that creates the 1-dimensional * * \see linesearch1(FUNCTOR fun, const Matrix& theta, const Matrix& p) * \see linesearch2(FUNCTOR fun, const Matrix& theta, const Matrix& p, rng& runif) * \see BFGS(FUNCTOR fun, const Matrix& theta, rng& runif, unsigned int maxit, T tolerance, bool trace = false) * * \throw scythe_dimension_error (Level 1) * * \note * Users will typically wish to implement \a fun in terms of a * functor. Using a functor provides a generic way in which to * evaluate functions with more than one parameter. Furthermore, * although one can pass a function pointer to this routine, * the compiler cannot inline and fully optimize code * referenced by function pointers. * */ template T zoom (FUNCTOR fun, T alpha_lo, T alpha_hi, const Matrix& theta, const Matrix& p) { SCYTHE_CHECK_10(! theta.isColVector(), scythe_dimension_error, "Theta not column vector"); SCYTHE_CHECK_10(! p.isColVector(), scythe_dimension_error, "p not column vector"); T alpha_j = (alpha_lo + alpha_hi) / 2.0; T phi_0 = fun(theta); T c1 = (T) 1e-4; T c2 = (T) 0.5; T fgrad0 = gradfdifls(fun, (T) 0, theta, p); unsigned int count = 0; unsigned int maxit = 20; while(count < maxit) { T phi_j = fun(theta + alpha_j * p); T phi_lo = fun(theta + alpha_lo * p); if ((phi_j > (phi_0 + c1 * alpha_j * fgrad0)) || (phi_j >= phi_lo)){ alpha_hi = alpha_j; } else { T fgradj = gradfdifls(fun, alpha_j, theta, p); if (std::fabs(fgradj) <= -1 * c2 * fgrad0){ return alpha_j; } if ( fgradj * (alpha_hi - alpha_lo) >= 0){ alpha_hi = alpha_lo; } alpha_lo = alpha_j; } ++count; } return alpha_j; } /*! \brief Find function minimum using the BFGS algorithm. * * Numerically find the minimum of a function using the BFGS * algorithm. * * \param fun The function to minimize. This function should take * one Matrix (vector) argument of type T and return a single value * of type T. * \param theta A column vector of parameter values that anchor the * 1-dimensional function. 
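 *
 * [Illustrative example, added for exposition; not part of the
 * original documentation.] A minimal sketch of a call, reusing the
 * hypothetical sketch_quadratic functor from the earlier notes and a
 * mersenne generator; the rng argument is only used by the underlying
 * Wolfe-condition line search (linesearch2) to randomize trial step
 * lengths:
 *
 * \code
 * scythe::mersenne myrng;
 * myrng.initialize(12345UL);
 * scythe::Matrix<double> start(2, 1);                      // (0, 0)'
 * scythe::Matrix<double> thetahat =
 *   scythe::BFGS(sketch_quadratic(), start, myrng, 100, 1e-8);
 * // thetahat should be close to the analytic minimum (1, -0.5)'
 * \endcode
 *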
* \param runif A random uniform number generator function object * (an object that returns a random uniform variate on (0,1) when * its () operator is invoked). * \param maxit The maximum number of iterations. * \param tolerance The convergence tolerance. * \param trace Boolean value determining whether BFGS should print * to stdout (defaults to false). * * \see linesearch1(FUNCTOR fun, const Matrix& theta, const Matrix& p) * \see linesearch2(FUNCTOR fun, const Matrix& theta, const Matrix& p, rng& runif) * \see zoom(FUNCTOR fun, T alpha_lo, T alpha_hi, const Matrix& theta, const Matrix& p) * * \throw scythe_dimension_error (Level 1) * \throw scythe_convergence_error (Level 0) * * \note * Users will typically wish to implement \a fun in terms of a * functor. Using a functor provides a generic way in which to * evaluate functions with more than one parameter. Furthermore, * although one can pass a function pointer to this routine, * the compiler cannot inline and fully optimize code * referenced by function pointers. */ // there were 2 versions of linesearch1-- the latter was what we // had been calling linesearch2 template Matrix BFGS (FUNCTOR fun, const Matrix& theta, rng& runif, unsigned int maxit, T tolerance, bool trace = false) { SCYTHE_CHECK_10(! theta.isColVector(), scythe_dimension_error, "Theta not column vector"); unsigned int n = theta.size(); // H is initial inverse hessian Matrix H = inv(hesscdif(fun, theta)); // gradient at starting values Matrix fgrad = gradfdif(fun, theta); Matrix thetamin = theta; Matrix fgrad_new = fgrad; Matrix I = eye(n); Matrix s; Matrix y; unsigned int count = 0; while( (t(fgrad_new)*fgrad_new)[0] > tolerance) { Matrix p = -1.0 * H * fgrad; //std::cout << "initial H * fgrad = " << H * fgrad << "\n"; //std::cout << "initial p = " << p << "\n"; T alpha = linesearch2(fun, thetamin, p, runif); //T alpha = linesearch1(fun, thetamin, p); //std::cout << "after linesearch p = " << p << "\n"; Matrix thetamin_new = thetamin + alpha * p; fgrad_new = gradfdif(fun, thetamin_new); s = thetamin_new - thetamin; y = fgrad_new - fgrad; T rho = 1.0 / (t(y) * s)[0]; H = (I - rho * s * t(y)) * H *(I - rho * y * t(s)) + rho * s * t(s); thetamin = thetamin_new; fgrad = fgrad_new; ++count; #ifndef SCYTHE_RPACK if (trace) { std::cout << "BFGS iteration = " << count << std::endl; std::cout << "thetamin = " << (t(thetamin)) ; std::cout << "gradient = " << (t(fgrad)) ; std::cout << "t(gradient) * gradient = " << (t(fgrad) * fgrad) ; std::cout << "function value = " << fun(thetamin) << std::endl << std::endl; } #endif //std::cout << "Hessian = " << hesscdif(fun, theta) << "\n"; //std::cout << "H = " << H << "\n"; //std::cout << "alpha = " << alpha << std::endl; //std::cout << "p = " << p << "\n"; //std::cout << "-1 * H * fgrad = " << -1.0 * H * fgrad << "\n"; SCYTHE_CHECK(count > maxit, scythe_convergence_error, "Failed to converge. Try better starting values"); } return thetamin; } // Default template template Matrix BFGS (FUNCTOR fun, const Matrix& theta, rng& runif, unsigned int maxit, T tolerance, bool trace = false) { return BFGS (fun, theta, runif, maxit, tolerance, trace); } /* Solves a system of n nonlinear equations in n unknowns of the form * fun(thetastar) = 0 for thetastar given the function, starting * value theta, max number of iterations, and tolerance. * Uses Broyden's method. */ /*! \brief Solve a system of nonlinear equations. * * Solves a system of n nonlinear equations in n unknowns of the form * \f$fun(\theta^*) = 0\f$ for \f$\theta^*\f$. 
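 *
 * [Illustrative example, added for exposition; not part of the
 * original documentation.] A hypothetical two-equation system and a
 * call with the default maxit and tolerance; the functor name, the
 * starting values, and the expectation of convergence from this start
 * are assumptions:
 *
 * \code
 * struct sketch_system {
 *   // F(x) = (x0^2 + x1^2 - 2, x0 - x1)'; one root is (1, 1)'
 *   scythe::Matrix<double>
 *   operator() (const scythe::Matrix<double>& x) const {
 *     scythe::Matrix<double> f(2, 1);
 *     f(0) = x(0) * x(0) + x(1) * x(1) - 2.0;
 *     f(1) = x(0) - x(1);
 *     return f;
 *   }
 * };
 *
 * scythe::Matrix<double> start(2, 1);
 * start(0) = 1.5;  start(1) = 0.5;
 * scythe::Matrix<double> root = scythe::nls_broyden(sketch_system(), start);
 * // root should end up near (1, 1)'
 * \endcode
 *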
* * \param fun The function to solve. The function should both take * and return a Matrix of type T. * \param theta A column vector of parameter values at which to * start the solve procedure. * \param maxit The maximum number of iterations. * \param tolerance The convergence tolerance. * * \throw scythe_dimension_error (Level 1) * \throw scythe_convergence_error (Level 1) * * \note * Users will typically wish to implement \a fun in terms of a * functor. Using a functor provides a generic way in which to * evaluate functions with more than one parameter. Furthermore, * although one can pass a function pointer to this routine, * the compiler cannot inline and fully optimize code * referenced by function pointers. */ template Matrix nls_broyden(FUNCTOR fun, const Matrix& theta, unsigned int maxit = 5000, T tolerance = 1e-6) { SCYTHE_CHECK_10(! theta.isColVector(), scythe_dimension_error, "Theta not column vector"); Matrix thetastar = theta; Matrix B = jacfdif(fun, thetastar); Matrix fthetastar; Matrix p; Matrix thetastar_new; Matrix fthetastar_new; Matrix s; Matrix y; for (unsigned int i = 0; i < maxit; ++i) { fthetastar = fun(thetastar); p = lu_solve(B, -1 * fthetastar); T alpha = (T) 1.0; thetastar_new = thetastar + alpha*p; fthetastar_new = fun(thetastar_new); s = thetastar_new - thetastar; y = fthetastar_new - fthetastar; B = B + ((y - B * s) * t(s)) / (t(s) * s); thetastar = thetastar_new; if (max(fabs(fthetastar_new)) < tolerance) return thetastar; } SCYTHE_THROW_10(scythe_convergence_error, "Failed to converge. Try better starting values or increase maxit"); return thetastar; } // default template template Matrix nls_broyden (FUNCTOR fun, const Matrix& theta, unsigned int maxit = 5000, T tolerance = 1e-6) { return nls_broyden(fun, theta, maxit, tolerance); } } // namespace scythe #endif /* SCYTHE_OPTIMIZE_H */ MCMCpack/src/mersenne.h0000644000176000001440000002504612140061657014445 0ustar ripleyusers/* * Scythe Statistical Library * Copyright (C) 2000-2002 Andrew D. Martin and Kevin M. Quinn; * 2002-present Andrew D. Martin, Kevin M. Quinn, and Daniel * Pemstein. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * under the terms of the GNU General Public License as published by * Free Software Foundation; either version 2 of the License, or (at * your option) any later version. See the text files COPYING * and LICENSE, distributed with this source code, for further * information. * -------------------------------------------------------------------- * scythestat/rng/mersenne.h * * Provides the class definition for the mersenne random number * generator. This class extends the base rng class by providing an * implementation of runif() based on an implementation of the * mersenne twister, released under the following license: * * A C-program for MT19937, with initialization improved 2002/1/26. * Coded by Takuji Nishimura and Makoto Matsumoto. * * Copyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura, * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright * notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * 3. 
The names of its contributors may not be used to endorse or * promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * For more information see: * http://www.math.keio.ac.jp/matumoto/emt.html * */ /*! \file mersenne.h * \brief The Mersenne Twister random number generator. * * This file contains the mersenne class, a class that extends * Scythe's base random number generation class (scythe::rng) by * providing an implementation of scythe::rng::runif() using the * Mersenne Twister algorithm. */ #ifndef SCYTHE_MERSENNE_H #define SCYTHE_MERSENNE_H #ifdef SCYTHE_COMPILE_DIRECT #include "rng.h" #else #include "scythestat/rng.h" #endif namespace scythe { #ifdef __MINGW32__ /* constant vector a */ static const unsigned long MATRIX_A = 0x9908b0dfUL; /* most significant w-r bits */ static const unsigned long UPPER_MASK = 0x80000000UL; /* least significant r bits */ static const unsigned long LOWER_MASK = 0x7fffffffUL; #else namespace { /* constant vector a */ const unsigned long MATRIX_A = 0x9908b0dfUL; /* most significant w-r bits */ const unsigned long UPPER_MASK = 0x80000000UL; /* least significant r bits */ const unsigned long LOWER_MASK = 0x7fffffffUL; } #endif /*! \brief The Mersenne Twister random number generator. * * This class defines a random number generator, using the Mersenne * Twister algorithm developed and implemented by Makoto Matsumoto * and Takuji Nishimura (1997, 2002). The period of this random * number generator is \f$2^{19937} - 1\f$. * * The mersenne class extends Scythe's basic random number * generating class, scythe::rng, implementing the interface that it * defines. * * \see rng * \see lecuyer * */ class mersenne: public rng { public: /*! \brief Default constructor * * This constructor generates an unseeded and uninitialized * mersenne object. It is most useful for creating arrays of * random number generators. An uninitialized mersenne object * will be seeded with the default seed (5489UL) automatically * upon use. * * \see mersenne(const mersenne &m) * \see initialize(unsigned long s) */ mersenne () : rng (), mti (N + 1) {} /*! \brief Copy constructor * * This constructor makes a copy of an existing mersenne * object, duplicating its seed and current state exactly. * * \param m An existing mersenne random number generator. * * \see mersenne() */ mersenne (const mersenne &m) : rng (), mti (m.mti) { } /*! \brief Sets the seed. * * This method sets the seed of the random number generator and * readies it to begin generating random numbers. Calling this * function on a mersenne object that is already in use is * supported, although not suggested unless you know what you * are doing. * * \param s A long integer seed. 
* * \see mersenne() */ void initialize (unsigned long s) { mt[0]= s & 0xffffffffUL; for (mti=1; mti> 30)) + mti); /* See Knuth TAOCP Vol2. 3rd Ed. P.106 for multiplier. */ /* In the previous versions, MSBs of the seed affect */ /* only MSBs of the array mt[]. */ /* 2002/01/09 modified by Makoto Matsumoto */ mt[mti] &= 0xffffffffUL; /* for >32 bit machines */ } } /*! \brief Generate a random uniform variate on (0, 1). * * This routine returns a random double precision floating point * number from the uniform distribution on the interval (0, * 1). This method overloads the pure virtual method of the * same name in the rng base class. * * \see runif(unsigned int, unsigned int) * \see genrand_int32() * \see rng */ inline double runif() { return (((double) genrand_int32()) + 0.5) * (1.0 / 4294967296.0); } /* We have to override the overloaded forms of runif because * overloading the no-arg runif() hides the base class * definition; C++ stops looking once it finds the above. */ /*! \brief Generate a Matrix of random uniform variates. * * This routine returns a Matrix of double precision random * uniform variates. on the interval (0, 1). This method * overloads the virtual method of the same name in the rng base * class. * * This is the general template version of this method and * is called through explicit template instantiation. * * \param rows The number of rows in the returned Matrix. * \param cols The number of columns in the returned Matrix. * * \see runif() * \see rng * * \note We are forced to override this overloaded method * because the 1-arg version of runif() hides the base class's * definition of this method from the compiler, although it * probably should not. */ template inline Matrix runif(unsigned int rows, unsigned int cols) { return rng::runif(rows, cols); } /*! \brief Generate a Matrix of random uniform variates. * * This routine returns a Matrix of double precision random * uniform variates on the interval (0, 1). This method * overloads the virtual method of the same name in the rng base * class. * * This is the default template version of this method and * is called through implicit template instantiation. * * \param rows The number of rows in the returned Matrix. * \param cols The number of columns in the returned Matrix. * * \see runif() * \see rng * * \note We are forced to override this overloaded method * because the 1-arg version of runif() hides the base class's * definition of this method from the compiler, although it * probably should not. */ Matrix runif(unsigned int rows, unsigned int cols) { return rng::runif(rows, cols); } /* generates a random number on [0,0xffffffff]-interval */ /*! \brief Generate a random long integer. * * This method generates a random integer, drawn from the * discrete uniform distribution on the interval [0,0xffffffff]. 
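 *
 * [Illustrative example, added for exposition; not part of the
 * original documentation.] Typical seeding and draws; the seed value
 * is arbitrary:
 *
 * \code
 * scythe::mersenne myrng;
 * myrng.initialize(42UL);                // otherwise the default seed
 *                                        // 5489UL is set on first use
 * double u = myrng.runif();              // uniform on (0, 1), computed as
 *                                        // (genrand_int32() + 0.5) / 2^32
 * scythe::Matrix<double> U = myrng.runif(2, 3);  // 2 x 3 matrix of uniforms
 * unsigned long z = myrng.genrand_int32();       // raw draw on [0, 0xffffffff]
 * \endcode
 *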
* * \see runif() * \see initialize(unsigned long s) */ unsigned long genrand_int32() { unsigned long y; static unsigned long mag01[2]={0x0UL, MATRIX_A}; /* mag01[x] = x * MATRIX_A for x=0,1 */ if (mti >= N) { /* generate N words at one time */ int kk; if (mti == N+1) // if init_genrand() has not been called, this->initialize(5489UL); // a default initial seed is used for (kk=0;kk> 1) ^ mag01[y & 0x1UL]; } for (;kk> 1) ^ mag01[y & 0x1UL]; } y = (mt[N-1]&UPPER_MASK)|(mt[0]&LOWER_MASK); mt[N-1] = mt[M-1] ^ (y >> 1) ^ mag01[y & 0x1UL]; mti = 0; } y = mt[mti++]; /* Tempering */ y ^= (y >> 11); y ^= (y << 7) & 0x9d2c5680UL; y ^= (y << 15) & 0xefc60000UL; y ^= (y >> 18); return y; } protected: /* Period parameters */ static const int N = 624; static const int M = 398; /* the array for the state vector */ unsigned long mt[N]; /* mti==N+1 means mt[N] is not initialized */ int mti; }; } #endif /* SCYTHE_MERSENNE_H */ MCMCpack/src/MCMCtobit.cc0000644000176000001440000001232712140061657014546 0ustar ripleyusers////////////////////////////////////////////////////////////////////////// // MCMCtobit.cc is a program that simualates draws from the posterior // density of a linear regression model with Gaussian errors when the // dependent variable is censored from below and/or above. // // The initial version of this file was generated by the // auto.Scythe.call() function in the MCMCpack R package // written by: // // Andrew D. Martin // Dept. of Political Science // Washington University in St. Louis // admartin@wustl.edu // // Kevin M. Quinn // Dept. of Government // Harvard University // kevin_quinn@harvard.edu // // This software is distributed under the terms of the GNU GENERAL // PUBLIC LICENSE Version 2, June 1991. See the package LICENSE // file for more information. // // This file was initially generated on Tue Sep 14 00:50:08 2004 // ADM and KQ 10/10/2002 [ported to Scythe0.3] // BG 09/18/2004 [updated to new specification, added above censoring] // ADM 7/7/2007 [updated to Scythe 1.0.X] // // Copyright (C) 2003-2007 Andrew D. Martin and Kevin M. Quinn // Copyright (C) 2007-present Andrew D. Martin, Kevin M. Quinn, // and Jong Hee Park ////////////////////////////////////////////////////////////////////////// #ifndef MCMCTOBIT_CC #define MCMCTOBIT_CC #include "MCMCrng.h" #include "MCMCfcds.h" #include "matrix.h" #include "distributions.h" #include "stat.h" #include "la.h" #include "ide.h" #include "smath.h" #include "rng.h" #include "mersenne.h" #include "lecuyer.h" #include // needed to use Rprintf() #include // needed to allow user interrupts using namespace std; using namespace scythe; /* MCMCtobit implemenation. Takes Matrix<> reference which it * fills with the posterior. 
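 *
 * [Expository note, added; not part of the original comment.] The
 * sampler is a data-augmentation Gibbs scheme: censored observations
 * are replaced by latent draws from the appropriately truncated
 * normal, after which beta and sigma2 have the usual conjugate
 * updates. Per observation i at each iteration, roughly:
 *
 *   if (below <= y_i && y_i <= above)  Z_i = y_i;                // uncensored
 *   else if (y_i <= below)  Z_i ~ N(x_i'beta, sigma2) truncated above at `below`
 *   else                    Z_i ~ N(x_i'beta, sigma2) truncated below at `above`
 *
 * The above-censoring branch visible in the code uses
 * stream.rtbnorm_combo(); the below-censoring branch presumably uses
 * the truncated-above counterpart (rtanorm_combo). The scan then draws
 * sigma2 from its inverse-gamma full conditional
 * (NormIGregress_sigma2_draw) and beta from its multivariate-normal
 * full conditional (NormNormregress_beta_draw).
 *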
*/ template void MCMCtobit_impl (rng& stream, const Matrix<>& Y, const Matrix<>& X, Matrix<>& beta, const Matrix<>& b0, const Matrix<>& B0, double c0, double d0, double below, double above, unsigned int burnin, unsigned int mcmc, unsigned int thin, unsigned int verbose, Matrix<>& result) { // define constants const unsigned int tot_iter = burnin + mcmc; // total number of mcmc iterations const unsigned int nstore = mcmc / thin; // number of draws to store const unsigned int k = X.cols(); const unsigned int N = X.rows(); const Matrix <> XpX = crossprod(X); // storage matrix or matrices Matrix <> betamatrix (k, nstore); Matrix <> sigmamatrix (1, nstore); ///// MCMC SAMPLING OCCURS IN THIS FOR LOOP int count = 0; Matrix <> Z = Y; for(unsigned int iter = 0; iter < tot_iter; ++iter){ double sigma2 = NormIGregress_sigma2_draw (X, Z, beta, c0, d0, stream); Matrix <> Z_mean = X * beta; for (unsigned int i=0; i= above) Z[i] = stream.rtbnorm_combo(Z_mean[i], sigma2, above); } Matrix <> XpZ = t(X) * Z; beta = NormNormregress_beta_draw (XpX, XpZ, b0, B0, sigma2, stream); // store draws in storage matrix (or matrices) if (iter >= burnin && (iter % thin == 0)) { sigmamatrix (0, count) = sigma2; betamatrix(_, count) = beta; ++count; } // print output to stdout if(verbose > 0 && iter % verbose == 0) { Rprintf("\n\nMCMCtobit iteration %i of %i \n", (iter+1), tot_iter); Rprintf("beta = \n"); for (unsigned int j=0; j Y(*Yrow, *Ycol, Ydata); const Matrix <> X(*Xrow, *Xcol, Xdata); Matrix betastart(*betastartrow, *betastartcol, betastartdata); const Matrix <> b0(*b0row, *b0col, b0data); const Matrix <> B0(*B0row, *B0col, B0data); Matrix<> storagematrix; MCMCPACK_PASSRNG2MODEL(MCMCtobit_impl, Y, X, betastart, b0, B0, *c0, *d0, *below, *above, *burnin, *mcmc, *thin, *verbose, storagematrix); const unsigned int size = *samplerow * *samplecol; for (unsigned int i=0; i // needed to use Rprintf() #include // needed to allow user interrupts using namespace std; using namespace scythe; template void MCMCSVDreg_impl(rng& stream, double *sampledata, const int *samplerow, const int *samplecol, const double *Ydata, const int *Yrow, const int *Ycol, const int *Ymiss, const double *Adata, const int *Arow, const int *Acol, const double *Ddata, const int *Drow, const int *Dcol, const double *Fdata, const int *Frow, const int *Fcol, const int *burnin, const int *mcmc, const int *thin, const int *uselecuyer, const int *seedarray, const int *lecuyerstream, const int *verbose, const double *taustartdata, const int *taustartrow, const int *taustartcol, const double *g0data, const int *g0row, const int *g0col, const double *a0, const double *b0, const double* c0, const double* d0, const double* w0, const int* betasamp ){ // pull together Matrix objects Matrix <> y(*Yrow, *Ycol, Ydata); Matrix <> A(*Arow, *Acol, Adata); Matrix <> D(*Drow, *Dcol, Ddata); Matrix <> F(*Frow, *Fcol, Fdata); Matrix <> g0(*g0row, *g0col, g0data); // define constants const int tot_iter = *burnin + *mcmc; // total number of mcmc iterations const int nstore = *mcmc / *thin; // number of draws to store const int k = *Arow; const int n = *Yrow; Matrix<> FtD = t(F) * D; Matrix<> DinvF = invpd(D) * F; Matrix<> Dg0 = D * g0; //double dsquared[n]; OLD (NEW BELOW) double* dsquared = new double[n]; for (int i=0; i beta_store; if (*betasamp == 1){ beta_store = Matrix(k, nstore); } Matrix<> gamma_store(n, nstore); Matrix<> Y_store(nYmiss, nstore); Matrix<> sigma2_store(1, nstore); Matrix<> tau2_store(n, nstore); // set starting values // double tau2[n]; OLD WAY (NEW 
BELOW) double* tau2 = new double[n]; for (int i=0; i gamma(n, 1); double sigma2 = 1; Matrix beta; if (*betasamp == 1){ beta = Matrix(k, 1); } /////////////////// Gibbs sampler /////////////////// int count = 0; for (int iter = 0; iter < tot_iter; ++iter) { // sample [sigma2 | Y, A, D, F, tau2] Matrix Fy = F * y; Fy = Fy - Dg0; double q = 0.0; for (int i=0; i gammahat = DinvF * y; for (int i=0; i gammanoti = gamma; gammanoti[i] = 0.0; Matrix<> residvec = y - FtD * gammanoti; Matrix<> margmeanvec = FtD(_,i) * g0[i]; Matrix<> margVarmat = FtD(_,i) * t(FtD(_,i)) * (sigma2 * tau2[i]) / (dsquared[i]) + eye(n) * sigma2; double logw0 = std::log(w0[i]); double log1minusw0 = std::log(1.0 - w0[i]); double logf0 = 0.0; for (int j=0; j (log1minusw0 + logfnot0)){ logdenom = logw0 + logf0 + std::log(1.0 + std::exp(log1minusw0 - logw0 + logfnot0 - logf0)); } else{ logdenom = log1minusw0 + logfnot0 + std::log(1.0 + std::exp(logw0 - log1minusw0 + logf0 - logfnot0)); } double wstar = std::exp(logw0 + logf0 - logdenom); if (stream.runif() < wstar){ gamma[i] = 0.0; } else { gamma[i] = stream.rnorm(mstar, std::sqrt(vstar)); } } // sample [tau2 | Y, A, D, F, gamma, sigma2] for (int i=0; i eta = FtD * gamma; for (int i=0; i= *burnin && (iter % *thin == 0)) { sigma2_store[count] = sigma2; int Ymiss_count = 0; for (int i = 0; i 0 && iter % *verbose == 0) { Rprintf("\n\nMCMCSVDreg iteration %i of %i \n", (iter+1), tot_iter); Rprintf("gamma = \n"); for (int j=0; j storagematrix = cbind(t(Y_store), t(gamma_store)); storagematrix = cbind(storagematrix, t(tau2_store)); storagematrix = cbind(storagematrix, t(sigma2_store)); if (*betasamp == 1){ storagematrix = cbind(storagematrix, t(beta_store)); } const int size = *samplerow * *samplecol; for(int i = 0; i < size; ++i) sampledata[i] = storagematrix[i]; } extern "C" { // simulate from posterior distribution and return an mcmc by parameters // matrix of the posterior sample void MCMCSVDreg(double *sampledata, const int *samplerow, const int *samplecol, const double *Ydata, const int *Yrow, const int *Ycol, const int *Ymiss, const double *Adata, const int *Arow, const int *Acol, const double *Ddata, const int *Drow, const int *Dcol, const double *Fdata, const int *Frow, const int *Fcol, const int *burnin, const int *mcmc, const int *thin, const int *uselecuyer, const int *seedarray, const int *lecuyerstream, const int *verbose, const double *taustartdata, const int *taustartrow, const int *taustartcol, const double *g0data, const int *g0row, const int *g0col, const double *a0, const double *b0, const double* c0, const double* d0, const double* w0, const int* betasamp) { MCMCPACK_PASSRNG2MODEL(MCMCSVDreg_impl, sampledata, samplerow, samplecol, Ydata, Yrow, Ycol, Ymiss, Adata, Arow, Acol, Ddata, Drow, Dcol, Fdata, Frow, Fcol, burnin, mcmc, thin, uselecuyer, seedarray, lecuyerstream, verbose, taustartdata, taustartrow, taustartcol, g0data, g0row, g0col, a0, b0, c0, d0, w0, betasamp); } // end MCMCSVDreg } // end extern "C" #endif MCMCpack/src/MCMCrng.h0000644000176000001440000000562612140061657014061 0ustar ripleyusers////////////////////////////////////////////////////////////////////////// // MCMCrng.h is the header file for MCMCrng.cc. It contains // functions used to handle random number generator streams. // // Andrew D. Martin // Dept. of Political Science // Washington University in St. Louis // admartin@wustl.edu // // Kevin M. Quinn // Dept. 
of Government // Harvard University // kevin_quinn@harvard.edu // // This software is distributed under the terms of the GNU GENERAL // PUBLIC LICENSE Version 2, June 1991. See the package LICENSE // file for more information. // // ADM 7/22/04 // // Copyright (C) 2003-2007 Andrew D. Martin and Kevin M. Quinn // Copyright (C) 2007-present Andrew D. Martin, Kevin M. Quinn, // and Jong Hee Park ////////////////////////////////////////////////////////////////////////// #ifndef MCMCRNG_H #define MCMCRNG_H #include "mersenne.h" #include "lecuyer.h" /* This allows model handles to efficiently pass the appropriate rng * to object a model's implementation. The first arg is the name of * the model implementation function. The remaining arguments are the * arguments to the model implementation function. * * The macro assumes that the function it is called in contains an int * pointer named uselecuyer holding a boolean indication of whether or * not to use the lecuyer rng. Secondly, it assumes the function * contains a pointer to an array of integers called seedarray that * contains six random number seeds (or just one if using mersenne). * Finally, it assumes it contains a pointer to an integer called * lecuyerstream that indicates which of the lecuyer generator's * streams to use. */ #define MCMCPACK_PASSRNG2MODEL(MODEL_IMPL, ...) \ { \ unsigned long u_seed_array[6]; \ for (int i = 0; i < 6; ++i) \ u_seed_array[i] = static_cast(seedarray[i]); \ \ if (*uselecuyer == 0) { \ mersenne the_rng; \ the_rng.initialize(u_seed_array[0]); \ MODEL_IMPL(the_rng, __VA_ARGS__); \ } else { \ lecuyer::SetPackageSeed(u_seed_array); \ for (int i = 0; i < (*lecuyerstream - 1); ++i) \ lecuyer skip_rng; \ lecuyer the_rng; \ MODEL_IMPL(the_rng, __VA_ARGS__); \ } \ } #endif MCMCpack/src/MCMCresidualBreakAnalysis.cc0000644000176000001440000003565512140061657017717 0ustar ripleyusers//////////////////////////////////////////////////////////////////// // MCMCresidualBreakAnalysis.cc // // Jong Hee Park // Department of Political Science and International Relations // Seoul National University // jongheepark@snu.ac.kr // // Written 03/03/2009 // //////////////////////////////////////////////////////////////////// #ifndef MCMCRESIDUALBREAKANALYSIS_CC #define MCMCRESIDUALBREAKANALYSIS_CC #include "MCMCrng.h" #include "MCMCfcds.h" #include "matrix.h" #include "distributions.h" #include "stat.h" #include "la.h" #include "ide.h" #include "smath.h" #include "rng.h" #include "mersenne.h" #include "lecuyer.h" #include // needed to use Rprintf() #include // needed to allow user interrupts using namespace std; using namespace scythe; static double dinvgamma(double theta, double a, double b) { double logf = a * log(b) - lngammafn(a) + -(a+1) * log(theta) + -b/theta; return exp(logf); } //////////////////////////////////////////// // Start MCMCresidualBreakAnalysispoint function /////////////////////////////////////////// template void MCMCresidualBreakAnalysis_impl(rng& stream, const double m, const Matrix<>& Y, Matrix<>& beta, Matrix<>& Sigma, Matrix<>& P, Matrix& s, const double b0, const double B0, const double c0, const double d0, const Matrix<>& A0, unsigned int burnin, unsigned int mcmc, unsigned int thin, unsigned int verbose, bool chib, Matrix<>& beta_store, Matrix<>& Sigma_store, Matrix<>& P_store, Matrix<>& ps_store, Matrix& s_store, double& logmarglike, double&loglike) { // define constants and form cross-product matrices const int tot_iter = burnin + mcmc; const int nstore = mcmc / thin; const int n = Y.rows(); const int ns 
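// ------------------------------------------------------------------
// Illustrative sketch of how a model handle uses the
// MCMCPACK_PASSRNG2MODEL macro from MCMCrng.h above (added for
// exposition; not part of the original source, hence commented out).
// The toy function names are hypothetical; the rng<RNGTYPE>& parameter
// follows the pattern of the package's *_impl functions, and the
// handle must declare uselecuyer, seedarray (six seeds), and
// lecuyerstream exactly as the macro's comment requires.
//
//   template <typename RNGTYPE>
//   void toy_impl (scythe::rng<RNGTYPE>& stream, double* out, const int* n) {
//     for (int i = 0; i < *n; ++i)
//       out[i] = stream.rnorm(0.0, 1.0);   // standard normal draws
//   }
//
//   extern "C" {
//     void toy_model (double* out, const int* n,
//                     const int* uselecuyer, const int* seedarray,
//                     const int* lecuyerstream) {
//       MCMCPACK_PASSRNG2MODEL(toy_impl, out, n);
//     }
//   }
// ------------------------------------------------------------------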
= m + 1; const double B0inv = 1/B0; Matrix<> sigma(ns, 1); //MCMC loop unsigned int count = 0; for (int iter = 0; iter < tot_iter; ++iter){ ////////////////////// // 1. Sample beta and Sigma ////////////////////// int beta_count = 0; Matrix nstate(ns, 1); for (int j = 0; j yj = Y((beta_count - nstate[j]), 0, (beta_count - 1), 0); double Bn = 1/(B0 + (double)nstate[j]/Sigma[j]); double bn = Bn*(B0*b0 + sum(yj)/Sigma[j]); beta(j) = stream.rnorm(bn, sqrt(Bn)); // SIGMA UPDATE double shape = (c0 + (double)nstate[j])/2; const Matrix<> ej(nstate[j], 1); for (int i = 0; i SSE = crossprod (ej); double scale =(d0 + SSE[0])/2; Sigma[j] = 1/stream.rgamma(shape, scale); sigma[j] = sqrt(Sigma[j]); } ////////////////////// // 2. Sample P ////////////////////// double shape1 = 0; double shape2 = 0; P(ns-1, ns-1) = 1; for (int j =0; j<(ns-1); ++j){ shape1 = A0(j,j) + (double)nstate[j] - 1; shape2 = A0(j,j+1) + 1; P(j,j) = stream.rbeta(shape1, shape2); P(j,j+1) = 1 - P(j,j); } ////////////////////// // 3. Sample s ////////////////////// Matrix F(n, ns); Matrix pr1(ns, 1); pr1[0] = 1; Matrix py(ns, 1); Matrix pstyt1(ns, 1); Matrix ps = Matrix(n, ns); for (int tt=0; tt unnorm_pstyt = pstyt1%py; const Matrix pstyt = unnorm_pstyt/sum(unnorm_pstyt); for (int j=0; j pstyn = Matrix(ns, 1); double pone = 0.0; int tt = n-2; while (tt >= 0){ int st = s(tt+1); Matrix Pst_1 = ::t(P(_,st-1)); Matrix unnorm_pstyn = F(tt,_)%Pst_1; pstyn = unnorm_pstyn/sum(unnorm_pstyn); if (st==1) s(tt) = 1; else{ pone = pstyn(st-2); if(stream.runif() < pone) s(tt) = st-1; else s(tt) = st;// stay } ps(tt,_) = pstyn; --tt; }// end of while loop // load draws into sample array if (iter >= burnin && ((iter % thin)==0)){ for (int i=0; i 0 && iter % verbose == 0){ Rprintf("\n testpanelSubjectBreak iteration %i of %i \n", (iter+1), tot_iter); for (int j = 0;j(nstate[j])); } Rprintf("\n beta \n"); for (int i = 0; i betast = meanc(beta_store); Matrix beta_st(ns, 1); for (int j = 0; j Sigma_st = meanc(Sigma_store); Matrix P_vec_st = meanc(P_store); const Matrix P_st(ns, ns); for (int j = 0; j< ns*ns; ++j){ P_st[j] = P_vec_st[j]; } ////////////////////// // 1. pdf.beta ////////////////////// Matrix density_beta(nstore, ns); for (int iter = 0; iter nstate(ns, 1); int beta_count = 0; for (int j = 0; j yj = Y((beta_count - nstate[j]), 0, (beta_count - 1), 0); const double precision = 1.0/Sigma_store(iter, j); const double Bn = 1/(B0 + (double)nstate[j]*precision); double bn = Bn*(B0*b0 + sum(yj)*precision); density_beta(iter, j) = dnorm(beta_st(j), bn, sqrt(Bn)); } }// end of pdf.beta double pdf_beta = log(prod(meanc(density_beta))); ////////////////////// // 2. 
pdf.Sigma ////////////////////// Matrix density_Sigma(nstore, ns); for (int iter = 0; iter F(n, ns); Matrix pr1(ns, 1); pr1[0] = 1; Matrix py(ns, 1); Matrix pstyt1(ns, 1); Matrix ps = Matrix(n, ns); for (int tt=0; tt unnorm_pstyt = pstyt1%py; const Matrix pstyt = unnorm_pstyt/sum(unnorm_pstyt); for (int j=0; j pstyn = Matrix(ns, 1); double pone = 0.0; int tt = n-2; while (tt >= 0){ int st = s(tt+1); Matrix Pst_1 = ::t(P(_,st-1)); Matrix unnorm_pstyn = F(tt,_)%Pst_1; pstyn = unnorm_pstyn/sum(unnorm_pstyn); if (st==1) s(tt) = 1; else{ pone = pstyn(st-2); if(stream.runif() < pone) s(tt) = st-1; else s(tt) = st; } ps(tt,_) = pstyn; --tt; }// end of while loop int beta_count = 0; Matrix nstate(ns, 1); for (int j = 0; j yj = Y((beta_count - nstate[j]), 0, (beta_count - 1), 0); const Matrix<> ej(nstate[j], 1); for (int i = 0; i SSE = crossprod (ej); double scale =(d0 + SSE[0])/2; double shape = (c0 + (double)nstate[j])/2; Sigma[j] = stream.rigamma(shape, scale); sigma[j] = sqrt(Sigma[j]); density_Sigma(iter, j) = dinvgamma(Sigma_st[j], shape, scale); } double shape1 = 0; double shape2 = 0; P(ns-1, ns-1) = 1; for (int j =0; j< (ns-1); ++j){ shape1 = A0(j,j) + (double)nstate[j] - 1; shape2 = A0(j,j+1) + 1; // P(j,j) = stream.rbeta(shape1, shape2); P(j,j+1) = 1 - P(j,j); } }// end of pdf.Sigma double pdf_Sigma = log(prod(meanc(density_Sigma))); // 3. pdf.P|beta_st, Sigma_st, S Matrix density_P(nstore, ns); for (int iter = 0; iter < nstore; ++iter){ Matrix F(n, ns); Matrix pr1(ns, 1); pr1[0] = 1; Matrix py(ns, 1); Matrix pstyt1(ns, 1); Matrix ps = Matrix(n, ns); for (int tt=0; tt unnorm_pstyt = pstyt1%py; const Matrix pstyt = unnorm_pstyt/sum(unnorm_pstyt); for (int j=0; j pstyn = Matrix(ns, 1); double pone = 0.0; int tt = n-2; while (tt >= 0){ int st = s(tt+1); Matrix Pst_1 = ::t(P(_,st-1)); Matrix unnorm_pstyn = F(tt,_)%Pst_1; pstyn = unnorm_pstyn/sum(unnorm_pstyn); if (st==1) s(tt) = 1; else{ pone = pstyn(st-2); if(stream.runif() < pone) s(tt) = st-1; else s(tt) = st;// stay } ps(tt,_) = pstyn; --tt; }// end of while loop double shape1 = 0; double shape2 = 0; P(ns-1, ns-1) = 1; // compute addN Matrix P_addN(ns, 1); for (int j = 0; j F = Matrix(n, ns); Matrix like(n, 1); Matrix pr1 = Matrix(ns, 1); pr1[0] = 1; Matrix py(ns, 1); Matrix pstyt1(ns, 1); for (int t=0; t unnorm_pstyt = pstyt1%py; Matrix pstyt = unnorm_pstyt/sum(unnorm_pstyt); for (int j=0; j density_beta_prior(ns, 1); Matrix density_Sigma_prior(ns, 1); Matrix density_P_prior(ns, 1); density_P[ns-1] = 1; // for (int j=0; j 0){ Rprintf("\nlogmarglike = %10.5f\n", logmarglike); Rprintf("loglike = %10.5f\n", loglike); Rprintf("logprior = %10.5f\n", logprior); Rprintf("pdf_beta = %10.5f\n", pdf_beta); Rprintf("pdf_Sigma = %10.5f\n", pdf_Sigma); Rprintf("pdf_P = %10.5f\n", pdf_P); } }// end of marginal likelihood computation } extern "C"{ void MCMCresidualBreakAnalysis(double *betaout, double *Sigmaout, double *psout, const double *Ydata, const int *Yrow, const int *Ycol, const int *m, const int *burnin, const int *mcmc, const int *thin, const int *verbose, const int *uselecuyer, const int *seedarray, const int *lecuyerstream, const double *betastart, const double *Sigmastart, const double *Pstart, const int *statestart, const double *a, const double *b, const double *b0data, const double *B0data, const double *c0, const double *d0, const double *A0data, double *logmarglikeholder, double *loglikeholder, const int *chib){ // pull together Matrix objects const Matrix Y(*Yrow, *Ycol, Ydata); const unsigned int nstore = *mcmc / *thin; const int n 
= Y.rows(); const int ns = *m + 1; // generate starting values Matrix <> beta(ns, 1, betastart); Matrix <> Sigma(ns, 1, Sigmastart); Matrix <> P(ns, ns, Pstart); Matrix s(n, 1, statestart); const Matrix <> A0(ns, ns, A0data); double logmarglike; double loglike; // storage matrices Matrix<> beta_store(nstore, ns); Matrix<> Sigma_store(nstore, ns); Matrix<> P_store(nstore, ns*ns); Matrix<> ps_store(n, ns); Matrix s_store(nstore, n); MCMCPACK_PASSRNG2MODEL(MCMCresidualBreakAnalysis_impl, *m, Y, beta, Sigma, P, s, *b0data, *B0data, *c0, *d0, A0, *burnin, *mcmc, *thin, *verbose, *chib, beta_store, Sigma_store, P_store, ps_store, s_store, logmarglike, loglike); logmarglikeholder[0] = logmarglike; loglikeholder[0] = loglike; // return output for (int i = 0; i<(nstore*ns); ++i){ betaout[i] = beta_store[i]; } for (int i = 0; i<(nstore*ns); ++i){ Sigmaout[i] = Sigma_store[i]; } for (int i = 0; i<(n*ns); ++i){ psout[i] = ps_store[i]; } }// end of MCMCresidualBreakAnalysis }//end extern "C" #endif MCMCpack/src/MCMCregressChange.cc0000755000176000001440000003735512140061657016220 0ustar ripleyusers//////////////////////////////////////////////////////////////////// // MCMCregressChange.cc is a C++ code to estimate // linear regression changepoint model // // Jong Hee Park // Department of Political Science and International Relations // Seoul National University // jongheepark@snu.ac.kr // // 03/03/2009 Written //////////////////////////////////////////////////////////////////// #ifndef MCMCREGRESSCHANGE_CC #define MCMCREGRESSCHANGE_CC #include "MCMCrng.h" #include "MCMCfcds.h" #include "matrix.h" #include "distributions.h" #include "stat.h" #include "la.h" #include "ide.h" #include "smath.h" #include "rng.h" #include "mersenne.h" #include "lecuyer.h" #include // needed to use Rprintf() #include // needed to allow user interrupts using namespace std; using namespace scythe; double lndinvgamma_jhp (const double x, const double shape, const double scale){ double log_density = shape *::log(scale) - lngammafn(shape) - (shape + 1) * ::log(x) - (scale/x); return (log_density); } template Matrix gaussian_state_sampler(rng& stream, const int m, const Matrix& Y, const Matrix& X, const Matrix& beta, const Matrix& Sigma, const Matrix& P){ const int ns = m + 1; const int n = Y.rows(); Matrix F(n, ns); Matrix pr1(ns, 1); pr1[0] = 1; Matrix py(ns, 1); Matrix pstyt1(ns, 1); Matrix s(n, 1); Matrix ps = Matrix(n, ns); for (int tt=0; tt mu = X(tt,_)*::t(beta); for (int j = 0; j< ns; ++j){ py[j] = dnorm(Y[tt], mu[j], sqrt(Sigma[j])); } if (tt==0) pstyt1 = pr1; else { pstyt1 = ::t(F(tt-1,_)*P); } Matrix unnorm_pstyt = pstyt1%py; const Matrix pstyt = unnorm_pstyt/sum(unnorm_pstyt); for (int j=0; j pstyn = Matrix(ns, 1); double pone = 0.0; int tt = n-2; while (tt >= 0){ int st = s(tt+1); Matrix Pst_1 = ::t(P(_,st-1)); Matrix unnorm_pstyn = F(tt,_)%Pst_1; pstyn = unnorm_pstyn/sum(unnorm_pstyn); if (st==1) s(tt) = 1; else{ pone = pstyn(st-2); if(stream.runif() < pone) s(tt) = st-1; else s(tt) = st; } ps(tt,_) = pstyn; --tt; }// end of while loop Matrix Sout(n, ns+1); Sout(_, 0) = s(_,0); for (int j = 0; j void MCMCregressChange_impl(rng& stream, const double m, const Matrix<>& Y, const Matrix<>& X, Matrix<>& beta, Matrix<>& Sigma, Matrix<>& P, Matrix& s, Matrix<>& b0, Matrix<>& B0, const double c0, const double d0, const Matrix<>& A0, unsigned int burnin, unsigned int mcmc, unsigned int thin, unsigned int verbose, bool chib, Matrix<>& beta_store, Matrix<>& Sigma_store, Matrix<>& P_store, Matrix<>& ps_store, Matrix& s_store, 
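// --------------------------------------------------------------------
// Expository sketch (added; not part of the original source). The
// change-point model restricts the hidden-state transition matrix P to
// an upper-bidiagonal form -- each regime either persists or moves to
// the next regime, and the last regime is absorbing -- which is the
// structure the P sampling step below fills in with Beta draws. For
// m = 2 breaks (ns = 3 regimes):
//
//        [ p11  1-p11   0    ]
//    P = [  0    p22   1-p22 ]
//        [  0     0     1    ]
//
// built, for example, as (pjj being a hypothetical vector of stay
// probabilities):
//
//   scythe::Matrix<double> P(ns, ns);          // zero-filled
//   for (int j = 0; j < ns - 1; ++j) {
//     P(j, j)     = pjj[j];                    // stay in regime j
//     P(j, j + 1) = 1.0 - pjj[j];              // jump to regime j + 1
//   }
//   P(ns - 1, ns - 1) = 1.0;                   // absorbing final regime
// --------------------------------------------------------------------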
double& logmarglike) { // define constants and form cross-product matrices const int tot_iter = burnin + mcmc; //total iterations const int nstore = mcmc / thin; // number of draws to store const int n = Y.rows(); const int ns = m + 1; // number of states const int k = X.cols(); const Matrix<> B0inv = invpd(B0); Matrix <> sigma(ns, 1); //MCMC loop unsigned int count = 0; for (int iter = 0; iter < tot_iter; ++iter){ ////////////////////// // 1. Sample beta and Sigma ////////////////////// int beta_count = 0; Matrix nstate(ns, 1); for (int j = 0; j yj = Y((beta_count - nstate[j]), 0, (beta_count - 1), 0); Matrix Xj = X((beta_count - nstate[j]), 0, (beta_count - 1), k-1); Matrix Bn = invpd(B0 + t(Xj)*Xj/Sigma[j]); Matrix bn = Bn*(B0*b0 + t(Xj)*yj/Sigma[j]); beta(j,_) = stream.rmvnorm(bn, Bn); // SIGMA UPDATE double shape = (c0 + (double)nstate[j])/2; Matrix<> ysimul_j = Xj*::t(beta(j,_)); // Matrix ej = yj - ysimul_j; Matrix sum_ej = t(ej)*ej; double scale =(d0 + sum_ej[0])/2; Sigma[j] = 1/stream.rgamma(shape, scale); sigma[j] = sqrt(Sigma[j]); }// end of sampling beta and Sigma ////////////////////// // 2. Sample P ////////////////////// double shape1 = 0; double shape2 = 0; P(ns-1, ns-1) = 1; for (int j =0; j<(ns-1); ++j){ shape1 = A0(j,j) + (double)nstate[j] - 1; shape2 = A0(j,j+1) + 1; // SS(j,j+1); P(j,j) = stream.rbeta(shape1, shape2); P(j,j+1) = 1 - P(j,j); } ////////////////////// // 3. Sample s ////////////////////// Matrix Sout = gaussian_state_sampler(stream, m, Y, X, beta, Sigma, P); Matrix s = Sout(_, 0); Matrix<> ps(n, ns); for (int j = 0; j= burnin && ((iter % thin)==0)){ Matrix tbeta = ::t(beta); //transpose beta for R output for (int i=0; i<(ns*k); ++i) beta_store(count,i) = tbeta[i];// stored by the order of (11, 12, 13, 21, 22, 23) for (int i=0; i 0 && iter % verbose == 0){ Rprintf("\nMCMCregressChange iteration %i of %i \n", (iter+1), tot_iter); for (int j = 0;j(nstate[j])); } Rprintf("\n beta \n"); for (int i = 0; i betast = meanc(beta_store); //meanc(beta_store)=(11, 12, 13, 21, 22, 23) Matrix beta_st(ns, k); for (int j = 0; j Sigma_st = meanc(Sigma_store); Matrix P_vec_st = meanc(P_store); const Matrix P_st(ns, ns); for (int j = 0; j< ns*ns; ++j){ P_st[j] = P_vec_st[j]; } ////////////////////// // 1. pdf.beta ////////////////////// Matrix density_beta(nstore, ns); for (int iter = 0; iter nstate(ns, 1); // contains total numbers of each state int beta_count = 0; for (int j = 0; j yj = Y((beta_count - nstate[j]), 0, (beta_count - 1), 0); const Matrix Xj = X((beta_count - nstate[j]), 0, (beta_count - 1), k-1); const double precision = 1.0/Sigma_store(iter, j); const Matrix XpX = (::t(Xj)*Xj); const Matrix XpY = (::t(Xj)*yj); const Matrix Bn = invpd(B0 + XpX*precision); const Matrix bn = Bn*gaxpy(B0, b0, XpY*precision); if (k == 1){ density_beta(iter, j) = dnorm(beta_st(j), bn(0), sqrt(Bn(0))); } else{ density_beta(iter, j) = ::exp(lndmvn(::t(beta_st(j,_)), bn, Bn)); } } } double pdf_beta = log(prod(meanc(density_beta))); ////////////////////// // 2. 
pdf.Sigma ////////////////////// Matrix density_Sigma(nstore, ns); for (int iter = 0; iter Sout = gaussian_state_sampler(stream, m, Y, X, beta_st, Sigma, P); Matrix s = Sout(_, 0); int beta_count = 0; Matrix nstate(ns, 1); for (int j = 0; j yj = Y((beta_count - nstate[j]), 0, (beta_count - 1), 0); Matrix Xj = X((beta_count - nstate[j]), 0, (beta_count - 1), k-1); Matrix ej = yj - Xj*::t(beta_st(j,_)); Matrix sum_ej = ::t(ej)*ej; double shape = (c0 + (double)nstate[j])/2; double scale =(d0 + sum_ej[0])/2; Sigma[j] = stream.rigamma(shape, scale); density_Sigma(iter, j) = ::exp(lndinvgamma_jhp(Sigma_st[j], shape, scale)); }// end of sampling beta and Sigma // STEP 2.3 P|S double shape1 = 0; double shape2 = 0; P(ns-1, ns-1) = 1; //no jump at the last state for (int j =0; j< (ns-1); ++j){ shape1 = A0(j,j) + (double)nstate[j] - 1; shape2 = A0(j,j+1) + 1; // P(j,j) = stream.rbeta(shape1, shape2); P(j,j+1) = 1 - P(j,j); } }// end of pdf.Sigma double pdf_Sigma = log(prod(meanc(density_Sigma))); ////////////////////// // 3. pdf.P|beta_st, Sigma_st, S ////////////////////// Matrix density_P(nstore, ns); for (int iter = 0; iter < nstore; ++iter){ // STEP 2.1 S|y, beta.st, Sigma, P Matrix Sout = gaussian_state_sampler(stream, m, Y, X, beta_st, Sigma_st, P); Matrix s = Sout(_, 0); double shape1 = 0; double shape2 = 0; P(ns-1, ns-1) = 1; //no jump at the last state // compute addN Matrix P_addN(ns, 1); for (int j = 0; j F = Matrix(n, ns); Matrix like(n, 1); Matrix pr1 = Matrix(ns, 1); pr1[0] = 1; Matrix py(ns, 1); Matrix pstyt1(ns, 1); for (int t=0; t mu = X(t,_)*::t(beta_st); //k by 1 vector for (int j = 0; j< ns; ++j){ py[j] = dnorm(Y[t], mu[j], sqrt(Sigma_st[j])); } if (t==0) pstyt1 = pr1; else { pstyt1 = ::t(F(t-1,_)*P_st); // make it an ns by 1 matrix } Matrix unnorm_pstyt = pstyt1%py; Matrix pstyt = unnorm_pstyt/sum(unnorm_pstyt); // pstyt = Pr(st|Yt) for (int j=0; j density_beta_prior(ns, 1); Matrix density_Sigma_prior(ns, 1); Matrix density_P_prior(ns, 1); density_P[ns-1] = 1; // if (k == 1){ for (int j=0; j 0){ Rprintf("logmarglike = %10.5f\n", logmarglike); Rprintf("loglike = %10.5f\n", loglike); Rprintf("logprior = %10.5f\n", logprior); Rprintf("pdf_beta = %10.5f\n", pdf_beta); Rprintf("pdf_Sigma = %10.5f\n", pdf_Sigma); Rprintf("pdf_P = %10.5f\n", pdf_P); } }// end of marginal likelihood computation } //////////////////////////////////////////// // Start MCMCregressChangepoint function /////////////////////////////////////////// extern "C"{ void MCMCregressChange(double *betaout, double *Sigmaout, double *Pout, double *psout, double *sout, const double *Ydata, const int *Yrow, const int *Ycol, const double *Xdata, const int *Xrow, const int *Xcol, const int *m, const int *burnin, const int *mcmc, const int *thin, const int *verbose, const int *uselecuyer, const int *seedarray, const int *lecuyerstream, const double *betastart, const double *Sigmastart, const double *Pstart, const int *statestart, const double *a, const double *b, const double *b0data, const double *B0data, const double *c0, const double *d0, const double *A0data, double *logmarglikeholder, double *loglikeholder, const int *chib){ // pull together Matrix objects const Matrix Y(*Yrow, *Ycol, Ydata); const Matrix X(*Xrow, *Xcol, Xdata); const unsigned int tot_iter = *burnin + *mcmc; //total iterations const unsigned int nstore = *mcmc / *thin; // number of draws to store const int n = Y.rows(); const int k = X.cols(); const int ns = *m + 1; // number of states // generate starting values Matrix <> beta(ns, k, betastart); Matrix 
<> Sigma(ns, 1, Sigmastart); Matrix <> P(ns, ns, Pstart); Matrix s(n, 1, statestart); Matrix <> b0(k, 1, b0data); Matrix <> B0(k, k, B0data); const Matrix <> A0(ns, ns, A0data); double logmarglike; // storage matrices Matrix<> beta_store(nstore, ns*k); Matrix<> Sigma_store(nstore, ns); Matrix<> P_store(nstore, ns*ns); Matrix<> ps_store(n, ns); Matrix s_store(nstore, n); MCMCPACK_PASSRNG2MODEL(MCMCregressChange_impl, *m, Y, X, beta, Sigma, P, s, b0, B0, *c0, *d0, A0, *burnin, *mcmc, *thin, *verbose, *chib, beta_store, Sigma_store, P_store, ps_store, s_store, logmarglike) logmarglikeholder[0] = logmarglike; // return output for (int i = 0; i<(nstore*ns*k); ++i){ betaout[i] = beta_store[i]; } for (int i = 0; i<(nstore*ns); ++i){ Sigmaout[i] = Sigma_store[i]; } for (int i = 0; i<(nstore*ns*ns); ++i){ Pout[i] = P_store[i]; } for (int i = 0; i<(n*ns); ++i){ psout[i] = ps_store[i]; } for (int i = 0; i<(nstore*n); ++i){ sout[i] = s_store[i]; } }// end of MCMCpoissonChange }//end extern "C" #endif MCMCpack/src/MCMCregress.cc0000644000176000001440000001660712140061657015104 0ustar ripleyusers////////////////////////////////////////////////////////////////////////// // MCMCregress.cc is a program that simualates draws from the posterior // density of a linear regression model with Gaussian errors. // // The initial version of this file was generated by the // auto.Scythe.call() function in the MCMCpack R package // written by: // // Andrew D. Martin // Dept. of Political Science // Washington University in St. Louis // admartin@wustl.edu // // Kevin M. Quinn // Dept. of Government // Harvard University // kevin_quinn@harvard.edu // // This software is distributed under the terms of the GNU GENERAL // PUBLIC LICENSE Version 2, June 1991. See the package LICENSE // file for more information. // // This file was initially generated on Fri Jul 23 15:07:21 2004 // // ADM and KQ 10/10/2002 [ported to Scythe0.3] // ADM 6/2/04 [re-written using template] // KQ 6/18/04 [modified to meet new developer specification] // ADM 7/22/04 [modified to work with new Scythe and rngs] // DBP 7/1/07 [ported to scythe 1.0.x] // // Copyright (C) 2003-2007 Andrew D. Martin and Kevin M. Quinn // Copyright (C) 2007-present Andrew D. Martin, Kevin M. Quinn, // and Jong Hee Park ////////////////////////////////////////////////////////////////////////// #ifndef MCMCREGRESS_CC #define MCMCREGRESS_CC #include "MCMCrng.h" #include "MCMCfcds.h" #include "matrix.h" #include "distributions.h" #include "stat.h" #include "la.h" #include "ide.h" #include "smath.h" #include "rng.h" #include // needed to use Rprintf() #include // needed to allow user interrupts using namespace std; using namespace scythe; static double digamma(double theta, double a, double b) { double logf = a * log(b) - lngammafn(a) + -(a+1) * log(theta) + -b/theta; return exp(logf); //pow(b, a) / gammafn(a) * pow(theta, -(a+1)) * exp(-b/theta); } /* MCMCregress implementation. Takes Matrix<> reference which it * fills with the posterior. The logmarklike double reference is * filled with the log marginal likelihood if asked for. 
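 *
 * [Expository note, added; not part of the original comment.] When
 * chib is true the marginal likelihood is computed with Chib's (1995)
 * basic marginal likelihood identity evaluated at the posterior means
 * (beta*, sigma2*):
 *
 *   log m(y) = log f(y | beta*, sigma2*) + log pi(beta*, sigma2*)
 *              - log pi(beta* | y, sigma2*) - log pi(sigma2* | y)
 *
 * which is the  loglike + logprior - pdf_beta - pdf_sigma2  line in
 * the code: pdf_beta is the conditional normal ordinate available in
 * closed form, and pdf_sigma2 is a Rao-Blackwellized average of
 * inverse-gamma ordinates over the stored Gibbs draws. Note that the
 * static helper digamma() defined above is an inverse-gamma density
 * (despite its name), not the digamma function.
 *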
*/ template void MCMCregress_impl (rng& stream, const Matrix<>& Y, const Matrix<>& X, Matrix<>& beta, const Matrix<>& b0, const Matrix<>& B0, double c0, double d0, unsigned int burnin, unsigned int mcmc, unsigned int thin, unsigned int verbose, bool chib, Matrix<>& result, double& logmarglike) { // define constants and form cross-product matrices const unsigned int tot_iter = burnin + mcmc; //total iterations const unsigned int nstore = mcmc / thin; // number of draws to store const unsigned int k = X.cols (); const Matrix<> XpX = crossprod(X); const Matrix<> XpY = t(X) * Y; // storage matrices Matrix<> betamatrix (k, nstore); Matrix<> sigmamatrix (1, nstore); // Gibbs sampler unsigned int count = 0; for (unsigned int iter = 0; iter < tot_iter; ++iter) { double sigma2 = NormIGregress_sigma2_draw (X, Y, beta, c0, d0, stream); beta = NormNormregress_beta_draw (XpX, XpY, b0, B0, sigma2, stream); // store draws in storage matrix (or matrices) if (iter >= burnin && (iter % thin == 0)) { sigmamatrix (0, count) = sigma2; betamatrix(_, count) = beta; ++count; } // print output to stdout if(verbose > 0 && iter % verbose == 0) { Rprintf("\n\nMCMCregress iteration %i of %i \n", (iter+1), tot_iter); Rprintf("beta = \n"); for (unsigned int j=0; j betastar = t(meanc(t(betamatrix))); // step 1 Matrix<> sigma2_density(nstore, 1); // second set of Gibbs scans for (unsigned int iter = 0; iter < nstore; ++iter) { // double sigma2 = sigmamatrix(iter); // beta = NormNormregress_beta_draw (XpX, XpY, b0, B0, sigma2, stream); beta = betamatrix (_, iter); const Matrix<> e = gaxpy(X, (-1*beta), Y); const Matrix<> SSE = crossprod (e); const double c_post = (c0 + X.rows ()) * 0.5; const double d_post = (d0 + SSE[0]) * 0.5; sigma2_density(iter) = digamma(sigma2star, c_post, d_post); R_CheckUserInterrupt(); } // end MCMC loop double pdf_sigma2 = log(mean(sigma2_density)); // step 2 const Matrix<> Bn = invpd (B0 + XpX /sigma2star); const Matrix<> bn = Bn * gaxpy(B0, b0, XpY/sigma2star); double pdf_beta = 0; if (k == 1){ pdf_beta = log(dnorm(betastar(0), bn(0), sqrt(Bn(0)))); } else { pdf_beta = lndmvn(betastar, bn, Bn); } // calculate loglikelihood at (betastar, sigma2star) Matrix<> eta = X * betastar; double loglike_sum = 0.0; for (unsigned int i = 0; i < X.rows(); ++i) { loglike_sum += lndnorm(Y(i), eta(i), sigmastar); } double loglike = loglike_sum; // calculate log prior ordinate double logprior = 0; if (k == 1){ logprior = log(digamma(sigma2star, c0/2.0, d0/2.0)) + log(dnorm(betastar(0), b0(0), 1/sqrt(B0(0)))); } else{ logprior = log(digamma(sigma2star, c0/2.0, d0/2.0)) + lndmvn(betastar, b0, invpd(B0)); } // put pieces together and print the marginal likelihood logmarglike = loglike + logprior - pdf_beta - pdf_sigma2; if (verbose >0 ){ Rprintf("\nlogmarglike = %10.5f\n", logmarglike); Rprintf("loglike = %10.5f\n", loglike); Rprintf("log_prior = %10.5f\n", logprior); Rprintf("log_beta = %10.5f\n", pdf_beta); Rprintf("log_sigma2 = %10.5f\n", pdf_sigma2); } } result = cbind(t(betamatrix), t(sigmamatrix)); } // end MCMCregress extern "C" { void MCMCregress(double *sampledata, const int *samplerow, const int *samplecol, const double *Ydata, const int *Yrow, const int *Ycol, const double *Xdata, const int *Xrow, const int *Xcol, const int *burnin, const int *mcmc, const int *thin, const int *uselecuyer, const int *seedarray, const int *lecuyerstream, const int *verbose, const double *betastartdata, const int *betastartrow, const int *betastartcol, const double *b0data, const int *b0row, const int *b0col, const double 
*B0data, const int *B0row, const int *B0col, const double *c0, const double *d0, double* logmarglikeholder, const int* chib) { // pull together Matrix objects Matrix<> Y(*Yrow, *Ycol, Ydata); Matrix<> X(*Xrow, *Xcol, Xdata); Matrix<> betastart(*betastartrow, *betastartcol, betastartdata); Matrix<> b0(*b0row, *b0col, b0data); Matrix<> B0(*B0row, *B0col, B0data); double logmarglike; Matrix<> storagematrix; MCMCPACK_PASSRNG2MODEL(MCMCregress_impl, Y, X, betastart, b0, B0, *c0, *d0, *burnin, *mcmc, *thin, *verbose, *chib, storagematrix, logmarglike); logmarglikeholder[0] = logmarglike; const unsigned int size = *samplerow * *samplecol; for (unsigned int i = 0; i < size; ++i) sampledata[i] = storagematrix(i); } } #endif MCMCpack/src/MCMCquantreg.cc0000644000176000001440000001027112140061657015247 0ustar ripleyusers// MCMCquantreg.cc is a function that draws from the posterior // distribution of a linear regression model with errors having tauth quantile equal to zero. // // The initial version of this file was generated by the // auto.Scythe.call() function in the MCMCpack R package // written by: // // Andrew D. Martin // Dept. of Political Science // Washington University in St. Louis // admartin@wustl.edu // // Kevin M. Quinn // Dept. of Government // Harvard University // kevin_quinn@harvard.edu // // This software is distributed under the terms of the GNU GENERAL // PUBLIC LICENSE Version 2, June 1991. See the package LICENSE // file for more information. // // Copyright (C) 2009 Andrew D. Martin and Kevin M. Quinn // // This file was initially generated on Wed Apr 1 11:39:12 2009 // // The function was rewritten by: // // Craig Reed // Department of Mathematical Sciences // Brunel University // craig.reed@brunel.ac.uk // // CR 9/4/09 [Rewritten function using templates] // // CR 7/12/09 [Placed MCMCmedreg within MCMCquantreg] #ifndef MCMCQUANTREG_CC #define MCMCQUANTREG_CC #include "MCMCrng.h" #include "MCMCfcds.h" #include "matrix.h" #include "distributions.h" #include "stat.h" #include "la.h" #include "ide.h" #include "smath.h" #include "rng.h" #include // needed to use Rprintf() #include // needed to allow user interrupts using namespace std; using namespace scythe; /* MCMCquantreg implementation. Takes Matrix<> reference which it * fills with the posterior. 
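 *
 * [Expository note, added; not part of the original comment.] The
 * working likelihood treats the errors as asymmetric Laplace with
 * tau-th quantile zero, so the posterior concentrates around the
 * tau-th conditional quantile of y given X (tau = 0.5 reproduces the
 * former MCMCmedreg median regression). Judging from the function
 * names, each Gibbs scan first draws latent scale weights given the
 * absolute residuals (ALaplaceIGaussregress_weights_draw, the
 * inverse-Gaussian mixing step of the normal scale-mixture
 * representation of the asymmetric Laplace) and then draws beta from
 * the implied conditional normal (ALaplaceNormregress_beta_draw).
 *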
*/ template void MCMCquantreg_impl (rng& stream, double tau, Matrix<>& Y, const Matrix<>& X, Matrix<>& beta, const Matrix<>& b0, const Matrix<>& B0, unsigned int burnin, unsigned int mcmc, unsigned int thin, unsigned int verbose, Matrix<>& result) { // define constants const unsigned int tot_iter = burnin + mcmc; //total iterations const unsigned int nstore = mcmc / thin; // number of draws to store const unsigned int k = X.cols (); // storage matrices Matrix<> betamatrix (k, nstore); // Gibbs sampler unsigned int count = 0; for (unsigned int iter = 0; iter < tot_iter; ++iter) { Matrix<> e = gaxpy(X, (-1*beta), Y); Matrix<> abse = fabs(e); Matrix<> weights = ALaplaceIGaussregress_weights_draw (abse, stream); beta = ALaplaceNormregress_beta_draw (tau, X, Y, weights, b0, B0, stream); // store draws in storage matrix if (iter >= burnin && (iter % thin == 0)) { betamatrix(_, count) = beta; ++count; } // print output to stdout if(verbose > 0 && iter % verbose == 0) { Rprintf("\n\nMCMCquantreg iteration %i of %i \n", (iter+1), tot_iter); Rprintf("beta = \n"); for (unsigned int j=0; j Y(*Yrow, *Ycol, Ydata); Matrix<> X(*Xrow, *Xcol, Xdata); Matrix<> betastart(*betastartrow, *betastartcol, betastartdata); Matrix<> b0(*b0row, *b0col, b0data); Matrix<> B0(*B0row, *B0col, B0data); Matrix<> storagematrix; MCMCPACK_PASSRNG2MODEL(MCMCquantreg_impl, *tau, Y, X, betastart, b0, B0, *burnin, *mcmc, *thin, *verbose, storagematrix); const unsigned int size = *samplerow * *samplecol; for (unsigned int i = 0; i < size; ++i) sampledata[i] = storagematrix(i); } } #endif MCMCpack/src/MCMCprobitres.cc0000644000176000001440000001407512140061657015440 0ustar ripleyusers////////////////////////////////////////////////////////////////////////// // MCMCprobitres.cc is a program that simulates draws from the posterior // density of a probit regression model and returns latent residuals. // // Andrew D. Martin // Dept. of Political Science // Washington University in St. Louis // admartin@wustl.edu // // Kevin M. Quinn // Dept. of Government // Harvard University // kevin_quinn@harvard.edu // // This software is distributed under the terms of the GNU GENERAL // PUBLIC LICENSE Version 2, June 1991. See the package LICENSE // file for more information. // // updated to the new version of Scythe 7/26/2004 KQ // updated to Scythe 1.0.X 7/28/2007 KQ // // Copyright (C) 2003-2007 Andrew D. Martin and Kevin M. Quinn // Copyright (C) 2007-present Andrew D. Martin, Kevin M. Quinn, // and Jong Hee Park ////////////////////////////////////////////////////////////////////////// #ifndef MCMCPROBITRES_CC #define MCMCPROBITRES_CC #include "MCMCrng.h" #include "MCMCfcds.h" #include "matrix.h" #include "distributions.h" #include "stat.h" #include "la.h" #include "ide.h" #include "smath.h" #include "rng.h" #include "mersenne.h" #include "lecuyer.h" #include // needed to use Rprintf() #include // needed to allow user interrupts using namespace std; using namespace scythe; /* MCMCprobitres implementation. Takes Matrix<> reference and fills with the * posterior. 
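// ----------------------------------------------------------------------
// Illustrative aside: MCMCquantreg_impl above exploits the
// scale-mixture-of-normals representation of the asymmetric Laplace
// error, so each sweep draws latent weights from an inverse-Gaussian
// full conditional (ALaplaceIGaussregress_weights_draw).  The sketch
// below is the classic Michael/Schucany/Haas (1976) generator for an
// inverse-Gaussian(mu, lambda) variate in plain C++; the exact
// parameterization used inside MCMCfcds.h may differ, so treat this
// as a generic reference implementation only.
#include <cmath>
#include <random>

inline double rinvgauss(double mu, double lambda, std::mt19937& rng) {
  std::normal_distribution<double> norm(0.0, 1.0);
  std::uniform_real_distribution<double> unif(0.0, 1.0);
  const double nu = norm(rng);
  const double y  = nu * nu;
  const double x  = mu + (mu * mu * y) / (2.0 * lambda)
                  - (mu / (2.0 * lambda))
                    * std::sqrt(4.0 * mu * lambda * y + mu * mu * y * y);
  return (unif(rng) <= mu / (mu + x)) ? x : (mu * mu) / x;
}
// ----------------------------------------------------------------------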
*/ template void MCMCprobitres_impl (rng& stream, const Matrix<>& Y, const Matrix<>& X, Matrix<>& beta, Matrix<>& resvec, const Matrix<>& b0, const Matrix<>& B0, unsigned int burnin, unsigned int mcmc, unsigned int thin, unsigned int verbose, bool chib, Matrix<>& result, double& logmarglike) { // define constants and from cross-product matrices const unsigned int tot_iter = burnin + mcmc; // total number of mcmc iterations const unsigned int nstore = mcmc / thin; // number of draws to store const unsigned int k = X.cols(); const unsigned int N = X.rows(); const Matrix<> XpX = crossprod(X); const Matrix<> B0inv = invpd(B0); // storage matrix or matrices Matrix<> beta_store(nstore, k); Matrix<> Z_store(nstore, N); // initialize Z Matrix<> Z(N,1); // MCMC sampling starts here unsigned int count = 0; for (unsigned int iter = 0; iter < tot_iter; ++iter){ // [Z| beta, y] const Matrix<> Z_mean = X * beta; for (unsigned int i=0; i XpZ = t(X) * Z; beta = NormNormregress_beta_draw(XpX, XpZ, b0, B0, 1.0, stream); // store values in matrices if (iter >= burnin && ((iter % thin)==0)){ for (unsigned int j = 0; j < k; j++){ beta_store(count, j) = beta[j]; } Z_store(count,_) = Z; for (unsigned int j=0; j<(resvec.rows()); ++j){ const int i = static_cast(resvec[j]) - 1; beta_store(count, j+k) = Z[i] - Z_mean[i]; } ++count; } // print output to stdout if(verbose > 0 && iter % verbose == 0){ Rprintf("\n\nMCMCprobit iteration %i of %i \n", (iter+1), tot_iter); Rprintf("beta = \n"); for (unsigned int j=0; j beta_star = meanc(beta_store); Matrix density_beta(nstore, 1); for (int iter = 0; iter Z_reduced = Z_store(iter,_); const Matrix XpZ = (::t(X)*Z_reduced); const Matrix Bn = invpd(B0inv + XpX); const Matrix bn = Bn*gaxpy(B0inv, b0, XpZ); density_beta(iter) = exp(lndmvn(beta_star, bn, Bn)); } double logbeta = log(prod(meanc(density_beta))); double loglike = 0.0; Matrix<> eta = X * beta_star; for (unsigned int i = 0; i < X.rows(); ++i) { double phi = pnorm(eta(i), 0, 1); loglike += log(dbinom(Y(i), 1, phi)); } // calculate log prior ordinate double logprior = lndmvn(beta_star, b0, B0inv); logmarglike = loglike + logprior - logbeta; Rprintf("\n logmarglike %10.5f", logmarglike, "\n"); Rprintf("\n loglike %10.5f", loglike, "\n"); }// end of marginal likelihood computation result = beta_store; } extern "C"{ void MCMCprobitres(double *sampledata, const int *samplerow, const int *samplecol, const double *Ydata, const int *Yrow, const int *Ycol, const double *Xdata, const int *Xrow, const int *Xcol, const double *resvecdata, const int *resvecrow, const int *resveccol, const int *burnin, const int *mcmc, const int *thin, const int *uselecuyer, const int *seedarray, const int *lecuyerstream, const int *verbose, const double *betastartdata, const int *betastartrow, const int *betastartcol, const double *b0data, const int *b0row, const int *b0col, const double *B0data, const int *B0row, const int *B0col, double *logmarglikeholder, // double *loglikeholder, const int *chib) { // pull together Matrix objects const Matrix <> Y(*Yrow, *Ycol, Ydata); const Matrix <> X(*Xrow, *Xcol, Xdata); Matrix <> resvec(*resvecrow, *resveccol, resvecdata); Matrix <> beta(*betastartrow, *betastartcol, betastartdata); const Matrix <> b0(*b0row, *b0col, b0data); const Matrix <> B0(*B0row, *B0col, B0data); double logmarglike; Matrix<> storagematrix; MCMCPACK_PASSRNG2MODEL(MCMCprobitres_impl, Y, X, beta, resvec, b0, B0, *burnin, *mcmc, *thin, *verbose, *chib, storagematrix, logmarglike); // return output const unsigned int size = *samplerow * 
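// ----------------------------------------------------------------------
// Illustrative aside: the Albert-Chib step above ([Z | beta, y]) samples
// each latent utility from a normal truncated to the positive half-line
// when y_i = 1 and to the negative half-line when y_i = 0.  Scythe's
// rtbnorm_combo / rtanorm_combo switch between several algorithms; the
// standalone sketch below is only the naive rejection version, which is
// adequate when |mean| is small.  The helper name is hypothetical.
#include <random>

inline double rtrunc_norm_halfline(double mean, bool positive,
                                   std::mt19937& rng) {
  std::normal_distribution<double> norm(mean, 1.0);
  for (;;) {
    const double z = norm(rng);
    if (positive ? (z > 0.0) : (z <= 0.0)) return z;  // accept
  }
}
// Usage inside a data-augmentation sweep:
//   z[i] = rtrunc_norm_halfline(eta[i], y[i] == 1, rng);
// ----------------------------------------------------------------------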
*samplecol; for (unsigned int i=0; i #include using namespace std; using namespace scythe; // probit state sampler template Matrix<> probit_state_sampler(rng& stream, const int m, const Matrix& Y, const Matrix& X, const Matrix& beta, const Matrix& P){ const int ns = m + 1; const int n = Y.rows(); Matrix F = Matrix(n, ns); Matrix pr1 = Matrix(ns, 1); pr1[0] = 1; Matrix py(ns, 1); Matrix pstyt1(ns, 1); for (int t=0; t mu = X(t,_)*::t(beta); for (int j=0; j unnorm_pstyt = pstyt1%py; const Matrix pstyt = unnorm_pstyt/sum(unnorm_pstyt); for (int j=0; j s(n, 1); Matrix ps = Matrix(n, ns); ps(n-1,_) = F(n-1,_); s(n-1) = ns; Matrix pstyn = Matrix(ns, 1); double pone = 0.0; int t = n-2; while (t >= 0){ int st = s(t+1); Matrix Pst_1 = ::t(P(_,st-1)); Matrix unnorm_pstyn = F(t,_)%Pst_1; pstyn = unnorm_pstyn/sum(unnorm_pstyn); if (st==1) s(t) = 1; else{ pone = pstyn(st-2); if(stream.runif() < pone) s(t) = st-1; else s(t) = st; } ps(t,_) = pstyn; --t; } Matrix Sout(n, ns+1); Sout(_, 0) = s(_,0); for (int j = 0; j void MCMCprobitChange_impl(rng& stream, const int m, const Matrix<>& Y, const Matrix<>& X, Matrix<>& beta, Matrix<>& P, Matrix<>& b0, Matrix<>& B0, const Matrix<>& A0, unsigned int burnin, unsigned int mcmc, unsigned int thin, unsigned int verbose, bool chib, Matrix<>& beta_store, Matrix<>& Z_store, Matrix<>& P_store, Matrix<>& ps_store, Matrix& s_store, double& logmarglike, double& loglike) { const int tot_iter = burnin + mcmc; const int nstore = mcmc / thin; const int n = Y.rows(); const int ns = m + 1; const int k = X.cols(); const Matrix<> B0inv = invpd(B0); Matrix<> Z(n, 1); unsigned int count = 0; for (int iter = 0; iter < tot_iter; ++iter){ // 1. Sample s Matrix<> Sout = probit_state_sampler(stream, m, Y, X, beta, P); Matrix s = Sout(_, 0); Matrix ps(n, ns); for (int j = 0; j mu = X(i,_)*t(beta(s[i]-1,_)); double muj = mu[0]; if(muj>200){ muj = 200; } if(muj<-200){ muj = -200; } if (Y[i] == 1.0) Z[i] = stream.rtbnorm_combo(muj, 1.0, 0); if (Y[i] == 0.0) Z[i] = stream.rtanorm_combo(muj, 1.0, 0); } // 3. Sample beta int beta_count = 0; Matrix nstate(ns, 1); for (int j = 0; j Zj = Z((beta_count - nstate[j]), 0, (beta_count - 1), 0); const Matrix Xj = X((beta_count - nstate[j]), 0, (beta_count - 1), k-1); const Matrix XpX = t(Xj)*Xj; const Matrix XpZ = t(Xj)*Zj; beta(j,_) = NormNormregress_beta_draw(XpX, XpZ, b0, B0, 1.0, stream); } // 4. Sample P double shape1 = 0; double shape2 = 0; P(ns-1, ns-1) = 1; for (int j =0; j< (ns-1); ++j){ shape1 = A0(j,j) + nstate[j] - 1; shape2 = A0(j,j+1) + 1; // P(j,j) = stream.rbeta(shape1, shape2); P(j,j+1) = 1 - P(j,j); } // load draws into sample array if (iter >= burnin && ((iter % thin)==0)){ Matrix tbeta = ::t(beta); for (int i=0; i<(ns*k); ++i) beta_store(count,i) = tbeta[i]; for (int j=0; j 0 && iter % verbose == 0){ Rprintf("\n\nMCMCprobitChange iteration %i of %i \n", (iter+1), tot_iter); for (int j = 0;j betast = meanc(beta_store); Matrix beta_st(ns, k); for (int j = 0; j P_vec_st = meanc(P_store); const Matrix P_st(ns, ns); for (int j = 0; j< ns*ns; ++j){ P_st[j] = P_vec_st[j]; } // 1. 
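// ----------------------------------------------------------------------
// Illustrative aside: probit_state_sampler above is a forward-filter
// backward-sample (FFBS) pass for the hidden regime sequence.  A compact
// standalone version for a generic transition matrix P and per-observation,
// per-state emission probabilities py(t, j) is sketched below; unlike the
// changepoint code above it samples the final state from the filtered
// distribution rather than pinning it to the last regime, and the name
// ffbs is hypothetical.
#include <cstddef>
#include <random>
#include <vector>

// py[t][j] = p(y_t | state j); P[i][j] = Pr(state j at t+1 | state i at t).
inline std::vector<int> ffbs(const std::vector<std::vector<double> >& py,
                             const std::vector<std::vector<double> >& P,
                             std::mt19937& rng) {
  const std::size_t n = py.size(), ns = P.size();
  std::vector<std::vector<double> > F(n, std::vector<double>(ns, 0.0));
  // forward filtering (chain assumed to start in state 1)
  for (std::size_t t = 0; t < n; ++t) {
    double norm = 0.0;
    for (std::size_t j = 0; j < ns; ++j) {
      double pred = (t == 0) ? (j == 0 ? 1.0 : 0.0) : 0.0;
      if (t > 0)
        for (std::size_t i = 0; i < ns; ++i) pred += F[t - 1][i] * P[i][j];
      F[t][j] = pred * py[t][j];
      norm += F[t][j];
    }
    for (std::size_t j = 0; j < ns; ++j) F[t][j] /= norm;
  }
  // backward sampling
  std::vector<int> s(n);
  std::uniform_real_distribution<double> unif(0.0, 1.0);
  std::vector<double> w(ns);
  for (std::size_t t = n; t-- > 0;) {
    double norm = 0.0;
    for (std::size_t j = 0; j < ns; ++j) {
      w[j] = F[t][j] * ((t + 1 < n) ? P[j][s[t + 1]] : 1.0);
      norm += w[j];
    }
    s[t] = static_cast<int>(ns) - 1;           // fallback against rounding
    double u = unif(rng) * norm, cum = 0.0;
    for (std::size_t j = 0; j < ns; ++j) {
      cum += w[j];
      if (u <= cum) { s[t] = static_cast<int>(j); break; }
    }
  }
  return s;
}
// ----------------------------------------------------------------------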
beta Matrix density_beta(nstore, ns); for (int iter = 0; iter nstate(ns, 1); int beta_count = 0; const Matrix Z(n, 1); Z(_,0) = Z_store(iter,_); for (int j = 0; j Zj = Z((beta_count - nstate[j]), 0, (beta_count - 1), 0); const Matrix Xj = X((beta_count - nstate[j]), 0, (beta_count - 1), k-1); const Matrix XpX = (::t(Xj)*Xj); const Matrix XpZ = (::t(Xj)*Zj); const Matrix Bn = invpd(B0 + XpX); const Matrix bn = Bn*gaxpy(B0, b0, XpZ); density_beta(iter, j) = exp(lndmvn(::t(beta_st(j,_)), bn, Bn)); } } double pdf_beta = log(prod(meanc(density_beta))); // 2. P Matrix density_P(nstore, ns); for (int iter = 0; iter < nstore; ++iter){ Matrix Sout = probit_state_sampler(stream, m, Y, X, beta_st, P); Matrix s = Sout(_, 0); Matrix ps(n, ns); for (int j = 0; j P_addN(ns, 1); for (int j = 0; j F = Matrix(n, ns); Matrix like(n, 1); Matrix pr1 = Matrix(ns, 1); pr1[0] = 1; Matrix py(ns, 1); Matrix pstyt1(ns, 1); for (int t=0; t mu = X(t,_)*::t(beta_st); for (int j=0; j unnorm_pstyt = pstyt1%py; Matrix pstyt = unnorm_pstyt/sum(unnorm_pstyt); for (int j=0; j density_beta_prior(ns, 1); Matrix density_P_prior(ns, 1); density_P[ns-1] = 1; // for (int j=0; j0 ){ Rprintf("\nlogmarglike = %10.5f\n", logmarglike); Rprintf("loglike = %10.5f\n", loglike); Rprintf("log_prior = %10.5f\n", logprior); Rprintf("log_beta = %10.5f\n", pdf_beta); Rprintf("log_P = %10.5f\n", pdf_P); } } // end of marginal likelihood }//end extern "C" extern "C"{ void MCMCprobitChange(double *betaout, double *Pout, double *psout, double *sout, const double *Ydata, const int *Yrow, const int *Ycol, const double *Xdata, const int *Xrow, const int *Xcol, const int *m, const int *burnin, const int *mcmc, const int *thin, const int *verbose, const int *uselecuyer, const int *seedarray, const int *lecuyerstream, const double *betastart, const double *Pstart, const double *a, const double *b, const double *b0data, const double *B0data, const double *A0data, double *logmarglikeholder, double *loglikeholder, const int *chib){ // pull together Matrix objects const Matrix <> Y(*Yrow, *Ycol, Ydata); const Matrix <> X(*Xrow, *Xcol, Xdata); const unsigned int nstore = *mcmc / *thin; const int n = Y.rows(); const int k = X.cols(); const int ns = *m + 1; // generate starting values Matrix <> beta(ns, k, betastart); Matrix <> P(ns, ns, Pstart); Matrix <> b0(k, 1, b0data); Matrix <> B0(k, k, B0data); const Matrix <> A0(ns, ns, A0data); double logmarglike; double loglike; // storage matrices Matrix<> beta_store(nstore, ns*k); Matrix<> Z_store(nstore, n); Matrix<> P_store(nstore, ns*ns); Matrix<> ps_store(n, ns); Matrix s_store(nstore, n); MCMCPACK_PASSRNG2MODEL(MCMCprobitChange_impl, *m, Y, X, beta, P, b0, B0, A0, *burnin, *mcmc, *thin, *verbose, *chib, beta_store, Z_store, P_store, ps_store, s_store, logmarglike, loglike); logmarglikeholder[0] = logmarglike; loglikeholder[0] = loglike; // return output for (int i = 0; i<(nstore*ns*k); ++i){ betaout[i] = beta_store[i]; } for (int i = 0; i<(nstore*ns*ns); ++i){ Pout[i] = P_store[i]; } for (int i = 0; i<(n*ns); ++i){ psout[i] = ps_store[i]; } for (int i = 0; i<(nstore*n); ++i){ sout[i] = s_store[i]; } } } #endif MCMCpack/src/MCMCprobit.cc0000644000176000001440000001370712140061657014727 0ustar ripleyusers////////////////////////////////////////////////////////////////////////// // MCMCprobit.cc is C++ code to estimate a probit regression model with // a multivariate normal prior // // Andrew D. Martin // Dept. of Political Science // Washington University in St. Louis // admartin@wustl.edu // // Kevin M. 
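// ----------------------------------------------------------------------
// Illustrative aside: the marginal-likelihood block above re-runs the
// forward filter at the posterior means and accumulates
// loglike = sum_t log( sum_j pred_j(t) * p(y_t | state j) ), i.e. the
// observed-data likelihood with the hidden states integrated out (the
// "like" vector in the code).  A minimal standalone version of that
// recursion, with the chain assumed to start in state 1, is sketched
// below; the helper name hmm_loglike is hypothetical.
#include <cmath>
#include <cstddef>
#include <vector>

inline double hmm_loglike(const std::vector<std::vector<double> >& py,
                          const std::vector<std::vector<double> >& P) {
  const std::size_t n = py.size(), ns = P.size();
  std::vector<double> f(ns, 0.0);
  f[0] = 1.0;                                   // initial state distribution
  double loglike = 0.0;
  for (std::size_t t = 0; t < n; ++t) {
    std::vector<double> pred(ns, 0.0);
    if (t == 0) pred = f;
    else
      for (std::size_t j = 0; j < ns; ++j)
        for (std::size_t i = 0; i < ns; ++i) pred[j] += f[i] * P[i][j];
    double norm = 0.0;
    for (std::size_t j = 0; j < ns; ++j) { f[j] = pred[j] * py[t][j]; norm += f[j]; }
    loglike += std::log(norm);                  // log normalizing constant
    for (std::size_t j = 0; j < ns; ++j) f[j] /= norm;
  }
  return loglike;
}
// ----------------------------------------------------------------------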
Quinn // Dept. of Government // Harvard University // kevin_quinn@harvard.edu // // This software is distributed under the terms of the GNU GENERAL // PUBLIC LICENSE Version 2, June 1991. See the package LICENSE // file for more information. // // updated to the new version of Scythe 7/26/2004 KQ // updated to Scythe 1.0.X 7/7/2007 ADM // // Copyright (C) 2003-2007 Andrew D. Martin and Kevin M. Quinn // Copyright (C) 2007-present Andrew D. Martin, Kevin M. Quinn, // and Jong Hee Park ////////////////////////////////////////////////////////////////////////// #ifndef MCMCPROBIT_CC #define MCMCPROBIT_CC #include "MCMCrng.h" #include "MCMCfcds.h" #include "matrix.h" #include "distributions.h" #include "stat.h" #include "la.h" #include "ide.h" #include "smath.h" #include "rng.h" #include "mersenne.h" #include "lecuyer.h" #include // needed to use Rprintf() #include // needed to allow user interrupts using namespace std; using namespace scythe; /* MCMCprobit implementation. Takes Matrix<> reference and fills with the * posterior. */ template void MCMCprobit_impl (rng& stream, const Matrix<>& Y, const Matrix<>& X, Matrix<>& beta, const Matrix<>& b0, const Matrix<>& B0, unsigned int burnin, unsigned int mcmc, unsigned int thin, unsigned int verbose, bool chib, Matrix<>& result, double& logmarglike) { // define constants and from cross-product matrices const unsigned int tot_iter = burnin + mcmc; // total iterations const unsigned int nstore = mcmc / thin; // number of draws to store const unsigned int k = X.cols(); const unsigned int N = X.rows(); const Matrix<> XpX = crossprod(X); const Matrix<> B0inv = invpd(B0); // storage matrix or matrices Matrix<> beta_store(nstore, k); Matrix<> bn_store(nstore, k); // initialize Z Matrix<> Z(N,1); // MCMC sampling starts here unsigned int count = 0; for (unsigned int iter = 0; iter < tot_iter; ++iter) { // [Z| beta, y] const Matrix<> Z_mean = X * beta; for (unsigned int i=0; i XpZ = t(X) * Z; const Matrix Bn = invpd(B0 + XpX); const Matrix bn = Bn*gaxpy(B0, b0, XpZ); beta = stream.rmvnorm(bn, Bn); // store values in matrices if (iter >= burnin && (iter % thin==0)){ beta_store(count,_) = beta; bn_store(count,_) = bn; ++count; } // print output to stdout if(verbose > 0 && iter % verbose == 0){ Rprintf("\n\nMCMCprobit iteration %i of %i \n", (iter+1), tot_iter); Rprintf("beta = \n"); for (unsigned int j=0; j beta_star(k, 1); beta_star(_ ,0) = meanc(beta_store); Matrix bn_reduced(k, 1); Matrix density_beta(nstore, 1); for (int iter = 0; iter bn_reduced1 = bn_store(iter, _); const Matrix Bn = invpd(B0 + XpX); density_beta(iter) = ::exp(lndmvn(beta_star, bn_reduced, Bn)); } double logbeta = log(mean(density_beta)); double loglike = 0.0; Matrix<> eta = X * beta_star; for (unsigned int i = 0; i < N; ++i) { double phi = pnorm(eta(i), 0, 1); loglike += log(dbinom(Y(i), 1, phi)); } // calculate log prior ordinate double logprior = 0.0; if (k == 1){ logprior = log(dnorm(beta_star(0), b0(0), sqrt(B0inv(0)))); } else{ logprior = lndmvn(beta_star, b0, B0inv); } // logmarglike = loglike + logprior - logbeta; if (verbose > 0){ Rprintf("\nlogmarglike = %10.5f\n", logmarglike); Rprintf("loglike = %10.5f\n", loglike); Rprintf("log_prior = %10.5f\n", logprior); Rprintf("log_beta = %10.5f\n", logbeta); } }// end of marginal likelihood computation result = beta_store; } extern "C"{ void MCMCprobit(double *sampledata, const int *samplerow, const int *samplecol, const double *Ydata, const int *Yrow, const int *Ycol, const double *Xdata, const int *Xrow, const int *Xcol, const int 
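// ----------------------------------------------------------------------
// Illustrative aside: the chib == 1 block above evaluates the probit
// log-likelihood at the posterior mean beta_star via
// loglike += log(dbinom(Y(i), 1, pnorm(eta(i), 0, 1))).  A standalone
// equivalent using the standard library (Phi obtained from erfc) is
// sketched below; the package itself uses Scythe's pnorm/dbinom.
#include <cmath>
#include <cstddef>
#include <vector>

inline double std_normal_cdf(double z) {
  return 0.5 * std::erfc(-z / std::sqrt(2.0));
}

inline double probit_loglike(const std::vector<double>& eta,   // x_i' beta
                             const std::vector<int>& y) {
  double loglike = 0.0;
  for (std::size_t i = 0; i < y.size(); ++i) {
    const double phi = std_normal_cdf(eta[i]);
    loglike += (y[i] == 1) ? std::log(phi) : std::log(1.0 - phi);
  }
  return loglike;
}
// ----------------------------------------------------------------------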
*burnin, const int *mcmc, const int *thin, const int *uselecuyer, const int *seedarray, const int *lecuyerstream, const int *verbose, const double *betastartdata, const int *betastartrow, const int *betastartcol, const double *b0data, const int *b0row, const int *b0col, const double *B0data, const int *B0row, const int *B0col, double *logmarglikeholder, // double *loglikeholder, const int *chib) { // pull together Matrix objects const Matrix <> Y(*Yrow, *Ycol, Ydata); const Matrix <> X(*Xrow, *Xcol, Xdata); Matrix <> beta (*betastartrow, *betastartcol, betastartdata); const Matrix <> b0(*b0row, *b0col, b0data); const Matrix <> B0(*B0row, *B0col, B0data); double logmarglike; // double loglike; Matrix<> storagematrix; MCMCPACK_PASSRNG2MODEL(MCMCprobit_impl, Y, X, beta, b0, B0, *burnin, *mcmc, *thin, *verbose, *chib, storagematrix, logmarglike); logmarglikeholder[0] = logmarglike; // loglikeholder[0] = loglike; const unsigned int size = *samplerow * *samplecol; for (unsigned int i=0; i #include #include "MCMCrng.h" #include "MCMCfcds.h" #include "matrix.h" #include "distributions.h" #include "stat.h" #include "la.h" #include "ide.h" #include "smath.h" #include "rng.h" #include "mersenne.h" #include "lecuyer.h" #include #include using namespace std; using namespace scythe; //tau and component sampler template Matrix<> tau_comp_sampler(rng& stream, const int m, const int totcomp, const Matrix<>& Y, const Matrix<>& X, const Matrix<>& wr, const Matrix<>& mr, const Matrix<>& sr, const Matrix<>& beta, const Matrix<>& s){ // itialize const int n = Y.rows(); const int k = X.cols(); Matrix component(totcomp, 1); Matrix<> tau(totcomp, 1); Matrix<> post_taut_mat(totcomp, 5); Matrix<> post_tau_mat(totcomp, 5); Matrix<> norm_post_tau(totcomp, 5); Matrix<> cumsum_post_tau(totcomp, 5); Matrix<> xt(1, k); Matrix<> wr_mat = eye(5); for (int i=0; i<5; ++i) { wr_mat(i,i) = wr[i]; } int tau_start = 0; int tau_end = 0; for(int t=0; t mu_t = exp(xt* ::t(beta(st-1,_))); double mut = mu_t[0]; if (yt == 0){ double tau_double = 1 + stream.rexp(mut); tau[tau_end - 1] = tau_double; for (int h=0; h<5; ++h){ double first = 1/(sr[h]*tau_double); double second = (log(tau_double) + log(mut) - mr[h])/sr[h]; post_taut_mat(tau_end-1, h) = first*exp(-0.5*second*second); } } else { Matrix<> ut = stream.runif(yt, 1); // redraw the uniform if there are any repeats // thanks to Matt Blackwell while (unique(ut).size() != ut.size()) { ut = stream.runif(yt, 1); } Matrix<> sort_ut = sort(ut); Matrix<> tau_tj(yt, 1); for(int i=1; i tau_mat(yt+1, 1); tau_mat(0, 0, yt-1, 0) = tau_tj(_,0); tau_mat[yt] = tau_last; for (int i = 0; i<(yt+1); ++i){ tau[i + tau_start] = tau_mat[i]; for (int h=0; h<5; ++h){ double first = 1/(sr[h]*tau_mat[i]); double second = (log(tau_mat[i]) + log(mut) - mr[h])/sr[h]; post_taut_mat(i+tau_start, h) = first*exp(-0.5*second*second); } } } } post_tau_mat = post_taut_mat*wr_mat; for(int i = 0; i TAUout(totcomp, 2); TAUout(_, 0) = tau(_, 0); TAUout(_, 1) = component(_, 0); return TAUout; } template Matrix<> poisson_state_sampler(rng& stream, const int& m, const Matrix<>& Y, const Matrix<>& lambda, const Matrix<>& P){ const int ns = m + 1; const int n = Y.rows(); Matrix<> F(n, ns); Matrix<> pr1(ns, 1); pr1[0] = 1; Matrix<> py(ns, 1); Matrix<> pstyt1(ns, 1); for (int t=0; t unnorm_pstyt = pstyt1%py; Matrix<> pstyt = unnorm_pstyt/sum(unnorm_pstyt); for (int j=0; j s(n, 1); Matrix<> ps(n, ns); ps(n-1,_) = F(n-1,_); s(n-1) = ns; Matrix<> pstyn(ns, 1); double pone = 0.0; int t = n-2; while (t >= 0){ int st = s(t+1); 
Matrix<> Pst_1 = ::t(P(_,st-1)); Matrix<> unnorm_pstyn = F(t,_)%Pst_1; pstyn = unnorm_pstyn/sum(unnorm_pstyn); if (st==1) s(t) = 1; else{ pone = pstyn(st-2); if(stream.runif () < pone) s(t) = st-1; else s(t) = st; } ps(t,_) = pstyn; --t; }// end of while loop Matrix<> Sout(n, ns+1); Sout(_, 0) = s(_,0); for (int j = 0; j Matrix<> poisson_reg_state_sampler(rng& stream, const int m, const Matrix<>& Y, const Matrix<>& X, const Matrix<>& beta, const Matrix<>& P){ const int ns = m + 1; const int n = Y.rows(); Matrix<> F(n, ns); Matrix<> pr1(ns, 1); pr1[0] = 1; Matrix<> py(ns, 1); Matrix<> pstyt1(ns, 1); for (int t=0; t lambda = exp(X(t,_)*::t(beta)); for (int j = 0; j< ns; ++j){ py[j] = dpois(yt, lambda[j]); } if (t==0) pstyt1 = pr1; else { pstyt1 = ::t(F(t-1,_)*P); } Matrix<> unnorm_pstyt = pstyt1%py; const Matrix<> pstyt = unnorm_pstyt/sum(unnorm_pstyt); for (int j=0; j s(n, 1); Matrix<> ps = Matrix<>(n, ns); ps(n-1,_) = F(n-1,_); s(n-1) = ns; Matrix<> pstyn = Matrix<>(ns, 1); double pone = 0.0; int t = n-2; while (t >= 0){ int st = s(t+1); Matrix<> Pst_1 = ::t(P(_,st-1)); Matrix<> unnorm_pstyn = F(t,_)%Pst_1; pstyn = unnorm_pstyn/sum(unnorm_pstyn); if (st==1) s(t) = 1; else{ pone = pstyn(st-2); if(stream.runif() < pone) s(t) = st-1; else s(t) = st; } ps(t,_) = pstyn; --t; } Matrix<> Sout(n, ns+1); Sout(_, 0) = s(_,0); for (int j = 0; j void MCMCpoissonChangepoint_impl(rng& stream, double *betaout, double *Pout, double *psout, double *sout, const double *Ydata, const int *Yrow, const int *Ycol, const int *m, const double *c0, const double *d0, const int *burnin, const int *mcmc, const int *thin, const int *verbose, const double *betastart, const double *Pstart, const double *a, const double *b, const double *A0data, double *logmarglikeholder, double *loglikeholder, const int *chib) { const Matrix <> Y(*Yrow, *Ycol, Ydata); const int tot_iter = *burnin + *mcmc; const int nstore = *mcmc / *thin; const int n = Y.rows(); const int ns = *m + 1; const Matrix <> A0(ns, ns, A0data); Matrix <> lambda(ns, 1, betastart); Matrix <> P(ns, ns, Pstart); Matrix<> lambda_store(nstore, ns); Matrix<> P_store(nstore, ns*ns); Matrix<> ps_store(n, ns); Matrix<> s_store(nstore, n); //MCMC loop unsigned int count = 0; for (int iter = 0; iter < tot_iter; ++iter){ ////////////////////// // 1. 
Sample s ////////////////////// Matrix<> Sout = poisson_state_sampler(stream, *m, Y, lambda, P); Matrix<> s = Sout(_, 0); Matrix<> ps(n, ns); for (int j = 0; j addY(ns, 1); Matrix<> addN(ns, 1); for (int j = 0; j= *burnin && ((iter % *thin)==0)){ for (int i=0; i 0 && iter % *verbose == 0){ Rprintf("\n\n MCMCpoissonChange iteration %i of %i", (iter+1), tot_iter); for (int j = 0;j lambda_st = meanc(lambda_store); Matrix<> P_vec_st = meanc(P_store); const Matrix<> P_st(ns, ns); for (int j = 0; j< ns*ns; ++j){ P_st[j] = P_vec_st[j]; } ////////////////////// // lambda ////////////////////// Matrix<> density_lambda(nstore, ns); for (int iter = 0; iter addY(ns, 1); Matrix<> addN(ns, 1); for (int j = 0; j density_P(nstore, ns); for (int iter = 0; iter < nstore; ++iter){ Matrix<> Sout = poisson_state_sampler(stream, *m, Y, lambda_st, P); Matrix <> s = Sout(_, 0); Matrix <> ps(n, ns); for (int j = 0; j P_addN(ns, 1); for (int j = 0; j F(n, ns); Matrix<> like(n, 1); Matrix<> pr1(ns, 1); pr1[0] = 1; Matrix<> py(ns, 1); Matrix<> pstyt1(ns, 1); for (int t=0; t unnorm_pstyt = pstyt1%py; Matrix<> pstyt = unnorm_pstyt/sum(unnorm_pstyt); for (int j=0; j density_lambda_prior(ns, 1); Matrix<> density_P_prior(ns, 1); density_P[ns-1] = 1; // for (int j=0; j void MCMCpoissonRegChangepoint_impl(rng& stream, double *betaout, double *Pout, double *psout, double *sout, const double *Ydata, const int *Yrow, const int *Ycol, const double *Xdata, const int *Xrow, const int *Xcol, const int *m, const int *burnin, const int *mcmc, const int *thin, const int *verbose, const double *betastart, const double *Pstart, const double *taustart, const double *componentstart, const double *a, const double *b, const double *b0data, const double *B0data, const double *A0data, double *logmarglikeholder, double *loglikeholder, const double* wrin, const double* mrin, const double* srin, const int *chib) { const Matrix <> Y(*Yrow, *Ycol, Ydata); const Matrix <> X(*Xrow, *Xcol, Xdata); const int tot_iter = *burnin + *mcmc; const int nstore = *mcmc / *thin; const int n = Y.rows(); const int k = X.cols(); const int ns = *m + 1; const int totcomp = n + (int) sum(Y); const Matrix <> b0(k, 1, b0data); const Matrix <> B0(k, k, B0data); const Matrix <> B0inv = invpd(B0); Matrix <> wr(5, 1, wrin); Matrix <> mr(5, 1, mrin); Matrix <> sr(5, 1, srin); const Matrix <> A0(ns, ns, A0data); Matrix <> beta(ns, k, betastart); Matrix <> tau(totcomp, 1, taustart); Matrix <> component(totcomp, 1, componentstart); Matrix <> P(ns, ns, Pstart); Matrix<> beta_store(nstore, ns*k); Matrix<> P_store(nstore, ns*ns); Matrix<> ps_store(n, ns); Matrix<> s_store(nstore, n); Matrix<> component_store(nstore, totcomp); Matrix<> tau_store(nstore, totcomp); Matrix<> y_tilde(n ,1); Matrix<> Sigma_inv_sum(n, 1); //MCMC loop int count = 0; for (int iter = 0; iter < tot_iter; ++iter){ int y_count = 0; for (int t = 0; t Yt_over_Sigma(yt + 1, 1); Matrix<> Sigma_inv(yt + 1, 1); for(int j = (y_count - yt - 1); j< y_count; ++j){ int jone = (int) component[j] - 1 ; //zero base in C! Sigma_inv[j-(y_count-yt-1)] = 1/(sr[jone]*sr[jone]); Yt_over_Sigma[j-(y_count-yt-1)] = (log(tau[j])- mr[jone])*Sigma_inv[j-(y_count-yt-1)]; } y_tilde[t] = sum(Yt_over_Sigma); Sigma_inv_sum[t] = sum(Sigma_inv); } ////////////////////// // 1. 
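// ----------------------------------------------------------------------
// Illustrative aside: under the Gamma(c0, d0) prior the regime means in
// the Poisson changepoint sampler above have conjugate Gamma updates, and
// each diagonal transition probability P(j,j) is drawn from a Beta full
// conditional via stream.rbeta.  The standard library has no Beta
// distribution, so the sketch below builds one from two Gamma draws; the
// shape expressions in the usage comment follow the pattern visible in
// the marginal-likelihood block above and are illustrative, not a
// substitute for the package's own full conditionals.
#include <random>

inline double rgamma_rate(double shape, double rate, std::mt19937& rng) {
  std::gamma_distribution<double> g(shape, 1.0 / rate);    // scale = 1/rate
  return g(rng);
}

inline double rbeta_std(double a, double b, std::mt19937& rng) {
  const double x = rgamma_rate(a, 1.0, rng);
  const double y = rgamma_rate(b, 1.0, rng);
  return x / (x + y);                                      // Beta(a, b)
}

// e.g.  lambda_j  = rgamma_rate(c0 + sumY_j, d0 + n_j, rng);
//       P(j, j)   = rbeta_std(A0(j,j) + n_j - 1, A0(j,j+1) + 1, rng);
//       P(j, j+1) = 1 - P(j, j);
// ----------------------------------------------------------------------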
Sample s ////////////////////// Matrix <> Sout = poisson_reg_state_sampler(stream, *m, Y, X, beta, P); Matrix <> s = Sout(_, 0); Matrix <> ps(n, ns); for (int j=0; j nstate(ns, 1); for (int j = 0; j yj = y_tilde((beta_count - nstate[j]), 0, (beta_count - 1), 0); Matrix<> Xj = X((beta_count - nstate[j]), 0, (beta_count - 1), k-1); Matrix<> wi = Sigma_inv_sum((beta_count - nstate[j]), 0, (beta_count - 1), 0); Matrix<> Xwj(nstate[j], k); for (int h = 0; h Bn = invpd(B0 + ::t(Xj)*Xwj); Matrix<> bn = Bn*gaxpy(B0, b0, -1*::t(Xj)*yj); beta(j,_) = stream.rmvnorm(bn, Bn); } ////////////////////// // 3. Sample P ////////////////////// double shape1 = 0; double shape2 = 0; P(ns-1, ns-1) = 1; for (int j =0; j< (ns-1); ++j){ shape1 = A0(j,j) + nstate[j] - 1; shape2 = A0(j,j+1) + 1; P(j,j) = stream.rbeta(shape1, shape2); P(j,j+1) = 1 - P(j,j); } ////////////////////// // 4. Sample tau ////////////////////// Matrix <> TAUout = tau_comp_sampler(stream, *m, totcomp, Y, X, wr, mr, sr, beta, s); tau = TAUout(_, 0); component = TAUout(_, 1); if (iter >= *burnin && ((iter % *thin)==0)){ Matrix<> tbeta = ::t(beta); for (int i=0; i<(ns*k); ++i){ beta_store(count,i) = tbeta[i]; } for (int j=0; j 0 && iter % *verbose == 0){ Rprintf("\n\n MCMCpoissonChange iteration %i of %i \n", (iter+1), tot_iter); for (int j = 0;j(nstate[j])); } for (int i = 0; i betast = meanc(beta_store); Matrix beta_st(ns, k); for (int j = 0; j< ns*k; ++j){ beta_st[j] = betast[j]; } Matrix<> P_vec_st = meanc(P_store); const Matrix<> P_st(ns, ns); for (int j = 0; j< ns*ns; ++j){ P_st[j] = P_vec_st[j]; } ////////////////////// // beta ////////////////////// Matrix<> density_beta(nstore, ns); for (int iter = 0; iter Yt_over_Sigma(yt + 1, 1); Matrix<> Sigma_inv(yt + 1, 1); for(int j = (y_count - yt - 1); j< y_count; ++j){ int jone = (int)component_store(iter, j) - 1 ; Sigma_inv[j-(y_count-yt-1)] = 1/(sr[jone]*sr[jone]); Yt_over_Sigma[j-(y_count-yt-1)] = (log(tau_store(iter, j))- mr[jone])*Sigma_inv[j-(y_count-yt-1)]; } y_tilde[t] = sum(Yt_over_Sigma); Sigma_inv_sum[t] = sum(Sigma_inv); } int beta_count = 0; Matrix nstate(ns, 1); for (int j = 0; j yj = y_tilde((beta_count - nstate[j]), 0, (beta_count - 1), 0); Matrix<> Xj = X((beta_count - nstate[j]), 0, (beta_count - 1), k-1); Matrix<> wi = Sigma_inv_sum((beta_count - nstate[j]), 0, (beta_count - 1), 0); Matrix<> Xwj(nstate[j], k); for (int h = 0; h Bn = invpd(B0 + ::t(Xj)*Xwj); Matrix<> bn = Bn*gaxpy(B0, b0, -1*::t(Xj)*yj); density_beta(iter, j) = exp(lndmvn(::t(beta_st(j,_)), bn, Bn)); } } double pdf_beta = log(prod(meanc(density_beta))); ////////////////////// // P ////////////////////// Matrix<> density_P(nstore, ns); for (int iter = 0; iter < nstore; ++iter){ Matrix <> Sout = poisson_reg_state_sampler(stream, *m, Y, X, beta_st, P); Matrix <> s = Sout(_, 0); Matrix <> ps(n, ns); for (int j = 0; j P_addN(ns, 1); for (int j = 0; j F = Matrix<>(n, ns); Matrix<> like(n, 1); Matrix<> pr1 = Matrix<>(ns, 1); pr1[0] = 1; Matrix<> py(ns, 1); Matrix<> pstyt1(ns, 1); for (int t=0; t lambda = exp(X(t,_)*::t(beta_st)); for (int j = 0; j< ns; ++j){ py[j] = dpois(yt, lambda[j]); } if (t==0) pstyt1 = pr1; else { pstyt1 = ::t(F(t-1,_)*P_st); } Matrix<> unnorm_pstyt = pstyt1%py; Matrix<> pstyt = unnorm_pstyt/sum(unnorm_pstyt); for (int j=0; j density_beta_prior(ns, 1); Matrix<> density_P_prior(ns, 1); density_P[ns-1] = 1; // for (int j=0; j0 ){ Rprintf("\nlogmarglike = %10.5f\n", logmarglike); Rprintf("loglike = %10.5f\n", loglike); Rprintf("log_prior = %10.5f\n", logprior); Rprintf("log_beta = %10.5f\n", 
pdf_beta); Rprintf("log_P = %10.5f\n", pdf_P); } logmarglikeholder[0] = logmarglike; loglikeholder[0] = loglike; } R_CheckUserInterrupt(); for (int i = 0; i<(nstore*ns*k); ++i){ betaout[i] = beta_store[i]; } for (int i = 0; i<(nstore*ns*ns); ++i){ Pout[i] = P_store[i]; } for (int i = 0; i<(n*ns); ++i){ psout[i] = ps_store[i]; } for (int i = 0; i<(nstore*n); ++i){ sout[i] = s_store[i]; } } extern "C" { void MCMCpoissonChange( double *betaout, double *Pout, double *psout, double *sout, const double *Ydata, const int *Yrow, const int *Ycol, const double *Xdata, const int *Xrow, const int *Xcol, // const double *logoffset const int *m, const int *burnin, const int *mcmc, const int *thin, const int *verbose, const double *betastart, const double *Pstart, const double *taustart, const double *componentstart, const double *a, const double *b, const double *c0, const double *d0, const int* uselecuyer, const int* seedarray, const int* lecuyerstream, const double *b0data, const double *B0data, const double *A0data, double *logmarglikeholder, double *loglikeholder, const double *wrin, const double *mrin, const double *srin, const int *chib){ if(*Xcol == 1){ MCMCPACK_PASSRNG2MODEL(MCMCpoissonChangepoint_impl, betaout, Pout, psout, sout, Ydata, Yrow, Ycol, m, c0, d0, burnin, mcmc, thin, verbose, betastart, Pstart, a, b, A0data, logmarglikeholder, loglikeholder, chib) } else{ MCMCPACK_PASSRNG2MODEL(MCMCpoissonRegChangepoint_impl, betaout, Pout, psout, sout, Ydata, Yrow, Ycol, Xdata, Xrow, Xcol, m, burnin, mcmc, thin, verbose, betastart, Pstart, taustart, componentstart, a, b, b0data, B0data, A0data, logmarglikeholder, loglikeholder, wrin, mrin, srin, chib); } } // end MCMC } // end extern "C" #endif MCMCpack/src/MCMCpoisson.cc0000644000176000001440000001301612140061657015113 0ustar ripleyusers////////////////////////////////////////////////////////////////////////// // MCMCpoisson.cc is C++ code to estimate a Poisson regression model with // a multivariate normal prior // // Andrew D. Martin // Dept. of Political Science // Washington University in St. Louis // admartin@wustl.edu // // Kevin M. Quinn // Dept. of Government // Harvard University // kevin_quinn@harvard.edu // // This software is distributed under the terms of the GNU GENERAL // PUBLIC LICENSE Version 2, June 1991. See the package LICENSE // file for more information. // // updated to the new version of Scythe 7/26/2004 KQ // updated to Scythe 1.0.X 7/7/2007 ADM // // Copyright (C) 2003-2007 Andrew D. Martin and Kevin M. Quinn // Copyright (C) 2007-present Andrew D. Martin, Kevin M. Quinn, // and Jong Hee Park ////////////////////////////////////////////////////////////////////////// #ifndef MCMCPOISSON_CC #define MCMCPOISSON_CC #include "MCMCrng.h" #include "MCMCfcds.h" #include "matrix.h" #include "distributions.h" #include "stat.h" #include "la.h" #include "ide.h" #include "smath.h" #include "rng.h" #include "mersenne.h" #include "lecuyer.h" #include // needed to use Rprintf() #include // needed to allow user interrupts using namespace std; using namespace scythe; static double poisson_logpost(const Matrix<>& Y, const Matrix<>& X, const Matrix<>& beta, const Matrix<>& beta_prior_mean, const Matrix<>& beta_prior_prec){ // likelihood const Matrix<> eta = X * beta; const Matrix<> mu = exp(eta); double loglike = 0.0; for (unsigned int i=0; i reference and fills with the * posterior. 
*/ template void MCMCpoisson_impl (rng& stream, const Matrix<>& Y, const Matrix<>& X, const Matrix<>& tune, Matrix<>& beta, const Matrix<>& b0, const Matrix<>& B0, const Matrix<>& V, unsigned int burnin, unsigned int mcmc, unsigned int thin, unsigned int verbose, Matrix<>& result) { // define constants const unsigned int tot_iter = burnin + mcmc; // total number iterations const unsigned int nstore = mcmc / thin; // number of draws to store const unsigned int k = X.cols(); // storage matrix or matrices Matrix<> storemat(nstore, k); // proposal parameters const Matrix<> propV = tune * invpd(B0 + invpd(V)) * tune; const Matrix<> propC = cholesky(propV) ; double logpost_cur = poisson_logpost(Y, X, beta, b0, B0); // MCMC loop int count = 0; int accepts = 0; for (unsigned int iter = 0; iter < tot_iter; ++iter){ // sample beta const Matrix<> beta_can = gaxpy(propC, stream.rnorm(k,1,0,1), beta); const double logpost_can = poisson_logpost(Y,X,beta_can, b0, B0); const double ratio = ::exp(logpost_can - logpost_cur); if (stream.runif() < ratio){ beta = beta_can; logpost_cur = logpost_can; ++accepts; } // store values in matrices if (iter >= burnin && (iter % thin==0)){ storemat(count,_) = beta; ++count; } // print output to stdout if(verbose > 0 && iter % verbose == 0){ Rprintf("\n\nMCMCpoisson iteration %i of %i \n", (iter+1), tot_iter); Rprintf("beta = \n"); for (unsigned int j=0; j(accepts) / static_cast(iter+1)); } R_CheckUserInterrupt(); // allow user interrupts }// end MCMC loop result = storemat; if (verbose > 0){ Rprintf("\n\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n"); Rprintf("The Metropolis acceptance rate for beta was %3.5f", static_cast(accepts) / static_cast(tot_iter)); Rprintf("\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n"); } } extern "C"{ void MCMCpoisson(double *sampledata, const int *samplerow, const int *samplecol, const double *Ydata, const int *Yrow, const int *Ycol, const double *Xdata, const int *Xrow, const int *Xcol, const int *burnin, const int *mcmc, const int *thin, const double *tunedata, const int *tunerow, const int *tunecol, const int *uselecuyer, const int *seedarray, const int *lecuyerstream, const int *verbose, const double *betastartdata, const int *betastartrow, const int *betastartcol, const double *b0data, const int *b0row, const int *b0col, const double *B0data, const int *B0row, const int *B0col, const double *Vdata, const int *Vrow, const int *Vcol) { // pull together Matrix objects const Matrix <> Y(*Yrow, *Ycol, Ydata); const Matrix <> X(*Xrow, *Xcol, Xdata); const Matrix <> tune(*tunerow, *tunecol, tunedata); Matrix <> beta(*betastartrow, *betastartcol, betastartdata); const Matrix <> b0(*b0row, *b0col, b0data); const Matrix <> B0(*B0row, *B0col, B0data); const Matrix <> V(*Vrow, *Vcol, Vdata); Matrix<> storagematrix; MCMCPACK_PASSRNG2MODEL(MCMCpoisson_impl, Y, X, tune, beta, b0, B0, V, *burnin, *mcmc, *thin, *verbose, storagematrix); const unsigned int size = *samplerow * *samplecol; for (unsigned int i=0; i #include "matrix.h" #include "algorithm.h" #include "distributions.h" #include "stat.h" #include "la.h" #include "ide.h" #include "smath.h" #include "MCMCrng.h" #include "MCMCfcds.h" #include // needed to use Rprintf() #include // needed to allow user interrupts typedef Matrix rmview; using namespace std; using namespace scythe; template void MCMCordfactanal_impl(rng& stream, const Matrix& X, Matrix<>& Lambda, Matrix<>& gamma, const Matrix<>& ncateg, const Matrix<>& Lambda_eq, const Matrix<>& Lambda_ineq, const 
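// ----------------------------------------------------------------------
// Illustrative aside: MCMCpoisson_impl above is a random-walk Metropolis
// sampler: propose beta_can = beta + propC * z and accept with probability
// exp(logpost_can - logpost_cur), where the proposal scale comes from
// tune * invpd(B0 + invpd(V)) * tune.  The standalone sketch below is a
// single-predictor version with a N(b0, 1/B0) prior and a fixed scalar
// step size, dropping the log(y!) terms that are constant in beta; the
// helper names are hypothetical.
#include <cmath>
#include <cstddef>
#include <random>
#include <vector>

inline double poisson_logpost_1d(const std::vector<double>& y,
                                 const std::vector<double>& x,
                                 double beta, double b0, double B0) {
  double lp = -0.5 * B0 * (beta - b0) * (beta - b0);       // log prior kernel
  for (std::size_t i = 0; i < y.size(); ++i) {
    const double eta = x[i] * beta;
    lp += y[i] * eta - std::exp(eta);                      // Poisson kernel
  }
  return lp;
}

inline double rw_metropolis_step(const std::vector<double>& y,
                                 const std::vector<double>& x,
                                 double beta, double step,
                                 double b0, double B0, std::mt19937& rng) {
  std::normal_distribution<double> norm(0.0, step);
  std::uniform_real_distribution<double> unif(0.0, 1.0);
  const double cand  = beta + norm(rng);
  const double ratio = std::exp(poisson_logpost_1d(y, x, cand, b0, B0) -
                                poisson_logpost_1d(y, x, beta, b0, B0));
  return (unif(rng) < ratio) ? cand : beta;                // accept or keep
}
// ----------------------------------------------------------------------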
Matrix<>& Lambda_prior_mean, const Matrix<>& Lambda_prior_prec, const double* tune, bool storelambda, bool storescores, int outswitch, unsigned int burnin, unsigned int mcmc, unsigned int thin, unsigned int verbose, Matrix& accepts, Matrix<>& output) { // constants const unsigned int K = X.cols(); // number of manifest variables const unsigned int N = X.rows(); // number of observations const unsigned int D = Lambda.cols(); // # of factors (incl constant) const unsigned int tot_iter = burnin + mcmc; const unsigned int nsamp = mcmc / thin; const Matrix<> I = eye(D-1); const Matrix Lambda_free_indic(K, D); for (unsigned int i = 0; i < (K * D); ++i) if (Lambda_eq(i) == -999) Lambda_free_indic(i) = true; const Matrix<> Psi = eye(K); const Matrix<> Psi_inv = eye(K); //Rprintf("Switches are %i %i %i\n", storelambda, storescores, outswitch); // starting values for phi, Xstar, and gamma_p Matrix<> phi(N, D-1); //Matrix phi = stream->rnorm(N, D-1); phi = cbind(ones(N,1), phi); Matrix<> Xstar(N, K); // storage matrices (row major order) Matrix<> Lambda_store; if (storelambda){ Lambda_store = Matrix(nsamp, K*D); } Matrix<> gamma_store(nsamp, gamma.size()); Matrix<> phi_store; if (storescores){ phi_store = Matrix<>(nsamp, N*D); } /////////////////// // Gibbs Sampler // /////////////////// int count = 0; for (unsigned int iter = 0; iter < tot_iter; ++iter) { // sample Xstar for (unsigned int i = 0; i < N; ++i) { Matrix<> X_mean = Lambda * t(phi(i,_)); for (unsigned int j = 0; j < K; ++j) { if (X(i,j) == -999) { // if missing Xstar(i,j) = stream.rnorm(X_mean[j], 1.0); } else { // if not missing Xstar(i,j) = stream.rtnorm_combo(X_mean[j], 1.0, gamma(X(i,j)-1, j), gamma(X(i,j), j)); } } } // sample phi Matrix<> Lambda_const = Lambda(_,0); Matrix<> Lambda_rest = Lambda(0, 1, K-1, D-1); Matrix<> phi_post_var = invpd(I + crossprod(Lambda_rest) ); Matrix<> phi_post_C = cholesky(phi_post_var); for (unsigned int i = 0; i < N; ++i) { Matrix<> phi_post_mean = phi_post_var * (t(Lambda_rest) * (t(Xstar(i,_))-Lambda_const)); Matrix<> phi_samp = gaxpy(phi_post_C, stream.rnorm(D-1, 1, 0, 1), phi_post_mean); for (unsigned int j = 0; j < (D-1); ++j) phi(i,j+1) = phi_samp(j); } // sample Lambda NormNormfactanal_Lambda_draw(Lambda, Lambda_free_indic, Lambda_prior_mean, Lambda_prior_prec, phi, Xstar, Psi_inv, Lambda_ineq, D, K, stream); // sample gamma for (unsigned int j = 0; j < K; ++j) { // do the sampling for each manifest var Matrix<> gamma_p = gamma(_,j); Matrix<> X_mean = phi * t(Lambda(j,_)); for (unsigned int i = 2; i < (ncateg(j)); ++i) { if (i == (ncateg(j)-1)) { gamma_p(i) = stream.rtbnorm_combo(gamma(i,j), std::pow(tune[j], 2.0), gamma_p[i-1]); } else { gamma_p[i] = stream.rtnorm_combo(gamma(i,j), std::pow(tune[j], 2.0), gamma_p[i-1], gamma(i+1, j)); } } double loglikerat = 0.0; double loggendenrat = 0.0; // loop over observations and construct the acceptance ratio for (unsigned int i = 0; i < N; ++i) { if (X(i,j) != -999) { if (X(i,j) == ncateg(j)) { loglikerat = loglikerat + log(1.0 - pnorm(gamma_p[X(i,j)-1] - X_mean[i], 0, 1) ) - log(1.0 - pnorm(gamma(X(i,j)-1,j) - X_mean[i], 0, 1) ); } else if (X(i,j) == 1) { loglikerat = loglikerat + log(pnorm(gamma_p[X(i,j)] - X_mean[i], 0, 1) ) - log(pnorm(gamma(X(i,j), j) - X_mean[i], 0, 1) ); } else { loglikerat = loglikerat + log(pnorm(gamma_p[X(i,j)] - X_mean[i], 0, 1) - pnorm(gamma_p[X(i,j)-1] - X_mean[i], 0, 1) ) - log(pnorm(gamma(X(i,j), j) - X_mean[i], 0, 1) - pnorm(gamma(X(i,j)-1, j) - X_mean[i], 0, 1) ); } } } for (unsigned int k = 2; k < ncateg(j); ++k) { 
loggendenrat = loggendenrat + log(pnorm(gamma(k+1,j), gamma(k,j), tune[j]) - pnorm(gamma_p[k-1], gamma(k,j), tune[j]) ) - log(pnorm(gamma_p[k+1], gamma_p[k], tune[j]) - pnorm(gamma(k-1,j), gamma_p[k], tune[j]) ); } double logacceptrat = loglikerat + loggendenrat; if (stream() <= exp(logacceptrat)) { for (unsigned int i = 0; i < gamma.rows(); ++i) { if (gamma(i,j) == 300) break; gamma(i,j) = gamma_p[i]; } ++accepts(j); } } // print results to screen if (verbose > 0 && iter % verbose == 0 && outswitch == 1) { Rprintf("\n\nMCMCordfactanal iteration %i of %i \n", (iter+1), tot_iter); Rprintf("Lambda = \n"); for (unsigned int i = 0; i < K; ++i) { for (unsigned int j = 0; j < D; ++j) { Rprintf("%10.5f", Lambda(i,j)); } Rprintf("\n"); } Rprintf("\nMetropolis-Hastings acceptance rates = \n"); for (unsigned int j = 0; j < K; ++j) { Rprintf("%6.2f", static_cast(accepts[j]) / static_cast((iter+1))); } } if (verbose > 0 && iter % verbose == 0 && outswitch == 2) { Rprintf("\n\nMCMCirtKd iteration %i of %i \n", (iter+1), tot_iter); } // store results if ((iter >= burnin) && ((iter % thin==0))) { // store Lambda if (storelambda) { if (outswitch == 2) { for(unsigned int l = 0; l < K; ++l) { Lambda(l,0) = Lambda(l,0) * -1.0; } } // XXX delete when sure working //Matrix<> Lambda_store_vec = reshape(Lambda, 1, K*D); //for (unsigned int l = 0; l < K * D; ++l) // Lambda_store(count, l) = Lambda_store_vec(l); rmview(Lambda_store(count, _)) = Lambda; //Rprintf("\n\n"); //for (int l = 0; l < K * D; ++l) // Rprintf("%10.5f", Lambda_store(count, l)); //Rprintf("\n\n"); } // store gamma //Matrix<> gamma_store_vec = reshape(gamma, 1, gamma.size()); //for (unsigned int l = 0; l < gamma.size(); ++l) // gamma_store(count, l) = gamma_store_vec(l); rmview(gamma_store(count, _)) = gamma; // store phi if (storescores) { //Matrix<> phi_store_vec = reshape(phi, 1, N*D); //for (unsigned int l = 0; l < N * D; ++l) // phi_store(count, l) = phi_store_vec(l); rmview(phi_store(count, _)) = phi; } count++; } // allow user interrupts R_CheckUserInterrupt(); } // end MCMC loop if (storelambda) { output = cbind(Lambda_store, gamma_store); } else { output = gamma_store; } if(storescores) { output = cbind(output, phi_store); } } extern "C"{ // function called by R to fit model void ordfactanalpost (double* sampledata, const int* samplerow, const int* samplecol, const int* Xdata, const int* Xrow, const int* Xcol, const int* burnin, const int* mcmc, const int* thin, const double* tune, const int *uselecuyer, const int *seedarray, const int *lecuyerstream, const int* verbose, const double* Lamstartdata, const int* Lamstartrow, const int* Lamstartcol, const double* gamdata, const int* gamrow, const int* gamcol, const int* ncatdata, const int* ncatrow, const int* ncatcol, const double* Lameqdata, const int* Lameqrow, const int* Lameqcol, const double* Lamineqdata, const int* Lamineqrow, const int* Lamineqcol, const double* Lampmeandata, const int* Lampmeanrow, const int* Lampmeancol, const double* Lampprecdata, const int* Lampprecrow, const int* Lamppreccol, const int* storelambda, const int* storescores, int* acceptsdata, const int* acceptsrow, const int* acceptscol, const int* outswitch) { // put together matrices const Matrix X(*Xrow, *Xcol, Xdata); Matrix<> Lambda(*Lamstartrow, *Lamstartcol, Lamstartdata); Matrix<> gamma(*gamrow, *gamcol, gamdata); const Matrix<> ncateg(*ncatrow, *ncatcol, ncatdata); const Matrix<> Lambda_eq(*Lameqrow, *Lameqcol, Lameqdata); const Matrix<> Lambda_ineq(*Lamineqrow, *Lamineqcol, Lamineqdata); const Matrix<> 
Lambda_prior_mean(*Lampmeanrow, *Lampmeancol, Lampmeandata); const Matrix<> Lambda_prior_prec(*Lampprecrow, *Lamppreccol, Lampprecdata); Matrix accepts(*acceptsrow, *acceptscol, acceptsdata); // return output Matrix output; MCMCPACK_PASSRNG2MODEL(MCMCordfactanal_impl, X, Lambda, gamma, ncateg, Lambda_eq, Lambda_ineq, Lambda_prior_mean, Lambda_prior_prec, tune, *storelambda, *storescores, *outswitch, *burnin, *mcmc, *thin, *verbose, accepts, output); for (unsigned int i = 0; i < output.size(); ++i) sampledata[i] = output(i); for (unsigned int j = 0; j < X.cols(); ++j) acceptsdata[j] = accepts(j); } } #endif MCMCpack/src/MCMCoprobitChange.cc0000644000176000001440000005523312140061657016214 0ustar ripleyusers//////////////////////////////////////////////////////////////////// // MCMCoprobitChange.cc is C++ code to estimate a oprobit changepoint model // with linear approximation // // Jong Hee Park // Department of Political Science and International Relations // Seoul National University // jongheepark@snu.ac.kr // // 07/06/2007 Written // 11/02/2009 Modified //////////////////////////////////////////////////////////////////// #ifndef MCMCOPROBITCHANGE_CC #define MCMCOPROBITCHANGE_CC #include "MCMCrng.h" #include "MCMCfcds.h" #include "matrix.h" #include "distributions.h" #include "stat.h" #include "la.h" #include "ide.h" #include "smath.h" #include "rng.h" #include "mersenne.h" #include "lecuyer.h" #include #include using namespace std; using namespace scythe; // density function for truncated normal static double dtnormLX(const double x, const double mean, const double sd, const double lower, const double upper){ double out = 0.0; if (x>lower && x& gamma){ Matrix<> cat_prob(1, ncat-1); Matrix<> prob(1, ncat); for (int j=0; j< ncat-1; ++j){ cat_prob(0, j) = pnorm(gamma[j+1] - Xbeta, 0.0, 1.0); } prob(0, ncat-1) = 1 - cat_prob(0, ncat-2); prob(0, 0) = cat_prob(0, 0); for (int j=1; j<(ncat-1); ++j){ prob(0, j) = cat_prob(0,j) - cat_prob(0, j-1); } double like = prob(0,Y-1); return like; } static double oprobit_log_postLX(unsigned int j, const int ncat, const Matrix<>& gamma_p, const Matrix<>& gamma, const Matrix<>& Y, const Matrix<>& X, const Matrix<>& beta, const Matrix<>& tune, const int gammafixed){ const int N = Y.rows(); double loglikerat = 0.0; double loggendenrat = 0.0; Matrix<> Xbeta = X*t(beta(j,_)); if (gammafixed==1){ for (unsigned int i=0; i Matrix<> gaussian_ordinal_state_sampler_fixedsigma(rng& stream, const int m, const Matrix<>& Y, const Matrix<>& X, const Matrix<>& beta, const Matrix<>& Sigma, const Matrix<>& P){ const int ns = m + 1; const int n = Y.rows(); Matrix<> F(n, ns); Matrix<> pr1(ns, 1); pr1[0] = 1; Matrix<> py(ns, 1); Matrix<> pstyt1(ns, 1); for (int t=0; t mu = X(t,_)*::t(beta); for (int j = 0; j< ns; ++j){ py[j] = dnorm(Y[t], mu[j], sqrt(Sigma[0])); } if (t==0) pstyt1 = pr1; else { pstyt1 = ::t(F(t-1,_)*P); } Matrix<> unnorm_pstyt = pstyt1%py; const Matrix<> pstyt = unnorm_pstyt/sum(unnorm_pstyt); for (int j=0; j s(n, 1); Matrix<> ps = Matrix<>(n, ns); ps(n-1,_) = F(n-1,_); s(n-1) = ns; Matrix<> pstyn = Matrix<>(ns, 1); double pone = 0.0; int t = n-2; while (t >= 0){ int st = s(t+1); Matrix<> Pst_1 = ::t(P(_,st-1)); Matrix<> unnorm_pstyn = F(t,_)%Pst_1; pstyn = unnorm_pstyn/sum(unnorm_pstyn); if (st==1) s(t) = 1; else{ pone = pstyn(st-2); if(stream.runif() < pone) s(t) = st-1; else s(t) = st; } ps(t,_) = pstyn; --t; } Matrix<> Sout(n, ns+1); Sout(_, 0) = s(_,0); for (int j = 0; j void MCMCoprobitChange_impl(rng& stream, const int m, const int ncat, const 
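// ----------------------------------------------------------------------
// Illustrative aside: oprobit_likelihood above evaluates the ordered-probit
// category probabilities Pr(y = c) = Phi(gamma_c - x'beta) -
// Phi(gamma_{c-1} - x'beta), with the outer cutpoints playing the role of
// -infinity and +infinity.  A standalone version using erfc for the normal
// CDF is sketched below, assuming categories are coded 1..ncat and gamma
// holds ncat+1 cutpoints; the helper name is hypothetical.
#include <cmath>
#include <vector>

inline double Phi(double z) { return 0.5 * std::erfc(-z / std::sqrt(2.0)); }

inline double oprobit_cat_prob(int y, double xbeta,
                               const std::vector<double>& gamma, int ncat) {
  // gamma has ncat+1 entries: gamma[0] acts as -inf, gamma[ncat] as +inf.
  const double upper = (y == ncat) ? 1.0 : Phi(gamma[y] - xbeta);
  const double lower = (y == 1)    ? 0.0 : Phi(gamma[y - 1] - xbeta);
  return upper - lower;
}
// ----------------------------------------------------------------------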
Matrix<>& Y, const Matrix<>& X, Matrix<>& beta, Matrix<>& beta_linear, Matrix<>& gamma, Matrix<>& P, Matrix<>& Sigma, Matrix<>& b0, Matrix<>& B0, const Matrix<>& A0, unsigned int burnin, unsigned int mcmc, unsigned int thin, unsigned int verbose, const Matrix<>& tune, // const Matrix& tdf, bool chib, bool gammafixed, Matrix<>& beta_store, Matrix<>& beta_linear_store, Matrix<>& gamma_store, Matrix<>& Z_store, Matrix<>& P_store, Matrix<>& ps_store, Matrix& s_store, double& logmarglike, double& loglike) { const unsigned int tot_iter = burnin + mcmc; const unsigned int nstore = mcmc / thin; const unsigned int N = Y.rows(); const unsigned int ns = m + 1; const unsigned int k = X.cols(); const unsigned int gk = ncat + 1; const Matrix<> B0inv = invpd(B0); Matrix<> Z(N, 1); Matrix<> accepts(ns, 1); Matrix<> gamma_p = gamma; //MCMC loop unsigned int count = 0; for (int iter = 0; iter < tot_iter; ++iter){ // 1. Sample s Matrix<> Sout = gaussian_ordinal_state_sampler_fixedsigma(stream, m, Y, X, beta_linear, Sigma, P); Matrix s = Sout(_, 0); Matrix ps(N, ns); for (int j = 0; j mu = X(i,_)*t(beta(s[i]-1,_)); int yi = Y[i]; Z[i] = stream.rtnorm_combo(mu[0], 1.0, gamma(s[i]-1, yi-1), gamma(s[i]-1, yi)); } // 3. Sample beta int beta_count = 0; Matrix beta_count_storage(ns, 1); Matrix nstate(ns, 1); for (int j = 0; j yj = Y((beta_count - nstate[j]), 0, (beta_count - 1), 0); Matrix<> Xj = X((beta_count - nstate[j]), 0, (beta_count - 1), k-1); Matrix<> Zj = Z((beta_count - nstate[j]), 0, (beta_count - 1), 0); Matrix<> XpX = t(Xj)*Xj; Matrix<> XpZ = t(Xj)*Zj; Matrix<> XpY = t(Xj)*yj; Matrix<> Bn = invpd(B0 + XpX/Sigma[0]); Matrix<> bn = Bn*(B0*b0 + XpY/Sigma[0]); beta_linear(j,_) = stream.rmvnorm(bn, Bn); Matrix<> Bn2 = invpd(B0 + XpX); Matrix<> bn2 = Bn2*(B0*b0 + XpZ); beta(j,_) = stream.rmvnorm(bn2, Bn2); beta_count_storage[j] = beta_count; } // 4. Sample gamma for (int j = 0; j < ns ; ++j){ for (int i = 2; i< ncat; ++i){ if (i==(ncat-1)){ gamma_p(j, i) = stream.rtbnorm_combo(gamma(j, i), ::pow(tune[j], 2.0), gamma_p(j, i-1)); } else { gamma_p(j, i) = stream.rtnorm_combo(gamma(j, i), ::pow(tune[j], 2.0), gamma_p(j, i-1), gamma(j, i+1)); } } Matrix<> Yj = Y((beta_count_storage[j] - nstate[j]), 0, (beta_count_storage[j] - 1), 0); Matrix<> Xj = X((beta_count_storage[j] - nstate[j]), 0, (beta_count_storage[j] - 1), k-1); double alpha = oprobit_log_postLX(j, ncat, gamma_p, gamma, Yj, Xj, beta, tune, gammafixed); if (stream.runif() <= exp(alpha)){ gamma(j,_) = gamma_p(j,_); accepts[j] = accepts[j] + 1; } } // 5. 
Sample P double shape1 = 0; double shape2 = 0; P(ns-1, ns-1) = 1; for (int j =0; j< (ns-1); ++j){ shape1 = A0(j,j) + (double)nstate[j] - 1; shape2 = A0(j,j+1) + 1; // P(j,j) = stream.rbeta(shape1, shape2); P(j,j+1) = 1 - P(j,j); } // load draws into sample array if (iter >= burnin && ((iter % thin)==0)){ Matrix<> tbeta = ::t(beta); Matrix<> tbetaLX = ::t(beta_linear); for (int i=0; i<(ns*k); ++i){ beta_store(count,i) = tbeta[i]; beta_linear_store(count,i) = tbetaLX[i]; } Matrix<> tgamma = ::t(gamma); for (int i=0; i<(ns*gk); ++i) gamma_store(count,i) = tgamma[i]; for (int j=0; j 1 && verbose > 0 && iter % verbose == 0){ Rprintf("\n\nMCMCoprobitChange iteration %i of %i \n", (iter+1), tot_iter); for (int j = 0;j betast = meanc(beta_store); Matrix<> betastLX = meanc(beta_linear_store); Matrix beta_st(ns, k); Matrix beta_linear_st(ns, k); for (int j = 0; j gammast = meanc(gamma_store); Matrix gamma_st(ns, gk); for (int j = 0; j P_vec_st = meanc(P_store); const Matrix<> P_st(ns, ns); for (int j = 0; j< ns*ns; ++j){ P_st[j] = P_vec_st[j]; } // storage Matrix<> pdf_numer_store(nstore, 1); Matrix<> pdf_alpha_store(nstore, 1); Matrix<> pdf_P_store(nstore, ns); // 1. gamma Matrix<> densityq(nstore, ns); Matrix<> alpha(nstore, ns); for (int iter = 0; iter < nstore; ++iter){ int beta_count = 0; Matrix nstate(ns, 1); Matrix gamma_g(ns, gk); for (int h = 0; h<(ns*gk); ++h){ gamma_g[h] = gamma_store(iter, h); } Matrix beta_g(ns, k); for (int h = 0; h<(ns*k); ++h){ beta_g[h] = beta_store(iter, h); } Matrix<> pdf_numer(ns, 1); for (int j = 0; j Yj = Y((beta_count - nstate[j]), 0, (beta_count - 1), 0); Matrix<> Xj = X((beta_count - nstate[j]), 0, (beta_count - 1), k-1); pdf_numer(j) = oprobit_log_postLX(j, ncat, gamma_st, gamma_g, Yj, Xj, beta_g, tune, gammafixed); for (int h = 2; h 0){ pdf_numer_store(iter) = 0; } else{ pdf_numer_store(iter) = sum(pdf_numer); } } double numerator = sum(meanc(pdf_numer_store)) + sum(meanc(densityq)); for (int iter = 0; iter < nstore; ++iter){ Matrix<> Sout = gaussian_ordinal_state_sampler_fixedsigma(stream, m, Y, X, beta_linear, Sigma, P); Matrix s = Sout(_, 0); for (unsigned int i = 0; i mu = X(i,_)*t(beta(s[i]-1,_)); Z[i] = stream.rtnorm_combo(mu[0], 1.0, gamma_st(s[i]-1, Y[i]-1), gamma_st(s[i]-1, Y[i])); } int beta_count = 0; Matrix beta_count_storage(ns, 1); Matrix nstate(ns, 1); for (int j = 0; j yj = Y((beta_count - nstate[j]), 0, (beta_count - 1), 0); Matrix<> Xj = X((beta_count - nstate[j]), 0, (beta_count - 1), k-1); Matrix<> Zj = Z((beta_count - nstate[j]), 0, (beta_count - 1), 0); Matrix<> XpX = t(Xj)*Xj; Matrix<> XpZ = t(Xj)*Zj; Matrix<> XpY = t(Xj)*yj; Matrix<> Bn = invpd(B0 + XpX/Sigma[0]); Matrix<> bn = Bn*(B0*b0 + XpY/Sigma[0]); beta_linear(j,_) = stream.rmvnorm(bn, Bn); Matrix<> Bn2 = invpd(B0 + XpX); Matrix<> bn2 = Bn2*(B0*b0 + XpZ); beta(j,_) = stream.rmvnorm(bn2, Bn2); beta_count_storage[j] = beta_count; } // Sample P double shape1 = 0; double shape2 = 0; P(ns-1, ns-1) = 1; for (int j =0; j< (ns-1); ++j){ shape1 = A0(j,j) + nstate[j] - 1; shape2 = A0(j,j+1) + 1; // P(j,j) = stream.rbeta(shape1, shape2); P(j,j+1) = 1 - P(j,j); } Matrix<> alpha(ns, 1); for (int j = 0; j < ns ; ++j){ for (int i = 2; i< ncat; ++i){ if (i==(ncat-1)){ gamma_p(j, i) = stream.rtbnorm_combo(gamma_st(j, i), ::pow(tune[j], 2.0), gamma_p(j, i-1)); } else { gamma_p(j, i) = stream.rtnorm_combo(gamma_st(j, i), ::pow(tune[j], 2.0), gamma_p(j, i-1), gamma_st(j, i+1)); } } Matrix Yj = Y((beta_count_storage[j] - nstate[j]), 0, (beta_count_storage[j] - 1), 0); Matrix<> Xj = 
X((beta_count_storage[j] - nstate[j]), 0, (beta_count_storage[j] - 1), k-1); alpha[j] = oprobit_log_postLX(j, ncat, gamma_p, gamma_st, Yj, Xj, beta, tune, gammafixed); } if (sum(alpha) > 0){ pdf_alpha_store(iter) = 0; } else{ pdf_alpha_store(iter) = sum(alpha); } } double denominator = mean(pdf_alpha_store); double pdf_gamma = numerator - denominator; // 2. beta Matrix<> density_beta(nstore, ns); for (int iter = 0; iter < nstore; ++iter){ Matrix<> Sout = gaussian_ordinal_state_sampler_fixedsigma(stream, m, Y, X, beta_linear, Sigma, P); Matrix s = Sout(_, 0); for (unsigned int i = 0; i mu = X(i,_)*t(beta(s[i]-1,_)); Z[i] = stream.rtnorm_combo(mu[0], 1.0, gamma_st(s[i]-1, Y[i]-1), gamma_st(s[i]-1, Y[i])); } int beta_count = 0; Matrix beta_count_storage(ns, 1); Matrix nstate(ns, 1); for (int j = 0; j yj = Y((beta_count - nstate[j]), 0, (beta_count - 1), 0); Matrix<> Xj = X((beta_count - nstate[j]), 0, (beta_count - 1), k-1); Matrix<> Zj = Z((beta_count - nstate[j]), 0, (beta_count - 1), 0); Matrix<> XpX = t(Xj)*Xj; Matrix<> XpZ = t(Xj)*Zj; Matrix<> XpY = t(Xj)*yj; Matrix<> Bn = invpd(B0 + XpX/Sigma[0]); Matrix<> bn = Bn*(B0*b0 + XpY/Sigma[0]); beta_linear(j,_) = stream.rmvnorm(bn, Bn); Matrix<> Bn2 = invpd(B0 + XpX); Matrix<> bn2 = Bn2*(B0*b0 + XpZ); beta(j,_) = stream.rmvnorm(bn2, Bn2); beta_count_storage[j] = beta_count; density_beta(iter, j) = exp(lndmvn(t(beta_st(j,_)), bn2, Bn2)); } // Sample P double shape1 = 0; double shape2 = 0; P(ns-1, ns-1) = 1; for (int j =0; j< (ns-1); ++j){ shape1 = A0(j,j) + nstate[j] - 1; shape2 = A0(j,j+1) + 1; // P(j,j) = stream.rbeta(shape1, shape2); P(j,j+1) = 1 - P(j,j); } } double pdf_beta = log(prod(meanc(density_beta))); // 3. P Matrix<> density_P(nstore, ns); for (int iter = 0; iter < nstore; ++iter){ Matrix<> Sout = gaussian_ordinal_state_sampler_fixedsigma(stream, m, Y, X, beta_linear_st, Sigma, P); Matrix s = Sout(_, 0); double shape1 = 0; double shape2 = 0; P(ns-1, ns-1) = 1; Matrix P_addN(ns, 1); for (int j = 0; j F = Matrix<>(N, ns); Matrix<> like(N, 1); Matrix<> pr1 = Matrix<>(ns, 1); pr1[0] = 1; Matrix<> py(ns, 1); Matrix<> pstyt1(ns, 1); for (int t=0; t< N ; ++t){ Matrix<> mu = X(t,_)*::t(beta_st); for (int j=0; j unnorm_pstyt = pstyt1%py; Matrix<> pstyt = unnorm_pstyt/sum(unnorm_pstyt); for (int j=0; j density_beta_prior(ns, 1); Matrix<> density_P_prior(ns, 1); density_P[ns-1] = 1; // for (int j=0; j0 ){ Rprintf("\nlogmarglike = %10.5f\n", logmarglike); Rprintf("loglike = %10.5f\n", loglike); Rprintf("log_prior = %10.5f\n", logprior); Rprintf("log_beta = %10.5f\n", pdf_beta); Rprintf("log_P = %10.5f\n", pdf_P); Rprintf("log_gamma = %10.5f\n", pdf_gamma); } } // end of marginal likelihood }//end extern "C"{ void MCMCoprobitChange(double *betaout, double *betalinearout, double *gammaout, double *Pout, double *psout, double *sout, const double *Ydata, const double *Xdata, const int *Xrow, const int *Xcol, const int *m, const int *ncat, const int *burnin, const int *mcmc, const int *thin, const int *verbose, const double *tunedata, // const int *tdfdata, const int *uselecuyer, const int *seedarray, const int *lecuyerstream, const double *betastart, const double *betalinearstart, const double *gammastart, const double *Pstart, const double *sigmastart, const double *a, const double *b, const double *b0data, const double *B0data, const double *A0data, double *logmarglikeholder, double *loglikeholder, const int *chib, const int *gammafixed) { // pull together Matrix objects const Matrix<> Y(*Xrow, 1, Ydata); const Matrix<> X(*Xrow, *Xcol, Xdata); 
const unsigned int nstore = *mcmc / *thin; const unsigned int N = *Xrow; const unsigned int k = *Xcol; const unsigned int gk = *ncat + 1; const unsigned int ns = *m + 1; // generate starting values Matrix<> beta(ns, k, betastart); Matrix<> beta_linear(ns, k, betalinearstart); Matrix<> Sigma(1, 1, sigmastart); Matrix<> P(ns, ns, Pstart); Matrix<> b0(k, 1, b0data); Matrix<> B0(k, k, B0data); Matrix<> tune(ns, 1, tunedata); Matrix<> A0(ns, ns, A0data); double logmarglike; double loglike; // storage matrices Matrix<> beta_store(nstore, ns*k); Matrix<> beta_linear_store(nstore, ns*k); Matrix<> Z_store(nstore, N); Matrix<> P_store(nstore, ns*ns); Matrix<> ps_store(N, ns); Matrix s_store(nstore, N); Matrix<> gamma(ns, gk, gammastart); Matrix<> gamma_store(nstore, ns*gk); MCMCPACK_PASSRNG2MODEL(MCMCoprobitChange_impl, *m, *ncat, Y, X, beta, beta_linear, gamma, P, Sigma, b0, B0, A0, *burnin, *mcmc, *thin, *verbose, tune, *chib, *gammafixed, beta_store, beta_linear_store, gamma_store, Z_store, P_store, ps_store, s_store, logmarglike, loglike); logmarglikeholder[0] = logmarglike; loglikeholder[0] = loglike; // return output for (int i = 0; i<(nstore*ns*k); ++i){ betaout[i] = beta_store[i]; betalinearout[i] = beta_linear_store[i]; } for (int i = 0; i<(nstore*ns*gk); ++i){ gammaout[i] = gamma_store[i]; } for (int i = 0; i<(nstore*ns*ns); ++i){ Pout[i] = P_store[i]; } for (int i = 0; i<(N*ns); ++i){ psout[i] = ps_store[i]; } for (int i = 0; i<(nstore*N); ++i){ sout[i] = s_store[i]; } } } #endif MCMCpack/src/MCMCoprobit.cc0000644000176000001440000003212312140061657015077 0ustar ripleyusers////////////////////////////////////////////////////////////////////////// // MCMCoprobit.cc is C++ code to estimate a ordinalprobit regression // model with a multivariate normal prior // // Andrew D. Martin // Dept. of Political Science // Washington University in St. Louis // admartin@wustl.edu // // Kevin M. Quinn // Dept. of Government // Harvard University // kevin_quinn@harvard.edu // // This software is distributed under the terms of the GNU GENERAL // PUBLIC LICENSE Version 2, June 1991. See the package LICENSE // file for more information. // // updated to the new version of Scythe 7/26/2004 KQ // fixed a bug pointed out by Alexander Raach 1/16/2005 KQ // updated to Scythe 1.0.X 7/10/2007 ADM // Albert and Chib method added 9/20/2007 JHP // // Copyright (C) 2003-2007 Andrew D. Martin and Kevin M. Quinn // Copyright (C) 2007-present Andrew D. Martin, Kevin M. 
Quinn, // and Jong Hee Park ////////////////////////////////////////////////////////////////////////// #ifndef MCMCOPROBIT_CC #define MCMCOPROBIT_CC #include "MCMCrng.h" #include "MCMCfcds.h" #include "matrix.h" #include "distributions.h" #include "stat.h" #include "la.h" #include "ide.h" #include "smath.h" #include "rng.h" #include "mersenne.h" #include "lecuyer.h" #include "optimize.h" #include // needed to use Rprintf() #include // needed to allow user interrupts using namespace std; using namespace scythe; static inline double lnmulttdens(const Matrix<>& theta, const Matrix<>& mu, const Matrix<>& C, const double df){ const int d = theta.size(); //const Matrix<> z = t(theta - mu) * C; // C is now C' if VC mat is C C' const Matrix<> z = C * (theta - mu); double zsumsq = 0; for (int i=0; i gamma2alpha(const Matrix<>& gamma){ const int m = gamma.rows() - 2; Matrix<> alpha(m, 1); alpha[0] = std::log(gamma[1]); for (int j=1; j< m ; ++j){ alpha[j] = std::log(gamma[j+1] - gamma[j]); } return alpha; } // function that transforms alpha to gamma Matrix<> alpha2gamma(const Matrix<>& alpha){ const int m = alpha.rows(); Matrix<> gamma(m+2, 1); gamma[0] = -300; gamma[m+1] = 300; for (int j=1; j &x, const Matrix &mu, const Matrix &Sigma) { int k = Sigma.cols(); double first = (-k/2.0) * ::log(2 * M_PI) -0.5 * ::log(det(Sigma)); Matrix< > second = ::t(x-mu)*invpd(Sigma)*(x-mu); return (first - 0.5*second[0]); } // orpobit_logpost static inline double oprobit_logpost(const Matrix& nY, const Matrix& X, const Matrix& alpha, const Matrix& alpha_prior_mean, const Matrix& alpha_prior_var, const Matrix& beta){ // likelihood double loglike = 0.0; const int n = nY.rows(); const int ncat = alpha.rows() + 1; // the linear predictor Matrix<> mu = X*beta; Matrix<> gamma = alpha2gamma(alpha); // compute prob Matrix<> cat_prob(n, ncat-1); //cat_prob: CATegory specific PROBability //the first col of cat_prob = pnorm(gamma_1 - mu) //thus, the col number is ncat - 1 Matrix<> prob(n, ncat); for (int j=0; j< ncat-1; ++j){ for (int i=0; i alpha){ const int n = y_.rows(); const int ncat = alpha.rows() + 1; // the linear predictor Matrix<> mu = X_ * beta_; Matrix<> gamma = alpha2gamma(alpha); Matrix<> cat_prob(n, ncat-1); //cat_prob: CATegory specific PROBability //the first col of cat_prob = pnorm(gamma_1 - mu) //thus, the col number is ncat - 1 Matrix<> prob(n, ncat); for (int j=0; j< ncat-1; ++j){ for (int i=0; i y_; Matrix X_; Matrix beta_; }; /* MCMCoprobit implementation. Takes Matrix<> reference which it * fills with the posterior. 
*/ template void MCMCoprobit_impl (rng& stream, const int * Y, const Matrix<>& nY, const Matrix<>& X, Matrix<>& beta, Matrix<>& gamma, const Matrix<>& b0, const Matrix<>& B0, const Matrix<>& alpha_prior_mean, const Matrix<>& alpha_prior_var, const unsigned int burnin, const unsigned int mcmc, const unsigned int thin, const unsigned int verbose, const Matrix<>& tune, const double tdf, const unsigned int cowles, Matrix<>& result) { // define constants and from cross-product matrices const unsigned int tot_iter = burnin + mcmc; // total number of mcmc iterations const unsigned int nstore = mcmc / thin; // number of draws to store const unsigned int k = X.cols(); const unsigned int N = X.rows(); const int ncat = gamma.rows() - 1; const Matrix<> XpX = crossprod(X); // storage matrix or matrices Matrix<> storemat(nstore, k+ncat+1); // initialize Z Matrix<> Z(N,1); Matrix<> Xbeta = X * beta; // Gibbs loop int count = 0; int accepts = 0; Matrix<> gamma_p = gamma; Matrix<> gamma_new = gamma + 1; Matrix<> alpha = gamma2alpha(gamma_new); Matrix<> alpha_hat = alpha; // initialize current value stuff // JHP Matrix<> propV = tune * alpha_prior_var * tune; // JHP Matrix<> propCinvT = t(cholesky(invpd(propV))); // JHP double logpost_cur = oprobit_logpost(nY, X, alpha, alpha_prior_mean, alpha_prior_var, beta); // JHP double logjump_cur = lnmulttdens(alpha_prior_mean, alpha_hat, propCinvT, tdf); double tune_double = tune[0]; for (unsigned int iter = 0; iter < tot_iter; ++iter) { ////////////////// if (cowles == 1){ ////////////////// // Cowles method [gamma | Z, beta] for (int i=2; i<(ncat); ++i){ if (i==(ncat-1)){ gamma_p[i] = stream.rtbnorm_combo(gamma[i], ::pow(tune_double, 2.0), gamma_p[i-1]); } else { gamma_p[i] = stream.rtnorm_combo(gamma[i], ::pow(tune_double, 2.0), gamma_p[i-1], gamma[i+1]); } } double loglikerat = 0.0; double loggendenrat = 0.0; // loop over observations and construct the acceptance ratio for (unsigned int i=0; i alpha_hat = BFGS(oprobit_model, alpha, stream, 100, 1e-5, false); Matrix<> alpha_V = invpd(hesscdif(oprobit_model, alpha_hat)); //note that oprobit_model contains the multiplication by -1 Matrix<> propV = tune * alpha_V * tune; Matrix<> propCinvT = ::t(cholesky(invpd(propV))); // Draw alpha_can from multivariate t Matrix<> alpha_can = alpha_hat + stream.rmvt(propV, tdf); // compute components of transition kernel double logpost_can = oprobit_logpost(nY, X, alpha_can, alpha_prior_mean, alpha_prior_var, beta); double logjump_can = lnmulttdens(alpha_can, alpha_hat, propCinvT, tdf); double logpost_cur = oprobit_logpost(nY, X, alpha, alpha_prior_mean, alpha_prior_var, beta); double logjump_cur = lnmulttdens(alpha, alpha_hat, propCinvT, tdf); double ratio = exp(logpost_can - logjump_can - logpost_cur + logjump_cur); const double u = stream(); if (u < ratio) { alpha = alpha_can; gamma = alpha2gamma(alpha); logpost_cur = logpost_can; logjump_cur = logjump_can; ++accepts; } }// end of AC method // Step 2: [Z| gamma, beta, y] Xbeta = X * beta; // Matrix<> Z_noconst(N,1); for (unsigned int i=0; i XpZ = t(X) * Z; beta = NormNormregress_beta_draw(XpX, XpZ, b0, B0, 1.0, stream); // store values in matrices if (iter >= burnin && ((iter % thin)==0)){ for (unsigned int j=0; j 0 && iter % verbose == 0){ Rprintf("\n\nMCMCoprobit iteration %i of %i \n", (iter+1), tot_iter); Rprintf("beta = \n"); Rprintf("%10.5f\n", beta[0]-gamma[1]); for (unsigned int j=1; j(accepts)/static_cast(iter+1)); } R_CheckUserInterrupt(); // allow user interrupts }// end of MCMC if (verbose > 0){ 
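//
// The Albert-Chib (2001) branch of the sampler above proposes the cutpoints on
// an unconstrained scale: gamma2alpha takes the log of the first interior
// cutpoint and the logs of successive differences, and alpha2gamma maps any
// real-valued alpha back to a strictly increasing set of cutpoints.  A
// standalone sketch of that reparameterization for a vector of positive,
// increasing interior cutpoints (standard library only; simplified relative to
// the package's Scythe versions and their fixed endpoint handling):
//
#include <cmath>
#include <vector>

// increasing cutpoints -> unconstrained reals
std::vector<double> cut_to_free(const std::vector<double>& gamma)
{
  std::vector<double> alpha(gamma.size());
  alpha[0] = std::log(gamma[0]);
  for (std::size_t j = 1; j < gamma.size(); ++j)
    alpha[j] = std::log(gamma[j] - gamma[j - 1]);
  return alpha;
}

// unconstrained reals -> increasing cutpoints (inverse map)
std::vector<double> free_to_cut(const std::vector<double>& alpha)
{
  std::vector<double> gamma(alpha.size());
  gamma[0] = std::exp(alpha[0]);
  for (std::size_t j = 1; j < alpha.size(); ++j)
    gamma[j] = gamma[j - 1] + std::exp(alpha[j]);
  return gamma;
}
// Because the inverse map always yields an ordered sequence, the t-distributed
// proposal on alpha never produces an invalid set of cutpoints.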
Rprintf("\n\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n"); Rprintf("The Metropolis acceptance rate for beta was %3.5f", static_cast(accepts) / static_cast(tot_iter)); Rprintf("\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n"); } result = storemat; } extern "C"{ void MCMCoprobit(double *sampledata, const int *samplerow, const int *samplecol, const int *Y, const double *nYdata, const int *nYrow, const int *nYcol, const double *Xdata, const int *Xrow, const int *Xcol, const int *burnin, const int *mcmc, const int *thin, const double *tunedata, const int *tunerow, const int *tunecol, const double* tdf, const int *uselecuyer, const int *seedarray, const int *lecuyerstream, const int *verbose, const double *betadata, const int *betarow, const int *betacol, const double* gammadata, const int* gammarow, const int* gammacol, const double *b0data, const int *b0row, const int *b0col, const double *B0data, const int *B0row, const int *B0col, const double *a0data, const int *a0row, const int *a0col, const double *A0data, const int *A0row, const int *A0col, const int *cowles) { // pull together Matrix objects const Matrix <> nY(*nYrow, *nYcol, nYdata); const Matrix <> X(*Xrow, *Xcol, Xdata); Matrix <> beta(*betarow, *betacol, betadata); Matrix <> gamma(*gammarow, *gammacol, gammadata); const Matrix <> b0(*b0row, *b0col, b0data); const Matrix <> B0(*B0row, *B0col, B0data); const Matrix <> alpha_prior_mean(*a0row, *a0col, a0data); const Matrix <> alpha_prior_prec(*A0row, *A0col, A0data); const Matrix <> alpha_prior_var = invpd(alpha_prior_prec); const Matrix<> tune(*tunerow, *tunecol, tunedata); Matrix<> storagematrix; MCMCPACK_PASSRNG2MODEL(MCMCoprobit_impl, Y, nY, X, beta, gamma, b0, B0, alpha_prior_mean, alpha_prior_var, *burnin, *mcmc, *thin, *verbose, tune, *tdf, *cowles, storagematrix); const unsigned int size = *samplerow * *samplecol; for (unsigned int i = 0; i < size; ++i) sampledata[i] = storagematrix(i); } } #endif MCMCpack/src/MCMCmnlslice.cc0000644000176000001440000002132712140061657015233 0ustar ripleyusers////////////////////////////////////////////////////////////////////////// // MCMCmnlslice.cc DESCRIPTION HERE // // The initial version of this file was generated by the // auto.Scythe.call() function in the MCMCpack R package // written by: // // Andrew D. Martin // Dept. of Political Science // Washington University in St. Louis // admartin@wustl.edu // // Kevin M. Quinn // Dept. of Government // Harvard University // kevin_quinn@harvard.edu // // This software is distributed under the terms of the GNU GENERAL // PUBLIC LICENSE Version 2, June 1991. See the package LICENSE // file for more information. // // This file was initially generated on Wed Dec 29 15:27:40 2004 // REVISION HISTORY // // 7/28/07 DBP ported to scythe 1.0 // // Copyright (C) 2003-2007 Andrew D. Martin and Kevin M. Quinn // Copyright (C) 2007-present Andrew D. Martin, Kevin M. 
Quinn, // and Jong Hee Park ////////////////////////////////////////////////////////////////////////// #ifndef MCMCMNLSLICE_CC #define MCMCMNLSLICE_CC #include "matrix.h" #include "distributions.h" #include "stat.h" #include "la.h" #include "ide.h" #include "smath.h" #include "MCMCrng.h" #include "MCMCfcds.h" #include "MCMCmnl.h" #include // needed to use Rprintf() #include // needed to allow user interrupts using namespace std; using namespace scythe; // eventually all of the slice sampling functions should be made more // general and put in MCMCfcds.{h cc} // // Radford Neal's (2000) doubling procedure coded for a logdensity template static void doubling(double (*logfun)(const Matrix<>&, const Matrix<>&, const Matrix<>&, const Matrix<>&, const Matrix<>&), const Matrix<>& beta, int index, double z, double w, int p, const Matrix<>& Y, const Matrix<>& X, const Matrix<>& beta_prior_mean, const Matrix<>& beta_prior_prec, rng& stream, double& L, double& R){ const double U = stream(); const double x0 = beta(index); Matrix<> beta_L = beta; Matrix<> beta_R = beta; L = x0 - w * U; beta_L(index) = L; R = L + w; beta_R(index) = R; int K = p; while (K > 0 && (z < logfun(Y, X, beta_L, beta_prior_mean, beta_prior_prec) || z < logfun(Y, X, beta_R, beta_prior_mean, beta_prior_prec))) { double V = stream(); if (V < 0.5){ L = L - (R - L); beta_L(index) = L; } else { R = R + (R - L); beta_R(index) = R; } --K; } } // Radford Neal's (2000) Accept procedure coded for a logdensity static const bool Accept(double (*logfun)(const Matrix<>&, const Matrix<>&, const Matrix<>&, const Matrix<>&, const Matrix<>&), const Matrix<>& beta, int index, double x0, double z, double w, const Matrix<>& Y, const Matrix<>& X, const Matrix<>& beta_prior_mean, const Matrix<>& beta_prior_prec, double L, double R) { double Lhat = L; double Rhat = R; bool D = false; while ((Rhat - Lhat ) > 1.1 * w) { double M = (Lhat + Rhat) / 2.0; if ( (x0 < M && beta(index) >= M) || (x0 >= M && beta(index) < M)){ D = true; } if (beta(index) < M){ Rhat = M; } else { Lhat = M; } Matrix<> beta_L = beta; Matrix<> beta_R = beta; beta_L[index] = Lhat; beta_R[index] = Rhat; if (D && z >= logfun(Y, X, beta_L, beta_prior_mean, beta_prior_prec) && z >= logfun(Y, X, beta_R, beta_prior_mean, beta_prior_prec)) { return(false); } } return(true); } // Radford Neal's (2000) shrinkage procedure coded for a log density template static double shrinkage(double (*logfun)(const Matrix<>&, const Matrix<>&, const Matrix<>&, const Matrix<>&, const Matrix<>&), const Matrix<>& beta, int index, double z, double w, const Matrix<>& Y, const Matrix<>& X, const Matrix<>& beta_prior_mean, const Matrix<>& beta_prior_prec, rng& stream, double L, double R) { double Lbar = L; double Rbar = R; Matrix<> beta_x1 = beta; const double x0 = beta[index]; for (;;) { const double U = stream(); const double x1 = Lbar + U*(Rbar - Lbar); beta_x1(index) = x1; if (z <= logfun(Y, X, beta_x1, beta_prior_mean, beta_prior_prec) && Accept(logfun, beta_x1, index, x0, z, w, Y, X, beta_prior_mean, beta_prior_prec, L, R)) { return(x1); } if (x1 < x0) { Lbar = x1; } else { Rbar = x1; } } // end infinite loop } template void MCMCmnlslice_impl(rng& stream, const Matrix<>& Y, const Matrix<>& X, const Matrix<>& b0, const Matrix<>& B0, const Matrix<>& V, Matrix<>& beta, unsigned int burnin, unsigned int mcmc, unsigned int thin, unsigned int verbose, Matrix<>& storemat){ // DEFINE constants const unsigned int tot_iter = burnin + mcmc; // total iterations const unsigned int nstore = mcmc / thin; // # of draws to 
store const unsigned int k = X.cols(); // Initialize storage matrix storemat = Matrix<>(nstore, k, false); // proposal parameters const Matrix<> propV = invpd(B0 + invpd(V)); const Matrix<> w_init = ones(k, 1); for (unsigned int i = 0; i < k; ++i) w_init(i) = sqrt(propV(i,i)) *0.05; // starting values double L = -1.0; double R = 1.0; const unsigned int warmup_iter = 100; const unsigned int warmup_burnin = 10; const unsigned int p_init = 15; const Matrix<> widthmat(warmup_iter - warmup_burnin, k); // warm up sampling to choose the slice sampling parameters for (unsigned int iter = 0; iter < warmup_iter; ++iter) { for (unsigned int index = 0; index < k; ++index) { double funval = mnl_logpost(Y, X, beta, b0, B0); double z = funval - stream.rexp(1.0); doubling(&mnl_logpost, beta, index, z, w_init[index], p_init, Y, X, b0, B0, stream, L, R); beta(index) = shrinkage(&mnl_logpost, beta, index, z, w_init(index), Y, X, b0, B0, stream, L, R); if (iter >= warmup_burnin) widthmat(iter-warmup_burnin, index) = R - L; } } const Matrix<> w = meanc(widthmat); Matrix p = ones(k,1); for (unsigned int index = 0; index < k; ++index) { int p_temp = 2; while ((w(index) * pow(2.0, p_temp) ) < max(widthmat(_,index))) { ++p_temp; } p(index) = p_temp + 1; } unsigned int count = 0; ///// REAL MCMC SAMPLING OCCURS IN THIS FOR LOOP for(unsigned int iter = 0; iter < tot_iter; ++iter) { for (unsigned int index = 0; index < k; ++index) { double funval = mnl_logpost(Y, X, beta, b0, B0); double z = funval - stream.rexp(1.0); doubling(&mnl_logpost, beta, index, z, w(index), p(index), Y, X, b0, B0, stream, L, R); beta(index) = shrinkage(&mnl_logpost, beta, index, z, w(index), Y, X, b0, B0, stream, L, R); } // store draws in storage matrix (or matrices) if(iter >= burnin && (iter % thin == 0)) { for (unsigned int j = 0; j < k; j++) storemat(count, j) = beta(j); ++count; } // print output to stdout if(verbose > 0 && iter % verbose == 0) { Rprintf("\n\nMCMCmnl slice iteration %i of %i \n", (iter+1), tot_iter); Rprintf("beta = \n"); for (unsigned int j = 0; j < k; ++j) Rprintf("%10.5f\n", beta[j]); } R_CheckUserInterrupt(); // allow user interrupts } // end MCMC loop } extern "C" { // MCMC sampling for MNL model via slice sampling void MCMCmnlslice(double *sampledata, const int *samplerow, const int *samplecol, const double *Ydata, const int *Yrow, const int *Ycol, const double *Xdata, const int *Xrow, const int *Xcol, const int *burnin, const int *mcmc, const int *thin, const int *uselecuyer, const int *seedarray, const int *lecuyerstream, const int *verbose, const double *betastartdata, const int *betastartrow, const int *betastartcol, const double *b0data, const int *b0row, const int *b0col, const double *B0data, const int *B0row, const int *B0col, const double *Vdata, const int *Vrow, const int *Vcol) { // pull together Matrix objects // REMEMBER TO ACCESS PASSED ints AND doubles PROPERLY const Matrix<> Y(*Yrow, *Ycol, Ydata); const Matrix<> X(*Xrow, *Xcol, Xdata); Matrix<> beta(*betastartrow, *betastartcol, betastartdata); const Matrix<> b0(*b0row, *b0col, b0data); const Matrix<> B0(*B0row, *B0col, B0data); const Matrix<> V(*Vrow, *Vcol, Vdata); // storage matrix or matrices Matrix storemat; MCMCPACK_PASSRNG2MODEL(MCMCmnlslice_impl, Y, X, b0, B0, V, beta, *burnin, *mcmc, *thin, *verbose, storemat); // load draws into sample array for(unsigned int i = 0; i < storemat.size(); ++i) sampledata[i] = storemat(i); } // end MCMCmnlslice } // end extern "C" #endif 
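//
// MCMCmnlslice_impl above updates beta one coordinate at a time with Radford
// Neal's slice sampling procedures referenced in the comments: draw a slice
// level z = logdens(x0) - Exp(1), expand an interval around x0, then shrink it
// until a point under the slice is found.  A minimal, self-contained
// univariate version using the stepping-out rule (standard library only; the
// package code also implements the doubling rule and a warm-up phase that
// tunes the widths):
//
#include <functional>
#include <random>

double slice_sample_1d(const std::function<double(double)>& logdens,
                       double x0, double w, int m, std::mt19937& gen)
{
  std::uniform_real_distribution<double> unif(0.0, 1.0);
  std::exponential_distribution<double> expo(1.0);

  const double z = logdens(x0) - expo(gen);   // slice level on the log scale

  // stepping out: expand [L, R] in steps of width w, at most m steps in total
  double L = x0 - w * unif(gen);
  double R = L + w;
  int J = static_cast<int>(m * unif(gen));
  int K = (m - 1) - J;
  while (J > 0 && z < logdens(L)) { L -= w; --J; }
  while (K > 0 && z < logdens(R)) { R += w; --K; }

  // shrinkage: sample uniformly on [L, R], shrinking toward x0 on rejection
  for (;;) {
    const double x1 = L + unif(gen) * (R - L);
    if (z <= logdens(x1)) return x1;
    if (x1 < x0) L = x1; else R = x1;
  }
}
// In MCMCmnlslice_impl the log density is mnl_logpost viewed as a function of
// a single coordinate of beta, with per-coordinate widths chosen in warm-up.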
MCMCpack/src/MCMCmnlMH.cc0000644000176000001440000001613012140061657014434 0ustar ripleyusers////////////////////////////////////////////////////////////////////////// // MCMCmnlMH.cc samples from the posterior distribution of a multinomial // logit model using a random walk Metropolis algorithm. // // The initial version of this file was generated by the // auto.Scythe.call() function in the MCMCpack R package // written by: // // Andrew D. Martin // Dept. of Political Science // Washington University in St. Louis // admartin@wustl.edu // // Kevin M. Quinn // Dept. of Government // Harvard University // kevin_quinn@harvard.edu // // This software is distributed under the terms of the GNU GENERAL // PUBLIC LICENSE Version 2, June 1991. See the package LICENSE // file for more information. // // This file was initially generated on Wed Dec 29 15:27:08 2004 // 12/31/2004 filled out template and got it initial version working (KQ) // 7/27/2007 DBP ported to scythe 1.0 // // Copyright (C) 2003-2007 Andrew D. Martin and Kevin M. Quinn // Copyright (C) 2007-present Andrew D. Martin, Kevin M. Quinn, // and Jong Hee Park ////////////////////////////////////////////////////////////////////////// #ifndef MCMCMNLMH_CC #define MCMCMNLMH_CC #include "matrix.h" #include "distributions.h" #include "stat.h" #include "la.h" #include "ide.h" #include "smath.h" #include "MCMCrng.h" #include "MCMCfcds.h" #include "MCMCmnl.h" #include // needed to use Rprintf() #include // needed to allow user interrupts using namespace std; using namespace scythe; // natural log of the multivariate-t density (up to a constant of // proportionality) // theta: eval point // mu: mode // C: Cholesky factor of inverse scale matrix // df: degrees of freedom static inline double lnmulttdens(const Matrix<>& theta, const Matrix<>& mu, const Matrix<>& C, const double& df){ const int d = theta.size(); //const Matrix<> z = t(theta - mu) * C; // C is now C' if VC mat is C C' const Matrix<> z = C * (theta - mu); double zsumsq = 0; for (int i=0; i void MCMCmnlMH_impl(rng& stream, const Matrix<>& Y, const Matrix<>& X, const Matrix<>& b0, const Matrix<>& B0, const Matrix<>& V, Matrix<>& beta, const Matrix<>& beta_hat, const Matrix<>& tune, const unsigned int burnin, const unsigned int mcmc, const unsigned int thin, const unsigned int verbose, const unsigned int RW, const double tdf, Matrix<>& storemat) { // define constants const unsigned int tot_iter = burnin + mcmc; // total iterations const unsigned int nstore = mcmc / thin; // # of draws to store const unsigned int k = X.cols(); // Initialize storage matrix storemat = Matrix<>(nstore, k, false); // proposal parameters const Matrix<> propV = tune * V * tune; const Matrix<> propC = cholesky(propV); const Matrix<> propCinvT = t(cholesky(invpd(propV))); double logpost_cur = mnl_logpost(Y, X, beta, b0, B0); double logjump_cur = lnmulttdens(beta, beta_hat, propCinvT, tdf); int count = 0; int accepts = 0; ///// MCMC SAMPLING OCCURS IN THIS FOR LOOP for (unsigned int iter = 0; iter < tot_iter; ++iter) { // sample beta if (RW == 0){ // Independent Metropolis-Hastings const double u = stream(); if (u < 0.75){ const Matrix<> beta_can = beta_hat + stream.rmvt(propV, tdf); const double logpost_can = mnl_logpost(Y, X, beta_can, b0, B0); const double logjump_can = lnmulttdens(beta_can, beta_hat, propCinvT, tdf); const double ratio = std::exp( logpost_can - logjump_can - logpost_cur + logjump_cur ); if (stream() < ratio) { beta = beta_can; logpost_cur = logpost_can; logjump_cur = logjump_can; ++accepts; 
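//
// The ratio computed just above is the independence-chain Metropolis-Hastings
// ratio: the multivariate-t proposal q(.) is centred at the fixed mode
// beta_hat rather than at the current state, so the proposal density enters
// both numerator and denominator,
//
//   r = [ p(beta_can | y) / q(beta_can) ] / [ p(beta_cur | y) / q(beta_cur) ],
//
// evaluated on the log scale.  (The deterministic reflection move in the
// else-branch below is its own inverse, so only the posterior terms appear in
// its ratio.)  A standalone sketch of the log-scale computation (hypothetical
// helper name, not the package API):
//
inline double indep_mh_log_ratio(double logpost_can, double logjump_can,
                                 double logpost_cur, double logjump_cur)
{
  // accept the candidate with probability min(1, exp(log ratio))
  return (logpost_can - logjump_can) - (logpost_cur - logjump_cur);
}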
} } else{ const Matrix<> beta_can = beta_hat + beta_hat - beta; const double logpost_can = mnl_logpost(Y, X, beta_can, b0, B0); const double logjump_can = lnmulttdens(beta_can, beta_hat, propCinvT, tdf); const double ratio = std::exp( logpost_can - logpost_cur ); if (stream() < ratio) { beta = beta_can; logpost_cur = logpost_can; logjump_cur = logjump_can; ++accepts; } } } else{ // Random Walk Metropolis const Matrix<> beta_can = gaxpy(propC, stream.rnorm(k,1,0,1), beta); const double logpost_can = mnl_logpost(Y, X, beta_can, b0, B0); const double ratio = std::exp(logpost_can - logpost_cur); if (stream() < ratio) { beta = beta_can; logpost_cur = logpost_can; ++accepts; } } // store values in matrices if (iter >= burnin && ((iter % thin) == 0)) { for (unsigned int j = 0; j < k; j++) storemat(count, j) = beta[j]; ++count; } // print output to stdout if (verbose > 0 && iter % verbose == 0) { Rprintf("\n\nMCMCmnl Metropolis iteration %i of %i \n", (iter+1), tot_iter); Rprintf("beta = \n"); for (unsigned int j=0; j(accepts) / static_cast(iter+1)); } R_CheckUserInterrupt(); // allow user interrupts } // end MCMC loop } extern "C" { // MCMC sampling for multinomial logit via Metropolis-Hastings void MCMCmnlMH(double *sampledata, const int *samplerow, const int *samplecol, const double *Ydata, const int *Yrow, const int *Ycol, const double *Xdata, const int *Xrow, const int *Xcol, const int *burnin, const int *mcmc, const int *thin, const double *tunedata, const int *tunerow, const int *tunecol, const int *uselecuyer, const int *seedarray, const int *lecuyerstream, const int *verbose, const double *betastartdata, const int *betastartrow, const int *betastartcol, const double *betamodedata, const int *betamoderow, const int *betamodecol, const double *b0data, const int *b0row, const int *b0col, const double *B0data, const int *B0row, const int *B0col, const double *Vdata, const int *Vrow, const int *Vcol, const int* RW, const double* tdf) { // pull together Matrix objects // REMEMBER TO ACCESS PASSED ints AND doubles PROPERLY const Matrix<> Y(*Yrow, *Ycol, Ydata); const Matrix<> X(*Xrow, *Xcol, Xdata); const Matrix<> tune(*tunerow, *tunecol, tunedata); Matrix<> beta(*betastartrow, *betastartcol, betastartdata); const Matrix<> betamode(*betamoderow, *betamodecol, betamodedata); const Matrix<> b0(*b0row, *b0col, b0data); const Matrix<> B0(*B0row, *B0col, B0data); const Matrix<> V(*Vrow, *Vcol, Vdata); // storage matrix or matrices Matrix<> storemat; MCMCPACK_PASSRNG2MODEL(MCMCmnlMH_impl, Y, X, b0, B0, V, beta, betamode, tune, *burnin, *mcmc, *thin, *verbose, *RW, *tdf, storemat); // load draws into sample array for(unsigned int i = 0; i < storemat.size(); ++i) sampledata[i] = storemat(i); } // end MCMCmnlMH } // end extern "C" #endif MCMCpack/src/MCMCmnl.h0000644000176000001440000000441012140061657014047 0ustar ripleyusers////////////////////////////////////////////////////////////////////////// // MCMCmnl.h contains multinomial logit functions called by both the // metropolis hastings and slice sampling implemenetation of MCMCmnl, // such as a function that returns the log posterior. // // Andrew D. Martin // Dept. of Political Science // Washington University in St. Louis // admartin@wustl.edu // // Kevin M. Quinn // Dept. of Government // Harvard University // kevin_quinn@harvard.edu // // This software is distributed under the terms of the GNU GENERAL // PUBLIC LICENSE Version 2, June 1991. See the package LICENSE // file for more information. 
// // DBP 7/27/2007 // // Copyright (C) 2003-2007 Andrew D. Martin and Kevin M. Quinn // Copyright (C) 2007-present Andrew D. Martin, Kevin M. Quinn, // and Jong Hee Park ////////////////////////////////////////////////////////////////////////// #ifndef MCMCMNL_H #define MCMCMNL_H #include "matrix.h" #include "algorithm.h" #include "distributions.h" #include "la.h" #include "smath.h" using namespace std; using namespace scythe; inline double mnl_logpost(const Matrix<>& Y, const Matrix<>& X, const Matrix<>& beta, const Matrix<>& beta_prior_mean, const Matrix<>& beta_prior_prec) { // likelihood double loglike = 0.0; const Matrix numera = exp(X * beta); //numer = reshape(numer, Y.rows(), Y.cols()); //numer.resize(Y.rows(), Y.cols(), true); Matrix numer(Y.rows(), Y.cols(), false); copy(numera, numer); double *denom = new double[Y.rows()]; for (unsigned int i = 0; i < Y.rows(); ++i) { denom[i] = 0.0; for (unsigned int j = 0; j < Y.cols(); ++j) { if (Y(i,j) != -999){ denom[i] += numer(i,j); } } for (unsigned int j = 0; j < Y.cols(); ++j) { if (Y(i,j) == 1.0){ loglike += std::log(numer(i,j) / denom[i]); } } } delete [] denom; // prior // double logprior = 0.0; //if (beta_prior_prec(0,0) != 0) { // logprior = lndmvn(beta, beta_prior_mean, invpd(beta_prior_prec)); // } // // the following is only up to proportionality const double logprior = -0.5 *(t(beta - beta_prior_mean) * beta_prior_prec * (beta - beta_prior_mean))(0); return (loglike + logprior); } #endif MCMCpack/src/MCMCmixfactanal.cc0000644000176000001440000002776312140061657015726 0ustar ripleyusers////////////////////////////////////////////////////////////////////////// // MCMCmixfactanal.cc is C++ code to estimate a mixed data // factor analysis model // // Andrew D. Martin // Dept. of Political Science // Washington University in St. Louis // admartin@wustl.edu // // Kevin M. Quinn // Dept. of Government // Harvard University // kevin_quinn@harvard.edu // // This software is distributed under the terms of the GNU GENERAL // PUBLIC LICENSE Version 2, June 1991. See the package LICENSE // file for more information. // // revised version of older MCMCordfactanal // 7/20/2004 KQ // updated to new version of Scythe 7/25/2004 // fixed a bug pointed out by Alexander Raach 1/16/2005 KQ // // Copyright (C) 2003-2007 Andrew D. Martin and Kevin M. Quinn // Copyright (C) 2007-present Andrew D. Martin, Kevin M. 
Quinn, // and Jong Hee Park ////////////////////////////////////////////////////////////////////////// #ifndef MCMCMIXFACTANAL_CC #define MCMCMIXFACTANAL_CC #include #include "matrix.h" #include "algorithm.h" #include "distributions.h" #include "stat.h" #include "la.h" #include "ide.h" #include "smath.h" #include "MCMCrng.h" #include "MCMCfcds.h" #include // needed to use Rprintf() #include // needed to allow user interrupts typedef Matrix rmview; using namespace std; using namespace scythe; template void MCMCmixfactanal_impl(rng& stream, const Matrix& X, const Matrix<>& Xstar, Matrix<>& Psi, Matrix<>& Psi_inv, const Matrix<>& a0, const Matrix<>& b0, Matrix<>& Lambda, Matrix<>& gamma, const Matrix<>& ncateg, const Matrix<>& Lambda_eq, const Matrix<>& Lambda_ineq, const Matrix<>& Lambda_prior_mean, const Matrix<>& Lambda_prior_prec, const double* tune, bool storelambda, bool storescores, unsigned int burnin, unsigned int mcmc, unsigned int thin, unsigned int verbose, Matrix& accepts, Matrix<>& output ){ // constants const unsigned int K = X.cols(); // number of manifest variables int n_ord_ge3 = 0; // number of ordinal varibles with >= 3 categories for (unsigned int i=0; i= 3) ++n_ord_ge3; const unsigned int N = X.rows(); // number of observations const unsigned int D = Lambda.cols(); // number of factors // (including constant) const unsigned int tot_iter = burnin + mcmc; const unsigned int nsamp = mcmc / thin; const Matrix<> I = eye(D-1); const Matrix<> Lambda_free_indic(K, D); for (unsigned int i=0; i<(K*D); ++i){ if (Lambda_eq(i) == -999) Lambda_free_indic(i) = 1.0; } // starting values for phi and gamma_p Matrix<> phi(N,D-1); phi = cbind(ones(N,1), phi); // storage matrices (row major order) Matrix<> Lambda_store; if (storelambda==1){ Lambda_store = Matrix<>(nsamp,K*D); } Matrix<> gamma_store(nsamp, gamma.size()); Matrix<> phi_store; if (storescores==1){ phi_store = Matrix<>(nsamp, N*D); } Matrix<> Psi_store(nsamp, K); /////////////////// // Gibbs Sampler // /////////////////// int count = 0; for (unsigned int iter=0; iter < tot_iter; ++iter){ // sample Xstar for (unsigned int i=0; i X_mean = Lambda * t(phi(i,_)); for (unsigned int j=0; j= 2){ // ordinal data if (X(i,j) == -999){ // if missing Xstar(i,j) = stream.rnorm(X_mean[j], 1.0); } else { // if not missing Xstar(i,j) = stream.rtnorm_combo(X_mean[j], 1.0, gamma(X(i,j)-1, j), gamma(X(i,j), j)); } } else { // continuous data if (X(i,j) == -999){ // if missing Xstar(i,j) = stream.rnorm(X_mean[j], std::sqrt(Psi(j,j))); } } } } // sample phi const Matrix<> Lambda_const = Lambda(_,0); const Matrix<> Lambda_rest = Lambda(0, 1, K-1, D-1); // if Psi_inv is *not* diagonal then use: //Matrix phi_post_var = invpd(I + t(Lambda_rest) * // Psi_inv * Lambda_rest); // instead of the following 2 lines: const Matrix<> AAA = scythe::sqrt(Psi_inv) * Lambda_rest; const Matrix<> phi_post_var = invpd(I + crossprod(AAA)); // ///////////////////////////////////////////////////// const Matrix<> phi_post_C = cholesky(phi_post_var); for (unsigned int i=0; i phi_post_mean = phi_post_var * (t(Lambda_rest) * Psi_inv * (t(Xstar(i,_))-Lambda_const)); const Matrix<> phi_samp = gaxpy(phi_post_C, stream.rnorm(D-1, 1, 0.0, 1.0), phi_post_mean); for (unsigned int j=0; j<(D-1); ++j) phi(i,j+1) = phi_samp[j]; } // sample Lambda NormNormfactanal_Lambda_draw(Lambda, Lambda_free_indic, Lambda_prior_mean, Lambda_prior_prec, phi, Xstar, Psi_inv, Lambda_ineq, D, K, stream); // sample Psi (assumes diagonal Psi) for (unsigned int i=0; i epsilon = gaxpy(phi, -1*(t(Lambda(i,_))), 
Xstar(_,i)); const Matrix<> SSE = crossprod(epsilon); const double new_a0 = (a0[i] + N)*0.5; const double new_b0 = (b0[i] + SSE[0])*0.5; Psi(i,i) = stream.rigamma(new_a0, new_b0); Psi_inv(i,i) = 1.0 / Psi(i,i); } } // sample gamma for (unsigned int j=0; j gamma_p = gamma(_,j); if (ncateg[j] <= 2){ ++accepts[j]; } if (ncateg[j] > 2){ const Matrix<> X_mean = phi * t(Lambda(j,_)); for (int i=2; i<(ncateg[j]); ++i){ if (i==(ncateg[j]-1)){ gamma_p(i) = stream.rtbnorm_combo(gamma(i,j), ::pow(tune[j], 2.0), gamma_p[i-1]); } else { gamma_p(i) = stream.rtnorm_combo(gamma(i,j), ::pow(tune[j], 2.0), gamma_p[i-1], gamma(i+1, j)); } } double loglikerat = 0.0; double loggendenrat = 0.0; // loop over observations and construct the acceptance ratio for (unsigned int i=0; i 0 && iter % verbose == 0){ Rprintf("\n\nMCMCmixfactanal iteration %i of %i \n", (iter+1), tot_iter); Rprintf("Lambda = \n"); for (unsigned int i=0; i(accepts[j]) / static_cast((iter+1))); } } // store results if ((iter >= burnin) && ((iter % thin==0))) { // store Lambda if (storelambda==1){ //Matrix Lambda_store_vec = reshape(Lambda,1,K*D); //for (int l=0; l gamma_store_vec = reshape(gamma, 1, *gamrow* *gamcol); //for (int l=0; l<*gamrow* *gamcol; ++l) // gamma_store(count, l) = gamma_store_vec[l]; rmview(gamma_store(count, _)) = gamma; // store Psi for (unsigned int l=0; l phi_store_vec = reshape(phi, 1, N*D); //for (int l=0; l output; if (storelambda==1){ output = cbind(Lambda_store, gamma_store); } else { output = gamma_store; } if(storescores == 1) { output = cbind(output, phi_store); } output = cbind(output, Psi_store); } extern "C"{ // function called by R to fit model void mixfactanalpost (double* sampledata, const int* samplerow, const int* samplecol, const double* Xdata, const int* Xrow, const int* Xcol, const int* burnin, const int* mcmc, const int* thin, const double* tune, const int *uselecuyer, const int *seedarray, const int *lecuyerstream, const int* verbose, const double* Lamstartdata, const int* Lamstartrow, const int* Lamstartcol, const double* gamdata, const int* gamrow, const int* gamcol, const double* Psistartdata, const int* Psistartrow, const int* Psistartcol, const int* ncatdata, const int* ncatrow, const int* ncatcol, const double* Lameqdata, const int* Lameqrow, const int* Lameqcol, const double* Lamineqdata, const int* Lamineqrow, const int* Lamineqcol, const double* Lampmeandata, const int* Lampmeanrow, const int* Lampmeancol, const double* Lampprecdata, const int* Lampprecrow, const int* Lamppreccol, const double* a0data, const int* a0row, const int* a0col, const double* b0data, const int* b0row, const int* b0col, const int* storelambda, const int* storescores, int* acceptsdata, const int* acceptsrow, const int* acceptscol ) { // put together matrices const Matrix<> Xstar(*Xrow, *Xcol, Xdata); const Matrix X = Matrix(*Xrow, *Xcol); for (int i=0; i<(*Xrow * *Xcol); ++i) X[i] = static_cast(Xstar[i]); Matrix<> Lambda(*Lamstartrow, *Lamstartcol, Lamstartdata); Matrix<> gamma(*gamrow, *gamcol, gamdata); Matrix<> Psi(*Psistartrow, *Psistartcol, Psistartdata); Matrix<> Psi_inv = invpd(Psi); const Matrix ncateg(*ncatrow, *ncatcol, ncatdata); const Matrix<> Lambda_eq(*Lameqrow, *Lameqcol, Lameqdata); const Matrix<> Lambda_ineq(*Lamineqrow, *Lamineqcol, Lamineqdata); const Matrix<> Lambda_prior_mean(*Lampmeanrow, *Lampmeancol, Lampmeandata); const Matrix<> Lambda_prior_prec(*Lampprecrow, *Lamppreccol, Lampprecdata); const Matrix <> a0(*a0row, *a0col, a0data); const Matrix <> b0(*b0row, *b0col, b0data); Matrix 
accepts(*acceptsrow, *acceptscol, acceptsdata); // return output Matrix output; MCMCPACK_PASSRNG2MODEL(MCMCmixfactanal_impl, X, Xstar, Psi, Psi_inv, a0, b0, Lambda, gamma, ncateg, Lambda_eq, Lambda_ineq, Lambda_prior_mean, Lambda_prior_prec, tune, *storelambda, *storescores, *burnin, *mcmc, *thin, *verbose, accepts, output); const unsigned int size = (unsigned int) *samplerow * *samplecol; for (unsigned int i=0; i // needed to use Rprintf() #include // needed to allow user interrupts #include #include using namespace std; using namespace scythe; // function that evaluatees the user supplied R function double user_fun_eval(SEXP fun, SEXP theta, SEXP myframe) { SEXP R_fcall; if(!isFunction(fun)) error("`fun' must be a function"); if(!isEnvironment(myframe)) error("myframe must be an environment"); PROTECT(R_fcall = lang2(fun, R_NilValue)); SETCADR(R_fcall, theta); SEXP funval; PROTECT(funval = eval(R_fcall, myframe)); if (!isReal(funval)) error("`fun' must return a double"); double fv = REAL(funval)[0]; if (fv == R_PosInf) error("`fun' returned +Inf"); if (R_IsNaN(fv) || R_IsNA(fv)) error("`fun' returned NaN or NA"); UNPROTECT(2); return fv; } template void MCMCmetrop1R_impl (rng& stream, SEXP& fun, SEXP& theta, SEXP& myframe, unsigned int burnin, unsigned int mcmc, unsigned int thin, unsigned int verbose, bool logfun, const Matrix<>& propvar, SEXP& sample_SEXP) { // define constants const unsigned int npar = length(theta); const unsigned int tot_iter = burnin + mcmc; const unsigned int nsamp = mcmc / thin; const Matrix <> propc = cholesky(propvar); // initialize matrix to hold the sample Matrix<> sample(nsamp, npar, false); // put theta into a Scythe Matrix double* theta_data = REAL(theta); const int theta_nr = length(theta); const int theta_nc = 1; Matrix <> theta_M (theta_nc, theta_nr, theta_data); theta_M = t(theta_M); // evaluate userfun at starting value double userfun_cur = user_fun_eval(fun, theta, myframe); if (! logfun) userfun_cur = std::log(userfun_cur); // THE METROPOLIS SAMPLING unsigned int count = 0; unsigned int accepts = 0; for (unsigned int iter = 0; iter < tot_iter; ++iter) { // generate candidate value of theta Matrix <> theta_can_M = theta_M + propc * stream.rnorm(npar,1, 0, 1); // put theta_can_M into a SEXP SEXP theta_can; PROTECT(theta_can = allocVector(REALSXP, npar)); for (unsigned int i = 0; i < npar; ++i) { REAL(theta_can)[i] = theta_can_M(i); } // evaluate user function fun at candidate theta double userfun_can = user_fun_eval(fun, theta_can, myframe); if (! 
logfun) userfun_can = std::log(userfun_can); const double ratio = std::exp(userfun_can - userfun_cur); if (stream() < ratio) { for (unsigned int i = 0; i < npar; ++i) { REAL(theta)[i] = theta_can_M(i); } // theta = theta_can; theta_M = theta_can_M; userfun_cur = userfun_can; ++accepts; } UNPROTECT(1); // store values in matrices if ((iter%thin) == 0 && iter >= burnin) { for (unsigned int j = 0; j < npar; j++) sample(count, j) = REAL(theta)[j]; ++count; } if (verbose && iter % verbose == 0) { Rprintf("MCMCmetrop1R iteration %i of %i \n", (iter+1), tot_iter); Rprintf("function value = %10.5f\n", userfun_cur); Rprintf("theta = \n"); for (unsigned int i = 0; i < npar; ++i) Rprintf("%10.5f\n", REAL(theta)[i]); Rprintf("Metropolis acceptance rate = %3.5f\n\n", static_cast(accepts) / static_cast(iter+1)); } R_CheckUserInterrupt(); // allow user interrupts } // put the sample into a SEXP and return it //sample_SEXP = PROTECT(allocMatrix(REALSXP, nsamp, npar)); for (unsigned int i = 0; i < nsamp; ++i) { for (unsigned int j = 0; j < npar; ++j) { REAL(sample_SEXP)[i + nsamp * j] = sample(i,j); } } //UNPROTECT(1); // print the the acceptance rate to the console in a way that // everyone (even Windows users) can see Rprintf("\n\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n"); Rprintf("The Metropolis acceptance rate was %3.5f", static_cast(accepts) / static_cast(tot_iter)); Rprintf("\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n"); } extern "C" { // the function that actually does the sampling and returns a value to R SEXP MCMCmetrop1R_cc(SEXP fun, SEXP theta, SEXP myframe, SEXP burnin_R, SEXP mcmc_R, SEXP thin_R, SEXP verbose, SEXP lecuyer_R, SEXP seedarray_R, SEXP lecuyerstream_R, SEXP logfun, SEXP propvar_R) { // put rng stuff together int seedarray[6]; for(int i=0; i<6; ++i) seedarray[i] = INTEGER(seedarray_R)[i]; int uselecuyer_cc = INTEGER(lecuyer_R)[0]; int lecuyerstream_cc = INTEGER(lecuyerstream_R)[0]; int* uselecuyer = &uselecuyer_cc; int* lecuyerstream = &lecuyerstream_cc; // put propvar_R into a Matrix double* propvar_data = REAL(propvar_R); const int propvar_nr = nrows(propvar_R); const int propvar_nc = ncols(propvar_R); Matrix <> propvar (propvar_nc, propvar_nr, propvar_data); propvar = t(propvar); const unsigned int npar = length(theta); const unsigned int nsamp = INTEGER(mcmc_R)[0] / INTEGER(thin_R)[0]; SEXP sample_SEXP; PROTECT(sample_SEXP = allocMatrix(REALSXP, nsamp, npar)); MCMCPACK_PASSRNG2MODEL(MCMCmetrop1R_impl, fun, theta, myframe, INTEGER(burnin_R)[0], INTEGER(mcmc_R)[0], INTEGER(thin_R)[0], INTEGER(verbose)[0], INTEGER(logfun)[0], propvar, sample_SEXP); UNPROTECT(1); // return the sample return sample_SEXP; } } #endif MCMCpack/src/MCMClogituserprior.cc0000644000176000001440000001675112140061657016523 0ustar ripleyusers////////////////////////////////////////////////////////////////////////// // MCMClogituserprior.cc samples from the posterior distribution of a // logistic regression model with user-written prior coded in R using a // random walk Metropolis algorithm // // Andrew D. Martin // Dept. of Political Science // Washington University in St. Louis // admartin@wustl.edu // // Kevin M. Quinn // Dept. of Government // Harvard University // kevin_quinn@harvard.edu // // This software is distributed under the terms of the GNU GENERAL // PUBLIC LICENSE Version 2, June 1991. See the package LICENSE // file for more information. // // KQ 8/17/2005 (based on current version of MCMCmetrop1R.cc) // // Copyright (C) 2003-2007 Andrew D. 
Martin and Kevin M. Quinn // Copyright (C) 2007-present Andrew D. Martin, Kevin M. Quinn, // and Jong Hee Park ////////////////////////////////////////////////////////////////////////// #ifndef MCMCLOGITUSERPRIOR_CC #define MCMCLOGITUSERPRIOR_CC #include "matrix.h" #include "distributions.h" #include "stat.h" #include "la.h" #include "ide.h" #include "smath.h" #include "MCMCrng.h" #include "MCMCfcds.h" #include // needed to use Rprintf() #include // needed to allow user interrupts #include #include using namespace std; using namespace scythe; // function that evaluatees the user supplied R function static double user_fun_eval(SEXP fun, SEXP theta, SEXP myframe){ SEXP R_fcall; if(!isFunction(fun)) error("`fun' must be a function"); if(!isEnvironment(myframe)) error("myframe must be an environment"); PROTECT(R_fcall = lang2(fun, R_NilValue)); SETCADR(R_fcall, theta); SEXP funval = eval(R_fcall, myframe); if (!isReal(funval)) error("`fun' must return a double"); double fv = REAL(funval)[0]; UNPROTECT(1); return fv; } static double logit_loglike(const Matrix& Y, const Matrix& X, const Matrix& beta){ // likelihood const Matrix eta = X * beta; const Matrix p = 1.0/(1.0 + exp(-eta)); double loglike = 0.0; for (unsigned int i=0; i void MCMClogituserprior_impl(rng& stream, SEXP fun, SEXP theta, SEXP myframe, unsigned int burnin, unsigned int mcmc, unsigned int thin, unsigned int verbose, bool logfun, const Matrix<>& propvar, const Matrix<>& Y_M, const Matrix<>& X_M, SEXP& sample_SEXP){ // define constants const unsigned int npar = length(theta); const unsigned int tot_iter = burnin + mcmc; const unsigned int nsamp = mcmc/thin; const Matrix <> propc = cholesky(propvar); // define matrix to hold the sample Matrix <> sample (nsamp, npar, false); // put theta into a Scythe Matrix double* theta_data = REAL(theta); const int theta_nr = length(theta); const int theta_nc = 1; Matrix <> theta_M (theta_nc, theta_nr, theta_data); theta_M = t(theta_M); // evaluate userfun at starting value double loglike_val = logit_loglike(Y_M, X_M, theta_M); double userfun_cur = user_fun_eval(fun, theta, myframe); if (! 
logfun) userfun_cur = ::log(userfun_cur); userfun_cur += loglike_val; // THE METROPOLIS SAMPLING int count = 0; int accepts = 0; for (unsigned int iter=0; iter theta_can_M = theta_M + propc * stream.rnorm(npar,1, 0, 1); // put theta_can_M into a SEXP SEXP theta_can; theta_can = PROTECT(allocVector(REALSXP, npar)); for (unsigned int i=0; i= burnin){ for (unsigned int j = 0; j < npar; j++) sample(count, j) = REAL(theta)[j]; ++count; } if (verbose && iter % verbose == 0) { Rprintf("MCMClogit iteration %i of %i \n", (iter+1), tot_iter); Rprintf("beta = \n"); for (unsigned int i=0; i(accepts) / static_cast(iter+1)); } UNPROTECT(1); R_CheckUserInterrupt(); // allow user interrupts } // put the sample into a SEXP and return it sample_SEXP = PROTECT(allocMatrix(REALSXP, nsamp, npar)); for (unsigned int i=0; i(accepts) / static_cast(tot_iter)); Rprintf("\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n"); } extern "C" { // the function that actually does the sampling and returns a value to R SEXP MCMClogituserprior_cc(SEXP fun, SEXP Y_R, SEXP X_R, SEXP theta, SEXP myframe, SEXP burnin_R, SEXP mcmc_R, SEXP thin_R, SEXP verbose, SEXP lecuyer_R, SEXP seedarray_R, SEXP lecuyerstream_R, SEXP logfun, SEXP propvar_R){ // put rng stuff together and initiate stream int seedarray[6]; for(int i=0; i<6; ++i) seedarray[i] = INTEGER(seedarray_R)[i]; int uselecuyer_cc = INTEGER(lecuyer_R)[0]; int lecuyerstream_cc = INTEGER(lecuyerstream_R)[0]; int* uselecuyer = &uselecuyer_cc; int* lecuyerstream = &lecuyerstream_cc; // put propvar_R into a Matrix double* propvar_data = REAL(propvar_R); const int propvar_nr = nrows(propvar_R); const int propvar_nc = ncols(propvar_R); Matrix propvarpre (propvar_nc, propvar_nr, propvar_data); //propvar = Matrix<>(propvar.rows(), propvar.cols(), // propvar.begin_f()); Matrix<> propvar = t(propvarpre); // put Y_R into a Scythe Matrix int* Y_data = INTEGER(Y_R); const int Y_nr = length(Y_R); const int Y_nc = 1; Matrix Y_Mpre (Y_nc, Y_nr, Y_data); //Y_M = Matrix(Y_M.rows(), Y_M.cols(), Y_M.begin_f()); Matrix Y_M = t(Y_Mpre); // put X_R into a Scythe Matrix double* X_data = REAL(X_R); const int X_nr = nrows(X_R); const int X_nc = ncols(X_R); Matrix X_Mpre (X_nc, X_nr, X_data); //X_M = Matrix<>(X_M.rows(), X_M.cols(), X_M.begin_f()); Matrix<> X_M = t(X_Mpre); SEXP sample_SEXP; MCMCPACK_PASSRNG2MODEL(MCMClogituserprior_impl, fun, theta, myframe, INTEGER(burnin_R)[0], INTEGER(mcmc_R)[0], INTEGER(thin_R)[0], INTEGER(verbose)[0], INTEGER(logfun)[0], propvar, Y_M, X_M, sample_SEXP) // return the sample return sample_SEXP; } } #endif MCMCpack/src/MCMClogit.cc0000644000176000001440000001263612140061657014546 0ustar ripleyusers////////////////////////////////////////////////////////////////////////// // MCMClogit.cc is C++ code to estimate a logistic regression model with // a multivariate normal prior // // Andrew D. Martin // Dept. of Political Science // Washington University in St. Louis // admartin@wustl.edu // // Kevin M. Quinn // Dept. of Government // Harvard University // kevin_quinn@harvard.edu // // This software is distributed under the terms of the GNU GENERAL // PUBLIC LICENSE Version 2, June 1991. See the package LICENSE // file for more information. // // updated to the new version of Scythe 7/25/2004 KQ // // Copyright (C) 2003-2007 Andrew D. Martin and Kevin M. Quinn // Copyright (C) 2007-present Andrew D. Martin, Kevin M. 
Quinn, // and Jong Hee Park ////////////////////////////////////////////////////////////////////////// #ifndef MCMCLOGIT_CC #define MCMCLOGIT_CC #include #include "MCMCrng.h" #include "MCMCfcds.h" #include "matrix.h" #include "distributions.h" #include "stat.h" #include "la.h" #include "ide.h" #include "smath.h" #include // needed to use Rprintf() #include // needed to allow user interrupts using namespace scythe; using namespace std; static double logit_logpost(const Matrix<>& Y, const Matrix<>& X, const Matrix<>& beta, const Matrix<>& beta_prior_mean, const Matrix<>& beta_prior_prec) { // likelihood const Matrix<> eta = X * beta; const Matrix<> p = 1.0 / (1.0 + exp(-eta)); double loglike = 0.0; for (unsigned int i = 0; i < Y.rows(); ++ i) loglike += Y(i) * ::log(p(i)) + (1 - Y(i)) * ::log(1 - p(i)); //prior double logprior = 0.0; if (beta_prior_prec(0) != 0) logprior = lndmvn(beta, beta_prior_mean, invpd(beta_prior_prec)); return (loglike + logprior); } template void MCMClogit_impl (rng& stream, const Matrix<>& Y, const Matrix<>& X, const Matrix<>& tune, Matrix<>& beta, const Matrix<>& b0, const Matrix<>& B0, const Matrix<>& V, unsigned int burnin, unsigned int mcmc, unsigned int thin, unsigned int verbose, Matrix<>& result) { // define constants const unsigned int tot_iter = burnin + mcmc; // total mcmc iterations const unsigned int k = X.cols(); // proposal parameters const Matrix<> propV = tune * invpd(B0 + invpd(V)) * tune; const Matrix<> propC = cholesky(propV) ; double logpost_cur = logit_logpost(Y, X, beta, b0, B0); // MCMC loop unsigned int count = 0; unsigned int accepts = 0; for (unsigned int iter = 0; iter < tot_iter; ++iter) { // sample beta const Matrix<> beta_can = gaxpy(propC, stream.rnorm(k, 1, 0, 1), beta); const double logpost_can = logit_logpost(Y, X, beta_can, b0, B0); const double ratio = ::exp(logpost_can - logpost_cur); if (stream.runif() < ratio) { beta = beta_can; logpost_cur = logpost_can; ++accepts; } // store values in matrices if (iter >= burnin && ((iter % thin) == 0)) { result(count++, _) = beta; } // print output to stdout if(verbose > 0 && iter % verbose == 0){ Rprintf("\n\nMCMClogit iteration %i of %i \n", (iter+1), tot_iter); Rprintf("beta = \n"); for (unsigned int j = 0; j < k; ++j) Rprintf("%10.5f\n", beta(j)); Rprintf("Metropolis acceptance rate for beta = %3.5f\n\n", static_cast(accepts) / static_cast(iter+1)); } R_CheckUserInterrupt(); // allow user interrupts }// end MCMC loop if (verbose > 0){ Rprintf("\n\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n"); Rprintf("The Metropolis acceptance rate for beta was %3.5f", static_cast(accepts) / static_cast(tot_iter)); Rprintf("\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n"); } } extern "C"{ void MCMClogit(double *sampledata, const int *samplerow, const int *samplecol, const double *Ydata, const int *Yrow, const int *Ycol, const double *Xdata, const int *Xrow, const int *Xcol, const int *burnin, const int *mcmc, const int *thin, const double *tunedata, const int *tunerow, const int *tunecol, const int *uselecuyer, const int *seedarray, const int *lecuyerstream, const int *verbose, const double *betastartdata, const int *betastartrow, const int *betastartcol, const double *b0data, const int *b0row, const int *b0col, const double *B0data, const int *B0row, const int *B0col, const double *Vdata, const int *Vrow, const int *Vcol) { // pull together Matrix objects Matrix<> Y(*Yrow, *Ycol, Ydata); Matrix<> X(*Xrow, *Xcol, Xdata); Matrix<> tune(*tunerow, *tunecol, tunedata); Matrix<> 
beta(*betastartrow, *betastartcol, betastartdata); Matrix<> b0(*b0row, *b0col, b0data); Matrix<> B0(*B0row, *B0col, B0data); Matrix<> V(*Vrow, *Vcol, Vdata); Matrix<> result(*samplerow, *samplecol, false); MCMCPACK_PASSRNG2MODEL(MCMClogit_impl, Y, X, tune, beta, b0, B0, V, *burnin, *mcmc, *thin, *verbose, result); unsigned int size = *samplecol * *samplerow; for (unsigned int i = 0; i < size; ++i) sampledata[i] = result(i); } } #endif MCMCpack/src/MCMCirtKdRob.cc0000644000176000001440000007656612140061657015164 0ustar ripleyusers////////////////////////////////////////////////////////////////////////// // MCMCirtKdRob.cc is C++ code to estimate a robust K-dimensional // item response model // // Andrew D. Martin // Dept. of Political Science // Washington University in St. Louis // admartin@wustl.edu // // Kevin M. Quinn // Dept. of Government // Harvard University // kevin_quinn@harvard.edu // // This software is distributed under the terms of the GNU GENERAL // PUBLIC LICENSE Version 2, June 1991. See the package LICENSE // file for more information. // // 2/18/2005 KQ // 8/1/2007 ported to Scythe 1.0.2 KQ // // Copyright (C) 2003-2007 Andrew D. Martin and Kevin M. Quinn // Copyright (C) 2007-present Andrew D. Martin, Kevin M. Quinn, // and Jong Hee Park ////////////////////////////////////////////////////////////////////////// #ifndef MCMCIRTKDROB_CC #define MCMCIRTKDROB_CC #include "matrix.h" #include "distributions.h" #include "stat.h" #include "la.h" #include "ide.h" #include "smath.h" #include "MCMCrng.h" #include "MCMCfcds.h" #include "MCMCmnl.h" #include // needed to use Rprintf() #include // needed to allow user interrupts #include #include typedef Matrix rmview; using namespace std; using namespace scythe; /* Equal probability sampling; without-replacement case */ // pulled from R-2.0.1/src/main/random.c lines 352-364 // slightly modified by KQ 2/21/2005 // x: n array of original indices running from 0 to (n-1) // y: k array of samled indices // k: length of y (must be <= n) // n: length of x (must be >= k) template static void SampleNoReplace(const int k, int n, int *y, int *x, rng& stream){ for (int i = 0; i < n; i++) x[i] = i; for (int i = 0; i < k; i++) { int j = static_cast(n * stream.runif()); y[i] = x[j]; x[j] = x[--n]; } } // full conditional distribution for a single element of Lambda // this single element is Lambda(rowindex, colindex) static double Lambda_logfcd(const double& lam_ij, const Matrix& X, const Matrix<>& Lambda, const Matrix<>& theta, const double& delta0, const double& delta1, const Matrix<>& Lambda_prior_mean, const Matrix<>& Lambda_prior_prec, const Matrix<>& Lambda_ineq, const Matrix<>& theta_ineq, const double& k0, const double& k1, const double& c0, const double& d0, const double& c1, const double& d1, const int& rowindex, const int& colindex){ const int D = Lambda.cols(); // check to see if inequality constraint is satisfied and // evaluate prior double logprior = 0.0; if (Lambda_ineq(rowindex,colindex) * lam_ij < 0){ return log(0.0); } if (Lambda_prior_prec(rowindex,colindex) != 0){ logprior += lndnorm(lam_ij, Lambda_prior_mean(rowindex,colindex), sqrt(1.0 / Lambda_prior_prec(rowindex,colindex))); } // prior is uniform on hypersphere with radius 10 /* if (Lambda_ineq(rowindex,colindex) * lam_ij < 0){ return log(0.0); } double absqdist = 0.0; for (int i=0; i 100.0){ return log(0.0); } const double logprior = 0.0; */ // likelihood double loglike = 0.0; const int N = X.rows(); for (int i=0; i& X, const Matrix<>& Lambda, const Matrix<>& theta, const 
double& delta0, const double& delta1, const Matrix<>& Lambda_prior_mean, const Matrix<>& Lambda_prior_prec, const Matrix<>& Lambda_ineq, const Matrix<>& theta_ineq, const double& k0, const double& k1, const double& c0, const double& d0, const double& c1, const double& d1, const int& rowindex, const int& colindex){ const int D = Lambda.cols(); // evaluate prior if (theta_ineq(rowindex,colindex-1) * t_ij < 0){ return log(0.0); } const double logprior = lndnorm(t_ij, 0.0, 1.0); // prior is uniform on unit circle /* if (theta_ineq(rowindex,colindex-1) * t_ij < 0){ return log(0.0); } double tsqdist = 0.0; for (int i=0; i<(D-1); ++i){ if (i==(colindex-1)){ tsqdist += ::pow(t_ij, 2); } else{ tsqdist += ::pow(theta(rowindex,(i-1)), 2); } } if (tsqdist > 1.0){ return log(0.0); } const double logprior = 1.0; */ // likelihood double loglike = 0.0; const int K = X.cols(); for (int i=0; i& X, const Matrix<>& Lambda, const Matrix<>& theta, const double& junk, const double& delta1, const Matrix<>& Lambda_prior_mean, const Matrix<>& Lambda_prior_prec, const Matrix<>& Lambda_ineq, const Matrix<>& theta_ineq, const double& k0, const double& k1, const double& c0, const double& d0, const double& c1, const double& d1, const int& rowindex, const int& colindex){ // evaluate prior if (delta0 >=k0 || delta0 <=0){ return log(0.0); } const double logprior = lndbeta1(delta0 * (1.0/k0), c0, d0); // likelihood double loglike = 0.0; const int D = Lambda.cols(); const int N = X.rows(); const int K = X.cols(); for (int i=0; i& X, const Matrix<>& Lambda, const Matrix<>& theta, const double& delta0, const double& junk, const Matrix<>& Lambda_prior_mean, const Matrix<>& Lambda_prior_prec, const Matrix<>& Lambda_ineq, const Matrix<>& theta_ineq, const double& k0, const double& k1, const double& c0, const double& d0, const double& c1, const double& d1, const int& rowindex, const int& colindex){ // evaluate prior if (delta1 >=k1 || delta1 <=0){ return log(0.0); } const double logprior = lndbeta1(delta1 * (1.0/k1), c1, d1); // likelihood double loglike = 0.0; const int D = Lambda.cols(); const int N = X.rows(); const int K = X.cols(); for (int i=0; i static void doubling(double (*logfun)(const double&, const Matrix&, const Matrix<>&, const Matrix<>&, const double&, const double&, const Matrix<>&, const Matrix<>&, const Matrix<>&, const Matrix<>&, const double&, const double&, const double&, const double&, const double&, const double&, const int&, const int&), const Matrix& X, const Matrix<>& Lambda, const Matrix<>& theta, const double& delta0, const double& delta1, const Matrix<>& Lambda_prior_mean, const Matrix<>& Lambda_prior_prec, const Matrix<>& Lambda_ineq, const Matrix<>& theta_ineq, const double& k0, const double& k1, const double& c0, const double& d0, const double& c1, const double& d1, const int& rowindex, const int& colindex, const double& z, const double& w, const int& p, rng& stream, double& L, double& R, const int& param){ const double U = stream.runif(); double x0 = 0.0; if (param==0){ // Lambda x0 = Lambda(rowindex, colindex); } else if (param==1){ // theta x0 = theta(rowindex, colindex); } else if (param==2){ // delta0 x0 = delta0; } else if (param==3){ // delta1 x0 = delta1; } else { error("ERROR: param not in {0,1,2,3} in doubling()."); //Rprintf("\nERROR: param not in {0,1,2,3} in doubling().\n"); //exit(1); } L = x0 - w*U; R = L + w; int K = p; while (K > 0 && (z < logfun(L, X, Lambda, theta, delta0, delta1, Lambda_prior_mean, Lambda_prior_prec, Lambda_ineq, theta_ineq, k0, k1, c0, d0, c1, d1, rowindex, 
colindex) || z < logfun(R, X, Lambda, theta, delta0, delta1, Lambda_prior_mean, Lambda_prior_prec, Lambda_ineq, theta_ineq, k0, k1, c0, d0, c1, d1, rowindex, colindex))){ double V = stream.runif(); if (V < 0.5){ L = L - (R - L); } else { R = R + (R - L); } --K; } } // Radford Neal's (2000) stepping out procedure coded for a logdensity template static void StepOut(double (*logfun)(const double&, const Matrix&, const Matrix<>&, const Matrix<>&, const double&, const double&, const Matrix<>&, const Matrix<>&, const Matrix<>&, const Matrix<>&, const double&, const double&, const double&, const double&, const double&, const double&, const int&, const int&), const Matrix& X, const Matrix<>& Lambda, const Matrix<>& theta, const double& delta0, const double& delta1, const Matrix<>& Lambda_prior_mean, const Matrix<>& Lambda_prior_prec, const Matrix<>& Lambda_ineq, const Matrix<>& theta_ineq, const double& k0, const double& k1, const double& c0, const double& d0, const double& c1, const double& d1, const int& rowindex, const int& colindex, const double& z, const double& w, const int& m, rng& stream, double& L, double& R, const int& param){ const double U = stream.runif(); double x0 = 0.0; if (param==0){ // Lambda x0 = Lambda(rowindex, colindex); } else if (param==1){ // theta x0 = theta(rowindex, colindex); } else if (param==2){ // delta0 x0 = delta0; } else if (param==3){ // delta1 x0 = delta1; } else { error("ERROR: param not in {0,1,2,3} in StepOut()."); //Rprintf("\nERROR: param not in {0,1,2,3} in StepOut().\n"); //exit(1); } L = x0 - w*U; R = L + w; const double V = stream.runif(); int J = static_cast(m*V); int K = (m-1) - J; while (J > 0 && (z < logfun(L, X, Lambda, theta, delta0, delta1, Lambda_prior_mean, Lambda_prior_prec, Lambda_ineq, theta_ineq, k0, k1, c0, d0, c1, d1, rowindex, colindex))){ L = L - w; J = J - 1; } while (K > 0 && (z < logfun(R, X, Lambda, theta, delta0, delta1, Lambda_prior_mean, Lambda_prior_prec, Lambda_ineq, theta_ineq, k0, k1, c0, d0, c1, d1, rowindex, colindex))){ R = R + w; K = K - 1; } } // Radford Neal's (2000) Accept procedure coded for a logdensity static const bool Accept(double (*logfun)(const double&, const Matrix&, const Matrix<>&, const Matrix<>&, const double&, const double&, const Matrix<>&, const Matrix<>&, const Matrix<>&, const Matrix<>&, const double&, const double&, const double&, const double&, const double&, const double&, const int&, const int&), const Matrix& X, const Matrix<>& Lambda, const Matrix<>& theta, const double& delta0, const double& delta1, const Matrix<>& Lambda_prior_mean, const Matrix<>& Lambda_prior_prec, const Matrix<>& Lambda_ineq, const Matrix<>& theta_ineq, const double& k0, const double& k1, const double& c0, const double& d0, const double& c1, const double& d1, const int& rowindex, const int& colindex, const double& z, const double& w, const double& x0, const double& x1, const double& L, const double& R){ double Lhat = L; double Rhat = R; bool D = false; while ((Rhat - Lhat ) > 1.1 * w){ double M = (Lhat + Rhat) / 2.0; if ( (x0 < M && x1 >= M) || (x0 >= M && x1 < M)){ D = true; } if (x1 < M){ Rhat = M; } else { Lhat = M; } if (D && z >= logfun(Lhat, X, Lambda, theta, delta0, delta1, Lambda_prior_mean, Lambda_prior_prec, Lambda_ineq, theta_ineq, k0, k1, c0, d0, c1, d1, rowindex, colindex) && z >= logfun(Rhat, X, Lambda, theta, delta0, delta1, Lambda_prior_mean, Lambda_prior_prec, Lambda_ineq, theta_ineq, k0, k1, c0, d0, c1, d1, rowindex, colindex)){ return(false); } } return(true); } // Radford Neal's (2000) shrinkage 
procedure coded for a log density // assumes the doubling procedure has been used to find L and R template static double shrinkageDoubling(double (*logfun)(const double&, const Matrix&, const Matrix<>&, const Matrix<>&, const double&, const double&, const Matrix<>&, const Matrix<>&, const Matrix<>&, const Matrix<>&, const double&, const double&, const double&, const double&, const double&, const double&, const int&, const int&), const Matrix& X, const Matrix<>& Lambda, const Matrix<>& theta, const double& delta0, const double& delta1, const Matrix<>& Lambda_prior_mean, const Matrix<>& Lambda_prior_prec, const Matrix<>& Lambda_ineq, const Matrix<>& theta_ineq, const double& k0, const double& k1, const double& c0, const double& d0, const double& c1, const double& d1, const int& rowindex, const int& colindex, const double& z, const double& w, rng& stream, const double& L, const double& R, const int& param){ double Lbar = L; double Rbar = R; double x0; if (param==0){ // Lambda x0 = Lambda(rowindex, colindex); } else if (param==1){ // theta x0 = theta(rowindex, colindex); } else if (param==2){ // delta0 x0 = delta0; } else if (param==3){ // delta1 x0 = delta1; } else { error("ERROR: param not in {0,1,2,3} in shrinkageDoubling()."); //Rprintf("\nERROR: param not in {0,1,2,3} in shrinkageDoubling().\n"); //exit(1); } for (;;){ const double U = stream.runif(); const double x1 = Lbar + U*(Rbar - Lbar); if (z <= logfun(x1, X, Lambda, theta, delta0, delta1, Lambda_prior_mean, Lambda_prior_prec, Lambda_ineq, theta_ineq, k0, k1, c0, d0, c1, d1, rowindex, colindex) && Accept(logfun, X, Lambda, theta, delta0, delta1, Lambda_prior_mean, Lambda_prior_prec, Lambda_ineq, theta_ineq, k0, k1, c0, d0, c1, d1, rowindex, colindex, z, w, x0, x1, L, R)){ return(x1); } if (x1 < x0){ Lbar = x1; } else { Rbar = x1; } } // end infinite loop } // Radford Neal's (2000) shrinkage procedure coded for a log density // assumes the stepping out procedure has been used to find L and R template static double shrinkageStep(double (*logfun)(const double&, const Matrix&, const Matrix<>&, const Matrix<>&, const double&, const double&, const Matrix<>&, const Matrix<>&, const Matrix<>&, const Matrix<>&, const double&, const double&, const double&, const double&, const double&, const double&, const int&, const int&), const Matrix& X, const Matrix<>& Lambda, const Matrix<>& theta, const double& delta0, const double& delta1, const Matrix<>& Lambda_prior_mean, const Matrix<>& Lambda_prior_prec, const Matrix<>& Lambda_ineq, const Matrix<>& theta_ineq, const double& k0, const double& k1, const double& c0, const double& d0, const double& c1, const double& d1, const int& rowindex, const int& colindex, const double& z, const double& w, rng& stream, const double& L, const double& R, const int& param){ double Lbar = L; double Rbar = R; double x0; if (param==0){ // Lambda x0 = Lambda(rowindex, colindex); } else if (param==1){ // theta x0 = theta(rowindex, colindex); } else if (param==2){ // delta0 x0 = delta0; } else if (param==3){ // delta1 x0 = delta1; } else { error("ERROR: param not in {0,1,2,3} in shrinkageDoubling()."); //Rprintf("\nERROR: param not in {0,1,2,3} in shrinkageDoubling().\n"); //exit(1); } for (;;){ const double U = stream.runif(); const double x1 = Lbar + U*(Rbar - Lbar); if (z <= logfun(x1, X, Lambda, theta, delta0, delta1, Lambda_prior_mean, Lambda_prior_prec, Lambda_ineq, theta_ineq, k0, k1, c0, d0, c1, d1, rowindex, colindex) ){ return(x1); } if (x1 < x0){ Lbar = x1; } else { Rbar = x1; } } // end infinite loop } 
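// ---------------------------------------------------------------------------
// For orientation, a minimal self-contained sketch of how the helpers above
// combine into one univariate slice-sampling update in the sense of Neal
// (2003): draw an auxiliary level z under the log density at the current
// point, locate an interval [L, R] by stepping out, then draw from the slice
// by shrinkage.  The sketch is illustrative only: it uses simplified
// signatures (a plain std::function log density and a std::mt19937 generator)
// rather than the package's model-specific arguments and Scythe rng stream,
// and it is not called anywhere in this file.
// ---------------------------------------------------------------------------
#include <cmath>
#include <functional>
#include <random>

inline double slice_sample_step(double x0,
                                const std::function<double(double)>& logf,
                                double w,            // initial interval width
                                int m,               // max stepping-out steps
                                std::mt19937& gen) {
  std::uniform_real_distribution<double> runif(0.0, 1.0);
  // auxiliary level: log z = log f(x0) + log U,  U ~ Uniform(0, 1)
  const double z = logf(x0) + std::log(runif(gen));
  // stepping out (Neal 2003, Fig. 3)
  double L = x0 - w * runif(gen);
  double R = L + w;
  int J = static_cast<int>(std::floor(m * runif(gen)));
  int K = (m - 1) - J;
  while (J > 0 && z < logf(L)) { L -= w; --J; }
  while (K > 0 && z < logf(R)) { R += w; --K; }
  // shrinkage (Neal 2003, Fig. 5)
  for (;;) {
    const double x1 = L + runif(gen) * (R - L);
    if (z < logf(x1)) return x1;          // point lies in the slice: accept
    if (x1 < x0) L = x1; else R = x1;     // otherwise shrink toward x0
  }
}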
template void MCMCirtKdRob_impl(rng& stream, const Matrix& X, Matrix<>& Lambda, Matrix<>& theta, const Matrix<>& Lambda_eq, const Matrix<>& Lambda_ineq, const Matrix<>& theta_eq, const Matrix<>& theta_ineq, const Matrix<>& Lambda_prior_mean, const Matrix<>& Lambda_prior_prec, const int* burnin, const int* mcmc, const int* thin, const int* verbose, const int* method_step, const double* theta_w, const int* theta_p, const double* lambda_w, const int* lambda_p, const double* delta0_w, const int* delta0_p, const double* delta1_w, const int* delta1_p, const double * delta0start, const double* delta1start, const double* k0, const double* k1, const double* c0, const double* c1, const double* d0, const double* d1, const int* storeitem, const int* storeability, double* sampledata, const int* samplerow, const int* samplecol ){ // constants const int K = X.cols(); // number of items const int N = X.rows(); // number of subjects const int D = Lambda.cols(); // number of dimensions + 1 const int tot_iter = *burnin + *mcmc; const int nsamp = *mcmc / *thin; // const Matrix Lambda_free_indic = Matrix(K, D); //for (int i=0; i<(K*D); ++i){ // if (Lambda_eq[i] == -999) Lambda_free_indic[i] = 1.0; //} // starting values // Matrix theta = Matrix(N, D-1); Matrix<> onesvec = ones(N, 1); onesvec = onesvec * -1.0; theta = cbind(onesvec, theta); double delta0 = *delta0start; double delta1 = *delta1start; // index arrays (used for the random order of the sampling) // OLD //int K_array[K]; //int N_array[N]; //int D_array[D]; //int Dm1_array[D-1]; //int K_inds_perm[K]; //int N_inds_perm[N]; //int D_inds_perm[D]; //int Dm1_inds_perm[D-1]; // NEW int* K_array = new int[K]; int* N_array = new int[N]; int* D_array = new int[D]; int* Dm1_array = new int[D-1]; int* K_inds_perm = new int[K]; int* N_inds_perm = new int[N]; int* D_inds_perm = new int[D]; int* Dm1_inds_perm = new int[D-1]; // storage matrices (row major order) Matrix<> Lambda_store; if (storeitem[0]==1){ Lambda_store = Matrix(nsamp, K*D); } Matrix<> theta_store; if (*storeability==1){ theta_store = Matrix(nsamp, N*D); } Matrix<> delta0_store(nsamp, 1); Matrix<> delta1_store(nsamp, 1); /////////////////// // Slice Sampler // /////////////////// int count = 0; for (int iter=0; iter < tot_iter; ++iter){ double L, R, w, funval, z; int p; // sample theta int param = 1; SampleNoReplace(N, N, N_inds_perm, N_array, stream); for (int ii=0; ii 0 && iter % verbose[0] == 0){ Rprintf("\n\nMCMCirtKdRob iteration %i of %i \n", (iter+1), tot_iter); } // store results if ((iter >= burnin[0]) && ((iter % thin[0]==0))) { // store Lambda if (storeitem[0]==1){ //Matrix Lambda_store_vec = reshape(Lambda,1,K*D); //for (int l=0; l theta_store_vec = reshape(theta, 1, N*D); //for (int l=0; l output = delta0_store; output = cbind(output, delta1_store); if (*storeitem == 1){ output = cbind(output, Lambda_store); } if(*storeability == 1) { output = cbind(output, theta_store); } int size = *samplerow * *samplecol; for (int i=0; i X(*Xrow, *Xcol, Xdata); Matrix Lambda(*Lamstartrow, *Lamstartcol, Lamstartdata); Matrix theta(*thetstartrow, *thetstartcol, thetstartdata); const Matrix Lambda_eq(*Lameqrow, *Lameqcol, Lameqdata); const Matrix Lambda_ineq(*Lamineqrow, *Lamineqcol, Lamineqdata); const Matrix theta_eq(*theteqrow, *theteqcol, theteqdata); const Matrix theta_ineq(*thetineqrow, *thetineqcol, thetineqdata); const Matrix Lambda_prior_mean(*Lampmeanrow, *Lampmeancol, Lampmeandata); const Matrix Lambda_prior_prec(*Lampprecrow, *Lamppreccol, Lampprecdata); 
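// The call below follows the package's usual dispatch pattern: the raw
// double*/int* buffers passed in from R's .C() interface have just been
// wrapped in Scythe Matrix objects, and MCMCPACK_PASSRNG2MODEL (defined in
// MCMCrng.h) presumably instantiates the templated sampler with the selected
// random-number generator (Mersenne Twister or L'Ecuyer, per the uselecuyer,
// seedarray and lecuyerstream arguments) before running it.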
MCMCPACK_PASSRNG2MODEL(MCMCirtKdRob_impl, X, Lambda, theta, Lambda_eq, Lambda_ineq, theta_eq, theta_ineq, Lambda_prior_mean, Lambda_prior_prec, burnin, mcmc, thin, verbose, method_step, theta_w, theta_p, lambda_w, lambda_p, delta0_w, delta0_p, delta1_w, delta1_p, delta0start, delta1start, k0, k1, c0, c1, d0, d1, storeitem, storeability, sampledata, samplerow, samplecol ); } } #endif MCMCpack/src/MCMCirtKdHet.cc0000644000176000001440000003054412140061657015144 0ustar ripleyusers////////////////////////////////////////////////////////////////////////// // MCMCordfactanal.cc is C++ code to estimate an ordinal data // factor analysis model // // Andrew D. Martin // Dept. of Political Science // Washington University in St. Louis // admartin@wustl.edu // // Kevin M. Quinn // Dept. of Government // Harvard University // kevin_quinn@harvard.edu // // This software is distributed under the terms of the GNU GENERAL // PUBLIC LICENSE Version 2, June 1991. See the package LICENSE // file for more information. // // revised version of older MCMCordfactanal // 7/16/2004 KQ // updated to new version of Scythe ADM 7/24/2004 // fixed a bug pointed out by Alexander Raach 1/16/2005 KQ // // Copyright (C) 2003-2007 Andrew D. Martin and Kevin M. Quinn // Copyright (C) 2007-present Andrew D. Martin, Kevin M. Quinn, // and Jong Hee Park ////////////////////////////////////////////////////////////////////////// #ifndef MCMCIRTKDHET_CC #define MCMCIRTKDHET_CC #include #include "matrix.h" #include "algorithm.h" #include "distributions.h" #include "stat.h" #include "la.h" #include "ide.h" #include "smath.h" #include "MCMCrng.h" #include "MCMCfcds.h" #include // needed to use Rprintf() #include // needed to allow user interrupts typedef Matrix rmview; using namespace std; using namespace scythe; template void MCMCirtKdHet_impl(rng& stream, const Matrix& X, Matrix<>& Lambda, const Matrix<>& Lambda_eq, const Matrix<>& Lambda_ineq, const Matrix<>& Lambda_prior_mean, const Matrix<>& Lambda_prior_prec, const double sigmapriorc, const double sigmapriord, bool storelambda, bool storescores, bool storesigma, unsigned int burnin, unsigned int mcmc, unsigned int thin, unsigned int verbose, Matrix<>& output) { // constants const unsigned int K = X.cols(); // number of manifest variables const unsigned int N = X.rows(); // number of observations const unsigned int D = Lambda.cols(); // # of factors (incl constant) const unsigned int tot_iter = burnin + mcmc; const unsigned int nsamp = mcmc / thin; const Matrix<> I = eye(D-1); const Matrix Lambda_free_indic(K, D); for (unsigned int i = 0; i < (K * D); ++i) if (Lambda_eq(i) == -999) Lambda_free_indic(i) = true; const Matrix<> Psi = eye(K); const Matrix<> Psi_inv = eye(K); const double c0 = sigmapriorc; const double d0 = sigmapriord; //Rprintf("Switches are %i %i %i\n", storelambda, storescores, storesigma); // starting values for phi, Xstar, and sigma Matrix<> phi(N, D-1); phi = cbind(ones(N,1), phi); Matrix<> sigma2 = ones(N,1); Matrix<> sigma = ones(N,1); Matrix<> Xstar(N, K); double sigma_norm = 1; // storage matrices (row major order) Matrix<> Lambda_store; if (storelambda){ Lambda_store = Matrix<>(nsamp, K*D); } Matrix<> phi_store; if (storescores){ phi_store = Matrix<>(nsamp, N*D); } Matrix<> sigma_store; if (storesigma){ sigma_store = Matrix<>(nsamp, N*1); } /////////////////// // Gibbs Sampler // /////////////////// int count = 0; for (unsigned int iter = 0; iter < tot_iter; ++iter) { // sample Xstar for (unsigned int i = 0; i < N; ++i) { Matrix<> X_mean = Lambda * 
t(phi(i,_)); for (unsigned int j = 0; j < K; ++j) { if (X(i,j) == -999) // if missing Xstar(i,j) = stream.rnorm(X_mean[j], sigma(i,0)); if (X(i,j) == 0) // if not missing Xstar(i,j) = stream.rtnorm_combo(X_mean[j], sigma2(i,0), -300, 0); if (X(i,j) == 1) // if not missing Xstar(i,j) = stream.rtnorm_combo(X_mean[j], sigma2(i,0), 0, 300); } } // sample phi Matrix<> Lambda_const = Lambda(_,0); Matrix<> Lambda_rest = Lambda(0, 1, K-1, D-1); for (unsigned int i=0; i phi_post_var = invpd(I + crossprod(Lambda_rest) / sigma2(i,0) ); Matrix<> phi_post_C = cholesky(phi_post_var); Matrix<> phi_post_mean = phi_post_var * (t(Lambda_rest) * (t(Xstar(i,_))-Lambda_const)) / sigma2(i,0); Matrix<> phi_samp = gaxpy(phi_post_C, stream.rnorm(D-1, 1, 0, 1), phi_post_mean); for (unsigned int j=0; j<(D-1); ++j) phi(i,j+1) = phi_samp[j]; } // sample Lambda for (unsigned int i=0; i free_indic = t(Lambda_free_indic(i,_)); const Matrix not_free_indic = 1 - free_indic; if (sumc(free_indic)[0] > 0 && sumc(not_free_indic)[0] > 0){ // both constrnd & unconstrnd const Matrix<> phifree_i = t(selif(t(phi), free_indic)); const Matrix<> mulamfree_i = selif(t(Lambda_prior_mean(i,_)), free_indic); // prior mean const Matrix<> hold = selif(t(Lambda_prior_prec(i,_)), free_indic); Matrix<> sig2lamfree_inv_i = eye(hold.rows()); // prior prec for (unsigned int j=0; j<(hold.rows()); ++j) sig2lamfree_inv_i(j,j) = hold[j]; const Matrix<> Lambdacon_i = selif(t(Lambda(i,_)), not_free_indic); const Matrix<> phicon_i = t(selif(t(phi), not_free_indic)); const Matrix<> X_i = Xstar(_,i); const Matrix<> newX_i = gaxpy((-1.0*phicon_i), Lambdacon_i, X_i); for (unsigned int R=0; R Lam_post_var = invpd(sig2lamfree_inv_i + Psi_inv(i,i) * crossprod(phifree_i)); const Matrix<> Lam_post_C = cholesky(Lam_post_var); const Matrix<> Lam_post_mean = Lam_post_var * (sig2lamfree_inv_i * mulamfree_i + Psi_inv(i,i) * t(phifree_i) * newX_i); Matrix Lambdafree_i = gaxpy(Lam_post_C, stream.rnorm(hold.rows(), 1, 0, 1), Lam_post_mean); // check to see if inequality constraints hold const Matrix<> Lambda_ineq_vec = Lambda_ineq(i,_); double ineq_holds = 0; int Lam_count = 0; for (unsigned int j=0; j prodcheck = Lambda_ineq_vec[j]*Lambdafree_i[Lam_count]; test = std::min(test, prodcheck[0]); ++Lam_count; } } ineq_holds = test; } // put draw into Lambda Lam_count = 0; for (unsigned int j=0; j 0){ // just unconstrained const Matrix<> phifree_i = t(selif(t(phi), free_indic)); const Matrix<> mulamfree_i = selif(t(Lambda_prior_mean(i,_)), free_indic); // prior mean const Matrix<> hold = selif(t(Lambda_prior_prec(i,_)), free_indic); Matrix<> sig2lamfree_inv_i = eye(hold.rows()); // prior prec for (unsigned int j=0; j sX_i = ones(N,1); for (unsigned int R=0; R Lam_post_var = invpd(sig2lamfree_inv_i + Psi_inv(i,i) * crossprod(phifree_i)); // const Matrix<> Lam_post_C = cholesky(Lam_post_var); const Matrix<> Lam_post_mean = Lam_post_var * (sig2lamfree_inv_i * mulamfree_i + Psi_inv(i,i) * t(phifree_i) * sX_i); // Matrix<> Lambdafree_i = gaxpy(Lam_post_C, stream.rnorm(hold.rows(), 1, 0, 1), Lam_post_mean); // check to see if inequality constraints hold Matrix<> Lambda_ineq_vec = Lambda_ineq(i,_); double ineq_holds = 0; for (unsigned int j=0; j phi_i = phi(i,_); Matrix<> Xstar_i = Xstar(i,_); const Matrix<> e = gaxpy(phi_i, (-1*t(Lambda)), Xstar_i); const Matrix<> SSE = crossprod (t(e)); const double c_post = (c0 + K) * 0.5; const double d_post = (d0 + SSE[0]) * 0.5; sigma2(i,0) = stream.rigamma(c_post, d_post); } Matrix<> sigma = sqrt(sigma2); // store as sigma sigma_norm = N / 
sum(pow(sigma,-1)); // renormalize inverse sigma to sum to N for (unsigned int i=0; i 0 && iter % verbose == 0){ //Rprintf("xstar 22, xstar 23, xstar 24, phi, sigma = \n"); //for (int i=0; i= burnin) && ((iter % thin==0))) { // store Lambda if (storelambda==1) rmview(Lambda_store(count, _)) = Lambda; // store phi if (storescores==1) rmview(phi_store(count, _)) = phi; // store sigma if (storesigma==1) rmview(sigma_store(count, _)) = sigma; count++; } // allow user interrupts R_CheckUserInterrupt(); } // end MCMC loop if (storelambda == 1){ output = Lambda_store; if (storescores == 1) output = cbind(output, phi_store); if (storesigma == 1) output = cbind(output, sigma_store); } if (storelambda == 0) { if (storescores == 1){ output = phi_store; if (storesigma == 1) output = cbind(output, sigma_store); } if (storescores == 0) { output = sigma_store; } } } extern "C"{ // function called by R to fit model void irtKdHetpost(double *samdata, const int *samrow, const int *samcol, const int *Xdata, const int *Xrow, const int *Xcol, const int *burnin, const int *mcmc, const int *thin, const int *uselecuyer, const int *seedarray, const int *lecuyerstream, const int *verbose, const double *Lamstartdata, const int *Lamstartrow, const int *Lamstartcol, const double *Lameqdata, const int *Lameqrow, const int *Lameqcol, const double *Lamineqdata, const int *Lamineqrow, const int *Lamineqcol, const double *Lampmeandata, const int *Lampmeanrow, const int *Lampmeancol, const double *Lampprecdata, const int *Lampprecrow, const int *Lamppreccol, const int *storelambda, const int *storescores, const int *storesigma, const double *sigmapriorc, const double *sigmapriord) { // put together matrices const Matrix X(*Xrow, *Xcol, Xdata); Matrix<> Lambda(*Lamstartrow, *Lamstartcol, Lamstartdata); const Matrix<> Lambda_eq(*Lameqrow, *Lameqcol, Lameqdata); const Matrix<> Lambda_ineq(*Lamineqrow, *Lamineqcol, Lamineqdata); const Matrix<> Lambda_prior_mean(*Lampmeanrow, *Lampmeancol, Lampmeandata); const Matrix<> Lambda_prior_prec(*Lampprecrow, *Lamppreccol, Lampprecdata); // return output Matrix output; MCMCPACK_PASSRNG2MODEL(MCMCirtKdHet_impl, X, Lambda, Lambda_eq, Lambda_ineq, Lambda_prior_mean, Lambda_prior_prec, *sigmapriorc, *sigmapriord, *storelambda, *storescores, *storesigma, *burnin, *mcmc, *thin, *verbose, output); for (unsigned int i = 0; i < output.size(); ++i) samdata[i] = output(i); } } #endif MCMCpack/src/MCMCirtHier1d.cc0000644000176000001440000002753612140061657015270 0ustar ripleyusers// MCMCirtHier1d.cc is C++ code to estimate a one-dimensional item response // theory model with subject-level predictors beta and common variance // sigma2. 
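//
// Model sketch, as inferred from the sampler below (the package's R help
// file for MCMCirtHier1d is the authoritative statement; signs and prior
// parameterizations here are read off the code, not independently verified):
//
//   z_{ij}  = -a_j + b_j * theta_i + eps_{ij},   eps_{ij} ~ N(0, 1)
//   y_{ij}  = 1{ z_{ij} > 0 }
//   theta_i = x_i' beta + e_i,                   e_i ~ N(0, sigma2)
//   (a_j, b_j)' ~ N(ab0, AB0^{-1}),   beta ~ N(b0, B0^{-1}),
//   sigma2 ~ Inverse-Gamma(c0/2, d0/2)
//
// When px is set, the latent draws are made under a parameter-expanded
// working variance alpha (Liu and Wu 1999) and rescaled, which can improve
// mixing of the latent-data step.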
// // ADM and KQ 1/15/2003 // ADM 7/28/2004 [updated to new Scythe version] // completely rewritten and optimized for the 1-d case 8/2/2004 KQ // storage changed to save memory KQ 1/27/2006 // DBP 7/3/07 [ported to scythe 1.0.x] // // MJM added second level and marginal likelihood thereof // MJM implemented parameter expansion (alpha) for latent variance 2008-11-18 #ifndef MCMCIRTHIER1D_CC #define MCMCIRTHIER1D_CC #include "MCMCrng.h" #include "matrix.h" #include "distributions.h" #include "stat.h" #include "la.h" #include "ide.h" #include "smath.h" #include "rng.h" #include "mersenne.h" #include "lecuyer.h" #include // needed to use Rprintf() #include // needed to allow user interrupts #include "MCMCfcds.h" using namespace std; using namespace scythe; /* This fn basically defined in MCMCregress.cc and should be moved to Scythe proper */ static double lndigamma(double theta, double a, double b) { double logf = a * log(b) - lngammafn(a) + -(a+1) * log(theta) + -b/theta; return logf; //pow(b, a) / gammafn(a) * pow(theta, -(a+1)) * exp(-b/theta); } // Parameter-Expanded Latent Data Update for // 1d IRT. mjm, 2008-11-18 template double irt_W_update(Matrix<>& Z, const Matrix<>& X, const Matrix<>& theta, const Matrix<>& eta, const double& alpha, const double& px_a0, const double& px_b0, const Matrix<>& etahat, const Matrix<>& thetahat, rng& stream) { // define constants const unsigned int J = theta.rows(); const unsigned int K = eta.rows(); double RSS=0.0; int df=0; // perform update from truncated Normal / standard Normals for (unsigned int i = 0; i < J; ++i) { for (unsigned int j = 0; j < K; ++j){ const double Z_mean = alpha*( -eta(j,0) + theta(i) * eta(j,1) ); const double Zhat = ( -etahat(j,0) + thetahat(i) * etahat(j,1) ); if (X(i,j) == 1) { Z(i,j) = stream.rtbnorm_combo(Z_mean, alpha, 0); ++df; } else if (X(i,j) == 0) { Z(i,j) = stream.rtanorm_combo(Z_mean, alpha, 0); ++df; } else { Z(i,j) = stream.rnorm(Z_mean, std::pow(alpha, 2.0)); } Z(i,j) /= alpha; const double e = Z(i,j) - Zhat; RSS += std::pow(e , 2.0); } } // Liu and Wu 1999, p1272: // draw a0 ~ IG(a0,b0). Then draw a1 ~ IG(nu0+a0RSS/2, a0+n/2) const double c_post = (px_a0 + df) * 0.5; const double d_post = (px_b0 + RSS) * 0.5; double alpha1 = stream.rigamma(c_post,d_post); //Rprintf("\nRSS: %5f alpha0: %5f alpha1: %5f\n",RSS,alpha,alpha1); return(std::sqrt(alpha1/alpha)); } /* template void hirt_level0_metrop (Matrix<>& theta, Matrix<>& thetahat, const Matrix<>& Z, const Matrix<>& eta, const Matrix<>& beta, const Matrix<>& Xj, const double& sigma2, const double& alpha, rng& stream){ //construct proposal sum(Zjk ~N( eta1k*(-eta0k + thetaj), alpha? ) ) //proposal logdensity //current logdensity //exp( prop - cur) //runif & accept if > ratio //note the acceptance } */ /* MCMCirt1d implementation. 
*/ template void MCMCirtHier1d_impl (rng& stream, const Matrix& X, Matrix<>& theta, Matrix<>& eta, Matrix<>& thetahat, Matrix<>& etahat, const Matrix<>& ab0, const Matrix<>& AB0, const Matrix<>& Xj, Matrix<>& beta, const Matrix<>& b0, const Matrix<>& B0, const double c0, const double d0, unsigned int burnin, unsigned int mcmc, unsigned int thin, unsigned int verbose, bool storea, bool storei, double* sampledata, unsigned int samplesize, bool chib, double* logmarglike, bool px, const double px_a0, const double px_b0, bool metromix ) { // constants const unsigned int J = X.rows(); // # subjects (justices, legislators) const unsigned int K = X.cols(); // number of items (cases, roll calls) const unsigned int L = Xj.cols(); // covariates Xj for mutheta const unsigned int tot_iter = burnin + mcmc; const unsigned int nsamp = mcmc / thin; // storage matrices (col major order) Matrix<> theta_store; Matrix<> eta_store; Matrix<> beta_store(nsamp,L); Matrix<> sigma2_store(nsamp,1); if (storea) theta_store = Matrix<>(nsamp, J); if (storei) eta_store = Matrix<>(nsamp, K*2); // starting values Matrix<> Z(J, K); // pre-compute const Matrix<> AB0ab0 = AB0 * ab0; const Matrix<> XpX = crossprod(Xj); Matrix<> XpY; double alpha; if(!px) { alpha = 1.0; } else { alpha = stream.rigamma(px_a0,px_b0); } double sigma2 = NormIGregress_sigma2_draw ( Xj, theta, beta, c0, d0, stream); unsigned int count = 0; // MCMC sampling occurs in this for loop for (unsigned int iter = 0; iter < tot_iter; ++iter){ // sample latent data (Z) OR parameter-expanded data (W) if(!px) { irt_Z_update1(Z, X, theta,eta,stream); } else { // alpha here is the post variance of W alpha = irt_W_update(Z, X, theta,eta,alpha,px_a0,px_b0, etahat,thetahat,stream); } // Following Tierney(1994) it would be nice to 'restart' the chain // using a metropolis jump on the entire level0, something like // once every thin*10 iterations or so. if( metromix && (iter % (thin*10) == 0)) { // hirt_level0_metrop(theta, thetahat, Z, eta, etahat, // beta, Xj, sigma2, alpha, stream); } else { // sample ability (ideal points) (theta) hirt_theta_update1(theta, thetahat, Z, eta, beta, Xj, sigma2, alpha, stream); // sample item (case, bill) parameters (eta) hirt_eta_update1(eta, etahat, Z, theta, AB0, AB0ab0, alpha, stream); } XpY = t(Xj) * theta; beta = NormNormregress_beta_draw(XpX, XpY, b0, B0, sigma2, stream); // update level2 sigma2 sigma2 = NormIGregress_sigma2_draw (Xj, theta, beta, c0, d0, stream); // print results to screen if (verbose > 0 && iter % verbose == 0) { Rprintf("\n\nMCMCirt1d iteration %i of %i \n", (iter+1), tot_iter); //Rprintf("theta = \n"); //for (int j=0; j= burnin) && ((iter % thin == 0))) { // store ideal points if (storea) theta_store(count, _) = theta; // store bill parameters if (storei) eta_store(count, _) = t(eta); beta_store(count, _) = t(beta); sigma2_store(count,0) = sigma2; // store beta count++; } R_CheckUserInterrupt(); // allow user interrupts } // end Gibbs loop // return output Matrix<> output; if(! storei && storea) { // only theta output = theta_store; } else if (storei && ! storea){ // only eta output = eta_store; } else { // everything output = cbind(theta_store, eta_store); } output = cbind(output, beta_store); // always return beta, output = cbind(output, sigma2_store);// and sigma2. 
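// ---------------------------------------------------------------------------
// The chib == 1 block below estimates the log marginal likelihood via the
// basic marginal likelihood identity of Chib (1995),
//
//   log m(y) = log f(y | theta*) + log pi(theta*) - log pihat(theta* | y),
//
// evaluated at theta* = (beta*, sigma2*) set to the posterior means of the
// stored draws.  The posterior ordinate log pihat(theta* | y) is estimated
// by averaging full-conditional densities over a second, reduced set of
// Gibbs scans.  (This is a summary in generic Chib-style notation, not
// identifiers from this file.)
// ---------------------------------------------------------------------------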
for (unsigned int i = 0; i < samplesize; ++i) sampledata[i] = output[i]; // BEGIN MARGINAL LIKELIHOOD COMPUTATION if (chib == 1) { // marginal likelihood calculation stuff starts here const double sigma2star = meanc(sigma2_store)(0); const Matrix<> thetastar = t(meanc(theta_store)); Matrix<> betastar = t(meanc(beta_store)); double sigma2fcdsum = 0.0; XpY = t(Xj) * thetastar; // second set of Gibbs scans for (unsigned int iter = 0; iter < tot_iter; ++iter) { double sigma2 = NormIGregress_sigma2_draw (Xj, thetastar, beta, c0, d0, stream); beta = NormNormregress_beta_draw (XpX, XpY, b0, B0, sigma2, stream); const Matrix<> e = gaxpy(Xj, (-1*beta), thetastar); const Matrix<> SSE = crossprod (e); const double c_post = (c0 + X.rows ()) * 0.5; const double d_post = (d0 + SSE(0)) * 0.5; sigma2fcdsum += lndigamma(sigma2star, c_post, d_post); // print output to stdout if(verbose > 0 && iter % verbose == 0) { Rprintf("\n\nMCMCregress (reduced) iteration %i of %i \n", (iter+1), tot_iter); } R_CheckUserInterrupt(); // allow user interrupts } // end MCMC loop double sigma2fcdmean = sigma2fcdsum / static_cast(tot_iter); const double sig2_inv = 1.0 / sigma2star; const Matrix<> sig_beta = invpd (B0 + XpX * sig2_inv); Matrix<> betahat = sig_beta * gaxpy(B0, b0, XpY*sig2_inv); double logbetafcd = 0.0; for (unsigned int i=0; i xi = Xj * betastar; double loglike = 0.0; // is there a reason to do this and not lndmvn? for (unsigned int i = 0; i < L; ++i) { loglike += lndnorm(thetastar(i), xi(i), sigmastar); } Rprintf("logLike: %10.5f\n",loglike); // calculate log prior ordinate const double logpriorsig = (lndigamma(sigma2star, c0/2.0, d0/2.0)); double logpriorbeta = lndmvn(betastar, b0, invpd(B0)); if (L==1) { logpriorbeta = lndnorm(betastar[0], b0[0], 1.0/B0(0)); } const double logprior = logpriorsig+logpriorbeta; Rprintf("logPrior: %10.5f\n",logprior); // put pieces together and print the marginal likelihood logmarglike[0] = loglike + logprior - logbetafcd - (sigma2fcdmean); Rprintf("logM: %10.5f\n",logmarglike[0]); } } extern "C" { void MCMCirtHier1d(double* sampledata, const int* samplerow, const int* samplecol, const int* Xdata, const int* Xrow, const int* Xcol, const int* burnin, const int* mcmc, const int* thin, const int *uselecuyer, const int *seedarray, const int *lecuyerstream, const int* verbose, const double* thetastartdata, const int* thetastartrow, const int* thetastartcol, const double* astartdata, const int* astartrow, const int* astartcol, const double* bstartdata, const int* bstartrow, const int* bstartcol, const double* ab0data, const int* ab0row, const int* ab0col, const double* AB0data, const int* AB0row, const int* AB0col, const double* Xjdata, const int* Xjrow, const int* Xjcol, const double* betastartdata, const int* betastartrow, const int* betastartcol, const double* b0data, const int* b0row, const int* b0col, const double* B0data, const int* B0row, const int* B0col, const double* c0, const double* d0, const int* storei, const int* storea, double* logmarglikeholder, const int* chib, const int* px, const double* px_a0, const double* px_b0) { // put together matrices const Matrix X(*Xrow, *Xcol, Xdata); Matrix<> theta(*thetastartrow, *thetastartcol, thetastartdata); Matrix<>thetahat(*thetastartrow, *thetastartcol, thetastartdata); Matrix<> a(*astartrow, *astartcol, astartdata); Matrix<> b(*bstartrow, *bstartcol, bstartdata); const Matrix<> ab0(*ab0row, *ab0col, ab0data); const Matrix<> AB0(*AB0row, *AB0col, AB0data); Matrix<> eta = cbind(a,b); Matrix<> etahat = cbind(a,b); const Matrix<> 
Xj(*Xjrow, *Xjcol, Xjdata); Matrix<> beta(*betastartrow, *betastartcol, betastartdata); const Matrix<> b0(*b0row,*b0col,b0data); const Matrix<> B0(*B0row,*B0col,B0data); const int samplesize = (*samplerow) * (*samplecol); const bool metromix = 0; MCMCPACK_PASSRNG2MODEL(MCMCirtHier1d_impl, X, theta, eta, thetahat, etahat, ab0, AB0, Xj, beta, b0, B0, *c0, *d0, *burnin, *mcmc, *thin, *verbose, *storea, *storei, sampledata, samplesize, *chib, logmarglikeholder, *px, *px_a0, *px_b0, metromix); } } #endif MCMCpack/src/MCMCirt1d.cc0000644000176000001440000001204612140061657014446 0ustar ripleyusers////////////////////////////////////////////////////////////////////////// // MCMCirt1d.cc is C++ code to estimate a one-dimensional item response // theory model. // // ADM and KQ 1/15/2003 // ADM 7/28/2004 [updated to new Scythe version] // completely rewritten and optimized for the 1-d case 8/2/2004 KQ // storage changed to save memory KQ 1/27/2006 // DBP 7/3/07 [ported to scythe 1.0.x] // // Copyright (C) 2003-2007 Andrew D. Martin and Kevin M. Quinn // Copyright (C) 2007-present Andrew D. Martin, Kevin M. Quinn, // and Jong Hee Park ////////////////////////////////////////////////////////////////////////// #ifndef MCMCIRT1D_CC #define MCMCIRT1D_CC #include "MCMCrng.h" #include "MCMCfcds.h" #include "matrix.h" #include "distributions.h" #include "stat.h" #include "la.h" #include "ide.h" #include "smath.h" #include "rng.h" #include "mersenne.h" #include "lecuyer.h" #include // needed to use Rprintf() #include // needed to allow user interrupts using namespace std; using namespace scythe; /* MCMCirt1d implementation. */ template void MCMCirt1d_impl (rng& stream, const Matrix& X, Matrix<>& theta, Matrix<>& eta, const Matrix<>& ab0, const Matrix<>& AB0, const Matrix<>& theta_eq, const Matrix<>& theta_ineq, double t0, double T0, unsigned int burnin, unsigned int mcmc, unsigned int thin, unsigned int verbose, bool storea, bool storei, double* sampledata, unsigned int samplesize) { // constants const unsigned int J = X.rows(); // # subjects (justices, legislators) const unsigned int K = X.cols(); // number of items (cases, roll calls) const unsigned int tot_iter = burnin + mcmc; const unsigned int nsamp = mcmc / thin; // storage matrices (col major order) Matrix<> theta_store; Matrix<> eta_store; if (storea) theta_store = Matrix<>(nsamp, J); if (storei) eta_store = Matrix<>(nsamp, K*2); // starting values Matrix<> Z(J, K); // pre-compute const Matrix<> AB0ab0 = AB0 * ab0; unsigned int count = 0; // MCMC sampling occurs in this for loop for (unsigned int iter = 0; iter < tot_iter; ++iter){ // sample latent utilities (Z) irt_Z_update1(Z, X, theta, eta, stream); // sample item (case, bill) parameters (eta) irt_eta_update1(eta, Z, theta, AB0, AB0ab0, stream); // sample ability (ideal points) (theta) irt_theta_update1(theta, Z, eta, t0, T0, theta_eq, theta_ineq, stream); // print results to screen if (verbose > 0 && iter % verbose == 0) { Rprintf("\n\nMCMCirt1d iteration %i of %i \n", (iter+1), tot_iter); //Rprintf("theta = \n"); //for (int j=0; j= burnin) && ((iter % thin == 0))) { // store ideal points if (storea) theta_store(count, _) = theta; // store bill parameters if (storei) eta_store(count, _) = t(eta); count++; } R_CheckUserInterrupt(); // allow user interrupts } // end Gibbs loop // return output Matrix<> output; if(! storei && storea) { output = theta_store; } else if (storei && ! 
storea){ output = eta_store; } else { output = cbind(theta_store, eta_store); } for (unsigned int i = 0; i < samplesize; ++i) sampledata[i] = output[i]; } extern "C" { void MCMCirt1d(double* sampledata, const int* samplerow, const int* samplecol, const int* Xdata, const int* Xrow, const int* Xcol, const int* burnin, const int* mcmc, const int* thin, const int *uselecuyer, const int *seedarray, const int *lecuyerstream, const int* verbose, const double* thetastartdata, const int* thetastartrow, const int* thetastartcol, const double* astartdata, const int* astartrow, const int* astartcol, const double* bstartdata, const int* bstartrow, const int* bstartcol, const double* t0, const double* T0, const double* ab0data, const int* ab0row, const int* ab0col, const double* AB0data, const int* AB0row, const int* AB0col, const double* thetaeqdata, const int* thetaeqrow, const int* thetaeqcol, const double* thetaineqdata, const int* thetaineqrow, const int* thetaineqcol, const int* storei, const int* storea) { // put together matrices const Matrix X(*Xrow, *Xcol, Xdata); Matrix<> theta(*thetastartrow, *thetastartcol, thetastartdata); Matrix<> alpha(*astartrow, *astartcol, astartdata); Matrix<> beta(*bstartrow, *bstartcol, bstartdata); const Matrix<> ab0(*ab0row, *ab0col, ab0data); const Matrix<> AB0(*AB0row, *AB0col, AB0data); const Matrix<> theta_eq(*thetaeqrow, *thetaeqcol, thetaeqdata); const Matrix<> theta_ineq(*thetaineqrow, *thetaineqcol, thetaineqdata); Matrix<> eta = cbind(alpha, beta); const int samplesize = (*samplerow) * (*samplecol); MCMCPACK_PASSRNG2MODEL(MCMCirt1d_impl, X, theta, eta, ab0, AB0, theta_eq, theta_ineq, *t0, *T0, *burnin, *mcmc, *thin, *verbose, *storea, *storei, sampledata, samplesize); } } #endif MCMCpack/src/MCMCintervention.cc0000644000176000001440000015202112140061657016145 0ustar ripleyusers//////////////////////////////////////////////////////////////////// // MCMCintervention.cc is a C++ code to estimate // linear regression changepoint model // // Jong Hee Park // Department of Political Science and International Relations // Seoul National University // jongheepark@snu.ac.kr // // Written 03/03/2009 //////////////////////////////////////////////////////////////////// #ifndef MCMCREGRESSCHANGE_CC #define MCMCREGRESSCHANGE_CC #include "MCMCrng.h" #include "MCMCfcds.h" #include "matrix.h" #include "distributions.h" #include "stat.h" #include "la.h" #include "ide.h" #include "smath.h" #include "rng.h" #include "mersenne.h" #include "lecuyer.h" #include // needed to use Rprintf() #include // needed to allow user interrupts using namespace std; using namespace scythe; double lndinvgamma_pjh (const double x, const double shape, const double scale){ double log_density = shape *::log(scale) - lngammafn(shape) - (shape + 1) * ::log(x) - (scale/x); return (log_density); } template Matrix gaussian_state_fixedBeta_sampler(rng& stream, const int m, const Matrix& Y, const Matrix& X, const Matrix& beta, const Matrix& Sigma, const Matrix& P){ const int ns = m + 1; const int n = Y.rows(); Matrix F(n, ns); Matrix pr1(ns, 1); pr1[0] = 1; Matrix py(ns, 1); Matrix pstyt1(ns, 1); Matrix s(n, 1); // holder for state variables Matrix ps = Matrix(n, ns); // holder for state probabilities for (int tt=0; tt mu = X(tt,_)*beta; //k by 1 vector for (int j = 0; j< ns; ++j){ py[j] = dnorm(Y[tt], mu[0], sqrt(Sigma[j])); } if (tt==0) pstyt1 = pr1; else { pstyt1 = ::t(F(tt-1,_)*P); // make it an ns by 1 matrix } Matrix unnorm_pstyt = pstyt1%py; const Matrix pstyt = unnorm_pstyt/sum(unnorm_pstyt); // 
pstyt = Pr(st|Yt) for (int j=0; j pstyn = Matrix(ns, 1); double pone = 0.0; int tt = n-2; while (tt >= 0){ int st = s(tt+1); Matrix Pst_1 = ::t(P(_,st-1)); Matrix unnorm_pstyn = F(tt,_)%Pst_1; pstyn = unnorm_pstyn/sum(unnorm_pstyn); if (st==1) s(tt) = 1; else{ pone = pstyn(st-2); if(stream.runif() < pone) s(tt) = st-1; else s(tt) = st; } ps(tt,_) = pstyn; --tt; }// end of while loop Matrix Sout(n, ns+1); Sout(_, 0) = s(_,0); for (int j = 0; j Matrix gaussian_state_sampler(rng& stream, const int m, const Matrix& Y, const Matrix& X, const Matrix& beta, const Matrix& Sigma, const Matrix& P){ const int ns = m + 1; const int n = Y.rows(); // P is a (m+1 by m+1) transition matrix // F matrix contains all the information of Pr(st|Yt) Matrix F(n, ns); Matrix pr1(ns, 1); pr1[0] = 1; Matrix py(ns, 1); Matrix pstyt1(ns, 1); Matrix s(n, 1); // holder for state variables Matrix ps = Matrix(n, ns); // holder for state probabilities // // Forward sampling: update F matrix // for (int tt=0; tt mu = X(tt,_)*::t(beta); //k by 1 vector for (int j = 0; j< ns; ++j){ py[j] = dnorm(Y[tt], mu[j], sqrt(Sigma[j])); } if (tt==0) pstyt1 = pr1; else { pstyt1 = ::t(F(tt-1,_)*P); // make it an ns by 1 matrix } Matrix unnorm_pstyt = pstyt1%py; const Matrix pstyt = unnorm_pstyt/sum(unnorm_pstyt); // pstyt = Pr(st|Yt) for (int j=0; j pstyn = Matrix(ns, 1); double pone = 0.0; int tt = n-2; while (tt >= 0){ int st = s(tt+1); Matrix Pst_1 = ::t(P(_,st-1)); // prob of being at a previous state Matrix unnorm_pstyn = F(tt,_)%Pst_1; pstyn = unnorm_pstyn/sum(unnorm_pstyn); // normalize into a prob. density if (st==1) s(tt) = 1; // If this is the first period, state should be 1. // Otherwise, draw a state from a discrete prob. distribution("pstyn") // using the inverse CDF method. else{ pone = pstyn(st-2); if(stream.runif() < pone) s(tt) = st-1; else s(tt) = st; } ps(tt,_) = pstyn; --tt; }// end of while loop // name and report outputs Matrix Sout(n, ns+1); Sout(_, 0) = s(_,0); for (int j = 0; j void MCMCintervention_random_impl(rng& stream, const double m, const int intervention, const Matrix<>& Y, const Matrix<>& X, Matrix<>& beta, Matrix<>& Sigma, Matrix<>& P, Matrix& s, Matrix<>& b0, Matrix<>& B0, const double c0, const double d0, const Matrix<>& A0, unsigned int burnin, unsigned int mcmc, unsigned int thin, unsigned int verbose, bool chib, bool ar, Matrix<>& beta_store, Matrix<>& Sigma_store, Matrix<>& P_store, Matrix<>& ps_store, Matrix& s_store, double& logmarglike, Matrix<>& yhat_mat, Matrix<>& yerror_mat, Matrix<>& yfore_pred_mat, Matrix<>& yback_pred_mat) { // define constants and form cross-product matrices const int tot_iter = burnin + mcmc; //total iterations const int nstore = mcmc / thin; // number of draws to store const int n = Y.rows(); const int ns = m + 1; // number of states const int k = X.cols(); const Matrix<> B0inv = invpd(B0); Matrix<> sigma(ns, 1); //MCMC loop unsigned int count = 0; unsigned int reject = 0; double rejectionrate = 0; Matrix <> sum_e(ns, 1); for (int iter = 0; iter < tot_iter; ++iter){ ////////////////////// // 1. 
Sample beta and Sigma ////////////////////// int beta_count = 0; Matrix nstate(ns, 1); Matrix XtX(k, k); Matrix XtY(k, 1); for (int j = 0; j yj = Y((beta_count - nstate[j]), 0, (beta_count - 1), 0); Matrix Xj = X((beta_count - nstate[j]), 0, (beta_count - 1), k-1); // SIGMA UPDATE double shape = (c0 + (double)nstate[j])/2; Matrix<> yhat_j = Xj*beta; Matrix ej = yj - yhat_j; Matrix sum_ej = t(ej)*ej; double scale =(d0 + sum_ej[0])/2; Sigma[j] = 1/stream.rgamma(shape, scale); sigma[j] = sqrt(Sigma[j]); if (iter >= burnin && ((iter % thin)==0)){ yhat_mat(count, (beta_count - nstate[j]), count, (beta_count - 1)) = yhat_j(_,0); yerror_mat(count, (beta_count - nstate[j]), count, (beta_count - 1)) = ej(_,0); } // CONSTANT BETA UPDATE XtX = XtX + (crossprod(Xj))/Sigma[j]; XtY = XtY + ::t(Xj)*yj/Sigma[j]; } Matrix Bn = invpd(B0 + XtX); Matrix bn = Bn*(B0*b0 + XtY); if (ar == 1){ Matrix beta_can = stream.rmvnorm(bn, Bn); if (beta_can(1) > 1 | beta_can(1) < -1){ // Rprintf("\n AR coefficient %10.5f is outside the stationary region! \n", beta_can(1)); ++reject; } else{ beta = beta_can; } } else{ beta = stream.rmvnorm(bn, Bn); } ////////////////////// // 2. Sample P ////////////////////// double shape1 = 0; double shape2 = 0; P(ns-1, ns-1) = 1; //no jump at the last state for (int j =0; j<(ns-1); ++j){ shape1 = A0(j,j) + (double)nstate[j] - 1; shape2 = A0(j,j+1) + 1; // SS(j,j+1); P(j,j) = stream.rbeta(shape1, shape2); P(j,j+1) = 1 - P(j,j); } ////////////////////// // 3. Sample s ////////////////////// Matrix F(n, ns); Matrix pr1(ns, 1); pr1[0] = 1; Matrix py(ns, 1); Matrix pstyt1(ns, 1); Matrix ps = Matrix(n, ns); // holder for state probabilities // // Forward sampling: update F matrix // for (int tt=0; tt mu = X(tt,_)*beta; //k by 1 vector for (int j = 0; j< ns; ++j){ py[j] = dnorm(Y[tt], mu[0], sigma[j]); } if (tt==0) pstyt1 = pr1; else { pstyt1 = ::t(F(tt-1,_)*P); // make it an ns by 1 matrix } Matrix unnorm_pstyt = pstyt1%py; ///////////////////////////////////////////////////////////////////// // Prediction of future outcomes based on pre-intervention state ///////////////////////////////////////////////////////////////////// if (tt==(intervention - 1)&&iter >= burnin && ((iter % thin)==0)){ // Forward prediction Matrix <> yfore_pred(1, n); for (int ttt=tt; ttt yback_pred(1, n); for (int ttt=tt; ttt>=0 ; --ttt){ int ss = s(tt+1); mu = X(ttt,_)*beta; //k by 1 vector yback_pred(ttt) = stream.rnorm(mu(0), sigma[ss-1]); } yback_pred_mat(count, _) = yback_pred(0, _); } const Matrix pstyt = unnorm_pstyt/sum(unnorm_pstyt); // pstyt = Pr(st|Yt) for (int j=0; j pstyn = Matrix(ns, 1); double pone = 0.0; int tt = n-2; while (tt >= 0){ int st = s(tt+1); Matrix Pst_1 = ::t(P(_,st-1)); // prob of being at a previous state Matrix unnorm_pstyn = F(tt,_)%Pst_1; pstyn = unnorm_pstyn/sum(unnorm_pstyn); // normalize into a prob. density if (st==1) s(tt) = 1; // If this is the first period, state should be 1. // Otherwise, draw a state from a discrete prob. distribution("pstyn") // using the inverse CDF method. 
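// In FFBS terms the draw below uses the backward-sampling kernel
//   Pr(s_t = j | s_{t+1}, Y_{1:t})  proportional to  F(t, j) * P(j, s_{t+1});
// given the forward-moving transition matrix used here (a state either
// persists or moves one step ahead), only the two states {s_{t+1} - 1,
// s_{t+1}} can carry positive probability, so a single uniform draw against
// pone = Pr(s_t = s_{t+1} - 1 | .) is all that is needed.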
else{ pone = pstyn(st-2); if(stream.runif() < pone) s(tt) = st-1;// jump from tt-1 to tt else s(tt) = st;// stay } ps(tt,_) = pstyn; --tt; }// end of while loop // load draws into sample array if (iter >= burnin && ((iter % thin)==0)){ for (int i=0; i 0 && iter % verbose == 0){ Rprintf("\nMCMCintervention iteration %i of %i \n", (iter+1), tot_iter); if (ar == 1 ){ double rejectionrate = (double)reject/(double)(iter+1); Rprintf("\n\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n"); Rprintf("The acceptance rate was %3.5f", 1 - rejectionrate); Rprintf("\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n"); } for (int j = 0;j(nstate[j])); } Rprintf("\n beta \n"); for (int i = 0; i .05){ Rprintf("The acceptance rate is too low.\n"); } else { Matrix beta_st(k, 1); Matrix betast = meanc(beta_store); //meanc(beta_store)=(11, 12, 13, 21, 22, 23) for (int i = 0; i Sigma_st = meanc(Sigma_store); Matrix sigma_st(ns, 1); for (int j = 0; j P_vec_st = meanc(P_store); const Matrix P_st(ns, ns); for (int j = 0; j< ns*ns; ++j){ P_st[j] = P_vec_st[j]; } ////////////////////// // 1. pdf.beta ////////////////////// Matrix density_beta(nstore, 1); for (int iter = 0; iter XtX(k, k); Matrix XtY(k, 1); Matrix nstate(ns, 1); // contains total numbers of each state int beta_count = 0; for (int j = 0; j yj = Y((beta_count - nstate[j]), 0, (beta_count - 1), 0); const Matrix Xj = X((beta_count - nstate[j]), 0, (beta_count - 1), k-1); const double precision = 1.0/Sigma_store(iter, j); XtX = XtX + (crossprod(Xj))*precision; XtY = XtY + ::t(Xj)*yj*precision; } Matrix Bn = invpd(B0 + XtX); Matrix bn = Bn*(B0*b0 + XtY); if (k == 1){ density_beta(iter) = dnorm(beta_st(0), bn(0), sqrt(Bn(0))); } else{ density_beta(iter) = ::exp(lndmvn(beta_st, bn, Bn)); } } double pdf_beta = log(prod(meanc(density_beta))); ////////////////////// ////////////////////// ////////////////////// // 2. pdf.Sigma|beta_st, S, P ////////////////////// ////////////////////// ////////////////////// Matrix density_Sigma(nstore, ns); for (int iter = 0; iter Sout = gaussian_state_fixedBeta_sampler(stream, m, Y, X, beta_st, Sigma, P); Matrix s = Sout(_, 0); // STEP 2.2 Sigma|y, beta.st, S, P int beta_count = 0; Matrix nstate(ns, 1); // contains total numbers of each state for (int j = 0; j yj = Y((beta_count - nstate[j]), 0, (beta_count - 1), 0); Matrix Xj = X((beta_count - nstate[j]), 0, (beta_count - 1), k-1); Matrix ej = yj - Xj*beta_st; Matrix sum_ej = ::t(ej)*ej; double shape = (c0 + (double)nstate[j])/2; double scale =(d0 + sum_ej[0])/2; Sigma[j] = stream.rigamma(shape, scale); density_Sigma(iter, j) = ::exp(lndinvgamma_pjh(Sigma_st[j], shape, scale)); }// end of sampling beta and Sigma // STEP 2.3 P|S double shape1 = 0; double shape2 = 0; P(ns-1, ns-1) = 1; //no jump at the last state for (int j =0; j< (ns-1); ++j){ shape1 = A0(j,j) + (double)nstate[j] - 1; shape2 = A0(j,j+1) + 1; // P(j,j) = stream.rbeta(shape1, shape2); P(j,j+1) = 1 - P(j,j); } }// end of pdf.Sigma double pdf_Sigma = log(prod(meanc(density_Sigma))); ////////////////////// // 3. 
pdf.P|beta_st, Sigma_st, S ////////////////////// Matrix density_P(nstore, ns); for (int iter = 0; iter < nstore; ++iter){ // STEP 2.1 S|y, beta.st, Sigma, P Matrix Sout = gaussian_state_fixedBeta_sampler(stream, m, Y, X, beta_st, Sigma_st, P); Matrix s = Sout(_, 0); double shape1 = 0; double shape2 = 0; P(ns-1, ns-1) = 1; //no jump at the last state // compute addN Matrix P_addN(ns, 1); for (int j = 0; j F = Matrix(n, ns); Matrix like(n, 1); Matrix pr1 = Matrix(ns, 1); pr1[0] = 1; Matrix py(ns, 1); Matrix pstyt1(ns, 1); for (int t=0; t mu = X(t,_)*beta_st; for (int j = 0; j< ns; ++j){ py[j] = dnorm(Y[t], mu[0], sigma_st[j]); } if (t==0) pstyt1 = pr1; else { pstyt1 = ::t(F(t-1,_)*P_st); } Matrix unnorm_pstyt = pstyt1%py; Matrix pstyt = unnorm_pstyt/sum(unnorm_pstyt); for (int j=0; j density_Sigma_prior(ns, 1); Matrix density_P_prior(ns, 1); density_P[ns-1] = 1; // if (k == 1){ density_beta_prior = log(dnorm(beta_st(0), b0(0), sqrt(B0inv(0)))); } else{ density_beta_prior = lndmvn(beta_st, b0, B0inv); } for (int j=0; j 0){ Rprintf("logmarglike = %10.5f\n", logmarglike); Rprintf("loglike = %10.5f\n", loglike); Rprintf("logprior = %10.5f\n", logprior); Rprintf("pdf_beta = %10.5f\n", pdf_beta); Rprintf("pdf_Sigma = %10.5f\n", pdf_Sigma); Rprintf("pdf_P = %10.5f\n", pdf_P); } } }// end of marginal likelihood computation } //////////////////////////////////////////// // MCMCintervention fixed effect changes only //////////////////////////////////////////// template void MCMCintervention_fixed_impl(rng& stream, const double m, const int intervention, const Matrix<>& Y, const Matrix<>& X, Matrix<>& beta, Matrix<>& Sigma, Matrix<>& P, Matrix& s, Matrix<>& b0, Matrix<>& B0, const double c0, const double d0, const Matrix<>& A0, unsigned int burnin, unsigned int mcmc, unsigned int thin, unsigned int verbose, bool chib, bool ar, Matrix<>& beta_store, Matrix<>& Sigma_store, Matrix<>& P_store, Matrix<>& ps_store, Matrix& s_store, double& logmarglike, Matrix<>& yhat_mat, Matrix<>& yerror_mat, Matrix<>& yfore_pred_mat, Matrix<>& yback_pred_mat, double acceptance) { // define constants and form cross-product matrices const int tot_iter = burnin + mcmc; //total iterations const int nstore = mcmc / thin; // number of draws to store const int n = Y.rows(); const int ns = m + 1; // number of states const int k = X.cols(); const Matrix<> B0inv = invpd(B0); double sigma2 = Sigma(0); double sigma = sqrt(sigma2); //MCMC loop unsigned int count = 0; unsigned int reject = 0; Matrix <> sum_e(ns, 1); for (int iter = 0; iter < tot_iter; ++iter){ ////////////////////// // 1. Sample beta and Sigma ////////////////////// int beta_count = 0; Matrix nstate(ns, 1); // contains total numbers of each state for (int j = 0; j yj = Y((beta_count - nstate[j]), 0, (beta_count - 1), 0); Matrix Xj = X((beta_count - nstate[j]), 0, (beta_count - 1), k-1); Matrix tXj = ::t(Xj); Matrix Bn = invpd(B0 + crossprod(Xj)/sigma2); Matrix bn = Bn*(B0*b0 + tXj*yj/sigma2); if (ar == 1){ Matrix beta_can = stream.rmvnorm(bn, Bn); if (beta_can(1) > 1 | beta_can(1) < -1){ // Rprintf("\n AR coefficient %10.5f is outside the stationary region! 
\n", beta_can(1)); ++reject; } else{ for (int kk = 0; kk yhat_j = Xj*::t(beta(j,_)); Matrix ej = yj - yhat_j; Matrix sum_ej = t(ej)*ej; sum_e(j) = sum_ej[0]; if (iter >= burnin && ((iter % thin)==0)){ yhat_mat(count, (beta_count - nstate[j]), count, (beta_count - 1)) = yhat_j(_,0); yerror_mat(count, (beta_count - nstate[j]), count, (beta_count - 1)) = ej(_,0); } }// end of sampling beta and Sigma // SIGMA UPDATE double shape = (c0 + (double)n)/2; double scale =(d0 + sum(sum_e))/2; sigma2 = 1/stream.rgamma(shape, scale); sigma = sqrt(sigma2); ////////////////////// // 2. Sample P ////////////////////// double shape1 = 0; double shape2 = 0; P(ns-1, ns-1) = 1; //no jump at the last state for (int j =0; j<(ns-1); ++j){ shape1 = A0(j,j) + (double)nstate[j] - 1; shape2 = A0(j,j+1) + 1; // SS(j,j+1); P(j,j) = stream.rbeta(shape1, shape2); P(j,j+1) = 1 - P(j,j); } ////////////////////// // 3. Sample s ////////////////////// Matrix F(n, ns); Matrix pr1(ns, 1); pr1[0] = 1; Matrix py(ns, 1); Matrix pstyt1(ns, 1); Matrix ps = Matrix(n, ns); // holder for state probabilities // // Forward sampling: update F matrix // for (int tt=0; tt mu = X(tt,_)*::t(beta); //k by 1 vector for (int j = 0; j< ns; ++j){ py[j] = dnorm(Y[tt], mu[j], sigma); } if (tt==0) pstyt1 = pr1; else { pstyt1 = ::t(F(tt-1,_)*P); // make it an ns by 1 matrix } Matrix unnorm_pstyt = pstyt1%py; ///////////////////////////////////////////////////////////////////// // Prediction of future outcomes based on pre-intervention state ///////////////////////////////////////////////////////////////////// if (tt==(intervention - 1)&&iter >= burnin && ((iter % thin)==0)){ // Forward prediction Matrix <> yfore_pred(1, n); for (int ttt=tt; ttt yback_pred(1, n); for (int ttt=tt; ttt>=0 ; --ttt){ int ss = s(tt+1); mu = X(ttt,_)*::t(beta); //k by 1 vector yback_pred(ttt) = stream.rnorm(mu[ss-1], sigma); } yback_pred_mat(count, _) = yback_pred(0, _); } const Matrix pstyt = unnorm_pstyt/sum(unnorm_pstyt); // pstyt = Pr(st|Yt) for (int j=0; j pstyn = Matrix(ns, 1); double pone = 0.0; int tt = n-2; while (tt >= 0){ int st = s(tt+1); Matrix Pst_1 = ::t(P(_,st-1)); // prob of being at a previous state Matrix unnorm_pstyn = F(tt,_)%Pst_1; pstyn = unnorm_pstyn/sum(unnorm_pstyn); // normalize into a prob. density if (st==1) s(tt) = 1; // If this is the first period, state should be 1. // Otherwise, draw a state from a discrete prob. distribution("pstyn") // using the inverse CDF method. 
else{ pone = pstyn(st-2); if(stream.runif() < pone) s(tt) = st-1;// jump from tt-1 to tt else s(tt) = st;// stay } ps(tt,_) = pstyn; --tt; }// end of while loop // load draws into sample array if (iter >= burnin && ((iter % thin)==0)){ Matrix tbeta = ::t(beta); //transpose beta for R output for (int i=0; i<(ns*k); ++i) beta_store(count,i) = tbeta[i];// stored by the order of (11, 12, 13, 21, 22, 23) Sigma_store(count) = sigma2; for (int j=0; j 0 && iter % verbose == 0){ Rprintf("\nMCMCintervention iteration %i of %i \n", (iter+1), tot_iter); if (ar == 1 ){ double rejectionrate = (double)reject/(double)(iter+1); Rprintf("\n\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n"); Rprintf("The acceptance rate was %3.5f", 1 - rejectionrate); Rprintf("\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n"); } for (int j = 0;j(nstate[j])); } Rprintf("\n beta \n"); for (int i = 0; i betast = meanc(beta_store); //meanc(beta_store)=(11, 12, 13, 21, 22, 23) Matrix beta_st(ns, k); for (int j = 0; j Sigma_st(ns, 1); for (int j = 0; j P_vec_st = meanc(P_store); const Matrix P_st(ns, ns); for (int j = 0; j< ns*ns; ++j){ P_st[j] = P_vec_st[j]; } ////////////////////// // 1. pdf.beta ////////////////////// Matrix density_beta(nstore, ns); for (int iter = 0; iter nstate(ns, 1); // contains total numbers of each state int beta_count = 0; for (int j = 0; j yj = Y((beta_count - nstate[j]), 0, (beta_count - 1), 0); const Matrix Xj = X((beta_count - nstate[j]), 0, (beta_count - 1), k-1); const double precision = 1.0/Sigma_store(iter); const Matrix XpX = crossprod(Xj); const Matrix XpY = (::t(Xj)*yj); const Matrix Bn = invpd(B0 + XpX*precision); const Matrix bn = Bn*gaxpy(B0, b0, XpY*precision); if (k == 1){ density_beta(iter, j) = dnorm(beta_st(j), bn(0), sqrt(Bn(0))); } else{ density_beta(iter, j) = ::exp(lndmvn(::t(beta_st(j,_)), bn, Bn)); } }// end of sampling beta and Sigma }// end of pdf.beta double pdf_beta = log(prod(meanc(density_beta))); ////////////////////// // 2. 
pdf.Sigma|beta_st, S, P ////////////////////// Matrix density_Sigma(nstore, 1); for (int iter = 0; iter Sout = gaussian_state_sampler(stream, m, Y, X, beta_st, Sigma, P); Matrix s = Sout(_, 0); // STEP 2.2 Sigma|y, beta.st, S, P int beta_count = 0; Matrix nstate(ns, 1); Matrix<> sum_e(ns, 1); for (int j = 0; j yj = Y((beta_count - nstate[j]), 0, (beta_count - 1), 0); Matrix Xj = X((beta_count - nstate[j]), 0, (beta_count - 1), k-1); Matrix ej = yj - Xj*::t(beta_st(j,_)); Matrix sum_ej = ::t(ej)*ej; sum_e(j) = sum_ej(0); }// end of sampling beta and Sigma double shape = (c0 + (double)n)/2; double scale =(d0 + sum(sum_e))/2; sigma2 = stream.rigamma(shape, scale); density_Sigma(iter) = ::exp(lndinvgamma_pjh(Sigma_st(0), shape, scale)); for (int j = 0; j density_P(nstore, ns); for (int iter = 0; iter < nstore; ++iter){ // STEP 2.1 S|y, beta.st, Sigma, P Matrix Sout = gaussian_state_sampler(stream, m, Y, X, beta_st, Sigma_st, P); Matrix s = Sout(_, 0); double shape1 = 0; double shape2 = 0; P(ns-1, ns-1) = 1; //no jump at the last state // compute addN Matrix P_addN(ns, 1); for (int j = 0; j F = Matrix(n, ns); Matrix like(n, 1); Matrix pr1 = Matrix(ns, 1); pr1[0] = 1; Matrix py(ns, 1); Matrix pstyt1(ns, 1); for (int t=0; t mu = X(t,_)*::t(beta_st); //k by 1 vector for (int j = 0; j< ns; ++j){ py[j] = dnorm(Y[t], mu[j], sqrt(Sigma_st[0])); } if (t==0) pstyt1 = pr1; else { pstyt1 = ::t(F(t-1,_)*P_st); // make it an ns by 1 matrix } Matrix unnorm_pstyt = pstyt1%py; Matrix pstyt = unnorm_pstyt/sum(unnorm_pstyt); // pstyt = Pr(st|Yt) for (int j=0; j density_beta_prior(ns, 1); double density_Sigma_prior = 0.0; Matrix density_P_prior(ns, 1); density_P[ns-1] = 1; // if (k == 1){ for (int j=0; j 0){ Rprintf("logmarglike = %10.5f\n", logmarglike); Rprintf("loglike = %10.5f\n", loglike); Rprintf("logprior = %10.5f\n", logprior); Rprintf("pdf_beta = %10.5f\n", pdf_beta); Rprintf("pdf_Sigma = %10.5f\n", pdf_Sigma); Rprintf("pdf_P = %10.5f\n", pdf_P); } } }// end of marginal likelihood computation } template void MCMCintervention_impl(rng& stream, const double m, const int intervention, const Matrix<>& Y, const Matrix<>& X, Matrix<>& beta, Matrix<>& Sigma, Matrix<>& P, Matrix& s, Matrix<>& b0, Matrix<>& B0, const double c0, const double d0, const Matrix<>& A0, unsigned int burnin, unsigned int mcmc, unsigned int thin, unsigned int verbose, bool chib, bool ar, Matrix<>& beta_store, Matrix<>& Sigma_store, Matrix<>& P_store, Matrix<>& ps_store, Matrix& s_store, double& logmarglike, Matrix<>& yhat_mat, Matrix<>& yerror_mat, Matrix<>& yfore_pred_mat, Matrix<>& yback_pred_mat, double acceptance) { // define constants and form cross-product matrices const int tot_iter = burnin + mcmc; //total iterations const int nstore = mcmc / thin; // number of draws to store const int n = Y.rows(); const int ns = m + 1; // number of states const int k = X.cols(); const Matrix<> B0inv = invpd(B0); Matrix sigma(ns, 1); //MCMC loop unsigned int count = 0; int reject = 0; for (int iter = 0; iter < tot_iter; ++iter){ ////////////////////// // 1. 
Sample beta and Sigma ////////////////////// int beta_count = 0; Matrix nstate(ns, 1); // contains total numbers of each state for (int j = 0; j yj = Y((beta_count - nstate[j]), 0, (beta_count - 1), 0); Matrix Xj = X((beta_count - nstate[j]), 0, (beta_count - 1), k-1); Matrix tXj = ::t(Xj); Matrix Bn = invpd(B0 + crossprod(Xj)/Sigma[j]); Matrix bn = Bn*(B0*b0 + tXj*yj/Sigma[j]); if (ar == 1){ Matrix beta_can = stream.rmvnorm(bn, Bn); if (beta_can(1) > 1 | beta_can(1) < -1){ // Rprintf("\n AR coefficient %10.5f is outside the stationary region! \n", beta_can(1)); ++reject; } else{ for (int kk = 0; kk yhat_j = Xj*::t(beta(j,_)); Matrix ej = yj - yhat_j; Matrix sum_ej = t(ej)*ej; double scale =(d0 + sum_ej[0])/2; Sigma[j] = 1/stream.rgamma(shape, scale); sigma(j) = sqrt(Sigma(j)); if (iter >= burnin && ((iter % thin)==0)){ yhat_mat(count, (beta_count - nstate[j]), count, (beta_count - 1)) = yhat_j(_,0); yerror_mat(count, (beta_count - nstate[j]), count, (beta_count - 1)) = ej(_,0); } }// end of sampling beta and Sigma ////////////////////// // 2. Sample P ////////////////////// double shape1 = 0; double shape2 = 0; P(ns-1, ns-1) = 1; //no jump at the last state for (int j =0; j<(ns-1); ++j){ shape1 = A0(j,j) + (double)nstate[j] - 1; shape2 = A0(j,j+1) + 1; // SS(j,j+1); P(j,j) = stream.rbeta(shape1, shape2); P(j,j+1) = 1 - P(j,j); } ////////////////////// // 3. Sample s ////////////////////// Matrix F(n, ns); Matrix pr1(ns, 1); pr1[0] = 1; Matrix py(ns, 1); Matrix pstyt1(ns, 1); Matrix ps = Matrix(n, ns); // holder for state probabilities // // Forward sampling: update F matrix // for (int tt=0; tt mu = X(tt,_)*::t(beta); //k by 1 vector for (int j = 0; j< ns; ++j){ py[j] = dnorm(Y[tt], mu[j], sigma[j]); } if (tt==0) pstyt1 = pr1; else { pstyt1 = ::t(F(tt-1,_)*P); // make it an ns by 1 matrix } Matrix unnorm_pstyt = pstyt1%py; ///////////////////////////////////////////////////////////////////// // Prediction of future outcomes based on pre-intervention state ///////////////////////////////////////////////////////////////////// if (tt==(intervention - 1)&&iter >= burnin && ((iter % thin)==0)){ // Forward prediction Matrix <> yfore_pred(1, n); for (int ttt=tt; ttt yback_pred(1, n); for (int ttt=tt; ttt>=0 ; --ttt){ int ss = s(tt+1); mu = X(ttt,_)*::t(beta); //k by 1 vector yback_pred(ttt) = stream.rnorm(mu[ss-1], sigma[ss-1]); } yback_pred_mat(count, _) = yback_pred(0, _); } const Matrix pstyt = unnorm_pstyt/sum(unnorm_pstyt); // pstyt = Pr(st|Yt) for (int j=0; j pstyn = Matrix(ns, 1); double pone = 0.0; int tt = n-2; while (tt >= 0){ int st = s(tt+1); Matrix Pst_1 = ::t(P(_,st-1)); // prob of being at a previous state Matrix unnorm_pstyn = F(tt,_)%Pst_1; pstyn = unnorm_pstyn/sum(unnorm_pstyn); // normalize into a prob. density if (st==1) s(tt) = 1; // If this is the first period, state should be 1. // Otherwise, draw a state from a discrete prob. distribution("pstyn") // using the inverse CDF method. 
else{ pone = pstyn(st-2); if(stream.runif() < pone) s(tt) = st-1;// jump from tt-1 to tt else s(tt) = st;// stay } ps(tt,_) = pstyn; --tt; }// end of while loop // load draws into sample array if (iter >= burnin && ((iter % thin)==0)){ Matrix tbeta = ::t(beta); //transpose beta for R output for (int i=0; i<(ns*k); ++i) beta_store(count,i) = tbeta[i];// stored by the order of (11, 12, 13, 21, 22, 23) for (int i=0; i 0 && iter % verbose == 0){ Rprintf("\nMCMCintervention iteration %i of %i \n", (iter+1), tot_iter); if (ar == 1 ){ double rejectionrate = (double)reject/(double)(iter+1); Rprintf("\n\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n"); Rprintf("The acceptance rate was %3.5f", 1 - rejectionrate); Rprintf("\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n"); } for (int j = 0;j(nstate[j])); } Rprintf("\n beta \n"); for (int i = 0; i betast = meanc(beta_store); Matrix beta_st(ns, k); for (int j = 0; j Sigma_st = meanc(Sigma_store); Matrix sigma_st(ns, 1); for (int j = 0; j P_vec_st = meanc(P_store); const Matrix P_st(ns, ns); for (int j = 0; j< ns*ns; ++j){ P_st[j] = P_vec_st[j]; } ////////////////////// // 1. pdf.beta ////////////////////// Matrix density_beta(nstore, ns); for (int iter = 0; iter nstate(ns, 1); int beta_count = 0; for (int j = 0; j yj = Y((beta_count - nstate[j]), 0, (beta_count - 1), 0); const Matrix Xj = X((beta_count - nstate[j]), 0, (beta_count - 1), k-1); const double precision = 1.0/Sigma_store(iter, j); const Matrix XpX = crossprod(Xj); const Matrix XpY = (::t(Xj)*yj); const Matrix Bn = invpd(B0 + XpX*precision); const Matrix bn = Bn*gaxpy(B0, b0, XpY*precision); if (k == 1){ density_beta(iter, j) = log(dnorm(beta_st(j), bn(0), sqrt(Bn(0)))); } else{ density_beta(iter, j) = lndmvn(::t(beta_st(j,_)), bn, Bn); } }// end of sampling beta and Sigma }// end of pdf.beta double pdf_beta = sum(meanc(density_beta)); ////////////////////// // 2. pdf.Sigma|beta_st, S, P ////////////////////// Matrix density_Sigma(nstore, ns); for (int iter = 0; iter Sout = gaussian_state_sampler(stream, m, Y, X, beta_st, Sigma, P); Matrix s = Sout(_, 0); // STEP 2.2 Sigma|y, beta.st, S, P int beta_count = 0; Matrix nstate(ns, 1); // contains total numbers of each state for (int j = 0; j yj = Y((beta_count - nstate[j]), 0, (beta_count - 1), 0); Matrix Xj = X((beta_count - nstate[j]), 0, (beta_count - 1), k-1); Matrix ej = yj - Xj*::t(beta_st(j,_)); Matrix sum_ej = ::t(ej)*ej; double shape = (c0 + (double)nstate[j])/2; double scale =(d0 + sum_ej[0])/2; Sigma[j] = stream.rigamma(shape, scale); density_Sigma(iter, j) = ::exp(lndinvgamma_pjh(Sigma_st[j], shape, scale)); }// end of sampling beta and Sigma // STEP 2.3 P|S double shape1 = 0; double shape2 = 0; P(ns-1, ns-1) = 1; //no jump at the last state for (int j =0; j< (ns-1); ++j){ shape1 = A0(j,j) + (double)nstate[j] - 1; shape2 = A0(j,j+1) + 1; // P(j,j) = stream.rbeta(shape1, shape2); P(j,j+1) = 1 - P(j,j); } }// end of pdf.Sigma double pdf_Sigma = log(prod(meanc(density_Sigma))); ////////////////////// // 3. 
pdf.P|beta_st, Sigma_st, S ////////////////////// Matrix density_P(nstore, ns); for (int iter = 0; iter < nstore; ++iter){ // STEP 2.1 S|y, beta.st, Sigma, P Matrix Sout = gaussian_state_sampler(stream, m, Y, X, beta_st, Sigma_st, P); Matrix s = Sout(_, 0); double shape1 = 0; double shape2 = 0; P(ns-1, ns-1) = 1; // compute addN Matrix P_addN(ns, 1); for (int j = 0; j F = Matrix(n, ns); Matrix like(n, 1); Matrix pr1 = Matrix(ns, 1); pr1[0] = 1; Matrix py(ns, 1); Matrix pstyt1(ns, 1); for (int t=0; t mu = X(t,_)*::t(beta_st); for (int j = 0; j< ns; ++j){ py[j] = dnorm(Y[t], mu[j], sigma_st[j]); } if (t==0) pstyt1 = pr1; else { pstyt1 = ::t(F(t-1,_)*P_st); } Matrix unnorm_pstyt = pstyt1%py; Matrix pstyt = unnorm_pstyt/sum(unnorm_pstyt); for (int j=0; j density_beta_prior(ns, 1); Matrix density_Sigma_prior(ns, 1); Matrix density_P_prior(ns, 1); density_P[ns-1] = 1; // if (k == 1){ for (int j=0; j 0){ Rprintf("logmarglike = %10.5f\n", logmarglike); Rprintf("loglike = %10.5f\n", loglike); Rprintf("logprior = %10.5f\n", logprior); Rprintf("pdf_beta = %10.5f\n", pdf_beta); Rprintf("pdf_Sigma = %10.5f\n", pdf_Sigma); Rprintf("pdf_P = %10.5f\n", pdf_P); } }// end of marginal likelihood computation } } //////////////////////////////////////////// // Start MCMCinterventionpoint function /////////////////////////////////////////// extern "C"{ void MCMCintervention(double *accept, double *betaout, double *Sigmaout, double *Pout, double *psout, double *sout, double *yhatout, double *yerrorout, double *yforepredout, double *ybackpredout, const double *Ydata, const int *Yrow, const int *Ycol, const double *Xdata, const int *Xrow, const int *Xcol, const int *m, const int *intervention, const int *burnin, const int *mcmc, const int *thin, const int *verbose, const int *uselecuyer, const int *seedarray, const int *lecuyerstream, const double *betastart, const double *Sigmastart, const double *Pstart, const int *statestart, const double *a, const double *b, const double *b0data, const double *B0data, const double *c0, const double *d0, const double *A0data, double *logmarglikeholder, double *loglikeholder, const int *ar, const int *change, const int *chib){ // pull together Matrix objects const Matrix Y(*Yrow, *Ycol, Ydata); const Matrix X(*Xrow, *Xcol, Xdata); const unsigned int tot_iter = *burnin + *mcmc; //total iterations const unsigned int nstore = *mcmc / *thin; // number of draws to store const int n = Y.rows(); const int k = X.cols(); const int ns = *m + 1; // number of states // generate starting values Matrix <> Sigma(ns, 1, Sigmastart); Matrix <> P(ns, ns, Pstart); Matrix s(n, 1, statestart); Matrix <> b0(k, 1, b0data); Matrix <> B0(k, k, B0data); const Matrix <> A0(ns, ns, A0data); double logmarglike; double acceptance; // storage matrices Matrix<> P_store(nstore, ns*ns); Matrix<> ps_store(n, ns); Matrix s_store(nstore, n); Matrix<> yhat_mat(nstore, n); Matrix<> yerror_mat(nstore, n); Matrix<> yfore_pred_mat(nstore, n); Matrix<> yback_pred_mat(nstore, n); if (*change == 1){ // fixed effects change only Matrix <> beta(ns, k, betastart); Matrix<> beta_store(nstore, ns*k); Matrix<> Sigma_store(nstore, 1); MCMCPACK_PASSRNG2MODEL(MCMCintervention_fixed_impl, *m, *intervention, Y, X, beta, Sigma, P, s, b0, B0, *c0, *d0, A0, *burnin, *mcmc, *thin, *verbose, *chib, *ar, beta_store, Sigma_store, P_store, ps_store, s_store, logmarglike, yhat_mat, yerror_mat, yfore_pred_mat, yback_pred_mat, acceptance); for (int i = 0; i<(nstore*ns*k); ++i){ betaout[i] = beta_store[i]; } for (int i = 0; i<(nstore); ++i){ 
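// In this fixed-effects-change-only branch (*change == 1) a single common
// error variance is stored per draw -- Sigma_store is nstore x 1 -- so only
// nstore values are copied back into Sigmaout here, rather than nstore*ns
// as in the other branches below.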
Sigmaout[i] = Sigma_store[i]; } } else if (*change == 2){ // random effects change only Matrix<> beta(k, 1, betastart); Matrix<> beta_store(nstore, k); Matrix<> Sigma_store(nstore, ns); MCMCPACK_PASSRNG2MODEL(MCMCintervention_random_impl, *m, *intervention, Y, X, beta, Sigma, P, s, b0, B0, *c0, *d0, A0, *burnin, *mcmc, *thin, *verbose, *chib, *ar, beta_store, Sigma_store, P_store, ps_store, s_store, logmarglike, yhat_mat, yerror_mat, yfore_pred_mat, yback_pred_mat) ; for (int i = 0; i<(nstore*k); ++i){ betaout[i] = beta_store[i]; } for (int i = 0; i<(nstore*ns); ++i){ Sigmaout[i] = Sigma_store[i]; } acceptance = 1; } else { Matrix <> beta(ns, k, betastart); Matrix<> beta_store(nstore, ns*k); Matrix<> Sigma_store(nstore, ns); MCMCPACK_PASSRNG2MODEL(MCMCintervention_impl, *m, *intervention, Y, X, beta, Sigma, P, s, b0, B0, *c0, *d0, A0, *burnin, *mcmc, *thin, *verbose, *chib, *ar, beta_store, Sigma_store, P_store, ps_store, s_store, logmarglike, yhat_mat, yerror_mat, yfore_pred_mat, yback_pred_mat, acceptance); // return output for (int i = 0; i<(nstore*ns*k); ++i){ betaout[i] = beta_store[i]; } for (int i = 0; i<(nstore*ns); ++i){ Sigmaout[i] = Sigma_store[i]; } } logmarglikeholder[0] = logmarglike; accept[0] = acceptance; for (int i = 0; i<(nstore*ns*ns); ++i){ Pout[i] = P_store[i]; } for (int i = 0; i<(n*ns); ++i){ psout[i] = ps_store[i]; } for (int i = 0; i<(nstore*n); ++i){ sout[i] = s_store[i]; yhatout[i] = yhat_mat[i]; yerrorout[i] = yerror_mat[i]; yforepredout[i] = yfore_pred_mat[i]; ybackpredout[i] = yback_pred_mat[i]; } }// end of MCMCpoissonChange }//end extern "C" #endif MCMCpack/src/MCMChregress.cc0000644000176000001440000002577112140061656015255 0ustar ripleyusers//////////////////////////////////////////////////////////////////// // MCMChregress.cc // // MCMChregress samples from the posterior distribution of a // Gaussian hierarchical linear regression model // // The code uses Algorithm 2 of Chib & Carlin (1999) for efficient // inference of (\beta | Y, sigma^2, Vb). // // Chib, S. & Carlin, B. P. (1999) On MCMC sampling in hierarchical // longitudinal models, Statistics and Computing, 9, 17-26 // //////////////////////////////////////////////////////////////////// // // Original code by Ghislain Vieilledent, may 2011 // CIRAD UR B&SEF // ghislain.vieilledent@cirad.fr / ghislainv@gmail.com // //////////////////////////////////////////////////////////////////// // // The initial version of this file was generated by the // auto.Scythe.call() function in the MCMCpack R package // written by: // // Andrew D. Martin // Dept. of Political Science // Washington University in St. Louis // admartin@wustl.edu // // Kevin M. Quinn // Dept. of Government // Harvard University // kevin_quinn@harvard.edu // // This software is distributed under the terms of the GNU GENERAL // PUBLIC LICENSE Version 2, June 1991. See the package LICENSE // file for more information. // // Copyright (C) 2011 Andrew D. Martin and Kevin M. Quinn // //////////////////////////////////////////////////////////////////// // // Revisions: // - This file was initially generated on Wed May 4 10:42:50 2011 // - G. 
Vieilledent, on May 4 2011 // //////////////////////////////////////////////////////////////////// #include "matrix.h" #include "distributions.h" #include "stat.h" #include "la.h" #include "ide.h" #include "smath.h" #include "MCMCrng.h" #include "MCMCfcds.h" #include // needed to use Rprintf() #include // needed to allow user interrupts using namespace scythe; using namespace std; extern "C"{ /* Gibbs sampler function */ void MCMChregress ( // Constants and data const int *ngibbs, const int *nthin, const int *nburn, // Number of iterations, burning and samples const int *nobs, const int *ngroup, // Constants const int *np, const int *nq, // Number of fixed and random covariates const int *IdentGroup, // Vector of group const double *Y_vect, // Observed response variable const double *X_vect, // Covariate for fixed effects const double *W_vect, // Covariate for random effects // Parameters to save double *beta_vect, // Fixed effects double *b_vect, // Random effects double *Vb_vect, // Variance of random effects double *V, // Variance of residuals // Defining priors const double *mubeta_vect, const double *Vbeta_vect, const double *r, const double *R_vect, const double *s1_V, const double *s2_V, // Diagnostic double *Deviance, double *Y_pred, // Fitted values (predictive posterior mean) // Seeds const int *seed, // Verbose const int *verbose ) { //////////////////////////////////////////////////////////////////////////////// //%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% // Defining and initializing objects /////////////////////////// // Redefining constants // const int NGIBBS=ngibbs[0]; const int NTHIN=nthin[0]; const int NBURN=nburn[0]; const int NSAMP=(NGIBBS-NBURN)/NTHIN; const int NOBS=nobs[0]; const int NGROUP=ngroup[0]; const int NP=np[0]; const int NQ=nq[0]; /////////////// // Constants // // Number of observations by group k int *nobsk = new int[NGROUP]; for (int k=0; k *Yk_arr = new Matrix[NGROUP]; Matrix *Xk_arr = new Matrix[NGROUP]; Matrix *Wk_arr = new Matrix[NGROUP]; for(int k=0; k(nobsk[k],NP); Wk_arr[k] = Matrix(nobsk[k],NQ); Yk_arr[k] = Matrix(nobsk[k],1); for (int m=0; m *tXk_arr = new Matrix[NGROUP]; Matrix *tWk_arr = new Matrix[NGROUP]; Matrix *cpXk_arr = new Matrix[NGROUP]; Matrix *tXWk_arr = new Matrix[NGROUP]; Matrix *tWXk_arr = new Matrix[NGROUP]; Matrix *tXYk_arr = new Matrix[NGROUP]; Matrix *tWYk_arr = new Matrix[NGROUP]; for(int k=0; k mubeta(NP,1,mubeta_vect); Matrix Vbeta(NP,NP,Vbeta_vect); Matrix R(NQ,NQ,R_vect); ///////////////////////////////////// // Initializing running parameters // Matrix *bk_run = new Matrix[NGROUP]; // Random effects for (int k=0;k(NQ,1); for (int q=0; q beta_run(NP,1,false); // Unicolumn matrix of fixed effects for (int p=0; p Vb_run(NQ,NQ,true,0.0); for (int q=0; q sum_V(NP,NP); Matrix sum_v(NP,1); for (int k=0; k big_V=invpd(invpd(Vbeta)+sum_V/V_run); // small_v Matrix small_v=invpd(Vbeta)*mubeta+sum_v/V_run; // Draw in the posterior distribution beta_run=myrng.rmvnorm(big_V*small_v,big_V); /////////////////////////////// // vector b: Gibbs algorithm // // Loop on group for (int k=0; k big_Vk=invpd(invpd(Vb_run)+crossprod(Wk_arr[k])/V_run); // small_vk Matrix small_vk=(t(Wk_arr[k])*(Yk_arr[k]-Xk_arr[k]*beta_run))/V_run; // Draw in the posterior distribution bk_run[k]=myrng.rmvnorm(big_Vk*small_vk,big_Vk); } //////////////////////////////////////////// // vector of variance Vb: Gibbs algorithm // Matrix SSBb(NQ,NQ,true,0.0); for(int k=0; k Vb_scale = invpd(SSBb+(*r)*R); 
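// Conjugate update for the random-effect covariance: the precision Vb^{-1}
// is drawn from a Wishart whose scale is (SSBb + r*R)^{-1} computed above,
// with SSBb the sum of b_k b_k' over groups; under the usual inverse-Wishart
// prior the degrees of freedom Vb_dof are the standard conjugate value
// (r + NGROUP), and inverting the Wishart draw below gives Vb | b.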
Vb_run=invpd(myrng.rwish(Vb_dof,Vb_scale)); //////////////// // variance V // // e Matrix e(1,1,true,0.0); for (int k=0; k Y_hat=Xk_arr[k](m,_)*beta_run+Wk_arr[k](m,_)*bk_run[k]; // L logLk+=log(dnorm(Y_vect[w],Y_hat(0),sqrt(V_run))); } } // Deviance Deviance_run=-2*logLk; ////////////////////////////////////////////////// // Output if(((g+1)>NBURN) && (((g+1)%(NTHIN))==0)) { int isamp=((g+1)-NBURN)/(NTHIN); for (int p=0; p Y_hat=Xk_arr[k](m,_)*beta_run+Wk_arr[k](m,_)*bk_run[k]; // Y_pred Y_pred[w]+=Y_hat(0)/NSAMP; } } } ////////////////////////////////////////////////// // Progress bar double Perc=100*(g+1)/(NGIBBS); if(((g+1)%(NGIBBS/100))==0 && (*verbose==1)){ Rprintf("*"); R_FlushConsole(); //R_ProcessEvents(); for windows if(((g+1)%(NGIBBS/10))==0){ Rprintf(":%.1f%%\n",Perc); R_FlushConsole(); //R_ProcessEvents(); for windows } } ////////////////////////////////////////////////// // User interrupt R_CheckUserInterrupt(); // allow user interrupts } // end MCMC loop /////////////// // Delete memory allocation delete[] nobsk; for(int k=0; k // needed to use Rprintf() #include // needed to allow user interrupts using namespace scythe; using namespace std; extern "C"{ /* Gibbs sampler function */ void MCMChpoisson ( // Constants and data const int *ngibbs, const int *nthin, const int *nburn, // Number of iterations, burning and samples const int *nobs, const int *ngroup, // Constants const int *np, const int *nq, // Number of fixed and random covariates const int *IdentGroup, // Vector of group const double *Y_vect, // Observed response variable const double *X_vect, // Covariate for fixed effects const double *W_vect, // Covariate for random effects // Parameters to save double *beta_vect, // Fixed effects double *b_vect, // Random effects double *Vb_vect, // Variance of random effects double *V, // Variance of residuals // Defining priors const double *mubeta_vect, const double *Vbeta_vect, const double *r, const double *R_vect, const double *s1_V, const double *s2_V, // Diagnostic double *Deviance, double *lambda_pred, // Annual mortality rate // Seeds const int *seed, // Verbose const int *verbose, // Overdispersion const int *FixOD ) { //////////////////////////////////////////////////////////////////////////////// //%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% // Defining and initializing objects /////////////////////////// // Redefining constants // const int NGIBBS=ngibbs[0]; const int NTHIN=nthin[0]; const int NBURN=nburn[0]; const int NSAMP=(NGIBBS-NBURN)/NTHIN; const int NOBS=nobs[0]; const int NGROUP=ngroup[0]; const int NP=np[0]; const int NQ=nq[0]; /////////////// // Constants // // Number of observations by group k int *nobsk = new int[NGROUP]; for (int k=0; k *Xk_arr = new Matrix[NGROUP]; Matrix *Wk_arr = new Matrix[NGROUP]; for(int k=0; k(nobsk[k],NP); Wk_arr[k] = Matrix(nobsk[k],NQ); for (int m=0; m *tXk_arr = new Matrix[NGROUP]; Matrix *tWk_arr = new Matrix[NGROUP]; Matrix *cpXk_arr = new Matrix[NGROUP]; Matrix *tXWk_arr = new Matrix[NGROUP]; Matrix *tWXk_arr = new Matrix[NGROUP]; for(int k=0; k mubeta(NP,1,mubeta_vect); Matrix Vbeta(NP,NP,Vbeta_vect); Matrix R(NQ,NQ,R_vect); ///////////////////////////////////// // Initializing running parameters // Matrix *bk_run = new Matrix[NGROUP]; // Random effects for (int k=0;k(NQ,1); for (int q=0; q beta_run(NP,1,false); // Unicolumn matrix of fixed effects for (int p=0; p Vb_run(NQ,NQ,true,0.0); for (int q=0; q *log_lambdak_run = new Matrix[NGROUP]; for (int k=0;k(nobsk[k],1); for (int m=0; m 
log_lambda_hat=Xk_arr[k](m,_)*beta_run+Wk_arr[k](m,_)*bk_run[k]; // Proposal double log_lambda_prop=myrng.rnorm(log_lambdak_run[k](m),sigmap[w]); // lambda double lambda_prop=exp(log_lambda_prop); double lambda_run=exp(log_lambdak_run[k](m)); // Ratio of probabilities double p_prop=log(dpois(Y_vect[w],lambda_prop))+ log(dnorm(log_lambda_prop,log_lambda_hat(0),sqrt(V_run))); double p_now=log(dpois(Y_vect[w],lambda_run))+ log(dnorm(log_lambdak_run[k](m),log_lambda_hat(0),sqrt(V_run))); double r=exp(p_prop-p_now); // ratio double z=myrng.runif(); // Actualization if (z < r) { log_lambdak_run[k](m)=log_lambda_prop; nA[w]++; } } } ////////////////////////////////// // vector beta: Gibbs algorithm // // invVi, sum_V and sum_v Matrix sum_V(NP,NP); Matrix sum_v(NP,1); for (int k=0; k big_V=invpd(invpd(Vbeta)+sum_V/V_run); // small_v Matrix small_v=invpd(Vbeta)*mubeta+sum_v/V_run; // Draw in the posterior distribution beta_run=myrng.rmvnorm(big_V*small_v,big_V); /////////////////////////////// // vector b: Gibbs algorithm // // Loop on group for (int k=0; k big_Vk=invpd(invpd(Vb_run)+crossprod(Wk_arr[k])/V_run); // small_vk Matrix small_vk=(t(Wk_arr[k])*(log_lambdak_run[k]-Xk_arr[k]*beta_run))/V_run; // Draw in the posterior distribution bk_run[k]=myrng.rmvnorm(big_Vk*small_vk,big_Vk); } //////////////////////////////////////////// // vector of variance Vb: Gibbs algorithm // Matrix SSBb(NQ,NQ,true,0.0); for(int k=0; k Vb_scale = invpd(SSBb+(*r)*R); Vb_run=invpd(myrng.rwish(Vb_dof,Vb_scale)); //////////////// // variance V // // e Matrix e(1,1,true,0.0); for (int k=0; kNBURN) && (((g+1)%(NTHIN))==0)){ int isamp=((g+1)-NBURN)/(NTHIN); for (int p=0; p log_lambda_hat=Xk_arr[k](m,_)*beta_run+Wk_arr[k](m,_)*bk_run[k]; lambda_pred[w]+=exp(log_lambda_hat(0)+0.5*V_run)/(NSAMP); // We compute the mean of NSAMP values } } } /////////////////////////////////////////////////////// // Adaptive sampling (on the burnin period) int DIV=0; if (NGIBBS >=1000) DIV=100; else DIV=NGIBBS/10; if((g+1)%DIV==0 && (g+1)<=NBURN){ for (int n=0; n=ropt) sigmap[n]=sigmap[n]*(2-(1-Ar[n])/(1-ropt)); else sigmap[n]=sigmap[n]/(2-Ar[n]/ropt); nA[n]=0.0; // We reinitialize the number of acceptance to zero } } if((g+1)%DIV==0 && (g+1)>NBURN){ for (int n=0; n // needed to use Rprintf() #include // needed to allow user interrupts using namespace scythe; using namespace std; /* Fonction logit */ double logit (double x) { double Result=log(x)-log(1-x); return Result; } /* Fonction invlogit */ double invlogit (double x) { double Result=1/(1+exp(-x)); return Result; } extern "C"{ /* Gibbs sampler function */ void MCMChlogit ( // Constants and data const int *ngibbs, const int *nthin, const int *nburn, // Number of iterations, burning and samples const int *nobs, const int *ngroup, // Constants const int *np, const int *nq, // Number of fixed and random covariates const int *IdentGroup, // Vector of group const double *Y_vect, // Observed response variable const double *X_vect, // Covariate for fixed effects const double *W_vect, // Covariate for random effects // Parameters to save double *beta_vect, // Fixed effects double *b_vect, // Random effects double *Vb_vect, // Variance of random effects double *V, // Variance of residuals // Defining priors const double *mubeta_vect, const double *Vbeta_vect, const double *r, const double *R_vect, const double *s1_V, const double *s2_V, // Diagnostic double *Deviance, double *theta_pred, // Annual mortality rate // Seeds const int *seed, // Verbose const int *verbose, // Overdispersion const int 
*FixOD ) { //////////////////////////////////////////////////////////////////////////////// //%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% // Defining and initializing objects /////////////////////////// // Redefining constants // const int NGIBBS=ngibbs[0]; const int NTHIN=nthin[0]; const int NBURN=nburn[0]; const int NSAMP=(NGIBBS-NBURN)/NTHIN; const int NOBS=nobs[0]; const int NGROUP=ngroup[0]; const int NP=np[0]; const int NQ=nq[0]; /////////////// // Constants // // Number of observations by group k int *nobsk = new int[NGROUP]; for (int k=0; k *Xk_arr = new Matrix[NGROUP]; Matrix *Wk_arr = new Matrix[NGROUP]; for(int k=0; k(nobsk[k],NP); Wk_arr[k] = Matrix(nobsk[k],NQ); for (int m=0; m *tXk_arr = new Matrix[NGROUP]; Matrix *tWk_arr = new Matrix[NGROUP]; Matrix *cpXk_arr = new Matrix[NGROUP]; Matrix *tXWk_arr = new Matrix[NGROUP]; Matrix *tWXk_arr = new Matrix[NGROUP]; for(int k=0; k mubeta(NP,1,mubeta_vect); Matrix Vbeta(NP,NP,Vbeta_vect); Matrix R(NQ,NQ,R_vect); ///////////////////////////////////// // Initializing running parameters // Matrix *bk_run = new Matrix[NGROUP]; // Random effects for (int k=0;k(NQ,1); for (int q=0; q beta_run(NP,1,false); // Unicolumn matrix of fixed effects for (int p=0; p Vb_run(NQ,NQ,true,0.0); for (int q=0; q *logit_thetak_run = new Matrix[NGROUP]; for (int k=0;k(nobsk[k],1); for (int m=0; m logit_theta_hat=Xk_arr[k](m,_)*beta_run+Wk_arr[k](m,_)*bk_run[k]; // Proposal double logit_theta_prop=myrng.rnorm(logit_thetak_run[k](m),sigmap[w]); // theta double theta_prop=invlogit(logit_theta_prop); double theta_run=invlogit(logit_thetak_run[k](m)); // Ratio of probabilities double p_prop=log(dbinom(Y_vect[w],1,theta_prop))+ log(dnorm(logit_theta_prop,logit_theta_hat(0),sqrt(V_run))); double p_now=log(dbinom(Y_vect[w],1,theta_run))+ log(dnorm(logit_thetak_run[k](m),logit_theta_hat(0),sqrt(V_run))); double r=exp(p_prop-p_now); // ratio double z=myrng.runif(); // Actualization if (z < r) { logit_thetak_run[k](m)=logit_theta_prop; nA[w]++; } } } ////////////////////////////////// // vector beta: Gibbs algorithm // // invVi, sum_V and sum_v Matrix sum_V(NP,NP); Matrix sum_v(NP,1); for (int k=0; k big_V=invpd(invpd(Vbeta)+sum_V/V_run); // small_v Matrix small_v=invpd(Vbeta)*mubeta+sum_v/V_run; // Draw in the posterior distribution beta_run=myrng.rmvnorm(big_V*small_v,big_V); /////////////////////////////// // vector b: Gibbs algorithm // // Loop on group for (int k=0; k big_Vk=invpd(invpd(Vb_run)+crossprod(Wk_arr[k])/V_run); // small_vk Matrix small_vk=(t(Wk_arr[k])*(logit_thetak_run[k]-Xk_arr[k]*beta_run))/V_run; // Draw in the posterior distribution bk_run[k]=myrng.rmvnorm(big_Vk*small_vk,big_Vk); } //////////////////////////////////////////// // vector of variance Vb: Gibbs algorithm // Matrix SSBb(NQ,NQ,true,0.0); for(int k=0; k Vb_scale = invpd(SSBb+(*r)*R); Vb_run=invpd(myrng.rwish(Vb_dof,Vb_scale)); //////////////// // variance V // // e Matrix e(1,1,true,0.0); for (int k=0; kNBURN) && (((g+1)%(NTHIN))==0)){ int isamp=((g+1)-NBURN)/(NTHIN); for (int p=0; p logit_theta_hat=Xk_arr[k](m,_)*beta_run+Wk_arr[k](m,_)*bk_run[k]; theta_pred[w]+=invlogit(logit_theta_hat(0)/sqrt(1+C*V_run))/(NSAMP); // We compute the mean of NSAMP values } } } /////////////////////////////////////////////////////// // Adaptive sampling (on the burnin period) int DIV=0; if (NGIBBS >=1000) DIV=100; else DIV=NGIBBS/10; if((g+1)%DIV==0 && (g+1)<=NBURN){ for (int n=0; n=ropt) sigmap[n]=sigmap[n]*(2-(1-Ar[n])/(1-ropt)); else 
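// Adaptive tuning during burn-in: Ar[n] is the empirical acceptance rate of
// the nth element-wise random-walk proposal over the current adaptation
// window; when it is at or above the target ropt the proposal s.d. sigmap[n]
// is inflated by 2 - (1-Ar)/(1-ropt), and otherwise (this branch) it is
// shrunk by 1/(2 - Ar/ropt), pushing the acceptance rate toward ropt.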
sigmap[n]=sigmap[n]/(2-Ar[n]/ropt); nA[n]=0.0; // We reinitialize the number of acceptance to zero } } if((g+1)%DIV==0 && (g+1)>NBURN){ for (int n=0; n // needed to use Rprintf() #include // needed to allow user interrupts using namespace scythe; using namespace std; static double Lev1thetaPost(double theta[], const double& r0, const double& r1, const double& c0, const double& mu0, const double& mu1, const double& sigma0, const double& sigma1) { const double theta0 = theta[0]; const double theta1 = theta[1]; const double p0 = 1.0/(1.0 + exp(-1*theta0)); const double p1 = 1.0/(1.0 + exp(-1*theta1)); const double logprior = lndnorm(theta0, mu0, sqrt(sigma0)) + lndnorm(theta1, mu1, sqrt(sigma1)); const double loglike = lndnorm(c0, r0*p0 + r1*p1, sqrt(r0*p0*(1.0-p0) + r1*p1*(1.0-p1))); return(loglike + logprior); } // eventually all of the slice sampling functions should be made more // general and put in MCMCfcds.{h cc} // // Radford Neal's (2000) doubling procedure coded for a logdensity template static void doubling(double (*logfun)(double[], const double&, const double&, const double&, const double&, const double&, const double&, const double&), double theta[], const int& index, const double& z, const double& w, const int& p, const double& r0, const double& r1, const double& c0, const double& mu0, const double& mu1, const double& sigma0, const double& sigma1, rng& stream, double& L, double& R) { const double U = stream.runif(); const double x0 = theta[index]; double theta_L[2]; double theta_R[2]; theta_L[0] = theta_R[0] = theta[0]; theta_L[1] = theta_R[1] = theta[1]; L = x0 - w*U; theta_L[index] = L; R = L + w; theta_R[index] = R; int K = p; while (K > 0 && (z < logfun(theta_L, r0, r1, c0, mu0, mu1, sigma0, sigma1) || z < logfun(theta_R, r0, r1, c0, mu0, mu1, sigma0, sigma1))){ double V = stream.runif(); if (V < 0.5){ L = L - (R - L); theta_L[index] = L; } else { R = R + (R - L); theta_R[index] = R; } --K; } } // Radford Neal's (2000) Accept procedure coded for a logdensity static const bool Accept(double (*logfun)(double[], const double&, const double&, const double&, const double&, const double&, const double&, const double&), double theta[], const int& index, const double x0, const double& z, const double& w, const double& r0, const double& r1, const double& c0, const double& mu0, const double& mu1, const double& sigma0, const double& sigma1, const double& L, const double& R) { double Lhat = L; double Rhat = R; bool D = false; while ((Rhat - Lhat ) > 1.1 * w){ double M = (Lhat + Rhat) / 2.0; if ( (x0 < M && theta[index] >= M) || (x0 >= M && theta[index] < M)){ D = true; } if (theta[index] < M){ Rhat = M; } else { Lhat = M; } int ind0; if (index==0){ ind0 = 1; } else { ind0 = 0; } double theta_L[2]; double theta_R[2]; theta_L[ind0] = theta_R[ind0] = theta[ind0]; theta_L[index] = Lhat; theta_R[index] = Rhat; if (D && z >= logfun(theta_L, r0, r1, c0, mu0, mu1, sigma0, sigma1) && z >= logfun(theta_R, r0, r1, c0, mu0, mu1, sigma0, sigma1)){ return(false); } } return(true); } // Radford Neal's (2000) shrinkage procedure coded for a log density template static double shrinkage(double (*logfun)(double[], const double&, const double&, const double&, const double&, const double&, const double&, const double&), double theta[], const int& index, const double& z, const double& w, const double& r0, const double& r1, const double& c0, const double& mu0, const double& mu1, const double& sigma0, const double& sigma1, rng& stream, const double& L, const double& R) { double Lbar = L; double Rbar = R; // 
int ind0; //if (index==0){ // ind0 = 1; // } //else { // ind0 = 0; // } double theta_x1[2]; theta_x1[0] = theta[0]; theta_x1[1] = theta[1]; const double x0 = theta[index]; for (;;){ const double U = stream.runif(); const double x1 = Lbar + U*(Rbar - Lbar); theta_x1[index] = x1; if (z <= logfun(theta_x1, r0, r1, c0, mu0, mu1, sigma0, sigma1) && Accept(logfun, theta_x1, index, x0, z, w, r0, r1, c0, mu0, mu1, sigma0, sigma1, L, R)){ return(x1); } if (x1 < x0){ Lbar = x1; } else { Rbar = x1; } } // end infinite loop } template void MCMChierEI_impl(rng& stream, const Matrix<>& r0, const Matrix<>& r1, const Matrix<>& c0, const Matrix<>& c1, double mu0_prior_mean, double mu0_prior_var, double mu1_prior_mean, double mu1_prior_var, double nu0, double delta0, double nu1, double delta1, unsigned int ntables, unsigned int burnin, unsigned int mcmc, unsigned int thin, unsigned int verbose, Matrix& result) { // MCMC-related quantities unsigned int tot_iter = burnin + mcmc; // storage matrices Matrix<> p0mat(mcmc/thin, ntables, false); Matrix<> p1mat(mcmc/thin, ntables, false); Matrix<> mu0mat(mcmc/thin, 1, false); Matrix<> mu1mat(mcmc/thin, 1, false); Matrix<> sig0mat(mcmc/thin, 1, false); Matrix<> sig1mat(mcmc/thin, 1, false); unsigned int count = 0; // starting values Matrix<> p0 = stream.runif(ntables, 1)*0.5 + 0.25; Matrix<> p1 = stream.runif(ntables, 1)*0.5 + 0.25; Matrix<> theta0 = log(p0/(1.0 - p0)); Matrix<> theta1 = log(p1/(1.0 - p1)); double mu0 = 0.0; double mu1 = 0.0; double sigma0 = 1.0; double sigma1 = 1.0; double L = -2.0; double R = 2.0; // sampling constants const unsigned int warmup_iter = 4000; const unsigned int warmup_burnin = 2000; const double w_init = .000000001; const unsigned int p_init = 50; const Matrix<> widthmat(warmup_iter - warmup_burnin, 2); // warm up sampling to choose slice sampling parameters adaptively for (unsigned int iter = 0; iter < warmup_iter; ++iter) { // loop over tables for (unsigned int i = 0; i < ntables; ++i) { // sample theta0, theta1 using slice sampling for (int index = 0; index<2; ++index){ double theta_i[2]; theta_i[0] = theta0(i); theta_i[1] = theta1(i); double funval = Lev1thetaPost(theta_i, r0[i], r1[i], c0[i], mu0, mu1, sigma0, sigma1); double z = funval - stream.rexp(1.0); doubling(&Lev1thetaPost, theta_i, index, z, w_init, p_init, r0[i], r1[i], c0[i], mu0, mu1, sigma0, sigma1, stream, L, R); theta_i[index] = shrinkage(&Lev1thetaPost, theta_i, index, z, w_init, r0[i], r1[i], c0[i], mu0, mu1, sigma0, sigma1, stream, L, R); if (iter >= warmup_burnin) { widthmat(iter-warmup_burnin, index) = R - L; } theta0(i) = theta_i[0]; theta1(i) = theta_i[1]; } // end index loop } // end tables loop // sample mu0 and mu1 // mu0 double post_var = 1.0/(1.0/mu0_prior_var + ntables*(1.0/sigma0)); double post_mean = post_var*(sumc(theta0)[0]*(1.0/sigma0) + (1.0/mu0_prior_var)*mu0_prior_mean); mu0 = stream.rnorm(post_mean, sqrt(post_var)); // mu1 post_var = 1.0/(1.0/mu1_prior_var + ntables*(1.0/sigma1)); post_mean = post_var*(sumc(theta1)[0]*(1.0/sigma1) + (1.0/mu1_prior_var)*mu1_prior_mean); mu1 = stream.rnorm(post_mean, sqrt(post_var)); // sample sigma0 and sigma1 // sigma0 Matrix<> e = theta0 - mu0; Matrix<> SSE = crossprod(e); double nu2 = (nu0 + ntables)*0.5; double delta2 = (delta0 + SSE[0])*0.5; sigma0 = stream.rigamma(nu2, delta2); // sigma1 e = theta1 - mu1; SSE = crossprod(e); nu2 = (nu1 + ntables)*0.5; delta2 = (delta1 + SSE[0])*0.5; sigma1 = stream.rigamma(nu2, delta2); // allow user interrupts R_CheckUserInterrupt(); } // 
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ // sampling constants double w = mean(widthmat); int p_temp = 2; while ((w * pow(2.0, p_temp) ) < max(widthmat)){ ++p_temp; } const int p = p_temp + 1; // @@@@@@@@@@@@@@ The real sampling @@@@@@@@@@@@@@@ for (unsigned int iter = 0; iter < tot_iter; ++iter) { // loop over tables for (unsigned int i = 0; i < ntables; ++i) { // sample theta0, theta1 using slice sampling for (unsigned int index = 0; index < 2; ++index) { double theta_i[2]; theta_i[0] = theta0(i); theta_i[1] = theta1(i); double funval = Lev1thetaPost(theta_i, r0[i], r1[i], c0[i], mu0, mu1, sigma0, sigma1); double z = funval - stream.rexp(1.0); doubling(&Lev1thetaPost, theta_i, index, z, w, p, r0[i], r1[i], c0[i], mu0, mu1, sigma0, sigma1, stream, L, R); //Rprintf("L = %10.5f R = %10.5f\n", L, R); theta_i[index] = shrinkage(&Lev1thetaPost, theta_i, index, z, w, r0[i], r1[i], c0[i], mu0, mu1, sigma0, sigma1, stream, L, R); theta0[i] = theta_i[0]; theta1[i] = theta_i[1]; } // end index loop // if after burnin store samples if ((iter >= burnin) && ((iter % thin)==0)){ p0mat(count,i) = 1.0/(1.0 + exp(-1*theta0[i])); p1mat(count,i) = 1.0/(1.0 + exp(-1*theta1[i])); } } // end tables loop // sample mu0 and mu1 // mu0 double post_var = 1.0/(1.0/mu0_prior_var + ntables*(1.0/sigma0)); double post_mean = post_var*(sumc(theta0)[0]*(1.0/sigma0) + (1.0/mu0_prior_var)*mu0_prior_mean); mu0 = stream.rnorm(post_mean, sqrt(post_var)); // mu1 post_var = 1.0/(1.0/mu1_prior_var + ntables*(1.0/sigma1)); post_mean = post_var*(sumc(theta1)[0]*(1.0/sigma1) + (1.0/mu1_prior_var)*mu1_prior_mean); mu1 = stream.rnorm(post_mean, sqrt(post_var)); // sample sigma0 and sigma1 // sigma0 Matrix e = theta0 - mu0; Matrix SSE = crossprod(e); double nu2 = (nu0 + ntables)*0.5; double delta2 = (delta0 + SSE[0])*0.5; sigma0 = stream.rigamma(nu2,delta2); // sigma1 e = theta1 - mu1; SSE = crossprod(e); nu2 = (nu1 + ntables)*0.5; delta2 = (delta1 + SSE[0])*0.5; sigma1 = stream.rigamma(nu2,delta2); // if after burnin store samples if ((iter >= burnin) && ((iter % thin)==0)){ mu0mat(count,0) = mu0; mu1mat(count,0) = mu1; sig0mat(count,0) = sigma0; sig1mat(count,0) = sigma1; ++ count; } // print output to screen if (verbose>0 && (iter%verbose)==0) Rprintf("\nMCMChierEI iteration %i of %i \n", (iter+1), tot_iter); // allow user interrupts R_CheckUserInterrupt(); } // return sample result = cbind(p0mat, p1mat); result = cbind(result, mu0mat); result = cbind(result, mu1mat); result = cbind(result, sig0mat); result = cbind(result, sig1mat); } extern "C"{ void hierEI(double* sample, const int* samrow, const int* samcol, const double* Rr0, const double* Rr1, const double* Rc0, const double* Rc1, const int* Rntables, const int* Rburnin, const int* Rmcmc, const int* Rthin, const double* Rmu0pm, const double* Rmu0pv, const double* Rmu1pm, const double* Rmu1pv, const double* Rnu0, const double* Rdelta0, const double* Rnu1, const double* Rdelta1, const int* Rverbose, const int *uselecuyer, const int *seedarray, const int *lecuyerstream) { // load data // table notation is: // -------------------- // Y0 | | r0 // -------------------- // Y1 | | r1 // -------------------- // c0 | c1 | N unsigned int ntables = *Rntables; Matrix<> r0(ntables, 1, Rr0); Matrix<> r1(ntables, 1, Rr1); Matrix<> c0(ntables, 1, Rc0); Matrix<> c1(ntables, 1, Rc1); Matrix result(*samrow, *samcol, false); MCMCPACK_PASSRNG2MODEL(MCMChierEI_impl, r0, r1, c0, c1, *Rmu0pm, *Rmu0pv, *Rmu1pm, *Rmu1pv, *Rnu0, *Rdelta0, *Rnu1, *Rdelta1, ntables, *Rburnin, *Rmcmc, *Rthin, 
*Rverbose, result); for (unsigned int i = 0; i < result.size(); ++i) sample[i] = result[i]; } } // extern "C" #endif MCMCpack/src/MCMChierBetaBinom.cc0000644000176000001440000002354112140061656016134 0ustar ripleyusers////////////////////////////////////////////////////////////////////////// // MCMChierBetaBinom.cc is C++ code to fit a hierarchical beta binomial // model // // y_{ij} ~ Binomial(s_{ij}, theta_{ij}) // theta_{ij} ~ Beta(alpha_j, beta_j) // alpha_j ~ Pareto(1, a) // beta_j ~ Pareto(1, b) // // // uses adaptive Metropolis scheme similar to that of Haario et al. (2001) // as implemented by Roberts and Rosenthal (2006/2008) // "Examples of Adaptive MCMC" // // This software is distributed under the terms of the GNU GENERAL // PUBLIC LICENSE Version 2, June 1991. See the package LICENSE // file for more information. // // 5/28/2011 KQ // // Copyright (C) 2003-2007 Andrew D. Martin and Kevin M. Quinn // Copyright (C) 2007-present Andrew D. Martin, Kevin M. Quinn, // and Jong Hee Park ////////////////////////////////////////////////////////////////////////// #ifndef MCMCHIERBETABINOM_CC #define MCMCHIERBETABINOM_CC #include #include "matrix.h" #include "algorithm.h" #include "distributions.h" #include "stat.h" #include "la.h" #include "ide.h" #include "smath.h" #include "MCMCrng.h" #include "MCMCfcds.h" #include // needed to use Rprintf() #include // needed to allow user interrupts typedef Matrix rmview; using namespace std; using namespace scythe; // used to access Ydata like a 2d array #define M(ROW,COL,NROWS) (COL*NROWS+ROW) // log of the pareto density double logdpareto(const double& x, const double& xm, const double& a){ double logfunval; if (x > xm && a > 0){ logfunval = log(a) + a*log(xm) - (a+1)*log(x); } else{ logfunval = -numeric_limits::infinity(); } return logfunval; } // log of the full conditional density for alpha_j, beta_j double logABfcd(const double& alpha, const double& beta, const vector& theta, const double& a, const double& b){ double term1 = 0.0; double term2 = 0.0; const int len_theta = theta.size(); if (alpha > 1.0 && beta > 1.0){ for (int i=0; i::infinity(); } // a and/or b <= 0 is treated as improper uniform prior if (a > 0){ term2 += logdpareto(alpha, 1.0, a); } if (b > 0){ term2 += logdpareto(beta, 1.0, b); } double logfcd = term1 + term2; return logfcd; } // candidate generating function template Matrix mixcangen(const double& alpha, const double& beta, const double& mixfrac, const double& base_sigma, const double& alpha_var_n, const double& beta_var_n, const double& ab_cov_n, rng& stream){ Matrix ab_V(2, 2, false); ab_V = alpha_var_n, ab_cov_n, ab_cov_n, beta_var_n; ab_V = (5.6644 * ab_V) / 2.0; Matrix ab_mean(2, 1, false); ab_mean = alpha, beta; Matrix ab_can(2, 1, false); double u = stream.runif(); if (u < mixfrac){ double alpha_can = stream.rnorm(alpha, base_sigma); double beta_can = stream.rnorm(beta, base_sigma); ab_can = alpha_can, beta_can; } else{ ab_can = stream.rmvnorm(ab_mean, ab_V); } return ab_can; } template void hierBetaBinom_impl(rng& stream, double* sampledata, const int samplerow, const int samplecol, const int* y, const int* s, const double* theta_start, const double* alpha_start, const double* beta_start, const double a, const double b, const int* ilabels, const int* jlabels, const int* ilabelsunique, const int* jlabelsunique, const int n, const int ni, const int nj, const int burnin, const int mcmc, const int thin, const int verbose, int * accepts, const double* base_sigma){ const int tot_iter = burnin + mcmc; // JHP const int 
nstore = mcmc/thin; // these probably should not be hard coded const double mixfrac = 0.05; double* theta; theta = new double[n]; for (int i=0; i > js_thetas_ptr; js_thetas_ptr.reserve(nj); for (int j=0; j holder; holder.reserve(n); for (int i=0; i(y[i]) + alpha[cluster_id-1], static_cast(s[i] - y[i]) + beta[cluster_id - 1]); } // sample [alpha_j, beta_j | theta, y, s, a, b] for (int j=0; j(iter); double beta_mean_n = sums_beta_n[j] / static_cast(iter); double alpha_var_n = 0.0; double beta_var_n = 0.0; double ab_cov_n = 0.0; for (int i=0; i(iter); beta_var_n = beta_var_n / static_cast(iter); ab_cov_n = ab_cov_n / static_cast(iter); if (alpha_var_n <= 0.0){ alpha_var_n = std::pow(base_sigma[j], 2); ab_cov_n = 0.0; } if (beta_var_n <= 0.0){ beta_var_n = std::pow(base_sigma[j], 2); ab_cov_n = 0.0; } Matrix ab_can = mixcangen(alpha[j], beta[j], mixfrac, base_sigma[j], alpha_var_n, beta_var_n, ab_cov_n, stream); double alpha_can = ab_can(0); double beta_can = ab_can(1); double accept_numer; double accept_denom; accept_numer = logABfcd(alpha_can, beta_can, js_thetas_ptr[j], a, b); accept_denom = logABfcd(alpha[j], beta[j], js_thetas_ptr[j], a, b); const double ratio = exp(accept_numer - accept_denom); if (stream.runif() < ratio) { alpha[j] = alpha_can; beta[j] = beta_can; ++accepts[j]; } } // end else iter not less than 10 sums_alpha_n[j] += alpha[j]; sums_beta_n[j] += beta[j]; alpha_storage[M(iter, j, tot_iter)] = alpha[j]; beta_storage[M(iter, j, tot_iter)] = beta[j]; } // end j loop // store values if (iter >= burnin && (iter % thin == 0)) { for (int i=0; i 0 && iter % verbose == 0){ Rprintf("\n\niteration %i of %i \n", (iter+1), tot_iter); } // allow user interrupts R_CheckUserInterrupt(); } // end MCMC iterations // clear memory delete [] theta; delete [] alpha; delete [] beta; delete [] alpha_storage; delete [] beta_storage; delete [] sums_alpha_n; delete [] sums_beta_n; } // end hierBetaBinom_impl extern "C"{ // function called by R to fit model void hierBetaBinom(double* sampledata, const int* samplerow, const int* samplecol, const int* y, const int* s, const double* theta_start, const double* alpha_start, const double* beta_start, const double* a, const double* b, const int* ilabels, const int* jlabels, const int* ilabelsunique, const int* jlabelsunique, const int* n, const int* ni, const int* nj, const int* burnin, const int* mcmc, const int* thin, const int *uselecuyer, const int *seedarray, const int *lecuyerstream, const int* verbose, int *accepts, const double* base_sigma){ MCMCPACK_PASSRNG2MODEL(hierBetaBinom_impl, sampledata, *samplerow, *samplecol, y, s, theta_start, alpha_start, beta_start, *a, *b, ilabels, jlabels, ilabelsunique, jlabelsunique, *n, *ni, *nj, *burnin, *mcmc, *thin, *verbose, accepts, base_sigma); } // end hierBetaBinom } // end extern "C" #endif MCMCpack/src/MCMCfcds.h0000644000176000001440000006246312140061656014213 0ustar ripleyusers////////////////////////////////////////////////////////////////////////// // MCMCfcds.h is the header file for MCMCfcds.cc. It contains declarations // for a number of functions that produce draws from full conditional // distributions. // // Andrew D. Martin // Dept. of Political Science // Washington University in St. Louis // admartin@wustl.edu // // Kevin M. Quinn // Dept. of Government // Harvard University // kevin_quinn@harvard.edu // // This software is distributed under the terms of the GNU GENERAL // PUBLIC LICENSE Version 2, June 1991. See the package LICENSE // file for more information. 
// // KQ 6/10/2004 // DBP 7/01/2007 [ported to scythe 1.0.x (partial)] // // Copyright (C) 2003-2007 Andrew D. Martin and Kevin M. Quinn // Copyright (C) 2007-present Andrew D. Martin, Kevin M. Quinn, // and Jong Hee Park ////////////////////////////////////////////////////////////////////////// #ifndef MCMCFCDS_H #define MCMCFCDS_H #include "matrix.h" #include "rng.h" #include "stat.h" #include "smath.h" #include "ide.h" #include "la.h" #include "distributions.h" #include using namespace std; using namespace scythe; // linear regression with Gaussian errors beta draw // (multivariate Normal prior) // regression model is y = X * beta + epsilon, epsilon ~ N(0,sigma2) // XpX is X'X // XpY is X'y // b0 is the prior mean of beta // B0 is the prior precision (the inverse variance) of beta template Matrix NormNormregress_beta_draw (const Matrix<>& XpX, const Matrix<>& XpY, const Matrix<>& b0, const Matrix<>& B0, double sigma2, rng& stream) { // this function gets the cross-product matrix X'X and the matrix X'Y // to minimize the amount of computation within the function const unsigned int k = XpX.cols (); const double sig2_inv = 1.0 / sigma2; const Matrix<> sig_beta = invpd (B0 + XpX * sig2_inv); const Matrix<> C = cholesky (sig_beta); const Matrix<> betahat = sig_beta * gaxpy(B0, b0, XpY*sig2_inv); return( gaxpy(C, stream.rnorm(k,1, 0, 1), betahat) ); } // linear regression with Gaussian errors sigma2 draw // (inverse-Gamma prior) // regression model is y = X * beta + epsilon, epsilon ~ N(0,sigma2) // c0/2 is the prior shape parameter for sigma2 // d0/2 is the prior scale parameter for sigma2 template double NormIGregress_sigma2_draw (const Matrix <> &X, const Matrix <> &Y, const Matrix <> &beta, double c0, double d0, rng& stream) { const Matrix <> e = gaxpy(X, (-1*beta), Y); const Matrix <> SSE = crossprod (e); const double c_post = (c0 + X.rows ()) * 0.5; const double d_post = (d0 + SSE[0]) * 0.5; return stream.rigamma (c_post, d_post); } // Bayesian quantile (including median) regression beta draw // (multivariate Normal prior) // // b0 is the prior mean of beta(tau) // B0 is the prior precision (the inverse variance) of beta(tau) template Matrix ALaplaceNormregress_beta_draw (double tau, const Matrix<>& X, const Matrix<>& Y, const Matrix<>& weights, const Matrix<>& b0, const Matrix<>& B0, rng& stream) { const unsigned int k = X.cols(); const unsigned int n_obs = X.rows(); Matrix<> U(Y); if (tau!=0.5){ U -= (1.0-2.0*tau)*weights; } Matrix<> XtwX(k,k,false); Matrix<> XtwU(k,1,false); double temp_x = 0.0; double temp_u = 0.0; //Calculate XtwU where w denotes a diagonal matrix with the augmented data (weights) on the diagonal for (unsigned int i=0; i var_matrix_beta = invpd(B0+0.5*XtwX); const Matrix<> C = cholesky(var_matrix_beta); const Matrix<> betahat = var_matrix_beta*gaxpy(B0,b0,0.5*XtwU); return( gaxpy(C, stream.rnorm(k,1, 0, 1), betahat) ); } // This function draws from the full conditional distribution of the latent random variables (weights) under quantile regression (including median regression) and returns a column vector of those weights. 
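// A brief sketch of the conditional used below: with residuals e_i from the
// current beta(tau), the augmented-data (asymmetric-Laplace) representation
// gives each latent weight an inverse-Gaussian full conditional whose
// parameter is taken as 1/|e_i| (nu_params = pow(abse,-1.0) in the code),
// so the weights are drawn independently, one per observation.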
template Matrix ALaplaceIGaussregress_weights_draw (const Matrix <> &abse, rng& stream) { const Matrix nu_params = pow(abse,-1.0); Matrix<> w(abse); const unsigned int n_obs = abse.rows(); // The inverse Gaussian distribution for (unsigned int i=0; i Cnew; bool newtrial; double logdetminhalf; }; // updating the indicator variable corresponding to whether a // covariate is included in the model, given that it was // previously absent template COV_TRIAL QR_SSVS_covariate_trials_draw_absent(const Matrix<>& C, const Matrix<>& X_gamma, const Matrix<>& U, const Matrix<>& newXcol, unsigned int row_index, const Matrix<>& weights, double pi0, double newlambda, double logolddetminhalf, rng& stream){ const unsigned int n_obs = U.rows(); const unsigned int k = C.rows(); //Calculate new row required to update the Cholesky decomposition Matrix<> XUXnewtXnew(k+1,1,false); double temp_xux1 = 0.0; double temp_xux2 = 0.0; double temp_xux3 = 0.0; //Calculate XUXnewtXnew for (unsigned int i=0; i z(k,1,false); for (unsigned int i = 0; i < k; ++i) { double sum = 0; for (unsigned int j = 0; j < i; ++j) { sum += C(i,j) * z(j); } z(i) = (XUXnewtXnew(i) - sum) / C(i, i); } double rho = std::sqrt(XUXnewtXnew(k)-crossprod(z)(0)); Matrix<> Cnew(k+1, k+1, true, 0.0); Cnew(0,0,k-1,k-1) = C; Cnew(k,0,k,k-1) = z; Cnew(k,k) = rho; // Permuting the Cholesky decomposition so that it corresponds to what would be obtained if the X matrix included the covariate Matrix<> temp(Cnew); if (row_index != 0){ temp(0,0,row_index-1,k) = Cnew(0,0,row_index-1,k); } temp(row_index,_) = Cnew(k,_); temp(row_index+1,0,k,k) = Cnew(row_index,0,k-1,k); // Givens rotations Matrix<> Q(2,2,false); for (unsigned int i=k; i>row_index; --i) { double two_norm = std::sqrt(temp(row_index,i)*temp(row_index,i) +temp(row_index,i-1)*temp(row_index,i-1)); Q(0,0) = temp(row_index,i-1)/two_norm; Q(1,0) = temp(row_index,i)/two_norm; Q(1,1) = Q(0,0); Q(0,1) = -1.0*Q(1,0); if (i!=k){ temp(i+1,i-1,k,i) = temp(i+1,i-1,k,i) * Q; } double temp2 = temp(i,i-1); temp(i,i-1) = Q(0,0)*temp2; temp(i,i) = Q(0,1)*temp2; if (temp(i,i) < 0){ temp(i,i,k,i) = -1.0*temp(i,i,k,i); } temp(row_index,i-1) = two_norm; temp(row_index,i) = 0.0; } Cnew=temp; //Work out -0.5*log(det(Cnew'Cnew)) double lognewdetminhalf = 0.0; for (unsigned int i=0; i COV_TRIAL QR_SSVS_covariate_trials_draw_present(const Matrix<>& C, unsigned int row_index, unsigned int n_obs, double pi0, double oldlambda, double logolddetminhalf, rng& stream){ unsigned int k = C.rows(); // Permuting the Cholesky decomposition so that it corresponds to what would be obtained if the X matrix had the covariate in the final column Matrix<> temp(C); if (row_index != 0){ temp(0,0,row_index-1,k-1) = C(0,0,row_index-1,k-1); } temp(k-1,_) = C(row_index,_); temp(row_index,0,k-2,k-1) = C(row_index+1,0,k-1,k-1); // Givens rotations Matrix<> Q(2,2,false); for (unsigned int i=row_index; i Cnew = temp(0,0,k-2,k-2); // Work out -1/2*log(det(Cnew'Cnew)) double lognewdetminhalf = 0.0; for (unsigned int i=0; i Matrix<> QR_SSVS_beta_draw(const Matrix<>& C, rng& stream){ unsigned int k = C.rows(); Matrix<> standnorm = stream.rnorm(k-1,1,0,1); Matrix<> z(k-1,1,false); z = t(C(k-1,0,k-1,k-2)); Matrix<> Q = z+standnorm*std::sqrt(2.0); Matrix<> result(k-1,1,false); for (int i = k-2; i >= 0; --i) { double sum = 0; for (unsigned int j = i+1; j < k-1; ++j) { sum += C(j,i) * result(j); } result(i) = (Q(i) - sum) / C(i, i); } return result; } //hyperparameter pi0 updating template double QR_SSVS_pi0_draw(unsigned int n_uncert_cov, unsigned int 
tot_n_uncert_cov, double pi0a0, double pi0b0, rng& stream){ double pi0a1 = pi0a0 + n_uncert_cov; double pi0b1 = pi0b0 + tot_n_uncert_cov - n_uncert_cov; return(stream.rbeta(pi0a1,pi0b1)); } //update latent lambdas template Matrix QR_SSVS_lambda_draw(const Matrix<>& beta_red, const Matrix<>& gamma, unsigned int tot_n_cov, unsigned int n_cert_cov, rng& stream) { unsigned int n_uncert_cov = tot_n_cov - n_cert_cov; Matrix<> newlambda(n_uncert_cov,1,false); for (unsigned int i=n_cert_cov; i void irt_Z_update1 (Matrix<>& Z, const Matrix& X, const Matrix<>& theta, const Matrix<>& eta, rng& stream) { // define constants const unsigned int J = theta.rows(); const unsigned int K = eta.rows(); // perform update from truncated Normal / standard Normals for (unsigned int i = 0; i < J; ++i) { for (unsigned int j = 0; j < K; ++j){ const double Z_mean = -eta(j,0) + theta(i) * eta(j,1); if (X(i,j) == 1) { Z(i,j) = stream.rtbnorm_combo(Z_mean, 1.0, 0); } else if (X(i,j) == 0) { Z(i,j) = stream.rtanorm_combo(Z_mean, 1.0, 0); } else { Z(i,j) = stream.rnorm(Z_mean, 1.0); } } } } // update item (case, roll call) parameters for item response model // note: works only for one-dimensional case template void irt_eta_update1 (Matrix<>& eta, const Matrix<>& Z, const Matrix<>& theta, const Matrix<>& AB0, const Matrix<>& AB0ab0, rng& stream) { // define constants const unsigned int J = theta.rows(); const unsigned int K = Z.cols(); // perform update //const Matrix Ttheta_star = t(cbind(-1.0*ones(J,1),theta)); // only needed for option 2 const Matrix<> tpt(2,2); for (unsigned int i = 0; i < J; ++i) { const double theta_i = theta(i); tpt(0,1) -= theta_i; tpt(1,1) += std::pow(theta_i, 2.0); } tpt(1,0) = tpt(0,1); tpt(0,0) = J; const Matrix<> eta_post_var = invpd(tpt + AB0); const Matrix<> eta_post_C = cholesky(eta_post_var); for (unsigned int k = 0; k < K; ++k) { const Matrix<> TZ(2, 1); for (unsigned int j = 0; j < J; ++j) { TZ[0] -= Z(j,k); TZ[1] += Z(j,k) * theta[j]; } const Matrix<> eta_post_mean = eta_post_var * (TZ + AB0ab0); const Matrix<> new_eta = gaxpy(eta_post_C, stream.rnorm(2, 1, 0, 1), eta_post_mean); eta(k,0) = new_eta(0); eta(k,1) = new_eta(1); } } // update ability parameters (ideal points) for one dimensional // item response model // note: works only for one-dimensional case template void irt_theta_update1 (Matrix<>& theta, const Matrix<>& Z, const Matrix<>& eta, double t0, double T0, const Matrix<>& theta_eq, const Matrix<>& theta_ineq, rng& stream) { const unsigned int J = Z.rows(); const unsigned int K = Z.cols(); // perform update from multivariate Normal const double T0t0 = T0*t0; const Matrix<> alpha = eta(_, 0); const Matrix<> beta = eta(_, 1); //const Matrix tbeta = t(beta); // only neede for option 2 //const Matrix talpha = t(alpha); // only needed for option 2 // calculate the posterior variance outside the justice specific loop double theta_post_var = T0; for (unsigned int i = 0; i < K; ++i) theta_post_var += std::pow(beta(i), 2.0); theta_post_var = 1.0 / theta_post_var; const double theta_post_sd = std::sqrt(theta_post_var); // sample for each justice for (unsigned int j = 0; j < J; ++j) { // no equality constraints if (theta_eq(j) == -999) { double betaTZjalpha = 0; for (unsigned int k = 0; k < K; ++k) betaTZjalpha += beta(k) * (Z(j,k) + alpha(k)); const double theta_post_mean = theta_post_var * (T0t0 + betaTZjalpha); if (theta_ineq(j) == 0) { // no inequality constraint theta(j) = theta_post_mean + stream.rnorm(0.0, theta_post_sd); } else if (theta_ineq(j) > 0) { // theta[j] > 0 theta(j) = 
stream.rtbnorm_combo(theta_post_mean, theta_post_var, 0); } else { // theta[j] < 0 theta(j) = stream.rtanorm_combo(theta_post_mean, theta_post_var, 0); } } else { // equality constraints theta(j) = theta_eq(j); } } } // factor analysis model with normal mean 0, precision F0 prior on // factor scores // X follows a multivariate normal distribution // Lambda is the matrix of factor loadings // Psi_inv is the inverse of the uniqueness matrix // N is number of observations // D is the number of factors // this function draws the factor scores // // IMPORTANT // ***********Psi_inv IS ASSUMED TO DIAGONAL *********** template void NormNormfactanal_phi_draw(Matrix<> &phi, const Matrix<> &F0, const Matrix<> &Lambda, const Matrix<> &Psi_inv, const Matrix<> &X, const int& N, const int& D, rng & stream){ // If Psi_inv is *not* diagonal then use: // Matrix phi_post_var = invpd(F0 + t(Lambda) * Psi_inv * // Lambda); //Instead of the following 2 lines: const Matrix<> AAA = scythe::sqrt(Psi_inv) * Lambda; const Matrix<> phi_post_var = invpd(F0 + crossprod(AAA)); const Matrix<> phi_post_C = cholesky(phi_post_var); for (int i=0; i phi_post_mean = phi_post_var * (t(Lambda) * Psi_inv * t(X(i,_))); const Matrix<> phi_samp = gaxpy(phi_post_C, stream.rnorm(D, 1, 0.0, 1.0), phi_post_mean); for (int j=0; j void NormIGfactanal_Psi_draw(Matrix<> &Psi, const Matrix<> &X, const Matrix<> &phi, const Matrix<> &Lambda, const Matrix<> &a0, const Matrix<> &b0, const int& K, const int& N, rng& stream){ for (int i=0; i epsilon = gaxpy(phi, -1*(t(Lambda(i,_))), X(_,i)); const Matrix SSE = crossprod(epsilon); const double a1 = (a0[i] + N)*0.5; const double b1 = (b0[i] + SSE[0])*0.5; Psi(i,i) = stream.rigamma(a1, b1); } } // Psi_inv assumed diagnonal // this function draws the factor loading matrix template void NormNormfactanal_Lambda_draw(Matrix<>& Lambda, const Matrix &Lambda_free_indic, const Matrix<> &Lambda_prior_mean, const Matrix<> &Lambda_prior_prec, const Matrix<> &phi, const Matrix<> &X, const Matrix<> &Psi_inv, const Matrix<> &Lambda_ineq, const unsigned int D, const unsigned int K, rng& stream) { for (unsigned int i = 0; i < K; ++i) { const Matrix free_indic = t(Lambda_free_indic(i,_)); // end replacement if (sumc(free_indic)(0) > 0 && sumc(! free_indic)(0) > 0) { // both constrnd & unconstrnd const Matrix<> phifree_i = t(selif(t(phi), free_indic)); const Matrix<> mulamfree_i = selif(t(Lambda_prior_mean(i,_)), free_indic); // prior mean const Matrix<> hold = selif(t(Lambda_prior_prec(i,_)), free_indic); Matrix<> sig2lamfree_inv_i = eye(hold.rows()); // prior prec for (unsigned int j = 0; j < (hold.rows()); ++j) sig2lamfree_inv_i(j,j) = hold[j]; const Matrix<> Lambdacon_i = selif(t(Lambda(i,_)), ! free_indic); const Matrix<> phicon_i = t(selif(t(phi), ! 
free_indic)); const Matrix<> newX_i = gaxpy((-1.0*phicon_i), Lambdacon_i, X(_,i)); const Matrix<> Lam_post_var = invpd(sig2lamfree_inv_i + Psi_inv(i,i) * crossprod(phifree_i)); const Matrix<> Lam_post_C = cholesky(Lam_post_var); const Matrix<> Lam_post_mean = Lam_post_var * (sig2lamfree_inv_i * mulamfree_i + Psi_inv(i,i) * t(phifree_i) * newX_i); Matrix<> Lambdafree_i = gaxpy(Lam_post_C, stream.rnorm(hold.rows(), 1, 0, 1),Lam_post_mean); // check to see if inequality constraints hold const Matrix<> Lambda_ineq_vec = Lambda_ineq(i,_); double ineq_holds = 0; int Lam_count = 0; for (unsigned int j = 0; j < D; ++j) { if (free_indic(j)){ ineq_holds = std::min(ineq_holds, Lambda_ineq_vec(j) * Lambdafree_i(Lam_count)); ++Lam_count; } } while (ineq_holds < 0) { Lambdafree_i = gaxpy(Lam_post_C, stream.rnorm(hold.rows(), 1, 0, 1), Lam_post_mean); Lam_count = 0; double test = 0; for (unsigned int j = 0; j < D; ++j) { if (free_indic(j) == 1) { Matrix<> prodcheck = Lambda_ineq_vec(j) * Lambdafree_i(Lam_count); test = std::min(test, prodcheck(0)); ++Lam_count; } } ineq_holds = test; } // put draw into Lambda Lam_count = 0; for (unsigned int j = 0; j < D; ++j) { if (free_indic(j) == 1) { Lambda(i,j) = Lambdafree_i(Lam_count); ++Lam_count; } } } else if (sumc(free_indic)(0) > 0) { // just unconstrained const Matrix<> phifree_i = t(selif(t(phi), free_indic)); const Matrix<> mulamfree_i = selif(t(Lambda_prior_mean(i,_)), free_indic); // prior mean const Matrix<> hold = selif(t(Lambda_prior_prec(i,_)), free_indic); Matrix<> sig2lamfree_inv_i = eye(hold.rows()); // prior prec for (unsigned int j = 0; j < hold.rows(); ++j) sig2lamfree_inv_i(j,j) = hold(j); const Matrix<> Lam_post_var = invpd(sig2lamfree_inv_i + Psi_inv(i,i) * crossprod(phifree_i)); const Matrix<> Lam_post_C = cholesky(Lam_post_var); const Matrix<> Lam_post_mean = Lam_post_var * (sig2lamfree_inv_i * mulamfree_i + Psi_inv(i,i) * t(phifree_i) * X(_,i)); Matrix<> Lambdafree_i = gaxpy(Lam_post_C, stream.rnorm(hold.rows(), 1, 0, 1), Lam_post_mean); // check to see if inequality constraints hold Matrix<> Lambda_ineq_vec = Lambda_ineq(i,_); double ineq_holds = 0; for (unsigned int j = 0; j < D; ++j) { ineq_holds = std::min(ineq_holds, Lambda_ineq_vec(j) * Lambdafree_i(j)); } while (ineq_holds < 0) { Lambdafree_i = gaxpy(Lam_post_C, stream.rnorm(hold.rows(), 1, 0, 1), Lam_post_mean); double test = 0; for (unsigned int j = 0; j < D; ++j) { //if (free_indic[j]==1) double prodcheck = Lambda_ineq_vec[j]*Lambdafree_i[j]; test = std::min(test, prodcheck); } ineq_holds = test; } // put draw into Lambda for (unsigned int j = 0; j < D; ++j) { Lambda(i,j) = Lambdafree_i(j); } } } // return(Lambda); } // update ability parameters (ideal points) for one dimensional // Hierarchical item response model. // 2008-11-18 now deals with PX alpha and holds on to thetahat template void hirt_theta_update1 (Matrix<>& theta, Matrix<>& thetahat, const Matrix<>& Z, const Matrix<>& eta, const Matrix<>& beta, const Matrix<>& Xj, const double& sigma2, const double& alpha, rng& stream) { const unsigned int J = Z.rows(); const unsigned int K = Z.cols(); // Get level1 prior mean const Matrix Xbeta = (Xj * beta); // a and b are backwards here, different parameterizations // common for edu people and us. 
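// Full conditional used below: the posterior precision for theta_j is
// sum_k a_k^2 + 1/sigma2 (apa + sig2_inv), and the posterior mean combines
// the item contributions a_k * (Z_jk + b_k) with the level-1 prior mean
// x_j'beta / sigma2; under the parameter-expansion (PX) scheme the mean is
// divided by alpha before the normal draw.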
const Matrix<> b = eta(_, 0); // location const Matrix<> a = eta(_, 1); // relevance // calculate the posterior variance outside the justice specific loop const double sig2_inv = 1.0 / sigma2; const Matrix apa = crossprod(a); const Matrix theta_post_var = scythe::invpd(apa + sig2_inv); const double theta_post_sd = std::sqrt(theta_post_var[0]); // sample for each justice for (unsigned int j = 0; j < J; ++j) { thetahat(j) = 0.0; for (unsigned int k = 0; k < K; ++k) { thetahat(j) += a[k] * ( Z(j,k) + b[k]); // bill contribution } thetahat(j) += (Xbeta[j] / sigma2); // j prior level1 contribution thetahat(j) *= theta_post_var[0]; const double t = thetahat(j) / alpha; theta(j) = stream.rnorm( t , theta_post_sd ); } } // update item (case, roll call) parameters for item response model // note: works only for one-dimensional case // updated for PX (alpha) and hold on to etahat 2008-11-18 template void hirt_eta_update1 (Matrix<>& eta, Matrix<>& etahat, const Matrix<>& Z, const Matrix<>& theta, const Matrix<>& AB0, const Matrix<>& AB0ab0, const double& alpha, rng& stream) { // define constants const unsigned int J = theta.rows(); const unsigned int K = Z.cols(); // perform update //const Matrix Ttheta_star = t(cbind(-1.0*ones(J,1),theta)); // only needed for option 2 const Matrix<> tpt(2,2); for (unsigned int i = 0; i < J; ++i) { const double theta_i = theta(i); tpt(0,1) -= theta_i; tpt(1,1) += std::pow(theta_i, 2.0); } tpt(1,0) = tpt(0,1); tpt(0,0) = J; const Matrix<> eta_post_var = invpd(tpt + AB0); const Matrix<> eta_post_C = cholesky(eta_post_var); for (unsigned int k = 0; k < K; ++k) { const Matrix<> TZ(2, 1); for (unsigned int j = 0; j < J; ++j) { TZ[0] -= Z(j,k); TZ[1] += Z(j,k) * theta[j]; } Matrix<> eta_post_mean = eta_post_var * (TZ + AB0ab0); etahat(k,0) = eta_post_mean(0); etahat(k,1) = eta_post_mean(1); eta_post_mean /= alpha; const Matrix<> new_eta = gaxpy(eta_post_C, stream.rnorm(2, 1, 0, 1), (eta_post_mean) ); eta(k,0) = new_eta(0); eta(k,1) = new_eta(1); //Rprintf("\n\a: %3.1f,b:%3.1f ",eta(k,0),eta(k,1)); } } #endif MCMCpack/src/MCMCfactanal.cc0000644000176000001440000001476512140061656015205 0ustar ripleyusers////////////////////////////////////////////////////////////////////////// // MCMCfactanal.cc is C++ code to estimate a factor analysis model // // Andrew D. Martin // Dept. of Political Science // Washington University in St. Louis // admartin@wustl.edu // // Kevin M. Quinn // Dept. of Government // Harvard University // kevin_quinn@harvard.edu // // This software is distributed under the terms of the GNU GENERAL // PUBLIC LICENSE Version 2, June 1991. See the package LICENSE // file for more information. // // revised version of older MCMCfactanal 5/11/2004 KQ // updated to new verion of scythe 7/25/2004 ADM // updated to Scythe 1.0.X 7/10/2007 ADM // finished update to Scythe 1.0.X 7/30/2007 KQ // // Copyright (C) 2003-2007 Andrew D. Martin and Kevin M. Quinn // Copyright (C) 2007-present Andrew D. Martin, Kevin M. 
Quinn, // and Jong Hee Park ////////////////////////////////////////////////////////////////////////// #ifndef MCMCFACTANAL_CC #define MCMCFACTANAL_CC #include "matrix.h" #include "algorithm.h" #include "distributions.h" #include "stat.h" #include "la.h" #include "ide.h" #include "smath.h" #include "MCMCrng.h" #include "MCMCfcds.h" #include // needed to use Rprintf() #include // needed to allow user interrupts typedef Matrix rmview; using namespace std; using namespace scythe; template void MCMCfactanal_impl (rng& stream, const Matrix<>& X, Matrix<>& Lambda, Matrix<>& Psi, Matrix<>& Psi_inv, const Matrix<>& Lambda_eq, const Matrix<>& Lambda_ineq, const Matrix<>& Lambda_prior_mean, const Matrix<>& Lambda_prior_prec, const Matrix<>& a0, const Matrix<>& b0, unsigned int burnin, unsigned int mcmc, unsigned int thin, unsigned int verbose, unsigned int storescores, Matrix<>& result) { // constants const unsigned int K = X.cols(); // number of manifest variables const unsigned int N = X.rows(); // number of observations const unsigned int D = Lambda.cols(); // number of factors const unsigned int tot_iter = burnin + mcmc; const unsigned int nsamp = mcmc / thin; const Matrix<> I = eye(D); const Matrix<> Lambda_free_indic = Matrix<>(K, D); for (unsigned int i=0; i<(K*D); ++i){ if (Lambda_eq[i] == -999) Lambda_free_indic[i] = 1.0; } // starting value for phi Matrix<> phi = Matrix<>(N,D); // storage matrices (row major order) Matrix<> Lambda_store = Matrix<>(nsamp, K*D); Matrix<> Psi_store = Matrix<>(nsamp, K); Matrix<> phi_store; if (storescores==1){ phi_store = Matrix(nsamp, N*D); } unsigned int count = 0; // sampling begins here for (unsigned int iter=0; iter < tot_iter; ++iter){ // sample phi NormNormfactanal_phi_draw(phi, I, Lambda, Psi_inv, X, N, D, stream); // sample Lambda NormNormfactanal_Lambda_draw(Lambda, Lambda_free_indic, Lambda_prior_mean, Lambda_prior_prec, phi, X, Psi_inv, Lambda_ineq, D, K, stream); // sample Psi NormIGfactanal_Psi_draw(Psi, X, phi, Lambda, a0, b0, K, N, stream); for (unsigned int i=0; i 0 && iter % verbose == 0){ Rprintf("\n\nMCMCfactanal iteration %i of %i \n", (iter+1), tot_iter); Rprintf("Lambda = \n"); for (unsigned int i=0; i= burnin ) { // store Lambda //Matrix<> Lambda_store_vec = Lambda.resize(1, K*D, true); //for (int l=0; l phi_store_vec = phi.resize(1, N*D, true); //for (int l=0; l result; result = cbind(Lambda_store, Psi_store); if(storescores == 1) { result = cbind(result, phi_store); } } extern "C" { void MCMCfactanal(double *sampledata, const int *samplerow, const int *samplecol, const double *Xdata, const int *Xrow, const int *Xcol, const int *burnin, const int *mcmc, const int *thin, const int *uselecuyer, const int *seedarray, const int *lecuyerstream, const int *verbose, const double *Lambdadata, const int *Lambdarow, const int *Lambdacol, const double *Psidata, const int *Psirow, const int *Psicol, const double *Lameqdata, const int *Lameqrow, const int *Lameqcol, const double *Lamineqdata, const int *Lamineqrow, const int *Lamineqcol, const double *Lampmeandata, const int *Lampmeanrow, const int *Lampmeancol, const double *Lampprecdata, const int *Lampprecrow, const int *Lamppreccol, const double *a0data, const int *a0row, const int *a0col, const double *b0data, const int *b0row, const int *b0col, const int *storescores) { // pull together Matrix objects const Matrix <> X(*Xrow, *Xcol, Xdata); Matrix <> Lambda(*Lambdarow, *Lambdacol, Lambdadata); Matrix <> Psi(*Psirow, *Psicol, Psidata); Matrix <> Psi_inv = invpd(Psi); const Matrix <> 
Lambda_eq(*Lameqrow, *Lameqcol, Lameqdata); const Matrix <> Lambda_ineq(*Lamineqrow, *Lamineqcol, Lamineqdata); const Matrix <> Lambda_prior_mean(*Lampmeanrow, *Lampmeancol, Lampmeandata); const Matrix <> Lambda_prior_prec(*Lampprecrow, *Lamppreccol, Lampprecdata); const Matrix <> a0(*a0row, *a0col, a0data); const Matrix <> b0(*b0row, *b0col, b0data); Matrix<> storagematrix; MCMCPACK_PASSRNG2MODEL(MCMCfactanal_impl, X, Lambda, Psi, Psi_inv, Lambda_eq, Lambda_ineq, Lambda_prior_mean, Lambda_prior_prec, a0, b0, *burnin, *mcmc, *thin, *verbose, *storescores, storagematrix); const unsigned int size = *samplerow * *samplecol; for (unsigned int i = 0; i < size; ++i) sampledata[i] = storagematrix(i); } } #endif MCMCpack/src/MCMCdynamicIRT1d.cc0000644000176000001440000004552012140061656015655 0ustar ripleyusers////////////////////////////////////////////////////////////////////////// // MCMCdynamicIRT1d.cc is C++ code to estimate a dynamic 1d IRT model // // Kevin Quinn // 1/29/2008 // // Copyright (C) 2003-2007 Andrew D. Martin and Kevin M. Quinn // Copyright (C) 2007-present Andrew D. Martin, Kevin M. Quinn, // and Jong Hee Park ////////////////////////////////////////////////////////////////////////// #ifndef MCMCDYNAMICIRT1D_CC #define MCMCDYNAMICIRT1D_CC #include #include #include "MCMCrng.h" #include "MCMCfcds.h" #include "matrix.h" #include "distributions.h" #include "stat.h" #include "la.h" #include "ide.h" #include "smath.h" #include "rng.h" #include "mersenne.h" #include "lecuyer.h" #include // needed to use Rprintf() #include // needed to allow user interrupts using namespace std; using namespace scythe; // used to access 1d arrays holding R matrices like a 2d array #define M(ROW,COL,NROWS) (COL*NROWS+ROW) // MCMCdynamicIRT1d implementation template void MCMCdynamicIRT1d_impl(rng& stream, double* thetadraws, const int* nrowthetadraws, const int* ncolthetadraws, double* alphadraws, const int* nrowalphadraws, const int* ncolalphadraws, double* betadraws, const int* nrowbetadraws, const int* ncolbetadraws, double* tau2draws, const int* nrowtau2draws, const int* ncoltau2draws, const int* nsubj, const int* nitems, const int* ntime, const int* Ydata, const int* nrowYdata, const int* ncolYdata, const int* ITdata, const int* lengthITdata, const int* burnin, const int* mcmc, const int* thin, const int* verbose, const double* thetadata, const int* lengththeta, const int* thetainfodata, const int* nrowthetainfo, const int* ncolthetainfo, double* alphadata, const int* lengthalpha, double* betadata, const int* lengthbeta, double* tau2data, const int* lengthtau2, const double* c0, const int* lengthc0, const double* d0, const int* lengthd0, const double* a0, const double* A0, const double* b0, const double* B0, const double* e0, const double* E0inv, const double* thetaeqdata, const int* nrowthetaeq, const int* ncolthetaeq, const double* thetaineqdata, const int* nrowthetaineq, const int* ncolthetaineq, const int* storeitem, const int* storeability){ const int tot_iter = *burnin + *mcmc; // JHP const int nsamp = *mcmc / *thin; //sparse matrix of latent outcome variables double** Z; Z = new double*[*nsubj]; for (int s=0; s<*nsubj; ++s){ Z[s] = new double[*nitems]; for (int i=0; i<*nitems; ++i){ Z[s][i] = -999; } } for (int j=0; j<*nrowYdata; ++j){ const int s = Ydata[M(j, 0, *nrowYdata)]; // subject id const int i = Ydata[M(j, 1, *nrowYdata)]; // item id // JHP const int y = Ydata[M(j, 2, *nrowYdata)]; // y value Z[s][i] = 0.0; } // stuff to make working with theta easy // theta[s][t] gives the tth 
theta for subject s // the actual time period corresponding to theta[s][t] is theta_offset[s]+t vector< vector > theta; vector theta_offset; int count = 0; for (int s=0; s<*nsubj; ++s){ vector holder; int ntime_s = thetainfodata[M(s, 1, *nrowthetainfo)]; theta_offset.push_back(thetainfodata[M(s, 2, *nrowthetainfo)]); for (int t=0; t > IS; for (int i=0; i<*nitems; ++i){ vector subjholder; for (int j=0; j<*nrowYdata; ++j){ if (Ydata[M(j,1,*nrowYdata)] == i){ subjholder.push_back(Ydata[M(j,0,*nrowYdata)]); } } sort(subjholder.begin(), subjholder.end()); IS.push_back(subjholder); } // SI gives the mapping from subjects to items // SI[s] provides a vector of integers corresponding to the items // voted on by subject s. // SI[s][i] gets the item index of the ith item voted on by subject s vector< vector > SI; for (int i=0; i<*nsubj; ++i){ vector itemholder; for (int j=0; j<*nrowYdata; ++j){ if (Ydata[M(j,0,*nrowYdata)] == i){ itemholder.push_back(Ydata[M(j,1,*nrowYdata)]); } } sort(itemholder.begin(), itemholder.end()); SI.push_back(itemholder); } // TI gives the mapping from times to items // TI[t] provides a vector of integers corresponding to the items // voted on in time period t. // TI[t][i] gets the item index of the ith item voted on in time period t vector< vector > TI; for (int t=0; t<*ntime; ++t){ vector itemholder; for (int i=0; i<*lengthITdata; ++i){ if (ITdata[i] == t){ itemholder.push_back(i); } } sort(itemholder.begin(), itemholder.end()); TI.push_back(itemholder); } // IT gives the mapping from items to times and is just fancy // way of holding the stuff in *ITdata vector IT; for (int i=0; i<*nitems; ++i){ IT.push_back(ITdata[i]); } // ST gives the mapping from subjects to times // ST[s] provides a vector of integers corresponding to the times // in which items were voted on by subject s. 
// ST[s][t] gets the time index of the tth time period served by subject s /* vector > ST; for (int s=0; s<*nsubj; ++s){ vector timeholder; for (int iind=0; iind::iterator myiter; myiter = find(timeholder.begin(), timeholder.end(), t); if (myiter == timeholder.end()){ // t not currently in timeholder timeholder.push_back(t); } } sort(timeholder.begin(), timeholder.end()); ST.push_back(timeholder); } */ // STI gives the list of item indices of items voted on by by subject s // in the tth time period served by s vector< vector < vector > > STI; for (int s=0; s<*nsubj; ++s){ vector < vector > timeitemholder; for (int tt=0; tt itemholder; for (int ii=0; ii beta_s0; beta_s0.reserve(STI[s][0].size()); vector zalpha_s0; zalpha_s0.reserve(STI[s][0].size()); for (int ii=0; ii a; a.reserve(ntime_s); a.push_back(e0[s]); vector R; R.reserve(ntime_s); R.push_back(E0inv[s] + tau2data[s]); vector f_0 = beta_s0; for (int i=0; i Q_0(beta_s0.size(), beta_s0.size()); for (int i=0; i e_0 = zalpha_s0; for (int i=0; i Q_0_inv = invpd(Q_0); vector A_0; A_0.reserve(beta_s0.size()); for (int i=0; i m; m.reserve(ntime_s); double mhold = a[0]; for (int i=0; i C; C.reserve(ntime_s); double Chold = 0.0; for (int i=0; i #include #include "MCMCrng.h" #include "MCMCfcds.h" #include "matrix.h" #include "distributions.h" #include "stat.h" #include "la.h" #include "ide.h" #include "smath.h" #include "rng.h" #include "mersenne.h" #include "lecuyer.h" #include // needed to use Rprintf() #include // needed to allow user interrupts using namespace std; using namespace scythe; // used to access 1d arrays holding R matrices like a 2d array #define M(ROW,COL,NROWS) (COL*NROWS+ROW) // MCMCdynamicIRT1d implementation template void MCMCdynamicIRT1d_b_impl(rng& stream, double* thetadraws, const int* nrowthetadraws, const int* ncolthetadraws, double* alphadraws, const int* nrowalphadraws, const int* ncolalphadraws, double* betadraws, const int* nrowbetadraws, const int* ncolbetadraws, double* tau2draws, const int* nrowtau2draws, const int* ncoltau2draws, const int* nsubj, const int* nitems, const int* ntime, const int* Ydata, const int* nrowYdata, const int* ncolYdata, const int* ITdata, const int* lengthITdata, const int* burnin, const int* mcmc, const int* thin, const int* verbose, const double* thetadata, const int* lengththeta, const int* thetainfodata, const int* nrowthetainfo, const int* ncolthetainfo, double* alphadata, const int* lengthalpha, double* betadata, const int* lengthbeta, double* tau2data, const int* lengthtau2, const double* c0, const int* lengthc0, const double* d0, const int* lengthd0, const double* a0, const double* A0, const double* b0, const double* B0, const double* e0, const double* E0inv, const double* thetaeqdata, const int* nrowthetaeq, const int* ncolthetaeq, const double* thetaineqdata, const int* nrowthetaineq, const int* ncolthetaineq, const int* storeitem, const int* storeability){ const int tot_iter = *burnin + *mcmc; // JHP const int nsamp = *mcmc / *thin; //sparse matrix of latent outcome variables double** Z; Z = new double*[*nsubj]; for (int s=0; s<*nsubj; ++s){ Z[s] = new double[*nitems]; for (int i=0; i<*nitems; ++i){ Z[s][i] = -999; } } for (int j=0; j<*nrowYdata; ++j){ const int s = Ydata[M(j, 0, *nrowYdata)]; // subject id const int i = Ydata[M(j, 1, *nrowYdata)]; // item id // JHP const int y = Ydata[M(j, 2, *nrowYdata)]; // y value Z[s][i] = 0.0; } // stuff to make working with theta easy // theta[s][t] gives the tth theta for subject s // the actual time period corresponding to 
theta[s][t] is theta_offset[s]+t vector< vector > theta; vector theta_offset; int count = 0; for (int s=0; s<*nsubj; ++s){ vector holder; int ntime_s = thetainfodata[M(s, 1, *nrowthetainfo)]; theta_offset.push_back(thetainfodata[M(s, 2, *nrowthetainfo)]); for (int t=0; t > IS; for (int i=0; i<*nitems; ++i){ vector subjholder; for (int j=0; j<*nrowYdata; ++j){ if (Ydata[M(j,1,*nrowYdata)] == i){ subjholder.push_back(Ydata[M(j,0,*nrowYdata)]); } } sort(subjholder.begin(), subjholder.end()); IS.push_back(subjholder); } // SI gives the mapping from subjects to items // SI[s] provides a vector of integers corresponding to the items // voted on by subject s. // SI[s][i] gets the item index of the ith item voted on by subject s vector< vector > SI; for (int i=0; i<*nsubj; ++i){ vector itemholder; for (int j=0; j<*nrowYdata; ++j){ if (Ydata[M(j,0,*nrowYdata)] == i){ itemholder.push_back(Ydata[M(j,1,*nrowYdata)]); } } sort(itemholder.begin(), itemholder.end()); SI.push_back(itemholder); } // TI gives the mapping from times to items // TI[t] provides a vector of integers corresponding to the items // voted on in time period t. // TI[t][i] gets the item index of the ith item voted on in time period t vector< vector > TI; for (int t=0; t<*ntime; ++t){ vector itemholder; for (int i=0; i<*lengthITdata; ++i){ if (ITdata[i] == t){ itemholder.push_back(i); } } sort(itemholder.begin(), itemholder.end()); TI.push_back(itemholder); } // IT gives the mapping from items to times and is just fancy // way of holding the stuff in *ITdata vector IT; for (int i=0; i<*nitems; ++i){ IT.push_back(ITdata[i]); } // ST gives the mapping from subjects to times // ST[s] provides a vector of integers corresponding to the times // in which items were voted on by subject s. // ST[s][t] gets the time index of the tth time period served by subject s /* vector > ST; for (int s=0; s<*nsubj; ++s){ vector timeholder; for (int iind=0; iind::iterator myiter; myiter = find(timeholder.begin(), timeholder.end(), t); if (myiter == timeholder.end()){ // t not currently in timeholder timeholder.push_back(t); } } sort(timeholder.begin(), timeholder.end()); ST.push_back(timeholder); } */ // STI gives the list of item indices of items voted on by by subject s // in the tth time period served by s vector< vector < vector > > STI; for (int s=0; s<*nsubj; ++s){ vector < vector > timeitemholder; for (int tt=0; tt itemholder; for (int ii=0; ii 4 || std::fabs(betadata[i]) > 2 || std::fabs(alphadata[i] / betadata[i]) > 2){ alphadata[i] = stream.rnorm(m1star, s1star); if (iter < 20 ){ alphadata[i] = 0.2*((m1star > 0) - (m1star < 0)); } const double cond_mean = m2star - m1star * (v12star / v11star) + alphadata[i] * (v12star / v11star); const double cond_sd = std::sqrt(v22star * ( 1 - std::pow(rho, 2.0))); betadata[i] = stream.rnorm(cond_mean, cond_sd); if (iter < 20 ){ betadata[i] = 1.0*((cond_mean > 0) - (cond_mean < 0)); } } } // end i loop // sample subject parameters (theta, tau2) for (int s=0; s<*nsubj; ++s){ const int ntime_s = theta[s].size(); if (thetaeqdata[s] != -999){ for (int t=0; t beta_s0; beta_s0.reserve(STI[s][0].size()); vector zalpha_s0; zalpha_s0.reserve(STI[s][0].size()); for (int ii=0; ii a; a.reserve(ntime_s); a.push_back(e0[s]); vector R; R.reserve(ntime_s); R.push_back(E0inv[s] + tau2data[s]); vector f_0 = beta_s0; for (int i=0; i Q_0(beta_s0.size(), beta_s0.size()); for (int i=0; i e_0 = zalpha_s0; for (int i=0; i Q_0_inv = invpd(Q_0); vector A_0; A_0.reserve(beta_s0.size()); for (int i=0; i m; m.reserve(ntime_s); double mhold 
= a[0]; for (int i=0; i C; C.reserve(ntime_s); double Chold = 0.0; for (int i=0; i // needed to use Rprintf() #include // needed to allow user interrupts using namespace scythe; using namespace std; static double Lev1thetaPost(double theta[], const double& r0, const double& r1, const double& c0, const double& mu0, const double& mu1, const double& sigma0, const double& sigma1){ const double theta0 = theta[0]; const double theta1 = theta[1]; const double p0 = 1.0/(1.0 + exp(-1*theta0)); const double p1 = 1.0/(1.0 + exp(-1*theta1)); const double logprior = lndnorm(theta0, mu0, sqrt(sigma0)) + lndnorm(theta1, mu1, sqrt(sigma1)); const double loglike = lndnorm(c0, r0*p0 + r1*p1, sqrt(r0*p0*(1.0-p0) + r1*p1*(1.0-p1))); return(loglike + logprior); } // eventually all of the slice sampling functions should be made more // general and put in MCMCfcds.{h cc} // // Radford Neal's (2000) doubling procedure coded for a logdensity template static void doubling(double (*logfun)(double[], const double&, const double&, const double&, const double&, const double&, const double&, const double&), double theta[], const int& index, const double& z, const double& w, const int& p, const double& r0, const double& r1, const double& c0, const double& mu0, const double& mu1, const double& sigma0, const double& sigma1, rng& stream, double& L, double& R){ const double U = stream.runif(); const double x0 = theta[index]; double theta_L[2]; double theta_R[2]; theta_L[0] = theta_R[0] = theta[0]; theta_L[1] = theta_R[1] = theta[1]; L = x0 - w*U; theta_L[index] = L; R = L + w; theta_R[index] = R; int K = p; while (K > 0 && (z < logfun(theta_L, r0, r1, c0, mu0, mu1, sigma0, sigma1) || z < logfun(theta_R, r0, r1, c0, mu0, mu1, sigma0, sigma1))){ double V = stream.runif(); if (V < 0.5){ L = L - (R - L); theta_L[index] = L; } else { R = R + (R - L); theta_R[index] = R; } --K; } } // Radford Neal's (2000) Accept procedure coded for a logdensity static const bool Accept(double (*logfun)(double[], const double&, const double&, const double&, const double&, const double&, const double&, const double&), double theta[], const int& index, const double x0, const double& z, const double& w, const double& r0, const double& r1, const double& c0, const double& mu0, const double& mu1, const double& sigma0, const double& sigma1, const double& L, const double& R){ double Lhat = L; double Rhat = R; bool D = false; while ((Rhat - Lhat ) > 1.1 * w){ double M = (Lhat + Rhat) / 2.0; if ( (x0 < M && theta[index] >= M) || (x0 >= M && theta[index] < M)){ D = true; } if (theta[index] < M){ Rhat = M; } else { Lhat = M; } int ind0; if (index==0){ ind0 = 1; } else { ind0 = 0; } double theta_L[2]; double theta_R[2]; theta_L[ind0] = theta_R[ind0] = theta[ind0]; theta_L[index] = Lhat; theta_R[index] = Rhat; if (D && z >= logfun(theta_L, r0, r1, c0, mu0, mu1, sigma0, sigma1) && z >= logfun(theta_R, r0, r1, c0, mu0, mu1, sigma0, sigma1)){ return(false); } } return(true); } // Radford Neal's (2000) shrinkage procedure coded for a log density template static double shrinkage(double (*logfun)(double[], const double&, const double&, const double&, const double&, const double&, const double&, const double&), double theta[], const int& index, const double& z, const double& w, const double& r0, const double& r1, const double& c0, const double& mu0, const double& mu1, const double& sigma0, const double& sigma1, rng& stream, const double& L, const double& R){ double Lbar = L; double Rbar = R; int ind0; if (index==0){ ind0 = 1; } else { ind0 = 0; } double theta_x1[2]; 
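// Shrinkage loop below (a sketch): repeatedly draw x1 uniformly on
// [Lbar, Rbar]; return x1 once the log density at x1 is at or above the
// slice level z and the Accept() check for the doubled interval passes;
// otherwise shrink the interval toward the current point x0 by replacing
// Lbar or Rbar with x1.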
theta_x1[0] = theta[0]; theta_x1[1] = theta[1]; const double x0 = theta[index]; for (;;){ const double U = stream.runif(); const double x1 = Lbar + U*(Rbar - Lbar); theta_x1[index] = x1; if (z <= logfun(theta_x1, r0, r1, c0, mu0, mu1, sigma0, sigma1) && Accept(logfun, theta_x1, index, x0, z, w, r0, r1, c0, mu0, mu1, sigma0, sigma1, L, R)){ return(x1); } if (x1 < x0){ Lbar = x1; } else { Rbar = x1; } } // end infinite loop } template void MCMCdynamicEI_impl(rng& stream, const Matrix<>& r0, const Matrix<>& r1, const Matrix<>& c0, const Matrix<>& c1, const Matrix<>& W, double nu0, double delta0, double nu1, double delta1, unsigned int ntables, unsigned int burnin, unsigned int mcmc, unsigned int thin, unsigned int verbose, Matrix& result ){ unsigned int tot_iter = burnin + mcmc; Matrix<> N = c0 + c1; // sum of weights across each row Matrix<> W_sum = sumc(t(W)); // precision matrix (not the weight matrix) for theta0 and theta1 Matrix<> P = -1*W; for (unsigned int i=0; i p0mat(mcmc/thin, ntables); Matrix p1mat(mcmc/thin, ntables); Matrix sig0mat(mcmc/thin, 1); Matrix sig1mat(mcmc/thin, 1); int count = 0; // starting values Matrix p0 = stream.runif(ntables,1)*0.5 + 0.25; Matrix p1 = stream.runif(ntables,1)*0.5 + 0.25; Matrix theta0 = log(p0/(1.0 - p0)); Matrix theta1 = log(p1/(1.0 - p1)); // evolution variance for theta0 double sigma_theta0 = ::pow(0.25, 2); // evolution variance for theta1 double sigma_theta1 = ::pow(0.25, 2); double L = -2.0; double R = 2.0; // sampling constants const unsigned int warmup_iter = 4000; const unsigned int warmup_burnin = 2000; const double w_init = .000000001; const int p_init = 50; const Matrix widthmat(warmup_iter - warmup_burnin, 2); // warm up sampling to chose slice sampling parameters adaptively for (unsigned int iter=0; iter= warmup_burnin){ widthmat(iter- warmup_burnin, index) = R - L; } theta0[i] = theta_i[0]; theta1[i] = theta_i[1]; } // end index loop // if after burnin store samples if ((iter >= burnin) && ((iter%thin)==0)){ p0mat(count,i) = 1.0/(1.0 + exp(-1*theta0[i]));; p1mat(count,i) = 1.0/(1.0 + exp(-1*theta1[i]));; } } // end tables loop // sample sigma_theta0 and sigma_theta1 Matrix SSE = t(theta0-meanc(theta0)) * P * (theta0 - meanc(theta0)); double nu2 = (nu0 + ntables)*0.5; double delta2 = (delta0 + SSE[0])*0.5; sigma_theta0 = stream.rigamma(nu2, delta2); SSE = t(theta1-meanc(theta1)) * P * (theta1 - meanc(theta1)); nu2 = (nu1 + ntables)*0.5; delta2 = (delta1 + SSE[0])*0.5; sigma_theta1 = stream.rigamma(nu2, delta2); } // allow user interrupts R_CheckUserInterrupt(); // @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ // sampling constants const double w = mean(widthmat); int p_temp = 2; while ((w * pow(2.0, p_temp) ) < max(widthmat)){ ++p_temp; } const int p = p_temp + 1; // @@@@@@@@@@ the real sampling takes place here @@@@@@@@@@@@@@ for (unsigned int iter=0; iter= burnin) && ((iter%thin)==0)){ p0mat(count,i) = 1.0/(1.0 + exp(-1*theta0[i])); p1mat(count,i) = 1.0/(1.0 + exp(-1*theta1[i])); } } // end tables loop // sample sigma_theta0 and sigma_theta1 Matrix SSE = t(theta0-meanc(theta0)) * P * (theta0 - meanc(theta0)); double nu2 = (nu0 + ntables)*0.5; double delta2 = (delta0 + SSE[0])*0.5; sigma_theta0 = stream.rigamma(nu2, delta2); SSE = t(theta1-meanc(theta1)) * P * (theta1 - meanc(theta1)); nu2 = (nu1 + ntables)*0.5; delta2 = (delta1 + SSE[0])*0.5; sigma_theta1 = stream.rigamma(nu2, delta2); if ((iter >= burnin) && ((iter%thin)==0)){ sig0mat(count,0) = sigma_theta0; sig1mat(count,0) = sigma_theta1; ++count; } // print 
output to screen if (verbose>0 && (iter%verbose)==0){ Rprintf("\nMCMCdynamicEI iteration %i of %i \n", (iter+1), tot_iter); } // allow user interrupts R_CheckUserInterrupt(); } // return sample result = cbind(p0mat, p1mat); result = cbind(result, sig0mat); result = cbind(result, sig1mat); } extern "C"{ void dynamicEI(double* sample, const int* samrow, const int* samcol, const double* Rr0, const double* Rr1, const double* Rc0, const double* Rc1, const int* Rntables, const int* Rburnin, const int* Rmcmc, const int* Rthin, const double* RW, const double* Rnu0, const double* Rdelta0, const double* Rnu1, const double* Rdelta1, const int* Rverbose, const int *uselecuyer, const int *seedarray, const int *lecuyerstream){ // load data // table notation is: // -------------------- // Y0 | | r0 // -------------------- // Y1 | | r1 // -------------------- // c0 | c1 | N const int ntables = *Rntables; const Matrix<> r0(ntables, 1, Rr0); const Matrix<> r1(ntables, 1, Rr1); const Matrix<> c0(ntables, 1, Rc0); const Matrix<> c1(ntables, 1, Rc1); const Matrix W(ntables, ntables, RW); Matrix result(*samrow, *samcol, false); MCMCPACK_PASSRNG2MODEL(MCMCdynamicEI_impl, r0, r1, c0, c1, W, *Rnu0, *Rdelta0, *Rnu1, *Rdelta1, ntables, *Rburnin, *Rmcmc, *Rthin, *Rverbose, result); for (unsigned int i = 0; i < result.size(); ++i) sample[i] = result[i]; } } // extern "C" #endif MCMCpack/src/MCMCbinaryChange.cc0000644000176000001440000002341112140061656016012 0ustar ripleyusers//////////////////////////////////////////////////////////////////// // MCMCbinaryChange.cc is C++ code to estimate a binary changepoint model // with a beta prior // // Jong Hee Park // Department of Political Science and International Relations // Seoul National University // jongheepark@snu.ac.kr // // 03/03/2009 Written //////////////////////////////////////////////////////////////////// #ifndef MCMCBINARYCHANGE_CC #define MCMCBINARYCHANGE_CC #include "MCMCrng.h" #include "MCMCfcds.h" #include "matrix.h" #include "distributions.h" #include "stat.h" #include "la.h" #include "ide.h" #include "smath.h" #include "rng.h" #include "mersenne.h" #include "lecuyer.h" #include #include template // bianry state sampler Matrix<> binary_state_sampler(rng& stream, const int m, const Matrix<>& Y, const Matrix<>& theta, const Matrix<>& P){ const int ns = m + 1; const int n = Y.rows(); Matrix<> F = Matrix<>(n, ns); Matrix<> pr1 = Matrix<>(ns, 1); pr1[0] = 1; Matrix<> py(ns, 1); Matrix<> pstyt1(ns, 1); for (int t=0; t unnorm_pstyt = pstyt1%py; const Matrix<> pstyt = unnorm_pstyt/sum(unnorm_pstyt); for (int j=0; j s(n, 1); Matrix<> ps = Matrix<>(n, ns); ps(n-1,_) = F(n-1,_); s(n-1) = ns; Matrix<> pstyn = Matrix<>(ns, 1); double pone = 0.0; int t = n-2; while (t >= 0){ int st = s(t+1); Matrix<> Pst_1 = ::t(P(_,st-1)); Matrix<> unnorm_pstyn = F(t,_)%Pst_1; pstyn = unnorm_pstyn/sum(unnorm_pstyn); if (st==1) s(t) = 1; else{ pone = pstyn(st-2); if(stream.runif() < pone) s(t) = st-1; else s(t) = st; } ps(t,_) = pstyn; --t; } Matrix<> Sout(n, ns+1); Sout(_, 0) = s(_,0); for (int j = 0; j void MCMCbinaryChange_impl(rng& stream, const Matrix<>& Y, Matrix<>& phi, Matrix<>& P, const Matrix<>& A0, const double m, const double c0, const double d0, unsigned int burnin, unsigned int mcmc, unsigned int thin, unsigned int verbose, bool chib, Matrix<>& phi_store, Matrix<>& P_store, Matrix<>& ps_store, Matrix<>& s_store, double& logmarglike) { const unsigned int tot_iter = burnin + mcmc; const unsigned int nstore = mcmc / thin; const int n = Y.rows(); const int ns = m + 1; //MCMC 
loop unsigned int count = 0; for (int iter = 0; iter < tot_iter; ++iter){ ////////////////////// // 1. Sample s ////////////////////// Matrix<> Sout = binary_state_sampler(stream, m, Y, phi, P); Matrix<> s = Sout(_, 0); Matrix<> ps(n, ns); for (int j = 0; j addY(ns, 1); Matrix<> addN(ns, 1); for (int j = 0; j= burnin && ((iter % thin)==0)){ for (int i=0; i 0 && iter % verbose == 0){ Rprintf("\n\n MCMCbinaryChange iteration %i of %i", (iter+1), tot_iter); for (int j = 0;j phi_st = meanc(phi_store); Matrix<> P_vec_st = meanc(P_store); const Matrix<> P_st(ns, ns); for (int j = 0; j< ns*ns; ++j){ P_st[j] = P_vec_st[j]; } ////////////////////// // phi ////////////////////// Matrix<> density_phi(nstore, ns); // Matrix<> density_phi_j(ns, 1); for (int iter = 0; iter addY(ns, 1); Matrix<> addN(ns, 1); for (int j = 0; j density_P(nstore, ns); for (int iter = 0; iter < nstore; ++iter){ Matrix<> Sout = binary_state_sampler(stream, m, Y, phi_st, P); Matrix <> s = Sout(_, 0); Matrix <> ps(n, ns); for (int j = 0; j P_addN(ns, 1); for (int j = 0; j F(n, ns); Matrix<> like(n, 1); Matrix<> pr1(ns, 1); pr1[0] = 1; Matrix<> py(ns, 1); Matrix<> pstyt1(ns, 1); for (int t=0; t unnorm_pstyt = pstyt1%py; Matrix<> pstyt = unnorm_pstyt/sum(unnorm_pstyt); for (int j=0; j density_phi_prior(ns, 1); Matrix<> density_P_prior(ns, 1); for (int j=0; j 0){ Rprintf("\nlogmarglike = %10.5f\n", logmarglike); Rprintf("loglike = %10.5f\n", loglike); Rprintf("log_prior = %10.5f\n", logprior); Rprintf("log_phi = %10.5f\n", pdf_phi); Rprintf("log_P = %10.5f\n", pdf_P); } } } //////////////////////////////////////////// // Start MCMCbinaryChangepoint function /////////////////////////////////////////// extern "C"{ void MCMCbinaryChange(double *phiout, double *Pout, double *psout, double *sout, const double *Ydata, const int *Yrow, const int *Ycol, const int *m, const int *burnin, const int *mcmc, const int *thin, const int *verbose, const int *uselecuyer, const int *seedarray, const int *lecuyerstream, const double *phistart, const double *Pstart, const double *a, const double *b, const double *c0, const double *d0, const double *A0data, double *logmarglikeholder, const int *chib){ // pull together Matrix objects const Matrix <> Y(*Yrow, *Ycol, Ydata); const unsigned int nstore = *mcmc / *thin; const int n = Y.rows(); const int ns = *m + 1; // generate starting values Matrix <> phi(ns, 1, phistart); const Matrix <> A0(ns, ns, A0data); Matrix <> P(ns, ns, Pstart); double logmarglike; // storage matrices Matrix<> phi_store(nstore, ns); Matrix<> P_store(nstore, ns*ns); Matrix<> ps_store(n, ns); Matrix<> s_store(nstore, n); MCMCPACK_PASSRNG2MODEL(MCMCbinaryChange_impl, Y, phi, P, A0, *m, *c0, *d0, *burnin, *mcmc, *thin, *verbose, *chib, phi_store, P_store, ps_store, s_store, logmarglike) logmarglikeholder[0] = logmarglike; // return output for (int i = 0; i<(nstore*ns); ++i){ phiout[i] = phi_store[i]; } for (int i = 0; i<(nstore*ns*ns); ++i){ Pout[i] = P_store[i]; } for (int i = 0; i<(n*ns); ++i){ psout[i] = ps_store[i]; } for (int i = 0; i<(nstore*n); ++i){ sout[i] = s_store[i]; } } } #endif MCMCpack/src/matrix_random_access_iterator.h0000644000176000001440000004362312140061657020730 0ustar ripleyusers/* * Scythe Statistical Library Copyright (C) 2000-2002 Andrew D. Martin * and Kevin M. Quinn; 2002-present Andrew D. Martin, Kevin M. Quinn, * and Daniel Pemstein. All Rights Reserved. 
* * This program is free software; you can redistribute it and/or * modify under the terms of the GNU General Public License as * published by Free Software Foundation; either version 2 of the * License, or (at your option) any later version. See the text files * COPYING and LICENSE, distributed with this source code, for further * information. * -------------------------------------------------------------------- * scythestat/matrix_random_access_iterator.h * * Random access iterators for the matrix class. * */ /*! \file matrix_random_access_iterator.h * \brief Definitions of STL-compliant random access iterators for * the Matrix class. * * Contains definitions of const_matrix_random_access_iterator, * matrix_random_access_iterator, and related operators. See a * Standard Template Library reference, such as Josuttis (1999), for a * full description of the capabilities of random access iterators. * * These iterators are templated on the type, order and style of the * Matrix they iterate over and their own order, which need not match * the iterated-over matrix. Same-order iteration over concrete * matrices is extremely fast. Cross-grain concrete and/or view * iteration is slower. */ #ifndef SCYTHE_MATRIX_RANDOM_ACCESS_ITERATOR_H #define SCYTHE_MATRIX_RANDOM_ACCESS_ITERATOR_H #include #ifdef SCYTHE_COMPILE_DIRECT #include "defs.h" #include "error.h" #include "matrix.h" #else #include "scythestat/defs.h" #include "scythestat/error.h" #include "scythestat/matrix.h" #endif /* The const_matrix_iterator and matrix_iterator classes are * essentially identical, except for the return types of the *, ->, * and [] operators. matrix_iterator extends const_matrix_iterator, * overriding most of its members. */ /* TODO Current setup uses template argument based branches to * handle views and cross-grained orderings differently than simple * in-order concrete matrices. The work for this gets done at * compile time, but we end with a few unused instance variables in * the concrete case. It might be better to specialize the entire * class, although this will lead to a lot of code duplication. We * should bench the difference down the road and see if it is worth * the maintenance hassle. * * At the moment this is looking like it won't be worth it. * Iterator-based operations on concretes provide comparable * performance to element-access based routines in previous versions * of the library, indicating little performance penalty. */ namespace scythe { /* convenience typedefs */ namespace { // local to this file typedef unsigned int uint; } /* forward declaration of the matrix class */ template class Matrix; /*! \brief An STL-compliant const random access iterator for Matrix. * * Provides random access iteration over const Matrix objects. See * Josuttis (1999), or some other STL reference, for a full * description of the random access iterator interface. 
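*
* In outline: iteration over a Concrete matrix in its own storage order
* reduces to raw pointer arithmetic, while iteration over a view (or
* against the storage order) keeps a logical offset and recomputes the
* position from the leading/trailing strides set up in the constructor.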
* * \see Matrix * \see matrix_random_access_iterator * \see const_matrix_forward_iterator * \see matrix_forward_iterator * \see const_matrix_bidirectional_iterator * \see matrix_bidirectional_iterator */ template class const_matrix_random_access_iterator : public std::iterator { public: /**** TYPEDEFS ***/ typedef const_matrix_random_access_iterator self; /* These are a little formal, but useful */ typedef typename std::iterator_traits::value_type value_type; typedef typename std::iterator_traits::iterator_category iterator_category; typedef typename std::iterator_traits::difference_type difference_type; typedef typename std::iterator_traits::pointer pointer; typedef typename std::iterator_traits::reference reference; /**** CONSTRUCTORS ****/ /* Default constructor */ const_matrix_random_access_iterator () {} /* Standard constructor */ const_matrix_random_access_iterator ( const Matrix &M) : start_ (M.getArray()) { SCYTHE_CHECK_30 (start_ == 0, scythe_null_error, "Requesting iterator to NULL matrix"); pos_ = start_; /* The basic story is: when M_STYLE == Concrete and ORDER == * M_ORDER, we only need pos_ and start_ and iteration will be * as fast as possible. All other types of iteration need * more variables to keep track of things and are slower. */ if (M_STYLE != Concrete || M_ORDER != ORDER) { offset_ = 0; if (ORDER == Col) { lead_length_ = M.rows(); lead_inc_ = M.rowstride(); trail_inc_ = M.colstride(); } else { lead_length_ = M.cols(); lead_inc_ = M.colstride(); trail_inc_ = M.rowstride(); } jump_ = trail_inc_ + (1 - lead_length_) * lead_inc_; } #if SCYTHE_DEBUG > 2 size_ = M.size(); #endif } /* Copy constructor */ const_matrix_random_access_iterator (const self &mi) : start_ (mi.start_), pos_ (mi.pos_) { if (M_STYLE != Concrete || M_ORDER != ORDER) { offset_ = mi.offset_; lead_length_ = mi.lead_length_; lead_inc_ = mi.lead_inc_; trail_inc_ = mi.trail_inc_; jump_ = mi.jump_; } #if SCYTHE_DEBUG > 2 size_ = mi.size_; #endif } /**** FORWARD ITERATOR FACILITIES ****/ inline self& operator= (const self& mi) { start_ = mi.start_; pos_ = mi.pos_; if (M_STYLE != Concrete || M_ORDER != ORDER) { offset_ = mi.offset_; lead_length_ = mi.lead_length_; lead_inc_ = mi.lead_inc_; trail_inc_ = mi.trail_inc_; jump_ = mi.jump_; } #if SCYTHE_DEBUG > 2 size_ = mi.size_; #endif return *this; } inline const reference operator* () const { SCYTHE_ITER_CHECK_BOUNDS(); return *pos_; } inline const pointer operator-> () const { SCYTHE_ITER_CHECK_BOUNDS(); return pos_; } inline self& operator++ () { if (M_STYLE == Concrete && ORDER == M_ORDER) ++pos_; else if (++offset_ % lead_length_ == 0) pos_ += jump_; else pos_ += lead_inc_; return *this; } inline self operator++ (int) { self tmp = *this; ++(*this); return tmp; } /* == is only defined for iterators of the same template type * that point to the same matrix. Behavior for any other * comparison is undefined. * * Note that we have to be careful about iterator comparisons * when working with views and cross-grain iterators. * Specifically, we always have to rely on the offset value. * Obviously, with <> checks pos_ can jump all over the place in * cross-grain iterators, but also end iterators point to the * value after the last in the matrix. In some cases, the * equation in += and -= will actually put pos_ inside the * matrix (often in an early position) in this case. 
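* As a sketch of the arithmetic behind this: for view / cross-grain
* iteration a logical offset n corresponds to the memory location
*   start_ + (n % lead_length_) * lead_inc_ + (n / lead_length_) * trail_inc_
* (see operator[] and operator+= below), so only offset_ gives a
* well-defined comparison between two iterators over the same matrix.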
*/ inline bool operator== (const self& x) const { if (M_STYLE == Concrete && ORDER == M_ORDER) { return pos_ == x.pos_; } else { return offset_ == x.offset_; } } /* Again, != is only officially defined for iterators over the * same matrix although the test will be trivially true for * matrices that don't view the same data, by implementation. */ inline bool operator!= (const self &x) const { return !(*this == x); } /**** BIDIRECTIONAL ITERATOR FACILITIES ****/ inline self& operator-- () { if (M_STYLE == Concrete && ORDER == M_ORDER) --pos_; else if (offset_-- % lead_length_ == 0) pos_ -= jump_; else pos_ -= lead_inc_; return *this; } inline self operator-- (int) { self tmp = *this; --(*this); return tmp; } /**** RANDOM ACCESS ITERATOR FACILITIES ****/ inline const reference operator[] (difference_type n) const { if (M_STYLE == Concrete && ORDER == M_ORDER) { SCYTHE_ITER_CHECK_OFFSET_BOUNDS(start_ + n); return *(start_ + n); } else { uint trailing = n / lead_length_; uint leading = n % lead_length_; T_type* place = start_ + leading * lead_inc_ + trailing * trail_inc_; SCYTHE_ITER_CHECK_POINTER_BOUNDS(place); return *place; } } inline self& operator+= (difference_type n) { if (M_STYLE == Concrete && ORDER == M_ORDER) { pos_ += n; } else { offset_ += n; uint trailing = offset_ / lead_length_; uint leading = offset_ % lead_length_; pos_ = start_ + leading * lead_inc_ + trailing * trail_inc_; } return *this; } inline self& operator-= (difference_type n) { if (M_STYLE == Concrete && ORDER == M_ORDER) { pos_ -= n; } else { offset_ -= n; uint trailing = offset_ / lead_length_; uint leading = offset_ % lead_length_; pos_ = start_ + leading * lead_inc_ + trailing * trail_inc_; } return *this; } /* + and - difference operators are outside the class */ inline difference_type operator- (const self& x) const { if (M_STYLE == Concrete && ORDER == M_ORDER) { return pos_ - x.pos_; } else { return offset_ - x.offset_; } } inline difference_type operator< (const self& x) const { if (M_STYLE == Concrete && ORDER == M_ORDER) { return pos_ < x.pos_; } else { return offset_ < x.offset_; } } inline difference_type operator> (const self& x) const { if (M_STYLE == Concrete && ORDER == M_ORDER) { return pos_ > x.pos_; } else { return offset_ > x.offset_; } } inline difference_type operator<= (const self& x) const { if (M_STYLE == Concrete && ORDER == M_ORDER) { return pos_ <= x.pos_; } else { return offset_ <= x.offset_; } } inline difference_type operator>= (const self& x) const { if (M_STYLE == Concrete && ORDER == M_ORDER) { return pos_ >= x.pos_; } else { return offset_ >= x.offset_; } } protected: /**** INSTANCE VARIABLES ****/ T_type* start_; // pointer to beginning of data array T_type* pos_; // pointer to current position in array uint offset_; // Logical offset into matrix // TODO Some of these can probably be uints int lead_length_; // Logical length of leading dimension int lead_inc_; // Memory distance between vectors in ldim int trail_inc_; // Memory distance between vectors in tdim int jump_; // Memory distance between end of one ldim vector and // begin of next // Size variable for range checking #if SCYTHE_DEBUG > 2 uint size_; // Logical matrix size #endif }; /*! \brief An STL-compliant random access iterator for Matrix. * * Provides random access iteration over Matrix objects. See * Josuttis (1999), or some other STL reference, for a full * description of the random access iterator interface. 
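*
* In outline, this class re-implements the members of
* const_matrix_random_access_iterator whose return types must refer to
* mutable elements (operator*, operator->, operator[]) and the operators
* that must return the derived type, and pulls the base class's protected
* bookkeeping members back into scope with using-declarations; the
* traversal logic itself is otherwise inherited.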
* * \see Matrix * \see const_matrix_random_access_iterator * \see const_matrix_forward_iterator * \see matrix_forward_iterator * \see const_matrix_bidirectional_iterator * \see matrix_bidirectional_iterator */ template class matrix_random_access_iterator : public const_matrix_random_access_iterator { /**** TYPEDEFS ***/ typedef matrix_random_access_iterator self; typedef const_matrix_random_access_iterator Base; public: /* These are a little formal, but useful */ typedef typename std::iterator_traits::value_type value_type; typedef typename std::iterator_traits::iterator_category iterator_category; typedef typename std::iterator_traits::difference_type difference_type; typedef typename std::iterator_traits::pointer pointer; typedef typename std::iterator_traits::reference reference; /**** CONSTRUCTORS ****/ /* Default constructor */ matrix_random_access_iterator () : Base () {} /* Standard constructor */ matrix_random_access_iterator (const Matrix &M) : Base(M) {} /* Copy constructor */ matrix_random_access_iterator (const self &mi) : Base (mi) {} /**** FORWARD ITERATOR FACILITIES ****/ /* We have to override a lot of these to get return values * right.*/ inline self& operator= (const self& mi) { start_ = mi.start_; pos_ = mi.pos_; if (M_STYLE != Concrete || M_ORDER != ORDER) { offset_ = mi.offset_; lead_length_ = mi.lead_length_; lead_inc_ = mi.lead_inc_; trail_inc_ = mi.trail_inc_; jump_ = mi.jump_; } #if SCYTHE_DEBUG > 2 size_ = mi.size_; #endif return *this; } inline reference operator* () const { SCYTHE_ITER_CHECK_BOUNDS(); return *pos_; } inline pointer operator-> () const { SCYTHE_ITER_CHECK_BOUNDS(); return pos_; } inline self& operator++ () { Base::operator++(); return *this; } inline self operator++ (int) { self tmp = *this; ++(*this); return tmp; } /**** BIDIRECTIONAL ITERATOR FACILITIES ****/ inline self& operator-- () { Base::operator--(); return *this; } inline self operator-- (int) { self tmp = *this; --(*this); return tmp; } /**** RANDOM ACCESS ITERATOR FACILITIES ****/ inline reference operator[] (difference_type n) const { if (M_STYLE == Concrete && ORDER == M_ORDER) { SCYTHE_ITER_CHECK_POINTER_BOUNDS(start_ + n); return *(start_ + n); } else { uint trailing = n / lead_length_; uint leading = n % lead_length_; T_type* place = start_ + leading * lead_inc_ + trailing * trail_inc_; SCYTHE_ITER_CHECK_POINTER_BOUNDS(place); return *place; } } inline self& operator+= (difference_type n) { Base::operator+=(n); return *this; } inline self& operator-= (difference_type n) { Base::operator-= (n); return *this; } /* + and - difference_type operators are outside the class */ private: /* Get handles to base members. 
It boggles the mind */ using Base::start_; using Base::pos_; using Base::offset_; using Base::lead_length_; using Base::lead_inc_; using Base::trail_inc_; using Base::jump_; #if SCYTHE_DEBUG > 2 using Base::size_; #endif }; template inline const_matrix_random_access_iterator operator+ (const_matrix_random_access_iterator x, int n) { x += n; return x; } template inline const_matrix_random_access_iterator operator+ (int n, const_matrix_random_access_iterator x) { x += n; return x; } template inline const_matrix_random_access_iterator operator- (const_matrix_random_access_iterator x, int n) { x -= n; return x; } template inline matrix_random_access_iterator operator+ (matrix_random_access_iterator x, int n) { x += n; return x; } template inline matrix_random_access_iterator operator+ (int n, matrix_random_access_iterator x) { x += n; return x; } template inline matrix_random_access_iterator operator- (matrix_random_access_iterator x, int n) { x -= n; return x; } } // namespace scythe #endif /* SCYTHE_MATRIX_ITERATOR_H */ MCMCpack/src/matrix_forward_iterator.h0000644000176000001440000003020312140061657017561 0ustar ripleyusers/* * Scythe Statistical Library Copyright (C) 2000-2002 Andrew D. Martin * and Kevin M. Quinn; 2002-present Andrew D. Martin, Kevin M. Quinn, * and Daniel Pemstein. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify under the terms of the GNU General Public License as * published by Free Software Foundation; either version 2 of the * License, or (at your option) any later version. See the text files * COPYING and LICENSE, distributed with this source code, for further * information. * -------------------------------------------------------------------- * scythestat/matrix_forward_iterator.h * * Forward iterators for the matrix class. * */ /*! \file matrix_forward_iterator.h * \brief Definitions of STL-compliant forward iterators for the * Matrix class. * * Contains definitions of const_matrix_forward_iterator, * matrix_forward_iterator, and related operators. See a Standard * Template Library reference, such as Josuttis (1999), for a full * description of the capabilities of forward iterators. * * These iterators are templated on the type, order and style of the * Matrix they iterate over and their own order, which need not match * the iterated-over matrix. Same-order iteration over concrete * matrices is extremely fast. Cross-grain concrete and/or view * iteration is slower. */ #ifndef SCYTHE_MATRIX_FORWARD_ITERATOR_H #define SCYTHE_MATRIX_FORWARD_ITERATOR_H #include #ifdef SCYTHE_COMPILE_DIRECT #include "defs.h" #include "error.h" #include "matrix.h" #else #include "scythestat/defs.h" #include "scythestat/error.h" #include "scythestat/matrix.h" #endif namespace scythe { /* convenience typedefs */ namespace { // local to this file typedef unsigned int uint; } /* forward declaration of the matrix class */ template class Matrix; /*! \brief An STL-compliant const forward iterator for Matrix. * * Provides forward iteration over const Matrix objects. See * Josuttis (1999), or some other STL reference, for a full * description of the forward iterator interface. 
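*
* In addition to the standard forward-iterator operations, this class
* provides set_end(), a cheap way to obtain a past-the-end iterator
* (for views it simply sets the logical offset to the matrix size), and
* get_index(), which reports the current logical offset into the matrix.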
* * \see Matrix * \see matrix_forward_iterator * \see const_matrix_random_access_iterator * \see matrix_random_access_iterator * \see const_matrix_bidirectional_iterator * \see matrix_bidirectional_iterator */ template class const_matrix_forward_iterator : public std::iterator { public: /**** TYPEDEFS ***/ typedef const_matrix_forward_iterator self; /* These are a little formal, but useful */ typedef typename std::iterator_traits::value_type value_type; typedef typename std::iterator_traits::iterator_category iterator_category; typedef typename std::iterator_traits::difference_type difference_type; typedef typename std::iterator_traits::pointer pointer; typedef typename std::iterator_traits::reference reference; /**** CONSTRUCTORS ****/ /* Default constructor */ const_matrix_forward_iterator () {} /* Standard constructor */ const_matrix_forward_iterator (const Matrix &M) : pos_ (M.getArray()), matrix_ (&M) { SCYTHE_CHECK_30 (pos_ == 0, scythe_null_error, "Requesting iterator to NULL matrix"); /* The basic story is: when M_STYLE == Concrete and ORDER == * M_ORDER, we only need pos_ and iteration will be as fast as * possible. All other types of iteration need more variables * to keep track of things and are slower. */ if (M_STYLE != Concrete || M_ORDER != ORDER) { offset_ = 0; if (ORDER == Col) { lead_length_ = M.rows(); lead_inc_ = M.rowstride(); trail_inc_ = M.colstride(); } else { lead_length_ = M.cols(); lead_inc_ = M.colstride(); trail_inc_ = M.rowstride(); } jump_ = trail_inc_ + (1 - lead_length_) * lead_inc_; vend_ = pos_ + (lead_length_ - 1) * lead_inc_; } #if SCYTHE_DEBUG > 2 size_ = M.size(); start_ = pos_; #endif } /* Copy constructor */ const_matrix_forward_iterator (const self &mi) : pos_ (mi.pos_), matrix_ (mi.matrix_) { if (M_STYLE != Concrete || M_ORDER != ORDER) { offset_ = mi.offset_; lead_length_ = mi.lead_length_; lead_inc_ = mi.lead_inc_; trail_inc_ = mi.trail_inc_; vend_ = mi.vend_; jump_ = mi.jump_; } #if SCYTHE_DEBUG > 2 size_ = mi.size_; start_ = mi.start_; #endif } /**** EXTRA MODIFIER ****/ /* This function lets us grab an end iterator quickly, for both * concrete and view matrices. The view code is a bit of a * kludge, but it works. */ inline self& set_end () { if (M_STYLE == Concrete && ORDER == M_ORDER) { pos_ = matrix_->getArray() + matrix_->size(); } else { offset_ = matrix_->size(); } return *this; } /* Returns the current index (in logical matrix terms) of the * iterator. */ unsigned int get_index () const { return offset_; } /**** FORWARD ITERATOR FACILITIES ****/ inline self& operator= (const self& mi) { pos_ = mi.pos_; matrix_ = mi.matrix_; if (M_STYLE != Concrete || M_ORDER != ORDER) { offset_ = mi.offset_; lead_length_ = mi.lead_length_; lead_inc_ = mi.lead_inc_; trail_inc_ = mi.trail_inc_; vend_ = mi.vend_; jump_ = mi.jump_; } #if SCYTHE_DEBUG > 2 size_ = mi.size_; start_ = mi.start_; #endif return *this; } inline const reference operator* () const { SCYTHE_ITER_CHECK_BOUNDS(); return *pos_; } inline const pointer operator-> () const { SCYTHE_ITER_CHECK_BOUNDS(); return pos_; } inline self& operator++ () { if (M_STYLE == Concrete && ORDER == M_ORDER) ++pos_; else { if (pos_ == vend_) { vend_ += trail_inc_; pos_ += jump_; } else { pos_ += lead_inc_; } ++offset_; } return *this; } inline self operator++ (int) { self tmp = *this; ++(*this); return tmp; } /* == is only defined for iterators of the same template type * that point to the same matrix. Behavior for any other * comparison is undefined. 
* * Note that we have to be careful about iterator comparisons * when working with views and cross-grain iterators. * Specifically, we always have to rely on the offset value. * Obviously, with <> checks pos_ can jump all over the place in * cross-grain iterators, but also end iterators point to the * value after the last in the matrix. In some cases, the * equation in += and -= will actually put pos_ inside the * matrix (often in an early position) in this case. */ inline bool operator== (const self& x) const { if (M_STYLE == Concrete && ORDER == M_ORDER) { return pos_ == x.pos_; } else { return offset_ == x.offset_; } } /* Again, != is only officially defined for iterators over the * same matrix although the test will be trivially true for * matrices that don't view the same data, by implementation. */ inline bool operator!= (const self &x) const { return !(*this == x); } protected: /**** INSTANCE VARIABLES ****/ T_type* pos_; // pointer to current position in array T_type *vend_; // pointer to end of current vector uint offset_; // logical offset into matrix // TODO Some of these can probably be uints int lead_length_; // Logical length of leading dimension int lead_inc_; // Memory distance between vectors in ldim int trail_inc_; // Memory distance between vectors in tdim int jump_; // Memory distance between end of one ldim vector and // begin of next // Pointer to the matrix we're iterating over. This is really // only needed to get variables necessary to set the end. // TODO Handle this more cleanly. const Matrix* matrix_; // Size variable for range checking #if SCYTHE_DEBUG > 2 uint size_; // Logical matrix size T_type* start_; // Not normally needed, but used for bound check #endif }; /*! \brief An STL-compliant forward iterator for Matrix. * * Provides forward iteration over Matrix objects. See * Josuttis (1999), or some other STL reference, for a full * description of the forward iterator interface. 
* * \see Matrix * \see const_matrix_forward_iterator * \see const_matrix_random_access_iterator * \see matrix_random_access_iterator * \see const_matrix_bidirectional_iterator * \see matrix_bidirectional_iterator */ template class matrix_forward_iterator : public const_matrix_forward_iterator { /**** TYPEDEFS ***/ typedef matrix_forward_iterator self; typedef const_matrix_forward_iterator Base; public: /* These are a little formal, but useful */ typedef typename std::iterator_traits::value_type value_type; typedef typename std::iterator_traits::iterator_category iterator_category; typedef typename std::iterator_traits::difference_type difference_type; typedef typename std::iterator_traits::pointer pointer; typedef typename std::iterator_traits::reference reference; /**** CONSTRUCTORS ****/ /* Default constructor */ matrix_forward_iterator () : Base () {} /* Standard constructor */ matrix_forward_iterator (const Matrix &M) : Base(M) {} /* Copy constructor */ matrix_forward_iterator (const self &mi) : Base (mi) {} /**** EXTRA MODIFIER ****/ inline self& set_end () { Base::set_end(); return *this; } /**** FORWARD ITERATOR FACILITIES ****/ /* We have to override a lot of these to get return values * right.*/ inline self& operator= (const self& mi) { pos_ = mi.pos_; matrix_ = mi.matrix_; if (M_STYLE != Concrete || M_ORDER != ORDER) { offset_ = mi.offset_; lead_length_ = mi.lead_length_; lead_inc_ = mi.lead_inc_; trail_inc_ = mi.trail_inc_; vend_ = mi.vend_; jump_ = mi.jump_; } #if SCYTHE_DEBUG > 2 size_ = mi.size_; start_ = mi.start_; #endif return *this; } inline reference operator* () const { SCYTHE_ITER_CHECK_BOUNDS(); return *pos_; } inline pointer operator-> () const { SCYTHE_ITER_CHECK_BOUNDS(); return pos_; } inline self& operator++ () { Base::operator++(); return *this; } inline self operator++ (int) { self tmp = *this; ++(*this); return tmp; } private: /* Get handles to base members. It boggles the mind */ using Base::pos_; using Base::vend_; using Base::offset_; using Base::lead_length_; using Base::lead_inc_; using Base::trail_inc_; using Base::jump_; using Base::matrix_; #if SCYTHE_DEBUG > 2 using Base::size_; using Base::start_; #endif }; } // namespace scythe #endif /* SCYTHE_MATRIX_ITERATOR_H */ MCMCpack/src/matrix_bidirectional_iterator.h0000644000176000001440000003235412140061657020736 0ustar ripleyusers/* * Scythe Statistical Library Copyright (C) 2000-2002 Andrew D. Martin * and Kevin M. Quinn; 2002-present Andrew D. Martin, Kevin M. Quinn, * and Daniel Pemstein. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify under the terms of the GNU General Public License as * published by Free Software Foundation; either version 2 of the * License, or (at your option) any later version. See the text files * COPYING and LICENSE, distributed with this source code, for further * information. * -------------------------------------------------------------------- * scythestat/matrix_bidirectional_iterator.h * * Bidirectional iterators for the matrix class. * */ /*! \file matrix_bidirectional_iterator.h * \brief Definitions of STL-compliant bidirectional iterators for the * Matrix class. * * Contains definitions of const_matrix_bidirectional_iterator, * matrix_bidirectional_iterator, and related operators. See a * Standard Template Library reference, such as Josuttis (1999), for a * full description of the capabilities of bidirectional iterators. 
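*
* Relative to the forward iterators these add decrement: for views and
* cross-grain iteration, stepping backwards across a vector boundary
* moves the position back by the stored jump_ and shifts the current
* vector bounds (vbegin_ / vend_) back by one trailing stride.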
* * These iterators are templated on the type, order and style of the * Matrix they iterate over and their own order, which need not match * the iterated-over matrix. Same-order iteration over concrete * matrices is extremely fast. Cross-grain concrete and/or view * iteration is slower. */ #ifndef SCYTHE_MATRIX_BIDIRECTIONAL_ITERATOR_H #define SCYTHE_MATRIX_BIDIRECTIONAL_ITERATOR_H #include #ifdef SCYTHE_COMPILE_DIRECT #include "defs.h" #include "error.h" #include "matrix.h" #else #include "scythestat/defs.h" #include "scythestat/error.h" #include "scythestat/matrix.h" #endif namespace scythe { /* convenience typedefs */ namespace { // local to this file typedef unsigned int uint; } /* forward declaration of the matrix class */ template class Matrix; /*! \brief An STL-compliant const bidirectional iterator for Matrix. * * Provides bidirectional iteration over const Matrix objects. See * Josuttis (1999), or some other STL reference, for a full * description of the bidirectional iterator interface. * * \see Matrix * \see matrix_bidirectional_iterator * \see const_matrix_forward_iterator * \see matrix_forward_iterator * \see const_matrix_random_access_iterator * \see matrix_random_access_iterator */ template class const_matrix_bidirectional_iterator : public std::iterator { public: /**** TYPEDEFS ***/ typedef const_matrix_bidirectional_iterator self; /* These are a little formal, but useful */ typedef typename std::iterator_traits::value_type value_type; typedef typename std::iterator_traits::iterator_category iterator_category; typedef typename std::iterator_traits::difference_type difference_type; typedef typename std::iterator_traits::pointer pointer; typedef typename std::iterator_traits::reference reference; /**** CONSTRUCTORS ****/ /* Default constructor */ const_matrix_bidirectional_iterator () {} /* Standard constructor */ const_matrix_bidirectional_iterator (const Matrix &M) : pos_ (M.getArray()), matrix_ (&M) { SCYTHE_CHECK_30 (pos_ == 0, scythe_null_error, "Requesting iterator to NULL matrix"); /* The basic story is: when M_STYLE == Concrete and ORDER == * M_ORDER, we only need pos_ and iteration will be as fast as * possible. All other types of iteration need more variables * to keep track of things and are slower. */ if (M_STYLE != Concrete || M_ORDER != ORDER) { offset_ = 0; if (ORDER == Col) { lead_length_ = M.rows(); lead_inc_ = M.rowstride(); trail_inc_ = M.colstride(); } else { lead_length_ = M.cols(); lead_inc_ = M.colstride(); trail_inc_ = M.rowstride(); } jump_ = trail_inc_ + (1 - lead_length_) * lead_inc_; vend_ = pos_ + (lead_length_ - 1) * lead_inc_; vbegin_ = pos_; } #if SCYTHE_DEBUG > 2 size_ = M.size(); start_ = pos_; #endif } /* Copy constructor */ const_matrix_bidirectional_iterator (const self &mi) : pos_ (mi.pos_), matrix_ (mi.matrix_) { if (M_STYLE != Concrete || M_ORDER != ORDER) { offset_ = mi.offset_; lead_length_ = mi.lead_length_; lead_inc_ = mi.lead_inc_; trail_inc_ = mi.trail_inc_; vend_ = mi.vend_; vbegin_ = mi.vbegin_; jump_ = mi.jump_; } #if SCYTHE_DEBUG > 2 size_ = mi.size_; start_ = mi.start_; #endif } /**** EXTRA MODIFIER ****/ /* This function lets us grab an end iterator. It is cheap for * Concrete matrices but somewhat more costly for views. 
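* (In outline: for a view or cross-grain order the code below advances
* vbegin_ and vend_ past the last trailing vector, points pos_ at the
* shifted vbegin_, and sets the logical offset to the matrix size, so the
* result compares equal to any iterator incremented off the end.)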
*/ inline self& set_end () { if (M_STYLE == Concrete && ORDER == M_ORDER) { pos_ = matrix_->getArray() + matrix_->size(); } else { if (ORDER == Col) { vbegin_ += trail_inc_ * matrix_->cols(); vend_ += trail_inc_ * matrix_->cols(); } else { // ORDER == Rows vbegin_ += trail_inc_ * matrix_->rows(); vend_ += trail_inc_ * matrix_->rows(); } pos_ = vbegin_; offset_ = matrix_->size(); } return *this; } /**** FORWARD ITERATOR FACILITIES ****/ inline self& operator= (const self& mi) { pos_ = mi.pos_; matrix_ = mi.matrix_; if (M_STYLE != Concrete || M_ORDER != ORDER) { offset_ = mi.offset_; lead_length_ = mi.lead_length_; lead_inc_ = mi.lead_inc_; trail_inc_ = mi.trail_inc_; vend_ = mi.vend_; vbegin_ = mi.vbegin_; jump_ = mi.jump_; } #if SCYTHE_DEBUG > 2 size_ = mi.size_; start_ = mi.start_; #endif return *this; } inline const reference operator* () const { SCYTHE_ITER_CHECK_BOUNDS(); return *pos_; } inline const pointer operator-> () const { SCYTHE_ITER_CHECK_BOUNDS(); return pos_; } inline self& operator++ () { if (M_STYLE == Concrete && ORDER == M_ORDER) ++pos_; else { if (pos_ == vend_) { vend_ += trail_inc_; vbegin_ += trail_inc_; pos_ += jump_; } else { pos_ += lead_inc_; } ++offset_; } return *this; } inline self operator++ (int) { self tmp = *this; ++(*this); return tmp; } /* == is only defined for iterators of the same template type * that point to the same matrix. Behavior for any other * comparison is undefined. * * Note that we have to be careful about iterator comparisons * when working with views and cross-grain iterators. * Specifically, we always have to rely on the offset value. * Obviously, with <> checks pos_ can jump all over the place in * cross-grain iterators, but also end iterators point to the * value after the last in the matrix. In some cases, the * equation in += and -= will actually put pos_ inside the * matrix (often in an early position) in this case. */ inline bool operator== (const self& x) const { if (M_STYLE == Concrete && ORDER == M_ORDER) { return pos_ == x.pos_; } else { return offset_ == x.offset_; } } /* Again, != is only officially defined for iterators over the * same matrix although the test will be trivially true for * matrices that don't view the same data, by implementation. */ inline bool operator!= (const self &x) const { return !(*this == x); } /**** BIDIRECTIONAL ITERATOR FACILITES ****/ inline self& operator-- () { if (M_STYLE == Concrete && ORDER == M_ORDER) --pos_; else { if (pos_ == vbegin_) { vend_ -= trail_inc_; vbegin_ -= trail_inc_; pos_ -= jump_; } else { pos_ -= lead_inc_; } --offset_; } return *this; } inline self operator-- (int) { self tmp = *this; --(*this); return tmp; } protected: /**** INSTANCE VARIABLES ****/ T_type* pos_; // pointer to current position in array T_type *vend_; // pointer to end of current vector T_type *vbegin_; // pointer to begin of current vector uint offset_; // logical offset into matrix // TODO Some of these can probably be uints int lead_length_; // Logical length of leading dimension int lead_inc_; // Memory distance between vectors in ldim int trail_inc_; // Memory distance between vectors in tdim int jump_; // Memory distance between end of one ldim vector and // begin of next // Pointer used only for set_end. TODO Cleaner impl. const Matrix* matrix_; // Size variable for range checking #if SCYTHE_DEBUG > 2 uint size_; // Logical matrix size T_type* start_; // only needed for bounds checking #endif }; /*! \brief An STL-compliant bidirectional iterator for Matrix. 
* * Provides bidirectional iteration over Matrix objects. See * Josuttis (1999), or some other STL reference, for a full * description of the bidirectional iterator interface. * * \see Matrix * \see const_matrix_bidirectional_iterator * \see const_matrix_forward_iterator * \see matrix_forward_iterator * \see const_matrix_random_access_iterator * \see matrix_random_access_iterator */ template class matrix_bidirectional_iterator : public const_matrix_bidirectional_iterator { /**** TYPEDEFS ***/ typedef matrix_bidirectional_iterator self; typedef const_matrix_bidirectional_iterator Base; public: /* These are a little formal, but useful */ typedef typename std::iterator_traits::value_type value_type; typedef typename std::iterator_traits::iterator_category iterator_category; typedef typename std::iterator_traits::difference_type difference_type; typedef typename std::iterator_traits::pointer pointer; typedef typename std::iterator_traits::reference reference; /**** CONSTRUCTORS ****/ /* Default constructor */ matrix_bidirectional_iterator () : Base () {} /* Standard constructor */ matrix_bidirectional_iterator (const Matrix &M) : Base(M) {} /* Copy constructor */ matrix_bidirectional_iterator (const self &mi) : Base (mi) {} /**** EXTRA MODIFIER ****/ inline self& set_end () { Base::set_end(); return *this; } /**** FORWARD ITERATOR FACILITIES ****/ /* We have to override a lot of these to get return values * right.*/ inline self& operator= (const self& mi) { pos_ = mi.pos_; matrix_ = mi.matrix_; if (M_STYLE != Concrete || M_ORDER != ORDER) { offset_ = mi.offset_; lead_length_ = mi.lead_length_; lead_inc_ = mi.lead_inc_; trail_inc_ = mi.trail_inc_; vend_ = mi.vend_; vbegin_ = mi.vbegin_; jump_ = mi.jump_; } #if SCYTHE_DEBUG > 2 size_ = mi.size_; start_ = mi.start_; #endif return *this; } inline reference operator* () const { SCYTHE_ITER_CHECK_BOUNDS(); return *pos_; } inline pointer operator-> () const { SCYTHE_ITER_CHECK_BOUNDS(); return pos_; } inline self& operator++ () { Base::operator++(); return *this; } inline self operator++ (int) { self tmp = *this; ++(*this); return tmp; } inline self& operator-- () { Base::operator--(); return *this; } inline self operator-- (int) { self tmp = *this; --(*this); return tmp; } private: /* Get handles to base members. It boggles the mind */ using Base::pos_; using Base::vend_; using Base::vbegin_; using Base::offset_; using Base::lead_length_; using Base::lead_inc_; using Base::trail_inc_; using Base::jump_; using Base::matrix_; #if SCYTHE_DEBUG > 2 using Base::size_; using Base::start_; #endif }; } // namespace scythe #endif /* SCYTHE_MATRIX_BIDIRECTIONAL_ITERATOR_H */ MCMCpack/src/matrix.h0000644000176000001440000057734212140061657014150 0ustar ripleyusers/* * Scythe Statistical Library Copyright (C) 2000-2002 Andrew D. Martin * and Kevin M. Quinn; 2002-present Andrew D. Martin, Kevin M. Quinn, * and Daniel Pemstein. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify under the terms of the GNU General Public License as * published by Free Software Foundation; either version 2 of the * License, or (at your option) any later version. See the text files * COPYING and LICENSE, distributed with this source code, for further * information. * -------------------------------------------------------------------- * scythe's/matrix.h * */ /*! * \file matrix.h * \brief Definitions of Matrix and related classes and functions. * * This file contains the definitions of the Matrix, Matrix_base, and * associated classes. 
It also contains a number of external * functions that operate on Matrix objects, such as mathematical * operators. * * Many of the arithmetic and logical operators in this file are * implemented in terms of overloaded template definitions to provide * both generic and default versions of each operation. Generic * templates allow (and force) the user to fully specify the * template type of the returned matrix object (row or column order, * concrete or view) while default templates automatically return * concrete matrices with the ordering of the first or only Matrix * argument to the function. Furthermore, we overload binary * functions to provide scalar by Matrix operations, in addition to * basic Matrix by Matrix arithmetic and logic. Therefore, * definitions for multiple versions appear in the functions list * below. We adopt the convention of providing explicit documentation * for only the most generic Matrix by Matrix version of each of these * operators and describe the behavior of the various overloaded * versions in these documents. */ #ifndef SCYTHE_MATRIX_H #define SCYTHE_MATRIX_H #include #include #include #include #include #include #include #include //#include #include #include #ifdef SCYTHE_COMPILE_DIRECT #include "defs.h" #include "algorithm.h" #include "error.h" #include "datablock.h" #include "matrix_random_access_iterator.h" #include "matrix_forward_iterator.h" #include "matrix_bidirectional_iterator.h" #ifdef SCYTHE_LAPACK #include "lapack.h" #endif #else #include "scythestat/defs.h" #include "scythestat/algorithm.h" #include "scythestat/error.h" #include "scythestat/datablock.h" #include "scythestat/matrix_random_access_iterator.h" #include "scythestat/matrix_forward_iterator.h" #include "scythestat/matrix_bidirectional_iterator.h" #ifdef SCYTHE_LAPACK #include "scythestat/lapack.h" #endif #endif namespace scythe { namespace { // make the uint typedef local to this file /* Convenience typedefs */ typedef unsigned int uint; } /* Forward declare the matrix multiplication functions for *= to use * within Matrix proper . */ template Matrix operator* (const Matrix& lhs, const Matrix& rhs); template Matrix operator* (const Matrix& lhs, const Matrix& rhs); /* forward declaration of the matrix class */ template class Matrix; /*! \brief A helper class for list-wise initialization. * * This class gets used behind the scenes to provide listwise * initialization for Matrix objects. This documentation is mostly * intended for developers. * * The Matrix class's assignment operator returns a ListInitializer * object when passed a scalar. The assignment operator binds before * the comma operator, so this happens first, no matter if there is * one scalar, or a list of scalars on the right hand side of the * assignment sign. The ListInitializer constructor keeps an iterator * to the Matrix that created it and places the initial item at the * head of a list. Then the ListInitializer comma operator gets * called 0 or more times, appending items to the list. At this * point the ListInitializer object gets destructed because the * expression is done and it is just a temporary. All the action is * in the destructor where the list is copied into the Matrix with * R-style recycling. * * To handle chained assignments, such as A = B = C = 1.2 where A, * B, and C are matrices, correctly, we encapsulate the Matrix * population sequence that is typically called by the destructor in * the private function populate, and make Matrix a friend of this * class. 
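 *
 * From the caller's side, the behavior described here looks like the
 * following sketch (an illustration only, assuming the default
 * Matrix template parameters):
 *
 * \code
 * Matrix<> A(2, 2);
 * A = 1.0, 2.0,
 *     3.0, 4.0;        // list copied into A in A's matrix_order
 *
 * Matrix<> B(2, 2);
 * Matrix<> C(2, 2);
 * B = C = 1.2;         // chained assignment; the single value 1.2 is
 *                      // recycled to fill C, then B copies C
 * \endcode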
The Matrix class contains an assignment operator for * ListInitializer objects that calls this function. When a call * like "A = B = C = 1.2" occurs the compiler first evaluates C = * 1.2 and returns a ListInitializer object. Then, the * ListInitializer assignment operator in the Matrix class (being * called on B = (C = 1.2)) forces the ListInitializer object to * populate C early (it would otherwise not occur until destruction * at the end of th entire call) by calling the populate method and * then does a simple Matrix assignment of B = C and the populated C * and the chain of assignment proceeds from there in the usual * fashion. * * Based on code in Blitz++ (http://www.oonumerics.org/blitz/) by * Todd Veldhuizen. Blitz++ is distributed under the terms of the * GNU GPL. */ template class ListInitializer { // An unbound friend template friend class Matrix; public: ListInitializer (T_elem val, T_iter begin, T_iter end, Matrix* matrix) : vals_ (), iter_ (begin), end_ (end), matrix_ (matrix), populated_ (false) { vals_.push_back(val); } ~ListInitializer () { if (! populated_) populate(); } ListInitializer &operator, (T_elem x) { vals_.push_back(x); return *this; } private: void populate () { typename std::list::iterator vi = vals_.begin(); while (iter_ < end_) { if (vi == vals_.end()) vi = vals_.begin(); *iter_ = *vi; ++iter_; ++vi; } populated_ = true; } std::list vals_; T_iter iter_; T_iter end_; Matrix* matrix_; bool populated_; }; /*! \brief Matrix superclass. * * The Matrix_base class handles Matrix functionality that doesn't * need to be templatized with respect to data type. This helps * reduce code bloat by reducing replication of code for member * functions that don't rely on templating. Furthermore, it * hides all of the implementation details of indexing. The * constructors of this class are protected and end-users should * always work with full-fledged Matrix objects. * * The public functions in this class generally provide Matrix * metadata like information on dimensionality and size. */ template class Matrix_base { protected: /**** CONSTRUCTORS ****/ /* Default constructor */ Matrix_base () : rows_ (0), cols_ (0), rowstride_ (0), colstride_ (0), storeorder_ (ORDER) {} /* Standard constructor */ Matrix_base (uint rows, uint cols) : rows_ (rows), cols_ (cols), storeorder_ (ORDER) { if (ORDER == Col) { rowstride_ = 1; colstride_ = rows; } else { rowstride_ = cols; colstride_ = 1; } } /* Copy constructors * * The first version handles matrices of the same order and * style. The second handles matrices with different * orders/styles. The same- templates are more specific, * so they will always catch same- cases. 
* */ Matrix_base (const Matrix_base &m) : rows_ (m.rows()), cols_ (m.cols()), rowstride_ (m.rowstride()), colstride_ (m.colstride()) { if (STYLE == View) storeorder_ = m.storeorder(); else storeorder_ = ORDER; } template Matrix_base (const Matrix_base &m) : rows_ (m.rows()), cols_ (m.cols()) { if (STYLE == View) { storeorder_ = m.storeorder(); rowstride_ = m.rowstride(); colstride_ = m.colstride(); } else { storeorder_ = ORDER; if (ORDER == Col) { rowstride_ = 1; colstride_ = rows_; } else { rowstride_ = cols_; colstride_ = 1; } } } /* Submatrix constructor */ template Matrix_base (const Matrix_base &m, uint x1, uint y1, uint x2, uint y2) : rows_ (x2 - x1 + 1), cols_ (y2 - y1 + 1), rowstride_ (m.rowstride()), colstride_ (m.colstride()), storeorder_ (m.storeorder()) { /* Submatrices always have to be views, but the whole * concrete-view thing is just a policy maintained by the * software. Therefore, we need to ensure this constructor * only returns views. There's no neat way to do it but this * is still a compile time check, even if it only reports at * run-time. Of course, we should never get this far. */ if (STYLE == Concrete) { SCYTHE_THROW(scythe_style_error, "Tried to construct a concrete submatrix (Matrix_base)!"); } } /**** DESTRUCTOR ****/ ~Matrix_base () {} /**** OPERRATORS ****/ // I'm defining one just to make sure we don't get any subtle // bugs from defaults being called. Matrix_base& operator=(const Matrix_base& m) { SCYTHE_THROW(scythe_unexpected_default_error, "Unexpected call to Matrix_base default assignment operator"); } /**** MODIFIERS ****/ /* Make this Matrix_base an exact copy of the matrix passed in. * Just like an assignment operator but can be called from a derived * class w/out making the = optor public and doing something * like * *(dynamic_cast (this)) = M; * in the derived class. * * Works across styles, but should be used with care */ template inline void mimic (const Matrix_base &m) { rows_ = m.rows(); cols_ = m.cols(); rowstride_ = m.rowstride(); colstride_ = m.colstride(); storeorder_ = m.storeorder(); } /* Reset the dimensions of this Matrix_base. * * TODO This function is a bit of an interface weakness. It * assumes a resize always means a fresh matrix (concrete or * view) with no slack between dims and strides. This happens to * always be the case when it is called, but it tightly couples * Matrix_base and extending classes. Not a big issue (since * Matrix is probably the only class that will ever extend this) * but something to maybe fix down the road. */ inline void resize (uint rows, uint cols) { rows_ = rows; cols_ = cols; if (ORDER == Col) { rowstride_ = 1; colstride_ = rows; } else { rowstride_ = cols; colstride_ = 1; } storeorder_ = ORDER; } public: /**** ACCESSORS ****/ /*! \brief Returns the total number of elements in the Matrix. * * \see rows() * \see cols() * \see max_size() */ inline uint size () const { return (rows() * cols()); } /*! \brief Returns the number of rows in the Matrix. * * \see size() * \see cols() */ inline uint rows() const { return rows_; } /*! \brief Returns the number of columns in the Matrix. * * \see size() * \see rows() */ inline uint cols () const { return cols_; } /*! \brief Check matrix ordering. * * This method returns the matrix_order of this Matrix. * * \see style() * \see storeorder() */ inline matrix_order order() const { return ORDER; } /*! \brief Check matrix style. * * This method returns the matrix_style of this Matrix. 
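 *
 * A quick sketch of the metadata accessors (illustrative only,
 * assuming the default Matrix template parameters, i.e. double
 * elements, Col order, Concrete style):
 *
 * \code
 * Matrix<> A(3, 4);
 * // A.rows() == 3, A.cols() == 4, A.size() == 12
 * // A.order() == Col, A.style() == Concrete
 * // A.isSquare() == false, A.isVector() == false
 * \endcode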
* * \see order() * \see storeorder() */ inline matrix_style style() const { return STYLE; } /*! \brief Returns the storage order of the underlying * DataBlock. * * In view matrices, the storage order of the data may not be the * same as the template ORDER. * * * \see rowstride() * \see colstride() * \see order() * \see style() */ inline matrix_order storeorder () const { return storeorder_; } /*! \brief Returns the in-memory distance between elements in * successive rows of the matrix. * * \see colstride() * \see storeorder() */ inline uint rowstride () const { return rowstride_; } /*! \brief Returns the in-memory distance between elements in * successive columns of the matrix. * * \see rowstride() * \see storeorder() */ inline uint colstride () const { return colstride_; } /*! \brief Returns the maximum possible matrix size. * * Maximum matrix size is simply the highest available unsigned * int on your system. * * \see size() */ inline uint max_size () const { return UINT_MAX; } /*! \brief Returns true if this Matrix is 1x1. * * \see isNull() */ inline bool isScalar () const { return (rows() == 1 && cols() == 1); } /*! \brief Returns true if this Matrix is 1xm. * * \see isColVector() * \see isVector() */ inline bool isRowVector () const { return (rows() == 1); } /*! \brief Returns true if this Matrix is nx1. * * \see isRowVector() * \see isVector() */ inline bool isColVector () const { return (cols() == 1); } /*! \brief Returns true if this Matrix is nx1 or 1xn. * * \see isRowVector() * \see isColVector() */ inline bool isVector () const { return (cols() == 1 || rows() == 1); } /*! \brief Returns true if this Matrix is nxn. * * Null and scalar matrices are both considered square. * * \see isNull() * \see isScalar() */ inline bool isSquare () const { return (cols() == rows()); } /*! \brief Returns true if this Matrix has 0 elements. * * \see empty() * \see isScalar() */ inline bool isNull () const { return (rows() == 0); } /*! \brief Returns true if this Matrix has 0 elements. * * This function is identical to isNull() but conforms to STL * container class conventions. * * \see isNull() */ inline bool empty () const { return (rows() == 0); } /**** HELPERS ****/ /*! \brief Check if an index is in bounds. * * This function takes a single-argument index into a Matrix and * returns true iff that index is within the bounds of the * Matrix. This function is equivalent to the expression: * \code * i < M.size() * \endcode * for a given Matrix M. * * \param i The element index to check. * * \see inRange(uint, uint) */ inline bool inRange (uint i) const { return (i < size()); } /*! \brief Check if an index is in bounds. * * This function takes a two-argument index into a Matrix and * returns true iff that index is within the bounds of the * Matrix. This function is equivalent to the expression: * \code * i < M.rows() && j < M.cols() * \endcode * for a given Matrix M. * * \param i The row value of the index to check. * \param j The column value of the index to check. * * \see inRange(uint) */ inline bool inRange (uint i, uint j) const { return (i < rows() && j < cols()); } protected: /* These methods take offsets into a matrix and convert them * into that actual position in the referenced memory block, * taking stride into account. Protection is debatable. They * could be useful to outside functions in the library but most * callers should rely on the public () operator in the derived * class or iterators. * * Note that these are very fast for concrete matrices but not * so great for views. 
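 *
 * A worked illustration (not from the original source): for a 3x4
 * column-major concrete matrix, rowstride() == 1 and colstride() == 3,
 * so
 *
 *   Matrix<> A(3, 4);
 *   // element (i, j) sits at memory offset j * 3 + i
 *   // a single index k maps straight through: index(k) == k
 *
 * whereas a view uses the strides of the block it references
 * (col * colstride() + row for column-ordered storage).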
Of course, the type checks are done at * compile time. */ /* Turn an index into a true offset into the data. */ inline uint index (uint i) const { if (STYLE == View) { if (ORDER == Col) { uint col = i / rows(); uint row = i % rows(); return (index(row, col)); } else { uint row = i / cols(); uint col = i % cols(); return (index(row, col)); } } else return(i); } /* Turn an i, j into an index. */ inline uint index (uint row, uint col) const { if (STYLE == Concrete) { if (ORDER == Col) return (col * rows() + row); else return (row * cols() + col); } else { // view if (storeorder_ == Col) return (col * colstride() + row); else return (row * rowstride() + col); } } /**** INSTANCE VARIABLES ****/ protected: uint rows_; // # of rows uint cols_; // # of cols private: /* The derived class shouldn't have to worry about this * implementation detail. */ uint rowstride_; // the in-memory number of elements from the uint colstride_; // beginning of one column(row) to the next matrix_order storeorder_; // The in-memory storage order of this // matrix. This will always be the // same as ORDER for concrete // matrices but views can look at // matrices with storage orders that // differ from their own. // TODO storeorder is always known at // compile time, so we could probably // add a third template param to deal // with this. That would speed up // views a touch. Bit messy maybe. }; /**** MATRIX CLASS ****/ /*! \brief An STL-compliant matrix container class. * * The Matrix class provides a matrix object with an interface similar * to standard mathematical notation. The class provides a number * of unary and binary operators for manipulating matrices. * Operators provide such functionality as addition, multiplication, * and access to specific elements within the Matrix. One can test * two matrices for equality or use provided methods to test the * size, shape, or symmetry of a given Matrix. In addition, we * provide a number of facilities for saving, loading, and printing * matrices. Other portions of the library provide functions for * manipulating matrices. Most notably, la.h provides definitions * of common linear algebra routines and ide.h defines functions * that perform inversion and decomposition. * * This Matrix data structure sits at the core of the library. In * addition to standard matrix operations, this class allows * multiple matrices to view and modify the same underlying data. * This ability provides an elegant way in which to access and * modify submatrices such as isolated row vectors and greatly * increases the overall flexibility of the class. In addition, we * provide iterators (defined in matrix_random_access_iterator.h, * matrix_forward_iterator.h, and matrix_bidirectional_iterator.h) * that allow Matrix objects to interact seamlessly with the generic * algorithms provided by the Standard Template Library (STL). * * The Matrix class uses template parameters to define multiple * behaviors. Matrices are templated on data type, matrix_order, * and matrix_style. * * Matrix objects can contain elements of any type. For the most * part, uses will wish to fill their matrices with single or double * precision floating point numbers, but matrices of integers, * boolean values, complex numbers, and user-defined types are all * possible and useful. 
Although the basic book-keeping methods in * the Matrix class will support virtually any type, certain * operators require that one or more mathematical operator be * defined for the given type and many of the functions in the wider * Scythe library expect, or even demand, matrices containing floating * point numbers. * * There are two possible Matrix element orderings, row- or * column-major. Differences in matrix ordering will be most * noticeable at construction time. Constructors that build matrices * from streams or other list-like structures will place elements * into the matrix in its given order. In general, any method that * processes a matrix in order will use the given matrix_order. For * the most part, matrices of both orderings should exhibit the same * performance, but when a trade-off must be made, we err on the * side of column-major ordering. In one respect, this bias is very * strong. If you enable LAPACK/BLAS support in with the * SCYTHE_LAPACK compiler flag, the library will use these optimized * fortran routines to perform a number of operations on column * major matrices; we provide no LAPACK/BLAS support for row-major * matrices. Operations on matrices with mismatched ordering are * legal and supported, but not guaranteed to be as fast as * same-order operations, especially when SCYTHE_LAPACK is enabled. * * There are also two possible styles of Matrix template, concrete * and view. These two types of matrix provide distinct ways in * which to interact with an underlying block of data. * * Concrete matrices behave like matrices in previous * Scythe releases. They directly encapsulate a block of data and * always process it in the same order as it is stored (their * matrix_order always matches the underlying storage order). * All copy constructions and assignments on * concrete matrices make deep copies and it is not possible to use * the reference() method to make a concrete Matrix a view of * another Matrix. Furthermore, concrete matrices are guaranteed to * have unit stride (That is, adjacent Matrix elements are stored * adjacently in memory). * * Views, on the other hand, provide references to data blocks. * More than one view can look at the same underlying block of data, * possibly at different portions of the data at the same time. * Furthermore, a view may look at the data block of a concrete * matrix, perhaps accessing a single row vector or a small * submatrix of a larger matrix. When you copy construct * a view a deep copy is not made, rather the view simply provides * access to the extant block of data underlying the copied object. * Furthermore, when * you assign to a view, you overwrite the data the view is * currently pointing to, rather than generating a new data block. * Together, these behaviors allow * for matrices that view portions of other matrices * (submatrices) and submatrix assignment. Views do not guarantee * unit stride and may even logically access their elements in a * different order than they are stored in memory. Copying between * concretes and views is fully supported and often transparent to * the user. * * There is a fundamental trade-off between concrete matrices and * views. Concrete matrices are simpler to work with, but not * as flexible as views. Because they always have unit stride, * concrete matrices * have fast iterators and access operators but, because they must * always be copied deeply, they provide slow copies (for example, * copy construction of a Matrix returned from a function wastes * cycles). 
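 *
 * The practical difference looks like this (a sketch, assuming the
 * default template parameters):
 *
 * \code
 * Matrix<> A(4, 4, true, 1.0);        // concrete, unit stride
 * Matrix<> B = A;                     // deep copy into a fresh DataBlock
 * Matrix<double, Col, View> V = A;    // view of A's existing DataBlock
 * V(0, 0) = 9.0;                      // visible through A(0, 0)
 * B(0, 0) = 5.0;                      // leaves A untouched
 * \endcode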
Views are more flexible but also somewhat more * complicated to program with. Furthermore, because they cannot * guarantee unit stride, their iterators and access operations are * somewhat slower than those for concrete matrices. On the other * hand, they provide very fast copies. The average Scythe user may * find that she virtually never works with views directly (although * they can be quite useful in certain situations) but they provide * a variety of functionality underneath the hood of the library and * underpin many common operations. * * \note * The Matrix interface is split between two classes: this Matrix * class and Matrix_base, which Matrix extends. Matrix_base * includes a range of accessors that provide the programmer with * information about such things as the dimensionality of Matrix * objects. */ template class Matrix : public Matrix_base, public DataBlockReference { public: /**** TYPEDEFS ****/ /* Iterator types */ /*! \brief Random Access Iterator type. * * This typedef for matrix_random_access_iterator provides a * convenient shorthand for the default, and most general, * Matrix iterator type. * * \see const_iterator * \see reverse_iterator * \see const_reverse_iterator * \see forward_iterator * \see const_forward_iterator * \see reverse_forward_iterator * \see const_reverse_forward_iterator * \see bidirectional_iterator * \see const_bidirectional_iterator * \see reverse_bidirectional_iterator * \see const_reverse_bidirectional_iterator */ typedef matrix_random_access_iterator iterator; /*! \brief Const Random Access Iterator type. * * This typedef for const_matrix_random_access_iterator provides * a convenient shorthand for the default, and most general, * Matrix const iterator type. * * \see iterator * \see reverse_iterator * \see const_reverse_iterator * \see forward_iterator * \see const_forward_iterator * \see reverse_forward_iterator * \see const_reverse_forward_iterator * \see bidirectional_iterator * \see const_bidirectional_iterator * \see reverse_bidirectional_iterator * \see const_reverse_bidirectional_iterator */ typedef const_matrix_random_access_iterator const_iterator; /*! \brief Reverse Random Access Iterator type. * * This typedef uses std::reverse_iterator to describe a * reversed matrix_random_access_iterator type. This is the * reverse of iterator. * * \see iterator * \see const_iterator * \see const_reverse_iterator * \see forward_iterator * \see const_forward_iterator * \see reverse_forward_iterator * \see const_reverse_forward_iterator * \see bidirectional_iterator * \see const_bidirectional_iterator * \see reverse_bidirectional_iterator * \see const_reverse_bidirectional_iterator */ typedef typename std::reverse_iterator > reverse_iterator; /*! \brief Reverse Const Random Access Iterator type. * * This typedef uses std::reverse_iterator to describe a * reversed const_matrix_random_access_iterator type. This is * the reverse of const_iterator. * * \see iterator * \see const_iterator * \see reverse_iterator * \see forward_iterator * \see const_forward_iterator * \see reverse_forward_iterator * \see const_reverse_forward_iterator * \see bidirectional_iterator * \see const_bidirectional_iterator * \see reverse_bidirectional_iterator * \see const_reverse_bidirectional_iterator */ typedef typename std::reverse_iterator > const_reverse_iterator; /*! \brief Forward Iterator type. * * This typedef for matrix_forward_iterator provides * a convenient shorthand for a fast (when compared to * matrix_random_access_iterator) Matrix iterator type. 
* * \see iterator * \see const_iterator * \see reverse_iterator * \see const_reverse_iterator * \see const_forward_iterator * \see reverse_forward_iterator * \see const_reverse_forward_iterator * \see bidirectional_iterator * \see const_bidirectional_iterator * \see reverse_bidirectional_iterator * \see const_reverse_bidirectional_iterator */ typedef matrix_forward_iterator forward_iterator; /*! \brief Const Forward Iterator type. * * This typedef for const_matrix_forward_iterator provides a * convenient shorthand for a fast (when compared to * const_matrix_random_access_iterator) const Matrix iterator * type. * * \see iterator * \see const_iterator * \see reverse_iterator * \see const_reverse_iterator * \see forward_iterator * \see reverse_forward_iterator * \see const_reverse_forward_iterator * \see bidirectional_iterator * \see const_bidirectional_iterator * \see reverse_bidirectional_iterator * \see const_reverse_bidirectional_iterator */ typedef const_matrix_forward_iterator const_forward_iterator; /*! \brief Bidirectional Iterator type. * * This typedef for matrix_bidirectional_iterator provides * a convenient shorthand for a compromise (with speed and * flexibility between matrix_random_access_iterator and * matrix_forward_iterator) Matrix iterator type. * * \see iterator * \see const_iterator * \see reverse_iterator * \see const_reverse_iterator * \see forward_iterator * \see const_forward_iterator * \see reverse_forward_iterator * \see const_reverse_forward_iterator * \see const_bidirectional_iterator * \see reverse_bidirectional_iterator * \see const_reverse_bidirectional_iterator */ typedef matrix_bidirectional_iterator bidirectional_iterator; /*! \brief Const Bidirectional Iterator type. * * This typedef for const_matrix_bidirectional_iterator provides * a convenient shorthand for a compromise (with speed and * flexibility between const_matrix_random_access_iterator and * const_matrix_forward_iterator) const Matrix iterator type. * * \see iterator * \see const_iterator * \see reverse_iterator * \see const_reverse_iterator * \see forward_iterator * \see const_forward_iterator * \see reverse_forward_iterator * \see const_reverse_forward_iterator * \see bidirectional_iterator * \see reverse_bidirectional_iterator * \see const_reverse_bidirectional_iterator */ typedef const_matrix_bidirectional_iterator const_bidirectional_iterator; /*! \brief Const Bidirectional Iterator type. * * This typedef uses std::reverse_iterator to describe a * reversed matrix_bidirectional_iterator type. This is * the reverse of bidirectional_iterator. * * \see iterator * \see const_iterator * \see reverse_iterator * \see const_reverse_iterator * \see forward_iterator * \see const_forward_iterator * \see reverse_forward_iterator * \see const_reverse_forward_iterator * \see bidirectional_iterator * \see const_bidirectional_iterator * \see const_reverse_bidirectional_iterator */ typedef typename std::reverse_iterator > reverse_bidirectional_iterator; /*! \brief Reverse Const Bidirectional Iterator type. * * This typedef uses std::reverse_iterator to describe a * reversed const_matrix_bidirectional_iterator type. This is * the reverse of const_bidirectional_iterator. 
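 *
 * As an illustration (an assumption, not one of the shipped example
 * files), the forward iterators returned by begin_f() and end_f()
 * work with generic STL algorithms; with <numeric> included:
 *
 * \code
 * Matrix<> A(3, 3, true, 2.0);
 * double total = std::accumulate(A.begin_f(), A.end_f(), 0.0); // 18.0
 * \endcode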
* * \see iterator * \see const_iterator * \see reverse_iterator * \see const_reverse_iterator * \see forward_iterator * \see const_forward_iterator * \see reverse_forward_iterator * \see const_reverse_forward_iterator * \see bidirectional_iterator * \see const_bidirectional_iterator * \see reverse_bidirectional_iterator */ typedef typename std::reverse_iterator > const_reverse_bidirectional_iterator; /*!\brief The Matrix' element type. * * This typedef describes the element type (T_type) of this * Matrix. */ typedef T_type ttype; private: /* Some convenience typedefs */ typedef DataBlockReference DBRef; typedef Matrix_base Base; public: /**** CONSTRUCTORS ****/ /*! \brief Default constructor. * * The default constructor creates an empty/null matrix. Using * null matrices in operations will typically cause errors; this * constructor exists primarily for initialization within * aggregate types. * * \see Matrix(T_type) * \see Matrix(uint, uint, bool, T_type) * \see Matrix(uint, uint, T_iterator) * \see Matrix(const std::string&) * \see Matrix(const Matrix&) * \see Matrix(const Matrix &) * \see Matrix(const Matrix &) * \see Matrix(const Matrix&, uint, uint, uint, uint) * * \b Example: * \include example.matrix.constructor.default.cc */ Matrix () : Base (), DBRef () { } /*! \brief Parameterized type constructor. * * Creates a 1x1 matrix (scalar). * * \param element The scalar value of the constructed Matrix. * * \see Matrix() * \see Matrix(uint, uint, bool, T_type) * \see Matrix(uint, uint, T_iterator) * \see Matrix(const std::string&) * \see Matrix(const Matrix&) * \see Matrix(const Matrix &) * \see Matrix(const Matrix &) * \see Matrix(const Matrix&, uint, uint, uint, uint) * * \throw scythe_alloc_error (Level 1) * * \b Example: * \include example.matrix.constructor.ptype.cc */ Matrix (T_type element) : Base (1, 1), DBRef (1) { data_[Base::index(0)] = element; // ALWAYS use index() } /*! \brief Standard constructor. * * The standard constructor creates a rowsXcols Matrix, filled * with zeros by default. Optionally, you can leave the Matrix * uninitialized, or choose a different fill value. * * \param rows The number of rows in the Matrix. * \param cols The number of columns in the Matrix. * \param fill Indicates whether or not the Matrix should be * initialized. * \param fill_value The scalar value to fill the Matrix with * when fill == true. * * \see Matrix() * \see Matrix(T_type) * \see Matrix(uint, uint, T_iterator) * \see Matrix(const std::string&) * \see Matrix(const Matrix&) * \see Matrix(const Matrix &) * \see Matrix(const Matrix &) * \see Matrix(const Matrix&, uint, uint, uint, uint) * * \throw scythe_alloc_error (Level 1) * * \b Example: * \include example.matrix.constructor.standard.cc */ Matrix (uint rows, uint cols, bool fill = true, T_type fill_value = 0) : Base (rows, cols), DBRef (rows * cols) { // TODO Might use iterator here for abstraction. if (fill) for (uint i = 0; i < Base::size(); ++i) data_[Base::index(i)] = fill_value; // we know data contig } /*! \brief Iterator constructor. * * Creates a \a rows X \a cols matrix, filling it sequentially * (based on this template's matrix_order) with values * referenced by the input iterator \a it. Pointers are a form * of input iterator, so one can use this constructor to * initialize a matrix object from a c-style array. The caller * is responsible for supplying an iterator that won't be * exhausted too soon. * * \param rows The number of rows in the Matrix. * \param cols The number of columns in the Matrix. 
* \param it The input iterator to read from. * * \see Matrix() * \see Matrix(T_type) * \see Matrix(uint, uint, bool, T_type) * \see Matrix(const std::string&) * \see Matrix(const Matrix&) * \see Matrix(const Matrix &) * \see Matrix(const Matrix &) * \see Matrix(const Matrix&, uint, uint, uint, uint) * * \throw scythe_alloc_error (Level 1) * * \b Example: * \include example.matrix.constructor.iterator.cc */ template Matrix (uint rows, uint cols, T_iterator it) : Base (rows, cols), DBRef (rows * cols) { // TODO again, should probably use iterator for (uint i = 0; i < Base::size(); ++i) { data_[Base::index(i)] = *it; // we know data_ is contig ++it; } } /*! \brief File constructor. * * Constructs a matrix from the contents of a file. The * standard input file format is a simple rectangular text file * with one matrix row per line and spaces delimiting values in * a row. Optionally, one can also use Scythe's old file format * which is a space-delimited, row-major ordered, list of values * with row and column lengths in the first two slots. * * \param path The path of the input file. * \param oldstyle Whether or not to use Scythe's old file format. * * \see Matrix() * \see Matrix(T_type) * \see Matrix(uint, uint, bool, T_type) * \see Matrix(uint, uint, T_iterator) * \see Matrix(const Matrix&) * \see Matrix(const Matrix &) * \see Matrix(const Matrix &) * \see Matrix(const Matrix&, uint, uint, uint, uint) * \see save(const std::string&) * * \throw scythe_alloc_error (Level 1) * \throw scythe_file_error (Level 1) * \throw scythe_bounds_error (Level 3) * * \b Example: * \include example.matrix.constructor.file.cc */ Matrix (const std::string& path, bool oldstyle=false) : Base (), DBRef () { std::ifstream file(path.c_str()); SCYTHE_CHECK_10(! file, scythe_file_error, "Could not open file " << path); if (oldstyle) { uint rows, cols; file >> rows >> cols; resize(rows, cols); std::copy(std::istream_iterator (file), std::istream_iterator(), begin_f()); } else { std::string row; unsigned int cols = -1; std::vector > vals; unsigned int rows = 0; while (std::getline(file, row)) { std::vector column; std::istringstream rowstream(row); std::copy(std::istream_iterator (rowstream), std::istream_iterator(), std::back_inserter(column)); if (cols == -1) cols = (unsigned int) column.size(); SCYTHE_CHECK_10(cols != column.size(), scythe_file_error, "Row " << (rows + 1) << " of input file has " << column.size() << " elements, but should have " << cols); vals.push_back(column); rows++; } resize(rows, cols); for (unsigned int i = 0; i < rows; ++i) operator()(i, _) = Matrix(1, cols, vals[i].begin()); } } /* Copy constructors. Uses template args to set up correct * behavior for both concrete and view matrices. The branches * are no-ops and get optimized away at compile time. * * We have to define this twice because we must explicitly * override the default copy constructor; otherwise it is the * most specific template in a lot of cases and causes ugliness. */ /*! \brief Default copy constructor. * * Copy constructing a concrete Matrix makes an exact copy of M * in a new data block. On the other hand, copy constructing a * view Matrix generates a new Matrix object that references (or * views) M's existing data block. * * \param M The Matrix to copy or make a view of. 
* * \see Matrix() * \see Matrix(T_type) * \see Matrix(uint, uint, bool, T_type) * \see Matrix(uint, uint, T_iterator) * \see Matrix(const std::string&) * \see Matrix(const Matrix &) * \see Matrix(const Matrix &) * \see Matrix(const Matrix&, uint, uint, uint, uint) * \see copy() * \see copy(const Matrix &) * \see reference(const Matrix &) * * \throw scythe_alloc_error (Level 1) * * \b Example: * \include example.matrix.constructor.copy.cc */ Matrix (const Matrix& M) : Base (M), // this call deals with concrete-view conversions DBRef () { if (STYLE == Concrete) { this->referenceNew(M.size()); scythe::copy(M, *this); } else // STYLE == View this->referenceOther(M); } /*! \brief Cross order and/or style copy constructor. * * Copy constructing a concrete Matrix makes an exact copy of M * in a new data block. On the other hand, copy constructing a * view Matrix generates a new Matrix object that references (or * views) M's existing data block. * * This version of the copy constructor extends Matrix(const * Matrix &) by allowing the user to make concrete copies and * views of matrices that have matrix_order or matrix_style that * does not match that of the constructed Matrix. That is, this * constructor makes it possible to create views of concrete * matrices and concrete copies of views, row-major copies of * col-major matrices, and so on. * * \param M The Matrix to copy or make a view of. * * \see Matrix() * \see Matrix(T_type) * \see Matrix(uint, uint, bool, T_type) * \see Matrix(uint, uint, T_iterator) * \see Matrix(const std::string&) * \see Matrix(const Matrix&) * \see Matrix(const Matrix &) * \see Matrix(const Matrix&, uint, uint, uint, uint) * \see copy() * \see copy(const Matrix &) * \see reference(const Matrix &) * * \throw scythe_alloc_error (Level 1) * * \b Example: * \include example.matrix.constructor.crosscopy.cc */ template Matrix (const Matrix &M) : Base (M), // this call deals with concrete-view conversions DBRef () { if (STYLE == Concrete) { this->referenceNew(M.size()); scythe::copy (M, *this); } else // STYLE == View this->referenceOther(M); } /*! \brief Cross type copy constructor * * The type conversion copy constructor takes a reference to an * existing matrix containing elements of a different type than * the constructed matrix and creates a copy. This constructor * will only work if it is possible to cast elements from the * copied matrix to the type of elements in the constructed * matrix. * * This constructor always creates a deep copy of the existing * matrix, even if the constructed matrix is a view. It is * impossible for a matrix view with one element type to * reference the data block of a matrix containing elements of a * different type. * * \param M The Matrix to copy. * * \see Matrix() * \see Matrix(T_type) * \see Matrix(uint, uint, bool, T_type) * \see Matrix(uint, uint, T_iterator) * \see Matrix(const std::string&) * \see Matrix(const Matrix&) * \see Matrix(const Matrix &) * \see Matrix(const Matrix&, uint, uint, uint, uint) * * \throw scythe_alloc_error (Level 1) * * \b Example: * \include example.matrix.constructor.convcopy.cc */ template Matrix (const Matrix &M) : Base(M), // this call deal with concrete-view conversions DBRef (M.size()) { scythe::copy (M, *this); } /*! \brief Submatrix constructor * * The submatrix constructor takes a reference to an existing * matrix and a set of indices, and generates a new Matrix * object referencing the submatrix described by the indices. 
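 *
 * For example (a sketch; as the note below says, the submatrix
 * operators are the preferred way to obtain such views):
 *
 * \code
 * Matrix<> A(5, 5, true, 0.0);
 * Matrix<double, Col, View> B = A(1, 1, 3, 3);  // 3x3 view of A's interior
 * B = 9.0;                                      // writes through to A
 * \endcode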
* One can only construct a submatrix with a view template and * this constructor will throw an error if one tries to use it * to construct a concrete matrix. * * \note * The submatrix-returning operators provide the same * functionality as this constructor with less obtuse syntax. * Users should generally employ these methods instead of this * constructor. * * \param M The Matrix to view. * \param x1 The first row coordinate, \a x1 <= \a x2. * \param y1 The first column coordinate, \a y1 <= \a y2. * \param x2 The second row coordinate, \a x2 > \a x1. * \param y2 The second column coordinate, \a y2 > \a y1. * * \see Matrix() * \see Matrix(T_type) * \see Matrix(uint, uint, bool, T_type) * \see Matrix(uint, uint, T_iterator) * \see Matrix(const std::string&) * \see Matrix(const Matrix&) * \see Matrix(const Matrix &) * \see Matrix(const Matrix &) * \see operator()(uint, uint, uint, uint) * \see operator()(uint, uint, uint, uint) const * \see operator()(all_elements, uint) * \see operator()(all_elements, uint) const * \see operator()(uint, all_elements) * \see operator()(uint, all_elements) const * \see reference(const Matrix &) * * \throw scythe_style_error (Level 0) * \throw scythe_alloc_error (Level 1) */ template Matrix (const Matrix &M, uint x1, uint y1, uint x2, uint y2) : Base(M, x1, y1, x2, y2), DBRef(M, Base::index(x1, y1)) { /* Submatrices always have to be views, but the whole * concrete-view thing is just a policy maintained by the * software. Therefore, we need to ensure this constructor * only returns views. There's no neat way to do it but this * is still a compile time check, even if it only reports at * run-time. */ if (STYLE == Concrete) { SCYTHE_THROW(scythe_style_error, "Tried to construct a concrete submatrix (Matrix)!"); } } public: /**** DESTRUCTOR ****/ /*!\brief Destructor. */ ~Matrix() {} /**** COPY/REFERENCE METHODS ****/ /* Make this matrix a view of another's data. If this matrix's * previous datablock is not viewed by any other object it is * deallocated. Concrete matrices cannot be turned into views * at run-time! Therefore, we generate an error here if *this * is concrete. */ /*!\brief View another Matrix's data. * * This modifier makes this matrix a view of another's data. * The action detaches the Matrix from its current view; if no * other Matrix views the detached DataBlock, it will be * deallocated. * * Concrete matrices cannot convert into views at * run time. Therefore, it is an error to invoke this method on * a concrete Matrix. * * \param M The Matrix to view. * * \see Matrix(const Matrix&) * \see Matrix(const Matrix &) * \see Matrix(const Matrix &) * \see copy() * \see copy(const Matrix &) * * \throw scythe_style_error (Level 0) * * \b Example: * \include example.matrix.reference.cc */ template inline void reference (const Matrix &M) { if (STYLE == Concrete) { SCYTHE_THROW(scythe_style_error, "Concrete matrices cannot reference other matrices"); } else { this->referenceOther(M); this->mimic(M); } } /*!\brief Create a copy of this matrix. * * Creates a deep copy of this Matrix. The returned concrete * matrix references a newly created DataBlock that contains * values that are identical to, but distinct from, the values * contained in the original Matrix. 
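 *
 * Sketch (illustrative only, assuming the default template
 * parameters):
 *
 * \code
 * Matrix<> A(2, 2, true, 3.0);
 * Matrix<> B = A.copy();            // deep, independent copy
 * B(0, 0) = 7.0;                    // A(0, 0) is still 3.0
 *
 * Matrix<double, Col, View> V;
 * V.reference(A);                   // V views A's data (views only)
 * \endcode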
* * \see Matrix(const Matrix&) * \see Matrix(const Matrix &) * \see Matrix(const Matrix &) * \see copy(const Matrix &) * \see reference(const Matrix &) * * \throw scythe_alloc_error (Level 1) * * \b Example: * \include example.matrix.copy.cc */ inline Matrix copy () const { Matrix res (Base::rows(), Base::cols(), false); std::copy(begin_f(), end_f(), res.begin_f()); return res; } /* Make this matrix a copy of another. The matrix retains its * own order and style in this case, because that can't change * at run time. */ /*!\brief Make this Matrix a copy of another. * * Converts this Matrix into a deep copy of another Matrix. * This Matrix retains its own matrix_order and matrix_style but * contains copies of M's elements and becomes the same size and * shape as M. Calling this method automatically detaches this * Matrix from its previous DataBlock before copying. * * \param M The Matrix to copy. * * \see Matrix(const Matrix&) * \see Matrix(const Matrix &) * \see Matrix(const Matrix &) * \see copy() * \see reference(const Matrix &) * \see detach() * * \throw scythe_alloc_error (Level 1) * * \b Example: * \include example.matrix.copyother.cc */ template inline void copy (const Matrix& M) { resize2Match(M); scythe::copy (M, *this); } /**** INDEXING OPERATORS ****/ /*! \brief Access or modify an element in this Matrix. * * This indexing operator allows the caller to access or modify * the ith (indexed in this Matrix's matrix_order) element of * this Matrix, indexed from 0 to n - 1, where n is the number * of elements in the Matrix. * * \param i The index of the element to access/modify. * * \see operator[](uint) const * \see operator()(uint) * \see operator()(uint) const * \see operator()(uint, uint) * \see operator()(uint, uint) const * * \throw scythe_bounds_error (Level 3) */ inline T_type& operator[] (uint i) { SCYTHE_CHECK_30 (! Base::inRange(i), scythe_bounds_error, "Index " << i << " out of range"); return data_[Base::index(i)]; } /*! \brief Access an element in this Matrix. * * This indexing operator allows the caller to access * the ith (indexed in this Matrix's matrix_order) element of * this Matrix, indexed from 0 to n - 1, where n is the number * of elements in the Matrix. * * \param i The index of the element to access. * * \see operator[](uint) * \see operator()(uint) * \see operator()(uint) const * \see operator()(uint, uint) * \see operator()(uint, uint) const * * \throw scythe_bounds_error (Level 3) */ inline T_type& operator[] (uint i) const { SCYTHE_CHECK_30 (! Base::inRange(i), scythe_bounds_error, "Index " << i << " out of range"); return data_[Base::index(i)]; } /*! \brief Access or modify an element in this Matrix. * * This indexing operator allows the caller to access or modify * the ith (indexed in this Matrix's matrix_order) element of * this Matrix, indexed from 0 to n - 1, where n is the number * of elements in the Matrix. * * \param i The index of the element to access/modify. * * \see operator[](uint) * \see operator[](uint) const * \see operator()(uint) const * \see operator()(uint, uint) * \see operator()(uint, uint) const * * \throw scythe_bounds_error (Level 3) */ inline T_type& operator() (uint i) { SCYTHE_CHECK_30 (! Base::inRange(i), scythe_bounds_error, "Index " << i << " out of range"); return data_[Base::index(i)]; } /*! \brief Access an element in this Matrix. 
* * This indexing operator allows the caller to access * the ith (indexed in this Matrix's matrix_order) element of * this Matrix, indexed from 0 to n - 1, where n is the number * of elements in the Matrix. * * \param i The index of the element to access. * * \see operator[](uint) * \see operator[](uint) const * \see operator()(uint) * \see operator()(uint, uint) * \see operator()(uint, uint) const * * \throw scythe_bounds_error (Level 3) */ inline T_type& operator() (uint i) const { SCYTHE_CHECK_30 (! Base::inRange(i), scythe_bounds_error, "Index " << i << " out of range"); return data_[Base::index(i)]; } /*! \brief Access or modify an element in this Matrix. * * This indexing operator allows the caller to access or modify * the (i, j)th element of * this Matrix, where i is an element of 0, 1, ..., rows - 1 and * j is an element of 0, 1, ..., columns - 1. * * \param i The row index of the element to access/modify. * \param j The column index of the element to access/modify. * * \see operator[](uint) * \see operator[](uint) const * \see operator()(uint) * \see operator()(uint) const * \see operator()(uint, uint) const * * \throw scythe_bounds_error (Level 3) */ inline T_type& operator() (uint i, uint j) { SCYTHE_CHECK_30 (! Base::inRange(i, j), scythe_bounds_error, "Index (" << i << ", " << j << ") out of range"); return data_[Base::index(i, j)]; } /*! \brief Access an element in this Matrix. * * This indexing operator allows the caller to access * the (i, j)th element of * this Matrix, where i is an element of 0, 1, ..., rows - 1 and * j is an element of 0, 1, ..., columns - 1. * * \param i The row index of the element to access. * \param j The column index of the element to access. * * \see operator[](uint) * \see operator[](uint) const * \see operator()(uint) * \see operator()(uint) const * \see operator() (uint, uint) * * \throw scythe_bounds_error (Level 3) */ inline T_type& operator() (uint i, uint j) const { SCYTHE_CHECK_30 (! Base::inRange(i, j), scythe_bounds_error, "Index (" << i << ", " << j << ") out of range"); return data_[Base::index(i, j)]; } /**** SUBMATRIX OPERATORS ****/ /* Submatrices are always views. An extra (but relatively * cheap) copy constructor call is made when mixing and matching * orders like * * Matrix<> A; * ... * Matrix B = A(2, 2, 4, 4); * * It is technically possible to get around this, by providing * templates of each function of the form * template * Matrix operator() (...) * * but the syntax to call them (crappy return type inference): * * Matrix B = A.template operator()(2, 2, 4, 4) * * is such complete gibberish that I don't think this is worth * the slight optimization. */ /*! \brief Returns a view of a submatrix. * * This operator returns a rectangular submatrix view of this * Matrix with its upper left corner at (x1, y1) and its lower * right corner at (x2, y2). * * \param x1 The upper row of the submatrix. * \param y1 The leftmost column of the submatrix. * \param x2 The lowest row of the submatrix. * \param y2 The rightmost column of the submatrix. * * \see operator()(uint, uint, uint, uint) const * \see operator()(all_elements, uint) * \see operator()(all_elements, uint) const * \see operator()(uint, all_elements) * \see operator()(uint, all_elements) const * * \throw scythe_bounds_error (Level 2) * * \b Example: * \include example.matrix.submatrix.cc */ inline Matrix operator() (uint x1, uint y1, uint x2, uint y2) { SCYTHE_CHECK_20 (! Base::inRange(x1, y1) || ! 
Base::inRange(x2, y2) || x1 > x2 || y1 > y2, scythe_bounds_error, "Submatrix (" << x1 << ", " << y1 << ") ; (" << x2 << ", " << y2 << ") out of range or ill-formed"); return (Matrix(*this, x1, y1, x2, y2)); } /*! \brief Returns a view of a submatrix. * * This operator returns a rectangular submatrix view of this * Matrix with its upper left corner at (x1, y1) and its lower * right corner at (x2, y2). * * \param x1 The upper row of the submatrix. * \param y1 The leftmost column of the submatrix. * \param x2 The lowest row of the submatrix. * \param y2 The rightmost column of the submatrix. * * \see operator()(uint, uint, uint, uint) * \see operator()(all_elements, uint) * \see operator()(all_elements, uint) const * \see operator()(uint, all_elements) * \see operator()(uint, all_elements) const * * \throw scythe_bounds_error (Level 2) */ inline Matrix operator() (uint x1, uint y1, uint x2, uint y2) const { SCYTHE_CHECK_20 (! Base::inRange(x1, y1) || ! Base::inRange(x2, y2) || x1 > x2 || y1 > y2, scythe_bounds_error, "Submatrix (" << x1 << ", " << y1 << ") ; (" << x2 << ", " << y2 << ") out of range or ill-formed"); return (Matrix(*this, x1, y1, x2, y2)); } /*! \brief Returns a view of a column vector. * * This operator returns a vector view of column j in this Matrix. * * \param a An all_elements object signifying whole vector access. * \param j The column to view. * * \see operator()(uint, uint, uint, uint) * \see operator()(uint, uint, uint, uint) const * \see operator()(all_elements, uint) const * \see operator()(uint, all_elements) * \see operator()(uint, all_elements) const * * \throw scythe_bounds_error (Level 2) * * \b Example: * \include example.matrix.vector.cc */ inline Matrix operator() (const all_elements a, uint j) { SCYTHE_CHECK_20 (j >= Base::cols(), scythe_bounds_error, "Column vector index " << j << " out of range"); return (Matrix (*this, 0, j, Base::rows() - 1, j)); } /*! \brief Returns a view of a column vector. * * This operator returns a vector view of column j in this Matrix. * * \param a An all_elements object signifying whole vector access. * \param j The column to view. * * \see operator()(uint, uint, uint, uint) * \see operator()(uint, uint, uint, uint) const * \see operator()(all_elements, uint) * \see operator()(uint, all_elements) * \see operator()(uint, all_elements) const * * \throw scythe_bounds_error (Level 2) */ inline Matrix operator() (const all_elements a, uint j) const { SCYTHE_CHECK_20 (j >= Base::cols(), scythe_bounds_error, "Column vector index " << j << " out of range"); return (Matrix (*this, 0, j, Base::rows() - 1, j)); } /*! \brief Returns a view of a row vector. * * This operator returns a vector view of row i in this Matrix. * * \param i The row to view. * \param b An all_elements object signifying whole vector access. * * \see operator()(uint, uint, uint, uint) * \see operator()(uint, uint, uint, uint) const * \see operator()(all_elements, uint) * \see operator()(all_elements, uint) const * \see operator()(uint, all_elements) const * * \throw scythe_bounds_error (Level 2) * * \b Example: * \include example.matrix.vector.cc */ inline Matrix operator() (uint i, const all_elements b) { SCYTHE_CHECK_20 (i >= Base::rows(), scythe_bounds_error, "Row vector index " << i << " out of range"); return (Matrix (*this, i, 0, i, Base::cols() - 1)); } /*! \brief Returns a view of a row vector. * * This operator returns a vector view of row i in this Matrix. * * \param i The row to view. * \param b An all_elements object signifying whole vector access. 
* * \see operator()(uint, uint, uint, uint) * \see operator()(uint, uint, uint, uint) const * \see operator()(all_elements, uint) * \see operator()(all_elements, uint) const * \see operator()(uint, all_elements) * * \throw scythe_bounds_error (Level 2) */ inline Matrix operator() (uint i, const all_elements b) const { SCYTHE_CHECK_20 (i >= Base::rows(), scythe_bounds_error, "Row vector index " << i << " out of range"); return (Matrix (*this, i, 0, i, Base::cols() - 1)); } /*! \brief Returns single element in matrix as scalar type * * This method converts a matrix object to a single scalar * element of whatever type the matrix is composed of. The * method simply returns the element at position zero; if error * checking is turned on the method will throw an error if the * matrix is not, in fact, 1x1. * * \throw scythe_conformation_error (Level 1) */ /**** ASSIGNMENT OPERATORS ****/ /* * As with the copy constructor, we need to * explicitly define the same-order-same-style assignment * operator or the default operator will take over. * * TODO With views, it may be desirable to auto-grow (and, * technically, detach) views to the null matrix. This means * you can write something like: * * Matrix X; * X = ... * * and not run into trouble because you didn't presize. Still, * not sure this won't encourage silly mistakes...need to think * about it. */ /*! \brief Assign the contents of one Matrix to another. * * Like copy construction, assignment works differently for * concrete matrices than it does for views. When you assign to * a concrete Matrix it resizes itself to match the right hand * side Matrix and copies over the values. Like all resizes, * this causes this Matrix to detach() from its original * DataBlock. This means that any views attached to this Matrix * will no longer view this Matrix's data after the assignment; * they will continue to view this Matrix's previous DataBlock. * When you assign to a view it first checks that * the right hand side conforms to its dimensions (by default, * see below), and then copies the right hand side values over * into its current DataBlock, overwriting the current contents. * * Scythe also supports a slightly different model of view * assignment. If the user compiled her program with the * SCYTHE_VIEW_ASSIGNMENT_RECYCLE flag set then it is possible * to copy into a view that is not of the same size as the * Matrix on the right hand side of the equation. In this case, * the operator copies elements from the right hand side * object into this matrix until either this matrix runs out of * room, or the right hand side one does. In the latter case, * the operator starts over at the beginning of the right hand * side object, recycling its values as many times as necessary * to fill the left hand side object. The * SCYTHE_VIEW_ASSIGNMENT_RECYCLE flag does not affect the * behavior of the concrete matrices in any way. * * \param M The Matrix to copy.
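 *
 * A minimal sketch of the two behaviors described above (assuming
 * the fill constructor Matrix(rows, cols, fill, fill_value), the
 * default Matrix<> typedef, and the all_elements selector \c _):
 * \code
 * Matrix<> A(2, 2, true, 1.0);
 * Matrix<> B(3, 3, true, 2.0);
 * A = B;                                  // concrete: A resizes to 3x3
 *
 * Matrix<double, Col, View> v = B(_, 0);  // view of B's first column
 * v = A(_, 0);                            // view: dimensions must conform
 * \endcode
 * Unless SCYTHE_VIEW_ASSIGNMENT_RECYCLE is defined, the final
 * assignment throws a scythe_conformation_error when the two column
 * views differ in length; here both are 3x1, so it simply copies
 * A's first column into B's first column.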
* * \see operator=(const Matrix&) * \see operator=(T_type x) * \see operator=(ListInitializer) * \see Matrix(const Matrix&) * \see Matrix(const Matrix &) * \see Matrix(const Matrix &) * \see copy() * \see copy(const Matrix &) * \see reference(const Matrix &) * \see resize(uint, uint, bool) * \see detach() * * \throw scythe_conformation_error (Level 1) * \throw scythe_alloc_error (Level 1) * * \b Example: * \include example.matrix.operator.assignment.cc */ Matrix& operator= (const Matrix& M) { if (STYLE == Concrete) { resize2Match(M); scythe::copy (M, *this); } else { #ifndef SCYTHE_VIEW_ASSIGNMENT_RECYCLE SCYTHE_CHECK_10 (Base::size() != M.size(), scythe_conformation_error, "LHS has dimensions (" << Base::rows() << ", " << Base::cols() << ") while RHS has dimensions (" << M.rows() << ", " << M.cols() << ")"); scythe::copy (M, *this); #else copy_recycle(M, *this); #endif } return *this; } /*! \brief Assign the contents of one Matrix to another. * * Like copy construction, assignment works differently for * concrete matrices than it does for views. When you assign to * a concrete Matrix it resizes itself to match the right hand * side Matrix and copies over the values. Like all resizes, * this causes this Matrix to detach() from its original * DataBlock. When you assign to a view it first checks that * the right hand side conforms to its dimensions, and then * copies the right hand side values over into its current * DataBlock, overwriting the current contents. * * Scythe also supports a slightly different model of view * assignment. If the user compiled her program with the * SCYTHE_VIEW_ASSIGNMENT_RECYCLE flag set then it is possible * to copy into a view that is not of the same size as the * Matrix on the right hand side of the equation. In this case, * the operator copies elements from the right hand side * object into this matrix until either this matrix runs out of * room, or the right hand side one does. In the latter case, * the operator starts over at the beginning of the right hand * side object, recycling its values as many times as necessary * to fill the left hand side object. The * SCYTHE_VIEW_ASSIGNMENT_RECYCLE flag does not affect the * behavior of the concrete matrices in any way. * * This version of the assignment operator handles assignments * between matrices of different matrix_order and/or * matrix_style. * * \param M The Matrix to copy. * * \see operator=(const Matrix&) * \see operator=(T_type x) * \see operator=(ListInitializer) * \see Matrix(const Matrix&) * \see Matrix(const Matrix &) * \see Matrix(const Matrix &) * \see copy() * \see copy(const Matrix &) * \see reference(const Matrix &) * \see resize(uint, uint, bool) * \see detach() * * \throw scythe_conformation_error (Level 1) * \throw scythe_alloc_error (Level 1) * * \b Example: * \include example.matrix.operator.assignment.cc */ template Matrix &operator= (const Matrix &M) { if (STYLE == Concrete) { resize2Match(M); scythe::copy (M, *this); } else { #ifndef SCYTHE_VIEW_ASSIGNMENT_RECYCLE SCYTHE_CHECK_10 (Base::size() != M.size(), scythe_conformation_error, "LHS has dimensions (" << Base::rows() << ", " << Base::cols() << ") while RHS has dimensions (" << M.rows() << ", " << M.cols() << ")"); scythe::copy (M, *this); #else copy_recycle(M, *this); #endif } return *this; } /* List-wise initialization behavior is a touch complicated. * List needs to be less than or equal to matrix in size and it * is copied into the matrix with R-style recycling. 
* * The one issue is that, if you want true assignment of a * scalar to a concrete matrix (resize the matrix to a scalar) * you need to be explicit: * * Matrix<> A(2, 2); * double x = 3; * ... * A = Matrix<>(x); // -> 3 * * A = x; // -> 3 3 * // 3 3 */ /*! \brief Copy values in a comma-separated list into this Matrix. * * This assignment operator allows the user to copy the values in * a bare, comma-separated, list into this Matrix. The list * should have no more elements in it than the Matrix has * elements. If the list has fewer elements than the Matrix, it * will be recycled until the Matrix is full. * * If you wish to convert a concrete Matrix to a scalar-valued * Matrix object you need to explicitly promote the scalar to a * Matrix, using the parameterized type constructor * (Matrix(T_type)). * * \param x The first element in the list. * * \see operator=(const Matrix&) * \see operator=(const Matrix&) * \see operator=(ListInitializer) * \see Matrix(const Matrix&) * \see Matrix(const Matrix &) * \see Matrix(const Matrix &) * \see copy() * \see copy(const Matrix &) * \see reference(const Matrix &) * \see resize(uint, uint, bool) * \see detach() * * \b Example: * \include example.matrix.operator.assignment.cc */ ListInitializer operator=(T_type x) { return (ListInitializer (x, begin(),end(), this)); } /*! \brief A special assignment operator. * * This assignment operator provides the necessary glue to allow * chained assignments of matrices where the last assignment is * achieved through list initialization. This allows users to * write code like * \code * Matrix<> A, B, C; * Matrix<> D(4, 4, false); * A = B = C = (D = 1, 2, 3, 4); * \endcode * where the assignment in the parentheses technically returns a * ListInitializer object, not a Matrix object. The details of * this mechanism are not important for the average user and the * distinction can safely be ignored. * * \note * The parentheses in the code above are necessary because of * the precedence of the assignment operator. * * \see operator=(const Matrix&) * \see operator=(const Matrix&) * \see operator=(T_type x) * * \b Example: * \include example.matrix.operator.assignment.cc */ template Matrix &operator=(ListInitializer li) { li.populate(); *this = *(li.matrix_); return *this; } /**** ARITHMETIC OPERATORS ****/ private: /* Reusable chunk of code for element-wise operator * assignments. Updates are done in-place except for the 1x1 by * nXm case, which forces a resize. */ template inline Matrix& elementWiseOperatorAssignment (const Matrix& M, OP op) { SCYTHE_CHECK_10 (Base::size() != 1 && M.size() != 1 && (Base::rows () != M.rows() || Base::cols() != M.cols()), scythe_conformation_error, "Matrices with dimensions (" << Base::rows() << ", " << Base::cols() << ") and (" << M.rows() << ", " << M.cols() << ") are not conformable"); if (Base::size() == 1) { // 1x1 += nXm T_type tmp = (*this)(0); resize2Match(M); std::transform(M.template begin_f(), M.template end_f(), begin_f(), std::bind1st(op, tmp)); } else if (M.size() == 1) { // nXm += 1x1 std::transform(begin_f(), end_f(), begin_f(), std::bind2nd(op, M(0))); } else { // nXm += nXm std::transform(begin_f(), end_f(), M.template begin_f(), begin_f(), op); } return *this; } public: /*! \brief Add another Matrix to this Matrix. * * This operator sums this Matrix with another and places the * result into this Matrix. The two matrices must have the same * dimensions or one of the matrices must be 1x1. * * \param M The Matrix to add to this one. 
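 *
 * For instance (a minimal sketch, assuming the fill constructor
 * Matrix(rows, cols, fill, fill_value) and the default Matrix<>
 * typedef):
 * \code
 * Matrix<> A(2, 2, true, 1.0);   // 2x2 matrix of 1s
 * Matrix<> B(2, 2, true, 3.0);   // 2x2 matrix of 3s
 * A += B;                        // every element of A is now 4
 * A += Matrix<>(10.0);           // 1x1 case: adds 10 to every element
 * \endcode
 * The last line exercises the 1x1 special case handled by the
 * element-wise assignment helper above; passing the bare scalar
 * 10.0 instead dispatches to operator+=(T_type) with the same
 * result.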
* * \see operator+=(T_type) * \see operator-=(const Matrix &) * \see operator%=(const Matrix &) * \see operator/=(const Matrix &) * \see operator^=(const Matrix &) * \see operator*=(const Matrix &) * \see kronecker(const Matrix &) * * \throw scythe_conformation_error (Level 1) * \throw scythe_alloc_error (Level 1) */ template inline Matrix& operator+= (const Matrix &M) { return elementWiseOperatorAssignment(M, std::plus ()); } /*! \brief Add a scalar to this Matrix. * * This operator sums each element of this Matrix with the * scalar \a x and places the result into this Matrix. * * \param x The scalar to add to each element. * * \see operator+=(const Matrix &) * \see operator-=(T_type) * \see operator%=(T_type) * \see operator/=(T_type) * \see operator^=(T_type) * \see kronecker(T_type) * * \throw scythe_conformation_error (Level 1) */ inline Matrix& operator+= (T_type x) { return elementWiseOperatorAssignment(Matrix(x), std::plus ()); } /*! \brief Subtract another Matrix from this Matrix. * * This operator subtracts another Matrix from this one and * places the result into this Matrix. The two matrices must * have the same dimensions or one of the matrices must be 1x1. * * \param M The Matrix to subtract from this one. * * \see operator-=(T_type) * \see operator+=(const Matrix &) * \see operator%=(const Matrix &) * \see operator/=(const Matrix &) * \see operator^=(const Matrix &) * \see operator*=(const Matrix &) * \see kronecker(const Matrix &) * * \throw scythe_conformation_error (Level 1) * \throw scythe_alloc_error (Level 1) */ template inline Matrix& operator-= (const Matrix &M) { return elementWiseOperatorAssignment(M, std::minus ()); } /*! \brief Subtract a scalar from this Matrix. * * This operator subtracts \a x from each element of this * Matrix and places the result into this Matrix. * * \param x The scalar to subtract from each element. * * \see operator-=(const Matrix &) * \see operator+=(T_type) * \see operator%=(T_type) * \see operator/=(T_type) * \see operator^=(T_type) * \see kronecker(T_type) * * \throw scythe_conformation_error (Level 1) */ inline Matrix& operator-= (T_type x) { return elementWiseOperatorAssignment(Matrix(x), std::minus ()); } /*! \brief Multiply the elements of this Matrix with another's. * * This operator multiplies the elements of this Matrix with * another's and places the result into this Matrix. The two * matrices must have the same dimensions, or one of the * matrices must be 1x1. * * This operator performs element-by-element multiplication * (calculates the Hadamard product), not conventional matrix * multiplication. * * \param M The Matrix to multiply with this one. * * \see operator%=(T_type) * \see operator+=(const Matrix &) * \see operator-=(const Matrix &) * \see operator/=(const Matrix &) * \see operator^=(const Matrix &) * \see operator*=(const Matrix &) * \see kronecker(const Matrix &) * * \throw scythe_conformation_error (Level 1) * \throw scythe_alloc_error (Level 1) */ template inline Matrix& operator%= (const Matrix &M) { return elementWiseOperatorAssignment(M, std::multiplies ()); } /*! \brief Multiply this Matrix by a scalar. * * This operator multiplies each element of this * Matrix with \a x and places the result into this Matrix. * * \param x The scalar to multiply each element by. 
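 *
 * For example (a minimal sketch, assuming the fill constructor
 * Matrix(rows, cols, fill, fill_value)):
 * \code
 * Matrix<> A(2, 3, true, 2.0);   // 2x3 matrix of 2s
 * A %= 0.5;                      // every element becomes 1
 * \endcode
 * As noted below, operator*=(T_type) and kronecker(T_type) reduce
 * to this same element-by-element scaling.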
* * \see operator%=(const Matrix &) * \see operator+=(T_type) * \see operator-=(T_type) * \see operator/=(T_type) * \see operator^=(T_type) * \see kronecker(T_type) * * \throw scythe_conformation_error (Level 1) */ inline Matrix& operator%= (T_type x) { return elementWiseOperatorAssignment(Matrix(x), std::multiplies ()); } /*! \brief Divide the elements of this Matrix by another's. * * This operator divides the elements of this Matrix by * another's and places the result into this Matrix. The two * matrices must have the same dimensions, or one of the * Matrices must be 1x1. * * \param M The Matrix to divide this one by. * * \see operator/=(T_type) * \see operator+=(const Matrix &) * \see operator-=(const Matrix &) * \see operator%=(const Matrix &) * \see operator^=(const Matrix &) * \see operator*=(const Matrix &) * \see kronecker(const Matrix &) * * \throw scythe_conformation_error (Level 1) * \throw scythe_alloc_error (Level 1) */ template inline Matrix& operator/= (const Matrix &M) { return elementWiseOperatorAssignment(M, std::divides()); } /*! \brief Divide this Matrix by a scalar. * * This operator divides each element of this * Matrix by \a x and places the result into this Matrix. * * \param x The scalar to divide each element by. * * \see operator/=(const Matrix &) * \see operator+=(T_type) * \see operator-=(T_type) * \see operator%=(T_type) * \see operator^=(T_type) * \see kronecker(T_type) * * \throw scythe_conformation_error (Level 1) */ inline Matrix& operator/= (T_type x) { return elementWiseOperatorAssignment(Matrix(x), std::divides ()); } /*! \brief Exponentiate the elements of this Matrix by another's. * * This operator exponentiates the elements of this Matrix by * another's and places the result into this Matrix. The two * matrices must have the same dimensions, or one of the * Matrices must be 1x1. * * \param M The Matrix to exponentiate this one by. * * \see operator^=(T_type) * \see operator+=(const Matrix &) * \see operator-=(const Matrix &) * \see operator%=(const Matrix &) * \see operator^=(const Matrix &) * \see operator*=(const Matrix &) * \see kronecker(const Matrix &) * * \throw scythe_conformation_error (Level 1) * \throw scythe_alloc_error (Level 1) */ template inline Matrix& operator^= (const Matrix &M) { return elementWiseOperatorAssignment(M, exponentiate()); } /*! \brief Exponentiate this Matrix by a scalar. * * This operator exponentiates each element of this * Matrix by \a x and places the result into this Matrix. * * \param x The scalar to exponentiate each element by. * * \see operator^=(const Matrix &) * \see operator+=(T_type) * \see operator-=(T_type) * \see operator%=(T_type) * \see operator/=(T_type) * \see kronecker(T_type) * * \throw scythe_conformation_error (Level 1) */ inline Matrix& operator^= (T_type x) { return elementWiseOperatorAssignment(Matrix(x), exponentiate ()); } /* Matrix mult always disengages views because it generally * requires a resize. We force a disengage in the one place it * isn't absolutely necessary(this->size()==1), for consistency. */ /*! \brief Multiply this Matrix by another. * * This operator multiplies this Matrix by another and places * the result into this Matrix. The two matrices must conform; * this Matrix must have as many columns as the right hand side * Matrix has rows. * * Matrix multiplication always causes a Matrix to detach() from * its current view, because it generally requires a resize(). 
* Even when it is not absolutely necessary to detach() the * Matrix, this function will do so to maintain consistency. * * Scythe will use LAPACK/BLAS routines to multiply concrete * column-major matrices of double-precision floating point * numbers if LAPACK/BLAS is available and you compile your * program with the SCYTHE_LAPACK flag enabled. * * \param M The Matrix to multiply this one by. * * \see operator*=(T_type) * \see operator+=(const Matrix &) * \see operator-=(const Matrix &) * \see operator%=(const Matrix &) * \see operator/=(const Matrix &) * \see operator^=(const Matrix &) * \see kronecker(const Matrix &) * * \throw scythe_conformation_error (Level 1) * \throw scythe_alloc_error (Level 1) */ template Matrix& operator*= (const Matrix& M) { /* Farm out the work to the plain old * operator and make this * matrix a reference (the only reference) to the result. We * always have to create a new matrix here, so there is no * speed-up from using *=. */ /* This saves a copy over * *this = (*this) * M; * if we're concrete */ Matrix res = (*this) * M; this->referenceOther(res); this->mimic(res); return *this; } /*! \brief Multiply this Matrix by a scalar. * * This operator multiplies each element of this * Matrix with \a x and places the result into this Matrix. * * \note This method is identical in behavior to * operator%=(T_type). It also slightly overgeneralizes matrix * multiplication but makes life easy on the user by allowing * the matrix multiplication operator to work for basic scalar * multiplications. * * \param x The scalar to multiply each element by. * * \see operator*=(const Matrix &) * \see operator+=(T_type) * \see operator-=(T_type) * \see operator%=(T_type) * \see operator/=(T_type) * \see operator^=(T_type) * \see kronecker(T_type) * * \throw scythe_conformation_error (Level 1) */ inline Matrix& operator*= (T_type x) { return elementWiseOperatorAssignment(Matrix(x), std::multiplies ()); } /*! \brief Kronecker multiply this Matrix by another. * * This method computes the Kronecker product of this Matrix and * \a M, and sets the value of this Matrix to the result. * * Kronecker multiplication always causes a Matrix to detach() * from its current view, because it generally requires a * resize(). * * \note This method would have been implemented as an operator * if we had any reasonable operator choices left. * * \param M The Matrix to Kronecker multiply this one by. * * \see kronecker(T_type) * \see operator+=(const Matrix &) * \see operator-=(const Matrix &) * \see operator%=(const Matrix &) * \see operator/=(const Matrix &) * \see operator^=(const Matrix &) * \see operator*=(const Matrix &) * * \throw scythe_alloc_error (Level 1) */ template Matrix& kronecker (const Matrix& M) { uint totalrows = Base::rows() * M.rows(); uint totalcols = Base::cols() * M.cols(); // Even if we're a view, make this guy concrete. Matrix res(totalrows, totalcols, false); /* TODO: This is the most natural way to write this in scythe * (with a small optimization based on ordering) but probably * not the fastest because it uses submatrix assignments. * Optimizations should be considered.
*/ forward_iterator it = begin_f(); if (ORDER == Row) { for (uint row = 0; row < totalrows; row += M.rows()) { for (uint col = 0; col < totalcols; col += M.cols()){ res(row, col, row + M.rows() - 1, col + M.cols() - 1) = (*it) * M; it++; } } } else { for (uint col = 0; col < totalcols; col += M.cols()) { for (uint row = 0; row < totalrows; row += M.rows()){ res(row, col, row + M.rows() - 1, col + M.cols() - 1) = (*it) * M; it++; } } } this->referenceOther(res); this->mimic(res); return *this; } /*! \brief Kronecker multiply this Matrix by a scalar. * * This method Kronecker multiplies this Matrix with some scalar, * \a x. This is a degenerate case of Kronecker * multiplication, simply multiplying every element in the * Matrix by \a x. * * \note This method is identical in behavior to * operator%=(T_type) and operator*=(T_type). * * \param x The scalar to Kronecker multiply this Matrix by. * * \see kronecker(const Matrix &) * \see operator+=(T_type) * \see operator-=(T_type) * \see operator%=(T_type) * \see operator/=(T_type) * \see operator^=(T_type) * \see operator*=(T_type) * */ inline Matrix& kronecker (T_type x) { return elementWiseOperatorAssignment(Matrix(x), std::multiplies ()); } /* Logical assignment operators */ /*! \brief Logically AND this Matrix with another. * * This operator computes the element-wise logical AND of this * Matrix and another and places the result into this Matrix. * That is, after the operation, an element in this Matrix will * evaluate to true (or the type-specific analog of true, * typically 1) iff the corresponding element previously * residing in this Matrix and the corresponding element in \a M * both evaluate to true. The two matrices must have the same * dimensions, or one of the Matrices must be 1x1. * * \param M The Matrix to AND with this one. * * \see operator&=(T_type) * \see operator|=(const Matrix &) * * \throw scythe_conformation_error (Level 1) * \throw scythe_alloc_error (Level 1) */ template inline Matrix& operator&= (const Matrix &M) { return elementWiseOperatorAssignment(M, std::logical_and()); } /*! \brief Logically AND this Matrix with a scalar. * * This operator computes the element-wise logical AND of this * Matrix and a scalar. That is, after the operation, an * element in this Matrix will evaluate to true (or the * type-specific analog of true, typically 1) iff the * corresponding element previously residing in this Matrix and * \a x both evaluate to true. * * \param x The scalar to AND with each element. * * \see operator&=(const Matrix &) * \see operator|=(T_type) * * \throw scythe_conformation_error (Level 1) */ inline Matrix& operator&= (T_type x) { return elementWiseOperatorAssignment(Matrix(x), std::logical_and ()); } /*! \brief Logically OR this Matrix with another. * * This operator computes the element-wise logical OR of this * Matrix and another and places the result into this Matrix. * That is, after the operation, an element in this Matrix will * evaluate to true (or the type-specific analog of true, * typically 1) if the corresponding element previously * residing in this Matrix or the corresponding element in \a M * evaluate to true. The two matrices must have the same * dimensions, or one of the Matrices must be 1x1. * * \param M The Matrix to OR with this one. 
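 *
 * A small sketch (assuming the fill constructor
 * Matrix(rows, cols, fill, fill_value); nonzero values play the
 * role of logical true):
 * \code
 * Matrix<> A(1, 4, true, 0.0);
 * A(0, 1) = 1; A(0, 3) = 1;      // A = [0 1 0 1]
 * Matrix<> B(1, 4, true, 0.0);
 * B(0, 2) = 1; B(0, 3) = 1;      // B = [0 0 1 1]
 * A |= B;                        // A = [0 1 1 1]
 * A &= 0.0;                      // A = [0 0 0 0]
 * \endcode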
* * \see operator|=(T_type) * \see operator&=(const Matrix &) * * \throw scythe_conformation_error (Level 1) * \throw scythe_alloc_error (Level 1) */ template inline Matrix& operator|= (const Matrix &M) { return elementWiseOperatorAssignment(M, std::logical_or()); } /*! \brief Logically OR this Matrix with a scalar. * * This operator computes the element-wise logical OR of this * Matrix and a scalar. That is, after the operation, an * element in this Matrix will evaluate to true (or the * type-specific analog of true, typically 1) if the * corresponding element previously residing in this Matrix or * \a x evaluate to true. * * \param x The scalar to OR with each element. * * \see operator|=(const Matrix &) * \see operator&=(T_type) * * \throw scythe_conformation_error (Level 1) */ inline Matrix& operator|= (T_type x) { return elementWiseOperatorAssignment(Matrix(x), std::logical_or ()); } /**** MODIFIERS ****/ /* Resize a matrix view. resize() takes dimensions as * parameters while resize2Match() takes a matrix reference and * uses its dimensions. */ /*! \brief Resize or reshape a Matrix. * * This modifier resizes this Matrix to the given dimensions. * The contents of a Matrix after a resize are undefined (junk) unless the * preserve flag is set to true. In this case, the old contents * of the Matrix remain at the same indices they occupied in the * old Matrix. Any excess capacity is junk. * * Resizing a Matrix ALWAYS disengages it from its current view, * even if the dimensions passed to resize are the same as the * current Matrix's dimensions. Resized matrices point to new, * uninitialized data blocks (technically, the Matrix might * recycle its current block if it is the only Matrix viewing * the block, but callers cannot rely on this). It is important * to realize that concrete matrices behave just like views in * this respect. Any views to a concrete Matrix will be * pointing to a different underlying data block than the * concrete Matrix after the concrete Matrix is resized. * * \param rows The number of rows in the resized Matrix. * \param cols The number of columns in the resized Matrix. * \param preserve Whether or not to retain the current contents * of the Matrix. * * \see resize2Match(const Matrix&, bool) * \see detach() * * \throw scythe_alloc_error (Level 1) */ void resize (uint rows, uint cols, bool preserve=false) { if (preserve) { /* TODO Optimize this case. It is rather clunky. */ Matrix tmp(*this); this->referenceNew(rows * cols); Base::resize(rows, cols); uint min_cols = std::min(Base::cols(), tmp.cols()); uint min_rows = std::min(Base::rows(), tmp.rows()); // TODO use iterators here perhaps if (ORDER == Col) { for (uint j = 0; j < min_cols; ++j) for (uint i = 0; i < min_rows; ++i) (*this)(i, j) = tmp(i, j); } else { for (uint i = 0; i < min_rows; ++i) for (uint j = 0; j < min_cols; ++j) (*this)(i, j) = tmp(i, j); } } else { this->referenceNew(rows * cols); Base::resize(rows, cols); } } /*! \brief Resize a Matrix to match another. * * This modifier resizes this Matrix to match the dimensions of * the argument. In all other respects, it behaves just like * resize(). * * \param M The Matrix providing the dimensions to mimic. * \param preserve Whether or not to retain the current contents * of the Matrix. * * \see resize(uint, uint, bool) * \see detach() * * \throw scythe_alloc_error (Level 1) */ template inline void resize2Match(const Matrix &M, bool preserve=false) { resize(M.rows(), M.cols(), preserve); } /* Copy this matrix to a new datablock in contiguous storage */ /*!
\brief Copy the contents of this Matrix to a new DataBlock. * * The detach method copies the data viewed by this Matrix to a * fresh DataBlock, detaches this Matrix from its old block and * attaches it to the new block. The old DataBlock will be * deallocated if no other matrices view the block after this * one detaches. * * This method can be used to ensure that this Matrix is the * sole viewer of its DataBlock. It also ensures that the * underlying data is stored contiguously in memory. * * \see copy() * \see resize(uint, uint, bool) * * \throw scythe_alloc_error (Level 1) */ inline void detach () { resize2Match(*this, true); } /* Swap operator: sort of a dual copy constructor. Part of the * standard STL container interface. We only support swaps * between matrices of like order and style because things get * hairy otherwise. The behavior of this for concrete matrices * is a little hairy in any case. * * Matrix<> A, B; * ... // fill in A and B * Matrix v1 = A(_, 1); * A.swap(B); * Matrix v2 = B(_, 1); * * v1 == v2; // evaluates to true * */ /*! \brief Swap this Matrix with another. * * This modifier is much like a dual copy constructor and is * part of the Standard Template Library (STL) * interface for container objects. It is only possible to swap * two matrices of the same matrix_order and matrix_style. When * two matrices are swapped, they trade their underlying * DataBlock and dimensions. This behavior is perfectly natural * for views, but may seem somewhat surprising for concrete * matrices. When two concrete matrices are swapped, any views * that referenced either matrix's DataBlock will reference the * other matrix's DataBlock after the swap. * * \param M - The Matrix to swap with. */ inline void swap (Matrix &M) { Matrix tmp = *this; /* These are just reference() calls, but we do this explicitly * here to avoid throwing errors on the concrete case. While * having a concrete matrix reference another matrix is * generally a bad idea, it is safe when the referenced matrix * is concrete, has the same order, and gets deallocated (or * redirected at another block) like here. */ this->referenceOther(M); this->mimic(M); M.referenceOther(tmp); M.mimic(tmp); } /**** ACCESSORS ****/ /* Accessors that don't access the data itself (that don't rely * on T_type) are in Matrix_base */ /* Are all the elements of this Matrix == 0 */ /*! \brief Returns true if every element in this Matrix equals 0. * * The return value of this method is undefined for null * matrices. * * \see empty() * \see isNull() */ inline bool isZero () const { const_forward_iterator last = end_f(); return (last == std::find_if(begin_f(), last, std::bind1st(std::not_equal_to (), 0))); } /* M(i,j) == 0 when i != j */ /*! \brief Returns true if this Matrix is square and its * off-diagonal elements are all 0. * * The return value of this method is undefined for null * matrices. * * \see isSquare() * \see isIdentity() * \see isLowerTriangular() * \see isUpperTriangular() */ inline bool isDiagonal() const { if (! Base::isSquare()) return false; /* Always travel in order. It would be nice to use iterators * here, but we'd need to take views and their iterators are * too slow at the moment. * TODO redo with views and iterators if optimized.
*/ if (ORDER == Row) { for (uint i = 0; i < Base::rows(); ++i) { for (uint j = 0; j < Base::cols(); ++j) { if (i != j && (*this)(i, j) != 0) return false; } } } else { // ORDER == Col for (uint j = 0; j < Base::cols(); ++j) { for (uint i = 0; i < Base::rows(); ++i) { if (i != j && (*this)(i, j) != 0) return false; } } } return true; } /* M(I, j) == 0 when i!= j and 1 when i == j */ /*! \brief Returns true if this Matrix is diagonal and its * diagonal elements are all 1s. * * The return value of this method is undefined for null * matrices. * * \see isSquare() * \see isDiagonal() * \see isLowerTriangular() * \see isUpperTriangular() */ inline bool isIdentity () const { if (! Base::isSquare()) return false; // TODO redo with views and iterator if optimized if (ORDER == Row) { for (uint i = 0; i < Base::rows(); ++i) { for (uint j = 0; j < Base::cols(); ++j) { if (i != j) { if ((*this)(i,j) != 0) return false; } else if ((*this)(i,j) != 1) return false; } } } else { // ORDER == Col for (uint j = 0; j < Base::rows(); ++j) { for (uint i = 0; i < Base::cols(); ++i) { if (i != j) { if ((*this)(i,j) != 0) return false; } else if ((*this)(i,j) != 1) return false; } } } return true; } /* M(i,j) == 0 when i < j */ /*! \brief Returns true if all of this Matrix's above-diagonal * elements equal 0. * * The return value of this method is undefined for null * matrices. * * \see isDiagonal() * \see isUpperTriangular */ inline bool isLowerTriangular () const { if (! Base::isSquare()) return false; // TODO view+iterator if optimized if (ORDER == Row) { for (uint i = 0; i < Base::rows(); ++i) for (uint j = i + 1; j < Base::cols(); ++j) if ((*this)(i,j) != 0) return false; } else { for (uint j = 0; j < Base::cols(); ++j) for (uint i = 0; i < j; ++i) if ((*this)(i,j) != 0) return false; } return true; } /* M(i,j) == 0 when i > j */ /*! \brief Returns true if all of this Matrix's below-diagonal * elements equal 0. * * The return value of this method is undefined for null * matrices. * * \see isDiagonal() * \see isLowerTriangular */ inline bool isUpperTriangular () const { if (! Base::isSquare()) return false; // TODO view+iterator if optimized if (ORDER == Row) { for (uint i = 0; i < Base::rows(); ++i) for (uint j = 0; j < i; ++j) if ((*this)(i,j) != 0) return false; } else { for (uint j = 0; j < Base::cols(); ++j) for (uint i = j + 1; i < Base::rows(); ++i) if ((*this)(i,j) != 0) return false; } return true; } /*! \brief Returns true if this Matrix is square and has no * inverse. * * \see isSquare() * \see operator~() */ inline bool isSingular() const { if (! Base::isSquare() || Base::isNull()) return false; if ((~(*this)) == (T_type) 0) return true; return false; } /* Square and t(M) = M(inv(M) * t(M) == I */ /*! Returns true if this Matrix is equal to its transpose. * * A Matrix is symmetric when \f$M^T = M\f$ or, equivalently, * \f$M^{-1} M^T = I\f$. In simple terms, this means that the * (i,j)th element of the Matrix is equal to the (j, i)th * element for all i, j. * * \see isSkewSymmetric() */ inline bool isSymmetric () const { if (! Base::isSquare()) return false; // No point in order optimizing here for (uint i = 1; i < Base::rows(); ++i) for (uint j = 0; j < i; ++j) if ((*this)(i, j) != (*this)(j, i)) return false; return true; } /* The matrix is square and t(A) = -A */ /*! Returns true if this Matrix is equal to its negated * transpose. * * A Matrix is skew symmetric when \f$-M^T = M\f$ or, * equivalently, \f$-M^{-1} M^T = I\f$. 
In simple terms, this * means that the (i, j)th element of the Matrix is equal to the * negation of the (j, i)th element for all i, j. * * \see isSymmetric() */ inline bool isSkewSymmetric () const { if (! Base::isSquare()) return false; // No point in order optimizing here for (uint i = 1; i < Base::rows(); ++i) for (uint j = 0; j < i; ++j) if ((*this)(i, j) != 0 - (*this)(j, i)) return false; return true; } /*! \brief Test Matrix equality. * * This method returns true if all of \a M's elements are equal * to those in this Matrix. To be equal, two matrices must * be of the same dimension. Matrices with differing * matrix_order or matrix_style may equal one another. * * \param M The Matrix to test equality with. * * \see equals(T_type x) const * \see operator==(const Matrix& lhs, const Matrix& rhs) */ template inline bool equals(const Matrix& M) const { if (data_ == M.getArray() && STYLE == Concrete && S == Concrete) return true; else if (data_ == M.getArray() && Base::rows() == M.rows() && Base::cols() == M.cols()) { return true; } else if (this->isNull() && M.isNull()) return true; else if (Base::rows() != M.rows() || Base::cols() != M.cols()) return false; return std::equal(begin_f(), end_f(), M.template begin_f()); } /*! \brief Test Matrix equality. * * This method returns true if all of the elements in this * Matrix are equal to \a x. * * \param x The scalar value to test equality with. * * \see equals(const Matrix& M) const * \see operator==(const Matrix& lhs, const Matrix& rhs) */ inline bool equals(T_type x) const { const_forward_iterator last = end_f(); return (last == std::find_if(begin_f(), last, std::bind1st(std::not_equal_to (), x))); } /**** OTHER UTILITIES ****/ /*! \brief Returns a pointer to this Matrix's internal data * array. * * This method returns a pointer to the internal data array * contained within the DataBlock that this Matrix references. * * \warning It is generally a bad idea to use this method. We * provide it only for convenience. Please note that, when * working with views, the internal data array may not even be * stored in this Matrix's matrix_order. Furthermore, data * encapsulated by a view will generally not be contiguous * within the data array. If this is a concrete Matrix, * getArray() will always return a pointer to a data array * ordered like this Matrix and in contiguous storage. */ inline T_type* getArray () const { return data_; } /*! \brief Saves a Matrix to disk. * * This method writes the contents of this Matrix to the file * specified by \a path. The user can control file overwriting * with \a flag. The parameter \a header controls the output * style. When one sets \a header to true the Matrix is written * as a space-separated list of values, with the number of rows * and columns placed in the first two positions in the list. * If header is set to false, the file is written as a * space-separated ASCII block, with end-of-lines indicating ends of * rows. The Matrix is always written out in row-major order. * * \param path The name of the file to write. * \param flag Overwrite flag taking values 'a': append, 'o': * overwrite, or 'n': do not replace. * \param header Boolean value indicating whether to write as a * flat list with dimension header or as a rectangular block.
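 *
 * For instance (a minimal sketch; the file name is purely
 * illustrative):
 * \code
 * Matrix<> A(2, 2, true, 1.0);
 * A.save("A.txt", 'o', true);   // overwrite; flat list with "2 2" header
 * A.save("A.txt", 'a');         // append the same data as a 2x2 block
 * \endcode
 * With \a header = true the first call writes "2 2 1 1 1 1" on a
 * single line; the second call appends two rows of two values each.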
* * \see Matrix(const std::string& file) * \see operator>>(std::istream& is, Matrix& M) * * \throw scythe_invalid_arg (Level 0) * \throw scythe_file_error (Level 0) */ inline void save (const std::string& path, const char flag = 'n', const bool header = false) const { std::ofstream out; if (flag == 'n') { std::fstream temp(path.c_str(), std::ios::in); if (! temp) out.open(path.c_str(), std::ios::out); else { temp.close(); SCYTHE_THROW(scythe_file_error, "Cannot overwrite file " << path << " when flag = n"); } } else if (flag == 'o') out.open(path.c_str(), std::ios::out | std::ios::trunc); else if (flag == 'a') out.open(path.c_str(), std::ios::out | std::ios::app); else SCYTHE_THROW(scythe_invalid_arg, "Invalid flag: " << flag); if (! out) SCYTHE_THROW(scythe_file_error, "Could not open file " << path); if (header) { out << Base::rows() << " " << Base::cols(); for (uint i = 0; i < Base::size(); ++i) out << " " << (*this)[i]; out << std::endl; } else { for (uint i = 0; i < Base::rows(); ++i) { for (uint j = 0; j < Base::cols(); ++j) out << (*this)(i,j) << " "; out << "\n"; } } out.close(); } /**** ITERATOR FACTORIES ****/ /* TODO Write some cpp macro code to reduce this to something * manageable. */ /* Random Access Iterator Factories */ /* Generalized versions */ /*! \brief Get an iterator pointing to the start of a Matrix. * * This is a factory that returns a random_access_iterator that * points to the first element in the given Matrix. * * This is a general template of this function. It allows the * user to generate iterators that iterate over the given Matrix * in any order through an explicit template instantiation. */ template inline matrix_random_access_iterator begin () { return matrix_random_access_iterator(*this); } /*! \brief Get an iterator pointing to the start of a Matrix. * * This is a factory that returns a * const_random_access_iterator that * points to the first element in the given Matrix. * * This is a general template of this function. It allows the * user to generate iterators that iterate over the given Matrix * in any order through an explicit template instantiation. */ template inline const_matrix_random_access_iterator begin() const { return const_matrix_random_access_iterator (*this); } /*! \brief Get an iterator pointing to the end of a Matrix. * * This is a factory that returns a * matrix_random_access_iterator that * points to just after the last element in the given Matrix. * * This is a general template of this function. It allows the * user to generate iterators that iterate over the given Matrix * in any order through an explicit template instantiation. */ template inline matrix_random_access_iterator end () { return (begin() + Base::size()); } /*! \brief Get an iterator pointing to the end of a Matrix. * * This is a factory that returns an * const_matrix_random_access_iterator that * points to just after the last element in the given Matrix. * * This is a general template of this function. It allows the * user to generate iterators that iterate over the given Matrix * in any order through an explicit template instantiation. */ template inline const_matrix_random_access_iterator end () const { return (begin() + Base::size()); } /*! \brief Get a reverse iterator pointing to the end of a Matrix. * * This is a factory that returns a reverse * matrix_random_access_iterator that * points to the last element in the given Matrix. * * This is a general template of this function. 
It allows the * user to generate iterators that iterate over the given Matrix * in any order through an explicit template instantiation. */ template inline std::reverse_iterator > rbegin() { return std::reverse_iterator > (end()); } /*! \brief Get a reverse iterator pointing to the end of a Matrix. * * This is a factory that returns a reverse * const_matrix_random_access_iterator that points to the last * element in the given Matrix. * * This is a general template of this function. It allows the * user to generate iterators that iterate over the given Matrix * in any order through an explicit template instantiation. */ template inline std::reverse_iterator > rbegin() const { return std::reverse_iterator > (end()); } /*! \brief Get a reverse iterator pointing to the start of a Matrix. * * This is a factory that returns a reverse * matrix_random_access_iterator * that points to the just before the first element in the given * Matrix. * * This is a general template of this function. It allows the * user to generate iterators that iterate over the given Matrix * in any order through an explicit template instantiation. */ template inline std::reverse_iterator > rend() { return std::reverse_iterator > (begin()); } /*! \brief Get a reverse iterator pointing to the start of a Matrix. * * This is a factory that returns a reverse * const_matrix_random_access_iterator that points to the just * before the first element in the given Matrix. * * This is a general template of this function. It allows the * user to generate iterators that iterate over the given Matrix * in any order through an explicit template instantiation. */ template inline std::reverse_iterator > rend() const { return std::reverse_iterator > (begin()); } /* Specific versions --- the generalized versions force you * choose the ordering explicitly. These definitions set up * in-order iteration as a default */ /*! \brief Get an iterator pointing to the start of a Matrix. * * This is a factory that returns a Matrix::iterator that * points to the first element in the given Matrix. * * This is the default template of this function. It allows the * user to generate iterators of a given Matrix without * explicitly stating the order of iteration. The iterator * returned by this function always iterates in the same order * as the given Matrix' matrix_order. */ inline iterator begin () { return iterator(*this); } /*! \brief Get an iterator pointing to the start of a Matrix. * * This is a factory that returns a Matrix::const_iterator that * points to the first element in the given Matrix. * * This is the default template of this function. It allows the * user to generate iterators of a given Matrix without * explicitly stating the order of iteration. The iterator * returned by this function always iterates in the same order * as the given Matrix' matrix_order. */ inline const_iterator begin() const { return const_iterator (*this); } /*! \brief Get an iterator pointing to the end of a Matrix. * * This is a factory that returns an Matrix::iterator that * points to just after the last element in the given Matrix. * * This is the default template of this function. It allows the * user to generate iterators of a given Matrix without * explicitly stating the order of iteration. The iterator * returned by this function always iterates in the same order * as the given Matrix' matrix_order. */ inline iterator end () { return (begin() + Base::size()); } /*! \brief Get an iterator pointing to the end of a Matrix. 
* * This is a factory that returns an Matrix::const_iterator that * points to just after the last element in the given Matrix. * * This is the default template of this function. It allows the * user to generate iterators of a given Matrix without * explicitly stating the order of iteration. The iterator * returned by this function always iterates in the same order * as the given Matrix' matrix_order. */ inline const_iterator end () const { return (begin() + Base::size()); } /*! \brief Get a reverse iterator pointing to the end of a Matrix. * * This is a factory that returns a Matrix::reverse_iterator that * points to the last element in the given Matrix. * * This is the default template of this function. It allows the * user to generate iterators of a given Matrix without * explicitly stating the order of iteration. The iterator * returned by this function always iterates in the same order * as the given Matrix' matrix_order. */ inline reverse_iterator rbegin() { return reverse_iterator (end()); } /*! \brief Get a reverse iterator pointing to the end of a Matrix. * * This is a factory that returns a * Matrix::const_reverse_iterator that points to the last * element in the given Matrix. * * This is the default template of this function. It allows the * user to generate iterators of a given Matrix without * explicitly stating the order of iteration. The iterator * returned by this function always iterates in the same order * as the given Matrix' matrix_order. */ inline const_reverse_iterator rbegin() const { return const_reverse_iterator (end()); } /*! \brief Get a reverse iterator pointing to the start of a Matrix. * * This is a factory that returns a Matrix::reverse_iterator * that points to the just before the first element in the given * Matrix. * * This is the default template of this function. It allows the * user to generate iterators of a given Matrix without * explicitly stating the order of iteration. The iterator * returned by this function always iterates in the same order * as the given Matrix' matrix_order. */ inline reverse_iterator rend() { return reverse_iterator (begin()); } /*! \brief Get a reverse iterator pointing to the start of a Matrix. * * This is a factory that returns a Matrix::const_reverse_iterator * that points to the just before the first element in the given * Matrix. * * This is the default template of this function. It allows the * user to generate iterators of a given Matrix without * explicitly stating the order of iteration. The iterator * returned by this function always iterates in the same order * as the given Matrix' matrix_order. */ inline const_reverse_iterator rend() const { return const_reverse_iterator (begin()); } /* Forward Iterator Factories */ /* Generalized versions */ /*! \brief Get an iterator pointing to the start of a Matrix. * * This is a factory that returns a matrix_forward_iterator that * points to the first element in the given Matrix. * * This is a general template of this function. It allows the * user to generate iterators that iterate over the given Matrix * in any order through an explicit template instantiation. */ template inline matrix_forward_iterator begin_f () { return matrix_forward_iterator(*this); } /*! \brief Get an iterator pointing to the start of a Matrix. * * This is a factory that returns a * const_matrix_forward_iterator that * points to the first element in the given Matrix. * * This is a general template of this function. 
It allows the * user to generate iterators that iterate over the given Matrix * in any order through an explicit template instantiation. */ template inline const_matrix_forward_iterator begin_f () const { return const_matrix_forward_iterator (*this); } /*! \brief Get an iterator pointing to the end of a Matrix. * * This is a factory that returns an matrix_forward_iterator that * points to just after the last element in the given Matrix. * * This is a general template of this function. It allows the * user to generate iterators that iterate over the given Matrix * in any order through an explicit template instantiation. */ template inline matrix_forward_iterator end_f () { return (begin_f().set_end()); } /*! \brief Get an iterator pointing to the end of a Matrix. * * This is a factory that returns an * const_matrix_forward_iterator that points to just after the * last element in the given Matrix. * * This is a general template of this function. It allows the * user to generate iterators that iterate over the given Matrix * in any order through an explicit template instantiation. */ template inline const_matrix_forward_iterator end_f () const { return (begin_f().set_end()); } /* Default Versions */ /*! \brief Get an iterator pointing to the start of a Matrix. * * This is a factory that returns a Matrix::forward_iterator that * points to the first element in the given Matrix. * * This is the default template of this function. It allows the * user to generate iterators of a given Matrix without * explicitly stating the order of iteration. The iterator * returned by this function always iterates in the same order * as the given Matrix' matrix_order. */ inline forward_iterator begin_f () { return forward_iterator(*this); } /*! \brief Get an iterator pointing to the start of a Matrix. * * This is a factory that returns a * Matrix::const_forward_iterator that points to the first * element in the given Matrix. * * This is the default template of this function. It allows the * user to generate iterators of a given Matrix without * explicitly stating the order of iteration. The iterator * returned by this function always iterates in the same order * as the given Matrix' matrix_order. */ inline const_forward_iterator begin_f () const { return const_forward_iterator (*this); } /*! \brief Get an iterator pointing to the end of a Matrix. * * This is a factory that returns an Matrix::forward_iterator that * points to just after the last element in the given Matrix. * * This is the default template of this function. It allows the * user to generate iterators of a given Matrix without * explicitly stating the order of iteration. The iterator * returned by this function always iterates in the same order * as the given Matrix' matrix_order. */ inline forward_iterator end_f () { return (begin_f().set_end()); } /*! \brief Get an iterator pointing to the end of a Matrix. * * This is a factory that returns an * Matrix::const_forward_iterator that points to just after the * last element in the given Matrix. * * This is the default template of this function. It allows the * user to generate iterators of a given Matrix without * explicitly stating the order of iteration. The iterator * returned by this function always iterates in the same order * as the given Matrix' matrix_order. */ inline const_forward_iterator end_f () const { return (begin_f().set_end()); } /* Bidirectional Iterator Factories */ /* Generalized versions */ /*! \brief Get an iterator pointing to the start of a Matrix. 
* * This is a factory that returns a * matrix_bidirectional_iterator that * points to the first element in the given Matrix. * * This is a general template of this function. It allows the * user to generate iterators that iterate over the given Matrix * in any order through an explicit template instantiation. */ template inline matrix_bidirectional_iterator begin_bd () { return matrix_bidirectional_iterator(*this); } /*! \brief Get an iterator pointing to the start of a Matrix. * * This is a factory that returns a * const_matrix_bidirectional_iterator that points to the first * element in the given Matrix. * * This is a general template of this function. It allows the * user to generate iterators that iterate over the given Matrix * in any order through an explicit template instantiation. */ template inline const_matrix_bidirectional_iterator begin_bd () const { return const_matrix_bidirectional_iterator (*this); } /*! \brief Get an iterator pointing to the end of a Matrix. * * This is a factory that returns an * matrix_bidirectional_iterator that points to just after the * last element in the given Matrix. * * This is a general template of this function. It allows the * user to generate iterators that iterate over the given Matrix * in any order through an explicit template instantiation. */ template inline matrix_bidirectional_iterator end_bd () { return (begin_bd().set_end()); } /*! \brief Get an iterator pointing to the end of a Matrix. * * This is a factory that returns an * const_matrix_bidirectional_iterator that points to just after * the last element in the given Matrix. * * This is a general template of this function. It allows the * user to generate iterators that iterate over the given Matrix * in any order through an explicit template instantiation. */ template inline const_matrix_bidirectional_iterator end_bd () const { return (begin_bd().set_end()); } /*! \brief Get a reverse iterator pointing to the end of a Matrix. * * This is a factory that returns a reverse * matrix_bidirectional_iterator that points to the last element * in the given Matrix. * * This is a general template of this function. It allows the * user to generate iterators that iterate over the given Matrix * in any order through an explicit template instantiation. */ template inline std::reverse_iterator > rbegin_bd () { return std::reverse_iterator > (end_bd()); } /*! \brief Get a reverse iterator pointing to the end of a Matrix. * * This is a factory that returns a reverse * const_matrix_bidirectional_iterator that points to the last * element in the given Matrix. * * This is a general template of this function. It allows the * user to generate iterators that iterate over the given Matrix * in any order through an explicit template instantiation. */ template inline std::reverse_iterator > rbegin_bd () const { return std::reverse_iterator > (end_bd()); } /*! \brief Get a reverse iterator pointing to the start of a Matrix. * * This is a factory that returns a reverse * matrix_bidirectional_iterator that points to the just before * the first element in the given * Matrix. * * This is a general template of this function. It allows the * user to generate iterators that iterate over the given Matrix * in any order through an explicit template instantiation. */ template inline std::reverse_iterator > rend_bd () { return std::reverse_iterator > (begin_bd()); } /*! \brief Get a reverse iterator pointing to the start of a Matrix. 
* * This is a factory that returns a reverse * const_matrix_bidirectional_iterator that points to the just * before the first element in the given Matrix. * * This is a general template of this function. It allows the * user to generate iterators that iterate over the given Matrix * in any order through an explicit template instantiation. */ template inline std::reverse_iterator > rend_bd () const { return std::reverse_iterator > (begin_bd()); } /* Specific versions --- the generalized versions force you * choose the ordering explicitly. These definitions set up * in-order iteration as a default */ /*! \brief Get an iterator pointing to the start of a Matrix. * * This is a factory that returns a * Matrix::bidirectional_iterator that points to the first * element in the given Matrix. * * This is the default template of this function. It allows the * user to generate iterators of a given Matrix without * explicitly stating the order of iteration. The iterator * returned by this function always iterates in the same order * as the given Matrix' matrix_order. */ inline bidirectional_iterator begin_bd () { return bidirectional_iterator(*this); } /*! \brief Get an iterator pointing to the start of a Matrix. * * This is a factory that returns a * Matrix::const_bidirectional_iterator that points to the first * element in the given Matrix. * * This is the default template of this function. It allows the * user to generate iterators of a given Matrix without * explicitly stating the order of iteration. The iterator * returned by this function always iterates in the same order * as the given Matrix' matrix_order. */ inline const_bidirectional_iterator begin_bd() const { return const_bidirectional_iterator (*this); } /*! \brief Get an iterator pointing to the end of a Matrix. * * This is a factory that returns an * Matrix::bidirectional_iterator that points to just after the * last element in the given Matrix. * * This is the default template of this function. It allows the * user to generate iterators of a given Matrix without * explicitly stating the order of iteration. The iterator * returned by this function always iterates in the same order * as the given Matrix' matrix_order. */ inline bidirectional_iterator end_bd () { return (begin_bd().set_end()); } /*! \brief Get an iterator pointing to the end of a Matrix. * * This is a factory that returns an Matrix::const_bidirectional * iterator that points to just after the last element in the * given Matrix. * * This is the default template of this function. It allows the * user to generate iterators of a given Matrix without * explicitly stating the order of iteration. The iterator * returned by this function always iterates in the same order * as the given Matrix' matrix_order. */ inline const_bidirectional_iterator end_bd () const { return (begin_bd().set_end()); } /*! \brief Get a reverse iterator pointing to the end of a Matrix. * * This is a factory that returns a * Matrix::reverse_bidirectional_iterator that points to the * last element in the given Matrix. * * This is the default template of this function. It allows the * user to generate iterators of a given Matrix without * explicitly stating the order of iteration. The iterator * returned by this function always iterates in the same order * as the given Matrix' matrix_order. */ inline reverse_bidirectional_iterator rbegin_bd() { return reverse_bidirectional_iterator (end_bd()); } /*! \brief Get a reverse iterator pointing to the end of a Matrix. 
* * This is a factory that returns a * Matrix::const_reverse_bidirectional_iterator that points to * the last element in the given Matrix. * * This is the default template of this function. It allows the * user to generate iterators of a given Matrix without * explicitly stating the order of iteration. The iterator * returned by this function always iterates in the same order * as the given Matrix' matrix_order. */ inline const_reverse_bidirectional_iterator rbegin_bd () const { return const_reverse_bidirectional_iterator (end_bd()); } /*! \brief Get a reverse iterator pointing to the start of a Matrix. * * This is a factory that returns a * Matrix::reverse_bidirectional_iterator that points to just * before the first element in the given Matrix. * * This is the default template of this function. It allows the * user to generate iterators of a given Matrix without * explicitly stating the order of iteration. The iterator * returned by this function always iterates in the same order * as the given Matrix' matrix_order. */ inline reverse_bidirectional_iterator rend_bd () { return reverse_bidirectional_iterator (begin_bd()); } /*! \brief Get a reverse iterator pointing to the start of a Matrix. * * This is a factory that returns a * Matrix::const_reverse_bidirectional_iterator that points to * just before the first element in the given Matrix. * * This is the default template of this function. It allows the * user to generate iterators of a given Matrix without * explicitly stating the order of iteration. The iterator * returned by this function always iterates in the same order * as the given Matrix' matrix_order. */ inline const_reverse_bidirectional_iterator rend_bd () const { return const_reverse_bidirectional_iterator (begin_bd()); } protected: /**** INSTANCE VARIABLES ****/ /* I know the point of C++ is to force you to write 20 times * more code than should be necessary but "using" inherited ivs * is just stupid. */ using DBRef::data_; // refer to inherited data pointer directly using Base::rows_; // " # of rows directly using Base::cols_; // " # of cols directly }; // end class Matrix /**** EXTERNAL OPERATORS ****/ /* External operators include a range of binary matrix operations * such as tests for equality and arithmetic. The style * (concrete/view) of the returned matrix is that of the left-hand * side parameter by default. * * There is also a question of the ordering of the returned matrix. * We adopt the convention of returning a matrix ordered like that * of the left-hand side argument, by default. * * Whenever there is only one matrix argument (because the lhs is a * scalar) we use that matrix's order and style as the default. * * A general template version of each operator also exists and users * can coerce the return type to whatever they prefer using some * ugly syntax; ex: * * Matrix<> A; ... Matrix B = operator* * (A, A); * * In general, the matrix class copy constructor will quietly * convert whatever matrix template is returned to the type of the * matrix it is being copied into on return, but one might want to * specify the type for objects that only exist for a second (ex: * (operator*(A, A)).begin()). Also, note that the * fact that we return concrete matrices by default does not * preclude the user from taking advantage of fast view copies. It * is the template type of the object being copy-constructed that * matters---in terms of underlying implementation all matrices are * views, concrete matrices just maintain a particular policy. * * TODO Consider the best type for scalar args to these functions.
* For the most part, these will be primitives---doubles mostly. * Passing these by reference is probably less efficient than * passing by value. But, for user-defined types pass-by-reference * might be the way to go and the cost in this case will be much * higher than the value-reference trade-off for primitives. Right * now we use pass-by-reference but we might reconsider... */ /**** ARITHMETIC OPERATORS ****/ /* These macros provide templates for the basic definitions required * for all of the binary operators. Each operator requires 6 * definitions. First, a general matrix definition must be * provided. This definition can return a matrix of a different * style and order than its arguments but can only be called if its * template type is explicitly specified. The actual logic of the * operator should be specified within this function. The macros * provide definitions for the other 5 required templates, one * default matrix by matrix, general matrix by scalar, default * matrix by scalar, general scalar by matrix, default scalar by * matrix. The default versions call the more general versions with * such that they will return concrete matrices with order equal to * the left-hand (or only) matrix passed to the default version. * */ #define SCYTHE_BINARY_OPERATOR_DMM(OP) \ template \ inline Matrix \ OP (const Matrix& lhs, \ const Matrix& rhs) \ { \ return OP (lhs, rhs); \ } #define SCYTHE_BINARY_OPERATOR_GMS(OP) \ template \ inline Matrix \ OP (const Matrix& lhs, \ const typename Matrix::ttype &rhs) \ { \ return (OP \ (lhs, Matrix(rhs))); \ } #define SCYTHE_BINARY_OPERATOR_DMS(OP) \ template \ inline Matrix \ OP (const Matrix& lhs, \ const typename Matrix::ttype &rhs) \ { \ return (OP (lhs, rhs)); \ } #define SCYTHE_BINARY_OPERATOR_GSM(OP) \ template \ inline Matrix \ OP (const typename Matrix::ttype &lhs, \ const Matrix& rhs) \ { \ return (OP \ (Matrix(lhs), rhs)); \ } #define SCYTHE_BINARY_OPERATOR_DSM(OP) \ template \ inline Matrix \ OP (const typename Matrix::ttype &lhs, \ const Matrix& rhs) \ { \ return (OP (lhs, rhs)); \ } #define SCYTHE_BINARY_OPERATOR_DEFS(OP) \ SCYTHE_BINARY_OPERATOR_DMM(OP) \ SCYTHE_BINARY_OPERATOR_GMS(OP) \ SCYTHE_BINARY_OPERATOR_DMS(OP) \ SCYTHE_BINARY_OPERATOR_GSM(OP) \ SCYTHE_BINARY_OPERATOR_DSM(OP) /* Matrix multiplication */ /* General template version. Must be called with operator*<> syntax */ /* We provide two symmetric algorithms for matrix multiplication, * one for col-major and the other for row-major matrices. They are * designed to minimize cache misses.The decision is based on the * return type of the template so, when using matrices of multiple * orders, this can get ugly. These optimizations only really start * paying dividends as matrices get big, because cache misses are * rare with smaller matrices. */ /*! \brief Multiply two matrices. * * This operator multiplies the matrices \a lhs and \a rhs together, * returning the result in a new Matrix object. This operator is * overloaded to provide both Matrix by Matrix multiplication and * Matrix by scalar multiplication. In the latter case, the scalar * on the left- or right-hand side of the operator is promoted to a * 1x1 Matrix and then multiplied with the Matrix on the other side * of the operator. In either case, the matrices must conform; that * is, the number of columns in the left-hand side argument must * equal the number of rows in the right-hand side argument. The * one exception is when one matrix is a scalar. 
In this case we * allow Matrix by scalar multiplication with the "*" operator that * is comparable to element-by-element multiplication of a Matrix by * a scalar value, for convenience. * * In addition, we define multiple templates of the overloaded * operator to provide maximal flexibility when working with * matrices with differing matrix_order and/or matrix_style. Each * version of the overloaded operator (Matrix by Matrix, scalar by * Matrix, and Matrix by scalar) provides both a default and * general behavior, using templates. By default, the returned * Matrix is concrete and has the same matrix_order as the * left-hand (or only) Matrix argument. Alternatively, one may * coerce the matrix_order and matrix_style of the returned Matrix * to preferred values by using the full template declaration of * the operator. * * Scythe will use LAPACK/BLAS routines to multiply concrete * column-major matrices of double-precision floating point * numbers if LAPACK/BLAS is available and you compile your * program with the SCYTHE_LAPACK flag enabled. * * \param lhs The left-hand-side Matrix or scalar. * \param rhs The right-hand-side Matrix or scalar. * * \see operator*(const Matrix& lhs, const Matrix& rhs) * \see operator*(const Matrix& lhs, const Matrix& rhs) * \see operator*(const Matrix& lhs, const T_type& rhs) * \see operator*(const Matrix& lhs, const T_type& rhs) * \see operator*(const T_type& lhs, const Matrix& rhs) * \see operator*(const T_type& lhs, const Matrix& rhs) * * \throw scythe_conformation_error (Level 1) * \throw scythe_alloc_error (Level 1) */ template inline Matrix operator* (const Matrix& lhs, const Matrix& rhs) { if (lhs.size() == 1 || rhs.size() == 1) return (lhs % rhs); SCYTHE_CHECK_10 (lhs.cols() != rhs.rows(), scythe_conformation_error, "Matrices with dimensions (" << lhs.rows() << ", " << lhs.cols() << ") and (" << rhs.rows() << ", " << rhs.cols() << ") are not multiplication-conformable"); Matrix result (lhs.rows(), rhs.cols(), false); T_type tmp; if (ORDER == Col) { // col-major optimized for (uint j = 0; j < rhs.cols(); ++j) { for (uint i = 0; i < lhs.rows(); ++i) result(i, j) = (T_type) 0; for (uint l = 0; l < lhs.cols(); ++l) { tmp = rhs(l, j); for (uint i = 0; i < lhs.rows(); ++i) result(i, j) += tmp * lhs(i, l); } } } else { // row-major optimized for (uint i = 0; i < lhs.rows(); ++i) { for (uint j = 0; j < rhs.cols(); ++j) result(i, j) = (T_type) 0; for (uint l = 0; l < rhs.rows(); ++l) { tmp = lhs(i, l); for (uint j = 0; j < rhs.cols(); ++j) result(i, j) += tmp * rhs(l,j); } } } SCYTHE_VIEW_RETURN(T_type, ORDER, STYLE, result) } SCYTHE_BINARY_OPERATOR_DEFS(operator*) /*! \brief Kronecker multiply two matrices. * * This function computes the Kronecker product of two Matrix * objects. This function is overloaded to provide both Matrix by * Matrix and Matrix by scalar Kronecker multiplication. In the * former case the matrices need not conform: the result has * lhs.rows()*rhs.rows() rows and lhs.cols()*rhs.cols() columns. * * In addition, we define multiple templates of the overloaded * operator to provide maximal flexibility when working with * matrices with differing matrix_order and/or matrix_style. Each * version of the overloaded operator (Matrix by Matrix, scalar by * Matrix, and Matrix by scalar) provides both a default and * general behavior, using templates. By default, the returned * Matrix is concrete and has the same matrix_order as the * left-hand (or only) Matrix argument.
Alternatively, one may * coerce the matrix_order and matrix_style of the returned Matrix * to preferred values by using the full template declaration of * the operator. * * \param lhs The left-hand-side Matrix or scalar. * \param rhs The right-hand-side Matrix or scalar. * * \throw scythe_conformation_error (Level 1) * \throw scythe_alloc_error (Level 1) */ template inline Matrix kronecker (const Matrix& lhs, const Matrix& rhs) { Matrix res = lhs; res.kronecker(rhs); return (res); } SCYTHE_BINARY_OPERATOR_DEFS(kronecker) /* Macro definition for general return type templates of standard * binary operators (this handles, +, -, %, /, but not *) */ #define SCYTHE_GENERAL_BINARY_OPERATOR(OP,FUNCTOR) \ template \ inline Matrix \ OP (const Matrix& lhs, \ const Matrix& rhs) \ { \ SCYTHE_CHECK_10(lhs.size() != 1 && rhs.size() != 1 && \ (lhs.rows() != rhs.rows() || lhs.cols() != rhs.cols()), \ scythe_conformation_error, \ "Matrices with dimensions (" << lhs.rows() \ << ", " << lhs.cols() \ << ") and (" << rhs.rows() << ", " << rhs.cols() \ << ") are not conformable"); \ \ if (lhs.size() == 1) { \ Matrix res(rhs.rows(),rhs.cols(),false); \ std::transform(rhs.begin_f(), rhs.end_f(), \ res.template begin_f(), \ std::bind1st(FUNCTOR (), lhs(0))); \ SCYTHE_VIEW_RETURN(T_type, ORDER, STYLE, res) \ } \ \ Matrix res(lhs.rows(), lhs.cols(), false); \ \ if (rhs.size() == 1) { \ std::transform(lhs.begin_f(), lhs.end_f(), \ res.template begin_f (), \ std::bind2nd(FUNCTOR (), rhs(0))); \ } else { \ std::transform(lhs.begin_f(), lhs.end_f(), \ rhs.template begin_f(), \ res.template begin_f(), \ FUNCTOR ()); \ } \ \ SCYTHE_VIEW_RETURN(T_type, ORDER, STYLE, res) \ } /* Addition operators */ /*! \fn operator+(const Matrix&lhs, * const Matrix&rhs) * * \brief Add two matrices. * * This operator adds the matrices \a lhs and \a rhs together, * returning the result in a new Matrix object. This operator is * overloaded to provide both Matrix by Matrix addition and * Matrix by scalar addition. In the former case, the dimensions of * the two matrices must be the same. * * In addition, we define multiple templates of the overloaded * operator to provide maximal flexibility when working with * matrices with differing matrix_order and/or matrix_style. Each * version of the overloaded operator (Matrix by Matrix, scalar by * Matrix, and Matrix by scalar) provides both a default and * general behavior, using templates. By default, the returned * Matrix is concrete and has the same matrix_order as the * left-hand (or only) Matrix argument. Alternatively, one may * coerce the matrix_order and matrix_style of the returned Matrix * to preferred values by using the full template declaration of * the operator. * * \param lhs The left-hand-side Matrix or scalar. * \param rhs The right-hand-side Matrix or scalar. * * \throw scythe_conformation_error (Level 1) * \throw scythe_alloc_error (Level 1) */ SCYTHE_GENERAL_BINARY_OPERATOR (operator+, std::plus) SCYTHE_BINARY_OPERATOR_DEFS (operator+) /* Subtraction operators */ /*! \fn operator-(const Matrix&lhs, * const Matrix&rhs) * * \brief Subtract two matrices. * * This operator subtracts the Matrix \a rhs from \a lhs, returning * the result in a new Matrix object. This operator is overloaded * to provide both Matrix by Matrix subtraction and Matrix by scalar * subtraction. In the former case, the dimensions of the two * matrices must be the same. 
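 *
 * For instance (an illustrative sketch added for exposition, not part of the
 * original header; the same pattern applies to the other element-wise
 * operators defined in this file):
 *
 * \code
 * scythe::Matrix<> A(2, 2, false);
 * A(0, 0) = 1;  A(0, 1) = 2;  A(1, 0) = 3;  A(1, 1) = 4;
 *
 * scythe::Matrix<> B = A - 1.0;   // scalar is broadcast:   0 1 / 2 3
 * scythe::Matrix<> C = A - B;     // element-wise:          all ones
 * scythe::Matrix<> D = A % A;     // element-wise product:  1 4 / 9 16
 * scythe::Matrix<> E = 2.0 * A;   // scalar multiplication: 2 4 / 6 8
 * \endcode
 *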
* * In addition, we define multiple templates of the overloaded * operator to provide maximal flexibility when working with * matrices with differing matrix_order and/or matrix_style. Each * version of the overloaded operator (Matrix by Matrix, scalar by * Matrix, and Matrix by scalar) provides both a default and * general behavior, using templates. By default, the returned * Matrix is concrete and has the same matrix_order as the * left-hand (or only) Matrix argument. Alternatively, one may * coerce the matrix_order and matrix_style of the returned Matrix * to preferred values by using the full template declaration of * the operator. * * \param lhs The left-hand-side Matrix or scalar. * \param rhs The right-hand-side Matrix or scalar. * * \throw scythe_conformation_error (Level 1) * \throw scythe_alloc_error (Level 1) */ SCYTHE_GENERAL_BINARY_OPERATOR (operator-, std::minus) SCYTHE_BINARY_OPERATOR_DEFS (operator-) /* Element-by-element multiplication operators */ /*! \fn operator%(const Matrix&lhs, * const Matrix&rhs) * * \brief Element multiply two matrices. * * This operator multiplies the elements of the matrices \a lhs and * \a rhs together, returning the result in a new Matrix object. * This operator is overloaded to provide both Matrix by Matrix * element-wise multiplication and Matrix by scalar element-wise * multiplication. In the former case, the dimensions of the two * matrices must be the same. * * In addition, we define multiple templates of the overloaded * operator to provide maximal flexibility when working with * matrices with differing matrix_order and/or matrix_style. Each * version of the overloaded operator (Matrix by Matrix, scalar by * Matrix, and Matrix by scalar) provides both a default and * general behavior, using templates. By default, the returned * Matrix is concrete and has the same matrix_order as the * left-hand (or only) Matrix argument. Alternatively, one may * coerce the matrix_order and matrix_style of the returned Matrix * to preferred values by using the full template declaration of * the operator. * * \param lhs The left-hand-side Matrix or scalar. * \param rhs The right-hand-side Matrix or scalar. * * \throw scythe_conformation_error (Level 1) * \throw scythe_alloc_error (Level 1) */ SCYTHE_GENERAL_BINARY_OPERATOR (operator%, std::multiplies) SCYTHE_BINARY_OPERATOR_DEFS(operator%) /* Element-by-element division */ /*! \fn operator/(const Matrix&lhs, * const Matrix&rhs) * * \brief Divide two matrices. * * This operator divides the Matrix \a lhs from \a rhs, * returning the result in a new Matrix object. This operator is * overloaded to provide both Matrix by Matrix division and * Matrix by scalar division. In the former case, the dimensions of * the two matrices must be the same. * * In addition, we define multiple templates of the overloaded * operator to provide maximal flexibility when working with * matrices with differing matrix_order and/or matrix_style. Each * version of the overloaded operator (Matrix by Matrix, scalar by * Matrix, and Matrix by scalar) provides both a default and * general behavior, using templates. By default, the returned * Matrix is concrete and has the same matrix_order as the * left-hand (or only) Matrix argument. Alternatively, one may * coerce the matrix_order and matrix_style of the returned Matrix * to preferred values by using the full template declaration of * the operator. * * \param lhs The left-hand-side Matrix or scalar. * \param rhs The right-hand-side Matrix or scalar. 
* * \throw scythe_conformation_error (Level 1) * \throw scythe_alloc_error (Level 1) */ SCYTHE_GENERAL_BINARY_OPERATOR (operator/, std::divides) SCYTHE_BINARY_OPERATOR_DEFS (operator/) /* Element-by-element exponentiation */ /*! \fn operator^(const Matrix&lhs, * const Matrix&rhs) * * \brief Exponentiate one Matrix by another. * * This operator exponentiates the elements of Matrix \a lhs by * those in \a rhs, returning the result in a new Matrix object. * This operator is overloaded to provide both Matrix by Matrix * exponentiation and Matrix by scalar exponentiation. In the * former case, the dimensions of the two matrices must be the same. * * In addition, we define multiple templates of the overloaded * operator to provide maximal flexibility when working with * matrices with differing matrix_order and/or matrix_style. Each * version of the overloaded operator (Matrix by Matrix, scalar by * Matrix, and Matrix by scalar) provides both a default and * general behavior, using templates. By default, the returned * Matrix is concrete and has the same matrix_order as the * left-hand (or only) Matrix argument. Alternatively, one may * coerce the matrix_order and matrix_style of the returned Matrix * to preferred values by using the full template declaration of * the operator. * * \param lhs The left-hand-side Matrix or scalar. * \param rhs The right-hand-side Matrix or scalar. * * \throw scythe_conformation_error (Level 1) * \throw scythe_alloc_error (Level 1) */ SCYTHE_GENERAL_BINARY_OPERATOR (operator^, exponentiate) SCYTHE_BINARY_OPERATOR_DEFS (operator^) /* Negation operators */ // General return type version /*! \brief Negate a Matrix. * * This unary operator returns the negation of \a M. This version * of the operator is a general template and can provide a Matrix * with any matrix_order or matrix_style as its return value. * * We also provide an overloaded default template that returns a * concrete matrix with the same matrix_order as \a M. * * \param M The Matrix to negate. * * \throw scythe_alloc_error (Level 1) */ template inline Matrix operator- (const Matrix& M) { Matrix result(M.rows(), M.cols(), false); std::transform(M.template begin_f(), M.template end_f(), result.template begin_f(), std::negate ()); SCYTHE_VIEW_RETURN(T_type, R_ORDER, R_STYLE, result) } // Default return type version template inline Matrix operator- (const Matrix& M) { return operator- (M); } /* Unary not operators */ /*! \brief Logically NOT a Matrix. * * This unary operator returns NOT \a M. This version of the * operator is a general template and can provide a boolean Matrix * with any matrix_order or matrix_style as its return value. * * We also provide a default template for this function that returns * a concrete boolean with the same matrix_order as \a M. * * \param M The Matrix to NOT. * * \see operator!(const Matrix& M) * * \throw scythe_alloc_error (Level 1) */ template inline Matrix operator! (const Matrix& M) { Matrix result(M.rows(), M.cols(), false); std::transform(M.template begin_f(), M.template end_f(), result.template begin_f(), std::logical_not ()); SCYTHE_VIEW_RETURN(T_type, R_ORDER, R_STYLE, result) } // Default return type version template inline Matrix operator! (const Matrix& M) { return (operator! (M)); } /**** COMPARISON OPERATORS ****/ /* These macros are analogous to those above, except they return * only boolean matrices and use slightly different template * parameter orderings. Kind of redundant, but less confusing than * making omnibus macros that handle both cases. 
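 *
 * Usage sketch (added for illustration; not part of the original header).
 * Each comparison returns a Matrix of bool with the same dimensions as its
 * (larger) argument:
 *
 *   scythe::Matrix<> A(2, 2, false);
 *   A(0, 0) = 0.2;  A(0, 1) = 0.8;  A(1, 0) = 0.5;  A(1, 1) = 0.9;
 *
 *   scythe::Matrix<bool> big   = (A > 0.5);   // false true / false true
 *   scythe::Matrix<bool> equal = (A == A);    // all true
 *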
*/ #define SCYTHE_GENERAL_BINARY_BOOL_OPERATOR(OP,FUNCTOR) \ template \ inline Matrix \ OP (const Matrix& lhs, \ const Matrix& rhs) \ { \ SCYTHE_CHECK_10(lhs.size() != 1 && rhs.size() != 1 && \ (lhs.rows() != rhs.rows() || lhs.cols() != rhs.cols()), \ scythe_conformation_error, \ "Matrices with dimensions (" << lhs.rows() \ << ", " << lhs.cols() \ << ") and (" << rhs.rows() << ", " << rhs.cols() \ << ") are not conformable"); \ \ if (lhs.size() == 1) { \ Matrix res(rhs.rows(),rhs.cols(),false); \ std::transform(rhs.begin_f(), rhs.end_f(), \ res.template begin_f(), \ std::bind1st(FUNCTOR (), lhs(0))); \ SCYTHE_VIEW_RETURN(T_type, ORDER, STYLE, res) \ } \ \ Matrix res(lhs.rows(), lhs.cols(), false); \ \ if (rhs.size() == 1) { \ std::transform(lhs.begin_f(), lhs.end_f(), \ res.template begin_f (), \ std::bind2nd(FUNCTOR (), rhs(0))); \ } else { \ std::transform(lhs.begin_f(), lhs.end_f(), \ rhs.template begin_f(), \ res.template begin_f(), \ FUNCTOR ()); \ } \ \ SCYTHE_VIEW_RETURN(T_type, ORDER, STYLE, res) \ } #define SCYTHE_BINARY_BOOL_OPERATOR_DMM(OP) \ template \ inline Matrix \ OP (const Matrix& lhs, \ const Matrix& rhs) \ { \ return OP (lhs, rhs); \ } #define SCYTHE_BINARY_BOOL_OPERATOR_GMS(OP) \ template \ inline Matrix \ OP (const Matrix& lhs, \ const typename Matrix::ttype &rhs) \ { \ return (OP \ (lhs, Matrix(rhs))); \ } #define SCYTHE_BINARY_BOOL_OPERATOR_DMS(OP) \ template \ inline Matrix \ OP (const Matrix& lhs, \ const typename Matrix::ttype &rhs) \ { \ return (OP (lhs, rhs)); \ } #define SCYTHE_BINARY_BOOL_OPERATOR_GSM(OP) \ template \ inline Matrix \ OP (const typename Matrix::ttype &lhs, \ const Matrix& rhs) \ { \ return (OP \ (Matrix(lhs), rhs)); \ } #define SCYTHE_BINARY_BOOL_OPERATOR_DSM(OP) \ template \ inline Matrix \ OP (const typename Matrix::ttype &lhs, \ const Matrix& rhs) \ { \ return (OP (lhs, rhs)); \ } #define SCYTHE_BINARY_BOOL_OPERATOR_DEFS(OP) \ SCYTHE_BINARY_BOOL_OPERATOR_DMM(OP) \ SCYTHE_BINARY_BOOL_OPERATOR_GMS(OP) \ SCYTHE_BINARY_BOOL_OPERATOR_DMS(OP) \ SCYTHE_BINARY_BOOL_OPERATOR_GSM(OP) \ SCYTHE_BINARY_BOOL_OPERATOR_DSM(OP) /* Element-wise Equality operator * See equals() method for straight equality checks */ /*! \fn operator==(const Matrix&lhs, * const Matrix&rhs) * * \brief Test Matrix equality. * * This operator compares the elements of \a lhs and \a rhs and * returns a boolean Matrix of true and false values, indicating * whether each pair of compared elements is equal. This operator * is overloaded to provide both Matrix by Matrix equality testing * and Matrix by scalar equality testing. In the former case, the * dimensions of the two matrices must be the same. The boolean * Matrix returned has the same dimensions as \a lhs and \a rhs, or * matches the dimensionality of the larger Matrix object when one * of the two parameters is a scalar or a 1x1 Matrix. * * In addition, we define multiple templates of the overloaded * operator to provide maximal flexibility when working with * matrices with differing matrix_order and/or matrix_style. Each * version of the overloaded operator (Matrix by Matrix, scalar by * Matrix, and Matrix by scalar) provides both a default and * general behavior, using templates. By default, the returned * Matrix is concrete and has the same matrix_order as the * left-hand (or only) Matrix argument. Alternatively, one may * coerce the matrix_order and matrix_style of the returned Matrix * to preferred values by using the full template declaration of * the operator. * * \param lhs The left-hand-side Matrix or scalar. 
* \param rhs The right-hand-side Matrix or scalar. * * \throw scythe_conformation_error (Level 1) * \throw scythe_alloc_error (Level 1) */ SCYTHE_GENERAL_BINARY_BOOL_OPERATOR (operator==, std::equal_to) SCYTHE_BINARY_BOOL_OPERATOR_DEFS (operator==) /*! \fn operator!=(const Matrix&lhs, * const Matrix&rhs) * * \brief Test Matrix equality. * * This operator compares the elements of \a lhs and \a rhs and * returns a boolean Matrix of true and false values, indicating * whether each pair of compared elements is not equal. This operator * is overloaded to provide both Matrix by Matrix inequality testing * and Matrix by scalar inequality testing. In the former case, the * dimensions of the two matrices must be the same. The boolean * Matrix returned has the same dimensions as \a lhs and \a rhs, or * matches the dimensionality of the larger Matrix object when one * of the two parameters is a scalar or a 1x1 Matrix. * * In addition, we define multiple templates of the overloaded * operator to provide maximal flexibility when working with * matrices with differing matrix_order and/or matrix_style. Each * version of the overloaded operator (Matrix by Matrix, scalar by * Matrix, and Matrix by scalar) provides both a default and * general behavior, using templates. By default, the returned * Matrix is concrete and has the same matrix_order as the * left-hand (or only) Matrix argument. Alternatively, one may * coerce the matrix_order and matrix_style of the returned Matrix * to preferred values by using the full template declaration of * the operator. * * \param lhs The left-hand-side Matrix or scalar. * \param rhs The right-hand-side Matrix or scalar. * * \throw scythe_conformation_error (Level 1) * \throw scythe_alloc_error (Level 1) */ SCYTHE_GENERAL_BINARY_BOOL_OPERATOR (operator!=, std::not_equal_to) SCYTHE_BINARY_BOOL_OPERATOR_DEFS (operator!=) /*! \fn operator<(const Matrix&lhs, * const Matrix&rhs) * * \brief Test Matrix inequality. * * This operator compares the elements of \a lhs and \a rhs and * returns a boolean Matrix of true and false values, indicating * whether each of the left-hand side elements is less than its * corresponding right hand side element. This operator is * overloaded to provide both Matrix by Matrix inequality testing * and Matrix by scalar inequality testing. In the former case, the * dimensions of the two matrices must be the same. The boolean * Matrix returned has the same dimensions as \a lhs and \a rhs, or * matches the dimensionality of the larger Matrix object when one * of the two parameters is a scalar or a 1x1 Matrix. * * In addition, we define multiple templates of the overloaded * operator to provide maximal flexibility when working with * matrices with differing matrix_order and/or matrix_style. Each * version of the overloaded operator (Matrix by Matrix, scalar by * Matrix, and Matrix by scalar) provides both a default and * general behavior, using templates. By default, the returned * Matrix is concrete and has the same matrix_order as the * left-hand (or only) Matrix argument. Alternatively, one may * coerce the matrix_order and matrix_style of the returned Matrix * to preferred values by using the full template declaration of * the operator. * * \param lhs The left-hand-side Matrix or scalar. * \param rhs The right-hand-side Matrix or scalar. * * \throw scythe_conformation_error (Level 1) * \throw scythe_alloc_error (Level 1) */ SCYTHE_GENERAL_BINARY_BOOL_OPERATOR (operator<, std::less) SCYTHE_BINARY_BOOL_OPERATOR_DEFS (operator<) /*! 
\fn operator<=(const Matrix&lhs, * const Matrix&rhs) * * \brief Test Matrix inequality. * * This operator compares the elements of \a lhs and \a rhs and * returns a boolean Matrix of true and false values, indicating * whether each of the left-hand side elements is less than * or equal to its * corresponding right hand side element. This operator is * overloaded to provide both Matrix by Matrix inequality testing * and Matrix by scalar inequality testing. In the former case, the * dimensions of the two matrices must be the same. The boolean * Matrix returned has the same dimensions as \a lhs and \a rhs, or * matches the dimensionality of the larger Matrix object when one * of the two parameters is a scalar or a 1x1 Matrix. * * In addition, we define multiple templates of the overloaded * operator to provide maximal flexibility when working with * matrices with differing matrix_order and/or matrix_style. Each * version of the overloaded operator (Matrix by Matrix, scalar by * Matrix, and Matrix by scalar) provides both a default and * general behavior, using templates. By default, the returned * Matrix is concrete and has the same matrix_order as the * left-hand (or only) Matrix argument. Alternatively, one may * coerce the matrix_order and matrix_style of the returned Matrix * to preferred values by using the full template declaration of * the operator. * * \param lhs The left-hand-side Matrix or scalar. * \param rhs The right-hand-side Matrix or scalar. * * \throw scythe_conformation_error (Level 1) * \throw scythe_alloc_error (Level 1) */ SCYTHE_GENERAL_BINARY_BOOL_OPERATOR (operator<=, std::less_equal) SCYTHE_BINARY_BOOL_OPERATOR_DEFS (operator<=) /*! \fn operator>(const Matrix&lhs, * const Matrix&rhs) * * \brief Test Matrix inequality. * * This operator compares the elements of \a lhs and \a rhs and * returns a boolean Matrix of true and false values, indicating * whether each of the left-hand side elements is greater than its * corresponding right hand side element. This operator is * overloaded to provide both Matrix by Matrix inequality testing * and Matrix by scalar inequality testing. In the former case, the * dimensions of the two matrices must be the same. The boolean * Matrix returned has the same dimensions as \a lhs and \a rhs, or * matches the dimensionality of the larger Matrix object when one * of the two parameters is a scalar or a 1x1 Matrix. * * In addition, we define multiple templates of the overloaded * operator to provide maximal flexibility when working with * matrices with differing matrix_order and/or matrix_style. Each * version of the overloaded operator (Matrix by Matrix, scalar by * Matrix, and Matrix by scalar) provides both a default and * general behavior, using templates. By default, the returned * Matrix is concrete and has the same matrix_order as the * left-hand (or only) Matrix argument. Alternatively, one may * coerce the matrix_order and matrix_style of the returned Matrix * to preferred values by using the full template declaration of * the operator. * * \param lhs The left-hand-side Matrix or scalar. * \param rhs The right-hand-side Matrix or scalar. * * \throw scythe_conformation_error (Level 1) * \throw scythe_alloc_error (Level 1) */ SCYTHE_GENERAL_BINARY_BOOL_OPERATOR (operator>, std::greater) SCYTHE_BINARY_BOOL_OPERATOR_DEFS (operator>) /*! \fn operator>=(const Matrix&lhs, * const Matrix&rhs) * * \brief Test Matrix inequality. 
* * This operator compares the elements of \a lhs and \a rhs and * returns a boolean Matrix of true and false values, indicating * whether each of the left-hand side elements is greater than * or equal to its * corresponding right hand side element. This operator is * overloaded to provide both Matrix by Matrix inequality testing * and Matrix by scalar inequality testing. In the former case, the * dimensions of the two matrices must be the same. The boolean * Matrix returned has the same dimensions as \a lhs and \a rhs, or * matches the dimensionality of the larger Matrix object when one * of the two parameters is a scalar or a 1x1 Matrix. * * In addition, we define multiple templates of the overloaded * operator to provide maximal flexibility when working with * matrices with differing matrix_order and/or matrix_style. Each * version of the overloaded operator (Matrix by Matrix, scalar by * Matrix, and Matrix by scalar) provides both a default and * general behavior, using templates. By default, the returned * Matrix is concrete and has the same matrix_order as the * left-hand (or only) Matrix argument. Alternatively, one may * coerce the matrix_order and matrix_style of the returned Matrix * to preferred values by using the full template declaration of * the operator. * * \param lhs The left-hand-side Matrix or scalar. * \param rhs The right-hand-side Matrix or scalar. * * \throw scythe_conformation_error (Level 1) * \throw scythe_alloc_error (Level 1) */ SCYTHE_GENERAL_BINARY_BOOL_OPERATOR (operator>=, std::greater_equal) SCYTHE_BINARY_BOOL_OPERATOR_DEFS (operator>=) /*! \fn operator&(const Matrix&lhs, * const Matrix&rhs) * * \brief Logically AND two matrices. * * This operator logically ANDs the elements of \a lhs and \a rhs * and returns a boolean Matrix of true and false values, with true * values in each position where both matrices' elements evaluate to * true (or the type specific analog to true, typically any non-zero * value). This operator is overloaded to provide both Matrix by * Matrix AND and Matrix by scalar AND. In the former case, the * dimensions of the two matrices must be the same. The boolean * Matrix returned has the same dimensions as \a lhs and \a rhs, or * matches the dimensionality of the larger Matrix object when one * of the two parameters is a scalar or a 1x1 Matrix. * * In addition, we define multiple templates of the overloaded * operator to provide maximal flexibility when working with * matrices with differing matrix_order and/or matrix_style. Each * version of the overloaded operator (Matrix by Matrix, scalar by * Matrix, and Matrix by scalar) provides both a default and * general behavior, using templates. By default, the returned * Matrix is concrete and has the same matrix_order as the * left-hand (or only) Matrix argument. Alternatively, one may * coerce the matrix_order and matrix_style of the returned Matrix * to preferred values by using the full template declaration of * the operator. * * \param lhs The left-hand-side Matrix or scalar. * \param rhs The right-hand-side Matrix or scalar. * * \throw scythe_conformation_error (Level 1) * \throw scythe_alloc_error (Level 1) */ SCYTHE_GENERAL_BINARY_BOOL_OPERATOR (operator&, std::logical_and) SCYTHE_BINARY_BOOL_OPERATOR_DEFS (operator&) /*! \fn operator|(const Matrix&lhs, * const Matrix&rhs) * * \brief Logically OR two matrices. 
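 *
 * A small usage sketch (illustration only, not from the original header),
 * combining the comparison operators above with the logical operators:
 *
 * \code
 * scythe::Matrix<> A(1, 4, false);
 * A(0, 0) = 0.1;  A(0, 1) = 0.4;  A(0, 2) = 0.6;  A(0, 3) = 0.9;
 *
 * // true where 0.25 < A(0, j) < 0.75
 * scythe::Matrix<bool> inside  = (A > 0.25) & (A < 0.75);
 * // true where A(0, j) lies outside that band (complement of inside here)
 * scythe::Matrix<bool> outside = (A < 0.25) | (A > 0.75);
 * \endcode
 *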
* * This operator logically ORs the elements of \a lhs and \a rhs * and returns a boolean Matrix of true and false values, with true * values in each position where either Matrix's elements evaluate to * true (or the type specific analog to true, typically any non-zero * value). This operator is overloaded to provide both Matrix by * Matrix OR and Matrix by scalar OR. In the former case, the * dimensions of the two matrices must be the same. The boolean * Matrix returned has the same dimensions as \a lhs and \a rhs, or * matches the dimensionality of the larger Matrix object when one * of the two parameters is a scalar or a 1x1 Matrix. * * In addition, we define multiple templates of the overloaded * operator to provide maximal flexibility when working with * matrices with differing matrix_order and/or matrix_style. Each * version of the overloaded operator (Matrix by Matrix, scalar by * Matrix, and Matrix by scalar) provides both a default and * general behavior, using templates. By default, the returned * Matrix is concrete and has the same matrix_order as the * left-hand (or only) Matrix argument. Alternatively, one may * coerce the matrix_order and matrix_style of the returned Matrix * to preferred values by using the full template declaration of * the operator. * * \param lhs The left-hand-side Matrix or scalar. * \param rhs The right-hand-side Matrix or scalar. * * \throw scythe_conformation_error (Level 1) * \throw scythe_alloc_error (Level 1) */ SCYTHE_GENERAL_BINARY_BOOL_OPERATOR (operator|, std::logical_or) SCYTHE_BINARY_BOOL_OPERATOR_DEFS (operator|) /**** INPUT-OUTPUT ****/ /* This function simply copies values from an input stream into a * matrix. It relies on the iterators for bounds checking. */ /*! \brief Populate a Matrix from a stream. * * This operator reads values from a stream and enters them into an * existing Matrix in order. * * \param is The istream to read from. * \param M The Matrix to populate. * * \see operator<<(std::ostream& os, const Matrix& M) * \see Matrix::Matrix(const std::string& file) * * \throw scythe_bounds_error (Level 3) */ template std::istream& operator>> (std::istream& is, Matrix& M) { std::copy(std::istream_iterator (is), std::istream_iterator(), M.begin_f()); return is; } /* Writes a matrix to an ostream in readable format. This is * intended to be used to pretty-print to the terminal. */ /*!\brief Write a Matrix to a stream. * * Writes a matrix to an ostream in a column-aligned format. This * operator is primarily intended for pretty-printing to the * terminal and uses two passes in order to correctly align the * output. If you wish to write a Matrix to disk, Matrix::save() is * probably a better option. * * \param os The ostream to write to. * \param M The Matrix to write out. * * \see operator>>(std::istream& is, Matrix& M) * \see Matrix::save() */ template std::ostream& operator<< (std::ostream& os, const Matrix& M) { /* This function take two passes to figure out appropriate field * widths. Speed isn't really the point here. */ // Store previous io settings std::ios_base::fmtflags preop = os.flags(); uint mlen = os.width(); std::ostringstream oss; oss.precision(os.precision()); oss << std::setiosflags(std::ios::fixed); typename Matrix::const_forward_iterator last = M.end_f(); for (typename Matrix::const_forward_iterator i = M.begin_f(); i != last; ++i) { oss.str(""); oss << (*i); if (oss.str().length() > mlen) mlen = oss.str().length(); } /* Write the stream */ // Change to a fixed with format. 
Users should control precision os << std::setiosflags(std::ios::fixed); for (uint i = 0; i < M.rows(); ++i) { Matrix row = M(i, _); //for (uint i = 0; i < row.size(); ++i) // os << std::setw(mlen) << row[i] << " "; typename Matrix::const_forward_iterator row_last = row.end_f(); for (typename Matrix::forward_iterator el = row.begin_f(); el != row_last; ++el) { os << std::setw(mlen) << *el << " "; } os << std::endl; } // Restore pre-op flags os.flags(preop); return os; } #ifdef SCYTHE_LAPACK /* A template specialization of operator* for col-major, concrete * matrices of doubles that is only visible when a LAPACK library is * available. This function is an analog of the above function and * the above doxygen documentation serves for both. * * This needs to go below % so it can see the template definition * (since it isn't actually in the template itself. */ template<> inline Matrix<> operator* (const Matrix<>& lhs, const Matrix<>& rhs) { if (lhs.size() == 1 || rhs.size() == 1) return (lhs % rhs); SCYTHE_DEBUG_MSG("Using lapack/blas for matrix multiplication"); SCYTHE_CHECK_10 (lhs.cols() != rhs.rows(), scythe_conformation_error, "Matrices with dimensions (" << lhs.rows() << ", " << lhs.cols() << ") and (" << rhs.rows() << ", " << rhs.cols() << ") are not multiplication-conformable"); Matrix<> result (lhs.rows(), rhs.cols(), false); // Get pointers to the internal arrays and set up some vars double* lhspnt = lhs.getArray(); double* rhspnt = rhs.getArray(); double* resultpnt = result.getArray(); const double one(1.0); const double zero(0.0); int rows = (int) lhs.rows(); int cols = (int) rhs.cols(); int innerDim = (int) rhs.rows(); // Call the lapack routine. lapack::dgemm_("N", "N", &rows, &cols, &innerDim, &one, lhspnt, &rows, rhspnt, &innerDim, &zero, resultpnt, &rows); return result; } #endif } // end namespace scythe #endif /* SCYTHE_MATRIX_H */ MCMCpack/src/Makevars.in0000644000176000001440000000017112133644106014547 0ustar ripleyusersPKG_CPPFLAGS = -DSCYTHE_COMPILE_DIRECT -DSCYTHE_DEBUG=0 -DSCYTHE_RPACK -DHAVE_TRUNC @MV_HAVE_IEEEFP_H@ @MV_HAVE_TRUNC@ MCMCpack/src/Makevars0000644000176000001440000000014412133644106014142 0ustar ripleyusersPKG_CPPFLAGS = -DSCYTHE_COMPILE_DIRECT -DSCYTHE_DEBUG=0 -DSCYTHE_RPACK -DHAVE_TRUNC -DHAVE_TRUNC MCMCpack/src/lecuyer.h0000644000176000001440000005473412140061657014307 0ustar ripleyusers/* * Scythe Statistical Library * Copyright (C) 2000-2002 Andrew D. Martin and Kevin M. Quinn; * 2002-present Andrew D. Martin, Kevin M. Quinn, and Daniel * Pemstein. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * under the terms of the GNU General Public License as published by * Free Software Foundation; either version 2 of the License, or (at * your option) any later version. See the text files COPYING * and LICENSE, distributed with this source code, for further * information. * -------------------------------------------------------------------- * scythestat/rng/lecuyer.h * * Provides the class definition for the L'Ecuyer random number * generator, a rng capable of generating many independent substreams. * This class extends the abstract rng class by implementing runif(). * Based on RngStream.cpp, by Pierre L'Ecuyer. * * Pierre L'Ecuyer agreed to the following dual-licensing terms in an * email received 7 August 2004. This dual-license was prompted by * the Debian maintainers of R and MCMCpack. * * This software is Copyright (C) 2004 Pierre L'Ecuyer. 
* * License: this code can be used freely for personal, academic, or * non-commercial purposes. For commercial licensing, please contact * P. L'Ecuyer at lecuyer@iro.umontreal.ca. * * This code may also be redistributed and modified under the terms of * the GNU General Public License as published by the Free Software * Foundation; either version 2 of the License, or (at your option) any * later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, * USA. * */ /*! \file lecuyer.h * \brief The L'Ecuyer random number generator. * * This file contains the lecuyer class, a class that extends Scythe's * base random number generation class (scythe::rng) by providing an * implementation of scythe::rng::runif(), using L'Ecuyer's algorithm. * */ #ifndef SCYTHE_LECUYER_H #define SCYTHE_LECUYER_H #include #include #include #ifdef SCYTHE_COMPILE_DIRECT #include "rng.h" #else #include "scythestat/rng.h" #endif /* We want to use an anonymous namespace to make the following consts * and functions local to this file, but mingw doesn't play nice with * anonymous namespaces so we do things differently when using the * cross-compiler. */ #ifdef __MINGW32__ #define SCYTHE_MINGW32_STATIC static #else #define SCYTHE_MINGW32_STATIC #endif namespace scythe { #ifndef __MINGW32__ namespace { #endif SCYTHE_MINGW32_STATIC const double m1 = 4294967087.0; SCYTHE_MINGW32_STATIC const double m2 = 4294944443.0; SCYTHE_MINGW32_STATIC const double norm = 1.0 / (m1 + 1.0); SCYTHE_MINGW32_STATIC const double a12 = 1403580.0; SCYTHE_MINGW32_STATIC const double a13n = 810728.0; SCYTHE_MINGW32_STATIC const double a21 = 527612.0; SCYTHE_MINGW32_STATIC const double a23n = 1370589.0; SCYTHE_MINGW32_STATIC const double two17 =131072.0; SCYTHE_MINGW32_STATIC const double two53 =9007199254740992.0; /* 1/2^24 */ SCYTHE_MINGW32_STATIC const double fact = 5.9604644775390625e-8; // The following are the transition matrices of the two MRG // components (in matrix form), raised to the powers -1, 1, 2^76, // and 2^127, resp. 
SCYTHE_MINGW32_STATIC const double InvA1[3][3] = { // Inverse of A1p0 { 184888585.0, 0.0, 1945170933.0 }, { 1.0, 0.0, 0.0 }, { 0.0, 1.0, 0.0 } }; SCYTHE_MINGW32_STATIC const double InvA2[3][3] = { // Inverse of A2p0 { 0.0, 360363334.0, 4225571728.0 }, { 1.0, 0.0, 0.0 }, { 0.0, 1.0, 0.0 } }; SCYTHE_MINGW32_STATIC const double A1p0[3][3] = { { 0.0, 1.0, 0.0 }, { 0.0, 0.0, 1.0 }, { -810728.0, 1403580.0, 0.0 } }; SCYTHE_MINGW32_STATIC const double A2p0[3][3] = { { 0.0, 1.0, 0.0 }, { 0.0, 0.0, 1.0 }, { -1370589.0, 0.0, 527612.0 } }; SCYTHE_MINGW32_STATIC const double A1p76[3][3] = { { 82758667.0, 1871391091.0, 4127413238.0 }, { 3672831523.0, 69195019.0, 1871391091.0 }, { 3672091415.0, 3528743235.0, 69195019.0 } }; SCYTHE_MINGW32_STATIC const double A2p76[3][3] = { { 1511326704.0, 3759209742.0, 1610795712.0 }, { 4292754251.0, 1511326704.0, 3889917532.0 }, { 3859662829.0, 4292754251.0, 3708466080.0 } }; SCYTHE_MINGW32_STATIC const double A1p127[3][3] = { { 2427906178.0, 3580155704.0, 949770784.0 }, { 226153695.0, 1230515664.0, 3580155704.0 }, { 1988835001.0, 986791581.0, 1230515664.0 } }; SCYTHE_MINGW32_STATIC const double A2p127[3][3] = { { 1464411153.0, 277697599.0, 1610723613.0 }, { 32183930.0, 1464411153.0, 1022607788.0 }, { 2824425944.0, 32183930.0, 2093834863.0 } }; // Return (a*s + c) MOD m; a, s, c and m must be < 2^35 SCYTHE_MINGW32_STATIC double MultModM (double a, double s, double c, double m) { double v; long a1; v = a * s + c; if (v >= two53 || v <= -two53) { a1 = static_cast (a / two17); a -= a1 * two17; v = a1 * s; a1 = static_cast (v / m); v -= a1 * m; v = v * two17 + a * s + c; } a1 = static_cast (v / m); /* in case v < 0)*/ if ((v -= a1 * m) < 0.0) return v += m; else return v; } // Compute the vector v = A*s MOD m. Assume that -m < s[i] < m. // Works also when v = s. SCYTHE_MINGW32_STATIC void MatVecModM (const double A[3][3], const double s[3], double v[3], double m) { int i; double x[3]; // Necessary if v = s for (i = 0; i < 3; ++i) { x[i] = MultModM (A[i][0], s[0], 0.0, m); x[i] = MultModM (A[i][1], s[1], x[i], m); x[i] = MultModM (A[i][2], s[2], x[i], m); } for (i = 0; i < 3; ++i) v[i] = x[i]; } // Compute the matrix C = A*B MOD m. Assume that -m < s[i] < m. // Note: works also if A = C or B = C or A = B = C. SCYTHE_MINGW32_STATIC void MatMatModM (const double A[3][3], const double B[3][3], double C[3][3], double m) { int i, j; double V[3], W[3][3]; for (i = 0; i < 3; ++i) { for (j = 0; j < 3; ++j) V[j] = B[j][i]; MatVecModM (A, V, V, m); for (j = 0; j < 3; ++j) W[j][i] = V[j]; } for (i = 0; i < 3; ++i) for (j = 0; j < 3; ++j) C[i][j] = W[i][j]; } // Compute the matrix B = (A^(2^e) Mod m); works also if A = B. SCYTHE_MINGW32_STATIC void MatTwoPowModM(const double A[3][3], double B[3][3], double m, long e) { int i, j; /* initialize: B = A */ if (A != B) { for (i = 0; i < 3; ++i) for (j = 0; j < 3; ++j) B[i][j] = A[i][j]; } /* Compute B = A^(2^e) mod m */ for (i = 0; i < e; i++) MatMatModM (B, B, B, m); } // Compute the matrix B = (A^n Mod m); works even if A = B. SCYTHE_MINGW32_STATIC void MatPowModM (const double A[3][3], double B[3][3], double m, long n) { int i, j; double W[3][3]; /* initialize: W = A; B = I */ for (i = 0; i < 3; ++i) for (j = 0; j < 3; ++j) { W[i][j] = A[i][j]; B[i][j] = 0.0; } for (j = 0; j < 3; ++j) B[j][j] = 1.0; /* Compute B = A^n mod m using the binary decomposition of n */ while (n > 0) { if (n % 2) MatMatModM (W, B, B, m); MatMatModM (W, W, W, m); n /= 2; } } // Check that the seeds are legitimate values. Returns 0 if legal // seeds, -1 otherwise. 
SCYTHE_MINGW32_STATIC int CheckSeed (const unsigned long seed[6]) { int i; for (i = 0; i < 3; ++i) { if (seed[i] >= m1) { SCYTHE_THROW(scythe_randseed_error, "Seed[" << i << "] >= 4294967087, Seed is not set"); return -1; } } for (i = 3; i < 6; ++i) { if (seed[i] >= m2) { SCYTHE_THROW(scythe_randseed_error, "Seed[" << i << "] >= 4294944443, Seed is not set"); return -1; } } if (seed[0] == 0 && seed[1] == 0 && seed[2] == 0) { SCYTHE_THROW(scythe_randseed_error, "First 3 seeds = 0"); return -1; } if (seed[3] == 0 && seed[4] == 0 && seed[5] == 0) { SCYTHE_THROW(scythe_randseed_error, "Last 3 seeds = 0"); return -1; } return 0; } #ifndef __MINGW32__ } // end anonymous namespace #endif /*! \brief The L'Ecuyer random number generator. * * This class defines a random number generator, using Pierre * L'Ecuyer's algorithm (2000) and source code (2001) for * generating multiple simultaneous streams of random uniform * variates. The period of the underlying single-stream generator * is approximately \f$3.1 \times 10^{57}\f$. Each individual * stream is implemented in terms of a sequence of substreams (see * L'Ecuyer et al (2000) for details). * * The lecuyer class extends Scythe's basic random number * generating class, scythe::rng, implementing the interface that * it defines. * * \see rng * \see mersenne * */ class lecuyer : public rng { public: // Constructor /*! \brief Constructor * * This constructor creates an object encapsulating a random * number stream, with an optional name. It also sets the seed * of the stream to the package (default or user-specified) seed * if this is the first stream generated, or, otherwise, to a * value \f$2^{127}\f$ steps ahead of the seed of the previously * constructed stream. * * \param streamname The optional name for the stream. * * \see SetPackageSeed(unsigned long seed[6]) * \see SetSeed(unsigned long seed[6]) * \see SetAntithetic(bool) * \see IncreasedPrecis(bool) * \see name() */ lecuyer (std::string streamname = "") : rng (), streamname_ (streamname) { anti = false; incPrec = false; /* Information on a stream. The arrays {Cg, Bg, Ig} contain * the current state of the stream, the starting state of the * current SubStream, and the starting state of the stream. * This stream generates antithetic variates if anti = true. * It also generates numbers with extended precision (53 bits * if machine follows IEEE 754 standard) if incPrec = true. * nextSeed will be the seed of the next declared RngStream. */ for (int i = 0; i < 6; ++i) { Bg[i] = Cg[i] = Ig[i] = nextSeed[i]; } MatVecModM (A1p127, nextSeed, nextSeed, m1); MatVecModM (A2p127, &nextSeed[3], &nextSeed[3], m2); } /*! \brief Get the stream's name. * * This method returns a stream's name string. * * \see lecuyer(const char*) */ std::string name() const { return streamname_; } /*! \brief Reset the stream. * * This method resets the stream to its initial seeded state. * * \see ResetStartSubstream() * \see ResetNextSubstream() * \see SetSeed(unsigned long seed[6]) */ void ResetStartStream () { for (int i = 0; i < 6; ++i) Cg[i] = Bg[i] = Ig[i]; } /*! \brief Reset the current substream. * * * This method resets the stream to the first state of its * current substream. * * \see ResetStartStream() * \see ResetNextSubstream() * \see SetSeed(unsigned long seed[6]) * */ void ResetStartSubstream () { for (int i = 0; i < 6; ++i) Cg[i] = Bg[i]; } /*! \brief Jump to the next substream. * * This method resets the stream to the first state of its next * substream. 
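 *
 * A typical usage sketch (illustration only, not part of the original
 * header; assumes the Scythe headers are included):
 *
 * \code
 * unsigned long seed[6] = {42, 42, 42, 42, 42, 42};
 * scythe::lecuyer::SetPackageSeed(seed);   // seed the whole package
 *
 * scythe::lecuyer chain1("chain-1");       // first stream starts at the seed
 * scythe::lecuyer chain2("chain-2");       // starts 2^127 steps further on
 *
 * double u = chain1.runif();               // uniform draw on (0, 1)
 * long   k = chain1.RandInt(1, 6);         // discrete uniform on [1, 6]
 *
 * chain2.ResetNextSubstream();             // jump chain2 to its next substream
 * \endcode
 *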
* * \see ResetStartStream() * \see ResetStartSubstream() * \see SetSeed(unsigned long seed[6]) * */ void ResetNextSubstream () { MatVecModM(A1p76, Bg, Bg, m1); MatVecModM(A2p76, &Bg[3], &Bg[3], m2); for (int i = 0; i < 6; ++i) Cg[i] = Bg[i]; } /*! \brief Set the package seed. * * This method sets the overall package seed. The default * initial seed is (12345, 12345, 12345, 12345, 12345, 12345). * The package seed is the seed used to initialize the first * constructed random number stream in a given program. * * \param seed An array of six integers to seed the package. * The first three values cannot all equal 0 and must all be * less than 4294967087 while the second trio of integers must * all be less than 4294944443 and not all 0. * * \see SetSeed(unsigned long seed[6]) * * \throw scythe_randseed_error (Level 0) */ static void SetPackageSeed (unsigned long seed[6]) { if (CheckSeed (seed)) return; for (int i = 0; i < 6; ++i) nextSeed[i] = seed[i]; } /*! \brief Set the stream seed. * * This method sets the stream seed which is used to initialize * the state of the given stream. * * \warning This method sets the stream seed in isolation and * does not coordinate with any other streams. Therefore, * using this method without care can result in multiple * streams that overlap in the course of their runs. * * \param seed An array of six integers to seed the stream. * The first three values cannot all equal 0 and must all be * less than 4294967087 while the second trio of integers must * all be less than 4294944443 and not all 0. * * \see SetPackageSeed(unsigned long seed[6]) * \see ResetStartStream() * \see ResetStartSubstream() * \see ResetNextSubstream() * * \throw scythe_randseed_error (Level 0) */ void SetSeed (unsigned long seed[6]) { if (CheckSeed (seed)) return; for (int i = 0; i < 6; ++i) Cg[i] = Bg[i] = Ig[i] = seed[i]; } // XXX: get the cases formula working! /*! \brief Advances the state of the stream. * * This method advances the input \f$n\f$ steps, using the rule: * \f[ * n = * \begin{cases} * 2^e + c \quad if~e > 0, \\ * -2^{-e} + c \quad if~e < 0, \\ * c \quad if~e = 0. * \end{cases} * \f] * * \param e This parameter controls state advancement. * \param c This parameter also controls state advancement. * * \see GetState() * \see ResetStartStream() * \see ResetStartSubstream() * \see ResetNextSubstream() */ void AdvanceState (long e, long c) { double B1[3][3], C1[3][3], B2[3][3], C2[3][3]; if (e > 0) { MatTwoPowModM (A1p0, B1, m1, e); MatTwoPowModM (A2p0, B2, m2, e); } else if (e < 0) { MatTwoPowModM (InvA1, B1, m1, -e); MatTwoPowModM (InvA2, B2, m2, -e); } if (c >= 0) { MatPowModM (A1p0, C1, m1, c); MatPowModM (A2p0, C2, m2, c); } else { MatPowModM (InvA1, C1, m1, -c); MatPowModM (InvA2, C2, m2, -c); } if (e) { MatMatModM (B1, C1, C1, m1); MatMatModM (B2, C2, C2, m2); } MatVecModM (C1, Cg, Cg, m1); MatVecModM (C2, &Cg[3], &Cg[3], m2); } /*! \brief Get the current state. * * This method places the current state of the stream, as * represented by six integers, into the array argument. This * is useful for saving and restoring streams across program * runs. * * \param seed An array of six integers that will hold the state values on return. * * \see AdvanceState() */ void GetState (unsigned long seed[6]) const { for (int i = 0; i < 6; ++i) seed[i] = static_cast (Cg[i]); } /*! \brief Toggle generator precision. * * This method sets the precision level of the given stream. By * default, streams generate random numbers with 32 bit * resolution. 
If the user invokes this method with \a incp = * true, then the stream will begin to generate variates with * greater precision (53 bits on machines following the IEEE 754 * standard). Calling this method again with \a incp = false * will return the precision of generated numbers to 32 bits. * * \param incp A boolean value where true implies high (most * likely 53 bit) precision and false implies low (32 bit) * precision. * * \see SetAntithetic(bool) */ void IncreasedPrecis (bool incp) { incPrec = incp; } /*! \brief Toggle the orientation of generated random numbers. * * This methods causes the given stream to generate antithetic * (1 - U, where U is the default number generated) when called * with \a a = true. Calling this method with \a a = false will * return generated numbers to their default orientation. * * \param a A boolean value that selects regular or antithetic * variates. * * \see IncreasedPrecis(bool) */ void SetAntithetic (bool a) { anti = a; } /*! \brief Generate a random uniform variate on (0, 1). * * This routine returns a random double precision floating point * number from the uniform distribution on the interval (0, * 1). This method overloads the pure virtual method of the * same name in the rng base class. * * \see runif(unsigned int, unsigned int) * \see RandInt(long, long) * \see rng */ double runif () { if (incPrec) return U01d(); else return U01(); } /* We have to override the overloaded form of runif because * overloading the no-arg runif() hides the base class * definition; C++ stops looking once it finds the above. */ /*! \brief Generate a Matrix of random uniform variates. * * This routine returns a Matrix of double precision random * uniform variates. on the interval (0, 1). This method * overloads the virtual method of the same name in the rng base * class. * * This is the general template version of this method and * is called through explicit template instantiation. * * \param rows The number of rows in the returned Matrix. * \param cols The number of columns in the returned Matrix. * * \see runif() * \see rng * * \note We are forced to override this overloaded method * because the 1-arg version of runif() hides the base class's * definition of this method from the compiler, although it * probably should not. */ template Matrix runif(unsigned int rows, unsigned int cols) { return rng::runif(rows,cols); } /*! \brief Generate a Matrix of random uniform variates. * * This routine returns a Matrix of double precision random * uniform variates on the interval (0, 1). This method * overloads the virtual method of the same name in the rng base * class. * * This is the default template version of this method and * is called through implicit template instantiation. * * \param rows The number of rows in the returned Matrix. * \param cols The number of columns in the returned Matrix. * * \see runif() * \see rng * * \note We are forced to override this overloaded method * because the 1-arg version of runif() hides the base class's * definition of this method from the compiler, although it * probably should not. */ Matrix runif(unsigned int rows, unsigned int cols) { return rng::runif(rows, cols); } /*! \brief Generate the next random integer. * * This method generates a random integer from the discrete * uniform distribution on the interval [\a low, \a high]. * * \param low The lower bound of the interval to evaluate. * \param high the upper bound of the interval to evaluate. 
* * \see runif() */ long RandInt (long low, long high) { return low + static_cast ((high - low + 1) * runif ()); } protected: // Generate the next random number. // double U01 () { long k; double p1, p2, u; /* Component 1 */ p1 = a12 * Cg[1] - a13n * Cg[0]; k = static_cast (p1 / m1); p1 -= k * m1; if (p1 < 0.0) p1 += m1; Cg[0] = Cg[1]; Cg[1] = Cg[2]; Cg[2] = p1; /* Component 2 */ p2 = a21 * Cg[5] - a23n * Cg[3]; k = static_cast (p2 / m2); p2 -= k * m2; if (p2 < 0.0) p2 += m2; Cg[3] = Cg[4]; Cg[4] = Cg[5]; Cg[5] = p2; /* Combination */ u = ((p1 > p2) ? (p1 - p2) * norm : (p1 - p2 + m1) * norm); return (anti == false) ? u : (1 - u); } // Generate the next random number with extended (53 bits) precision. double U01d () { double u; u = U01(); if (anti) { // Don't forget that U01() returns 1 - u in the antithetic case u += (U01() - 1.0) * fact; return (u < 0.0) ? u + 1.0 : u; } else { u += U01() * fact; return (u < 1.0) ? u : (u - 1.0); } } // Public members of the class start here // The default seed of the package; will be the seed of the first // declared RngStream, unless SetPackageSeed is called. static double nextSeed[6]; /* Instance variables */ double Cg[6], Bg[6], Ig[6]; bool anti, incPrec; std::string streamname_; }; #ifndef SCYTHE_RPACK /* Default seed definition */ double lecuyer::nextSeed[6] = { 12345.0, 12345.0, 12345.0, 12345.0, 12345.0, 12345.0 }; #endif } #endif /* SCYTHE_LECUYER_H */ MCMCpack/src/lecuyer.cc0000644000176000001440000000037012140061657014430 0ustar ripleyusers#ifndef SCYTHE_LECUYER_CC #define SCYTHE_LECUYER_CC #include "lecuyer.h" namespace scythe { /* Default seed definition */ double lecuyer::nextSeed[6] = { 12345.0, 12345.0, 12345.0, 12345.0, 12345.0, 12345.0 }; } #endif MCMCpack/src/lapack.h0000644000176000001440000001202712140061657014057 0ustar ripleyusers/* * Scythe Statistical Library Copyright (C) 2000-2002 Andrew D. Martin * and Kevin M. Quinn; 2002-present Andrew D. Martin, Kevin M. Quinn, * and Daniel Pemstein. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify under the terms of the GNU General Public License as * published by Free Software Foundation; either version 2 of the * License, or (at your option) any later version. See the text files * COPYING and LICENSE, distributed with this source code, for further * information. * -------------------------------------------------------------------- * scythe/lapack.h * */ /*! * \file lapack.h * \brief Definitions that provide access to LAPACK/BLAS fortran * routines for internal library functions. * * This file provides function definitions that help provide * LAPACK/BLAS support to Scythe functions. These definitions are not * part of Scythe's public interface and are used exclusively from * within the library. 
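 *
 * As an illustrative sketch only (mirroring the way other Scythe
 * headers invoke these declarations), a column-major product
 * C = A * B would be dispatched through dgemm_ roughly as follows,
 * where m, n, and k are the usual BLAS dimensions, one and zero are
 * doubles, and Apnt, Bpnt, and Cpnt are raw array pointers:
 * \code
 * lapack::dgemm_("N", "N", &m, &n, &k, &one, Apnt, &m,
 *                Bpnt, &k, &zero, Cpnt, &m);
 * \endcode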
* */ #ifndef SCYTHE_LAPACK_H #define SCYTHE_LAPACK_H #ifdef SCYTHE_COMPILE_DIRECT #endif namespace scythe { namespace lapack { inline void make_symmetric(double* matrix, int rows) { for (int i = 1; i < rows; ++i) for (int j = 0; j < i; ++j) matrix[i * rows + j] = matrix[j * rows + i]; } extern "C" { /* Matrix multiplication and gaxpy */ void dgemm_ (char* transa, char* transb, const int* m, const int* n, const int* k, const double* alpha, const double* a, const int* lda, const double* b, const int* ldb, const double* beta, double* c, const int* ldc); /* Matrix cross product A'A */ void dsyrk_(const char* uplo, const char* trans, const int* n, const int* k, const double* alpha, const double* a, const int* lda, const double* beta, double* c, const int* ldc); /* LU decomposition */ void dgetrf_ (const int* rows, const int* cols, double* a, const int* lda, int* ipiv, int *info); /* General inversion (given LU decomposion)*/ void dgetri_ (const int* n, double* a, const int* lda, const int* ipiv, double* work, const int* lwork, int* info); /* Cholesky decomposition */ void dpotrf_(const char* uplo, const int* n, double* a, const int* lda, int* info); /* chol_solve give cholesky */ void dpotrs_ (const char* uplo, const int* n, const int* nrhs, const double* a, const int* lda, double *b, const int* ldb, int* info); /* chol_solve from A and b */ void dposv_ (const char* uplo, const int* n, const int* nrhs, double* a, const int* lda, double* b, const int* ldb, int* info); /* Positive Definite Inversion (given LU decomposition) */ void dpotri_(const char* uplo, const int* n, double* a, const int* lda, int* info); /* Eigenvalues/vectors for general (nonsymmetric) square matrices */ void dgeev_(const char* jobvl, const char* jobvr, const int* n, double* a, const int* lda, double* wr, double* wi, double* vl, const int* ldvl, double* vr, const int* ldvr, double* work, const int* lwork, int* info); /* Eigenvalues/vectors for symmetric matrices */ void dsyevr_ (const char* jobz, const char* range, const char* uplo, const int* n, double* a, const int* lda, double* vl, double* vu, const int* il, const int* iu, const double* abstol, const int* m, double* w, double* z, const int* ldz, int* isuppz, double* work, int* lwork, int* iwork, const int* liwork, int* info); /* QR decomposition */ void dgeqp3_ (const int* m, const int* n, double* a, const int* lda, int* jpvt, double* tau, double* work, const int* lwork, int* info); /* QR solve routines */ void dormqr_ (const char* side, const char* trans, const int* m, const int* n, const int* k, const double* a, const int* lda, const double* tau, double* c, const int* ldc, double* work, const int* lwork, int* info); void dtrtrs_ (const char* uplo, const char* trans, const char* diag, const int* n, const int* nrhs, const double* a, const int* lda, double* b, const int* ldb, int* info); /* SVD */ void dgesdd_ (const char* jobz, const int* m, const int* n, double* a, const int* lda, double* s, double* u, const int* ldu, double* vt, const int* ldvt, double* work, const int* lwork, int* iwork, int* info); } // end extern } // end namespace lapack } // end namespace scythe #endif /* SCYTHE_LAPACK_H */ MCMCpack/src/la.h0000644000176000001440000007215512140061657013230 0ustar ripleyusers/* * Scythe Statistical Library Copyright (C) 2000-2002 Andrew D. Martin * and Kevin M. Quinn; 2002-present Andrew D. Martin, Kevin M. Quinn, * and Daniel Pemstein. All Rights Reserved. 
* * This program is free software; you can redistribute it and/or * modify under the terms of the GNU General Public License as * published by Free Software Foundation; either version 2 of the * License, or (at your option) any later version. See the text files * COPYING and LICENSE, distributed with this source code, for further * information. * -------------------------------------------------------------------- * scythestat/la.h * */ /*! * \file la.h * \brief Definitions and implementations for functions that perform * common linear algebra manipulations on Scythe Matrix objects. * * This file provides a number of common linear algebraic functions * for use with the Matrix class. These functions include common * operations such as transposition, a number of utility functions for * creating useful matrices like the identity matrix, and efficient * implementations for common operations like the cross-product. * * \note As is the case throughout the library, we provide both * general and default template definitions of the Matrix-returning * functions in this file, explicitly providing documentation for only * the general template versions. */ #ifndef SCYTHE_LA_H #define SCYTHE_LA_H #ifdef SCYTHE_COMPILE_DIRECT #include "matrix.h" #include "algorithm.h" #include "error.h" #ifdef SCYTHE_LAPACK #include "lapack.h" #endif #else #include "scythestat/matrix.h" #include "scythestat/algorithm.h" #include "scythestat/error.h" #ifdef SCYTHE_LAPACK #include "scythestat/lapack.h" #endif #endif #include #include #include namespace scythe { namespace { typedef unsigned int uint; } /* Matrix transposition */ /*!\brief Transpose a Matrix. * * This function transposes \a M, returning a Matrix \a R where each * element of \a M, \f$M_ij\f$ is placed in position \f$R_ji\f$. * Naturally, the returned Matrix has M.cols() rows and M.rows() * columns. * * \param M The Matrix to transpose. * * \throw scythe_alloc_error (Level 1) * */ template Matrix t (const Matrix& M) { uint rows = M.rows(); uint cols = M.cols(); Matrix ret(cols, rows, false); if (PO == Col) copy(M, ret); else copy(M, ret); SCYTHE_VIEW_RETURN(T, RO, RS, ret) } template Matrix t (const Matrix& M) { return t(M); } /* Ones matrix generation */ /*! * \brief Create a matrix of ones. * * This function creates a matrix of ones, with the given dimensions * \a rows and \a cols. * * \param rows The number of rows in the resulting Matrix. * \param cols The number of columns in the resulting Matrix. * * \see eye (unsigned int k) * * \throw scythe_alloc_error (Level 1) */ template Matrix ones (unsigned int rows, unsigned int cols) { return Matrix (rows, cols, true, (T) 1); } template Matrix ones (unsigned int rows, unsigned int cols) { return ones(rows, cols); } template Matrix ones (unsigned int rows, unsigned int cols) { return ones(rows, cols); } inline Matrix ones (unsigned int rows, unsigned int cols) { return ones(rows, cols); } /* Identity Matrix generation */ // This functor contains the working parts of the eye algorithm. namespace { template struct eye_alg { T operator() (uint i, uint j) { if (i == j) return (T) 1.0; return (T) 0.0; } }; } /*!\brief Create a \a k by \a k identity Matrix. * * This function creates a \a k by \a k Matrix with 1s along the * diagonal and 0s on the off-diagonal. This template is overloaded * multiple times to provide default type, matrix_order, and * matrix_style. The default call to eye returns a Concrete Matrix * containing double precision floating point numbers, in * column-major order. 
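 * For instance, an illustrative default call would be
 * \code
 * Matrix<> I = eye(3);   // 3 x 3 identity of doubles, column-major
 * \endcode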
The user can write explicit template calls * to generate matrices with other orders and/or styles. * * \param k The dimension of the identity Matrix. * * \see diag(const Matrix& M) * \see ones(unsigned int rows, unsigned int cols) * * \throw scythe_alloc_error (Level 1) * */ template Matrix eye (unsigned int k) { Matrix ret(k, k, false); for_each_ij_set(ret, eye_alg()); SCYTHE_VIEW_RETURN(T, O, S, ret) } template Matrix eye (uint k) { return eye(k); } template Matrix eye (uint k) { return eye(k); } inline Matrix eye (uint k) { return eye(k); } /* Create a k x 1 vector-additive sequence matrix */ // The seqa algorithm namespace { template struct seqa_alg { T cur_; T inc_; seqa_alg(T start, T inc) : cur_ (start), inc_ (inc) {} T operator() () { T ret = cur_; cur_ += inc_; return ret; } }; } /*! * \brief Create a \a rows x 1 vector-additive sequence Matrix. * * This function creates a \a rows x 1 Matrix \f$v\f$, where * \f$v_i = \mbox{start} + i \cdot \mbox{incr}\f$. * * This function is defined by a series of templates. This template * is the most general, requiring the user to explicitly instantiate * the template in terms of element type, matrix_order and * matrix_style. Further versions allow for explicit instantiation * based just on type and matrix_order (with matrix_style defaulting * to Concrete) and just on type (with matrix_style defaulting to * Col). Finally, the default version of th function generates * column-major concrete Matrix of doubles. * * \param start Desired start value. * \param incr Amount to add in each step of the sequence. * \param rows Total number of rows in the Matrix. * * \throw scythe_alloc_error (Level 1) */ template Matrix seqa (T start, T incr, uint rows) { Matrix ret(rows, 1, false); generate(ret.begin_f(), ret.end_f(), seqa_alg(start, incr)); SCYTHE_VIEW_RETURN(T, O, S, ret) } template Matrix seqa (T start, T incr, uint rows) { return seqa(start, incr, rows); } template Matrix seqa (T start, T incr, uint rows) { return seqa(start, incr, rows); } inline Matrix seqa (double start, double incr, uint rows) { return seqa(start, incr, rows); } /* Uses the STL sort to sort a Matrix in ascending row-major order */ /*! * \brief Sort a Matrix. * * This function returns a copy of \a M, sorted in ascending order. * The sorting order is determined by the template parameter * SORT_ORDER or, by default, to matrix_order of \a M. * * \param M The Matrix to sort. * * \see sortc * * \throw scythe_alloc_error (Level 1) */ template Matrix sort (const Matrix& M) { Matrix ret = M; std::sort(ret.template begin(), ret.template end()); SCYTHE_VIEW_RETURN(T, RO, RS, ret) } template Matrix sort (const Matrix& M) { return sort(M); } template Matrix sort (const Matrix& M) { return sort(M); } /*!\brief Sort the columns of a Matrix. * * This function returns a copy of \a M, with each column sorted in * ascending order. * * \param M The Matrix to sort. * * \see sort * * \throw scythe_alloc_error (Level 1) */ template Matrix sortc (const Matrix& M) { Matrix ret = M; // TODO need to figure out a way to do fully optimized // vector iteration for (uint col = 0; col < ret.cols(); ++col) { Matrix column = ret(_, col); std::sort(column.begin(), column.end()); } SCYTHE_VIEW_RETURN(T, RO, RS, ret) } template Matrix sortc(const Matrix& M) { return sortc(M); } /* Column bind two matrices */ /*! * \brief Column bind two matrices. * * This function column binds two matrices, \a A and \a B. * * \param A The left-hand Matrix. * \param B The right-hand Matrix. 
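 *
 * An illustrative sketch (using the ones() and eye() helpers defined
 * earlier in this file):
 * \code
 * Matrix<> A  = ones(2, 2);
 * Matrix<> B  = eye(2);
 * Matrix<> AB = cbind(A, B);   // 2 x 4: columns of A, then columns of B
 * \endcode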
* * \see rbind(const Matrix& A, * const Matrix& B) * * \throw scythe_conformation_error (Level 1) * \throw scythe_alloc_error (Level 1) */ template Matrix cbind (const Matrix& A, const Matrix& B) { SCYTHE_CHECK_10(A.rows() != B.rows(), scythe_conformation_error, "Matrices have different numbers of rows"); Matrix ret(A.rows(), A.cols() + B.cols(), false); std::copy(B.template begin_f(), B.template end_f(), std::copy(A.template begin_f(), A.template end_f(), ret.template begin_f())); SCYTHE_VIEW_RETURN(T, RO, RS, ret) } template Matrix cbind (const Matrix& A, const Matrix& B) { return cbind(A, B); } /* Row bind two matrices */ /*! * \brief Row bind two matrices. * * This function row binds two matrices, \a A and \a B. * * \param A The upper Matrix. * \param B The lower Matrix. * * \see cbind(const Matrix& A, * const Matrix& B) * * \throw scythe_alloc_error (Level 1) * \throw scythe_conformation_error (Level 1) */ template Matrix rbind (const Matrix& A, const Matrix& B) { SCYTHE_CHECK_10(A.cols() != B.cols(), scythe_conformation_error, "Matrices have different numbers of columns"); Matrix ret(A.rows() + B.rows(), A.cols(), false); std::copy(B.template begin_f(), B.template end_f(), std::copy(A.template begin_f(), A.template end_f(), ret.template begin_f())); SCYTHE_VIEW_RETURN(T, RO, RS, ret) } template Matrix rbind (const Matrix& A, const Matrix& B) { return rbind(A, B); } /* Calculates the order of each element in a Matrix */ // Functor encapsulating the meat of the algorithm namespace { template struct order_alg { Matrix M_; order_alg (const Matrix& M) : M_ (M) {} uint operator() (T x) { Matrix diff = (M_ < x); return std::accumulate(diff.begin_f(), diff.end_f(), (uint) 0); } }; } /*! * \brief Calculate the rank-order of each element in a Matrix. * * This function calculates the rank-order of each element in a * Matrix, returning a Matrix in which the \e i'th element * indicates the order position of the \e i'th element of \a M. * The returned Matrix contains unsigned integers. * * \param M A column vector. * * \throw scythe_alloc_error (Level 1) */ /* NOTE This function used to only work on column vectors. I see no * reason to maintain this restriction. */ template Matrix order (const Matrix& M) { Matrix ranks(M.rows(), M.cols(), false); std::transform(M.begin_f(), M.end_f(), ranks.template begin_f(), order_alg(M)); SCYTHE_VIEW_RETURN(uint, RO, RS, ranks) } template Matrix order (const Matrix& M) { return order(M); } /* Selects all the rows of Matrix A for which binary column vector e * has an element equal to 1 */ /*! * \brief Locate rows for which a binary column vector equals 1 * This function identifies all the rows of a Matrix \a M for which * the binary column vector \a e has an element equal to 1, * returning a Matrix * \param M The Matrix of interest. * \param e A boolean column vector. * * \see unique(const Matrix& M) * * \throw scythe_conformation_error (Level 1) * \throw scythe_dimension_error (Level 1) * \throw scythe_alloc_error (Level 1) */ template Matrix selif (const Matrix& M, const Matrix& e) { SCYTHE_CHECK_10(M.rows() != e.rows(), scythe_conformation_error, "Data matrix and selection vector have different number of rows"); SCYTHE_CHECK_10(! 
e.isColVector(), scythe_dimension_error, "Selection matrix is not a column vector"); uint N = std::accumulate(e.begin_f(), e.end_f(), (uint) 0); Matrix res(N, M.cols(), false); int cnt = 0; for (uint i = 0; i < e.size(); ++i) { if (e[i]) { Matrix Mvec = M(i, _); // TODO again, need optimized vector iteration std::copy(Mvec.begin_f(), Mvec.end_f(), res(cnt++, _).begin_f()); } } SCYTHE_VIEW_RETURN(T, RO, RS, res) } template Matrix selif (const Matrix& M, const Matrix& e) { return selif(M, e); } /* Find unique elements in a matrix and return a sorted row vector */ /*! * \brief Find unique elements in a Matrix. * * This function identifies all of the unique elements in a Matrix, * and returns them in a sorted row vector. * * \param M The Matrix to search. * * \see selif(const Matrix& M, const Matrix& e) * * \throw scythe_alloc_error (Level 1) */ template Matrix unique (const Matrix& M) { std::set u(M.begin_f(), M.end_f()); Matrix res(1, u.size(), false); std::copy(u.begin(), u.end(), res.begin_f()); SCYTHE_VIEW_RETURN(T, RO, RS, res) } template Matrix unique (const Matrix& M) { return unique(M); } /* NOTE I killed reshape. It seems redundant with resize. DBP */ /* Make vector out of unique elements of a symmetric Matrix. */ /*! * \brief Vectorize a symmetric Matrix. * * This function returns a column vector containing only those * elements necessary to reconstruct the symmetric Matrix, \a M. In * practice, this means extracting one triangle of \a M and * returning it as a vector. * * Note that the symmetry check in this function (active at error * level 3) is quite costly. * * \param M A symmetric Matrix. * * \throw scythe_dimension_error (Level 3) * \throw scythe_alloc_error (Level 1) * * \see xpnd(const Matrix& v) */ template Matrix vech (const Matrix& M) { SCYTHE_CHECK_30(! M.isSymmetric(), scythe_dimension_error, "Matrix not symmetric"); Matrix res((uint) (0.5 * (M.size() - M.rows())) + M.rows(), 1, false); typename Matrix::forward_iterator it = res.begin_f(); /* We want to traverse M in storage order if possible so we take * the upper triangle of row-order matrices and the lower triangle * of column-order matrices. */ if (M.storeorder() == Col) { for (uint i = 0; i < M.rows(); ++i) { Matrix strip = M(i, i, M.rows() - 1, i); it = std::copy(strip.begin_f(), strip.end_f(), it); } } else { for (uint j = 0; j < M.cols(); ++j) { Matrix strip = M(j, j, j, M.cols() - 1); it = std::copy(strip.begin_f(), strip.end_f(), it); } } SCYTHE_VIEW_RETURN(T, RO, RS, res) } template Matrix vech (const Matrix& M) { return vech(M); } /*! Expand a vector into a symmetric Matrix. * * This function takes the vector \a v and returns a symmetric * Matrix containing the elements of \a v within each triangle. * * \param \a v The vector expand. * * \see vech(const Matrix& M) * * \throw scythe_dimension_error (Level 1) * \throw scythe_alloc_error (Level 1) */ template Matrix xpnd (const Matrix& v) { double size_d = -.5 + .5 * std::sqrt(1. + 8 * v.size()); SCYTHE_CHECK_10(std::fmod(size_d, 1.) != 0., scythe_dimension_error, "Input vector can't generate square matrix"); uint size = (uint) size_d; Matrix res(size, size, false); /* It doesn't matter if we travel in order here. * TODO Might want to use iterators. */ uint cnt = 0; for (uint i = 0; i < size; ++i) for (uint j = i; j < size; ++j) res(i, j) = res(j, i) = v[cnt++]; SCYTHE_VIEW_RETURN(T, RO, RS, res) } template Matrix xpnd (const Matrix& v) { return xpnd(v); } /* Get the diagonal of a Matrix. */ /*! * \brief Return the diagonal of a Matrix. 
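 *
 * An illustrative sketch (A is any Matrix; the result holds the
 * elements A(0,0), A(1,1), ...):
 * \code
 * Matrix<> d = diag(A);
 * \endcode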
* * This function returns the diagonal of a Matrix in a row vector. * * \param M The Matrix one wishes to extract the diagonal of. * * \see crossprod (const Matrix &M) * * \throw scythe_alloc_error (Level 1) */ template Matrix diag (const Matrix& M) { Matrix res(std::min(M.rows(), M.cols()), 1, false); /* We want to use iterators to maximize speed for both concretes * and views, but we always want to tranvers M in order to avoid * slowing down concretes. */ uint incr = 1; if (PO == Col) incr += M.rows(); else incr += M.cols(); typename Matrix::const_iterator pit; typename Matrix::forward_iterator rit = res.begin_f(); for (pit = M.begin(); pit < M.end(); pit += incr) *rit++ = *pit; SCYTHE_VIEW_RETURN(T, RO, RS, res) } template Matrix diag (const Matrix& M) { return diag(M); } /* Fast calculation of A*B+C. */ namespace { // Algorithm when one matrix is 1x1 template void gaxpy_alg(Matrix& res, const Matrix& X, const Matrix& B, T constant) { res = Matrix(X.rows(), X.cols(), false); if (maj_col()) std::transform(X.template begin_f(), X.template end_f(), B.template begin_f(), res.template begin_f(), ax_plus_b(constant)); else std::transform(X.template begin_f(), X.template end_f(), B.template begin_f(), res.template begin_f(), ax_plus_b(constant)); } } /*! Fast caclulation of \f$AB + C\f$. * * This function calculates \f$AB + C\f$ efficiently, traversing the * matrices in storage order where possible, and avoiding the use of * extra temporary matrix objects. * * Matrices conform when \a A, \a B, and \a C are chosen with * dimensions * \f$((m \times n), (1 \times 1), (m \times n))\f$, * \f$((1 \times 1), (n \times k), (n \times k))\f$, or * \f$((m \times n), (n \times k), (m \times k))\f$. * * Scythe will use LAPACK/BLAS routines to compute \f$AB+C\f$ * with column-major matrices of double-precision floating point * numbers if LAPACK/BLAS is available and you compile your program * with the SCYTHE_LAPACK flag enabled. * * \param A A \f$1 \times 1\f$ or \f$m \times n\f$ Matrix. * \param B A \f$1 \times 1\f$ or \f$n \times k\f$ Matrix. * \param C A \f$m \times n\f$ or \f$n \times k\f$ or * \f$m \times k\f$ Matrix. * * \throw scythe_conformation_error (Level 0) * \throw scythe_alloc_error (Level 1) */ template Matrix gaxpy (const Matrix& A, const Matrix& B, const Matrix& C) { Matrix res; if (A.isScalar() && B.rows() == C.rows() && B.cols() == C.cols()) { // Case 1: 1x1 * nXk + nXk gaxpy_alg(res, B, C, A[0]); } else if (B.isScalar() && A.rows() == C.rows() && A.cols() == C.cols()) { // Case 2: m x n * 1 x 1 + m x n gaxpy_alg(res, A, C, B[0]); } else if (A.cols() == B.rows() && A.rows() == C.rows() && B.cols() == C.cols()) { // Case 3: m x n * n x k + m x k res = Matrix (A.rows(), B.cols(), false); /* These are identical to matrix mult, one optimized for * row-major and one for col-major. 
*/ T tmp; if (RO == Col) { // col-major optimized for (uint j = 0; j < B.cols(); ++j) { for (uint i = 0; i < A.rows(); ++i) res(i, j) = C(i, j); for (uint l = 0; l < A.cols(); ++l) { tmp = B(l, j); for (uint i = 0; i < A.rows(); ++i) res(i, j) += tmp * A(i, l); } } } else { // row-major optimized for (uint i = 0; i < A.rows(); ++i) { for (uint j = 0; j < B.cols(); ++j) res(i, j) = C(i, j); for (uint l = 0; l < B.rows(); ++l) { tmp = A(i, l); for (uint j = 0; j < B.cols(); ++j) res(i, j) += tmp * B(l,j); } } } } else { SCYTHE_THROW(scythe_conformation_error, "Expects (m x n * 1 x 1 + m x n)" << "or (1 x 1 * n x k + n x k)" << "or (m x n * n x k + m x k)"); } SCYTHE_VIEW_RETURN(T, RO, RS, res) } template Matrix gaxpy (const Matrix& A, const Matrix& B, const Matrix& C) { return gaxpy(A,B,C); } /*! Fast caclulation of \f$A'A\f$. * * This function calculates \f$A'A\f$ efficiently, traversing the * matrices in storage order where possible, and avoiding the use of * the temporary matrix objects. * * Scythe will use LAPACK/BLAS routines to compute the cross-product * of column-major matrices of double-precision floating point * numbers if LAPACK/BLAS is available and you compile your program * with the SCYTHE_LAPACK flag enabled. * * \param A The Matrix to return the cross product of. * * \see diag (const Matrix& M) */ template Matrix crossprod (const Matrix& A) { /* When rows > 1, we provide differing implementations of the * algorithm depending on A's ordering to maximize strided access. * * The non-vector version of the algorithm fills in a triangle and * then copies it over. */ Matrix res; T tmp; if (A.rows() == 1) { res = Matrix(A.cols(), A.cols(), true); for (uint k = 0; k < A.rows(); ++k) { for (uint i = 0; i < A.cols(); ++i) { tmp = A(k, i); for (uint j = i; j < A.cols(); ++j) { res(j, i) = res(i, j) += tmp * A(k, j); } } } } else { if (PO == Row) { // row-major optimized /* TODO: This is a little slower than the col-major. Improve. */ res = Matrix(A.cols(), A.cols(), true); for (uint k = 0; k < A.rows(); ++k) { for (uint i = 0; i < A.cols(); ++i) { tmp = A(k, i); for (uint j = i; j < A.cols(); ++j) { res(i, j) += tmp * A(k, j); } } } for (uint i = 0; i < A.cols(); ++i) for (uint j = i + 1; j < A.cols(); ++j) res(j, i) = res(i, j); } else { // col-major optimized res = Matrix(A.cols(), A.cols(), false); for (uint j = 0; j < A.cols(); ++j) { for (uint i = j; i < A.cols(); ++i) { tmp = (T) 0; for (uint k = 0; k < A.rows(); ++k) tmp += A(k, i) * A(k, j); res(i, j) = tmp; } } for (uint i = 0; i < A.cols(); ++i) for (uint j = i + 1; j < A.cols(); ++j) res(i, j) = res(j, i); } } SCYTHE_VIEW_RETURN(T, RO, RS, res) } template Matrix crossprod (const Matrix& M) { return crossprod(M); } #ifdef SCYTHE_LAPACK /* Template specializations of for col-major, concrete * matrices of doubles that are only available when a lapack library * is available. */ template<> inline Matrix<> gaxpy (const Matrix<>& A, const Matrix<>& B, const Matrix<>& C) { SCYTHE_DEBUG_MSG("Using lapack/blas for gaxpy"); Matrix<> res; if (A.isScalar() && B.rows() == C.rows() && B.cols() == C.cols()) { // Case 1: 1x1 * nXk + nXk gaxpy_alg(res, B, C, A[0]); } else if (B.isScalar() && A.rows() == C.rows() && A.cols() == C.cols()) { // Case 2: m x n * 1 x 1 + m x n gaxpy_alg(res, A, C, B[0]); } else if (A.cols() == B.rows() && A.rows() == C.rows() && B.cols() == C.cols()) { res = C; // NOTE: this copy may eat up speed gains, but can't be // avoided. 
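      // Illustrative note (added commentary, not in the original
      // source): the dgemm_ call below computes C := alpha*A*B + beta*C;
      // with alpha = beta = one it yields A*B + C directly in res,
      // which already holds a copy of C.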
// Case 3: m x n * n x k + m x k double* Apnt = A.getArray(); double* Bpnt = B.getArray(); double* respnt = res.getArray(); const double one(1.0); int rows = (int) res.rows(); int cols = (int) res.cols(); int innerDim = A.cols(); lapack::dgemm_("N", "N", &rows, &cols, &innerDim, &one, Apnt, &rows, Bpnt, &innerDim, &one, respnt, &rows); } return res; } template<> inline Matrix<> crossprod(const Matrix<>& A) { SCYTHE_DEBUG_MSG("Using lapack/blas for crossprod"); // Set up some constants const double zero = 0.0; const double one = 1.0; // Set up return value and arrays Matrix<> res(A.cols(), A.cols(), false); double* Apnt = A.getArray(); double* respnt = res.getArray(); int rows = (int) A.rows(); int cols = (int) A.cols(); lapack::dsyrk_("L", "T", &cols, &rows, &one, Apnt, &rows, &zero, respnt, &cols); lapack::make_symmetric(respnt, cols); return res; } #endif } // end namespace scythe #endif /* SCYTHE_LA_H */ MCMCpack/src/ide.h0000644000176000001440000020271612140061657013373 0ustar ripleyusers/* * Scythe Statistical Library Copyright (C) 2000-2002 Andrew D. Martin * and Kevin M. Quinn; 2002-present Andrew D. Martin, Kevin M. Quinn, * and Daniel Pemstein. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify under the terms of the GNU General Public License as * published by Free Software Foundation; either version 2 of the * License, or (at your option) any later version. See the text files * COPYING and LICENSE, distributed with this source code, for further * information. * -------------------------------------------------------------------- * scythestat/ide.h * * */ /*! \file ide.h * * \brief Definitions for inversion and decomposition functions that * operate on Scythe's Matrix objects. * * This file provides a number of common inversion and decomposition * routines that operate on Matrix objects. It also provides related * functions for solving linear systems of equations and calculating * the determinant of a Matrix. * * Scythe will use LAPACK/BLAS routines to perform these operations on * concrete column-major matrices of double-precision floating point * numbers if LAPACK/BLAS is available and you compile your program * with the SCYTHE_LAPACK flag enabled. * * \note As is the case throughout the library, we provide both * general and default template definitions of the Matrix-returning * functions in this file, explicitly providing documentation for only * the general template versions. As is also often the case, Doxygen * does not always correctly add the default template definition to * the function list below; there is always a default template * definition available for every function. */ /* TODO: This interface exposes the user to too much implementation. * We need a solve function and a solver object. By default, solve * would run lu_solve and the solver factory would return lu_solvers * (or perhaps a solver object encapsulating an lu_solver). Users * could choose cholesky when appropriate. Down the road, qr or svd * would become the default and we'd be able to handle non-square * matrices. Instead of doing an lu_decomp or a cholesky and keeping * track of the results to repeatedly solve for different b's with A * fixed in Ax=b, you'd just call the operator() on your solver object * over and over, passing the new b each time. No decomposition * specific solvers (except as toggles to the solver object and * solve function). We'd still provide cholesky and lu_decomp. 
We * could also think about a similar approach to inversion (one * inversion function with an option for method). * * If virtual dispatch in C++ wasn't such a performance killer (no * compiler optimization across virtual calls!!!) there would be an * obvious implementation of this interface using simple polymorphism. * Unfortunately, we need compile-time typing to maintain performance * and makes developing a clean interface that doesn't force users to * be template wizards much harder. Initial experiments with the * Barton and Nackman trick were ugly. The engine approach might work * a bit better but has its problems too. This is not going to get * done for the 1.0 release, but it is something we should come back * to. * */ #ifndef SCYTHE_IDE_H #define SCYTHE_IDE_H #ifdef SCYTHE_COMPILE_DIRECT #include "matrix.h" #include "error.h" #include "defs.h" #ifdef SCYTHE_LAPACK #include "lapack.h" #include "stat.h" #endif #else #include "scythestat/matrix.h" #include "scythestat/error.h" #include "scythestat/defs.h" #ifdef SCYTHE_LAPACK #include "scythestat/lapack.h" #include "scythestat/stat.h" #endif #endif #include #include #include namespace scythe { namespace { typedef unsigned int uint; } /*! * \brief Cholesky decomposition of a symmetric positive-definite * matrix. * * This function performs Cholesky decomposition. That is, given a * symmetric positive definite Matrix, \f$A\f$, cholesky() returns a * lower triangular Matrix \f$L\f$ such that \f$A = LL^T\f$. This * function is faster than lu_decomp() and, therefore, preferable in * cases where one's Matrix is symmetric positive definite. * * \param A The symmetric positive definite Matrix to decompose. * * \see chol_solve(const Matrix &, const Matrix &) * \see chol_solve(const Matrix &, const Matrix &, const Matrix &) * \see lu_decomp(Matrix, Matrix&, Matrix&, Matrix&) * * \throw scythe_alloc_error (Level 1) * \throw scythe_dimension_error (Level 1) * \throw scythe_null_error (Level 1) * \throw scythe_type_error (Level 2) * \throw scythe_alloc_error (Level 1) * */ template Matrix cholesky (const Matrix& A) { SCYTHE_CHECK_10(! A.isSquare(), scythe_dimension_error, "Matrix not square"); SCYTHE_CHECK_10(A.isNull(), scythe_null_error, "Matrix is NULL"); // Rounding errors can make this problematic. Leaving out for now //SCYTHE_CHECK_20(! A.isSymmetric(), scythe_type_error, // "Matrix not symmetric"); Matrix temp (A.rows(), A.cols(), false); T h; if (PO == Row) { // row-major optimized for (uint i = 0; i < A.rows(); ++i) { for (uint j = i; j < A.cols(); ++j) { h = A(i,j); for (uint k = 0; k < i; ++k) h -= temp(i, k) * temp(j, k); if (i == j) { SCYTHE_CHECK_20(h <= (T) 0, scythe_type_error, "Matrix not positive definite"); temp(i,i) = std::sqrt(h); } else { temp(j,i) = (((T) 1) / temp(i,i)) * h; temp(i,j) = (T) 0; } } } } else { // col-major optimized for (uint j = 0; j < A.cols(); ++j) { for (uint i = j; i < A.rows(); ++i) { h = A(i, j); for (uint k = 0; k < j; ++k) h -= temp(j, k) * temp(i, k); if (i == j) { SCYTHE_CHECK_20(h <= (T) 0, scythe_type_error, "Matrix not positive definite"); temp(j,j) = std::sqrt(h); } else { temp(i,j) = (((T) 1) / temp(j,j)) * h; temp(j,i) = (T) 0; } } } } SCYTHE_VIEW_RETURN(T, RO, RS, temp) } template Matrix cholesky (const Matrix& A) { return cholesky(A); } namespace { /* This internal routine encapsulates the * algorithm used within chol_solve and lu_solve. */ template inline void solve(const Matrix& L, const Matrix& U, Matrix b, T* x, T* y) { T sum; /* TODO: Consider optimizing for ordering. 
Experimentation * shows performance gains are probably minor (compared col-major * with and without lapack solve routines). */ // solve M*y = b for (uint i = 0; i < b.size(); ++i) { sum = T (0); for (uint j = 0; j < i; ++j) { sum += L(i,j) * y[j]; } y[i] = (b[i] - sum) / L(i, i); } // solve M'*x = y if (U.isNull()) { // A= LL^T for (int i = b.size() - 1; i >= 0; --i) { sum = T(0); for (uint j = i + 1; j < b.size(); ++j) { sum += L(j,i) * x[j]; } x[i] = (y[i] - sum) / L(i, i); } } else { // A = LU for (int i = b.size() - 1; i >= 0; --i) { sum = T(0); for (uint j = i + 1; j < b.size(); ++j) { sum += U(i,j) * x[j]; } x[i] = (y[i] - sum) / U(i, i); } } } } /*!\brief Solve \f$Ax=b\f$ for x via backward substitution, given a * lower triangular matrix resulting from Cholesky decomposition * * This function solves the system of equations \f$Ax = b\f$ via * backward substitution. \a L is the lower triangular matrix generated * by Cholesky decomposition such that \f$A = LL'\f$. * * This function is intended for repeatedly solving systems of * equations based on \a A. That is \a A stays constant while \a * b varies. * * \param A A symmetric positive definite Matrix. * \param b A column vector with as many rows as \a A. * \param M The lower triangular matrix from the Cholesky decomposition of \a A. * * \see chol_solve(const Matrix&, const Matrix&) * \see cholesky(const Matrix&) * \see lu_solve (const Matrix&, const Matrix&, const Matrix&, const Matrix&, const Matrix&) * \see lu_solve (Matrix, const Matrix&) * * \throw scythe_alloc_error (Level 1) * \throw scythe_null_error (Level 1) * \throw scythe_dimension_error (Level 1) * \throw scythe_conformation_error (Level 1) * */ template Matrix chol_solve (const Matrix& A, const Matrix& b, const Matrix& M) { SCYTHE_CHECK_10(A.isNull(), scythe_null_error, "A is NULL") SCYTHE_CHECK_10(! b.isColVector(), scythe_dimension_error, "b must be a column vector"); SCYTHE_CHECK_10(A.rows() != b.rows(), scythe_conformation_error, "A and b do not conform"); SCYTHE_CHECK_10(A.rows() != M.rows(), scythe_conformation_error, "A and M do not conform"); SCYTHE_CHECK_10(! M.isSquare(), scythe_dimension_error, "M must be square"); T *y = new T[A.rows()]; T *x = new T[A.rows()]; solve(M, Matrix<>(), b, x, y); Matrix result(A.rows(), 1, x); delete[]x; delete[]y; return result; } template Matrix chol_solve (const Matrix& A, const Matrix& b, const Matrix& M) { return chol_solve(A,b,M); } /*!\brief Solve \f$Ax=b\f$ for x via backward substitution, * using Cholesky decomposition * * This function solves the system of equations \f$Ax = b\f$ via * backward substitution and Cholesky decomposition. \a A must be a * symmetric positive definite matrix for this method to work. This * function calls cholesky() to perform the decomposition. * * \param A A symmetric positive definite matrix. * \param b A column vector with as many rows as \a A. 
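 *
 * An illustrative sketch (A symmetric positive definite, b a
 * conforming column vector):
 * \code
 * Matrix<> x = chol_solve(A, b);   // solves A x = b via cholesky(A)
 * \endcode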
* * \see chol_solve(const Matrix&, const Matrix&, const Matrix&) * \see cholesky(const Matrix&) * \see lu_solve (const Matrix&, const Matrix&, const Matrix&, const Matrix&, const Matrix&) * \see lu_solve (Matrix, const Matrix&) * * \throw scythe_alloc_error (Level 1) * \throw scythe_null_error (Level 1) * \throw scythe_conformation_error (Level 1) * \throw scythe_dimension_error (Level 1) * \throw scythe_type_error (Level 2) * \throw scythe_alloc_error (Level 1) * */ template Matrix chol_solve (const Matrix& A, const Matrix& b) { /* NOTE: cholesky() call does check for square/posdef of A, * and the overloaded chol_solve call handles dimensions */ return chol_solve(A, b, cholesky(A)); } template Matrix chol_solve (const Matrix& A, const Matrix& b) { return chol_solve(A, b); } /*!\brief Calculates the inverse of a symmetric positive definite * matrix, given a lower triangular matrix resulting from Cholesky * decomposition. * * This function returns the inverse of a symmetric positive * definite matrix. Unlike the one-parameter version, this function * requires the caller to perform Cholesky decomposition on the * matrix to invert, ahead of time. * * \param A The symmetric positive definite matrix to invert. * \param M The lower triangular matrix from the Cholesky decomposition of \a A. * * \see invpd(const Matrix&) * \see inv(const Matrix&, const Matrix&, const Matrix&, const Matrix&) * \see inv(const Matrix&) * \see cholesky(const Matrix&) * * \throw scythe_alloc_error (Level 1) * \throw scythe_null_error (Level 1) * \throw scythe_conformation_error (Level 1) * \throw scythe_dimension_error (Level 1) */ template Matrix invpd (const Matrix& A, const Matrix& M) { SCYTHE_CHECK_10(A.isNull(), scythe_null_error, "A is NULL") SCYTHE_CHECK_10(! A.isSquare(), scythe_dimension_error, "A is not square") SCYTHE_CHECK_10(A.rows() != M.cols() || A.cols() != M.rows(), scythe_conformation_error, "A and M do not conform"); // for chol_solve block T *y = new T[A.rows()]; T *x = new T[A.rows()]; Matrix b(A.rows(), 1); // full of zeros Matrix null; // For final answer Matrix Ainv(A.rows(), A.cols(), false); for (uint k = 0; k < A.rows(); ++k) { b[k] = (T) 1; solve(M, null, b, x, y); b[k] = (T) 0; for (uint l = 0; l < A.rows(); ++l) Ainv(l,k) = x[l]; } delete[] y; delete[] x; SCYTHE_VIEW_RETURN(T, RO, RS, Ainv) } template Matrix invpd (const Matrix& A, const Matrix& M) { return invpd(A, M); } /*!\brief Calculate the inverse of a symmetric positive definite * matrix. * * This function returns the inverse of a symmetric positive definite * matrix, using cholesky() to do the necessary decomposition. This * method is significantly faster than the generalized inverse * function. * * \param A The symmetric positive definite matrix to invert. * * \see invpd(const Matrix&, const Matrix&) * \see inv (const Matrix&, const Matrix&, const Matrix&, const Matrix&) * \see inv (const Matrix&) * * \throw scythe_alloc_error (Level 1) * \throw scythe_null_error (Level 1) * \throw scythe_conformation_error (Level 1) * \throw scythe_dimension_error (Level 1) * \throw scythe_type_error (Level 2) */ template Matrix invpd (const Matrix& A) { // Cholesky checks to see if A is square and symmetric return invpd(A, cholesky(A)); } template Matrix invpd (const Matrix& A) { return invpd(A); } /* This code is based on Algorithm 3.4.1 of Golub and Van Loan 3rd * edition, 1996. Major difference is in how the output is * structured. Returns the sign of the row permutation (used by * det). Internal function, doesn't need doxygen. 
*/ namespace { template inline T lu_decomp_alg(Matrix& A, Matrix& L, Matrix& U, Matrix& perm_vec) { if (A.isRowVector()) { L = Matrix (1, 1, true, 1); // all 1s U = A; perm_vec = Matrix(1, 1); // all 0s return (T) 0; } L = U = Matrix(A.rows(), A.cols(), false); perm_vec = Matrix (A.rows() - 1, 1, false); uint pivot; T temp; T sign = (T) 1; for (uint k = 0; k < A.rows() - 1; ++k) { pivot = k; // find pivot for (uint i = k; i < A.rows(); ++i) { if (std::fabs(A(pivot,k)) < std::fabs(A(i,k))) pivot = i; } SCYTHE_CHECK_20(A(pivot,k) == (T) 0, scythe_type_error, "Matrix is singular"); // permute if (k != pivot) { sign *= -1; for (uint i = 0; i < A.rows(); ++i) { temp = A(pivot,i); A(pivot,i) = A(k,i); A(k,i) = temp; } } perm_vec[k] = pivot; for (uint i = k + 1; i < A.rows(); ++i) { A(i,k) = A(i,k) / A(k,k); for (uint j = k + 1; j < A.rows(); ++j) A(i,j) = A(i,j) - A(i,k) * A(k,j); } } L = A; for (uint i = 0; i < A.rows(); ++i) { for (uint j = i; j < A.rows(); ++j) { U(i,j) = A(i,j); L(i,j) = (T) 0; L(i,i) = (T) 1; } } return sign; } } /* Calculates the LU Decomposition of a square Matrix */ /* Note that the L, U, and perm_vec must be concrete. A is passed by * value, because it is changed during the decomposition. If A is a * view, it will get mangled, but the decomposition will work fine. * Not sure what the copy/view access trade-off is, but passing a * view might speed things up if you don't care about messing up * your matrix. */ /*! \brief LU decomposition of a square matrix. * * This function performs LU decomposition. That is, given a * non-singular square matrix \a A and three matrix references, \a * L, \a U, and \a perm_vec, lu_decomp fills the latter three * matrices such that \f$LU = A\f$. This method does not actually * calculate the LU decomposition of \a A, but of a row-wise * permutation of \a A. This permutation is recorded in perm_vec. * * \note Note that \a L, \a U, and \a perm_vec must be concrete. * \a A is passed by value because the function modifies it during * the decomposition. Users should generally avoid passing Matrix * views as the first argument to this function because this * results in modification to the Matrix being viewed. * * \param A Non-singular square matrix to decompose. * \param L Lower triangular portion of LU decomposition of A. * \param U Upper triangular portion of LU decomposition of A. * \param perm_vec Permutation vector recording the row-wise permutation of A actually decomposed by the algorithm. * * \see cholesky (const Matrix&) * \see lu_solve (const Matrix&, const Matrix&, const Matrix&, const Matrix&, const Matrix&) * \see lu_solve (Matrix, const Matrix&) * * \throw scythe_null_error (Level 1) * \throw scythe_dimension_error (Level 1) * \throw scythe_type_error (Level 2) */ template void lu_decomp(Matrix A, Matrix& L, Matrix& U, Matrix& perm_vec) { SCYTHE_CHECK_10(A.isNull(), scythe_null_error, "A is NULL") SCYTHE_CHECK_10(! A.isSquare(), scythe_dimension_error, "Matrix A not square"); lu_decomp_alg(A, L, U, perm_vec); } /* lu_solve overloaded: you need A, b + L, U, perm_vec from * lu_decomp. * */ /*! \brief Solve \f$Ax=b\f$ for x via forward and backward * substitution, given the results of a LU decomposition. * * This function solves the system of equations \f$Ax = b\f$ via * forward and backward substitution and LU decomposition. \a A * must be a non-singular square matrix for this method to work. * This function requires the actual LU decomposition to be * performed ahead of time; by lu_decomp() for example. 
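 *
 * An illustrative sketch of the intended pattern (declarations of
 * the factor matrices and the permutation vector are elided here;
 * lu_decomp() fills them in):
 * \code
 * lu_decomp(A, L, U, perm_vec);                   // factor A once
 * Matrix<> x1 = lu_solve(A, b1, L, U, perm_vec);  // reuse the factors
 * Matrix<> x2 = lu_solve(A, b2, L, U, perm_vec);  // for several b's
 * \endcode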
* * This function is intended for repeatedly solving systems of * equations based on \a A. That is \a A stays constant while \a * b varies. * * \param A Non-singular square Matrix to decompose, passed by reference. * \param b Column vector with as many rows as \a A. * \param L Lower triangular portion of LU decomposition of \a A. * \param U Upper triangular portion of LU decomposition of \a A. * \param perm_vec Permutation vector recording the row-wise permutation of \a A actually decomposed by the algorithm. * * \see lu_solve (Matrix, const Matrix&) * \see lu_decomp(Matrix, Matrix&, Matrix&, Matrix&) * \see chol_solve(const Matrix &, const Matrix &) * \see chol_solve(const Matrix &, const Matrix &, const Matrix &) * * \throw scythe_null_error (Level 1) * \throw scythe_dimension_error (Level 1) * \throw scythe_conformation_error (Level 1) */ template Matrix lu_solve (const Matrix& A, const Matrix& b, const Matrix& L, const Matrix& U, const Matrix &perm_vec) { SCYTHE_CHECK_10(A.isNull(), scythe_null_error, "A is NULL") SCYTHE_CHECK_10(! b.isColVector(), scythe_dimension_error, "b is not a column vector"); SCYTHE_CHECK_10(! A.isSquare(), scythe_dimension_error, "A is not square"); SCYTHE_CHECK_10(A.rows() != b.rows(), scythe_conformation_error, "A and b have different row sizes"); SCYTHE_CHECK_10(A.rows() != L.rows() || A.rows() != U.rows() || A.cols() != L.cols() || A.cols() != U.cols(), scythe_conformation_error, "A, L, and U do not conform"); SCYTHE_CHECK_10(perm_vec.rows() + 1 != A.rows(), scythe_conformation_error, "perm_vec does not have exactly one less row than A"); T *y = new T[A.rows()]; T *x = new T[A.rows()]; Matrix bb = row_interchange(b, perm_vec); solve(L, U, bb, x, y); Matrix result(A.rows(), 1, x); delete[]x; delete[]y; return result; } template Matrix lu_solve (const Matrix& A, const Matrix& b, const Matrix& L, const Matrix& U, const Matrix &perm_vec) { return lu_solve(A, b, L, U, perm_vec); } /*! \brief Solve \f$Ax=b\f$ for x via forward and backward * substitution, using LU decomposition * * This function solves the system of equations \f$Ax = b\f$ via * forward and backward substitution and LU decomposition. \a A * must be a non-singular square matrix for this method to work. * * \param A A non-singular square Matrix to decompose. * \param b A column vector with as many rows as \a A. * * \see lu_solve (const Matrix&, const Matrix&, const Matrix&, const Matrix&, const Matrix&) * \see lu_decomp(Matrix, Matrix&, Matrix&, Matrix&) * \see chol_solve(const Matrix &, const Matrix &) * \see chol_solve(const Matrix &, const Matrix &, const Matrix &) * * \throw scythe_null_error (Level 1) * \throw scythe_dimension_error (Level 1) * \throw scythe_conformation_error (Level 1) * \throw scythe_type_error (Level 2) */ template Matrix lu_solve (Matrix A, const Matrix& b) { // step 1 compute the LU factorization Matrix L, U; Matrix perm_vec; lu_decomp_alg(A, L, U, perm_vec); return lu_solve(A, b, L, U, perm_vec); } template Matrix lu_solve (Matrix A, const Matrix& b) { // Slight code rep here, but very few lines // step 1 compute the LU factorization Matrix L, U; Matrix perm_vec; lu_decomp_alg(A, L, U, perm_vec); return lu_solve(A, b, L, U, perm_vec); } /*!\brief Calculates the inverse of a non-singular square matrix, * given an LU decomposition. * * This function returns the inverse of an arbitrary, non-singular, * square matrix \a A when passed a permutation of an LU * decomposition, such as that returned by lu_decomp(). 
A * one-parameter version of this function exists that does not * require the user to pre-decompose the system. * * \param A The Matrix to be inverted. * \param L A Lower triangular matrix resulting from decomposition. * \param U An Upper triangular matrix resulting from decomposition. * \param perm_vec The permutation vector recording the row-wise permutation of \a A actually decomposed by the algorithm. * * \see inv (const Matrix&) * \see invpd(const Matrix&) * \see invpd(const Matrix&, const Matrix&) * \see lu_decomp(Matrix, Matrix&, Matrix&, Matrix&) * * \throw scythe_null_error(Level 1) * \throw scythe_dimension_error (Level 1) * \throw scythe_conformation_error (Level 1) */ template Matrix inv (const Matrix& A, const Matrix& L, const Matrix& U, const Matrix& perm_vec) { SCYTHE_CHECK_10(A.isNull(), scythe_null_error, "A is NULL") SCYTHE_CHECK_10 (! A.isSquare(), scythe_dimension_error, "A is not square"); SCYTHE_CHECK_10(A.rows() != L.rows() || A.rows() != U.rows() || A.cols() != L.cols() || A.cols() != U.cols(), scythe_conformation_error, "A, L, and U do not conform"); SCYTHE_CHECK_10(perm_vec.rows() + 1 != A.rows() && !(A.isScalar() && perm_vec.isScalar()), scythe_conformation_error, "perm_vec does not have exactly one less row than A"); // For the final result Matrix Ainv(A.rows(), A.rows(), false); // for the solve block T *y = new T[A.rows()]; T *x = new T[A.rows()]; Matrix b(A.rows(), 1); // full of zeros Matrix bb; for (uint k = 0; k < A.rows(); ++k) { b[k] = (T) 1; bb = row_interchange(b, perm_vec); solve(L, U, bb, x, y); b[k] = (T) 0; for (uint l = 0; l < A.rows(); ++l) Ainv(l,k) = x[l]; } delete[] y; delete[] x; SCYTHE_VIEW_RETURN(T, RO, RS, Ainv) } template Matrix inv (const Matrix& A, const Matrix& L, const Matrix& U, const Matrix& perm_vec) { return inv(A, L, U, perm_vec); } /*!\brief Invert an arbitrary, non-singular, square matrix. * * This function returns the inverse of a non-singular square matrix, * using lu_decomp() to do the necessary decomposition. This method * is significantly slower than the inverse function for symmetric * positive definite matrices, invpd(). * * \param A The Matrix to be inverted. * * \see inv (const Matrix&, const Matrix&, const Matrix&, const Matrix&) * \see invpd(const Matrix&) * \see invpd(const Matrix&, const Matrix&) * * \throw scythe_null_error(Level 1) * \throw scythe_dimension_error (Level 1) * \throw scythe_conformation_error (Level 1) * \throw scythe_type_error (Level 2) */ template Matrix inv (const Matrix& A) { // Make a copy of A for the decomposition (do it with an explicit // copy to a concrete case A is a view) Matrix AA = A; // step 1 compute the LU factorization Matrix L, U; Matrix perm_vec; lu_decomp_alg(AA, L, U, perm_vec); return inv(A, L, U, perm_vec); } template Matrix inv (const Matrix& A) { return inv(A); } /* Interchanges the rows of A with those in vector p */ /*!\brief Interchange the rows of a Matrix according to a * permutation vector. * * This function permutes the rows of Matrix \a A according to \a * perm_vec. Each element i of perm_vec contains a row-number, r. * For each row, i, in \a A, A[i] is interchanged with A[r]. * * \param A The matrix to permute. * \param p The column vector describing the permutations to perform * on \a A. * * \see lu_decomp(Matrix, Matrix&, Matrix&, Matrix&) * * \throw scythe_dimension_error (Level 1) * \throw scythe_conformation_error (Level 1) */ template Matrix row_interchange (Matrix A, const Matrix& p) { SCYTHE_CHECK_10(! 
p.isColVector(), scythe_dimension_error, "p not a column vector"); SCYTHE_CHECK_10(p.rows() + 1 != A.rows() && ! p.isScalar(), scythe_conformation_error, "p must have one less row than A"); for (uint i = 0; i < A.rows() - 1; ++i) { Matrix vec1 = A(i, _); Matrix vec2 = A(p[i], _); std::swap_ranges(vec1.begin_f(), vec1.end_f(), vec2.begin_f()); } return A; } template Matrix row_interchange (const Matrix& A, const Matrix& p) { return row_interchange(A, p); } /*! \brief Calculate the determinant of a square Matrix. * * This routine calculates the determinant of a square Matrix, using * LU decomposition. * * \param A The Matrix to calculate the determinant of. * * \see lu_decomp(Matrix, Matrix&, Matrix&, Matrix&) * * \throws scythe_dimension_error (Level 1) * \throws scythe_null_error (Level 1) */ template T det (const Matrix& A) { SCYTHE_CHECK_10(! A.isSquare(), scythe_dimension_error, "Matrix is not square") SCYTHE_CHECK_10(A.isNull(), scythe_null_error, "Matrix is NULL") // Make a copy of A for the decomposition (do it here instead of // at parameter pass in case A is a view) Matrix AA = A; // step 1 compute the LU factorization Matrix L, U; Matrix perm_vec; T sign = lu_decomp_alg(AA, L, U, perm_vec); // step 2 calculate the product of diag(U) and sign T det = (T) 1; for (uint i = 0; i < AA.rows(); ++i) det *= AA(i, i); return sign * det; } #ifdef SCYTHE_LAPACK template<> inline Matrix<> cholesky (const Matrix<>& A) { SCYTHE_DEBUG_MSG("Using lapack/blas for cholesky"); SCYTHE_CHECK_10(! A.isSquare(), scythe_dimension_error, "Matrix not square"); SCYTHE_CHECK_10(A.isNull(), scythe_null_error, "Matrix is NULL"); // We have to do an explicit copy within the func to match the // template declaration of the more general template. Matrix<> AA = A; // Get a pointer to the internal array and set up some vars double* Aarray = AA.getArray(); // internal array pointer int rows = (int) AA.rows(); // the dim of the matrix int err = 0; // The output error condition // Cholesky decomposition step lapack::dpotrf_("L", &rows, Aarray, &rows, &err); SCYTHE_CHECK_10(err > 0, scythe_type_error, "Matrix is not positive definite") SCYTHE_CHECK_10(err < 0, scythe_invalid_arg, "The " << err << "th value of the matrix had an illegal value") // Zero out upper triangle for (uint j = 1; j < AA.cols(); ++j) for (uint i = 0; i < j; ++i) AA(i, j) = 0; return AA; } template<> inline Matrix<> chol_solve (const Matrix<>& A, const Matrix<>& b, const Matrix<>& M) { SCYTHE_DEBUG_MSG("Using lapack/blas for chol_solve"); SCYTHE_CHECK_10(A.isNull(), scythe_null_error, "A is NULL") SCYTHE_CHECK_10(! b.isColVector(), scythe_dimension_error, "b must be a column vector"); SCYTHE_CHECK_10(A.rows() != b.rows(), scythe_conformation_error, "A and b do not conform"); SCYTHE_CHECK_10(A.rows() != M.rows(), scythe_conformation_error, "A and M do not conform"); SCYTHE_CHECK_10(! M.isSquare(), scythe_dimension_error, "M must be square"); // The algorithm modifies b in place. We make a copy. 
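    // Illustrative note (added commentary, not in the original
    // source): dpotrs_ below uses the Cholesky factor stored in M
    // (as produced by dpotrf_) to solve A x = b, overwriting the
    // right-hand-side array with the solution.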
Matrix<> bb = b; // Get array pointers and set up some vars const double* Marray = M.getArray(); double* barray = bb.getArray(); int rows = (int) bb.rows(); int cols = (int) bb.cols(); // currently always one, but generalizable int err = 0; // Solve the system lapack::dpotrs_("L", &rows, &cols, Marray, &rows, barray, &rows, &err); SCYTHE_CHECK_10(err > 0, scythe_type_error, "Matrix is not positive definite") SCYTHE_CHECK_10(err < 0, scythe_invalid_arg, "The " << err << "th value of the matrix had an illegal value") return bb; } template<> inline Matrix<> chol_solve (const Matrix<>& A, const Matrix<>& b) { SCYTHE_DEBUG_MSG("Using lapack/blas for chol_solve"); SCYTHE_CHECK_10(A.isNull(), scythe_null_error, "A is NULL") SCYTHE_CHECK_10(! b.isColVector(), scythe_dimension_error, "b must be a column vector"); SCYTHE_CHECK_10(A.rows() != b.rows(), scythe_conformation_error, "A and b do not conform"); // The algorithm modifies both A and b in place, so we make copies Matrix<> AA =A; Matrix<> bb = b; // Get array pointers and set up some vars double* Aarray = AA.getArray(); double* barray = bb.getArray(); int rows = (int) bb.rows(); int cols = (int) bb.cols(); // currently always one, but generalizable int err = 0; // Solve the system lapack::dposv_("L", &rows, &cols, Aarray, &rows, barray, &rows, &err); SCYTHE_CHECK_10(err > 0, scythe_type_error, "Matrix is not positive definite") SCYTHE_CHECK_10(err < 0, scythe_invalid_arg, "The " << err << "th value of the matrix had an illegal value") return bb; } template inline double lu_decomp_alg(Matrix<>& A, Matrix& L, Matrix& U, Matrix& perm_vec) { SCYTHE_DEBUG_MSG("Using lapack/blas for lu_decomp_alg"); SCYTHE_CHECK_10(A.isNull(), scythe_null_error, "A is NULL") SCYTHE_CHECK_10 (! A.isSquare(), scythe_dimension_error, "A is not square"); if (A.isRowVector()) { L = Matrix (1, 1, true, 1); // all 1s U = A; perm_vec = Matrix(1, 1); // all 0s return 0.; } L = U = Matrix(A.rows(), A.cols(), false); perm_vec = Matrix (A.rows(), 1, false); // Get a pointer to the internal array and set up some vars double* Aarray = A.getArray(); // internal array pointer int rows = (int) A.rows(); // the dim of the matrix int* ipiv = (int*) perm_vec.getArray(); // Holds the lu decomp pivot array int err = 0; // The output error condition // Do the decomposition lapack::dgetrf_(&rows, &rows, Aarray, &rows, ipiv, &err); SCYTHE_CHECK_10(err > 0, scythe_type_error, "Matrix is singular"); SCYTHE_CHECK_10(err < 0, scythe_lapack_internal_error, "The " << err << "th value of the matrix had an illegal value"); // Now fill in the L and U matrices. L = A; for (uint i = 0; i < A.rows(); ++i) { for (uint j = i; j < A.rows(); ++j) { U(i,j) = A(i,j); L(i,j) = 0.; L(i,i) = 1.; } } // Change to scythe's rows-1 perm_vec format and c++ indexing // XXX Cutting off the last pivot term may be buggy if it isn't // always just pointing at itself if (perm_vec(perm_vec.size() - 1) != perm_vec.size()) SCYTHE_THROW(scythe_unexpected_default_error, "This is an unexpected error. Please notify the developers.") perm_vec = perm_vec(0, 0, perm_vec.rows() - 2, 0) - 1; // Finally, figure out the sign of perm_vec if (sum(perm_vec > 0) % 2 == 0) return 1; return -1; } /*! \brief The result of a QR decomposition. * * Objects of this type contain three matrices, \a QR, \a tau, and * \a pivot, representing the results of a QR decomposition of a * \f$m \times n\f$ matrix. 
After decomposition, the upper triangle * of \a QR contains the min(\f$m\f$, \f$n\f$) by \f$n\f$ upper * trapezoidal matrix \f$R\f$, while \a tau and the elements of \a * QR below the diagonal represent the orthogonal matrix \f$Q\f$ as * a product of min(\f$m\f$, \f$n\f$) elementary reflectors. The * vector \a pivot is a permutation vector containing information * about the pivoting strategy used in the factorization. * * \a QR is \f$m \times n\f$, tau is a vector of dimension * min(\f$m\f$, \f$n\f$), and pivot is a vector of dimension * \f$n\f$. * * \see qr_decomp (const Matrix<>& A) */ struct QRdecomp { Matrix<> QR; Matrix<> tau; Matrix<> pivot; }; /*! \brief QR decomposition of a matrix. * * This function performs QR decomposition. That is, given a * \f$m \times n \f$ matrix \a A, qr_decomp computes the QR factorization * of \a A with column pivoting, such that \f$A \cdot P = Q \cdot * R\f$. The resulting QRdecomp object contains three matrices, \a * QR, \a tau, and \a pivot. The upper triangle of \a QR contains the * min(\f$m\f$, \f$n\f$) by \f$n\f$ upper trapezoidal matrix * \f$R\f$, while \a tau and the elements of \a QR below the * diagonal represent the orthogonal matrix \f$Q\f$ as a product of * min(\f$m\f$, \f$n\f$) elementary reflectors. The vector \a pivot * is a permutation vector containing information about the pivoting * strategy used in the factorization. * * \note This function requires BLAS/LAPACK functionality and is * only available on machines that provide these libraries. Make * sure you enable the SCYTHE_LAPACK preprocessor flag if you wish * to use this function. Furthermore, note that this function takes * and returns only column-major concrete matrices. Future versions * of Scythe will provide a native C++ implementation of this * function with support for general matrix templates. * * \param A A matrix to decompose. * * \see QRdecomp * \see lu_decomp(Matrix, Matrix&, Matrix&, Matrix&) * \see cholesky (const Matrix&) * \see qr_solve (const Matrix<>& A, const Matrix<>& b, const QRdecomp& QR) * \see qr_solve (const Matrix<>& A, const Matrix<>& b); * * \throw scythe_null_error (Level 1) * \throw scythe_lapack_internal_error (Level 1) */ inline QRdecomp qr_decomp (const Matrix<>& A) { SCYTHE_DEBUG_MSG("Using lapack/blas for qr_decomp"); SCYTHE_CHECK_10(A.isNull(), scythe_null_error, "A is NULL"); // Set up working variables Matrix<> QR = A; double* QRarray = QR.getArray(); // input/output array pointer int rows = (int) QR.rows(); int cols = (int) QR.cols(); Matrix pivot(cols, 1); // pivot vector int* parray = (int*) pivot.getArray(); // pivot vector array pointer Matrix<> tau = Matrix<>(rows < cols ? rows : cols, 1); double* tarray = tau.getArray(); // tau output array pointer double tmp, *work; // workspace vars int lwork, info; // workspace size var and error info var // Get workspace size lwork = -1; lapack::dgeqp3_(&rows, &cols, QRarray, &rows, parray, tarray, &tmp, &lwork, &info); SCYTHE_CHECK_10(info != 0, scythe_lapack_internal_error, "Internal error in LAPACK routine dgeqp3"); lwork = (int) tmp; work = new double[lwork]; // run the routine for real lapack::dgeqp3_(&rows, &cols, QRarray, &rows, parray, tarray, work, &lwork, &info); SCYTHE_CHECK_10(info != 0, scythe_lapack_internal_error, "Internal error in LAPACK routine dgeqp3"); delete[] work; pivot -= 1; QRdecomp result; result.QR = QR; result.tau = tau; result.pivot = pivot; return result; } /*! \brief Solve \f$Ax=b\f$ given a QR decomposition. 
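 *
 * An illustrative sketch of decomposing once and reusing the result
 * (A and the right-hand sides b1, b2 are assumed to conform):
 * \code
 * QRdecomp QR = qr_decomp(A);
 * Matrix<> x1 = qr_solve(A, b1, QR);
 * Matrix<> x2 = qr_solve(A, b2, QR);
 * \endcode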
* * This function solves the system of equations \f$Ax = b\f$ using * the results of a QR decomposition. This function requires the * actual QR decomposition to be performed ahead of time; by * qr_decomp() for example. * * This function is intended for repeatedly solving systems of * equations based on \a A. That is \a A stays constant while \a b * varies. * * \note This function requires BLAS/LAPACK functionality and is * only available on machines that provide these libraries. Make * sure you enable the SCYTHE_LAPACK preprocessor flag if you wish * to use this function. Furthermore, note that this function takes * and returns only column-major concrete matrices. Future versions * of Scythe will provide a native C++ implementation of this * function with support for general matrix templates. * * \param A A Matrix to decompose. * \param b A Matrix with as many rows as \a A. * \param QR A QRdecomp object containing the result of the QR decomposition of \a A. * * \see QRdecomp * \see qr_solve (const Matrix<>& A, const Matrix<>& b) * \see qr_decomp (const Matrix<>& A) * \see lu_solve (const Matrix&, const Matrix&, const Matrix&, const Matrix&, const Matrix&) * \see lu_solve (Matrix, const Matrix&) * \see chol_solve(const Matrix &, const Matrix &) * \see chol_solve(const Matrix &, const Matrix &, const Matrix &) * * \throw scythe_null_error (Level 1) * \throw scythe_conformation_error (Level 1) * \throw scythe_type_error (Level 1) * \throw scythe_lapack_internal_error (Level 1) */ inline Matrix<> qr_solve(const Matrix<>& A, const Matrix<>& b, const QRdecomp& QR) { SCYTHE_DEBUG_MSG("Using lapack/blas for qr_solve"); SCYTHE_CHECK_10(A.isNull(), scythe_null_error, "A is NULL") SCYTHE_CHECK_10(A.rows() != b.rows(), scythe_conformation_error, "A and b do not conform"); SCYTHE_CHECK_10(A.rows() != QR.QR.rows() || A.cols() != QR.QR.cols(), scythe_conformation_error, "A and QR do not conform"); int taudim = (int) (A.rows() < A.cols() ? A.rows() : A.cols()); SCYTHE_CHECK_10(QR.tau.size() != taudim, scythe_conformation_error, "A and tau do not conform"); SCYTHE_CHECK_10(QR.pivot.size() != A.cols(), scythe_conformation_error, "pivot vector is not the right length"); int rows = (int) QR.QR.rows(); int cols = (int) QR.QR.cols(); int nrhs = (int) b.cols(); int lwork, info; double *work, tmp; double* QRarray = QR.QR.getArray(); double* tarray = QR.tau.getArray(); Matrix<> bb = b; double* barray = bb.getArray(); // Get workspace size lwork = -1; lapack::dormqr_("L", "T", &rows, &nrhs, &taudim, QRarray, &rows, tarray, barray, &rows, &tmp, &lwork, &info); SCYTHE_CHECK_10(info != 0, scythe_lapack_internal_error, "Internal error in LAPACK routine dormqr"); // And now for real lwork = (int) tmp; work = new double[lwork]; lapack::dormqr_("L", "T", &rows, &nrhs, &taudim, QRarray, &rows, tarray, barray, &rows, work, &lwork, &info); SCYTHE_CHECK_10(info != 0, scythe_lapack_internal_error, "Internal error in LAPACK routine dormqr"); lapack::dtrtrs_("U", "N", "N", &taudim, &nrhs, QRarray, &rows, barray, &rows, &info); SCYTHE_CHECK_10(info > 0, scythe_type_error, "Matrix is singular"); SCYTHE_CHECK_10(info < 0, scythe_lapack_internal_error, "Internal error in LAPACK routine dtrtrs"); delete[] work; Matrix<> result(A.cols(), b.cols(), false); for (uint i = 0; i < QR.pivot.size(); ++i) result(i, _) = bb((uint) QR.pivot(i), _); return result; } /*! \brief Solve \f$Ax=b\f$ using QR decomposition. * * This function solves the system of equations \f$Ax = b\f$ using * QR decomposition. 
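 *
 * A one-call usage sketch (illustrative only; assumes a SCYTHE_LAPACK
 * build and column-major concrete matrices):
 *
 * \code
 *   // A is m x n and b is m x 1; the decomposition is not returned
 *   Matrix<> x = qr_solve(A, b);
 * \endcode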
This function is intended for repeatedly * solving systems of equations based on \a A. That is \a A stays * constant while \a b varies. * * \note This function used BLAS/LAPACK support functionality and is * only available on machines that provide these libraries. Make * sure you enable the SCYTHE_LAPACK preprocessor flag if you wish * to use this function. Furthermore, note that the function takes * and returns only column-major concrete matrices. Future versions * of Scythe will provide a native C++ implementation of this * function with support for general matrix templates. * * \param A A Matrix to decompose. * \param b A Matrix with as many rows as \a A. * * \see QRdecomp * \see qr_solve (const Matrix<>& A, const Matrix<>& b, const QRdecomp& QR) * \see qr_decomp (const Matrix<>& A) * \see lu_solve (const Matrix&, const Matrix&, const Matrix&, const Matrix&, const Matrix&) * \see lu_solve (Matrix, const Matrix&) * \see chol_solve(const Matrix &, const Matrix &) * \see chol_solve(const Matrix &, const Matrix &, const Matrix &) * * \throw scythe_null_error (Level 1) * \throw scythe_conformation_error (Level 1) * \throw scythe_type_error (Level 1) * \throw scythe_lapack_internal_error (Level 1) */ inline Matrix<> qr_solve (const Matrix<>& A, const Matrix<>& b) { SCYTHE_DEBUG_MSG("Using lapack/blas for qr_solve"); SCYTHE_CHECK_10(A.isNull(), scythe_null_error, "A is NULL") SCYTHE_CHECK_10(A.rows() != b.rows(), scythe_conformation_error, "A and b do not conform"); /* Do decomposition */ // Set up working variables Matrix<> QR = A; double* QRarray = QR.getArray(); // input/output array pointer int rows = (int) QR.rows(); int cols = (int) QR.cols(); Matrix pivot(cols, 1); // pivot vector int* parray = (int*) pivot.getArray(); // pivot vector array pointer Matrix<> tau = Matrix<>(rows < cols ? 
rows : cols, 1); double* tarray = tau.getArray(); // tau output array pointer double tmp, *work; // workspace vars int lwork, info; // workspace size var and error info var // Get workspace size lwork = -1; lapack::dgeqp3_(&rows, &cols, QRarray, &rows, parray, tarray, &tmp, &lwork, &info); SCYTHE_CHECK_10(info != 0, scythe_lapack_internal_error, "Internal error in LAPACK routine dgeqp3"); lwork = (int) tmp; work = new double[lwork]; // run the routine for real lapack::dgeqp3_(&rows, &cols, QRarray, &rows, parray, tarray, work, &lwork, &info); SCYTHE_CHECK_10(info != 0, scythe_lapack_internal_error, "Internal error in LAPACK routine dgeqp3"); delete[] work; pivot -= 1; /* Now solve the system */ // working vars int nrhs = (int) b.cols(); Matrix<> bb = b; double* barray = bb.getArray(); int taudim = (int) tau.size(); // Get workspace size lwork = -1; lapack::dormqr_("L", "T", &rows, &nrhs, &taudim, QRarray, &rows, tarray, barray, &rows, &tmp, &lwork, &info); SCYTHE_CHECK_10(info != 0, scythe_lapack_internal_error, "Internal error in LAPACK routine dormqr"); // And now for real lwork = (int) tmp; work = new double[lwork]; lapack::dormqr_("L", "T", &rows, &nrhs, &taudim, QRarray, &rows, tarray, barray, &rows, work, &lwork, &info); SCYTHE_CHECK_10(info != 0, scythe_lapack_internal_error, "Internal error in LAPACK routine dormqr"); lapack::dtrtrs_("U", "N", "N", &taudim, &nrhs, QRarray, &rows, barray, &rows, &info); SCYTHE_CHECK_10(info > 0, scythe_type_error, "Matrix is singular"); SCYTHE_CHECK_10(info < 0, scythe_lapack_internal_error, "Internal error in LAPACK routine dtrtrs"); delete[] work; Matrix<> result(A.cols(), b.cols(), false); for (uint i = 0; i < pivot.size(); ++i) result(i, _) = bb(pivot(i), _); return result; } template<> inline Matrix<> invpd (const Matrix<>& A) { SCYTHE_DEBUG_MSG("Using lapack/blas for invpd"); SCYTHE_CHECK_10(A.isNull(), scythe_null_error, "A is NULL") SCYTHE_CHECK_10 (! A.isSquare(), scythe_dimension_error, "A is not square"); // We have to do an explicit copy within the func to match the // template declaration of the more general template. Matrix<> AA = A; // Get a pointer to the internal array and set up some vars double* Aarray = AA.getArray(); // internal array pointer int rows = (int) AA.rows(); // the dim of the matrix int err = 0; // The output error condition // Cholesky decomposition step lapack::dpotrf_("L", &rows, Aarray, &rows, &err); SCYTHE_CHECK_10(err > 0, scythe_type_error, "Matrix is not positive definite") SCYTHE_CHECK_10(err < 0, scythe_invalid_arg, "The " << err << "th value of the matrix had an illegal value") // Inversion step lapack::dpotri_("L", &rows, Aarray, &rows, &err); SCYTHE_CHECK_10(err > 0, scythe_type_error, "The (" << err << ", " << err << ") element of the matrix is zero" << " and the inverse could not be computed") SCYTHE_CHECK_10(err < 0, scythe_invalid_arg, "The " << err << "th value of the matrix had an illegal value") lapack::make_symmetric(Aarray, rows); return AA; } template<> inline Matrix<> invpd (const Matrix<>& A, const Matrix<>& M) { SCYTHE_DEBUG_MSG("Using lapack/blas for invpd"); SCYTHE_CHECK_10(A.isNull(), scythe_null_error, "A is NULL") SCYTHE_CHECK_10 (! A.isSquare(), scythe_dimension_error, "A is not square"); SCYTHE_CHECK_10(A.rows() != M.cols() || A.cols() != M.rows(), scythe_conformation_error, "A and M do not conform"); // We have to do an explicit copy within the func to match the // template declaration of the more general template. 
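    //
    // (Annotation added for clarity; not in the original source.) This
    // overload appears to expect M to already hold the Cholesky factor
    // of the positive-definite matrix A (as produced by cholesky() /
    // dpotrf), so only the dpotri inversion step is performed below.
    // A minimal usage sketch under that assumption:
    //
    //   Matrix<> L    = cholesky(A);   // factor the pd matrix once
    //   Matrix<> Ainv = invpd(A, L);   // reuse the factor for the inverse
    //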
Matrix<> MM = M; // Get pointer and set up some vars double* Marray = MM.getArray(); int rows = (int) MM.rows(); int err = 0; // Inversion step lapack::dpotri_("L", &rows, Marray, &rows, &err); SCYTHE_CHECK_10(err > 0, scythe_type_error, "The (" << err << ", " << err << ") element of the matrix is zero" << " and the inverse could not be computed") SCYTHE_CHECK_10(err < 0, scythe_invalid_arg, "The " << err << "th value of the matrix had an illegal value") lapack::make_symmetric(Marray, rows); return MM; } template <> inline Matrix<> inv(const Matrix<>& A) { SCYTHE_DEBUG_MSG("Using lapack/blas for inv"); SCYTHE_CHECK_10(A.isNull(), scythe_null_error, "A is NULL") SCYTHE_CHECK_10 (! A.isSquare(), scythe_dimension_error, "A is not square"); // We have to do an explicit copy within the func to match the // template declaration of the more general template. Matrix<> AA = A; // Get a pointer to the internal array and set up some vars double* Aarray = AA.getArray(); // internal array pointer int rows = (int) AA.rows(); // the dim of the matrix int* ipiv = new int[rows]; // Holds the lu decomp pivot array int err = 0; // The output error condition // LU decomposition step lapack::dgetrf_(&rows, &rows, Aarray, &rows, ipiv, &err); SCYTHE_CHECK_10(err > 0, scythe_type_error, "Matrix is singular"); SCYTHE_CHECK_10(err < 0, scythe_invalid_arg, "The " << err << "th value of the matrix had an illegal value"); // Inversion step; first do a workspace query, then the actual // inversion double work_query = 0; int work_size = -1; lapack::dgetri_(&rows, Aarray, &rows, ipiv, &work_query, &work_size, &err); double* workspace = new double[(work_size = (int) work_query)]; lapack::dgetri_(&rows, Aarray, &rows, ipiv, workspace, &work_size, &err); delete[] ipiv; delete[] workspace; SCYTHE_CHECK_10(err > 0, scythe_type_error, "Matrix is singular"); SCYTHE_CHECK_10(err < 0, scythe_invalid_arg, "Internal error in LAPACK routine dgetri"); return AA; } /*!\brief The result of a singular value decomposition. * * Objects of this type hold the results of a singular value * decomposition (SVD) of an \f$m \times n\f$ matrix \f$A\f$, as * returned by svd(). The SVD takes the form: \f$A = U * \cdot \Sigma \cdot V'\f$. SVD objects contain \a d, which * holds the singular values of \f$A\f$ (the diagonal of * \f$\Sigma\f$) in descending order. Furthermore, depending on the * options passed to svd(), they may hold some or all of the * left singular vectors of \f$A\f$ in \a U and some or all of the * right singular vectors of \f$A\f$ in \a Vt. * * \see svd(const Matrix<>& A, int nu, int nv); */ struct SVD { Matrix<> d; // singular values Matrix<> U; // left singular vectors Matrix<> Vt; // transpose of right singular vectors }; /*!\brief Calculates the singular value decomposition of a matrix, * optionally computing the left and right singular vectors. * * This function returns the singular value decomposition (SVD) of a * \f$m \times n\f$ matrix \a A, optionally computing the left and right * singular vectors. It returns the singular values and vectors in * a SVD object. * * \note This function requires BLAS/LAPACK functionality and is * only available on machines that provide these libraries. Make * sure you enable the SCYTHE_LAPACK preprocessor flag if you wish * to use this function. Furthermore, note that this function takes * and returns only column-major concrete matrices. Future versions * of Scythe will provide a native C++ implementation of this * function with support for general matrix templates. 
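 *
 * A brief usage sketch (an illustrative addition; it assumes a
 * SCYTHE_LAPACK build and an arbitrary column-major input matrix):
 *
 * \code
 *   // A is an m x n column-major Matrix<>
 *   SVD res = svd(A);        // A = U * diag(d) * Vt
 *   Matrix<> d  = res.d;     // min(m,n) singular values, descending
 *   Matrix<> U  = res.U;     // left singular vectors
 *   Matrix<> Vt = res.Vt;    // right singular vectors, transposed
 * \endcode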
* * \param A The matrix to decompose. * \param nu The number of left singular vectors to compute and return. Values less than zero are equivalent to min(\f$m\f$, \f$n\f$). * \param nv The number of right singular vectors to compute and return. Values less than zero are equivalent to min(\f$m\f$, \f$n\f$). * * \throw scythe_null_error (Level 1) * \throw scythe_convergence_error (Level 1) * \throw scythe_lapack_internal_error (Level 1) * * \see SVD * \see eigen(const Matrix<>& A, bool vectors) */ inline SVD svd (const Matrix<>& A, int nu = -1, int nv = -1) { SCYTHE_DEBUG_MSG("Using lapack/blas for eigen"); SCYTHE_CHECK_10(A.isNull(), scythe_null_error, "Matrix is NULL"); char* jobz; int m = (int) A.rows(); int n = (int) A.cols(); int mn = (int) std::min(A.rows(), A.cols()); Matrix<> U; Matrix<> V; if (nu < 0) nu = mn; if (nv < 0) nv = mn; if (nu <= mn && nv<= mn) { jobz = "S"; U = Matrix<>(m, mn, false); V = Matrix<>(mn, n, false); } else if (nu == 0 && nv == 0) { jobz = "N"; } else { jobz = "A"; U = Matrix<>(m, m, false); V = Matrix<>(n, n, false); } double* Uarray = U.getArray(); double* Varray = V.getArray(); int ldu = (int) U.rows(); int ldvt = (int) V.rows(); Matrix<> X = A; double* Xarray = X.getArray(); Matrix<> d(mn, 1, false); double* darray = d.getArray(); double tmp, *work; int lwork, info; int *iwork = new int[8 * mn]; // get optimal workspace lwork = -1; lapack::dgesdd_(jobz, &m, &n, Xarray, &m, darray, Uarray, &ldu, Varray, &ldvt, &tmp, &lwork, iwork, &info); SCYTHE_CHECK_10(info < 0, scythe_lapack_internal_error, "Internal error in LAPACK routine dgessd"); SCYTHE_CHECK_10(info > 0, scythe_convergence_error, "Did not converge"); lwork = (int) tmp; work = new double[lwork]; // Now for real lapack::dgesdd_(jobz, &m, &n, Xarray, &m, darray, Uarray, &ldu, Varray, &ldvt, work, &lwork, iwork, &info); SCYTHE_CHECK_10(info < 0, scythe_lapack_internal_error, "Internal error in LAPACK routine dgessd"); SCYTHE_CHECK_10(info > 0, scythe_convergence_error, "Did not converge"); delete[] work; if (nu < mn && nu > 0) U = U(0, 0, U.rows() - 1, (unsigned int) std::min(m, nu) - 1); if (nv < mn && nv > 0) V = V(0, 0, (unsigned int) std::min(n, nv) - 1, V.cols() - 1); SVD result; result.d = d; result.U = U; result.Vt = V; return result; } /*!\brief The result of an eigenvalue/vector decomposition. * * Objects of this type hold the results of the eigen() function. * That is the eigenvalues and, optionally, the eigenvectors of a * symmetric matrix of order \f$n\f$. The eigenvalues are stored in * ascending order in the member column vector \a values. The * vectors are stored in the \f$n \times n\f$ matrix \a vectors. * * \see eigen(const Matrix<>& A, bool vectors) */ struct Eigen { Matrix<> values; Matrix<> vectors; }; /*!\brief Calculates the eigenvalues and eigenvectors of a symmetric * matrix. * * This function returns the eigenvalues and, optionally, * eigenvectors of a symmetric matrix \a A of order \f$n\f$. It * returns an Eigen object containing the vector of values, in * ascending order, and, optionally, a matrix holding the vectors. * * \note This function requires BLAS/LAPACK functionality and is * only available on machines that provide these libraries. Make * sure you enable the SCYTHE_LAPACK preprocessor flag if you wish * to use this function. Furthermore, note that this function takes * and returns only column-major concrete matrices. Future versions * of Scythe will provide a native C++ implementation of this * function with support for general matrix templates. 
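 *
 * A short usage sketch (an illustrative addition; it assumes a
 * SCYTHE_LAPACK build and a symmetric input matrix):
 *
 * \code
 *   // S is a symmetric, column-major Matrix<> of order n
 *   Eigen eig = eigen(S);              // eigenvalues and eigenvectors
 *   Matrix<> lambda = eig.values;      // eigenvalues in ascending order
 *   Matrix<> V      = eig.vectors;     // column i pairs with lambda(i)
 *   Eigen valsOnly  = eigen(S, false); // values only, skip the vectors
 * \endcode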
* * \param A The Matrix to be decomposed. * \param vectors This boolean value indicates whether or not to * return eigenvectors in addition to eigenvalues. It is set to true * by default. * * \throw scythe_null_error (Level 1) * \throw scythe_dimension_error (Level 1) * \throw scythe_lapack_internal_error (Level 1) * * \see Eigen * \see svd(const Matrix<>& A, int nu, int nv); */ inline Eigen eigen (const Matrix<>& A, bool vectors=true) { SCYTHE_DEBUG_MSG("Using lapack/blas for eigen"); SCYTHE_CHECK_10(! A.isSquare(), scythe_dimension_error, "Matrix not square"); SCYTHE_CHECK_10(A.isNull(), scythe_null_error, "Matrix is NULL"); // Should be symmetric but rounding errors make checking for this // difficult. // Make a copy of A Matrix<> AA = A; // Get a point to the internal array and set up some vars double* Aarray = AA.getArray(); // internal array points int order = (int) AA.rows(); // input matrix is order x order double dignored = 0; // we don't use this option int iignored = 0; // or this one double abstol = 0.0; // tolerance (default) int m; // output value Matrix<> result; // result matrix char getvecs[1]; // are we getting eigenvectors? if (vectors) { getvecs[0] = 'V'; result = Matrix<>(order, order + 1, false); } else { result = Matrix<>(order, 1, false); getvecs[0] = 'N'; } double* eigenvalues = result.getArray(); // pointer to result array int* isuppz = new int[2 * order]; // indices of nonzero eigvecs double tmp; // inital temporary value for getting work-space info int lwork, liwork, *iwork, itmp; // stuff for workspace double *work; // and more stuff for workspace int info = 0; // error code holder // get optimal size for work arrays lwork = -1; liwork = -1; lapack::dsyevr_(getvecs, "A", "L", &order, Aarray, &order, &dignored, &dignored, &iignored, &iignored, &abstol, &m, eigenvalues, eigenvalues + order, &order, isuppz, &tmp, &lwork, &itmp, &liwork, &info); SCYTHE_CHECK_10(info != 0, scythe_lapack_internal_error, "Internal error in LAPACK routine dsyevr"); lwork = (int) tmp; liwork = itmp; work = new double[lwork]; iwork = new int[liwork]; // do the actual operation lapack::dsyevr_(getvecs, "A", "L", &order, Aarray, &order, &dignored, &dignored, &iignored, &iignored, &abstol, &m, eigenvalues, eigenvalues + order, &order, isuppz, work, &lwork, iwork, &liwork, &info); SCYTHE_CHECK_10(info != 0, scythe_lapack_internal_error, "Internal error in LAPACK routine dsyevr"); delete[] isuppz; delete[] work; delete[] iwork; Eigen resobj; if (vectors) { resobj.values = result(_, 0); resobj.vectors = result(0, 1, result.rows() -1, result.cols() - 1); } else { resobj.values = result; } return resobj; } struct GeneralEigen { Matrix > values; Matrix<> vectors; }; inline GeneralEigen geneigen (const Matrix<>& A, bool vectors=true) { SCYTHE_CHECK_10 (! 
A.isSquare(), scythe_dimension_error, "Matrix not square"); SCYTHE_CHECK_10 (A.isNull(), scythe_null_error, "Matrix is NULL"); Matrix<> AA = A; // Copy A // Get a point to the internal array and set up some vars double* Aarray = AA.getArray(); // internal array points int order = (int) AA.rows(); // input matrix is order x order GeneralEigen result; int info, lwork; double *left, *right, *valreal, *valimag, *work, tmp; valreal = new double[order]; valimag = new double[order]; left = right = (double *) 0; char leftvecs[1], rightvecs[1]; leftvecs[0] = rightvecs[0] = 'N'; if (vectors) { rightvecs[0] = 'V'; result.vectors = Matrix<>(order, order, false); right = result.vectors.getArray(); } // Get working are size lwork = -1; lapack::dgeev_ (leftvecs, rightvecs, &order, Aarray, &order, valreal, valimag, left, &order, right, &order, &tmp, &lwork, &info); SCYTHE_CHECK_10(info != 0, scythe_lapack_internal_error, "Internal error in LAPACK routine dgeev"); lwork = (int) tmp; work = new double[lwork]; // Run for real lapack::dgeev_ (leftvecs, rightvecs, &order, Aarray, &order, valreal, valimag, left, &order, right, &order, work, &lwork, &info); SCYTHE_CHECK_10(info != 0, scythe_lapack_internal_error, "Internal error in LAPACK routine dgeev"); // Pack value into result result.values = Matrix > (order, 1, false); for (unsigned int i = 0; i < result.values.size(); ++i) result.values(i) = std::complex (valreal[i], valimag[i]); // Clean up delete[] valreal; delete[] valimag; delete[] work; return result; } #endif } // end namespace scythe #endif /* SCYTHE_IDE_H */ MCMCpack/src/HMMpanelRE.cc0000644000176000001440000013473612140061656014665 0ustar ripleyusers////////////////////////////////////////////////////////////////////////// // HMMpanelRE.cc is C++ code to estimate a Gaussian panel model with a structural break // y_{it} = \x'_{it}\b + \w'_{it}\bi_i + \z'_{it}\d_{s_{it}} +\varepsilon_{it} // \varepsilon_{it} \sim \normdist{0}{\sigma^2} // \bi_i \sim \normdist{0}{\D} // Parameters = {beta, bi, D, sigma2, || {s_it}, delta, P_i}// static + changing // // Jong Hee Park // Department of Political Science and International Relations // Seoul National University // jongheepark@snu.ac.kr // Modified (consistent with MCMChregress by Ghislain Vieilledent) on 07/04/2011 // Included in MCMCpack on 09/2011 ////////////////////////////////////////////////////////////////////////// #ifndef HMMPANELRE_CC #define HMMPANELRE_CC #include #include #include "MCMCrng.h" #include "MCMCfcds.h" #include "matrix.h" #include "distributions.h" #include "stat.h" #include "la.h" #include "ide.h" #include "smath.h" #include "rng.h" #include "mersenne.h" #include "lecuyer.h" #include "lapack.h" #include // needed to use Rprintf() #include // needed to allow user interrupts using namespace std; using namespace scythe; // used to access 1d arrays holding R matrices like a 2d array #define M(ROW,COL,NROWS) (COL*NROWS+ROW) static double lndwish (const Matrix<>& W, unsigned int v, const Matrix<>& S) { const int K = S.rows(); double gammapart = 1; for (int i=0; i hold = invpd(S) * W; Matrix<> diaghold(K, 1); diaghold(_,0) = diag(hold); double tracehold = sum(diaghold); double lognum = -1*(v *.5)*log(detS) + (v - K - 1)/2*log(detW) - 1/2 * tracehold; return(lognum - logdenom); } static double lndinvgamma(double theta, double a, double b) { double logf = a * log(b) - lngammafn(a) + -(a+1) * log(theta) + -b/theta; return (logf); //pow(b, a) / gammafn(a) * pow(theta, -(a+1)) * exp(-b/theta); } template void GaussianPanelRE_impl(rng& stream, 
unsigned int nsubj, unsigned int ntime, unsigned int nobs, const Matrix& subjectid, const Matrix& timeid, const Matrix<>& Y, const Matrix<>& X, const Matrix<>& W, unsigned int burnin, unsigned int mcmc, unsigned int thin, unsigned int verbose, const double sigma2start, Matrix<>& D, const Matrix<>& b0, const Matrix<>& B0, const double c0, const double d0, unsigned int r0, const Matrix<>& R0, const Matrix<>& time_groupinfo, const Matrix<>& subject_groupinfo, Matrix<>& beta_store, Matrix<>& sigma_store, Matrix<>& D_store, double& logmarglike, double& loglike, unsigned int chib) { // redefine constants const unsigned int K = X.cols();; // ncol(X) const unsigned int Q = W.cols(); // ncol(W) const int NOBS = nobs; double sigma2 = sigma2start; const Matrix<> R0inv = invpd(R0); const Matrix<> B0inv = invpd(B0); const int tot_iter = burnin + mcmc; const int nstore = mcmc / thin; // number of draws to store Matrix Dinv = invpd(D); // generate posk_arr and post_arr // Number of observations by group k int *nobsk = new int[nsubj]; for (int k=0; k *Yk_arr = new Matrix[nsubj]; Matrix *Xk_arr = new Matrix[nsubj]; Matrix *Wk_arr = new Matrix[nsubj]; for(int k=0; k(nobsk[k], K); Wk_arr[k] = Matrix(nobsk[k], Q); Yk_arr[k] = Matrix(nobsk[k], 1); for (int l=0; l *tXk_arr = new Matrix[nsubj]; Matrix *tWk_arr = new Matrix[nsubj]; Matrix *tXWk_arr = new Matrix[nsubj]; Matrix *tWXk_arr = new Matrix[nsubj]; Matrix *tWYk_arr = new Matrix[nsubj]; Matrix *tXYk_arr = new Matrix[nsubj]; Matrix *cpXk_arr = new Matrix[nsubj]; Matrix *cpWk_arr = new Matrix[nsubj]; for(int k=0; k beta(K, 1); ///////////////////////////////////////////////// // initialize Yhat for the first loop only ///////////////////////////////////////////////// for (int iter=0; iter < tot_iter; ++iter){ ///////////////////////////////////////////////// // Step 1. Sample beta (fixed-effects coef) ///////////////////////////////////////////////// Matrix XVX(K, K); Matrix XVY(K, 1); for(int s = 0; s beta_var = invpd(B0 + XVX/sigma2); Matrix<> beta_mean = beta_var*(B0*b0 + XVY/sigma2); beta = stream.rmvnorm(beta_mean, beta_var); ///////////////////////////////////////////////// // Step 2. Sample bi (random-effects coef) ///////////////////////////////////////////////// Matrix bi(Q, nsubj); for(int s = 0; s b_var = invpd(Dinv + cpWk_arr[s]/sigma2); Matrix b_mean = b_var*tWk_arr[s]*(Yk_arr[s] - Xk_arr[s]*beta)/sigma2; bi(_,s) = stream.rmvnorm(b_mean, b_var); } ///////////////////////////////////////////////// // Step 3. Sample sigma2 ///////////////////////////////////////////////// double SSE = 0; for(int s=0;s e = t(Yk_arr[s]-Xk_arr[s]*beta - Wk_arr[s]*bi(_,s))* (Yk_arr[s] - Xk_arr[s]*beta - Wk_arr[s]*bi(_,s)); SSE = SSE + e[0]; } double nu = (c0 + NOBS)/2; double delta = (d0 + SSE)/2; sigma2 = stream.rigamma(nu, delta); ///////////////////////////////////////////////// // Step 4. 
Sample D ///////////////////////////////////////////////// Matrix SSB = bi*t(bi); Matrix D_scale = invpd(R0inv + SSB); int D_dof = r0 + nsubj; Dinv = stream.rwish(D_dof, D_scale); D = inv(Dinv); ///////////////////////////////////////////////// // STORE ///////////////////////////////////////////////// if (iter >= burnin && ((iter % thin) == 0)) { for(int j=0;j 0 && iter % verbose == 0){ Rprintf("\n ----------------------------------------------------------------------- \n"); Rprintf("\n Gaussian Panel iteration %i of %i ", (iter+1), tot_iter); // store global estimates Rprintf("\n beta = "); for(int j=0;j beta_st(K, 1); beta_st(_, 0) = meanc(beta_store); const double sigma2_st = mean(sigma_store); Matrix<> Dst = meanc(D_store); Matrix<> D_st(Q, Q); for (int k = 0; k<(Q*Q); ++k){ D_st[k] = Dst[k]; } const Matrix <> Dinv_st = invpd(D_st); Matrix<> beta_density(nstore, 1); ////////////////////////////////////////////////////////////////// // 1. pdf.beta | D_g, sigma2_g ////////////////////////////////////////////////////////////////// for (int iter=0; iter < nstore; ++iter){ for(int j=0;j<(Q*Q); ++j) { D(j) = D_store(iter,j); } Dinv = invpd(D); Matrix XVX(K, K); Matrix XVY(K, 1); for(int s = 0; s beta_var = invpd(B0 + XVX/sigma_store(iter)); Matrix<> beta_mean = beta_var*(B0*b0 + XVY/sigma_store(iter)); if (K == 1){ beta_density(iter) = dnorm(beta_st(0), beta_mean[0], sqrt(beta_var[0])); } else{ beta_density(iter) = ::exp(lndmvn(beta_st, beta_mean, beta_var)); } } double pdf_beta = log(mean(beta_density)); ////////////////////////////////////////////////////////////////// // 2. pdf.D ////////////////////////////////////////////////////////////////// Matrix <> D_density(nstore, 1); for (int iter=0; iter < nstore; ++iter){ ///////////////////////////////////////////////// // Step 2.1 Sample bi (random-effects coef) ///////////////////////////////////////////////// Matrix bi(Q, nsubj); for(int s = 0; s b_var = invpd(Dinv + cpWk_arr[s]/sigma2); Matrix b_mean = b_var*t(Wk_arr[s])*(Yk_arr[s] - Xk_arr[s]*beta_st)/sigma2; bi(_,s) = stream.rmvnorm(b_mean, b_var); } ///////////////////////////////////////////////// // Step 2.2 Sample sigma2 ///////////////////////////////////////////////// double SSE = 0; for(int s=0;s e = t(Yk_arr[s]-Xk_arr[s]*beta_st - Wk_arr[s]*bi(_,s))* (Yk_arr[s] - Xk_arr[s]*beta_st - Wk_arr[s]*bi(_,s)); SSE = SSE + e[0]; } double nu = (c0 + NOBS)/2; double delta = (d0 + SSE)/2; sigma2 = stream.rigamma(nu, delta); ///////////////////////////////////////////////// // Step 2.3 Sample D ///////////////////////////////////////////////// Matrix SSB = bi*t(bi); Matrix D_scale = invpd(R0inv + SSB); int D_dof = r0 + nsubj; Dinv = stream.rwish(D_dof, D_scale); D_density(iter) = ::exp(lndwish(invpd(D_st), D_dof, D_scale)); D = inv(Dinv); } double pdf_D = log(mean(D_density)); ////////////////////////////////////////////////////////////////// // 3. 
pdf.sigma2 ////////////////////////////////////////////////////////////////// Matrix <> sigma_density(nstore, 1); for (int iter=0; iter < nstore; ++iter){ ///////////////////////////////////////////////// // Step 3.1 Sample bi (random-effects coef) ///////////////////////////////////////////////// Matrix bi(Q, nsubj); for(int s = 0; s b_var = invpd(Dinv_st + cpWk_arr[s]/sigma2); Matrix b_mean = b_var*tWk_arr[s]*(Yk_arr[s] - Xk_arr[s]*beta_st)/sigma2; bi(_,s) = stream.rmvnorm(b_mean, b_var); } ///////////////////////////////////////////////// // Step 3.2 Sample sigma2 ///////////////////////////////////////////////// double SSE = 0; for(int s=0;s e = t(Yk_arr[s]-Xk_arr[s]*beta_st - Wk_arr[s]*bi(_,s))* (Yk_arr[s] - Xk_arr[s]*beta_st - Wk_arr[s]*bi(_,s)); SSE = SSE + e[0]; } double nu = (c0 + NOBS)/2; double delta = (d0 + SSE)/2; sigma2 = stream.rigamma(nu, delta); sigma_density(iter) = ::exp(lndinvgamma(sigma2_st, nu, delta)); } double pdf_sigma2 = log(mean(sigma_density)); ////////////////////////////////////////////////////////////////// // likelihood f(y|beta_st, D_st, sigma2_st) ////////////////////////////////////////////////////////////////// loglike = 0; for(int s = 0; s Sigma = sigma2_st*eye(ntime_s) + Wk_arr[s] *D_st* t(Wk_arr[s]); Matrix<> Mu = Xk_arr[s]*beta_st; loglike += lndmvn(Yk_arr[s], Mu, Sigma); } ////////////////////// // log prior ordinate ////////////////////// double density_beta_prior = 0; if (K == 1){ density_beta_prior =log(dnorm(beta_st(0), b0[0], sqrt(B0inv[0]))); } else{ density_beta_prior = lndmvn(beta_st, b0, B0inv); } double density_D_prior = lndwish(Dinv_st, r0, R0); double density_sigma2_prior = lndinvgamma(sigma2_st, c0/2, d0/2); // compute marginal likelihood double logprior = density_beta_prior + density_sigma2_prior + density_D_prior; logmarglike = (loglike + logprior) - (pdf_beta + pdf_sigma2 + pdf_D); if (verbose >0){ Rprintf("\n ----------------------------------------------------------------------- \n"); Rprintf("\n logmarglike %10.5f", logmarglike); Rprintf("\n loglike %10.5f", loglike, "\n"); Rprintf("\n log_prior %10.5f", logprior, "\n"); Rprintf("\n pdf_beta is %10.5f", pdf_beta, "\n"); Rprintf("\n pdf_D is %10.5f", pdf_D, "\n"); Rprintf("\n pdf_sigma2 is %10.5f", pdf_sigma2, "\n"); } }// end of marginal likelihood computation delete [] Xk_arr; delete [] Yk_arr; delete [] Wk_arr; delete [] cpWk_arr; delete [] cpXk_arr; delete [] tWk_arr; delete [] tXk_arr; delete [] tXYk_arr; delete [] tXWk_arr; delete [] tWXk_arr; delete [] tWYk_arr; } template void HMMpanelRE_impl(rng& stream, unsigned int nsubj, unsigned int ntime, unsigned int m, unsigned int nobs, const Matrix& subjectid, const Matrix& timeid, const Matrix<>& Y, const Matrix<>& X, const Matrix<>& W, const Matrix<>& YT, const Matrix<>& XT, const Matrix<>& WT, unsigned int burnin, unsigned int mcmc, unsigned int thin, unsigned int verbose, Matrix<>& betastart, double sigma2start, Matrix<>& Dstart, const Matrix<>& b0, const Matrix<>& B0, const double c0, const double d0, unsigned int r0, const Matrix<>& R0, const Matrix<>& P0, const Matrix<>& time_groupinfo, const Matrix<>& subject_groupinfo, Matrix<>& beta_store, Matrix<>& sigma_store, Matrix<>& D_store, Matrix<>& P_store, Matrix<>& ps_store, Matrix<>& s_store, double& logmarglike, double& loglike, unsigned int chib) { // redefine constants const unsigned int K = X.cols(); const unsigned int Q = W.cols(); const int NOBS = nobs; const Matrix<> R0inv = invpd(R0); const Matrix<> B0inv = invpd(B0); const int tot_iter = burnin + mcmc; const int 
nstore = mcmc / thin; // number of draws to store const int ns = m + 1; Matrix<>* D = new Matrix<>[ns]; Matrix<>* Dinv = new Matrix<>[ns]; for (int j=0; j D_record(ns, Q*Q);// for record D in D_store for (int j = 0; j *Yk_arr = new Matrix[nsubj]; Matrix *Xk_arr = new Matrix[nsubj]; Matrix *Wk_arr = new Matrix[nsubj]; for(int k=0; k(nobsk[k], K); Wk_arr[k] = Matrix(nobsk[k], Q); Yk_arr[k] = Matrix(nobsk[k], 1); for (int l=0; l* Xt_arr = new Matrix<>[ntime]; Matrix<>* Wt_arr = new Matrix<>[ntime]; Matrix<>* Yt_arr = new Matrix<>[ntime]; for(int k=0; k 0){ Xt_arr[k] = Matrix(nobst[k], K); Wt_arr[k] = Matrix(nobst[k], Q); Yt_arr[k] = Matrix(nobst[k], 1); for (int l = 0; l *cpWt_arr = new Matrix[ntime]; Matrix *cpXt_arr = new Matrix[ntime]; Matrix *tXWt_arr = new Matrix[ntime]; Matrix *tWXt_arr = new Matrix[ntime]; Matrix *tWYt_arr = new Matrix[ntime]; Matrix *tXYt_arr = new Matrix[ntime]; Matrix *tXt_arr = new Matrix[ntime]; Matrix *tWt_arr = new Matrix[ntime]; for(int k=0; k *bk_run = new Matrix[nsubj]; // Random effects for (int k=0;k(Q,1); } Matrix<>* bi = new Matrix<>[ns]; Matrix<> beta(ns, K); Matrix<> sigma2(ns, 1); for (int j=0; j(Q, nsubj); beta(j,_) = betastart; sigma2(j) = sigma2start; } Matrix<> P = P0; // MCMC iterations start here int sampcount = 0; // initialize Yhat for the first loop only for (int iter=0; iter < tot_iter; ++iter){ ////////////////////// // Step 1. Sample state ////////////////////// Matrix<> F(ntime, ns); Matrix<> pr1(ns, 1); pr1[0] = 1; Matrix<> py(ns, 1); Matrix<> pstyt1(ns, 1); for (int tt=0; tt Wbi(nsubj_s, 1); for (int j=0; j Sigma = eye(nsubj_s)*sigma2[j]; Matrix<> WDW = Wt_arr[tt]*D[j]*tWt_arr[tt]; Matrix<> Mu = Xt_arr[tt]*::t(beta(j,_)); py[j] = ::exp(lndmvn(Yt_arr[tt], Mu, WDW + Sigma)); } if (tt==0) pstyt1 = pr1; else { pstyt1 = ::t(F(tt-1,_)*P); } Matrix<> unnorm_pstyt = pstyt1%py; Matrix<> pstyt = unnorm_pstyt/sum(unnorm_pstyt); for (int j=0; j state(ntime, 1); Matrix<> ps = Matrix<>(ntime, ns); ps(ntime-1,_) = F(ntime-1,_); state(ntime-1) = ns; Matrix<> pstyn = Matrix<>(ns, 1); double pone = 0.0; int tt = ntime-2; while (tt >= 0){ int st = state(tt+1); Matrix<> Pst_1 = ::t(P(_, st-1)); Matrix<> unnorm_pstyn = F(tt,_)%Pst_1; pstyn = unnorm_pstyn/sum(unnorm_pstyn); if (st==1){ state(tt) = 1; } else{ pone = pstyn(st-2); if(stream.runif() < pone) state(tt) = st-1; else state(tt) = st; } ps(tt,_) = pstyn; --tt; }// end of while loop ////////////////////// // Step 2. Sample beta ////////////////////// Matrix nstate(ns, 1); // refresh the container for numbers of each state int beta_count = 0; for (int j = 0; j XVX(K, K); Matrix<> XVY(K, 1); for(int tt = (beta_count - nstate[j]); tt beta_var = invpd(B0 + XVX/sigma2[j]); Matrix<> beta_mean = beta_var*(B0*b0 + XVY/sigma2[j]); beta(j,_) = stream.rmvnorm(beta_mean, beta_var); } ///////////////////////////////////////////////// // Step 3. 
Sample bi (random-effects coef) ///////////////////////////////////////////////// beta_count = 0; Matrix YN(ns, 1); Matrix<> SSE(ns, 1); for (int j = 0; j yj = Yk_arr[s]((beta_count - nstate[j]), 0, (beta_count - 1), 0); Matrix<> Xj = Xk_arr[s]((beta_count - nstate[j]), 0, (beta_count - 1), K-1); Matrix<> Wj = Wk_arr[s]((beta_count - nstate[j]), 0, (beta_count - 1), Q-1); Matrix<> b_var = invpd(Dinv[j]+ ::t(Wj)*Wj/sigma2[j]); Matrix<> b_mean = b_var*::t(Wj)*(yj - Xj*::t(beta(j,_)))/sigma2[j]; bi[j](_,s) = stream.rmvnorm(b_mean, b_var); // FOR SIGMA YN[j] = YN[j] + yj.rows(); Matrix<> e = ::t(yj - Xj*::t(beta(j,_)) - Wj*bi[j](_,s))*(yj - Xj*::t(beta(j,_)) - Wj*bi[j](_,s)); SSE[j] = SSE[j] + e[0]; } } ///////////////////////////////////////////////// // Step 4. Sample sigma2 ///////////////////////////////////////////////// for (int j = 0; j SSB = bi[j]*t(bi[j]); Matrix<> D_scale = invpd(R0inv + SSB); int D_dof = r0 + nsubj; Dinv[j] = stream.rwish(D_dof, D_scale); D[j] = invpd(Dinv[j]); for (int i=0; i<(Q*Q); ++i){ D_record(j,i) = D[j](i); } } ////////////////////// // Step 6. Sample P ////////////////////// double shape1 = 0; double shape2 = 0; for (int j =0; j= burnin && ((iter % thin) == 0)) { Matrix<> tbeta = ::t(beta); //transpose beta for R output for (int i=0; i<(ns*K); ++i){ beta_store(sampcount, i) = tbeta[i];// stored by the order of (11, 12, 13, 21, 22, 23) } for (int i=0; i tD_record = ::t(D_record); for (int i=0; i<(ns*Q*Q); ++i){ D_store(sampcount,i) = tD_record(i);// stored by the order of (D11, D12, D13, 21, 22, 23) } for (int j=0; j 0 && iter % verbose == 0){ Rprintf("\n ----------------------------------------------------------------------- \n"); Rprintf("HMMpanelRE iteration %i of %i \n", (iter+1), tot_iter); for(int j=0;j betast = meanc(beta_store); Matrix beta_st(ns, K); for (int j = 0; j< ns*K; ++j){ beta_st[j] = betast[j]; } Matrix sigma2st = meanc(sigma_store); Matrix sigma2_st(ns, 1); for (int j = 0; j< ns; ++j){ sigma2_st[j] = sigma2st[j]; } Matrix Dst = meanc(D_store); Matrix<>* Dinv_st = new Matrix<>[ns]; Matrix<>* D_st = new Matrix<>[ns]; int Dcount = 0; for (int j=0; j Dtemp(Q, Q); for (int k = 0; k<(Q*Q); ++k){ Dtemp[k] = Dst[Dcount + k]; } Dcount = Dcount + Q*Q; D_st[j] = Dtemp; Dinv_st[j] = invpd(Dtemp); } Matrix<> Dst_record(ns, Q*Q);// for record D in D_store for (int j = 0; j P_vec_st = meanc(P_store); const Matrix P_st(ns, ns); for (int j = 0; j< ns*ns; ++j){ P_st[j] = P_vec_st[j]; } Matrix<> density_beta(nstore, ns); Matrix<> density_local_beta(ns, 1); ////////////////////////////////////////////////////////////////// // 1. pdf.beta | D_g, sigma2_g, P_g, bi_g ////////////////////////////////////////////////////////////////// Matrix<> Dmcmc(Q, Q); for (int iter = 0; iter nstate(ns, 1); for (int j = 0; j Dinv_j = invpd(Dmcmc); Matrix<> XVX(K, K); Matrix<> XVY(K, 1); for(int tt = (beta_count - nstate[j]); tt beta_var = invpd(B0 + XVX/sigma_store(iter, j)); Matrix<> beta_mean = beta_var*(B0*b0 + XVY/sigma_store(iter, j)); if (K == 1){ density_beta(iter, j) = dnorm(beta_st(j), beta_mean[0], sqrt(beta_var[0])); } else{ density_beta(iter, j) = ::exp(lndmvn(::t(beta_st(j,_)), beta_mean, beta_var)); } } } double pdf_beta = log(prod(meanc(density_beta))); ////////////////////////////////////////////////////////////////// // 2. 
pdf.D ////////////////////////////////////////////////////////////////// Matrix<> density_D(nstore, ns); for (int iter = 0; iter SSE(ns, 1); int beta_count = 0; Matrix YN(ns, 1); Matrix nstate(ns, 1); ///////////////////////////////////////////////// // 2.1. s | beta_st, y, bi, sigma, D, P ///////////////////////////////////////////////// Matrix<> F(ntime, ns); Matrix<> pr1(ns, 1); pr1[0] = 1; Matrix<> py(ns, 1); Matrix<> pstyt1(ns, 1); for (int tt=0; tt Wbi(nsubj_s, 1); for (int j=0; j Sigma = eye(nsubj_s)*sigma2[j]; Matrix<> WDW = Wt_arr[tt]*D[j]*tWt_arr[tt]; Matrix<> Mu = Xt_arr[tt]*::t(beta_st(j,_)); py[j] = ::exp(lndmvn(Yt_arr[tt], Mu, WDW+Sigma)); } if (tt==0) pstyt1 = pr1; else { pstyt1 = ::t(F(tt-1,_)*P); } Matrix<> unnorm_pstyt = pstyt1%py; Matrix<> pstyt = unnorm_pstyt/sum(unnorm_pstyt); for (int j=0; j state(ntime, 1); Matrix<> ps = Matrix<>(ntime, ns); ps(ntime-1, _) = F(ntime-1, _); state(ntime-1) = ns; Matrix<> pstyn = Matrix<>(ns, 1); double pone = 0.0; int tt = ntime-2; while (tt >= 0){ int st = state(tt+1); Matrix<> Pst_1 = ::t(P(_, st-1)); Matrix<> unnorm_pstyn = F(tt,_)%Pst_1; pstyn = unnorm_pstyn/sum(unnorm_pstyn); if (st==1){ state(tt) = 1; } else{ pone = pstyn(st-2); if(stream.runif() < pone) state(tt) = st-1; else state(tt) = st; } ps(tt,_) = pstyn; --tt; }// end of while loop ///////////////////////////////////////////////// // 2.2. P | beta_st, y, bi, sigma, s, D ///////////////////////////////////////////////// for (int j = 0; j yj = Yk_arr[s]((beta_count - nstate[j]), 0, (beta_count - 1), 0); Matrix<> Xj = Xk_arr[s]((beta_count - nstate[j]), 0, (beta_count - 1), K-1); Matrix<> Wj = Wk_arr[s]((beta_count - nstate[j]), 0, (beta_count - 1), Q-1); Matrix<> b_var = invpd(Dinv[j]+ ::t(Wj)*Wj/sigma2[j]); Matrix<> b_mean = b_var*::t(Wj)*(yj - Xj*::t(beta_st(j,_)))/sigma2[j]; bi[j](_,s) = stream.rmvnorm(b_mean, b_var); // FOR SIGMA YN[j] = YN[j] + yj.rows(); Matrix<> e = ::t(yj - Xj*::t(beta_st(j,_)) - Wj*bi[j](_,s))*(yj - Xj*::t(beta_st(j,_)) - Wj*bi[j](_,s)); SSE[j] = SSE[j] + e[0]; } } ///////////////////////////////////////////////// // 2.4. D | beta_st, y, bi, sigma, s, P ///////////////////////////////////////////////// for (int j = 0; j SSB = bi[j]*t(bi[j]); Matrix<> D_scale = invpd(R0inv + SSB); int D_dof = r0 + nsubj; Dinv[j] = stream.rwish(D_dof, D_scale); density_D(iter, j) = ::exp(lndwish(Dinv_st[j], D_dof, D_scale)); D[j] = inv(Dinv[j]); } ///////////////////////////////////////////////// // 2.5. sigma | beta_st, y, bi, D, s, P ///////////////////////////////////////////////// for (int j = 0; j density_sigma2(nstore, ns); for (int iter = 0; iter SSE(ns, 1); int beta_count = 0; Matrix YN(ns, 1); Matrix nstate(ns, 1); ///////////////////////////////////////////////// // 3.1. 
s| beta_st, D_st, y, bi, sigma2, P ///////////////////////////////////////////////// Matrix<> F(ntime, ns); Matrix<> pr1(ns, 1); pr1[0] = 1; Matrix<> py(ns, 1); Matrix<> pstyt1(ns, 1); for (int tt=0; tt Wbi(nsubj_s, 1); for (int j=0; j Sigma = eye(nsubj_s)*sigma2[j]; Matrix<> WDW = Wt_arr[tt]*D_st[j]*tWt_arr[tt]; Matrix<> Mu = Xt_arr[tt]*::t(beta_st(j,_)); py[j] = ::exp(lndmvn(Yt_arr[tt], Mu, WDW+Sigma)); } if (tt==0) pstyt1 = pr1; else { pstyt1 = ::t(F(tt-1,_)*P); } Matrix<> unnorm_pstyt = pstyt1%py; Matrix<> pstyt = unnorm_pstyt/sum(unnorm_pstyt); for (int j=0; j state(ntime, 1); Matrix<> ps = Matrix<>(ntime, ns); ps(ntime-1,_) = F(ntime-1,_); state(ntime-1) = ns; Matrix<> pstyn = Matrix<>(ns, 1); double pone = 0.0; int tt = ntime-2; while (tt >= 0){ int st = state(tt+1); Matrix<> Pst_1 = ::t(P(_, st-1)); Matrix<> unnorm_pstyn = F(tt,_)%Pst_1; pstyn = unnorm_pstyn/sum(unnorm_pstyn); if (st==1){ state(tt) = 1; } else{ pone = pstyn(st-2); if(stream.runif() < pone) state(tt) = st-1; else state(tt) = st; } ps(tt,_) = pstyn; --tt; }// end of while loop ///////////////////////////////////////////////// // 3.2. bi| beta_st, D_st, y, sigma2, s, P ///////////////////////////////////////////////// for (int j = 0; j yj = Yk_arr[s]((beta_count - nstate[j]), 0, (beta_count - 1), 0); Matrix<> Xj = Xk_arr[s]((beta_count - nstate[j]), 0, (beta_count - 1), K-1); Matrix<> Wj = Wk_arr[s]((beta_count - nstate[j]), 0, (beta_count - 1), Q-1); Matrix<> b_var = inv(Dinv_st[j]+ ::t(Wj)*Wj/sigma2[j]); Matrix<> b_mean = b_var*::t(Wj)*(yj - Xj*::t(beta_st(j,_)))/sigma2[j]; bi[j](_,s) = stream.rmvnorm(b_mean, b_var); // FOR SIGMA YN[j] = YN[j] + yj.rows(); Matrix<> e = ::t(yj - Xj*::t(beta_st(j,_)) - Wj*bi[j](_,s))*(yj - Xj*::t(beta_st(j,_)) - Wj*bi[j](_,s)); SSE[j] = SSE[j] + e[0]; } ///////////////////////////////////////////////// // 3.3. sigma2| beta_st, D_st, y, bi, s, P ///////////////////////////////////////////////// double nu = (c0 + (double)YN[j])/2; double scale = (d0 + SSE[j])/2; sigma2[j] = stream.rigamma(nu, scale); density_sigma2(iter, j) = ::exp(lndinvgamma(sigma2_st[j], nu, scale)); } ///////////////////////////////////////////////// // 3.4. P| beta_st, D_st, y, bi, sigma2, s ///////////////////////////////////////////////// double shape1 = 0; double shape2 = 0; for (int j =0; j density_P(nstore, ns); for (int iter = 0; iter < nstore; ++iter){ Matrix nstate(ns, 1); ///////////////////////////////////////////////// // 4.1. 
s| y, P, beta_st, sigma_st, D_st ///////////////////////////////////////////////// Matrix<> F(ntime, ns); Matrix<> pr1(ns, 1); pr1[0] = 1; Matrix<> py(ns, 1); Matrix<> pstyt1(ns, 1); for (int tt=0; tt Wbi(nsubj_s, 1); for (int j=0; j Sigma = eye(nsubj_s)*sigma2_st[j]; Matrix<> WDW = Wt_arr[tt]*D_st[j]*t(Wt_arr[tt]); Matrix<> Mu = Xt_arr[tt]*::t(beta_st(j,_)); py[j] = ::exp(lndmvn(Yt_arr[tt], Mu, WDW+Sigma)); } if (tt==0) pstyt1 = pr1; else { pstyt1 = ::t(F(tt-1,_)*P); } Matrix<> unnorm_pstyt = pstyt1%py; Matrix<> pstyt = unnorm_pstyt/sum(unnorm_pstyt); for (int j=0; j state(ntime, 1); Matrix<> ps = Matrix<>(ntime, ns); ps(ntime-1,_) = F(ntime-1,_); state(ntime-1) = ns; Matrix<> pstyn = Matrix<>(ns, 1); double pone = 0.0; int tt = ntime-2; while (tt >= 0){ int st = state(tt+1); Matrix<> Pst_1 = ::t(P(_, st-1)); Matrix<> unnorm_pstyn = F(tt,_)%Pst_1; pstyn = unnorm_pstyn/sum(unnorm_pstyn); if (st==1){ state(tt) = 1; } else{ pone = pstyn(st-2); if(stream.runif() < pone) state(tt) = st-1; else state(tt) = st; } ps(tt,_) = pstyn; --tt; }// end of while loop for (int j = 0; j F(ntime, ns); Matrix<> pr1(ns, 1); Matrix<> like(ntime, 1); pr1[0] = 1; Matrix<> py(ns, 1); Matrix<> pstyt1(ns, 1); for (int tt=0; tt Wbi(nsubj, 1); for (int j=0; j Sigma = eye(nsubj)*sigma2_st[j]; Matrix<> WDW = Wt_arr[tt]*D_st[j]*t(Wt_arr[tt]); Matrix<> Mu = Xt_arr[tt]*::t(beta_st(j,_)); py[j] = ::exp(lndmvn(Yt_arr[tt], Mu, WDW+Sigma)); } if (tt==0) pstyt1 = pr1; else { pstyt1 = ::t(F(tt-1,_)*P_st); } Matrix<> unnorm_pstyt = pstyt1%py; Matrix<> pstyt = unnorm_pstyt/sum(unnorm_pstyt); for (int j=0; j density_beta_prior(ns, 1); Matrix density_sigma2_prior(ns, 1); Matrix density_D_prior(ns, 1); Matrix density_P_prior(ns, 1); density_P_prior[ns-1] = 0; // for (int j=0; j0){ Rprintf("\n ----------------------------------------------------------------------- \n"); Rprintf("\nlogmarglike = %10.5f\n", logmarglike); Rprintf("loglike = %10.5f\n", loglike); Rprintf("logprior = %10.5f\n", logprior); Rprintf("pdf_beta = %10.5f\n", pdf_beta); Rprintf("pdf_Sigma = %10.5f\n", pdf_sigma2); Rprintf("pdf_D = %10.5f\n", pdf_D); Rprintf("pdf_P = %10.5f\n", pdf_P); } }// end of marginal likelihood computation delete[] nobst; for(int k=0; k Y(*Yrow, *Ycol, Ydata); Matrix<> X(*Xrow, *Xcol, Xdata); Matrix<> W(*Wrow, *Wcol, Wdata); Matrix<> YT(*Yrow, *Ycol, YTdata); Matrix<> XT(*Xrow, *Xcol, XTdata); Matrix<> WT(*Wrow, *Wcol, WTdata); Matrix<> betastart(*Xcol, 1, betastartdata); Matrix<> b0(*Xcol, 1, b0data); Matrix<> B0(*Xcol, *Xcol, B0data); Matrix<> R0(*Wcol, *Wcol, R0data); Matrix<> Dstart = invpd(R0); Matrix subjectid_mat(*nobs, 1, subjectid); Matrix timeid_mat(*nobs, 1, timeid); Matrix<> subject_groupinfo(*nsubj, 3, subject_groupinfodata); Matrix<> time_groupinfo(*ntime, 3, time_groupinfodata); const int mns = *m + 1; Matrix<> beta_store(*betarow, *betacol); Matrix<> sigma_store(*betarow, mns); Matrix<> D_store(*betarow, *Wcol* *Wcol * mns); double logmarglike; double loglike; if (*m == 0){ MCMCPACK_PASSRNG2MODEL(GaussianPanelRE_impl, *nsubj, *ntime, *nobs, subjectid_mat, timeid_mat, Y, X, W, *burnin, *mcmc, *thin, *verbose, *sigma2start, Dstart, b0, B0, *c0, *d0, *r0, R0, time_groupinfo, subject_groupinfo, beta_store, sigma_store, D_store, logmarglike, loglike, *chib); // store marginal likelihood logmarglikeholder[0] = logmarglike; loglikeholder[0] = loglike; for (int i=0; i < (*betarow* *betacol); ++i){ betadata[i] = beta_store(i); } for (int i=0; i < (*betarow); ++i){ sigmadata[i] = sigma_store(i); } for (int i=0; i < (*betarow*mns* 
*Wcol* *Wcol); ++i){ Ddata[i] = D_store(i); } } else { Matrix <> P(mns, mns, Pstart); Matrix<> P_store(*betarow, mns*mns); Matrix<> s_store(*betarow, *ntime*mns); Matrix<> ps_store(*ntime, mns); MCMCPACK_PASSRNG2MODEL(HMMpanelRE_impl, *nsubj, *ntime, *m, *nobs, subjectid_mat, timeid_mat, Y, X, W, YT, XT, WT, *burnin, *mcmc, *thin, *verbose, betastart, *sigma2start, Dstart, b0, B0, *c0, *d0, *r0, R0, P, time_groupinfo, subject_groupinfo, beta_store, sigma_store, D_store, P_store, ps_store, s_store, logmarglike, loglike, *chib); // store marginal likelihood logmarglikeholder[0] = logmarglike; loglikeholder[0] = loglike; for (int i=0; i < (*betarow* *betacol); ++i){ betadata[i] = beta_store(i); } for (int i=0; i < (*betarow*mns); ++i){ sigmadata[i] = sigma_store(i); } for (int i=0; i < (*betarow*mns* *Wcol* *Wcol); ++i){ Ddata[i] = D_store(i); } for (int i = 0; i<(*ntime *mns); ++i){ psout[i] = ps_store[i]; } for (int i = 0; i<(*betarow* *ntime *mns); ++i){ sout[i] = s_store[i]; } } }// end of HMMpanelRE_CC }// end of extern C #endif /* HMMpanelRE_CC */ MCMCpack/src/HMMpanelFE.cc0000644000176000001440000003202512140061656014635 0ustar ripleyusers////////////////////////////////////////////////////////////////////////// // HMMpanelFE.cc is C++ code // // Jong Hee Park // Department of Political Science and International Relations // Seoul National University // jongheepark@snu.ac.kr // Written 11/19/2008 // Modified 09/20/2009 ////////////////////////////////////////////////////////////////////////// #ifndef HMMPANELFE_CC #define HMMPANELFE_CC #include #include #include "MCMCrng.h" #include "MCMCfcds.h" #include "matrix.h" #include "distributions.h" #include "stat.h" #include "la.h" #include "ide.h" #include "smath.h" #include "rng.h" #include "mersenne.h" #include "lecuyer.h" #include "lapack.h" #include // needed to use Rprintf() #include // needed to allow user interrupts using namespace std; using namespace scythe; // used to access 1d arrays holding R matrices like a 2d array #define M(ROW,COL,NROWS) (COL*NROWS+ROW) template // For better identification, first two and last two states are constrained to be 1 and ns // 10/31/2011 JHP Matrix hetero_state_sampler(rng& stream, const int m, const int ntime_s, const Matrix<>& Y, const Matrix<>& delta, const Matrix<>& Sigma, const Matrix<>& P){ const int ns = m + 1; const int ntime = ntime_s; Matrix<> F(ntime, ns); Matrix<> pr1(ns, 1); pr1[0] = 1; Matrix<> py(ns, 1); Matrix<> pstyt1(ns, 1); for (int tt=0; tt unnorm_pstyt = pstyt1%py; const Matrix<> pstyt = unnorm_pstyt/sum(unnorm_pstyt); // pstyt = Pr(st|Yt) for (int j=0; j state(ntime, 1); Matrix<> ps = Matrix<>(ntime, ns); ps(ntime-1,_) = F(ntime-1,_); state(ntime-1) = ns; Matrix<> pstyn = Matrix<>(ns, 1); double pone = 0.0; int tt = ntime-2; while (tt >= 0){ int st = state(tt+1); Matrix<> Pst_1 = ::t(P(_,st-1)); Matrix<> unnorm_pstyn = F(tt,_)%Pst_1; pstyn = unnorm_pstyn/sum(unnorm_pstyn); if (st==1){ state(tt) = 1; } else{ pone = pstyn(st-2); if(stream.runif() < pone) state(tt) = st-1; else state(tt) = st; } ps(tt,_) = pstyn; --tt; }// end of while loop return state; } // end of state sampler template void HMMpanelFE_impl(rng& stream, unsigned int nsubj, unsigned int ntime, unsigned int mmax, unsigned int mmin, const Matrix& mvector, unsigned int totalstates, const Matrix<>& Y, const Matrix<>& X, const Matrix& subjectid, unsigned int burnin, unsigned int mcmc, unsigned int thin, unsigned int verbose, Matrix<>& beta, double sigma2, Matrix<>& deltastart, const Matrix<>& b0, const Matrix<>& B0, 
const double delta0, const double Delta0, const double c0, const double d0, const Matrix<>& P0data, const Matrix<>& Pstart, const Matrix<>& subject_groupinfo, Matrix<>& betastorage, Matrix<>& statestorage, Matrix<>& deltastorage, Matrix<>& sigmastorage){ // redefine constants const unsigned int K = X.cols();; // ncol(X) const int NOBS = Y.rows(); const int tot_iter = burnin + mcmc; vector< vector > P0_holder; vector< vector > P_holder; int count = 0; for (int s=0; s< nsubj; ++s){ const int nms = mvector[s] + 1; vector P0mat; vector Pmat; for(int ii=0; ii<(nms*nms); ++ii){ P0mat.push_back(P0data(count + ii)); Pmat.push_back(Pstart(count + ii)); } count = count + nms*nms; P0_holder.push_back(P0mat); P_holder.push_back(Pmat); } vector< vector > delta_holder; vector< vector > sigma2_holder; vector< vector > nstate; count = 0; for (int s=0; s< nsubj; ++s){ int nms = mvector[s] + 1; int ntime_s = subject_groupinfo(s, 2); vector deltamat; vector sigmamat; vector nstatemat; for(int ii=0; ii *Yk_arr = new Matrix[nsubj]; Matrix *Xk_arr = new Matrix[nsubj]; for(int k=0; k(nobsk[k], K); Yk_arr[k] = Matrix(nobsk[k], 1); for (int l=0; l *tXk_arr = new Matrix[nsubj]; for(int k=0; k* newY = new Matrix<>[nsubj]; // newY = Y - delta Matrix<>* Yres = new Matrix<>[nsubj]; // Yres = Y - Xbeta for (int s=0; s Zk_arr(ntime_s, 1); for (int tt=0; tt< ntime_s; ++tt) { Zk_arr(tt) = 1; } Yres[s] = Yk_arr[s] - Xk_arr[s]*beta; if(mvector[s]==0){ double Dn = 1/(Delta0 + (double)ntime_s/sigma2_holder[s][0]); double dn = Dn*(Delta0*delta0 + sum(Yres[s])/sigma2_holder[s][0]); delta_holder[s][0] = stream.rnorm(dn, sqrt(Dn)); delta_sum = delta_sum + delta_holder[s][0]; newY[s] = Yk_arr[s] - Zk_arr*delta_holder[s][0]; // Sample sigma double shape = (c0 + (double)ntime_s)/2; const Matrix<> SSE = crossprod (newY[s]); double scale =(d0 + SSE[0])/2; sigma2_holder[s][0] = 1/stream.rgamma(shape, scale); } else { const int nscur = mvector[s] + 1; Matrix<> P(nscur, nscur); Matrix<> P0(nscur, nscur); Matrix<> delta(nscur, 1); Matrix<> Sigma(nscur, 1); for (int i=0;i<(nscur*nscur); ++i){ P0[i] = P0_holder[s][i]; P[i] = P_holder[s][i]; } for (int i=0;i state_s = hetero_state_sampler(stream, mvector[s], ntime_s, Yres[s], delta, Sigma, P); // Sample delta and Sigma int delta_count = 0; for (int j = 0; j yj = Yres[s]((delta_count - nstate[s][j]), 0, (delta_count - 1), 0); Matrix<> Yj = Yk_arr[s]((delta_count - nstate[s][j]), 0, (delta_count - 1), 0); double Dn = 1/(Delta0 + (double)nstate[s][j]/sigma2_holder[s][j]); double dn = Dn*(Delta0*delta0 + sum(yj)/sigma2_holder[s][j]); delta_holder[s][j] = stream.rnorm(dn, sqrt(Dn)); delta_sum = delta_sum + delta_holder[s][j]; newY[s]((delta_count - nstate[s][j]), 0, (delta_count - 1), 0) = Yj - delta_holder[s][j]; // Sample sigma double shape = (c0 + (double)nstate[s][j])/2; const Matrix<> SSE = crossprod (newY[s]((delta_count - nstate[s][j]), 0, (delta_count - 1), 0)); double scale =(d0 + SSE[0])/2; sigma2_holder[s][j] = 1/stream.rgamma(shape, scale); } // assure that there is no label switching problem // the code needs to be added here // Sample P double shape1 = 0; double shape2 = 0; for (int j =0; j<(nscur-1); ++j){ shape1 = std::abs(P0(j,j) + nstate[s][j] - 1); shape2 = P0(j,j+1) + 1; // P(j,j) = stream.rbeta(shape1, shape2); P(j,j+1) = 1 - P(j,j); } P(mvector[s], mvector[s]) = 1; //no jump at the last state for(int ii=0; ii<(nscur*nscur) ;++ii) { P_holder[s][ii] = P[ii]; } }//end of else (mvector!=0) }// end of subject specific looping // Sample beta Matrix<> XVX(K, K); Matrix<> XVY(K, 1); 
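    //
    // (Annotation added for clarity; not in the original source.) With
    // the hidden states, the state-specific intercepts delta, and the
    // variances sigma2 held fixed, beta has a conjugate Gaussian full
    // conditional. Writing V_i^{-1} for the diagonal precision matrix
    // assembled in the loop below (1/sigma2 of whichever regime each
    // observation falls in) and y~_i = y_i - delta_{s_it}, the draw is
    // roughly
    //
    //   beta | .  ~  N( Bn (B0 b0 + sum_i X_i' V_i^{-1} y~_i),  Bn ),
    //   Bn = inv( B0 + sum_i X_i' V_i^{-1} X_i ),
    //
    // which is what the XVX and XVY accumulators implement before the
    // rmvnorm draw.
    //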
for(int s = 0; s Vi = eye(ntime_s); for (int j = 0; j <(mvector[s] + 1); ++j){ delta_count = delta_count + nstate[s][j]; for(int i = (delta_count - nstate[s][j]); i beta_var = invpd(B0 + XVX); Matrix<> beta_mean = beta_var*(B0*b0 + XVY); beta = stream.rmvnorm(beta_mean, beta_var); // STORE if (iter >= burnin && ((iter % thin) == 0)) { for(int j=0;j 0 && iter % verbose == 0){ Rprintf("\n ----------------------------------------------------------------------- "); Rprintf("\n\n HMMpanelFE %i of %i \n", iter, tot_iter); Rprintf("\n beta = \n"); for(int i=0;i Y(*Yrow, *Ycol, Ydata); Matrix<> X(*Xrow, *Xcol, Xdata); Matrix<> betastart(*Xcol, 1, betastartdata); Matrix<> deltastart(*deltastartrow, 1, deltastartdata); Matrix<> b0(*Xcol, 1, b0data); Matrix<> B0(*Xcol, *Xcol, B0data); Matrix subjectid_mat(*nobs, 1, subjectid); Matrix<> subject_groupinfo(*nsubj, 3, subject_groupinfodata); Matrix<> P0(*P0row, 1, P0data); Matrix<> Pstart(*P0row, 1, Pstartdata); Matrix mvector(*nsubj, 1, m); Matrix<> betastorage(*betarow, *betacol); Matrix<> sigmastorage(*betarow, *totalstates); Matrix<> deltastorage(*betarow, *totalstates); Matrix<> statestorage(*betarow, *totalstates); MCMCPACK_PASSRNG2MODEL(HMMpanelFE_impl, *nsubj, *ntime, *mmax, *mmin, mvector, *totalstates, Y, X, subjectid_mat, *burnin, *mcmc, *thin, *verbose, betastart, *sigma2start, deltastart, b0, B0, *delta0, *Delta0, *c0, *d0, P0, Pstart, subject_groupinfo, betastorage, statestorage, deltastorage, sigmastorage); unsigned int deltasize = *betarow * *totalstates; for (int i=0; i < deltasize; ++i){ deltadraws[i] = deltastorage(i); sigmadraws[i] = sigmastorage(i); statedraws[i] = statestorage(i); } unsigned int betasize = *betarow * *betacol; for (int i=0; i < betasize; ++i){ betadraws[i] = betastorage(i); } }// end of HMMpanelFE }// end of extern C #endif /* HMMPANELFE_CC */ MCMCpack/src/HMMmultivariateGaussian.cc0000644000176000001440000007576312140061656017544 0ustar ripleyusers////////////////////////////////////////////////////////////////////////// // HMMmultivariateGaussian.cc is C++ code to estimate a Gaussian panel model with a structural break // y_{it} = \x'_{it}\b + \varepsilon_{it} // \varepsilon_{it} \sim \normdist{0}{\sigma^2} // Parameters = {beta, sigma2, P_i}// static + changing // // Jong Hee Park // Department of Political Science and International Relations // Seoul National University // jongheepark@snu.ac.kr // Written 11/19/2008 // Modified 07/04/2011 // ////////////////////////////////////////////////////////////////////////// #ifndef HMMMULTIVARIATEGAUSSIAN_CC #define HMMMULTIVARIATEGAUSSIAN_CC #include #include #include "MCMCrng.h" #include "MCMCfcds.h" #include "matrix.h" #include "distributions.h" #include "stat.h" #include "la.h" #include "ide.h" #include "smath.h" #include "rng.h" #include "mersenne.h" #include "lecuyer.h" #include "lapack.h" #include // needed to use Rprintf() #include // needed to allow user interrupts using namespace std; using namespace scythe; static double ln_invgamma(double theta, double a, double b) { double logf = a * log(b) - lngammafn(a) + -(a+1) * log(theta) + -b/theta; return (logf); //pow(b, a) / gammafn(a) * pow(theta, -(a+1)) * exp(-b/theta); } // used to access 1d arrays holding R matrices like a 2d array #define M(ROW,COL,NROWS) (COL*NROWS+ROW) template void MultivariateGaussian_impl(rng& stream, unsigned int nsubj, unsigned int ntime, unsigned int nobs, const Matrix& subjectid, const Matrix& timeid, const Matrix<>& Y, const Matrix<>& X, const Matrix<>& YT, const Matrix<>& XT, 
unsigned int burnin, unsigned int mcmc, unsigned int thin, unsigned int verbose, double sigma2start, const Matrix<>& b0, const Matrix<>& B0, const double c0, const double d0, const Matrix<>& time_groupinfo, const Matrix<>& subject_groupinfo, Matrix<>& beta_store, Matrix<>& sigma_store, double& logmarglike, double& loglike, unsigned int chib) { // redefine constants const int K = X.cols();; // ncol(X) const int NOBS = nobs; const Matrix<> B0inv = invpd(B0); const unsigned int tot_iter = burnin + mcmc; const unsigned int nstore = mcmc / thin; // number of draws to store // generate posk_arr and post_arr // Number of observations by group k int *nobsk = new int[nsubj]; for (unsigned int k=0; k *Yk_arr = new Matrix[nsubj]; Matrix *Xk_arr = new Matrix[nsubj]; for(unsigned int k=0; k(nobsk[k], K); Yk_arr[k] = Matrix(nobsk[k], 1); for (int l=0; l *cpXk_arr = new Matrix[nsubj]; Matrix *tXYk_arr = new Matrix[nsubj]; for(unsigned int k=0; k beta(K, 1); double sigma2 = sigma2start; // MCMC iterations start here int sampcount = 0; Rprintf("\n ///////////////////////////////////////////////// \n"); Rprintf("\n MCMC for Multivariate Gaussian loop starts! \n"); Rprintf("\n ///////////////////////////////////////////////// \n"); ///////////////////////////////////////////////// // initialize Yhat for the first loop only ///////////////////////////////////////////////// for (unsigned int iter=0; iter < tot_iter; ++iter){ ////////////////////// // Step 1. Sample beta ////////////////////// Matrix<> XVX(K, K); Matrix<> XVY(K, 1); for(unsigned int s = 0; s beta_var = invpd(B0 + XVX/sigma2); Matrix<> beta_mean = beta_var*(B0*b0 + XVY/sigma2); beta = stream.rmvnorm(beta_mean, beta_var); ///////////////////////////////////////////////// // Step 2. Sample sigma2 ///////////////////////////////////////////////// double SSE = 0; int counter = 0; for(unsigned int s=0;s e = t(Yk_arr[s]-Xk_arr[s]*beta)*(Yk_arr[s] - Xk_arr[s]*beta); SSE = SSE + e[0]; counter = counter + ntime_s; } double nu = (c0 + NOBS)/2; double delta = (d0 + SSE)/2; sigma2 = stream.rigamma(nu, delta); ///////////////////////////////////////////////// // STORE ///////////////////////////////////////////////// if (iter >= burnin && ((iter % thin) == 0)) { for (int i=0; i< K; ++i){ beta_store(sampcount, i) = beta(i);// stored by the order of (11, 12, 13, 21, 22, 23) } sigma_store(sampcount) = sigma2; ++sampcount; } ///////////////////////////////////////////////// // REPORT ///////////////////////////////////////////////// if(verbose > 0 && iter % verbose == 0){ Rprintf("\n ----------------------------------------------------------------------- \n"); Rprintf("Multivaraite Gaussian iteration %i of %i \n", (iter+1), tot_iter); Rprintf("\n beta is "); for(int i=0;i beta_st(K, 1); beta_st(_, 0) = meanc(beta_store); const double sigma2_st = mean(sigma_store); Matrix<> density_beta(nstore, 1); ////////////////////////////////////////////////////////////////// // 1. 
pdf.beta | sigma2_g ////////////////////////////////////////////////////////////////// for (unsigned int iter = 0; iter XVX(K, K); Matrix<> XVY(K, 1); for(unsigned int s = 0; s beta_var = invpd(B0 + XVX/sigma_store(iter)); Matrix<> beta_mean = beta_var*(B0*b0 + XVY/sigma_store(iter)); beta = stream.rmvnorm(beta_mean, beta_var); if (K == 1){ density_beta(iter) = dnorm(beta_st(0), beta_mean[0], sqrt(beta_var[0])); } else{ density_beta(iter) = ::exp(lndmvn(beta_st, beta_mean, beta_var)); } } double pdf_beta = log(mean(density_beta)); ////////////////////////////////////////////////////////////////// // 2. pdf.sigma2 ////////////////////////////////////////////////////////////////// Matrix<> density_sigma2(nstore, 1); for (unsigned int iter = 0; iter e = t(Yk_arr[s]-Xk_arr[s]*beta_st)*(Yk_arr[s] - Xk_arr[s]*beta_st); SSE = SSE + e[0]; } double nu = (c0 + NOBS)/2; double delta = (d0 + SSE)/2; sigma2 = stream.rigamma(nu, delta); density_sigma2(iter) = ::exp(ln_invgamma(sigma2_st, nu, delta)); } double pdf_sigma2 = log(mean(density_sigma2)); ////////////////////////////////////////////////////////////////// // likelihood f(y|beta_st, D_st, sigma2_st, P_st) ////////////////////////////////////////////////////////////////// loglike = 0; for(unsigned int s = 0; s Sigma = sigma2_st*eye(ntime_s); Matrix<> Mu = Xk_arr[s]*beta_st; loglike += lndmvn(Yk_arr[s], Mu, Sigma); } ////////////////////// // log prior ordinate ////////////////////// double density_beta_prior = 0; if (K == 1){ density_beta_prior = log(dnorm(beta_st(0), b0[0], sqrt(B0inv[0]))); } else{ density_beta_prior = lndmvn(beta_st, b0, B0inv); } double density_sigma2_prior = ln_invgamma(sigma2_st, c0/2, d0/2); // compute marginal likelihood double logprior = density_beta_prior + density_sigma2_prior; logmarglike = (loglike + logprior) - (pdf_beta + pdf_sigma2); if(verbose > 0){ Rprintf("\nlogmarglike = %10.5f\n", logmarglike); Rprintf("loglike = %10.5f\n", loglike); Rprintf("logprior = %10.5f\n", logprior); Rprintf("pdf_beta = %10.5f\n", pdf_beta); Rprintf("pdf_Sigma = %10.5f\n", pdf_sigma2); // Rprintf("pdf_P = %10.5f\n", pdf_P); } } delete[] nobsk; for(unsigned int k=0; k void HMMmultivariateGaussian_impl(rng& stream, unsigned int nsubj, unsigned int ntime, unsigned int m, unsigned int nobs, const Matrix& subjectid, const Matrix& timeid, const Matrix<>& Y, const Matrix<>& X, const Matrix<>& YT, const Matrix<>& XT, unsigned int burnin, unsigned int mcmc, unsigned int thin, unsigned int verbose, Matrix<>& betastart, double sigma2start, const Matrix<>& b0, const Matrix<>& B0, const double c0, const double d0, const Matrix<>& P0, const Matrix<>& time_groupinfo, const Matrix<>& subject_groupinfo, Matrix<>& beta_store, Matrix<>& sigma_store, Matrix<>& P_store, Matrix<>& ps_store, Matrix<>& s_store, double& logmarglike, double& loglike, unsigned int chib) { // redefine constants const int K = X.cols();; // ncol(X) const int NOBS = nobs; const Matrix<> B0inv = invpd(B0); const unsigned int tot_iter = burnin + mcmc; const unsigned int nstore = mcmc / thin; // number of draws to store const int ns = m + 1; // generate posk_arr and post_arr // Number of observations by group k int *nobsk = new int[nsubj]; for (unsigned int k=0; k *Yk_arr = new Matrix[nsubj]; Matrix *Xk_arr = new Matrix[nsubj]; for(int k=0; k(nobsk[k], K); Yk_arr[k] = Matrix(nobsk[k], 1); for (int l=0; l* Xt_arr = new Matrix<>[ntime]; Matrix<>* Yt_arr = new Matrix<>[ntime]; for(unsigned int k=0; k 0){ Xt_arr[k] = Matrix(nobst[k], K); Yt_arr[k] = Matrix(nobst[k], 1); for (int l = 0; l 
*cpXt_arr = new Matrix[ntime]; Matrix *tXt_arr = new Matrix[ntime]; Matrix *tXYt_arr = new Matrix[ntime]; for(unsigned int k=0; k beta(ns, K); Matrix<> sigma2(ns, 1); for (int j=0; j P = P0; // MCMC iterations start here int sampcount = 0; ///////////////////////////////////////////////// // initialize Yhat for the first loop only ///////////////////////////////////////////////// for (unsigned int iter=0; iter < tot_iter; ++iter){ ////////////////////// // Step 1. Sample state ////////////////////// Matrix<> F(ntime, ns); Matrix<> pr1(ns, 1); pr1[0] = 1; Matrix<> py(ns, 1); Matrix<> pstyt1(ns, 1); for (unsigned int tt=0; tt0){ int nsubj_s = time_groupinfo(tt, 2); for (int j=0; j Sigma = eye(nsubj_s)*sigma2[j]; Matrix<> Mu = Xt_arr[tt]*::t(beta(j,_)); py[j] = ::exp(lndmvn(Yt_arr[tt], Mu, Sigma)); } if (tt==0) pstyt1 = pr1; else { pstyt1 = ::t(F(tt-1,_)*P); } Matrix<> unnorm_pstyt = pstyt1%py; Matrix<> pstyt = unnorm_pstyt/sum(unnorm_pstyt); for (int j=0; j state(ntime, 1); Matrix<> ps = Matrix<>(ntime, ns); ps(ntime-1,_) = F(ntime-1,_); state(ntime-1) = ns; Matrix<> pstyn = Matrix<>(ns, 1); double pone = 0.0; int tt = ntime-2; while (tt >= 0){ if(nobst[tt]>0){ int st = state(tt+1); Matrix<> Pst_1 = ::t(P(_, st-1)); Matrix<> unnorm_pstyn = F(tt,_)%Pst_1; pstyn = unnorm_pstyn/sum(unnorm_pstyn); if (st==1){ state(tt) = 1; } else{ pone = pstyn(st-2); if(stream.runif() < pone) state(tt) = st-1; else state(tt) = st; } ps(tt,_) = pstyn; --tt; } }// end of while loop ////////////////////// // Step 2. Sample beta ////////////////////// int beta_count = 0; Matrix nstate(ns, 1); for (int j = 0; j XVX(K, K); Matrix<> XVY(K, 1); for(int tt = (beta_count - nstate[j]); tt 0){ XVX = XVX + cpXt_arr[tt]; XVY = XVY + tXYt_arr[tt]; } } Matrix<> beta_var = invpd(B0 + XVX/sigma2[j]); Matrix<> beta_mean = beta_var*(B0*b0 + XVY/sigma2[j]); beta(j,_) = stream.rmvnorm(beta_mean, beta_var); } ///////////////////////////////////////////////// // Step 3. Sample sigma2 ///////////////////////////////////////////////// beta_count = 0; Matrix YN(ns, 1); Matrix<> SSE(ns, 1); for (int j = 0; j yj = Yk_arr[s]((beta_count - nstate[j]), 0, (beta_count - 1), 0); Matrix<> Xj = Xk_arr[s]((beta_count - nstate[j]), 0, (beta_count - 1), K-1); Matrix<> e = ::t(yj - Xj*::t(beta(j,_)))*(yj - Xj*::t(beta(j,_))); YN[j] = YN[j] + yj.rows(); SSE[j] = SSE[j] + e[0]; } double nu = c0 + (double)YN[j]*0.5; double scale = d0 + SSE[j]*0.5; sigma2[j] = stream.rigamma(nu, scale); } ////////////////////// // Step 4. Sample P ////////////////////// double shape1 = 0; double shape2 = 0; for (unsigned int j =0; j= burnin && ((iter % thin) == 0)) { Matrix<> tbeta = ::t(beta); for (int i=0; i<(ns*K); ++i){ beta_store(sampcount, i) = tbeta[i]; } for (int i=0; i 0 && iter % verbose == 0){ Rprintf("\n ----------------------------------------------------------------------- \n"); Rprintf("HMM Multivaraite Gaussian iteration %i of %i \n", (iter+1), tot_iter); for(int j=0;j betast = meanc(beta_store); Matrix beta_st(ns, K); for (int j = 0; j< ns*K; ++j){ beta_st[j] = betast[j]; } Matrix sigma2st = meanc(sigma_store); Matrix sigma2_st(ns, 1); for (int j = 0; j< ns; ++j){ sigma2_st[j] = sigma2st[j]; } Matrix P_vec_st = meanc(P_store); const Matrix P_st(ns, ns); for (int j = 0; j< ns*ns; ++j){ P_st[j] = P_vec_st[j]; } Matrix<> density_beta(nstore, ns); ////////////////////////////////////////////////////////////////// // 1. 
pdf.beta | sigma2_g, P_g ////////////////////////////////////////////////////////////////// for (unsigned int iter = 0; iter nstate(ns, 1); for (int j = 0; j XVX(K, K); Matrix<> XVY(K, 1); for(int tt = (beta_count - nstate[j]); tt beta_var = invpd(B0 + XVX/sigma_store(iter, j)); Matrix<> beta_mean = beta_var*(B0*b0 + XVY/sigma_store(iter, j)); beta(j,_) = stream.rmvnorm(beta_mean, beta_var); if (K == 1){ density_beta(iter, j) = ::exp(log(dnorm(beta_st(j), beta_mean[0], sqrt(beta_var[0])))); } else{ density_beta(iter, j) = ::exp(lndmvn(::t(beta_st(j,_)), beta_mean, beta_var)); } } } double pdf_beta = log(prod(meanc(density_beta))); ////////////////////////////////////////////////////////////////// // 2. pdf.sigma2 ////////////////////////////////////////////////////////////////// Matrix<> density_sigma2(nstore, ns); for (unsigned int iter = 0; iter F(ntime, ns); Matrix<> pr1(ns, 1); pr1[0] = 1; Matrix<> py(ns, 1); Matrix<> pstyt1(ns, 1); for (unsigned int tt=0; tt Sigma = eye(nsubj_s)*sigma2[j]; Matrix<> Mu = Xt_arr[tt]*::t(beta_st(j,_)); py[j] = ::exp(lndmvn(Yt_arr[tt], Mu, Sigma)); } if (tt==0) pstyt1 = pr1; else { pstyt1 = ::t(F(tt-1,_)*P); } Matrix<> unnorm_pstyt = pstyt1%py; Matrix<> pstyt = unnorm_pstyt/sum(unnorm_pstyt); for (int j=0; j state(ntime, 1); Matrix<> ps = Matrix<>(ntime, ns); ps(ntime-1,_) = F(ntime-1,_); state(ntime-1) = ns; Matrix<> pstyn = Matrix<>(ns, 1); double pone = 0.0; int tt = ntime-2; while (tt >= 0){ int st = state(tt+1); Matrix<> Pst_1 = ::t(P(_, st-1)); Matrix<> unnorm_pstyn = F(tt,_)%Pst_1; pstyn = unnorm_pstyn/sum(unnorm_pstyn); if (st==1){ state(tt) = 1; } else{ pone = pstyn(st-2); if(stream.runif() < pone) state(tt) = st-1; else state(tt) = st; } ps(tt,_) = pstyn; --tt; }// end of while loop ///////////////////////////////////////////////// // 2.2. sigma2| beta_st, s, P ///////////////////////////////////////////////// Matrix nstate(ns, 1); for (int j = 0; j SSE(ns, 1); int beta_count = 0; Matrix YN(ns, 1); for (int j = 0; j yj = Yk_arr[s]((beta_count - nstate[j]), 0, (beta_count - 1), 0); Matrix<> Xj = Xk_arr[s]((beta_count - nstate[j]), 0, (beta_count - 1), K-1); YN[j] = YN[j] + yj.rows(); Matrix<> e = ::t(yj - Xj*::t(beta_st(j,_)))*(yj - Xj*::t(beta_st(j,_))); SSE[j] = SSE[j] + e[0]; } double nu = (c0 + (double)YN[j])/2; double scale = (d0 + SSE[j])/2; sigma2[j] = stream.rigamma(nu, scale); density_sigma2(iter, j) = ::exp(ln_invgamma(sigma2_st[j], nu, scale)); } ///////////////////////////////////////////////// // 2.3. 
P| beta_st, sigma2, s ///////////////////////////////////////////////// double shape1 = 0; double shape2 = 0; for (unsigned int j =0; j density_P(nstore, ns); // Matrix<> density_local_P(ns, 1); for (unsigned int iter = 0; iter < nstore; ++iter){ Matrix<> F(ntime, ns); Matrix<> pr1(ns, 1); pr1[0] = 1; Matrix<> py(ns, 1); Matrix<> pstyt1(ns, 1); for (unsigned int tt=0; tt0){ int nsubj_s = time_groupinfo(tt, 2); for (int j=0; j Sigma = eye(nsubj_s)*sigma2_st[j]; Matrix<> Mu = Xt_arr[tt]*::t(beta_st(j,_)); py[j] = ::exp(lndmvn(Yt_arr[tt], Mu, Sigma)); } if (tt==0) pstyt1 = pr1; else { pstyt1 = ::t(F(tt-1,_)*P); } Matrix<> unnorm_pstyt = pstyt1%py; Matrix<> pstyt = unnorm_pstyt/sum(unnorm_pstyt); for (int j=0; j state(ntime, 1); Matrix<> ps = Matrix<>(ntime, ns); ps(ntime-1,_) = F(ntime-1,_); state(ntime-1) = ns; Matrix<> pstyn = Matrix<>(ns, 1); double pone = 0.0; int tt = ntime-2; while (tt >= 0){ if(nobst[tt]>0){ int st = state(tt+1); Matrix<> Pst_1 = ::t(P(_, st-1)); Matrix<> unnorm_pstyn = F(tt,_)%Pst_1; pstyn = unnorm_pstyn/sum(unnorm_pstyn); if (st==1){ state(tt) = 1; } else{ pone = pstyn(st-2); if(stream.runif() < pone) state(tt) = st-1; else state(tt) = st; } ps(tt,_) = pstyn; --tt; } }// end of while loop Matrix nstate(ns, 1); for (int j = 0; j F(ntime, ns); Matrix<> pr1(ns, 1); Matrix<> like(ntime, 1); pr1[0] = 1; Matrix<> py(ns, 1); Matrix<> pstyt1(ns, 1); for (unsigned int tt=0; tt Sigma = eye(nsubj)*sigma2_st[j]; Matrix<> Mu = Xt_arr[tt]*::t(beta_st(j,_)); py[j] = ::exp(lndmvn(Yt_arr[tt], Mu, Sigma)); } if (tt==0) pstyt1 = pr1; else { pstyt1 = ::t(F(tt-1,_)*P_st); } Matrix<> unnorm_pstyt = pstyt1%py; Matrix<> pstyt = unnorm_pstyt/sum(unnorm_pstyt); for (int j=0; j density_beta_prior(ns, 1); Matrix density_sigma2_prior(ns, 1); Matrix density_P_prior(ns, 1); density_P_prior[ns-1] = 0; // for (int j=0; j 0){ Rprintf("\nlogmarglike = %10.5f\n", logmarglike); Rprintf("loglike = %10.5f\n", loglike); Rprintf("logprior = %10.5f\n", logprior); Rprintf("pdf_beta = %10.5f\n", pdf_beta); Rprintf("pdf_Sigma = %10.5f\n", pdf_sigma2); Rprintf("pdf_P = %10.5f\n", pdf_P); } }// end of marginal likelihood computation delete[] nobst; for(unsigned int k=0; k Y(*Yrow, *Ycol, Ydata); Matrix<> X(*Xrow, *Xcol, Xdata); Matrix<> YT(*Yrow, *Ycol, YTdata); Matrix<> XT(*Xrow, *Xcol, XTdata); Matrix<> betastart(*Xcol, 1, betastartdata); Matrix<> b0(*Xcol, 1, b0data); Matrix<> B0(*Xcol, *Xcol, B0data); Matrix<> Pstart(*P0row, *P0col, P0data); Matrix subjectid_mat(*nobs, 1, subjectid); Matrix timeid_mat(*nobs, 1, timeid); Matrix<> subject_groupinfo(*nsubj, 3, subject_groupinfodata); Matrix<> time_groupinfo(*ntime, 3, time_groupinfodata); const int ns = *m + 1; double logmarglike; double loglike; if (*m == 0) { Matrix<> beta_store(*betarow, *betacol); Matrix<> sigma_store(*betarow, 1); MCMCPACK_PASSRNG2MODEL(MultivariateGaussian_impl, *nsubj, *ntime, *nobs, subjectid_mat, timeid_mat, Y, X, YT, XT, *burnin, *mcmc, *thin, *verbose, *sigma2start, b0, B0, *c0, *d0, time_groupinfo, subject_groupinfo, beta_store, sigma_store, logmarglike, loglike, *chib); // store marginal likelihood logmarglikeholder[0] = logmarglike; loglikeholder[0] = loglike; for (int i=0; i < (*betarow* *betacol); ++i){ betadata[i] = beta_store(i); } for (int i=0; i < (*betarow*ns); ++i){ sigmadata[i] = sigma_store(i); } }// end of if m == 0 else { Matrix<> beta_store(*betarow, *betacol); Matrix<> sigma_store(*betarow, ns); Matrix<> P_store(*betarow, ns*ns); Matrix<> s_store(*betarow, *ntime); Matrix<> ps_store(*ntime, ns); 
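// Descriptive note: MCMCPACK_PASSRNG2MODEL (provided by MCMCrng.h, included above)
// selects the requested random number generator (e.g. the mersenne or lecuyer
// generators included above) and forwards the remaining arguments to the templated
// sampler.  On return, beta_store holds the regime-specific coefficient draws,
// sigma_store the regime variances, P_store the vectorized transition matrix,
// s_store the sampled state sequence, and ps_store the smoothed state probabilities
// that are copied into psout below.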
MCMCPACK_PASSRNG2MODEL(HMMmultivariateGaussian_impl, *nsubj, *ntime, *m, *nobs, subjectid_mat, timeid_mat, Y, X, YT, XT, *burnin, *mcmc, *thin, *verbose, betastart, *sigma2start, b0, B0, *c0, *d0, Pstart, time_groupinfo, subject_groupinfo, beta_store, sigma_store, P_store, ps_store, s_store, logmarglike, loglike, *chib); // store marginal likelihood logmarglikeholder[0] = logmarglike; loglikeholder[0] = loglike; for (int i=0; i < (*betarow* *betacol); ++i){ betadata[i] = beta_store(i); } for (int i=0; i < (*betarow*ns); ++i){ sigmadata[i] = sigma_store(i); } for (int i = 0; i<(*ntime *ns); ++i){ psout[i] = ps_store[i]; } }// end of m>0 }// end of HMMmultivariateGaussian_CC }// end of extern C #endif /*HMMmultivariateGaussian_CC */ MCMCpack/src/error.h0000644000176000001440000005136712140061657013767 0ustar ripleyusers/* * Scythe Statistical Library Copyright (C) 2000-2002 Andrew D. Martin * and Kevin M. Quinn; 2002-present Andrew D. Martin, Kevin M. Quinn, * and Daniel Pemstein. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify under the terms of the GNU General Public License as * published by Free Software Foundation; either version 2 of the * License, or (at your option) any later version. See the text files * COPYING and LICENSE, distributed with this source code, for further * information. * -------------------------------------------------------------------- * scythestat/error.h */ /*! \file error.h * * \brief Definitions of Scythe exception classes. * * This file contains the class definitions for * scythe::scythe_exception and its children. These exception classes * describe all of the error conditions generated by Scythe library * routines. * * Furthermore, error.h contains a series of macro definitions that * regulate the inclusion of the library's error checking code in * compiled code. These macros are controlled by the compiler flag * SCYTHE_DEBUG and define four levels of scythe debug * info, SCYTHE_DEBUG = 0, 1, 2, or 3. The library uses these macros to * specify the debug level of thrown exceptions. If we are at level * three, all throws are expanded into actual code, at level 2 only * SCYTHE_THROW_10 AND SCYTHE_THROW_20 calls are expanded, and so on. * Scythe developers should balance exception importance and * efficiency costs when making exception level choices. For example, * bounds checking in matrices is done at level three primarily * because the added branch results in high performance penalties and * out-of-bounds errors shouldn't occur in well-written code, while * conformance checks in matrix multiplication are level 1 because the * checks result in little overhead relative to the cost of matrix * multiplication and conformation errors are easy to introduce by * accident. At level 0, the library performs virtually no error * checking. * * While the various SCYTHE_THROW, SCYTHE_CHECK, and SCYTHE_WARN * macros will only typically be used by library developers, users * should make extensive use the tiered error reporting in Scythe by * setting the compiler flag SCYTHE_DEBUG. If not explicitly set by * the user, the SCYTHE_DEBUG level is automatically set to 3. */ #ifndef SCYTHE_ERROR_H #define SCYTHE_ERROR_H #include #include #include #include #include #include #ifdef SCYTHE_RPACK #include // needed to use Rprintf() #include // needed to allow user interrupts #endif /*! 
@cond */ #ifdef SCYTHE_DEBUG_LIB #define SCYTHE_DEBUG_MSG(MSG) \ { std::cout << "SCYTHE_DEBUG_LIB: " << MSG << std::endl; } #else #define SCYTHE_DEBUG_MSG(MSG) #endif /*! @endcond */ #define SCYTHE_THROW(EXCEP,MSG) \ { \ std::stringstream _SCYTHE_DEBUG_ss; \ _SCYTHE_DEBUG_ss << MSG; \ throw EXCEP(__FILE__, __func__, __LINE__, \ _SCYTHE_DEBUG_ss.str()); \ } #define SCYTHE_CHECK(CHECK,EXCEP,MSG) \ { \ if (CHECK) \ SCYTHE_THROW(EXCEP,MSG) \ } #define SCYTHE_WARN_RPACK(MSG) \ { \ std::stringstream _SCYTHE_WARN_ss; \ _SCYTHE_WARN_ss << "WARNING in " << __FILE__ << ", " \ << __func__ << ", " << __LINE__ << ": " \ << MSG << "\n"; \ Rprintf(_SCYTHE_WARN_ss.str().c_str()); \ } #define SCYTHE_WARN_STD(MSG) \ std::cerr << "WARNING in " << __FILE__ << ", " \ << __func__ << ", " << __LINE__ << ": " \ << MSG << "\n"; #ifdef SCYTHE_RPACK #define SCYTHE_WARN SCYTHE_WARN_RPACK #else #define SCYTHE_WARN SCYTHE_WARN_STD #endif #define SCYTHE_CHECK_WARN(CHECK,MSG) \ { \ if (CHECK) \ SCYTHE_WARN(MSG) \ } /*! @cond */ #ifndef SCYTHE_DEBUG #define SCYTHE_DEBUG 3 #endif /*! @endcond */ #if SCYTHE_DEBUG > 0 #define SCYTHE_CHECK_10(CHECK,EXCEP,MSG) SCYTHE_CHECK(CHECK,EXCEP,MSG) #else #define SCYTHE_CHECK_10(CHECK, EXCEP, MSG) #endif #if SCYTHE_DEBUG > 1 #define SCYTHE_CHECK_20(CHECK,EXCEP,MSG) SCYTHE_CHECK(CHECK,EXCEP,MSG) #else #define SCYTHE_CHECK_20(CHECK, EXCEP, MSG) #endif #if SCYTHE_DEBUG > 2 #define SCYTHE_CHECK_30(CHECK,EXCEP,MSG) SCYTHE_CHECK(CHECK,EXCEP,MSG) #else #define SCYTHE_CHECK_30(CHECK, EXCEP, MSG) #endif #if SCYTHE_DEBUG > 0 #define SCYTHE_THROW_10(EXCEP,MSG) SCYTHE_THROW(EXCEP,MSG) #else #define SCYTHE_THROW_10(EXCEP,MSG) #endif #if SCYTHE_DEBUG > 1 #define SCYTHE_THROW_20(EXCEP,MSG) SCYTHE_THROW(EXCEP,MSG) #else #define SCYTHE_THROW_20(EXCEP,MSG) #endif #if SCYTHE_DEBUG > 2 #define SCYTHE_THROW_30(EXCEP,MSG) SCYTHE_THROW(EXCEP,MSG) #else #define SCYTHE_THROW_30(EXCEP,MSG) #endif namespace scythe { /* Forward declaration for serr */ class scythe_exception; /**** This file-local variable holds the output of the last * scythe_exception constructed. ****/ #ifdef __MINGW32__ static scythe_exception *serr; #else namespace { scythe_exception *serr; } #endif /**** A replacement for the default terminate handler. This outputs * the string held in serr before calling abort, thereby notifying * the user of why the program crashed. ****/ inline void scythe_terminate (); /**** The scythe exception abstract base class ****/ /*! * \brief The Scythe exception abstract base class. * * The is the base class in Scythe's error handling class tree. * This class extends std::exception and provides fields for * information about the exception, including where the exception * occurred in the library and a message describing the error. 
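 *
 * A brief, hypothetical call-site sketch (A and B are illustrative matrices, not
 * library code) showing how routines raise these exceptions through the tiered
 * macros defined above:
 * \code
 * // Throws scythe_conformation_error when SCYTHE_DEBUG >= 1; at level 0 the
 * // check compiles away entirely.
 * SCYTHE_CHECK_10(A.cols() != B.rows(), scythe_conformation_error,
 *                 "Matrices of order " << A.rows() << "x" << A.cols() << " and "
 *                 << B.rows() << "x" << B.cols() << " do not conform");
 * \endcode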
*/ class scythe_exception:public std::exception { public: scythe_exception (const std::string & head, const std::string & file, const std::string & function, const unsigned int &line, const std::string & message = "", const bool & halt = false) throw () : exception (), head_ (head), file_ (file), function_ (function), line_ (line), message_ (message), call_files_ (), call_funcs_ (), call_lines_ () { std::ostringstream os; os << head_ << " in " << file_ << ", " << function_ << ", " << line_ << ": " << message_ << "!\n\n"; serr = this; std::set_terminate (scythe_terminate); if (halt) { #ifdef SCYTHE_RPACK error("Aborting Scythe C++ execution"); #else std::terminate (); #endif } } scythe_exception (const scythe_exception & e) throw () : exception (), head_ (e.head_), file_ (e.file_), function_ (e.function_), line_ (e.line_), message_ (e.message_), call_files_ (e.call_files_), call_funcs_ (e.call_funcs_), call_lines_ (e.call_lines_) { } scythe_exception & operator= (const scythe_exception & e) throw () { head_ = e.head_; file_ = e.file_; function_ = e.function_; line_ = e.line_; message_ = e.message_; return *this; } virtual ~ scythe_exception () throw () { } /* This function is only called from scythe_terminate, and only * once, so this memory leak is not an issue. We can't just return * os.str().c_str() because that is a dangling pointer after the * function returns... * TODO: Deal with memory leak issue that might affect R packages */ virtual const char *what () const throw () { std::ostringstream os; for (int i = call_files_.size() - 1; i > -1; ++i) { os << "Called from " << call_files_[i] << ", " << call_funcs_[i] << ", " << call_lines_[i] << std::endl; } os << head_ << " in " << file_ << ", " << function_ << ", " << line_ << ": " << message_ << "!"; char *retval = new char[os.str().length()]; std::strcpy(retval, os.str().c_str()); return retval; } virtual std::string message () const throw () { return message_; } virtual void add_caller (const std::string &file, const std::string &function, const unsigned int &line) throw () { /* This if allows one to catch and rethrow an error in the same * function w/out messing things up. Nice to keep try-catch * blocks to a minimum */ if (file != file_ && function != function_) { call_files_.push_back(file); call_funcs_.push_back(function); call_lines_.push_back(line); } } private: std::string head_; std::string file_; std::string function_; unsigned int line_; std::string message_; std::vector call_files_; std::vector call_funcs_; std::vector call_lines_; }; /**** Exception class types, added as needed ****/ /*! * \brief Memory allocation error. * * Library members throw this exception in response to insufficient * memory conditions, such as when one attempts to create a Matrix * object that is bigger than available memory. */ class scythe_alloc_error:public scythe_exception { public: scythe_alloc_error (const std::string & file, const std::string & function, const unsigned int &line, const std::string & message = "", const bool & halt = false) throw () : scythe_exception ("SCYTHE_ALLOCATION_ERROR", file, function, line, message, halt) { } }; /*! * \brief Invalid function argument. * * Library members throw this exception when callers pass incorrect * arguments to a function, such as when one calls the factorial * method with an argument less than 0. 
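 *
 * A short, hypothetical sketch of how such an error surfaces in client code when
 * SCYTHE_DEBUG is at least 1 (message() returns the explanatory string):
 * \code
 * try {
 *   double p = scythe::pbeta(0.5, -1.0, 2.0);  // shape a <= 0 is invalid
 * } catch (const scythe::scythe_invalid_arg& e) {
 *   std::cerr << e.message() << std::endl;
 * }
 * \endcode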
*/ class scythe_invalid_arg:public scythe_exception { public: scythe_invalid_arg (const std::string & file, const std::string & function, const unsigned int &line, const std::string & message = "", const bool & halt = false) throw () : scythe_exception ("SCYTHE_INVALID ARGUMENT", file, function, line, message, halt) { } }; /*! * \brief File i/o error. * * Library members throw this exception when errors occur during * file reading, writing, or creation, such as when one passes an * invalid file name to the Matrix class's save method. */ class scythe_file_error:public scythe_exception { public: scythe_file_error(const std::string & file, const std::string & function, const unsigned int &line, const std::string & message = "", const bool & halt = false) throw () : scythe_exception ("SCYTHE FILE ERROR", file, function, line, message, halt) { } }; /*! \brief Matrix conformation error. * * Library members throw this exception when a caller passes * non-conforming matrices (matrices of incompatible dimensions) to * a routine, such as when one attempt two row vectors. */ class scythe_conformation_error:public scythe_exception { public: scythe_conformation_error(const std::string & file, const std::string & function, const unsigned int &line, const std::string & message = "", const bool & halt = false) throw () : scythe_exception ("SCYTHE CONFORMATION ERROR", file, function, line, message, halt) { } }; /*! \brief Matrix dimension error. * * Library members throw this exception when a caller passes a * Matrix of the wrong size or shape to a routine. For example, * trying to take the Cholesky decomposition of a non-square Matrix * causes this error. */ class scythe_dimension_error:public scythe_exception { public: scythe_dimension_error (const std::string & file, const std::string & function, const unsigned int &line, const std::string & message = "", const bool & halt = false) throw () : scythe_exception ("SCYTHE DIMENSION ERROR", file, function, line, message, halt) { } }; /*! \brief Null Matrix error. * * Library members throw this exception when a caller passes a null * Matrix to a routine when it expects a non-null argument. For * example, taking the inverse of a null Matrix is impossible, * resulting in this exception. */ class scythe_null_error:public scythe_exception { public: scythe_null_error(const std::string & file, const std::string & function, const unsigned int &line, const std::string & message = "", const bool & halt = false) throw () : scythe_exception ("SCYTHE NULL ERROR", file, function, line, message, halt) { } }; /*! \brief Matrix type error. * * Library members throw this exception when a caller passes a * Matrix that does not satisfy some required property to a routine. * For example, Cholesky decomposition is designed to work on * positive definite matrices; trying to perform Cholesky * decomposition on a Matrix that does not satisfy this requirement * causes this exception. */ class scythe_type_error:public scythe_exception { public: scythe_type_error(const std::string & file, const std::string & function, const unsigned int &line, const std::string & message = "", const bool & halt = false) throw () : scythe_exception ("SCYTHE TYPE ERROR", file, function, line, message, halt) { } }; /*! \brief Element out of bounds error. * * Library members throw this exception when a caller attempts to * access an element outside the bounds of a data structure, such as * when one tries to access the 1000th element of a 200-element * Matrix. 
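 *
 * A hypothetical sketch (the Matrix A is illustrative only):
 * \code
 * scythe::Matrix<> A(200, 1);
 * double x = A(999);  // out of bounds; throws scythe_bounds_error when
 *                     // SCYTHE_DEBUG is 3 (the default)
 * \endcode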
*/ class scythe_bounds_error:public scythe_exception { public: scythe_bounds_error(const std::string & file, const std::string & function, const unsigned int &line, const std::string & message = "", const bool & halt = false) throw () : scythe_exception ("SCYTHE BOUNDS ERROR", file, function, line, message, halt) { } }; /*! \brief Numerical convergence error. * * Library members throw this exception when a numerical algorithm * fails to converge to a stable value. For example, the BFGS * optimization routine throws this exception when it cannot locate * the minimum of a function to a given tolerance. */ class scythe_convergence_error:public scythe_exception { public: scythe_convergence_error (const std::string & file, const std::string & function, const unsigned int &line, const std::string & message = "", const bool & halt = false) throw () : scythe_exception ("SCYTHE CONVERGENCE ERROR", file, function, line, message, halt) { } }; /*! \brief Numerical underflow or overflow error. * * Library members throw this exception when the result of a * calculation, assignment, or other operation is to small or large * for the data type holding the value. For example, passing * certain values to the gammafn function can result in underflow or * overflow conditions in the resulting calculations. */ class scythe_range_error:public scythe_exception { public: scythe_range_error (const std::string & file, const std::string & function, const unsigned int &line, const std::string & message = "", const bool & halt = false) throw () : scythe_exception ("SCYTHE RANGE ERROR", file, function, line, message, halt) { } }; /*! \brief Numerical precision error. * * Library members throw this exception when a routine cannot * complete a computation effectively and will sacrifice reasonable * precision as a consequence. For example, passing a value too * close to a negative integer to the gammafn function renders the * function incapable of returning an accurate result and thus * generates this exception. */ class scythe_precision_error:public scythe_exception { public: scythe_precision_error (const std::string & file, const std::string & function, const unsigned int &line, const std::string & message = "", const bool & halt = false) throw () : scythe_exception ("SCYTHE PRECISION ERROR", file, function, line, message, halt) { } }; /*! \brief Random number seed error. * * Library members throw this exception when a random number * generator is provided with an illegitimate starting seed value. * For example, the lecuyer class requires seeds within a certain * range to operate properly and will throw this exception when * seeded with a number outside of that range. */ class scythe_randseed_error:public scythe_exception { public: scythe_randseed_error(const std::string & file, const std::string & function, const unsigned int &line, const std::string & message = "", const bool & halt = false) throw () : scythe_exception ("SCYTHE RANDOM SEED ERROR", file, function, line, message, halt) { } }; /*! \brief Matrix style error. * * Library members throw this exception when they are asked to * operate on a Matrix of the incorrect style. Some routines * require specifically a concrete Matrix or view to work correctly. * For example, only views may reference other matrices; invoking * the reference function on a concrete Matrix will generate this * exception. 
*/ class scythe_style_error:public scythe_exception { public: scythe_style_error(const std::string& file, const std::string& function, const unsigned int& line, const std::string& message = "", const bool& halt = false) throw () : scythe_exception("SCYTHE STYLE ERROR", file, function, line, message, halt) {} }; /*! \brief LAPACK Internal Error * * Library members throw this exception when an underlying LAPACK or * BLAS routine indicates that an internal error has occurred. * */ class scythe_lapack_internal_error:public scythe_exception { public: scythe_lapack_internal_error(const std::string& file, const std::string& function, const unsigned int& line, const std::string& message = "", const bool& halt = false) throw () : scythe_exception("SCYTHE LAPACK/BLAS INTERNAL ERROR", file, function, line, message, halt) {} }; /*! \brief Unexpected call to default error. * * This error should not occur. If it occurs in your code, please * contact the Scythe developers to report the problem. * */ class scythe_unexpected_default_error:public scythe_exception { public: scythe_unexpected_default_error(const std::string& file, const std::string& function, const unsigned int& line, const std::string& message = "", const bool& halt = false) throw () : scythe_exception("SCYTHE UNEXPECTED DEFAULT ERROR", file, function, line, message, halt) {} }; // The definition of our terminate handler described above inline void scythe_terminate () { #ifdef SCYTHE_RPACK Rprintf(serr->what()); error("Aborting Scythe C++ execution"); #else std::cerr << serr->what() << std::endl; std::cerr << std::endl; abort (); #endif } } // end namspace SCYTHE #endif /* SCYTHE_ERROR_H */ MCMCpack/src/distributions.h0000644000176000001440000024360112140061657015532 0ustar ripleyusers/* * Scythe Statistical Library Copyright (C) 2000-2002 Andrew D. Martin * and Kevin M. Quinn; 2002-present Andrew D. Martin, Kevin M. Quinn, * and Daniel Pemstein. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify under the terms of the GNU General Public License as * published by Free Software Foundation; either version 2 of the * License, or (at your option) any later version. See the text files * COPYING and LICENSE, distributed with this source code, for further * information. * -------------------------------------------------------------------- * scythestat/distributions.h * */ /*! \file distributions.h * * \brief Definitions for probability density functions * (PDFs), cumulative distribution functions (CDFs), and some common * functions (gamma, beta, etc). * * This file provides functions that evaluate the PDFs and CDFs of a * number of probability distributions. In addition, it includes * definitions for another of related functions, such as the gamma * and beta functions. * * The various distribution functions in this file operate on both * scalar quantiles and matrices of quantiles and the * definitions of both forms of these functions appear below. We * provide explicit documentation only for the scalar versions of the * these functions and describe the Matrix versions in the scalar * calls' documents. Much like the operators in matrix.h, we * implement these overloaded versions of the distribution functions * in terms of both generalized and default templates to allow for * explicit control over the template type of the returned Matrix. * * \note Doxygen does not correctly expand the macro definitions we use * to generate the Matrix versions of the various distribution * functions. 
Therefore, it incorrectly substitutes the macro * variable * __VA_ARGS__ for the actual parameter values in the parameter lists * of each of these functions. For example, the definitions of the * Matrix versions of pbeta are listed as * \code * template * Matrix scythe::pbeta (const Matrix &X, __VA_ARGS__) * * template * Matrix scythe::pbeta (const Matrix &X, __VA_ARGS__) * \endcode * when they should be * \code * template * Matrix scythe::pbeta (const Matrix &X, double a, double b) * * template * Matrix scythe::pbeta (const Matrix &X, double a, double b) * \endcode * * \par * Furthermore, Doxygen erroneously lists a number of variables at the * end of this document that are not, in fact, declared in * distributions.h. Again, this error is a result of Doxygen's macro * parsing capabilities. * */ /* TODO: Figure out how to get doxygen to stop listing erroneous * variables at the end of the doc for this file. They stem from it * misreading the nested macro calls used to generate matrix procs. */ /* TODO: Full R-style versions of these function including arbitrary * recycling of matrix arguments. This is going to have to wait for * variadic templates to be doable without a complete mess. There is * currently a variadic patch available for g++, perhaps we can add a * SCYTHE_VARIADIC flag and include these as option until they become * part of the standard in 2009. Something to come back to after 1.0. */ #ifndef SCYTHE_DISTRIBUTIONS_H #define SCYTHE_DISTRIBUTIONS_H #include #include #include #include #include #include #ifdef HAVE_IEEEFP_H #include #endif #ifdef SCYTHE_COMPILE_DIRECT #include "matrix.h" #include "ide.h" #include "error.h" #else #include "scythestat/matrix.h" #include "scythestat/ide.h" #include "scythestat/error.h" #endif /* Fill in some defs from R that aren't in math.h */ #ifndef M_PI #define M_PI 3.141592653589793238462643383280 #endif #define M_LN_SQRT_2PI 0.918938533204672741780329736406 #define M_LN_SQRT_PId2 0.225791352644727432363097614947 #define M_1_SQRT_2PI 0.39894228040143267793994605993 #define M_2PI 6.28318530717958647692528676655 #define M_SQRT_32 5.656854249492380195206754896838 #ifndef HAVE_TRUNC /*! @cond */ inline double trunc(double x) throw () { if (x >= 0) return std::floor(x); else return std::ceil(x); } /*! @endcond */ #endif /* Many random number generators, pdfs, cdfs, and functions (gamma, * etc) in this file are based on code from the R Project, version * 1.6.0-1.7.1. This code is available under the terms of the GNU * GPL. Original copyright: * * Copyright (C) 1998 Ross Ihaka * Copyright (C) 2000-2002 The R Development Core Team * Copyright (C) 2003 The R Foundation */ namespace scythe { /*! @cond */ /* Forward declarations */ double gammafn (double); double lngammafn (double); double lnbetafn (double, double); double pgamma (double, double, double); double dgamma(double, double, double); double pnorm (double, double, double); /*! 
@endcond */ /******************** * Helper Functions * ********************/ namespace { /* Evaluate a Chebysheve series at a given point */ double chebyshev_eval (double x, const double *a, int n) { SCYTHE_CHECK_10(n < 1 || n > 1000, scythe_invalid_arg, "n not on [1, 1000]"); SCYTHE_CHECK_10(x < -1.1 || x > 1.1, scythe_invalid_arg, "x not on [-1.1, 1.1]"); double b0, b1, b2; b0 = b1 = b2 = 0; double twox = x * 2; for (int i = 1; i <= n; ++i) { b2 = b1; b1 = b0; b0 = twox * b1 - b2 + a[n - i]; } return (b0 - b2) * 0.5; } /* Computes the log gamma correction factor for x >= 10 */ double lngammacor(double x) { const double algmcs[15] = { +.1666389480451863247205729650822e+0, -.1384948176067563840732986059135e-4, +.9810825646924729426157171547487e-8, -.1809129475572494194263306266719e-10, +.6221098041892605227126015543416e-13, }; SCYTHE_CHECK_10(x < 10, scythe_invalid_arg, "This function requires x >= 10"); SCYTHE_CHECK_10(x >= 3.745194030963158e306, scythe_range_error, "Underflow"); if (x < 94906265.62425156) { double tmp = 10 / x; return chebyshev_eval(tmp * tmp * 2 - 1, algmcs, 5) / x; } return 1 / (x * 12); } /* Evaluates the "deviance part" */ double bd0(double x, double np) { if(std::fabs(x - np) < 0.1 * (x + np)) { double v = (x - np) / (x + np); double s = (x - np) * v; double ej = 2 * x * v; v = v * v; for (int j = 1; ; j++) { ej *= v; double s1 = s + ej / ((j << 1) + 1); if (s1 == s) return s1; s = s1; } } return x * std::log(x / np) + np - x; } /* Computes the log of the error term in Stirling's formula */ double stirlerr(double n) { #define S0 0.083333333333333333333 /* 1/12 */ #define S1 0.00277777777777777777778 /* 1/360 */ #define S2 0.00079365079365079365079365 /* 1/1260 */ #define S3 0.000595238095238095238095238 /* 1/1680 */ #define S4 0.0008417508417508417508417508/* 1/1188 */ /* error for 0, 0.5, 1.0, 1.5, ..., 14.5, 15.0 */ const double sferr_halves[31] = { 0.0, /* n=0 - wrong, place holder only */ 0.1534264097200273452913848, /* 0.5 */ 0.0810614667953272582196702, /* 1.0 */ 0.0548141210519176538961390, /* 1.5 */ 0.0413406959554092940938221, /* 2.0 */ 0.03316287351993628748511048, /* 2.5 */ 0.02767792568499833914878929, /* 3.0 */ 0.02374616365629749597132920, /* 3.5 */ 0.02079067210376509311152277, /* 4.0 */ 0.01848845053267318523077934, /* 4.5 */ 0.01664469118982119216319487, /* 5.0 */ 0.01513497322191737887351255, /* 5.5 */ 0.01387612882307074799874573, /* 6.0 */ 0.01281046524292022692424986, /* 6.5 */ 0.01189670994589177009505572, /* 7.0 */ 0.01110455975820691732662991, /* 7.5 */ 0.010411265261972096497478567, /* 8.0 */ 0.009799416126158803298389475, /* 8.5 */ 0.009255462182712732917728637, /* 9.0 */ 0.008768700134139385462952823, /* 9.5 */ 0.008330563433362871256469318, /* 10.0 */ 0.007934114564314020547248100, /* 10.5 */ 0.007573675487951840794972024, /* 11.0 */ 0.007244554301320383179543912, /* 11.5 */ 0.006942840107209529865664152, /* 12.0 */ 0.006665247032707682442354394, /* 12.5 */ 0.006408994188004207068439631, /* 13.0 */ 0.006171712263039457647532867, /* 13.5 */ 0.005951370112758847735624416, /* 14.0 */ 0.005746216513010115682023589, /* 14.5 */ 0.005554733551962801371038690 /* 15.0 */ }; double nn; if (n <= 15.0) { nn = n + n; if (nn == (int)nn) return(sferr_halves[(int)nn]); return (scythe::lngammafn(n + 1.) 
- (n + 0.5) * std::log(n) + n - std::log(std::sqrt(2 * M_PI))); } nn = n*n; if (n > 500) return((S0 - S1 / nn) / n); if (n > 80) return((S0 - (S1 - S2 / nn) / nn) / n); if (n > 35) return((S0 - (S1 - (S2 - S3 / nn) / nn) / nn) / n); /* 15 < n <= 35 : */ return((S0 - (S1 - (S2 - (S3 - S4 / nn) / nn) / nn) / nn) / n); #undef S1 #undef S2 #undef S3 #undef S4 } /* Helper for dpois and dgamma */ double dpois_raw (double x, double lambda) { if (lambda == 0) return ( (x == 0) ? 1.0 : 0.0); if (x == 0) return std::exp(-lambda); if (x < 0) return 0.0; return std::exp(-stirlerr(x) - bd0(x, lambda)) / std::sqrt(2 * M_PI * x); } /* helper for pbeta */ double pbeta_raw(double x, double pin, double qin) { double ans, c, finsum, p, ps, p1, q, term, xb, xi, y; int n, i, ib, swap_tail; const double eps = .5 * DBL_EPSILON; const double sml = DBL_MIN; const double lneps = std::log(eps); const double lnsml = std::log(eps); if (pin / (pin + qin) < x) { swap_tail = 1; y = 1 - x; p = qin; q = pin; } else { swap_tail=0; y = x; p = pin; q = qin; } if ((p + q) * y / (p + 1) < eps) { ans = 0; xb = p * std::log(std::max(y,sml)) - std::log(p) - lnbetafn(p,q); if (xb > lnsml && y != 0) ans = std::exp(xb); if (swap_tail) ans = 1-ans; } else { ps = q - std::floor(q); if (ps == 0) ps = 1; xb = p * std::log(y) - lnbetafn(ps, p) - std::log(p); ans = 0; if (xb >= lnsml) { ans = std::exp(xb); term = ans * p; if (ps != 1) { n = (int)std::max(lneps/std::log(y), 4.0); for(i = 1; i <= n; i++){ xi = i; term *= (xi-ps)*y/xi; ans += term/(p+xi); } } } if (q > 1) { xb = p * std::log(y) + q * std::log(1 - y) - lnbetafn(p, q) - std::log(q); ib = (int) std::max(xb / lnsml, 0.0); term = std::exp(xb - ib * lnsml); c = 1 / (1 - y); p1 = q * c / (p + q - 1); finsum = 0; n = (int) q; if(q == n) n--; for (i = 1; i <= n; i++) { if(p1 <= 1 && term / eps <= finsum) break; xi = i; term = (q -xi + 1) * c * term / (p + q - xi); if (term > 1) { ib--; term *= sml; } if (ib == 0) finsum += term; } ans += finsum; } if(swap_tail) ans = 1-ans; ans = std::max(std::min(ans,1.),0.); } return ans; } /* Helper for dbinom */ double dbinom_raw (double x, double n, double p, double q) { double f, lc; if (p == 0) return((x == 0) ? 1.0 : 0.0); if (q == 0) return((x == n) ? 1.0 : 0.0); if (x == 0) { if(n == 0) return 1.0; lc = (p < 0.1) ? -bd0(n, n * q) - n * p : n * std::log(q); return(std::exp(lc)); } if (x == n) { lc = (q < 0.1) ? -bd0(n,n * p) - n * q : n * std::log(p); return(std::exp(lc)); } if (x < 0 || x > n) return 0.0; lc = stirlerr(n) - stirlerr(x) - stirlerr(n-x) - bd0(x,n*p) - bd0(n - x, n * q); f = (M_2PI * x * (n-x)) / n; return (std::exp(lc) / std::sqrt(f)); } /* The normal probability density function implementation. */ #define SIXTEN 16 #define do_del(X) \ xsq = trunc(X * SIXTEN) / SIXTEN; \ del = (X - xsq) * (X + xsq); \ if(log_p) { \ *cum = (-xsq * xsq * 0.5) + (-del * 0.5) + std::log(temp); \ if((lower && x > 0.) || (upper && x <= 0.)) \ *ccum = ::log1p(-std::exp(-xsq * xsq * 0.5) * \ std::exp(-del * 0.5) * temp); \ } \ else { \ *cum = std::exp(-xsq * xsq * 0.5) * std::exp(-del * 0.5) * temp; \ *ccum = 1.0 - *cum; \ } #define swap_tail \ if (x > 0.) 
{/* swap ccum <--> cum */ \ temp = *cum; if(lower) *cum = *ccum; *ccum = temp; \ } void pnorm_both(double x, double *cum, double *ccum, int i_tail, bool log_p) { const double a[5] = { 2.2352520354606839287, 161.02823106855587881, 1067.6894854603709582, 18154.981253343561249, 0.065682337918207449113 }; const double b[4] = { 47.20258190468824187, 976.09855173777669322, 10260.932208618978205, 45507.789335026729956 }; const double c[9] = { 0.39894151208813466764, 8.8831497943883759412, 93.506656132177855979, 597.27027639480026226, 2494.5375852903726711, 6848.1904505362823326, 11602.651437647350124, 9842.7148383839780218, 1.0765576773720192317e-8 }; const double d[8] = { 22.266688044328115691, 235.38790178262499861, 1519.377599407554805, 6485.558298266760755, 18615.571640885098091, 34900.952721145977266, 38912.003286093271411, 19685.429676859990727 }; const double p[6] = { 0.21589853405795699, 0.1274011611602473639, 0.022235277870649807, 0.001421619193227893466, 2.9112874951168792e-5, 0.02307344176494017303 }; const double q[5] = { 1.28426009614491121, 0.468238212480865118, 0.0659881378689285515, 0.00378239633202758244, 7.29751555083966205e-5 }; double xden, xnum, temp, del, eps, xsq, y; int i, lower, upper; /* Consider changing these : */ eps = DBL_EPSILON * 0.5; /* i_tail in {0,1,2} =^= {lower, upper, both} */ lower = i_tail != 1; upper = i_tail != 0; y = std::fabs(x); if (y <= 0.67448975) { /* qnorm(3/4) = .6744.... -- earlier had 0.66291 */ if (y > eps) { xsq = x * x; xnum = a[4] * xsq; xden = xsq; for (i = 0; i < 3; ++i) { xnum = (xnum + a[i]) * xsq; xden = (xden + b[i]) * xsq; } } else xnum = xden = 0.0; temp = x * (xnum + a[3]) / (xden + b[3]); if(lower) *cum = 0.5 + temp; if(upper) *ccum = 0.5 - temp; if(log_p) { if(lower) *cum = std::log(*cum); if(upper) *ccum = std::log(*ccum); } } else if (y <= M_SQRT_32) { /* Evaluate pnorm for 0.674.. = qnorm(3/4) < |x| <= sqrt(32) * ~= 5.657 */ xnum = c[8] * y; xden = y; for (i = 0; i < 7; ++i) { xnum = (xnum + c[i]) * y; xden = (xden + d[i]) * y; } temp = (xnum + c[7]) / (xden + d[7]); do_del(y); swap_tail; } else if (log_p || (lower && -37.5193 < x && x < 8.2924) || (upper && -8.2929 < x && x < 37.5193) ) { /* Evaluate pnorm for x in (-37.5, -5.657) union (5.657, 37.5) */ xsq = 1.0 / (x * x); xnum = p[5] * xsq; xden = xsq; for (i = 0; i < 4; ++i) { xnum = (xnum + p[i]) * xsq; xden = (xden + q[i]) * xsq; } temp = xsq * (xnum + p[4]) / (xden + q[4]); temp = (M_1_SQRT_2PI - temp) / y; do_del(x); swap_tail; } else { if (x > 0) { *cum = 1.; *ccum = 0.; } else { *cum = 0.; *ccum = 1.; } //XXX commented out for debug-on testing of ordfactanal //(and perhaps others) since they tend to throw on the first //iteration //SCYTHE_THROW_10(scythe_convergence_error, "Did not converge"); } return; } #undef SIXTEN #undef do_del #undef swap_tail /* The standard normal distribution function */ double pnorm1 (double x, bool lower_tail, bool log_p) { SCYTHE_CHECK_10(! R_finite(x), scythe_invalid_arg, "Quantile x is inifinte (+/-Inf) or NaN"); double p, cp; pnorm_both(x, &p, &cp, (lower_tail ? 0 : 1), log_p); return (lower_tail ? p : cp); } } // anonymous namespace /************* * Functions * *************/ /* The gamma function */ /*! \brief The gamma function. * * Computes the gamma function, evaluated at \a x. * * \param x The value to compute gamma at. 
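 *
 * A brief usage sketch: for a positive integer argument the function reduces to a
 * factorial, e.g.
 * \code
 * double g = scythe::gammafn(5.0);  // Gamma(5) = 4! = 24
 * \endcode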
* * \see lngammafn(double x) * \see pgamma(double x, double shape, double scale) * \see dgamma(double x, double shape, double scale) * \see rng::rgamma(double shape, double scale) * * \throw scythe_range_error (Level 1) * \throw scythe_precision_error (Level 1) */ inline double gammafn (double x) { const double gamcs[22] = { +.8571195590989331421920062399942e-2, +.4415381324841006757191315771652e-2, +.5685043681599363378632664588789e-1, -.4219835396418560501012500186624e-2, +.1326808181212460220584006796352e-2, -.1893024529798880432523947023886e-3, +.3606925327441245256578082217225e-4, -.6056761904460864218485548290365e-5, +.1055829546302283344731823509093e-5, -.1811967365542384048291855891166e-6, +.3117724964715322277790254593169e-7, -.5354219639019687140874081024347e-8, +.9193275519859588946887786825940e-9, -.1577941280288339761767423273953e-9, +.2707980622934954543266540433089e-10, -.4646818653825730144081661058933e-11, +.7973350192007419656460767175359e-12, -.1368078209830916025799499172309e-12, +.2347319486563800657233471771688e-13, -.4027432614949066932766570534699e-14, +.6910051747372100912138336975257e-15, -.1185584500221992907052387126192e-15, }; double y = std::fabs(x); if (y <= 10) { /* Compute gamma(x) for -10 <= x <= 10 * Reduce the interval and find gamma(1 + y) for 0 <= y < 1 * first of all. */ int n = (int) x; if (x < 0) --n; y = x - n;/* n = floor(x) ==> y in [ 0, 1 ) */ --n; double value = chebyshev_eval(y * 2 - 1, gamcs, 22) + .9375; if (n == 0) return value;/* x = 1.dddd = 1+y */ if (n < 0) { /* compute gamma(x) for -10 <= x < 1 */ /* If the argument is exactly zero or a negative integer */ /* then return NaN. */ SCYTHE_CHECK_10(x == 0 || (x < 0 && x == n + 2), scythe_range_error, "x is 0 or a negative integer"); /* The answer is less than half precision */ /* because x too near a negative integer. */ SCYTHE_CHECK_10(x < -0.5 && std::fabs(x - (int)(x - 0.5) / x) < 67108864.0, scythe_precision_error, "Answer < 1/2 precision because x is too near" << " a negative integer"); /* The argument is so close to 0 that the result * * would overflow. */ SCYTHE_CHECK_10(y < 2.2474362225598545e-308, scythe_range_error, "x too close to 0"); n = -n; for (int i = 0; i < n; i++) value /= (x + i); return value; } else { /* gamma(x) for 2 <= x <= 10 */ for (int i = 1; i <= n; i++) { value *= (y + i); } return value; } } else { /* gamma(x) for y = |x| > 10. */ /* Overflow */ SCYTHE_CHECK_10(x > 171.61447887182298, scythe_range_error,"Overflow"); /* Underflow */ SCYTHE_CHECK_10(x < -170.5674972726612, scythe_range_error, "Underflow"); double value = std::exp((y - 0.5) * std::log(y) - y + M_LN_SQRT_2PI + lngammacor(y)); if (x > 0) return value; SCYTHE_CHECK_10(std::fabs((x - (int)(x - 0.5))/x) < 67108864.0, scythe_precision_error, "Answer < 1/2 precision because x is " << "too near a negative integer"); double sinpiy = std::sin(M_PI * y); /* Negative integer arg - overflow */ SCYTHE_CHECK_10(sinpiy == 0, scythe_range_error, "Overflow"); return -M_PI / (y * sinpiy * value); } } /* The natural log of the absolute value of the gamma function */ /*! \brief The natural log of the absolute value of the gamma * function. * * Computes the natural log of the absolute value of the gamma * function, evaluated at \a x. * * \param x The value to compute log(abs(gamma())) at. 
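 *
 * A brief usage sketch, useful where gammafn() itself would overflow:
 * \code
 * double lg = scythe::lngammafn(200.0);  // finite, although gammafn(200.0) overflows
 * \endcode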
* * \see gammafn(double x) * \see pgamma(double x, double shape, double scale) * \see dgamma(double x, double shape, double scale) * \see rng::rgamma(double shape, double scale) * * \throw scythe_range_error (Level 1) * \throw scythe_precision_error (Level 1) */ inline double lngammafn(double x) { SCYTHE_CHECK_10(x <= 0 && x == (int) x, scythe_range_error, "x is 0 or a negative integer"); double y = std::fabs(x); if (y <= 10) return std::log(std::fabs(gammafn(x))); SCYTHE_CHECK_10(y > 2.5327372760800758e+305, scythe_range_error, "Overflow"); if (x > 0) /* i.e. y = x > 10 */ return M_LN_SQRT_2PI + (x - 0.5) * std::log(x) - x + lngammacor(x); /* else: x < -10; y = -x */ double sinpiy = std::fabs(std::sin(M_PI * y)); if (sinpiy == 0) /* Negative integer argument */ throw scythe_exception("UNEXPECTED ERROR", __FILE__, __func__, __LINE__, "ERROR: Should never happen!"); double ans = M_LN_SQRT_PId2 + (x - 0.5) * std::log(y) - x - std::log(sinpiy) - lngammacor(y); SCYTHE_CHECK_10(std::fabs((x - (int)(x - 0.5)) * ans / x) < 1.490116119384765696e-8, scythe_precision_error, "Answer < 1/2 precision because x is " << "too near a negative integer"); return ans; } /* The beta function */ /*! \brief The beta function. * * Computes beta function, evaluated at (\a a, \a b). * * \param a The first parameter. * \param b The second parameter. * * \see lnbetafn(double a, double b) * \see pbeta(double x, double a, double b) * \see dbeta(double x, double a, double b) * \see rng::rbeta(double a, double b) * * \throw scythe_invalid_arg (Level 1) * \throw scythe_range_error (Level 1) * \throw scythe_precision_error (Level 1) */ inline double betafn(double a, double b) { SCYTHE_CHECK_10(a <= 0 || b <= 0, scythe_invalid_arg, "a or b < 0"); if (a + b < 171.61447887182298) /* ~= 171.61 for IEEE */ return gammafn(a) * gammafn(b) / gammafn(a+b); double val = lnbetafn(a, b); SCYTHE_CHECK_10(val < -708.39641853226412, scythe_range_error, "Underflow"); return std::exp(val); } /* The natural log of the beta function */ /*! \brief The natural log of the beta function. * * Computes the natural log of the beta function, * evaluated at (\a a, \a b). * * \param a The first parameter. * \param b The second parameter. * * \see betafn(double a, double b) * \see pbeta(double x, double a, double b) * \see dbeta(double x, double a, double b) * \see rng::rbeta(double a, double b) * * \throw scythe_invalid_arg (Level 1) * \throw scythe_range_error (Level 1) * \throw scythe_precision_error (Level 1) */ inline double lnbetafn (double a, double b) { double p, q; p = q = a; if(b < p) p = b;/* := min(a,b) */ if(b > q) q = b;/* := max(a,b) */ SCYTHE_CHECK_10(p <= 0 || q <= 0,scythe_invalid_arg, "a or b <= 0"); if (p >= 10) { /* p and q are big. */ double corr = lngammacor(p) + lngammacor(q) - lngammacor(p + q); return std::log(q) * -0.5 + M_LN_SQRT_2PI + corr + (p - 0.5) * std::log(p / (p + q)) + q * std::log(1 + (-p / (p + q))); } else if (q >= 10) { /* p is small, but q is big. */ double corr = lngammacor(q) - lngammacor(p + q); return lngammafn(p) + corr + p - p * std::log(p + q) + (q - 0.5) * std::log(1 + (-p / (p + q))); } /* p and q are small: p <= q > 10. */ return std::log(gammafn(p) * (gammafn(q) / gammafn(p + q))); } /* Compute the factorial of a non-negative integer */ /*! \brief The factorial function. * * Computes the factorial of \a n. * * \param n The non-negative integer value to compute the factorial of. 
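 *
 * A brief usage sketch (the int return overflows quickly, near n = 13 for a 32-bit
 * int, so lnfactorial() is preferable for larger n):
 * \code
 * int f = scythe::factorial(5);         // 120
 * double lf = scythe::lnfactorial(20);  // log(20!), roughly 42.34
 * \endcode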
* * \see lnfactorial(unsigned int n) * */ inline int factorial (unsigned int n) { if (n == 0) return 1; return n * factorial(n - 1); } /* Compute the natural log of the factorial of a non-negative * integer */ /*! \brief The log of the factorial function. * * Computes the natural log of the factorial of \a n. * * \param n The non-negative integer value to compute the natural log of the factorial of. * * \see factorial(unsigned int n) * */ inline double lnfactorial (unsigned int n) { double x = n+1; double cof[6] = { 76.18009172947146, -86.50532032941677, 24.01409824083091, -1.231739572450155, 0.1208650973866179e-2, -0.5395239384953e-5 }; double y = x; double tmp = x + 5.5 - (x + 0.5) * std::log(x + 5.5); double ser = 1.000000000190015; for (int j = 0; j <= 5; j++) { ser += (cof[j] / ++y); } return(std::log(2.5066282746310005 * ser / x) - tmp); } /********************************* * Fully Specified Distributions * *********************************/ /* These macros provide a nice shorthand for the matrix versions of * the pdf and cdf functions. */ #define SCYTHE_ARGSET(...) __VA_ARGS__ #define SCYTHE_DISTFUN_MATRIX(NAME, XTYPE, ARGNAMES, ...) \ template \ Matrix \ NAME (const Matrix& X, __VA_ARGS__) \ { \ Matrix ret(X.rows(), X.cols(), false); \ const_matrix_forward_iterator xit; \ const_matrix_forward_iterator xlast \ = X.template end_f(); \ typename Matrix::forward_iterator rit \ = ret.begin_f(); \ for (xit = X.template begin_f(); xit != xlast; ++xit) { \ *rit = NAME (*xit, ARGNAMES); \ ++rit; \ } \ SCYTHE_VIEW_RETURN(double, RO, RS, ret) \ } \ \ template \ Matrix \ NAME (const Matrix& X, __VA_ARGS__) \ { \ return NAME (X, ARGNAMES); \ } /**** The Beta Distribution ****/ /* CDFs */ /*! \brief The beta distribution function. * * Computes the value of the beta cumulative distribution function * with shape parameters \a a and \a b at the desired quantile, * \a x. * * It is also possible to call this function with a Matrix of * doubles as its first argument. In this case the function will * return a Matrix of doubles of the same dimension as \a x, * containing the result of evaluating this function at each value * in \a x, given the remaining fixed parameters. By default, the * returned Matrix will be concrete and have the same matrix_order * as \a x, but you may invoke a generalized version of the function * with an explicit template call. * * \param x The desired quantile, between 0 and 1. * \param a The first non-negative beta shape parameter. * \param b The second non-negative beta shape parameter. * * \see dbeta(double x, double a, double b) * \see rng::rbeta(double a, double b) * \see betafn(double a, double b) * \see lnbetafn(double a, double b) * * \throw scythe_invalid_arg (Level 1) * \throw scythe_range_error (Level 1) * \throw scythe_precision_error (Level 1) */ inline double pbeta(double x, double a, double b) { SCYTHE_CHECK_10(a <= 0 || b <= 0,scythe_invalid_arg, "a or b <= 0"); if (x <= 0) return 0.; if (x >= 1) return 1.; return pbeta_raw(x,a,b); } SCYTHE_DISTFUN_MATRIX(pbeta, double, SCYTHE_ARGSET(a, b), double a, double b) /* PDFs */ /*! \brief The beta density function. * * Computes the value of the beta probability density function * with shape parameters \a a and \a b at the desired quantile, * \a x. * * It is also possible to call this function with a Matrix of * doubles as its first argument. 
In this case the function will * return a Matrix of doubles of the same dimension as \a x, * containing the result of evaluating this function at each value * in \a x, given the remaining fixed parameters. By default, the * returned Matrix will be concrete and have the same matrix_order * as \a x, but you may invoke a generalized version of the function * with an explicit template call. * * \param x The desired quantile, between 0 and 1. * \param a The first non-negative beta shape parameter. * \param b The second non-negative beta shape parameter. * * \see pbeta(double x, double a, double b) * \see rng::rbeta(double a, double b) * \see betafn(double a, double b) * \see lnbetafn(double a, double b) * * \throw scythe_invalid_arg (Level 1) * \throw scythe_range_error (Level 1) * \throw scythe_precision_error (Level 1) */ inline double dbeta(double x, double a, double b) { SCYTHE_CHECK_10((x < 0.0) || (x > 1.0), scythe_invalid_arg, "x not in [0,1]"); SCYTHE_CHECK_10(a < 0.0, scythe_invalid_arg, "a < 0"); SCYTHE_CHECK_10(b < 0.0, scythe_invalid_arg, "b < 0"); return (std::pow(x, (a-1.0)) * std::pow((1.0-x), (b-1.0)) ) / betafn(a,b); } SCYTHE_DISTFUN_MATRIX(dbeta, double, SCYTHE_ARGSET(a, b), double a, double b) /* Returns the natural log of the ordinate of the Beta density * evaluated at x with Shape1 a, and Shape2 b */ /*! \brief The natural log of the ordinate of the beta density * function. * * Computes the value of the natural log of the ordinate of the beta * probability density function * with shape parameters \a a and \a b at the desired quantile, * \a x. * * It is also possible to call this function with a Matrix of * doubles as its first argument. In this case the function will * return a Matrix of doubles of the same dimension as \a x, * containing the result of evaluating this function at each value * in \a x, given the remaining fixed parameters. By default, the * returned Matrix will be concrete and have the same matrix_order * as \a x, but you may invoke a generalized version of the function * with an explicit template call. * * \param x The desired quantile, between 0 and 1. * \param a The first non-negative beta shape parameter. * \param b The second non-negative beta shape parameter. * * \see dbeta(double x, double a, double b) * * \throw scythe_invalid_arg (Level 1) * \throw scythe_range_error (Level 1) * \throw scythe_precision_error (Level 1) */ inline double lndbeta1(double x, double a, double b) { SCYTHE_CHECK_10((x < 0.0) || (x > 1.0), scythe_invalid_arg, "x not in [0,1]"); SCYTHE_CHECK_10(a < 0.0, scythe_invalid_arg, "a < 0"); SCYTHE_CHECK_10(b < 0.0, scythe_invalid_arg, "b < 0"); return (a-1.0) * std::log(x) + (b-1) * std::log(1.0-x) - lnbetafn(a,b); } SCYTHE_DISTFUN_MATRIX(lndbeta1, double, SCYTHE_ARGSET(a, b), double a, double b) /**** The Binomial Distribution ****/ /* CDFs */ /*! \brief The binomial distribution function. * * Computes the value of the binomial cumulative distribution function * with \a n trials and \a p probability of success on each trial, * at the desired quantile \a x. * * It is also possible to call this function with a Matrix of * doubles as its first argument. In this case the function will * return a Matrix of doubles of the same dimension as \a x, * containing the result of evaluating this function at each value * in \a x, given the remaining fixed parameters. By default, the * returned Matrix will be concrete and have the same matrix_order * as \a x, but you may invoke a generalized version of the function * with an explicit template call. 
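*
* A hypothetical sketch of both call forms (the Matrix construction and
* element assignments are purely illustrative):
* \code
* double p = pbinom(3.0, 10, 0.5);         // P(X <= 3) for X ~ Binomial(10, 0.5)
* Matrix<double> q(2, 1, false);
* q[0] = 3.0; q[1] = 7.0;
* Matrix<double> P = pbinom(q, 10, 0.5);   // element-wise evaluation over q
* \endcode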
* * \param x The desired quantile. * \param n The number of trials. * \param p The probability of success on each trial. * * \see dbinom(double x, unsigned int n, double p) * \see rng::rbinom(unsigned int n, double p) * * \throw scythe_invalid_arg (Level 1) * \throw scythe_range_error (Level 1) * \throw scythe_precision_error (Level 1) */ inline double pbinom(double x, unsigned int n, double p) { SCYTHE_CHECK_10(p < 0 || p > 1, scythe_invalid_arg, "p not in [0,1]"); double X = std::floor(x); if (X < 0.0) return 0; if (n <= X) return 1; return pbeta(1 - p, n - X, X + 1); } SCYTHE_DISTFUN_MATRIX(pbinom, double, SCYTHE_ARGSET(n, p), unsigned int n, double p) /* PDFs */ /*! \brief The binomial density function. * * Computes the value of the binomial probability density function * with \a n trials and \a p probability of success on each trial, * at the desired quantile \a x. * * It is also possible to call this function with a Matrix of * doubles as its first argument. In this case the function will * return a Matrix of doubles of the same dimension as \a x, * containing the result of evaluating this function at each value * in \a x, given the remaining fixed parameters. By default, the * returned Matrix will be concrete and have the same matrix_order * as \a x, but you may invoke a generalized version of the function * with an explicit template call. * * \param x The desired quantile. * \param n The number of trials. * \param p The probability of success on each trial. * * \see pbinom(double x, unsigned int n, double p) * \see rng::rbinom(unsigned int n, double p) * * \throw scythe_invalid_arg (Level 1) * \throw scythe_range_error (Level 1) * \throw scythe_precision_error (Level 1) */ inline double dbinom(double x, unsigned int n, double p) { SCYTHE_CHECK_10(p < 0 || p > 1, scythe_invalid_arg, "p not in [0, 1]"); double X = std::floor(x + 0.5); return dbinom_raw(X, n, p, 1 - p); } SCYTHE_DISTFUN_MATRIX(dbinom, double, SCYTHE_ARGSET(n, p), unsigned int n, double p) /**** The Chi Squared Distribution ****/ /* CDFs */ /*! \brief The \f$\chi^2\f$ distribution function. * * Computes the value of the \f$\chi^2\f$ cumulative distribution * function with \a df degrees of freedom, at the desired quantile * \a x. * * It is also possible to call this function with a Matrix of * doubles as its first argument. In this case the function will * return a Matrix of doubles of the same dimension as \a x, * containing the result of evaluating this function at each value * in \a x, given the remaining fixed parameters. By default, the * returned Matrix will be concrete and have the same matrix_order * as \a x, but you may invoke a generalized version of the function * with an explicit template call. * * \param x The desired quantile. * \param df The degrees of freedom. * \see dchisq(double x, double df) * \see rng::rchisq(double df) * * \throw scythe_invalid_arg (Level 1) * \throw scythe_range_error (Level 1) * \throw scythe_precision_error (Level 1) * \throw scythe_convergence_error (Level 1) * */ inline double pchisq(double x, double df) { return pgamma(x, df/2.0, 2.0); } SCYTHE_DISTFUN_MATRIX(pchisq, double, df, double df) /* PDFs */ /*! \brief The \f$\chi^2\f$ density function. * * Computes the value of the \f$\chi^2\f$ probability density * function with \a df degrees of freedom, at the desired quantile * \a x. * * It is also possible to call this function with a Matrix of * doubles as its first argument. 
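* For example (an illustrative sketch; q stands for a hypothetical,
* already-constructed Matrix of quantiles):
* \code
* double d         = dchisq(3.0, 4.0);   // density with 4 degrees of freedom at x = 3
* Matrix<double> D = dchisq(q, 4.0);     // element-wise over the Matrix q
* \endcode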
In this case the function will * return a Matrix of doubles of the same dimension as \a x, * containing the result of evaluating this function at each value * in \a x, given the remaining fixed parameters. By default, the * returned Matrix will be concrete and have the same matrix_order * as \a x, but you may invoke a generalized version of the function * with an explicit template call. * * \param x The desired quantile. * \param df The degrees of freedom. * \see pchisq(double x, double df) * \see rng::rchisq(double df) * * \throw scythe_invalid_arg (Level 1) * \throw scythe_range_error (Level 1) * \throw scythe_precision_error (Level 1) * \throw scythe_convergence_error (Level 1) * */ inline double dchisq(double x, double df) { return dgamma(x, df / 2.0, 2.0); } SCYTHE_DISTFUN_MATRIX(dchisq, double, df, double df) /**** The Exponential Distribution ****/ /* CDFs */ /*! \brief The exponential distribution function. * * Computes the value of the exponential cumulative distribution * function with given \a scale, at the desired quantile * \a x. * * It is also possible to call this function with a Matrix of * doubles as its first argument. In this case the function will * return a Matrix of doubles of the same dimension as \a x, * containing the result of evaluating this function at each value * in \a x, given the remaining fixed parameters. By default, the * returned Matrix will be concrete and have the same matrix_order * as \a x, but you may invoke a generalized version of the function * with an explicit template call. * * \param x The desired quantile. * \param scale The positive scale of the function. * * \see dexp(double x, double scale) * \see rng::rexp(double scale) * * \throw scythe_invalid_arg (Level 1) */ inline double pexp(double x, double scale) { SCYTHE_CHECK_10(scale <= 0, scythe_invalid_arg, "scale <= 0"); if (x <= 0) return 0; return (1 - std::exp(-x*scale)); } SCYTHE_DISTFUN_MATRIX(pexp, double, scale, double scale) /* PDFs */ /*! \brief The exponential density function. * * Computes the value of the exponential probability density * function with given \a scale, at the desired quantile * \a x. * * It is also possible to call this function with a Matrix of * doubles as its first argument. In this case the function will * return a Matrix of doubles of the same dimension as \a x, * containing the result of evaluating this function at each value * in \a x, given the remaining fixed parameters. By default, the * returned Matrix will be concrete and have the same matrix_order * as \a x, but you may invoke a generalized version of the function * with an explicit template call. * * \param x The desired quantile. * \param scale The positive scale of the function. * * \see pexp(double x, double scale) * \see rng::rexp(double scale) * * \throw scythe_invalid_arg (Level 1) */ inline double dexp(double x, double scale) { SCYTHE_CHECK_10(scale <= 0, scythe_invalid_arg, "scale <= 0"); if (x < 0) return 0; return std::exp(-x * scale) * scale; } SCYTHE_DISTFUN_MATRIX(dexp, double, scale, double scale) /**** The f Distribution ****/ /* CDFs */ /*! \brief The F distribution function. * * Computes the value of the F cumulative distribution function with * \a df1 and \a df2 degrees of freedom, at the desired quantile \a * x. * * It is also possible to call this function with a Matrix of * doubles as its first argument. 
In this case the function will * return a Matrix of doubles of the same dimension as \a x, * containing the result of evaluating this function at each value * in \a x, given the remaining fixed parameters. By default, the * returned Matrix will be concrete and have the same matrix_order * as \a x, but you may invoke a generalized version of the function * with an explicit template call. * * \param x The desired quantile. * \param df1 The non-negative degrees of freedom for the * \f$\chi^2\f$ variate in the nominator of the F statistic. * \param df2 The non-negative degrees of freedom for the * \f$\chi^2\f$ variate in the denominator of the F statistic. * * * \see df(double x, double df1, double df2) * \see rng::rf(double df1, double df2) * * \throw scythe_invalid_arg (Level 1) * \throw scythe_range_error (Level 1) * \throw scythe_precision_error (Level 1) * \throw scythe_convergence_error (Level 1) */ inline double pf(double x, double df1, double df2) { SCYTHE_CHECK_10(df1 <= 0 || df2 <= 0, scythe_invalid_arg, "df1 or df2 <= 0"); if (x <= 0) return 0; if (df2 > 4e5) return pchisq(x*df1,df1); if (df1 > 4e5) return 1-pchisq(df2/x,df2); return (1-pbeta(df2 / (df2 + df1 * x), df2 / 2.0, df1 / 2.0)); } SCYTHE_DISTFUN_MATRIX(pf, double, SCYTHE_ARGSET(df1, df2), double df1, double df2) /* PDFs */ /*! \brief The F density function. * * Computes the value of the F probability density function with * \a df1 and \a df2 degrees of freedom, at the desired quantile \a * x. * * It is also possible to call this function with a Matrix of * doubles as its first argument. In this case the function will * return a Matrix of doubles of the same dimension as \a x, * containing the result of evaluating this function at each value * in \a x, given the remaining fixed parameters. By default, the * returned Matrix will be concrete and have the same matrix_order * as \a x, but you may invoke a generalized version of the function * with an explicit template call. * * \param x The desired quantile. * \param df1 The non-negative degrees of freedom for the * \f$\chi^2\f$ variate in the nominator of the F statistic. * \param df2 The non-negative degrees of freedom for the * \f$\chi^2\f$ variate in the denominator of the F statistic. * * \see df(double x, double df1, double df2) * \see rng::rf(double df1, double df2) * * \throw scythe_invalid_arg (Level 1) * \throw scythe_range_error (Level 1) * \throw scythe_precision_error (Level 1) * \throw scythe_convergence_error (Level 1) */ inline double df(double x, double df1, double df2) { double dens; SCYTHE_CHECK_10(df1 <= 0 || df2 <= 0, scythe_invalid_arg, "df1 or df2 <= 0"); if (x <= 0) return 0; double f = 1 / (df2 + x * df1); double q = df2 * f; double p = x * df1 * f; if (df1 >= 2) { f = df1 * q / 2; dens = dbinom_raw((df1 - 2) / 2,(df1 + df2 - 2) / 2, p, q); } else { f = (df1 * df1 * q) /(2 * p * (df1 + df2)); dens = dbinom_raw(df1 / 2,(df1 + df2)/ 2, p, q); } return f*dens; } SCYTHE_DISTFUN_MATRIX(df, double, SCYTHE_ARGSET(df1, df2), double df1, double df2) /**** The Gamma Distribution ****/ /* CDFs */ /*! \brief The gamma distribution function. * * Computes the value of the gamma cumulative distribution * function with given \a shape and \a scale, at the desired quantile * \a x. * * It is also possible to call this function with a Matrix of * doubles as its first argument. 
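* For instance (an illustrative sketch; xq stands for a hypothetical,
* already-constructed Matrix of quantiles, and \a scale is the usual
* scale parameter, so the rate is 1/scale):
* \code
* double cdf         = pgamma(4.0, 2.0, 1.0);   // P(X <= 4) for Gamma(shape = 2, scale = 1)
* Matrix<double> CDF = pgamma(xq, 2.0, 1.0);    // element-wise over the Matrix xq
* \endcode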
In this case the function will * return a Matrix of doubles of the same dimension as \a x, * containing the result of evaluating this function at each value * in \a x, given the remaining fixed parameters. By default, the * returned Matrix will be concrete and have the same matrix_order * as \a x, but you may invoke a generalized version of the function * with an explicit template call. * * \param x The desired quantile. * \param shape The non-negative shape of the distribution. * \param scale The non-negative scale of the distribution. * * \see dgamma(double x, double shape, double scale) * \see rng::rgamma(double shape, double scale) * \see gammafn(double x) * \see lngammafn(double x) * * \throw scythe_invalid_arg (Level 1) * \throw scythe_range_error (Level 1) * \throw scythe_precision_error (Level 1) * \throw scythe_convergence_error (Level 1) */ inline double pgamma (double x, double shape, double scale) { const double xbig = 1.0e+8, xlarge = 1.0e+37, alphlimit = 1000.;/* normal approx. for shape > alphlimit */ int lower_tail = 1; double pn1, pn2, pn3, pn4, pn5, pn6, arg, a, b, c, an, osum, sum; long n; int pearson; /* check that we have valid values for x and shape */ SCYTHE_CHECK_10(shape <= 0. || scale <= 0., scythe_invalid_arg, "shape or scale <= 0"); x /= scale; if (x <= 0.) return 0.0; /* use a normal approximation if shape > alphlimit */ if (shape > alphlimit) { pn1 = std::sqrt(shape) * 3. * (std::pow(x/shape, 1./3.) + 1. / (9. * shape) - 1.); return pnorm(pn1, 0., 1.); } /* if x is extremely large __compared to shape__ then return 1 */ if (x > xbig * shape) return 1.0; if (x <= 1. || x < shape) { pearson = 1;/* use pearson's series expansion. */ arg = shape * std::log(x) - x - lngammafn(shape + 1.); c = 1.; sum = 1.; a = shape; do { a += 1.; c *= x / a; sum += c; } while (c > DBL_EPSILON); arg += std::log(sum); } else { /* x >= max( 1, shape) */ pearson = 0;/* use a continued fraction expansion */ arg = shape * std::log(x) - x - lngammafn(shape); a = 1. - shape; b = a + x + 1.; pn1 = 1.; pn2 = x; pn3 = x + 1.; pn4 = x * b; sum = pn3 / pn4; for (n = 1; ; n++) { a += 1.;/* = n+1 -shape */ b += 2.;/* = 2(n+1)-shape+x */ an = a * n; pn5 = b * pn3 - an * pn1; pn6 = b * pn4 - an * pn2; if (std::fabs(pn6) > 0.) { osum = sum; sum = pn5 / pn6; if (std::fabs(osum - sum) <= DBL_EPSILON * std::min(1., sum)) break; } pn1 = pn3; pn2 = pn4; pn3 = pn5; pn4 = pn6; if (std::fabs(pn5) >= xlarge) { /* re-scale terms in continued fraction if they are large */ pn1 /= xlarge; pn2 /= xlarge; pn3 /= xlarge; pn4 /= xlarge; } } arg += std::log(sum); } lower_tail = (lower_tail == pearson); sum = std::exp(arg); return (lower_tail) ? sum : 1 - sum; } SCYTHE_DISTFUN_MATRIX(pgamma, double, SCYTHE_ARGSET(shape, scale), double shape, double scale) /* PDFs */ /*! \brief The gamma density function. * * Computes the value of the gamma probability density * function with given \a shape and \a scale, at the desired quantile * \a x. * * It is also possible to call this function with a Matrix of * doubles as its first argument. In this case the function will * return a Matrix of doubles of the same dimension as \a x, * containing the result of evaluating this function at each value * in \a x, given the remaining fixed parameters. By default, the * returned Matrix will be concrete and have the same matrix_order * as \a x, but you may invoke a generalized version of the function * with an explicit template call. * * \param x The desired quantile. * \param shape The non-negative shape of the distribution. 
* \param scale The non-negative scale of the distribution. * * \see pgamma(double x, double shape, double scale) * \see rng::rgamma(double shape, double scale) * \see gammafn(double x) * \see lngammafn(double x) * * \throw scythe_invalid_arg (Level 1) * \throw scythe_range_error (Level 1) * \throw scythe_precision_error (Level 1) * \throw scythe_convergence_error (Level 1) */ inline double dgamma(double x, double shape, double scale) { SCYTHE_CHECK_10(shape <= 0 || scale <= 0,scythe_invalid_arg, "shape or scale <= 0"); if (x < 0) return 0.0; if (x == 0) { SCYTHE_CHECK_10(shape < 1,scythe_invalid_arg, "x == 0 and shape < 1"); if (shape > 1) return 0.0; return 1 / scale; } if (shape < 1) { double pr = dpois_raw(shape, x/scale); return pr * shape / x; } /* else shape >= 1 */ double pr = dpois_raw(shape - 1, x / scale); return pr / scale; } SCYTHE_DISTFUN_MATRIX(dgamma, double, SCYTHE_ARGSET(shape, scale), double shape, double scale) /**** The Logistic Distribution ****/ /* CDFs */ /*! \brief The logistic distribution function. * * Computes the value of the logistic cumulative distribution * function with given \a location and \a scale, at the desired * quantile \a x. * * It is also possible to call this function with a Matrix of * doubles as its first argument. In this case the function will * return a Matrix of doubles of the same dimension as \a x, * containing the result of evaluating this function at each value * in \a x, given the remaining fixed parameters. By default, the * returned Matrix will be concrete and have the same matrix_order * as \a x, but you may invoke a generalized version of the function * with an explicit template call. * * \param x The desired quantile. * \param location The location of the distribution. * \param scale The positive scale of the distribution. * * \see dlogis(double x, double location, double scale) * \see rng::rlogis(double location, double scale) * * \throw scythe_invalid_arg (Level 1) */ inline double plogis (double x, double location, double scale) { SCYTHE_CHECK_10(scale <= 0.0, scythe_invalid_arg, "scale <= 0"); double X = (x-location) / scale; X = std::exp(-X); return 1 / (1+X); } SCYTHE_DISTFUN_MATRIX(plogis, double, SCYTHE_ARGSET(location, scale), double location, double scale) /* PDFs */ /*! \brief The logistic density function. * * Computes the value of the logistic probability density * function with given \a location and \a scale, at the desired * quantile \a x. * * It is also possible to call this function with a Matrix of * doubles as its first argument. In this case the function will * return a Matrix of doubles of the same dimension as \a x, * containing the result of evaluating this function at each value * in \a x, given the remaining fixed parameters. By default, the * returned Matrix will be concrete and have the same matrix_order * as \a x, but you may invoke a generalized version of the function * with an explicit template call. * * \param x The desired quantile. * \param location The location of the distribution. * \param scale The positive scale of the distribution. 
* * \see plogis(double x, double location, double scale) * \see rng::rlogis(double location, double scale) * * \throw scythe_invalid_arg (Level 1) */ inline double dlogis(double x, double location, double scale) { SCYTHE_CHECK_10(scale <= 0.0, scythe_invalid_arg, "scale <= 0"); double X = (x - location) / scale; double e = std::exp(-X); double f = 1.0 + e; return e / (scale * f * f); } SCYTHE_DISTFUN_MATRIX(dlogis, double, SCYTHE_ARGSET(location, scale), double location, double scale) /**** The Log Normal Distribution ****/ /* CDFs */ /*! \brief The log-normal distribution function. * * Computes the value of the log-normal cumulative distribution * function with mean \a logmean and standard * deviation \a logsd, at the desired quantile \a x. * * It is also possible to call this function with a Matrix of * doubles as its first argument. In this case the function will * return a Matrix of doubles of the same dimension as \a x, * containing the result of evaluating this function at each value * in \a x, given the remaining fixed parameters. By default, the * returned Matrix will be concrete and have the same matrix_order * as \a x, but you may invoke a generalized version of the function * with an explicit template call. * * \param x The desired quantile. * \param logmean The mean of the distribution. * \param logsd The positive standard deviation of the distribution. * * \see dlnorm(double x, double logmean, double logsd) * \see rng::rlnorm(double logmean, double logsd) * \see pnorm(double x, double logmean, double logsd) * * \throw scythe_invalid_arg (Level 1) * \throw scythe_convergence_error (Level 1) */ inline double plnorm (double x, double logmean, double logsd) { SCYTHE_CHECK_10(logsd <= 0, scythe_invalid_arg, "logsd <= 0"); if (x > 0) return pnorm(std::log(x), logmean, logsd); return 0; } SCYTHE_DISTFUN_MATRIX(plnorm, double, SCYTHE_ARGSET(logmean, logsd), double logmean, double logsd) /* PDFs */ /*! \brief The log-normal density function. * * Computes the value of the log-normal probability density * function with mean \a logmean and standard * deviation \a logsd, at the desired quantile \a x. * * It is also possible to call this function with a Matrix of * doubles as its first argument. In this case the function will * return a Matrix of doubles of the same dimension as \a x, * containing the result of evaluating this function at each value * in \a x, given the remaining fixed parameters. By default, the * returned Matrix will be concrete and have the same matrix_order * as \a x, but you may invoke a generalized version of the function * with an explicit template call. * * \param x The desired quantile. * \param logmean The mean of the distribution. * \param logsd The positive standard deviation of the distribution. * * \see plnorm(double x, double logmean, double logsd) * \see rng::rlnorm(double logmean, double logsd) * \see dnorm(double x, double logmean, double logsd) * * \throw scythe_invalid_arg (Level 1) */ inline double dlnorm(double x, double logmean, double logsd) { SCYTHE_CHECK_10(logsd <= 0, scythe_invalid_arg, "logsd <= 0"); if (x == 0) return 0; double y = (std::log(x) - logmean) / logsd; return (1 / (std::sqrt(2 * M_PI))) * std::exp(-0.5 * y * y) / (x * logsd); } SCYTHE_DISTFUN_MATRIX(dlnorm, double, SCYTHE_ARGSET(logmean, logsd), double logmean, double logsd) /**** The Negative Binomial Distribution ****/ /* CDFs */ /*! \brief The negative binomial distribution function. 
* * Computes the value of the negative binomial cumulative distribution * function with \a n target number of successful trials and \a p * probability of success on each trial, at the desired quantile \a * x. * * It is also possible to call this function with a Matrix of * doubles as its first argument. In this case the function will * return a Matrix of doubles of the same dimension as \a x, * containing the result of evaluating this function at each value * in \a x, given the remaining fixed parameters. By default, the * returned Matrix will be concrete and have the same matrix_order * as \a x, but you may invoke a generalized version of the function * with an explicit template call. * * \param x The desired non-negative, integer, quantile. * \param n The positive target number of successful trials * (dispersion parameter). * \param p The probability of success on each trial. * * \see dnbinom(unsigned int x, double n, double p) * \see rng::rnbinom(double n, double p) * * \throw scythe_invalid_arg (Level 1) * \throw scythe_range_error (Level 1) * \throw scythe_precision_error (Level 1) */ inline double pnbinom(unsigned int x, double n, double p) { SCYTHE_CHECK_10(n == 0 || p <= 0 || p >= 1, scythe_invalid_arg, "n == 0 or p not in (0,1)"); return pbeta(p, n, x + 1); } SCYTHE_DISTFUN_MATRIX(pnbinom, unsigned int, SCYTHE_ARGSET(n, p), double n, double p) /* PDFs */ /*! \brief The negative binomial density function. * * Computes the value of the negative binomial probability density * function with \a n target number of successful trials and \a p * probability of success on each trial, at the desired quantile \a * x. * * It is also possible to call this function with a Matrix of * doubles as its first argument. In this case the function will * return a Matrix of doubles of the same dimension as \a x, * containing the result of evaluating this function at each value * in \a x, given the remaining fixed parameters. By default, the * returned Matrix will be concrete and have the same matrix_order * as \a x, but you may invoke a generalized version of the function * with an explicit template call. * * \param x The desired non-negative, integer, quantile. * \param n The positive target number of successful trials * (dispersion parameter). * \param p The probability of success on each trial. * * \see dnbinom(unsigned int x, double n, double p) * \see rng::rnbinom(double n, double p) * * \throw scythe_invalid_arg (Level 1) * \throw scythe_range_error (Level 1) * \throw scythe_precision_error (Level 1) */ inline double dnbinom(unsigned int x, double n, double p) { SCYTHE_CHECK_10(n == 0 || p <= 0 || p >= 1, scythe_invalid_arg, "n == 0 or p not in (0,1)"); double prob = dbinom_raw(n, x + n, p, 1 - p); double P = (double) n / (n + x); return P * prob; } SCYTHE_DISTFUN_MATRIX(dnbinom, unsigned int, SCYTHE_ARGSET(n, p), double n, double p) /**** The Normal Distribution ****/ /* CDFs */ /*! \brief The normal distribution function. * * Computes the value of the normal cumulative distribution * function with given \a mean and standard deviation \a sd, at the * desired quantile \a x. * * It is also possible to call this function with a Matrix of * doubles as its first argument. In this case the function will * return a Matrix of doubles of the same dimension as \a x, * containing the result of evaluating this function at each value * in \a x, given the remaining fixed parameters. 
By default, the * returned Matrix will be concrete and have the same matrix_order * as \a x, but you may invoke a generalized version of the function * with an explicit template call. * * \param x The desired quantile. * \param mean The mean of the distribution. * \param sd The positive standard deviation of the distribution. * * \see dnorm(double x, double mean, double sd) * \see rng::rnorm(double mean, double sd) * * \throw scythe_invalid_arg (Level 1) * \throw scythe_convergence_error (Level 1) */ inline double pnorm (double x, double mean, double sd) { SCYTHE_CHECK_10(sd <= 0, scythe_invalid_arg, "negative standard deviation"); return pnorm1((x - mean) / sd, true, false); } SCYTHE_DISTFUN_MATRIX(pnorm, double, SCYTHE_ARGSET(mean, sd), double mean, double sd) /* PDFs */ /*! \brief The normal density function. * * Computes the value of the normal probability density * function with given \a mean and standard deviation \a sd, at the * desired quantile \a x. * * It is also possible to call this function with a Matrix of * doubles as its first argument. In this case the function will * return a Matrix of doubles of the same dimension as \a x, * containing the result of evaluating this function at each value * in \a x, given the remaining fixed parameters. By default, the * returned Matrix will be concrete and have the same matrix_order * as \a x, but you may invoke a generalized version of the function * with an explicit template call. * * \param x The desired quantile. * \param mean The mean of the distribution. * \param sd The positive standard deviation of the distribution. * * \see pnorm(double x, double mean, double sd) * \see rng::rnorm(double mean, double sd) * * \throw scythe_invalid_arg (Level 1) */ inline double dnorm(double x, double mean, double sd) { SCYTHE_CHECK_10(sd <= 0, scythe_invalid_arg, "negative standard deviation"); double X = (x - mean) / sd; return (M_1_SQRT_2PI * std::exp(-0.5 * X * X) / sd); } SCYTHE_DISTFUN_MATRIX(dnorm, double, SCYTHE_ARGSET(mean, sd), double mean, double sd) /* Return the natural log of the normal PDF */ /*! \brief The natural log of normal density function. * * Computes the value of the natural log of the normal probability * density function with given \a mean and standard deviation \a sd, * at the desired quantile \a x. * * It is also possible to call this function with a Matrix of * doubles as its first argument. In this case the function will * return a Matrix of doubles of the same dimension as \a x, * containing the result of evaluating this function at each value * in \a x, given the remaining fixed parameters. By default, the * returned Matrix will be concrete and have the same matrix_order * as \a x, but you may invoke a generalized version of the function * with an explicit template call. * * \param x The desired quantile. * \param mean The mean of the distribution. * \param sd The positive standard deviation of the distribution. * * \see dnorm(double x, double mean, double sd) * \see pnorm(double x, double mean, double sd) * \see rng::rnorm(double mean, double sd) * * \throw scythe_invalid_arg (Level 1) */ inline double lndnorm (double x, double mean, double sd) { SCYTHE_CHECK_10(sd <= 0, scythe_invalid_arg, "negative standard deviation"); double X = (x - mean) / sd; return -(M_LN_SQRT_2PI + 0.5 * X * X + std::log(sd)); } SCYTHE_DISTFUN_MATRIX(lndnorm, double, SCYTHE_ARGSET(mean, sd), double mean, double sd) /* Quantile functions */ /*! \brief The standard normal quantile function. 
* * Computes the value of the standard normal quantile function * at the desired probability \a in_p. * * It is also possible to call this function with a Matrix of * doubles as its first argument. In this case the function will * return a Matrix of doubles of the same dimension as \a x, * containing the result of evaluating this function at each value * in \a x, given the remaining fixed parameters. By default, the * returned Matrix will be concrete and have the same matrix_order * as \a x, but you may invoke a generalized version of the function * with an explicit template call. * * \param in_p The desired probability. * * \see pnorm(double x, double mean, double sd) * \see dnorm(double x, double mean, double sd) * \see rng::rnorm(double mean, double sd) * * \throw scythe_invalid_arg (Level 1) */ inline double qnorm1 (double in_p) { double p0 = -0.322232431088; double q0 = 0.0993484626060; double p1 = -1.0; double q1 = 0.588581570495; double p2 = -0.342242088547; double q2 = 0.531103462366; double p3 = -0.0204231210245; double q3 = 0.103537752850; double p4 = -0.453642210148e-4; double q4 = 0.38560700634e-2; double xp = 0.0; double p = in_p; if (p > 0.5) p = 1 - p; SCYTHE_CHECK_10(p < 10e-20, scythe_range_error, "p outside accuracy limit"); if (p == 0.5) return xp; double y = std::sqrt (std::log (1.0 / std::pow (p, 2))); xp = y + ((((y * p4 + p3) * y + p2) * y + p1) * y + p0) / ((((y * q4 + q3) * y + q2) * y + q1) * y + q0); if (in_p < 0.5) xp = -1 * xp; return xp; } SCYTHE_DISTFUN_MATRIX(qnorm1, double, in_p, double in_p) /**** The Poisson Distribution ****/ /* CDFs */ /*! \brief The Poisson distribution function. * * Computes the value of the Poisson cumulative distribution * function with expected number of occurrences \a lambda, at the * desired quantile \a x. * * It is also possible to call this function with a Matrix of * doubles as its first argument. In this case the function will * return a Matrix of doubles of the same dimension as \a x, * containing the result of evaluating this function at each value * in \a x, given the remaining fixed parameters. By default, the * returned Matrix will be concrete and have the same matrix_order * as \a x, but you may invoke a generalized version of the function * with an explicit template call. * * \param x The desired integer quantile. * \param lambda The expected positive number of occurrences. * * \see dpois(unsigned int x, double lambda) * \see rng::rpois(double lambda) * * \throws scythe_invalid_arg (Level 1) * \throw scythe_range_error (Level 1) * \throw scythe_precision_error (Level 1) * \throw scythe_convergence_error (Level 1) */ inline double ppois(unsigned int x, double lambda) { SCYTHE_CHECK_10(lambda<=0.0, scythe_invalid_arg, "lambda <= 0"); if (lambda == 1) return 1; return 1 - pgamma(lambda, x + 1, 1.0); } SCYTHE_DISTFUN_MATRIX(ppois, unsigned int, lambda, double lambda) /* PDFs */ /*! \brief The Poisson density function. * * Computes the value of the Poisson probability density * function with expected number of occurrences \a lambda, at the * desired quantile \a x. * * It is also possible to call this function with a Matrix of * doubles as its first argument. In this case the function will * return a Matrix of doubles of the same dimension as \a x, * containing the result of evaluating this function at each value * in \a x, given the remaining fixed parameters. 
By default, the * returned Matrix will be concrete and have the same matrix_order * as \a x, but you may invoke a generalized version of the function * with an explicit template call. * * \param x The desired integer quantile. * \param lambda The expected positive number of occurrences. * * \see ppois(unsigned int x, double lambda) * \see rng::rpois(double lambda) * * \throws scythe_invalid_arg (Level 1) */ inline double dpois(unsigned int x, double lambda) { SCYTHE_CHECK_10(lambda<=0.0, scythe_invalid_arg, "lambda <= 0"); // compute log(x!) double xx = x+1; double cof[6] = { 76.18009172947146, -86.50532032941677, 24.01409824083091, -1.231739572450155, 0.1208650973866179e-2, -0.5395239384953e-5 }; double y = xx; double tmp = xx + 5.5 - (xx + 0.5) * std::log(xx + 5.5); double ser = 1.000000000190015; for (int j = 0; j <= 5; j++) { ser += (cof[j] / ++y); } double lnfactx = std::log(2.5066282746310005 * ser / xx) - tmp; return (std::exp( -1*lnfactx + x * std::log(lambda) - lambda)); } SCYTHE_DISTFUN_MATRIX(dpois, unsigned int, lambda, double lambda) /**** The t Distribution ****/ /* CDFs */ /*! \brief The Student t distribution function. * * Computes the value of the Student t cumulative distribution * function with \a n degrees of freedom, at the desired quantile * \a x. * * It is also possible to call this function with a Matrix of * doubles as its first argument. In this case the function will * return a Matrix of doubles of the same dimension as \a x, * containing the result of evaluating this function at each value * in \a x, given the remaining fixed parameters. By default, the * returned Matrix will be concrete and have the same matrix_order * as \a x, but you may invoke a generalized version of the function * with an explicit template call. * * \param x The desired quantile. * \param n The positive degrees of freedom of the distribution. * * \see dt(double x, bool b1, bool b2) * \see rng::rt1(double mu, double sigma2, double nu) * * \throw scythe_invalid_arg (Level 1) * \throw scythe_convergence_error (Level 1) * \throw scythe_range_error (Level 1) * \throw scythe_precision_error (Level 1) */ inline double pt(double x, double n) { double val; SCYTHE_CHECK_10(n <= 0, scythe_invalid_arg, "n <= 0"); if (n > 4e5) { val = 1/(4*n); return pnorm1(x * (1 - val) / ::sqrt(1 + x * x * 2. * val), true, false); } val = pbeta(n / (n + x * x), n / 2.0, 0.5); val /= 2; if (x <= 0) return val; else return 1 - val; } SCYTHE_DISTFUN_MATRIX(pt, double, n, double n) /* PDFs */ /*! \brief The Student t distribution function. * * Computes the value of the Student t cumulative distribution * function with \a n degrees of freedom, at the desired quantile * \a x. * * It is also possible to call this function with a Matrix of * doubles as its first argument. In this case the function will * return a Matrix of doubles of the same dimension as \a x, * containing the result of evaluating this function at each value * in \a x, given the remaining fixed parameters. By default, the * returned Matrix will be concrete and have the same matrix_order * as \a x, but you may invoke a generalized version of the function * with an explicit template call. * * \param x The desired quantile. * \param n The positive degrees of freedom of the distribution. 
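*
* An illustrative sketch (assuming the scythe namespace is in scope):
* \code
* double dens = dt(0.0, 5.0);         // Student t density with 5 df, evaluated at 0
* double tail = 1.0 - pt(2.0, 5.0);   // upper-tail probability P(T > 2) with 5 df
* \endcode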
* * \see pt(double x, bool b1, bool b2) * \see rng::rt1(double mu, double sigma2, double nu) * * \throw scythe_invalid_arg (Level 1) * \throw scythe_range_error (Level 1) * \throw scythe_precision_error (Level 1) */ inline double dt(double x, double n) { double u; SCYTHE_CHECK_10(n <= 0, scythe_invalid_arg, "n <= 0"); double t = -bd0(n/2., (n + 1) / 2.) + stirlerr((n + 1) / 2.) - stirlerr(n / 2.); if(x*x > 0.2*n) u = std::log(1+x*x/n)*n/2; else u = -bd0(n/2., (n+x*x)/2.) + x*x/2; return std::exp(t-u)/std::sqrt(2*M_PI*(1+x*x/n)); } SCYTHE_DISTFUN_MATRIX(dt, double, n, double n) /* Returns the univariate Student-t density evaluated at x * with mean mu, scale sigma^2, and nu degrees of freedom. * * TODO: Do we want a pt for this distribution? */ /*! \brief The univariate Student t density function. * * Computes the value of the univariate Student t probability * density function with mean \a mu, variance \a sigma2, * and degrees of freedom \a nu, at the desired quantile \a x. * * It is also possible to call this function with a Matrix of * doubles as its first argument. In this case the function will * return a Matrix of doubles of the same dimension as \a x, * containing the result of evaluating this function at each value * in \a x, given the remaining fixed parameters. By default, the * returned Matrix will be concrete and have the same matrix_order * as \a x, but you may invoke a generalized version of the function * with an explicit template call. * * \param x The desired quantile. * \param mu The mean of the distribution. * \param sigma2 The variance of the distribution. * \param nu The degrees of freedom of the distribution. * * \see rng::rt1(double mu, double sigma2, double nu) * \see dt(double x, bool b1, bool b2) * \see pt(double x, bool b1, bool b2) * * \throw scythe_invalid_arg (Level 1) * \throw scythe_range_error (Level 1) * \throw scythe_precision_error (Level 1) */ inline double dt1(double x, double mu, double sigma2, double nu) { double logdens = lngammafn((nu + 1.0) /2.0) - std::log(std::sqrt(nu * M_PI)) - lngammafn(nu / 2.0) - std::log(std::sqrt(sigma2)) - (nu + 1.0) / 2.0 * std::log(1.0 + (std::pow((x - mu), 2.0)) / (nu * sigma2)); return(std::exp(logdens)); } SCYTHE_DISTFUN_MATRIX(dt1, double, SCYTHE_ARGSET(mu, sigma2, nu), double mu, double sigma2, double nu) /* Returns the natural log of the univariate Student-t density * evaluated at x with mean mu, scale sigma^2, and nu * degrees of freedom */ /*! \brief The natural log of the univariate Student t density * function. * * Computes the value of the natural log of the univariate Student t * probability density function with mean \a mu, variance \a sigma2, * and degrees of freedom \a nu, at the desired quantile \a x. * * It is also possible to call this function with a Matrix of * doubles as its first argument. In this case the function will * return a Matrix of doubles of the same dimension as \a x, * containing the result of evaluating this function at each value * in \a x, given the remaining fixed parameters. By default, the * returned Matrix will be concrete and have the same matrix_order * as \a x, but you may invoke a generalized version of the function * with an explicit template call. * * \param x The desired quantile. * \param mu The mean of the distribution. * \param sigma2 The variance of the distribution. * \param nu The degrees of freedom of the distribution. 
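*
* A small sketch contrasting the logged and unlogged forms (arbitrary
* values):
* \code
* double ld = lndt1(1.3, 0.0, 1.0, 4.0);   // log density
* double d  = dt1(1.3, 0.0, 1.0, 4.0);     // the same density, unlogged
* // std::exp(ld) equals d up to floating-point rounding
* \endcode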
* * \see rng::rt1(double mu, double sigma2, double nu) * \see dt(double x, bool b1, bool b2) * \see pt(double x, bool b1, bool b2) * * \throw scythe_invalid_arg (Level 1) * \throw scythe_range_error (Level 1) * \throw scythe_precision_error (Level 1) */ inline double lndt1(double x, double mu, double sigma2, double nu) { double logdens = lngammafn((nu+1.0)/2.0) - std::log(std::sqrt(nu*M_PI)) - lngammafn(nu/2.0) - std::log(std::sqrt(sigma2)) - (nu+1.0)/2.0 * std::log(1.0 + (std::pow((x-mu),2.0)) /(nu * sigma2)); return(logdens); } SCYTHE_DISTFUN_MATRIX(lndt1, double, SCYTHE_ARGSET(mu, sigma2, nu), double mu, double sigma2, double nu) /**** The Uniform Distribution ****/ /* CDFs */ /*! \brief The uniform distribution function. * * Computes the value of the uniform cumulative distribution * function evaluated on the interval [\a a, \a b], at the desired * quantile \a x. * * It is also possible to call this function with a Matrix of * doubles as its first argument. In this case the function will * return a Matrix of doubles of the same dimension as \a x, * containing the result of evaluating this function at each value * in \a x, given the remaining fixed parameters. By default, the * returned Matrix will be concrete and have the same matrix_order * as \a x, but you may invoke a generalized version of the function * with an explicit template call. * * \param x The desired quantile x. * \param a The lower end-point of the distribution. * \param b The upper end-point of the distribution. * * \see dunif(double x, double a, double b) * \see rng::runif() * * \throw scythe_invalid_arg (Level 1) */ inline double punif(double x, double a, double b) { SCYTHE_CHECK_10(b <= a, scythe_invalid_arg, "b <= a"); if (x <= a) return 0.0; if (x >= b) return 1.0; return (x - a) / (b - a); } SCYTHE_DISTFUN_MATRIX(punif, double, SCYTHE_ARGSET(a, b), double a, double b) /* PDFs */ /*! \brief The uniform density function. * * Computes the value of the uniform probability density * function evaluated on the interval [\a a, \a b], at the desired * quantile \a x. * * It is also possible to call this function with a Matrix of * doubles as its first argument. In this case the function will * return a Matrix of doubles of the same dimension as \a x, * containing the result of evaluating this function at each value * in \a x, given the remaining fixed parameters. By default, the * returned Matrix will be concrete and have the same matrix_order * as \a x, but you may invoke a generalized version of the function * with an explicit template call. * * \param x The desired quantile x. * \param a The lower end-point of the distribution. * \param b The upper end-point of the distribution. * * \see punif(double x, double a, double b) * \see rng::runif() * * \throw scythe_invalid_arg (Level 1) */ inline double dunif(double x, double a, double b) { SCYTHE_CHECK_10(b <= a, scythe_invalid_arg, "b <= a"); if (a <= x && x <= b) return 1.0 / (b - a); return 0.0; } SCYTHE_DISTFUN_MATRIX(dunif, double, SCYTHE_ARGSET(a, b), double a, double b) /**** The Weibull Distribution ****/ /* CDFs */ /*! \brief The Weibull distribution function. * * Computes the value of the Weibull cumulative distribution * function with given \a shape and \a scale, at the desired * quantile \a x. * * It is also possible to call this function with a Matrix of * doubles as its first argument. 
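* For example (an illustrative sketch):
* \code
* double p = pweibull(1.0, 2.0, 1.0);   // 1 - exp(-1) for shape 2, scale 1 at x = 1
* \endcode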
In this case the function will * return a Matrix of doubles of the same dimension as \a x, * containing the result of evaluating this function at each value * in \a x, given the remaining fixed parameters. By default, the * returned Matrix will be concrete and have the same matrix_order * as \a x, but you may invoke a generalized version of the function * with an explicit template call. * * \param x The desired quantile. * \param shape The positive shape of the distribution. * \param scale The positive scale of the distribution. * * \see dweibull(double x, double shape, double scale) * \see rng::rweibull(double shape, double scale) * * \throw scythe_invalid_arg (Level 1) */ inline double pweibull(double x, double shape, double scale) { SCYTHE_CHECK_10(shape <= 0 || scale <= 0, scythe_invalid_arg, "shape or scale <= 0"); if (x <= 0) return 0.0; return 1 - std::exp(-std::pow(x / scale, shape)); } SCYTHE_DISTFUN_MATRIX(pweibull, double, SCYTHE_ARGSET(shape, scale), double shape, double scale) /* PDFs */ /*! \brief The Weibull density function. * * Computes the value of the Weibull probability density * function with given \a shape and \a scale, at the desired * quantile \a x. * * It is also possible to call this function with a Matrix of * doubles as its first argument. In this case the function will * return a Matrix of doubles of the same dimension as \a x, * containing the result of evaluating this function at each value * in \a x, given the remaining fixed parameters. By default, the * returned Matrix will be concrete and have the same matrix_order * as \a x, but you may invoke a generalized version of the function * with an explicit template call. * * \param x The desired quantile. * \param shape The positive shape of the distribution. * \param scale The positive scale of the distribution. * * \see pweibull(double x, double shape, double scale) * \see rng::rweibull(double shape, double scale) * * \throw scythe_invalid_arg (Level 1) */ inline double dweibull(double x, double shape, double scale) { SCYTHE_CHECK_10(shape <= 0 || scale <= 0, scythe_invalid_arg, "shape or scale <= 0"); if (x < 0) return 0.; double tmp1 = std::pow(x / scale, shape - 1); double tmp2 = tmp1*(x / scale); return shape * tmp1 * std::exp(-tmp2) / scale; } SCYTHE_DISTFUN_MATRIX(dweibull, double, SCYTHE_ARGSET(shape, scale), double shape, double scale) /* Multivariate Normal */ // TODO: distribution function. Plain old (non-logged) dmvnorm. /*! \brief The natural log of the multivariate normal density * function. * * Computes the value of the natural log of the multivariate normal * probability density function with vector of mean \a mu and * variance-covariance matrix \a Sigma, at the vector of desired * quantiles \a x. * * \param x The vector of desired quantiles. * \param mu The vector of means. * \param Sigma The variance-covariance matrix. * * \see rng:rmvnorm(const Matrix& mu, const Matrix& sigma) * * \throw scythe_dimension_error (Level 1) * \throw scythe_conformation_error (Level 1) * \throw scythe_null_error (Level 1) */ template double lndmvn (const Matrix& x, const Matrix& mu, const Matrix& Sigma) { SCYTHE_CHECK_10(! x.isColVector(), scythe_dimension_error, "x is not a column vector"); SCYTHE_CHECK_10(! mu.isColVector(), scythe_dimension_error, "mu is not a column vector"); SCYTHE_CHECK_10(! 
Sigma.isSquare(), scythe_dimension_error, "Sigma is not square"); SCYTHE_CHECK_10(mu.rows()!=Sigma.rows() || x.rows()!=Sigma.rows(), scythe_conformation_error, "mu, x and Sigma have mismatched row lengths") int k = (int) mu.rows(); return ( (-k/2.0)*std::log(2*M_PI) -0.5 * std::log(det(Sigma)) -0.5 * (t(x - mu)) * invpd(Sigma) * (x-mu) )[0]; } } // end namespace scythe #endif /* SCYTHE_DISTRIBUTIONS_H */ MCMCpack/src/defs.h0000644000176000001440000002436512140061657013555 0ustar ripleyusers/* * Scythe Statistical Library * Copyright (C) 2000-2002 Andrew D. Martin and Kevin M. Quinn; * 2002-present Andrew D. Martin, Kevin M. Quinn, and Daniel * Pemstein. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * under the terms of the GNU General Public License as published by * Free Software Foundation; either version 2 of the License, or (at * your option) any later version. See the text files COPYING * and LICENSE, distributed with this source code, for further * information. * -------------------------------------------------------------------- * scythestat/defs.h */ /*! \file defs.h * \brief Global Scythe definitions. * * This file provides a variety of global definitions used throughout * the Scythe library. * * The majority of these definitions are used only within the library * itself. Those definitions that are part of the public interface * are documented. * */ /* Doxygen main page text */ /*! \mainpage Scythe Statistical Library: Application Programmers' Interface * * \section intro Introduction * * The Scythe Statistical Library is an open source C++ library for * statistical computation, written by Daniel Pemstein (University of * Illinois), Kevin M. Quinn (Harvard University), and Andrew D. * Martin (Washington University). It includes a suite of matrix * manipulation functions, a suite of pseudo-random number generators, * and a suite of numerical optimization routines. Programs written * using Scythe are generally much faster than those written in * commonly used interpreted languages, such as R and MATLAB, and can * be compiled on any system with the GNU GCC compiler (and perhaps * with other C++ compilers). One of the primary design goals of the * Scythe developers has been ease of use for non-expert C++ * programmers. We provide ease of use through three primary * mechanisms: (1) operator and function over-loading, (2) numerous * pre-fabricated utility functions, and (3) clear documentation and * example programs. Additionally, Scythe is quite flexible and * entirely extensible because the source code is available to all * users under the GNU General Public License. * * \section thisdoc About This Document * * This document is the application programmer's interface (API) to * Scythe. It provides documentation for every class, function, and * object in Scythe that is part of the library's public interface. * In addition, the sections below explain how to obtain, install, and * compile the library. * * \section obtain Obtaining Scythe * * The most recent version of Scythe is available for download at * http://scythe.wustl.edu. * * \section install Installation * * Scythe installs as a header-only C++ library. After uncompressing, * simply follow the instructions in the INSTALL file included with * Scythe to install the library. Alternatively, you may copy the * source files in scythestat and scythestat/rng into your project * directory and compile directly, using the SCYTHE_COMPILE_DIRECT * pre-processor flag. 
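*
* As a rough sketch, a minimal program built against the installed
* headers might look like the following (illustrative only; it assumes
* the scythestat headers are on the compiler's include path):
* \code
* #include <scythestat/distributions.h>
* #include <iostream>
*
* int main () {
*   // standard normal CDF at 1.96; other Scythe routines are called the same way
*   std::cout << scythe::pnorm(1.96, 0.0, 1.0) << std::endl;
*   return 0;
* }
* \endcode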
* * \section compile Compilation * * Scythe should work with the GNU GCC compiler, version 4.0.1 and * greater. Scythe has not been tested with other compilers. Scythe * provides a number of pre-processor flags. The * SCYTHE_COMPILE_DIRECT allows the user to compile Scythe sources * directly. The SCYTHE_VIEW_ASSIGNMENT_FLAG turns on R-style * recycling in Matrix::operator=() for view matrices. * * The SCYTHE_DEBUG controls the amount of error trapping in * Scythe. This level ranges from 0 (virtually no checking) to 3 (all * checking, including Matrix bounds checking, turned on). By * default, the level is set to 3. Here's an example of how to * compile a program with only basic error checking: * * \verbatim $ g++ myprog.cc -DSCYTHE_DEBUG=1 \endverbatim * * Finally, the SCYTHE_LAPACK flag enables LAPACK/BLAS. You must have * the LAPACK and BLAS libraries installed on your system and compile * your program with the appropriate linker flags for this to work. For * example, on linux you can enable LAPACK/BLAS support like this: * * \verbatim $ g++ myprog.cc -DSCYTHE_LAPACK -llapack -lblas -pthread \endverbatim * * \section copy Copyright * * Scythe Statistical Library Copyright (C) 2000-2002 Andrew D. Martin * and Kevin M. Quinn; 2002-present Andrew D. Martin, Kevin M. Quinn, * and Daniel Pemstein. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify under the terms of the GNU General Public License as * published by Free Software Foundation; either version 2 of the * License, or (at your option) any later version. See the text files * COPYING and LICENSE, distributed with library's source code, for * further information. * * \section acknowledge Acknowledgments * * We gratefully acknowledge support from the United States National * Science Foundation (Grants SES-0350646 and SES-0350613), the * Department of Political Science, the Weidenbaum Center, and the * Center for Empirical Research in the Law at Washington University, * and the Department of Government and The Institute for Quantitative * Social Science at Harvard University. Neither the foundation, * Washington University, nor Harvard University bear any * responsibility for this software. * * We'd also like to thank the research assistants who have helped us * with Scythe: Matthew Fasman, Steve Haptonstahl, Kate Jensen, Laura * Keys, Kevin Rompala, Joe Sheehan, and Jean Yang. */ #ifndef SCYTHE_DEFS_H #define SCYTHE_DEFS_H /* In many functions returning matrices, we want to allow the user to * get a matrix of any style, but want to work with concretes inside * the function, for efficiency. This macro originally contained the * code: * * if (_STYLE_ == View) \ * return Matrix<_TYPE_,_ORDER_,View>(_MATRIX_); \ * else \ * return _MATRIX_; * * to convert to View before return if necessary. Of course, this is * completely redundant, since the copy constructor gets called on * return anyway, so the body of the macro was replaced with a simple * return. If we change our minds down the road about how to handle * these returns, code changes will be centered on this macro. */ #define SCYTHE_VIEW_RETURN(_TYPE_, _ORDER_, _STYLE_, _MATRIX_) \ return _MATRIX_; /* Some macros to do bounds checking for iterator accesses. The first * two are only called by the [] operator in the random access * iterator. The third macro handles everything for checks on simple * current iterator location accesses. 
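*
* As a purely illustrative sketch (the enclosing iterator class, its
* reference typedef, and its pos_ member are assumed), a dereference
* operator might guard itself with the third macro like this:
*
*   reference operator* () const {
*     SCYTHE_ITER_CHECK_BOUNDS();
*     return *pos_;
*   }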
*/ #define SCYTHE_ITER_CHECK_POINTER_BOUNDS(POINTER) \ { \ SCYTHE_CHECK_30(POINTER >= start_ + size_ || POINTER < start_, \ scythe_bounds_error, "Iterator access (offset " \ << offset_ << ") out of matrix bounds") \ } #define SCYTHE_ITER_CHECK_OFFSET_BOUNDS(OFFSET) \ { \ SCYTHE_CHECK_30(OFFSET >= size_, scythe_bounds_error, \ "Iterator access (offset " << offset_ << ") out of matrix bounds")\ } #define SCYTHE_ITER_CHECK_BOUNDS() \ { \ if (M_STYLE != Concrete || M_ORDER != ORDER) { \ SCYTHE_ITER_CHECK_OFFSET_BOUNDS(offset_); \ } else { \ SCYTHE_ITER_CHECK_POINTER_BOUNDS(pos_); \ } \ } /*! \namespace scythe * \brief The Scythe library namespace. * * All Scythe library declarations are defined within the scythe * namespace. This prevents name clashing with other libraries' * members or with declarations in users' program code. */ namespace scythe { /*! * \brief Matrix order enumerator. * * Matrix templates may be either column-major or row-major ordered * and this enumerator is used to differentiate between the two * types. * * The enumerator provides two values: Col and Row. * * \see Matrix */ enum matrix_order { Col, Row }; /*! * \brief Matrix style enumerator. * * Matrix templates may be either concrete matrices or views and * this enumerator is used to differentiate between the two types. * * Concrete matrices provide direct access to an underlying array of * matrix data, while views offer a more general interface to data * arrays, with potentially many views referencing the same * underlying data structure. * * The enum provides two values: Concrete and View. * * \see Matrix */ enum matrix_style { Concrete, View }; /*! * \brief A convenient marker for vector submatrix access. * Passing an all_elements object to a two-argument Matrix submatrix * method allows the caller to access a full vector submatrix. We * further define an instance of all_elements named "_" in the * scythe namespace to allow users to easily reference entire * vectors within matrices. * * \see Matrix::operator()(const all_elements, uint) * \see Matrix::operator()(const all_elements, uint) const * \see Matrix::operator()(uint, const all_elements) * \see Matrix::operator()(uint, const all_elements) const * */ struct all_elements { } const _ = {}; // A little helper method to see if more col-order or row-order. // Tie breaks towards col. template <matrix_order o1, matrix_order o2, matrix_order o3> bool maj_col() { if ((o1 == Col && o2 == Col) || (o1 == Col && o3 == Col) || (o2 == Col && o3 == Col)) return true; return false; } template <matrix_order o1, matrix_order o2, matrix_order o3, matrix_order o4> bool maj_col() { if ((o1 == Col && o2 == Col) || (o1 == Col && o3 == Col) || (o1 == Col && o4 == Col) || (o2 == Col && o3 == Col) || (o2 == Col && o4 == Col) || (o3 == Col && o4 == Col)) return true; return false; } } // end namespace scythe #endif /* SCYTHE_DEFS_H */ MCMCpack/src/datablock.h0000644000176000001440000002476112140061657014554 0ustar ripleyusers /* * Scythe Statistical Library Copyright (C) 2000-2002 Andrew D. Martin * and Kevin M. Quinn; 2002-present Andrew D. Martin, Kevin M. Quinn, * and Daniel Pemstein. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify under the terms of the GNU General Public License as * published by Free Software Foundation; either version 2 of the * License, or (at your option) any later version. See the text files * COPYING and LICENSE, distributed with this source code, for further * information. * -------------------------------------------------------------------- * scythestat/datablock.h */ /*!
\file datablock.h * \brief Definitions of internal Matrix data management classes * * DataBlock and DataBlockReference objects provide the data half of * the data/view container model used by Scythe's matrices. A * DataBlock contains a data array of a given type, some information * about the DataBlock, and a reference count. Matrix objects * provide views to the DataBlock, thus allowing us to provide * Matrix objects that reference subsets of other matrices. When no * matrices remain that reference the DataBlock the reference count * falls to zero and the block is automatically deallocated. * * DataBlock uses a simple doubling/halving memory allocation scheme * but this may change in later releases. * * The DataBlock classes are used exclusively within the library and * do not constitute a part of Scythe's public interface. * * Based on code in Blitz++ (http://www.oonumerics.org/blitz/) by * Todd Veldhuizen . Blitz++ is * distributed under the terms of the GNU GPL. */ #ifndef SCYTHE_DATABLOCK_H #define SCYTHE_DATABLOCK_H #ifdef SCYTHE_COMPILE_DIRECT #include "error.h" #else #include "scythestat/error.h" #endif #ifdef SCYTHE_PTHREAD #include #endif namespace scythe { /* Convenience typedefs */ namespace { // local to this file typedef unsigned int uint; } /*! \brief Handles Matrix data internals. * * Handles data allocation, reallocation, and deletion of blocks of * elements; the actual data Matrix objects point to. Keeps a * reference count. */ template class DataBlock { public: /**** CONSTRUCTORS ****/ /* * Create an empty data block. */ DataBlock () : data_ (0), size_ (0), refs_ (0) {} /* * Create a block of a given size. */ explicit DataBlock (uint size) : data_ (0), size_ (0), refs_ (0) { resize(size); SCYTHE_DEBUG_MSG("Constructed new " << size << "(" << size_ << ") DataBlock at address " << data_); } /* * Create an exact copy of another data block. */ DataBlock (const DataBlock& b) : data_ (b.data_), size_ (b.size_), refs_ (b.refs_) {} /**** DESTRUCTOR ****/ ~DataBlock () { SCYTHE_DEBUG_MSG("Destructing block at " << data_); deallocate(); } /**** REFERENCE COUNTING ****/ inline uint addReference () { SCYTHE_DEBUG_MSG("Added reference to DataBlock at address " << data_); return ++refs_; } inline uint removeReference () { SCYTHE_DEBUG_MSG("Removed reference to DataBlock at address " << data_); return --refs_ ; } inline uint references () { return refs_; } /**** ACCESSORS ****/ inline T_type* data() { return data_; } inline const T_type* data() const { return data_; } inline uint size () const { return size_; } protected: /**** (DE)ALLOCATION AND RESIZING ****/ /* Allocate data given the current block size. */ inline void allocate (uint size) { /* TODO Think about cache boundary allocations for big blocks * see blitz++ */ if (data_ != 0) // Get rid of previous allocation if it exists deallocate(); data_ = new (std::nothrow) T_type[size]; SCYTHE_CHECK_10(data_ == 0, scythe_alloc_error, "Failure allocating DataBlock of size " << size); } /* Deallocate a block's data */ inline void deallocate () { SCYTHE_DEBUG_MSG(" Deallocating DataBlock of size " << size_ << " at address " << data_); delete[] data_; data_ = 0; } public: /* TODO At the moment, references call this method directly. Not * sure if this is the best interface choice. */ /* Resize a block. */ void resize (uint newsize) { if (newsize > size_) grow(newsize); else if (newsize < size_ / 4) shrink(); } protected: /* Make a block larger. Expects to be called by resize and does * not reset the size_ variable. 
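 *
 * Worked example (an editorial illustration, not original
 * documentation): if a block currently holds 6 elements and
 * resize(100) is requested, grow() doubles the capacity
 * 6 -> 12 -> 24 -> 48 -> 96 -> 192 and then allocates a 192-element
 * array, so a sequence of small, incremental resizes does not trigger
 * a fresh allocation at every step.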
*/ inline void grow (uint newsize) { size_ = size_ ? size_ : 1; // make sure not zero /* TODO Can we speed this up? In 20 iters we're at * 1048576 elems doing the math might be more costly... */ while (size_ < newsize) size_ <<= 1; allocate(size_); } /* Make a block smaller. Expects to be called by resize */ inline void shrink () { size_ >>= 1; allocate(size_); } private: /**** INSTANCE VARIABLES ****/ T_type *data_; // The data array uint size_; // The number of elements in the block uint refs_; // The number of views looking at this block }; // end class DataBlock /*! \brief Null data block object. * * A nice little way to represent empty data blocks. */ template <typename T_type> class NullDataBlock : public DataBlock<T_type> { typedef DataBlock<T_type> T_base; public: NullDataBlock () : DataBlock<T_type> () { // never want to deallocate (or resize) this one T_base::addReference(); SCYTHE_DEBUG_MSG("Constructed NULL datablock"); } ~NullDataBlock () {} }; // end class NullDataBlock /*! * \brief Handle to DataBlock objects. * * Matrices inherit from this object. It provides a handle into * DataBlock objects and automates cleanup when the referenced * object runs out of referents. */ template <typename T_type> class DataBlockReference { public: /**** CONSTRUCTORS ****/ /* Default constructor: points the object at a static null block */ DataBlockReference () : data_ (0), block_ (&nullBlock_) { #ifdef SCYTHE_PTHREAD pthread_mutex_lock (&ndbMutex_); #endif block_->addReference(); #ifdef SCYTHE_PTHREAD pthread_mutex_unlock (&ndbMutex_); #endif } /* New block constructor: creates a new underlying block of a * given size and points at it. */ explicit DataBlockReference (uint size) : data_ (0), block_ (0) { block_ = new (std::nothrow) DataBlock<T_type> (size); SCYTHE_CHECK_10 (block_ == 0, scythe_alloc_error, "Could not allocate DataBlock object"); data_ = block_->data(); block_->addReference(); } /* Reference to an existing block constructor: points to an * offset within an existing block. */ DataBlockReference (const DataBlockReference& reference, uint offset = 0) : data_ (reference.data_ + offset), block_ (reference.block_) { #ifdef SCYTHE_PTHREAD bool lock = false; if (block_ == &nullBlock_) { pthread_mutex_lock (&ndbMutex_); lock = true; } #endif block_->addReference(); #ifdef SCYTHE_PTHREAD if (lock) pthread_mutex_unlock (&ndbMutex_); #endif } /**** DESTRUCTOR ****/ /* Automates removal of underlying block objects when refcount * hits nil. */ virtual ~DataBlockReference () { #ifdef SCYTHE_PTHREAD bool lock = false; if (block_ == &nullBlock_) { pthread_mutex_lock (&ndbMutex_); lock = true; } #endif withdrawReference(); #ifdef SCYTHE_PTHREAD if (lock) pthread_mutex_unlock (&ndbMutex_); #endif } protected: /**** MEMBERS CALLED BY DERIVED CLASS ****/ void referenceOther (const DataBlockReference& ref, uint offset = 0) { #ifdef SCYTHE_PTHREAD bool lock = false; if (block_ == &nullBlock_ || ref.block_ == &nullBlock_) { pthread_mutex_lock (&ndbMutex_); lock = true; } #endif withdrawReference (); block_ = ref.block_; block_->addReference(); data_ = ref.data_ + offset; #ifdef SCYTHE_PTHREAD if (lock) pthread_mutex_unlock (&ndbMutex_); #endif } void referenceNew (uint size) { #ifdef SCYTHE_PTHREAD bool lock = false; if (block_ == &nullBlock_) { pthread_mutex_lock (&ndbMutex_); lock = true; } #endif /* If we are the only referent to this data block, resize it. * Otherwise, shift the reference to point to a newly * constructed block.
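 *
 * (Editorial note, an illustrative reading of the code below rather
 * than original documentation: this branch is what gives matrices
 * copy-on-resize behaviour. Two Matrix objects may share a single
 * DataBlock, but the moment one of them needs storage of a new size
 * it detaches onto a freshly allocated block, leaving the other
 * referent's data untouched.)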
*/ if (block_->references() == 1) { block_->resize(size); data_ = block_->data(); // This is a pretty good indication // that the interface and implementation are too tightly // coupled for resizing. } else { withdrawReference(); block_ = 0; block_ = new (std::nothrow) DataBlock (size); SCYTHE_CHECK_10(block_ == 0, scythe_alloc_error, "Could not allocate new data block"); data_ = block_->data(); block_->addReference(); } #ifdef SCYTHE_PTHREAD if (lock) pthread_mutex_unlock (&ndbMutex_); #endif } private: /**** INTERNAL MEMBERS ****/ void withdrawReference () { // All calls to withdrawReference are mutex protected and protecting // this too can create a race condition. if (block_->removeReference() == 0 && block_ != &nullBlock_) delete block_; } void referenceNull () { #ifdef SCYTHE_PTHREAD pthread_mutex_lock (&ndbMutex_); #endif withdrawReference(); block_ = &nullBlock_; block_->addReference(); data_ = 0; #ifdef SCYTHE_PTHREAD pthread_mutex_unlock (&ndbMutex_); #endif } /**** INSTANCE VARIABLES ****/ protected: T_type* data_; // Pointer to the underlying data (offset) private: DataBlock* block_; static NullDataBlock nullBlock_; #ifdef SCYTHE_PTHREAD static pthread_mutex_t ndbMutex_; #endif }; // end class DataBlockReference /* Instantiation of the static null memory block */ template NullDataBlock DataBlockReference::nullBlock_; #ifdef SCYTHE_PTHREAD // mutex initialization template pthread_mutex_t DataBlockReference::ndbMutex_ = PTHREAD_MUTEX_INITIALIZER; #endif } // end namespace scythe #endif /* SCYTHE_DATABLOCK_H */ MCMCpack/src/algorithm.h0000644000176000001440000001470512140061657014617 0ustar ripleyusers/* * Scythe Statistical Library * Copyright (C) 2000-2002 Andrew D. Martin and Kevin M. Quinn; * 2002-present Andrew D. Martin, Kevin M. Quinn, and Daniel * Pemstein. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * under the terms of the GNU General Public License as published by * Free Software Foundation; either version 2 of the License, or (at * your option) any later version. See the text files COPYING * and LICENSE, distributed with this source code, for further * information. * -------------------------------------------------------------------- * scythestat/algorithm.h */ /*! \file algorithm.h * * \brief Generic algorithms for Scythe objects. * * This file provides implementations of a few algorithms that operate * on Scythe objects and also contains the definitions of a handful of * useful function objects. These functions and functors are primarily * intended for use within the library. We add algorithms to this * header as need arises and do not currently attempt to provide a * comprehensive set of generic algorithms for working with Scythe * matrices. * */ #ifndef SCYTHE_ALGORITHM_H #define SCYTHE_ALGORITHM_H #include #include #include #ifdef SCYTHE_COMPILE_DIRECT #include "defs.h" #include "matrix.h" #include "matrix_random_access_iterator.h" #else #include "scythestat/defs.h" #include "scythestat/matrix.h" #include "scythestat/matrix_random_access_iterator.h" #endif // These are just goofy #ifdef SCYTHE_RPACK #undef DO #undef DS #undef SO #undef SS #endif namespace scythe { namespace { typedef unsigned int uint; } /* Matrix forward declaration */ template class Matrix; /*! \brief A Functor encapsulating exponentiation. * * This function object wraps exponentiation operations for use in * generic algorithms. 
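 *
 * For example (an editorial illustration; the vectors named here are
 * hypothetical and not part of the original source), given three
 * equally sized std::vector<double> objects base, expo, and out, the
 * functor plugs into the standard binary transform:
 *
 *   std::transform(base.begin(), base.end(), expo.begin(),
 *                  out.begin(), exponentiate<double> ());
 *
 * which stores pow(base[i], expo[i]) in out[i].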
*/ template struct exponentiate : std::binary_function { T operator() (T base, T exp) const { return std::pow(base, exp); } }; /*! \brief A Functor encapsulating \f$ax+b\f$. * * This function object wraps the operation \f$ax+b\f$ for use in * generic algorithms, where a is some constant. */ template struct ax_plus_b : std::binary_function { T a_; ax_plus_b (T a) : a_ (a) {} T operator() (T x, T b) const { return (a_ * x + b); } }; /*! \brief Iterate through a Matrix in order. * * This function iterates through a Matrix, \a M, in order, * setting each element in the Matrix to the result of an invocation * of the function object, \a func. The () operator of \a func * should take two unsigned integer parameters (i - the row offset * into \a M; j - the column offset into \a M) and return a result * of type T. * * \param M The Matrix to iterate over. * \param func The functor to execute on each iteration. * */ template void for_each_ij_set (Matrix& M, FUNCTOR func) { if (O == Col) { for (uint j = 0; j < M.cols(); ++j) for (uint i = 0; i < M.rows(); ++i) M(i, j) = func(i, j); } else { for (uint i = 0; i < M.cols(); ++i) for (uint j = 0; j < M.rows(); ++j) M(i, j) = func(i, j); } } /*! \brief Copy the contents of one Matrix into another. * * This function copies the contents of one Matrix into * another, traversing each Matrix in the order specified by the * template terms ORDER1 and ORDER2. This function requires an * explicit template call that specifies ORDER1 and ORDER2. * * \param source The Matrix to copy. * \param dest The Matrix to copy into. */ template void copy(const Matrix& source, Matrix& dest) { std::copy(source.template begin_f(), source.template end_f(), dest.template begin_f()); } /*! \brief Copy the contents of one Matrix into another. * * This function copies the contents of one Matrix into * another, traversing each Matrix in the order specified by the * template terms ORDER1 and ORDER2. If \a source is larger than \a * dest, the function only copies as many elements from \a source as * will fit in \a dest. On the other hand, if \a source is smaller * than \a dest, the function will start over at the beginning of * \a source, recycling the contents of \a source as many times as * necessary to fill \a dest. This function requires an explicit * template call that specifies ORDER1 and ORDER2. * * \param source The Matrix to copy. * \param dest The Matrix to copy into. */ template void copy_recycle (const Matrix& source, Matrix& dest) { if (source.size() == dest.size()) { copy (source, dest); } else if (source.size() > dest.size()) { const_matrix_random_access_iterator s_iter = source.template begin(); std::copy(s_iter, s_iter + dest.size(), dest.template begin_f()); } else { const_matrix_random_access_iterator s_begin = source.template begin (); matrix_random_access_iterator d_iter = dest.template begin(); matrix_random_access_iterator d_end = dest.template end(); while (d_iter != d_end) { unsigned int span = std::min(source.size(), (unsigned int) (d_end - d_iter)); d_iter = std::copy(s_begin, s_begin + span, d_iter); } } } /*! \brief Determine the sign of a number. * * This function compares \a x to (T) 0, returning (T) 1 if \a x is * greater than zero, (T) -1 if \a x is less than zero, and (T) 0 * otherwise. * * \param x The value to check. 
*/ template inline T sgn (const T & x) { if (x > (T) 0) return (T) 1; else if (x < (T) 0) return (T) -1; else return (T) 0; } } // end namespace scythe #endif /* SCYTHE_ALGORITHM_H */ MCMCpack/README0000644000176000001440000000333012133644110012532 0ustar ripleyusers///////////////////// // MCMCpack README // ///////////////////// // Authors Andrew D. Martin Kevin M. Quinn Jong Hee Park // Compilation This package (along with Scythe) uses C++ and the Standard Template Library (STL). We suggest using of the GCC compiler 4.0 or greater. The current package has been tested using GCC 4.0 on Linux and MacOS X. Many thanks to Dan Pemstein for helping with all sorts of C++ issues, and to Kurt Hornik and Fritz Leisch for their help with debugging as well as their service to the R community. We are also very grateful to Brian Ripley who provided C++ patches to fix a number of clang and Solaris issues. // Acknowledgments We gratefully acknowledge support from: * National Science Foundation, Program in Methodology, Measurement, and Statistics, Grants SES-0350646 and SES-0350613 * Washington University, Department of Political Science, the Weidenbaum Center on the Economy, Government, and Public Policy (http://wc.wustl.edu), and the Center for Empirical Research in the Law (http://cerl.wustl.edu) * Harvard University, Department of Government and the Institute for Quantitative Social Sciences (http://iq.harvard.edu) Neither the National Science Foundation, Washington University, or Harvard University bear any responsibility for the content of this package. Please contact Andrew D. Martin if you have any problems or questions. -- Andrew D. Martin, Ph.D. Charles Nagel Chair of Constitutional Law and Political Science Vice Dean and CERL Director, School of Law Washington University in St. Louis (314) 935-5863 (Office) (314) 935-3836 (Fax) Email: admartin@wustl.edu WWW: http://adm.wustl.edu MCMCpack/R/0000755000176000001440000000000012140061656012062 5ustar ripleyusersMCMCpack/R/zzz.R0000644000176000001440000000246012133644103013040 0ustar ripleyusers########################################################################## ## start-up and clean-up functions ## ## This software is distributed under the terms of the GNU GENERAL ## PUBLIC LICENSE Version 2, June 1991. See the package LICENSE ## file for more information. ## ## Copyright (C) 2003-2007 Andrew D. Martin and Kevin M. Quinn ## Copyright (C) 2007-present Andrew D. Martin, Kevin M. Quinn, ## and Jong Hee Park ########################################################################## .onAttach <- function(...) { # figure out year automatically (probably could be done more elegantly) date <- date() x <- regexpr("[0-9]{4}", date) this.year <- substr(date, x[1], x[1] + attr(x, "match.length") - 1) # echo output to screen packageStartupMessage("##\n## Markov Chain Monte Carlo Package (MCMCpack)") packageStartupMessage("## Copyright (C) 2003-", this.year, " Andrew D. Martin, Kevin M. Quinn, and Jong Hee Park", sep="") packageStartupMessage("##\n## Support provided by the U.S. 
National Science Foundation") packageStartupMessage("## (Grants SES-0350646 and SES-0350613)\n##") ## require(coda, quietly=TRUE) ## require(MASS, quietly=TRUE) ## require(stats, quietly=TRUE) } .onUnload <- function(libpath) { library.dynam.unload("MCMCpack", libpath) } MCMCpack/R/utility.R0000644000176000001440000000257212133644103013712 0ustar ripleyusers########################################################################## ## Utility Functions ## ## This software is distributed under the terms of the GNU GENERAL ## PUBLIC LICENSE Version 2, June 1991. See the package LICENSE ## file for more information. ## ## Copyright (C) 2003-2007 Andrew D. Martin and Kevin M. Quinn ## Copyright (C) 2007-present Andrew D. Martin, Kevin M. Quinn, ## and Jong Hee Park ########################################################################## # takes a symmetric matrix x and returns lower diagonal # note: does not check for symmetry # # ADM 4/18/2003 "vech" <- function (x) { x <- as.matrix(x) if (dim(x)[1] != dim(x)[2]) { stop("Non-square matrix passed to vech().\n") } output <- x[lower.tri(x, diag = TRUE)] dim(output) <- NULL return(output) } # takes vector x and returns an nrow times nrow symmetric matrix # this will recycle the elements of x as needed to fill the matrix # # ADM 4/18/2003 # ADM 11/13/2003 [bug fix] # ADM 1/25/2006 [patch to automatically compute matrix size] "xpnd" <- function (x, nrow = NULL) { dim(x) <- NULL if(is.null(nrow)) nrow <- (-1 + sqrt(1 + 8 * length(x))) / 2 output <- matrix(0, nrow, nrow) output[lower.tri(output, diag = TRUE)] <- x hold <- output hold[upper.tri(hold, diag=TRUE)] <- 0 output <- output + t(hold) return(output) } MCMCpack/R/tomog.R0000644000176000001440000001020312133644103013322 0ustar ripleyusers########################################################################## ## Tomography Plots for Ecological Inference ## ## produces tomography plots (see King, 1997, A Solution to the ## Ecological Inference Problem, Princeton University Press) ## ## This software is distributed under the terms of the GNU GENERAL ## PUBLIC LICENSE Version 2, June 1991. See the package LICENSE ## file for more information. ## ## KQ 11/9/2002 ## ## Modification added suggested by David Hugh-Jones 6/10/2006 ## ## Copyright (C) 2003-2007 Andrew D. Martin and Kevin M. Quinn ## Copyright (C) 2007-present Andrew D. Martin, Kevin M. Quinn, ## and Jong Hee Park ########################################################################## "tomogplot" <- function(r0, r1, c0, c1, xlab="fraction of r0 in c0 (p0)", ylab="fraction of r1 in c0 (p1)", bgcol="white", ...) 
{ if (length(r0) != length(r1)) { stop("r0 and r1 different lengths in tomogplot().\n") } if (length(r0) != length(c0)) { stop("r0 and c0 different lengths in tomogplot().\n") } if (length(r0) != length(c1)) { stop("r0 and c1 different lengths in tomogplot().\n") } intercept <- c0/r1 slope <- -1 * r0/r1 N <- length(r0) par(pty="s") plot(0:1, 0:1, type="n", main="", xlab=xlab, ylab=ylab) rect(0, 0, 1, 1, col=bgcol, lty=0) for (year in 1:N) { if (is.finite(intercept[year]) & is.finite(slope[year])) abline(intercept[year], slope[year]) else abline(v=c0[year]/(c0[year]+c1[year])) } rect(-0.05, -0.05, 1.05, 0, col="white", lty=0) rect(-0.05, -0.05, 0, 1.05, col="white", lty=0) rect(-0.05, 1, 1.05, 1.05, col="white", lty=0) rect(1, -0.05, 1.05, 1.05, col="white", lty=0) box() par(pty="m") return(0) } ## produces temporally organized tomography plots ## (see King, 1997, A Solution to the Ecological Inference ## Problem, Princeton University Press) ## ## KQ 11/9/2002 ## Modification added suggested by David Hugh-Jones 6/10/2006 "dtomogplot" <- function(r0, r1, c0, c1, time.vec=NA, delay=0, xlab="fraction of r0 in c0 (p0)", ylab="fraction of r1 in c0 (p1)", color.palette=heat.colors, bgcol="black", ...) { if (length(r0) != length(r1)){ stop("r0 and r1 different lengths in dtomogplot().\n") } if (length(r0) != length(c0)){ stop("r0 and c0 different lengths in dtomogplot().\n") } if (length(r0) != length(c1)){ stop("r0 and c1 different lengths in dtomogplot().\n") } if (length(r0) != length(time.vec) & !is.na(time.vec)[1]){ stop("r0 and time.vec different lengths in dtomogplot().\n") } intercept <- c0/r1 slope <- -1 * r0/r1 N <- length(r0) if (is.na(time.vec)[1]) time.vec <- 1:N col.vec <- color.palette(N) mar.orig <- (par.orig <- par(c("mar", "las", "mfrow")))$mar on.exit(par(par.orig)) w <- (3 + mar.orig[2]) * par("csi") * 2.54 layout(matrix(c(2,1), ncol=2), widths=c(1,lcm(w))) par(las=1) mar <- mar.orig mar[4] <- mar[2] mar[2] <- 1 par(mar=mar) par(pty="m") plot.new() plot.window(xlim=c(0,1), ylim=range(time.vec), xaxs="i", yaxs="i") rect(0, time.vec[-length(time.vec)], 1, time.vec[-1], col=col.vec) axis(4) box() mar <- mar.orig mar[4] <- 1 par(mar=mar) par(pty="s") plot(0:1, 0:1, type="n", main="", xlab=xlab, ylab=ylab) rect(0, 0, 1, 1, col=bgcol, lty=0) for (year in 1:N) { time.last <- proc.time()[3] time.next <- proc.time()[3] while ( (time.next - time.last) < delay ){ time.next <- proc.time()[3] } if (is.finite(intercept[year]) & is.finite(slope[year])) abline(intercept[year], slope[year], col=col.vec[year]) else abline(v=c0[year]/(c0[year]+c1[year]), col=col.vec[year]) } rect(-0.05, -0.05, 1.05, 0, col="white", lty=0) rect(-0.05, -0.05, 0, 1.05, col="white", lty=0) rect(-0.05, 1, 1.05, 1.05, col="white", lty=0) rect(1, -0.05, 1.05, 1.05, col="white", lty=0) box() par(pty="m") return(0) } MCMCpack/R/testpanelSubjectBreak.R0000644000176000001440000000623612133644103016474 0ustar ripleyusers#################################################################### ## test subject-level breaks from panel residuals ## ## written by Jong Hee Park 03/2009 ## modified and integrated with other codes by JHP 07/2011 ## fixed a starting.id and ending.id ###################################################################### "testpanelSubjectBreak" <- function(subject.id, time.id, resid, max.break=2, minimum = 10, mcmc=1000, burnin=1000, thin=1, verbose=0, b0, B0, c0, d0, a = NULL, b = NULL, seed = NA, Time = NULL, ps.out = FALSE){ ## seeds seeds <- form.seeds(seed) lecuyer <- seeds[[1]] seed.array <- 
seeds[[2]] lecuyer.stream <- seeds[[3]] ## Data N <- length(subject.id) ## groupinfo matrix ## col1: subj ID, col2: offset (first time C indexing), col3: #time periods if (min(subject.id) != 1){ stop("subject.id should start 1!") } if (min(time.id) != 1){ stop("time.id should start 1!") } if (is.null(Time)){ Time <- rep(N, 1) } NC <- length(unique(subject.id)) time.list <- as.numeric(table(subject.id)) ## Make a residula list resid.list <- as.list(rep(NA, NC)) start <- 1; end <- 0 for (i in 1:NC){ end <- start + time.list[i] - 1 resid.list[[i]] <- ts(resid[start:end], start=Time[start]) start <- end + 1 } ## Do the break analysis BFout <- matrix(NA, NC, max.break + 1) if (ps.out ==TRUE){ psout <- NULL } else { psout <- array(NA, c(max(time.list), sum(2:(max.break+1)), NC)) } for (i in 1:NC){ residual <- resid.list[[i]] nk <- length(residual) out <- as.list(rep(NA, max.break)) if(nk > minimum){ for (k in 0:max.break){ out[[k+1]] <- MCMCpack:::MCMCresidualBreakAnalysis(residual, m=k, b0=b0, B0=B0, c0=c0, d0=d0, a=a, b=b, burnin=burnin, mcmc=mcmc, thin=thin, verbose=verbose, marginal.likelihood="Chib95") if (ps.out ==TRUE&k>0){ if(k==1){ start <- 1 } else{ start <- sum(2:k)+1 } probstate <- attr(out[[k+1]], "prob.state") psout[1:length(probstate[,1]), start:(start+k), i] <- probstate } ## if no convergence diagnostic BFout[i, k+1] <- attr(out[[k+1]], "logmarglike") } } if (verbose > 0){ cat("\n ------------------------------------------------------------- ") cat("\n Break analysis for subject=", i, "is just finished! \n") } } if (ps.out ==TRUE){ attr(BFout, "psout") <- psout } model.prob.mat <- matrix(NA, NC, max.break + 1) for (i in 1:NC){ model.prob <- exp(BFout[i, ])/sum(exp(BFout[i, ])) winner <- which.max(model.prob) if (verbose > 0){ cat("\nPr(no residual break) for subject", i, "=", model.prob[1]) } model.prob.mat[i,] <- model.prob } attr(BFout, "model.prob") <- model.prob.mat return(BFout) } MCMCpack/R/testpanelGroupBreak.R0000644000176000001440000001542212133644103016166 0ustar ripleyusers#################################################################### ## test group-level breaks from panel residuals ## ## written by Jong Hee Park 03/2009 ## modified and integrated with other codes by JHP 07/2011 ###################################################################### "testpanelGroupBreak" <- function(subject.id, time.id, resid, m=1, mcmc=1000, burnin=1000, thin=1, verbose=0, b0, B0, c0, d0, a = NULL, b = NULL, seed = NA, marginal.likelihood = c("none", "Chib95"), ...){ ## beta.start and sigma2.start are not arguments. OLS estimates will be used! ## subject.id is a numeric list indicating the group number. It should start from 1. ## time.id is a numeric list indicating the time, starting from 1. 
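  ## Illustrative encoding (an editorial example, not part of the
  ## original source): with 3 subjects each observed in the same 2
  ## periods, one valid input would be
  ##   subject.id <- c(1, 1, 2, 2, 3, 3)
  ##   time.id    <- c(1, 2, 1, 2, 1, 2)
  ## i.e. both identifiers are integer vectors starting at 1 with one
  ## entry per observation, the same length as resid.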
## subject.offset is the obs number from which a new subject unit starts ## time.offset is the obs number from which a new time unit starts when we stack data by time.id cl <- match.call() ## seeds seeds <- form.seeds(seed) lecuyer <- seeds[[1]] seed.array <- seeds[[2]] lecuyer.stream <- seeds[[3]] ## Data ns <- m + 1 nobs <- length(subject.id) newY <- matrix(resid, nobs, 1) newX <- matrix(1, nobs, 1) K <- 1 ## Sort Data based on time.id oldTSCS <- cbind(time.id, subject.id, newY, newX) newTSCS <- oldTSCS[order(oldTSCS[,1]),] newYT <- as.matrix(newTSCS[,3]) newXT <- as.matrix(newTSCS[,4]) b0 <- as.matrix(b0) B0 <- as.matrix(B0) nstore <- mcmc/thin nsubj <- length(unique(subject.id)) ## subject.groupinfo matrix if (unique(subject.id)[1] != 1){ stop("subject.id should start 1!") } subject.offset <- c(0, which(diff(sort(subject.id))==1)[-nsubj]) nsubject.vec <- rep(NA, nsubj) for (i in 1:nsubj){ nsubject.vec[i] <- sum(subject.id==unique(subject.id)[i]) } subject.groupinfo <- cbind(unique(subject.id), subject.offset, nsubject.vec) ## time.groupinfo if(unique(time.id)[1] != 1){ time.id <- time.id - unique(time.id)[1] + 1 cat("time.id does not start from 1. So it is modified by subtracting the first unit of time.") } ntime <- max(nsubject.vec)## maximum time length ntime.vec <- rep(NA, ntime) for (i in 1:ntime){ ntime.vec[i] <- sum(time.id==unique(time.id)[i]) } time.offset <- c(0, which(diff(sort(time.id))==1)[-ntime]) time.groupinfo <- cbind(unique(time.id), time.offset, ntime.vec) ## prior inputs if (m > 0){ P0 <- trans.mat.prior(m=m, n=ntime, a=a, b=b) } else { P0 <- matrix(1, 1, 1) } betadraws <- matrix(data=0, nstore, ns*K) sigmadraws <- matrix(data=0, nstore, ns) psdraws <- matrix(data=0, ntime, ns) ols <- lm(newY ~ newX - 1) beta.start <- rep(coef(ols)[1], ns) sigma2.start <- summary(ols)$sigma^2 ## get marginal likelihood argument marginal.likelihood <- match.arg(marginal.likelihood) ## following MCMCregress, set chib as binary logmarglike <- loglik <- NULL chib <- 0 if (marginal.likelihood == "Chib95"){ chib <- 1 } ## call C++ code to draw sample posterior <- .C("HMMmultivariateGaussian", betadata = as.double(betadraws), betarow = as.integer(nrow(betadraws)), betacol = as.integer(ncol(betadraws)), sigmadata = as.double(sigmadraws), psout = as.double(psdraws), nsubj = as.integer(nsubj), ntime = as.integer(ntime), m = as.integer(m), nobs = as.integer(nobs), subjectid = as.integer(subject.id), timeid = as.integer(time.id), Ydata = as.double(newY), Yrow = as.integer(nrow(newY)), Ycol = as.integer(ncol(newY)), Xdata = as.double(newX), Xrow = as.integer(nrow(newX)), Xcol = as.integer(ncol(newX)), YTdata = as.double(newYT), XTdata = as.double(newXT), burnin = as.integer(burnin), mcmc = as.integer(mcmc), thin = as.integer(thin), verbose = as.integer(verbose), lecuyer = as.integer(lecuyer), seedarray = as.integer(seed.array), lecuyerstream = as.integer(lecuyer.stream), betastartdata = as.double(beta.start), sigma2start = as.double(sigma2.start), b0data = as.double(b0), B0data = as.double(B0), c0 = as.double(c0), d0 = as.double(d0), P0data = as.double(P0), P0row = as.integer(nrow(P0)), P0col = as.integer(ncol(P0)), subject_groupinfodata = as.double(subject.groupinfo), time_groupinfodata = as.double(time.groupinfo), logmarglikeholder = as.double(0), loglikeholder = as.double(0), chib = as.integer(chib), PACKAGE="MCMCpack" ) ## pull together matrix and build MCMC object to return beta.samp <- matrix(posterior$betadata, posterior$betarow, posterior$betacol) ## stored by the order of (11, 12, 13, 
21, 22, 23) sigma.samp <- matrix(posterior$sigmadata, posterior$betarow, ns) xnames <- sapply(c(1:K), function(i){paste("beta", i, sep = "")}) output1 <- mcmc(data=beta.samp, start=burnin+1, end=burnin + mcmc, thin=thin) output2 <- mcmc(data=sigma.samp, start=burnin+1, end=burnin + mcmc, thin=thin) if(m>1){ varnames(output1) <- sapply(c(1:ns), function(i){ paste(xnames, "_regime", i, sep = "") }) varnames(output2) <- sapply(c(1:ns), function(i){ paste("sigma2_regime", i, sep = "") }) } output <- as.mcmc(cbind(output1, output2)) attr(output, "title") <- "testpanelGroupBreak Posterior Sample" attr(output, "call") <- cl attr(output, "y") <- resid[1:ntime] attr(output, "m") <- m attr(output, "nsubj") <- nsubj attr(output, "ntime") <- ntime if(m>0){ ps.holder <- matrix(posterior$psout, ntime, ns) attr(output, "prob.state") <- ps.holder/nstore } attr(output, "logmarglike") <- posterior$logmarglikeholder attr(output, "loglike") <- posterior$loglikeholder ## report the results in a simple manner return(output) } MCMCpack/R/SSVSquantregsummary.R0000644000176000001440000000507512133644103016173 0ustar ripleyusers"print.qrssvs"<-function(x, ...){ x.orig<-x cat("Quantile regression stochastic search \nvariable selection (QR-SSVS) output:\nStart = ", attr(x,"mcpar")[1], "\nEnd = ", attr(x,"mcpar")[2], "\nThinning interval = ", attr(x,"mcpar")[3], "\n") attr(x, "mcpar") <- NULL attr(x, "class") <- NULL NextMethod("print", ...) invisible(x.orig) } "mptable"<-function(qrssvs){ if (!is(qrssvs, "qrssvs")){ stop("Can only be used on objects of class qrssvs.\n") } ssvs.start <- attr(qrssvs, "mcpar")[1] ssvs.end <- attr(qrssvs, "mcpar")[2] ssvs.thin <- attr(qrssvs, "mcpar")[3] nstore <- (ssvs.end-ssvs.start)/ssvs.thin + 1 probs<-apply(qrssvs,2,function(z){length(which(z==1))})/nstore return(data.frame(Probability=probs)) } "topmodels"<-function(qrssvs, nmodels=5, abbreviate=FALSE, minlength=3){ if (!is(qrssvs, "qrssvs")){ stop("Can only be used on objects of class qrssvs.\n") } ssvs.start <- attr(qrssvs, "mcpar")[1] ssvs.end <- attr(qrssvs, "mcpar")[2] ssvs.thin <- attr(qrssvs, "mcpar")[3] nstore <- (ssvs.end-ssvs.start)/ssvs.thin + 1 xnames <- attr(qrssvs, "xnames") if (abbreviate){ xnames <- abbreviate(xnames, minlength) } model.list<-apply(qrssvs,1,function(z)xnames[which(z==1)]) model.vector<-sapply(model.list, function(z)paste(z, collapse=",")) model.count<-sort(table(model.vector), decreasing=T)/nstore if (nmodels>length(model.count)){ warning("Number of models requested exceeds total number of models visited.\n") } if (rownames(model.count)[1]==""){ rownames(model.count)[1]<-"Null model" } return(data.frame(Probability=model.count[1:(min(nmodels, length(model.count)))])) } "plot.qrssvs"<-function(x, ...){ probs<-mptable(x) dotplot(as.matrix(probs), panel=function(x, y, ...){ panel.abline(v=0.5, lty=3) panel.dotplot(x, y, ...) }, origin=0, type=c("p","h"), pch=16, xlim=c(-0.05,1.05), scales = list(x = list(at = c(0,0.2,0.4,0.5,0.6,0.8,1))), xlab="Marginal inclusion probability", ...) } "summary.qrssvs"<-function(object, ...){ covnames <- attr(object, "xnames") probs<-mptable(object) median.model<-covnames[probs>=0.5] results<-probs attr(results, "median.model")<-median.model attr(results, "tau")<-attr(object, "tau") class(results)<-"summary.qrssvs" return(results) } "print.summary.qrssvs"<-function(x, digits=max(3, .Options$digits-3), ...){ attr(x, "class")<-"data.frame" cat("\nMarginal inclusion probability of each predictor:\n\n") print(x, digits=digits, ...) 
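  ## The lines below report the median probability model computed in
  ## summary.qrssvs(), i.e. the set of predictors whose marginal
  ## inclusion probability is at least 0.5.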
cat("\nFor tau = ", attr(x,"tau"), ", the median probability model \nincludes the following predictors:\n\n", paste(attr(x, "median.model"), collapse=", "), ".\n\n", sep="") invisible(x) } MCMCpack/R/SSVSquantreg.R0000644000176000001440000000566012133644103014555 0ustar ripleyusers"SSVSquantreg" <- function(formula, data=NULL, tau=0.5, include=NULL, burnin=1000, mcmc = 10000, thin=1, verbose = 0, seed = sample(1:1000000,1), pi0a0=1, pi0b0=1, ...) { ## checks check.offset(list(...)) if (pi0a0<=0 || pi0b0<=0){ stop("Parameters pi0a0 and pi0b0 must be positive. \nPlease respecify and call again.\n") } cl <- match.call() if (tau<=0 || tau>=1){ stop("tau must be in (0,1). \nPlease respecify and call again.\n") } ## seeds seeds <- form.seeds(seed) lecuyer <- seeds[[1]] seed.array <- seeds[[2]] lecuyer.stream <- seeds[[3]] ## form response and model matrices holder <- parse.formula(formula, data=data) Y <- holder[[1]] X <- holder[[2]] xnames <- holder[[3]] K <- ncol(X) # number of covariates q <- length(include) #number of covariates that are pre-specified to appear in the model if (!is.null(include)){ if(is.character(include)){ if(!all(include%in%xnames)){ include.positions<-NA } else{ include.positions<-match(include, xnames) } } else{ if(max(include)>length(xnames)){ include.positions<-NA } else{ include.positions<-include } } if (any(is.na(include.positions))){ stop("One or more covariates to be included are not present in the design matrix\n or one or more indices are out of range. Please respecify and call again.\n") } ## Bring those covariates that are pre-specified to appear in the model to the first positions in the X matrix X <- cbind(X[,include.positions], X[,-include.positions]) xnames <- c(xnames[include.positions], xnames[-include.positions]) } ## define holder for posterior sample sample <- matrix(data=0, mcmc/thin, 2*K) posterior <- NULL ## call C++ code to draw sample auto.Scythe.call(output.object="posterior", cc.fun.name="SSVSquantreg", sample.nonconst=sample, tau=as.double(tau), Y=Y, X=X, q=as.integer(q), burnin=as.integer(burnin), mcmc=as.integer(mcmc), thin=as.integer(thin), lecuyer=as.integer(lecuyer), seedarray=as.integer(seed.array), lecuyerstream=as.integer(lecuyer.stream), verbose=as.integer(verbose), pi0a0 = as.double(pi0a0), pi0b0=as.double(pi0b0), package="MCMCpack") output <- form.mcmc.object(posterior, names=rep(xnames, times=2), title="SSVSquantreg Posterior Sample", y=Y, call=cl ) gammasample<-output[,1:K] attr(gammasample, "tau")<-tau attr(gammasample, "xnames")<-xnames attr(gammasample, "class")<-"qrssvs" betasample<-output[,-(1:K)] attr(betasample,"tau")<-tau attr(gammasample, "call") <- attr(betasample, "call") <- cl return(list(gamma=gammasample,beta=betasample)) } MCMCpack/R/scythe.R0000644000176000001440000000373112133644103013504 0ustar ripleyusers########################################################################## ## Scythe Inter-Operation Functions ## ## This software is distributed under the terms of the GNU GENERAL ## PUBLIC LICENSE Version 2, June 1991. See the package LICENSE ## file for more information. ## ## Copyright (C) 2003-2007 Andrew D. Martin and Kevin M. Quinn ## Copyright (C) 2007-present Andrew D. Martin, Kevin M. Quinn, ## and Jong Hee Park ########################################################################## # writes a matrix out to an ASCII file that can be read by Scythe. # it puts the number of rows and columns in the first row # followed by the data. 
# # ADM 1/29/2003 "write.Scythe" <- function(outmatrix, outfile = NA, overwrite=FALSE) { outmatrix <- as.matrix(outmatrix) if(is.na(outfile)) { stop("Please specify a file name in the write.Scythe() call.\n") } if(overwrite==FALSE & file.exists(outfile)) { cat("File already exists in the write.Scythe() call.\n") stop("Either delete the file, or flip the overwrite switch.\n") } outfile <- file(outfile, "w") cat(dim(outmatrix), "\n", file=outfile) write.table(outmatrix, file=outfile, row.names=FALSE, col.names=FALSE, quote=FALSE) close(outfile) return(0) } # reads in a matrix from an ASCII file written by Scythe. # the number of rows and columns should be in the first row followed # by the data. # # Kevin Rompala 5/1/2003 # fixed by ADM 7/25/2004 "read.Scythe" <- function(infile = NA) { if(is.na(infile)) { stop("Please specify a file name in the read.Scythe() call.\n") } if(!file.exists(infile)) { stop("Specified source file does not exist in read.Scythe() call.\n") } infile <- file(infile, "r") dimensions <- scan(file=infile,n=2) inputdata <- scan(file=infile) close(infile) hold <- matrix(data=inputdata, nrow=dimensions[1], ncol=dimensions[2], byrow=TRUE) return(hold) } MCMCpack/R/procrust.R0000644000176000001440000000364712133644103014074 0ustar ripleyusers########################################################################## ## function that performs procrustes transformation on X with target Xstar ## ## returns the rotation matrix R, translation vector tt, ## and dilation factor s for which: ## ## s X R + 1 tt' \approx Xstar ## ## along with X.new = s X R + 1 tt' ## ## Based on Borg and Groenen 1997. Modern Multidimensional ## Scaling. New York: Springer. pp. 340-342. ## ## This software is distributed under the terms of the GNU GENERAL ## PUBLIC LICENSE Version 2, June 1991. See the package LICENSE ## file for more information. ## ## Kevin Quinn ## Harvard University ## 6/13/2005 ## ## Copyright (C) 2003-2007 Andrew D. Martin and Kevin M. Quinn ## Copyright (C) 2007-present Andrew D. Martin, Kevin M. Quinn, ## and Jong Hee Park ########################################################################## procrustes <- function(X, Xstar, translation=FALSE, dilation=FALSE){ if (nrow(X) != nrow(Xstar)){ cat("X and Xstar do not have same number of rows.\n") stop("Check data and call procrustes() again. \n") } if (ncol(X) != ncol(Xstar)){ cat("X and Xstar do not have same number of columns.\n") stop("Check data and call procrustes() again. \n") } n <- nrow(X) m <- ncol(X) if (translation){ J <- diag(n) - 1/n * matrix(1, n, n) } else{ J <- diag(n) } C <- t(Xstar) %*% J %*% X svd.out <- svd(C) R <- svd.out$v %*% t(svd.out$u) s <- 1 if (dilation){ mat1 <- t(Xstar) %*% J %*% X %*% R mat2 <- t(X) %*% J %*% X s.numer <- 0 s.denom <- 0 for (i in 1:m){ s.numer <- s.numer + mat1[i,i] s.denom <- s.denom + mat2[i,i] } s <- s.numer / s.denom } tt <- matrix(0, m, 1) if (translation){ tt <- 1/n * t(Xstar - s*X %*% R) %*% matrix(1, n, 1) } X.new <- s * X %*% R + matrix(tt, nrow(X), ncol(X), byrow=TRUE) return(list(X.new=X.new, R=R, tt=tt, s=s)) } MCMCpack/R/MCmodels.R0000644000176000001440000000741212133644103013710 0ustar ripleyusers########################################################################## ## simple instructional models using Monte Carlo simulation ## ## This software is distributed under the terms of the GNU GENERAL ## PUBLIC LICENSE Version 2, June 1991. See the package LICENSE ## file for more information. ## ## Copyright (C) 2003-2007 Andrew D. Martin and Kevin M. 
Quinn ## Copyright (C) 2007-present Andrew D. Martin, Kevin M. Quinn, ## and Jong Hee Park ########################################################################## ## Monte Carlo simulation from the likelihood of a ## binomial distribution with a Beta(alpha, beta) prior ## ADM 1/25/2006 MCbinomialbeta <- function(y, n, alpha=1, beta=1, mc=1000, ...) { # check data if(y < 0) { cat("Error: Number of successes negative.\n") stop("Please respecify and call function again.") } if(n < 0) { cat("Error: Number of trials negative.\n") stop("Please respecify and call function again.") } if(y > n) { cat("Error: Number of successes greater than number of trials.\n") stop("Please respecify and call function again.") } # check other parameters check.beta.prior(alpha, beta) check.mc.parameter(mc) # draw sample and return output <- mcmc(matrix(rbeta(mc, alpha+y, beta+n-y),mc,1)) varnames(output) <- as.list("pi") attr(output,"title") <- "MCbinomialbeta Posterior Sample" return(output) } ## Monte Carlo simulation from the likelihood of a ## Poisson distribution with a Gamma(alpha, beta) prior ## ADM 1/25/2006 MCpoissongamma <- function(y, alpha, beta, mc=1000, ...) { # check data if(any(y < 0)) { cat("Error: Some counts negative in y.\n") stop("Please respecify and call function again.") } n <- length(y) # check other parameters check.gamma.prior(alpha, beta) check.mc.parameter(mc) # draw sample and return output <- mcmc(matrix(rgamma(mc, alpha+sum(y), beta+n),mc,1)) varnames(output) <- as.list("lambda") attr(output,"title") <- "MCpoissongamma Posterior Sample" return(output) } ## Monte Carlo simulation from the likelihood of a ## Normal distribution with a Normal(mu0, tau20) prior ## the variance sigma2 is known ## ADM 1/26/2006 MCnormalnormal <- function(y, sigma2, mu0, tau20, mc=1000, ...) { n <- length(y) if(sigma2 <= 0) { cat("Error: Known variance sigma2 is less than or equal to zero.\n") stop("Please respecify and call function again.") } # check other parameters check.normal.prior(mu0, tau20) check.mc.parameter(mc) # draw sample and return mu1 = (1/tau20 * mu0 + n/sigma2 * mean(y)) / (1/tau20 + n/sigma2) tau21 = 1 / (1/tau20 + n/sigma2) output <- mcmc(matrix(rnorm(mc, mu1, sqrt(tau21)),mc,1)) varnames(output) <- as.list("mu") attr(output,"title") <- "MCnormalnormal Posterior Sample" return(output) } ## Monte Carlo simulation from the likelihood of a ## multinomal distribution with a Dirichlet(alpha) prior MCmultinomdirichlet <- function(y, alpha0, mc=1000, ...) { # check data d <- length(y) if(any(y < 0)) { cat("Error: Some counts negative in y.\n") stop("Please respecify and call function again.") } # check alpha if(length(alpha0) != d) { cat("Error: Dimension of alpha and y do not match.\n") stop("Please respecify and call function again.") } if(any(alpha0 <= 0)) { cat("Error: At least one alpha in Dirichlet prior less than or equal to zero.\n") stop("Please respecify and call function again.") } # draw sample and return output <- mcmc(rdirichlet(mc,y + alpha0)) varnames(output) <- paste("pi.", 1:d, sep="") attr(output,"title") <- "MCmultinomdirichlet Posterior Sample" return(output) } MCMCpack/R/MCMCtobit.R0000644000176000001440000000571412133644103013771 0ustar ripleyusers########################################################################## ## tobit regression model ## ## This software is distributed under the terms of the GNU GENERAL ## PUBLIC LICENSE Version 2, June 1991. See the package LICENSE ## file for more information. ## ## Copyright (C) 2003-2007 Andrew D. Martin and Kevin M. 
Quinn ## Copyright (C) 2007-present Andrew D. Martin, Kevin M. Quinn, ## and Jong Hee Park ########################################################################## "MCMCtobit" <- function(formula, data=NULL, below = 0.0, above = Inf, burnin = 1000, mcmc = 10000, thin=1, verbose = 0, seed = NA, beta.start = NA, b0 = 0, B0 = 0, c0 = 0.001, d0 = 0.001, ...) { # checks check.offset(list(...)) check.mcmc.parameters(burnin, mcmc, thin) if (!is.numeric(below) | !is.numeric(above)) { cat("Error: Censoring points must be numeric, which includes +-Inf.\n") stop("Please respecify and call ", calling.function(), " again.", call.=FALSE) } if (below >= above) { cat("Error: Truncation points are logically inconsistent.\n") stop("Please respecify and call ", calling.function(), " again.", call.=FALSE) } # convert infinite values to finite approximations if(is.infinite(below)) below <- .Machine$double.xmax*-1 if(is.infinite(above)) above <- .Machine$double.xmax # seeds seeds <- form.seeds(seed) lecuyer <- seeds[[1]] seed.array <- seeds[[2]] lecuyer.stream <- seeds[[3]] # form response and model matrices holder <- parse.formula(formula, data=data) Y <- holder[[1]] X <- holder[[2]] xnames <- holder[[3]] K <- ncol(X) # number of covariates # starting values and priors beta.start <- coef.start(beta.start, K, formula, family=gaussian, data) mvn.prior <- form.mvn.prior(b0, B0, K) b0 <- mvn.prior[[1]] B0 <- mvn.prior[[2]] check.ig.prior(c0, d0) # define holder for posterior sample sample <- matrix(data=0, mcmc/thin, K+1) posterior <- NULL # call C++ code to draw sample auto.Scythe.call(output.object="posterior", cc.fun.name="MCMCtobit", sample.nonconst=sample, Y=Y, X=X, below=as.double(below), above=as.double(above), burnin=as.integer(burnin), mcmc=as.integer(mcmc), thin=as.integer(thin), lecuyer=as.integer(lecuyer), seedarray=as.integer(seed.array), lecuyerstream=as.integer(lecuyer.stream), verbose=as.integer(verbose), betastart=beta.start, b0=b0, B0=B0, c0=as.double(c0), d0=as.double(d0)) # pull together matrix and build MCMC object to return output <- form.mcmc.object(posterior, names=c(xnames, "sigma2"), title="MCMCtobit Posterior Sample") return(output) } MCMCpack/R/MCMCSVDreg.R0000644000176000001440000001505112133644103013775 0ustar ripleyusers########################################################################## ## MCMCSVDreg.R samples from the posterior distribution of a Gaussian ## linear regression model in which the X matrix has been decomposed ## with an SVD. Useful for prediction when number of columns of X ## is (possibly much) greater than the number of rows of X. ## ## See West, Mike. 2000. "Bayesian Regression in the 'Large p, Small n' ## Paradigm." Duke ISDS Discussion Paper 2000-22. ## ## This software is distributed under the terms of the GNU GENERAL ## PUBLIC LICENSE Version 2, June 1991. See the package LICENSE ## file for more information. ## ## KQ 9/9/2005 ## ## Copyright (C) 2003-2007 Andrew D. Martin and Kevin M. Quinn ## Copyright (C) 2007-present Andrew D. Martin, Kevin M. 
Quinn, ## and Jong Hee Park ########################################################################## parse.formula.SVDreg <- function(formula, data, intercept){ ## extract Y, X, and variable names for model formula and frame mt <- terms(formula, data=data) if(missing(data)) data <- sys.frame(sys.parent()) mf <- match.call(expand.dots = FALSE) mf$intercept <- NULL mf$drop.unused.levels <- TRUE mf$na.action <- na.pass ## for specialty handling of missing data mf[[1]] <- as.name("model.frame") mf <- eval(mf, sys.frame(sys.parent())) if (!intercept){ attributes(mt)$intercept <- 0 } ## null model support X <- if (!is.empty.model(mt)) model.matrix(mt, mf, contrasts) X <- as.matrix(X) # X matrix Y <- as.matrix(model.response(mf, "numeric")) # Y matrix ## delete obs that are missing in X but potentially keep obs that are ## missing Y ## These are used to for the full SVD of X' keep.indic <- apply(is.na(X), 1, sum) == 0 Y.full <- as.matrix(Y[keep.indic,]) X.full <- X[keep.indic,] xvars.full <- dimnames(X.full)[[2]] # X variable names xobs.full <- dimnames(X.full)[[1]] # X observation names return(list(Y.full, X.full, xvars.full, xobs.full)) } "MCMCSVDreg" <- function(formula, data=NULL, burnin = 1000, mcmc = 10000, thin=1, verbose = 0, seed = NA, tau2.start = 1, g0 = 0, a0 = 0.001, b0 = 0.001, c0=2, d0=2, w0=1, beta.samp=FALSE, intercept=TRUE, ...) { # checks check.offset(list(...)) check.mcmc.parameters(burnin, mcmc, thin) # seeds seeds <- form.seeds(seed) lecuyer <- seeds[[1]] seed.array <- seeds[[2]] lecuyer.stream <- seeds[[3]] ## form response and model matrices holder <- parse.formula.SVDreg(formula, data, intercept) Y <- holder[[1]] X <- holder[[2]] xnames <- holder[[3]] obsnames <- holder[[4]] K <- ncol(X) # number of covariates in unidentified model N <- nrow(X) # number of obs (including obs for which predictions # are required) N is also the length of gamma Y.miss.indic <- as.numeric(is.na(Y)) n.miss.Y <- sum(Y.miss.indic) Y[is.na(Y)] <- mean(Y, na.rm=TRUE) ## create SVD representation of t(X) svd.out <- svd(t(X)) # t(X) = A %*% D %*% F A <- svd.out$u D <- diag(svd.out$d) F <- t(svd.out$v) ## starting values and priors if (length(tau2.start) < N){ tau2.start <- rep(tau2.start, length.out=N) } tau2.start <- matrix(tau2.start, N, 1) mvn.prior <- form.mvn.prior(g0, 0, N) g0 <- mvn.prior[[1]] c0 <- rep(c0, length.out=N) d0 <- rep(d0, length.out=N) check.ig.prior(a0, b0) for (i in 1:N){ check.ig.prior(c0[i], d0[i]) } w0 <- rep(w0, length.out=N) if (min(w0) < 0 | max(w0) > 1){ cat("Element(s) of w0 not in [0, 1].\n") stop("Please respecify and call MCMCSVDreg again.\n") } ## define holder for posterior sample if (beta.samp){ sample <- matrix(data=0, mcmc/thin, n.miss.Y + 2*N + 1 + K) } else{ sample <- matrix(data=0, mcmc/thin, n.miss.Y + 2*N + 1) } ## call C++ code to draw sample posterior <- .C("MCMCSVDreg", sampledata = as.double(sample), samplerow = as.integer(nrow(sample)), samplecol = as.integer(ncol(sample)), Y = as.double(Y), Yrow = as.integer(nrow(Y)), Ycol = as.integer(ncol(Y)), Ymiss = as.integer(Y.miss.indic), A = as.double(A), Arow = as.integer(nrow(A)), Acol = as.integer(ncol(A)), D = as.double(D), Drow = as.integer(nrow(D)), Dcol = as.integer(ncol(D)), F = as.double(F), Frow = as.integer(nrow(F)), Fcol = as.integer(ncol(F)), burnin = as.integer(burnin), mcmc = as.integer(mcmc), thin = as.integer(thin), lecuyer = as.integer(lecuyer), seedarray = as.integer(seed.array), lecuyerstream = as.integer(lecuyer.stream), verbose = as.integer(verbose), tau2.start = as.double(tau2.start), 
tau2row = as.integer(nrow(tau2.start)), tau2col = as.integer(ncol(tau2.start)), g0 = as.double(g0), g0row = as.integer(nrow(g0)), g0col = as.integer(ncol(g0)), a0 = as.double(a0), b0 = as.double(b0), c0 = as.double(c0), d0 = as.double(d0), w0 = as.double(w0), betasamp = as.integer(beta.samp), PACKAGE="MCMCpack" ) ## pull together matrix and build MCMC object to return Y.miss.names <- NULL if (sum(Y.miss.indic) > 0){ Y.miss.names <- paste("y", obsnames[Y.miss.indic==1], sep=".") } gamma.names <- paste("gamma", 1:N, sep=".") tau2.names <- paste("tau^2", 1:N, sep=".") beta.names <- paste("beta", xnames, sep=".") if (beta.samp){ output <- form.mcmc.object(posterior, names=c(Y.miss.names, gamma.names, tau2.names, "sigma^2", beta.names), title="MCMCSVDreg Posterior Sample") } else{ output <- form.mcmc.object(posterior, names=c(Y.miss.names, gamma.names, tau2.names, "sigma^2"), title="MCMCSVDreg Posterior Sample") } return(output) } MCMCpack/R/MCMCresidualBreakAnalysis.R0000644000176000001440000001153712133644103017131 0ustar ripleyusers######################################################### ## residual break analysis ## JHP 07/01/2007 ## JHP 03/03/2009 ######################################################### "MCMCresidualBreakAnalysis"<- function(resid, m = 1, b0 = 0, B0 = 0.001, c0 = 0.1, d0 = 0.1, a = NULL, b = NULL, mcmc = 1000, burnin = 1000, thin = 1, verbose = 0, seed = NA, beta.start = NA, P.start = NA, marginal.likelihood = c("none", "Chib95"), ...){ ## form response and model matrices y <- as.matrix(resid, ,1) n <- nrow(y) ns <- m + 1 # number of states ## check iteration parameters check.mcmc.parameters(burnin, mcmc, thin) totiter <- mcmc + burnin cl <- match.call() nstore <- mcmc/thin ## seeds seeds <- form.seeds(seed) lecuyer <- seeds[[1]] seed.array <- seeds[[2]] lecuyer.stream <- seeds[[3]] if(!is.na(seed)) set.seed(seed) ## get marginal likelihood argument marginal.likelihood <- match.arg(marginal.likelihood) ## following MCMCregress, set chib as binary logmarglike <- loglike <- NULL chib <- 0 if (marginal.likelihood == "Chib95"){ chib <- 1 } ## initial values if(m == 0){ output <- MCMCregress(y~1, mcmc=mcmc, burnin=burnin, verbose=verbose, thin=thin, b0=b0, B0=B0, c0=c0, d0=d0, seed = seed, beta.start=beta.start, marginal.likelihood = marginal.likelihood) attr(output, "y") <- y } else{ ## prior A0 <- trans.mat.prior(m=m, n=n, a=a, b=b) Pstart <- check.P(P.start, m, a=a, b=b) betastart <- rep(mean(y), ns) Sigmastart <- rep(var(y), ns) statestart <- sort(sample(1:ns, n, replace=T)) ## call C++ code to draw sample posterior <- .C("MCMCresidualBreakAnalysis", betaout = as.double(rep(0.0, nstore*ns)), Sigmaout = as.double(rep(0.0, nstore*ns)), psout = as.double(rep(0.0, n*ns)), Ydata = as.double(y), Yrow = as.integer(nrow(y)), Ycol = as.integer(ncol(y)), m = as.integer(m), burnin = as.integer(burnin), mcmc = as.integer(mcmc), thin = as.integer(thin), verbose = as.integer(verbose), lecuyer=as.integer(lecuyer), seedarray=as.integer(seed.array), lecuyerstream=as.integer(lecuyer.stream), betastart = as.double(betastart), Sigmastart = as.double(Sigmastart), Pstart = as.double(Pstart), statestart = as.integer(statestart), a = as.double(a), b = as.double(b), b0data = as.double(b0), B0data = as.double(B0), c0 = as.double(c0), d0 = as.double(d0), A0data = as.double(A0), logmarglikeholder = as.double(0.0), loglikeholder = as.double(0.0), chib = as.integer(chib)) ## get marginal likelihood if Chib95 if (marginal.likelihood == "Chib95"){ logmarglike <- posterior$logmarglikeholder loglike <- 
posterior$loglikeholder } ## pull together matrix and build MCMC object to return beta.holder <- matrix(posterior$betaout, nstore, ns) Sigma.holder <- matrix(posterior$Sigmaout, nstore, ns) ps.holder <- matrix(posterior$psout, n, ) output1 <- mcmc(data=beta.holder, start=burnin+1, end=burnin + mcmc, thin=thin) varnames(output1) <- sapply(c(1:ns), function(i){ paste("mu_regime", i, sep = "") }) output2 <- mcmc(data=Sigma.holder, start=burnin+1, end=burnin + mcmc, thin=thin) varnames(output2) <- sapply(c(1:ns), function(i){ paste("sigma2_regime", i, sep = "") }) output <- as.mcmc(cbind(output1, output2)) attr(output, "title") <- "MCMCresidualBreakAnalysis Posterior Sample" attr(output, "formula") <- formula attr(output, "y") <- y attr(output, "m") <- m attr(output, "call") <- cl attr(output, "prob.state") <- ps.holder/nstore attr(output, "logmarglike") <- logmarglike attr(output, "loglike") <- loglike } return(output) }## end of MCMC function MCMCpack/R/MCMCregressChange.R0000644000176000001440000001525312133644103015427 0ustar ripleyusers######################################################### ## sample from the posterior distribution ## of a linear Gaussian model with multiple changepoints ## using linked C++ code in Scythe ## ## JHP 07/01/2007 ## JHP 03/03/2009 ######################################################### "MCMCregressChange"<- function(formula, data=parent.frame(), m = 1, b0 = 0, B0 = 0, c0 = 0.001, d0 = 0.001, sigma.mu = NA, sigma.var = NA, a = NULL, b = NULL, mcmc = 1000, burnin = 1000, thin = 1, verbose = 0, seed = NA, beta.start = NA, P.start = NA, marginal.likelihood = c("none", "Chib95"), ...){ ## form response and model matrices holder <- parse.formula(formula, data) y <- holder[[1]] X <- holder[[2]] xnames <- holder[[3]] k <- ncol(X) # number of covariates n <- length(y) ns <- m + 1 # number of states ## check iteration parameters check.mcmc.parameters(burnin, mcmc, thin) totiter <- mcmc + burnin cl <- match.call() nstore <- mcmc/thin ## seeds seeds <- form.seeds(seed) lecuyer <- seeds[[1]] seed.array <- seeds[[2]] lecuyer.stream <- seeds[[3]] if(!is.na(seed)) set.seed(seed) ## prior mvn.prior <- form.mvn.prior(b0, B0, k) b0 <- mvn.prior[[1]] B0 <- mvn.prior[[2]] if (is.na(sigma.mu)|is.na(sigma.var)) { check.ig.prior(c0, d0) } else { c0 <- 4 + 2 *(sigma.mu^2/sigma.var) d0 <- 2*sigma.mu *(c0/2 - 1) } ## get marginal likelihood argument marginal.likelihood <- match.arg(marginal.likelihood) ## following MCMCregress, set chib as binary logmarglike <- loglik <- NULL chib <- 0 if (marginal.likelihood == "Chib95"){ chib <- 1 } if (m == 0){ output <- MCMCregress(formula, burnin = burnin, mcmc = mcmc, thin = thin, verbose = verbose, b0 = b0, B0 = B0, c0 =c0, d0=d0, marginal.likelihood = "Chib95") attr(output, "y") <- y attr(output, "m") <- m } else{ if(k == 1){ output <- MCMCresidualBreakAnalysis(y, data=data, m = m, b0 = b0, B0 = B0, c0 = c0, d0 = d0, a = a, b = b, burnin = burnin, mcmc = mcmc, thin = thin, verbose = verbose, seed = seed, beta.start = beta.start, P.start = P.start, marginal.likelihood = marginal.likelihood) } else{ ## initial values Pstart <- check.P(P.start, m, a=a, b=b) A0 <- trans.mat.prior(m=m, n=n, a=a, b=b) betastart <- beta.change.start(beta.start, ns, k, formula, family=gaussian, data) ols <- lm(y~X-1) Sigmastart <- rep(summary(ols)$sigma^2, ns) statestart <- sort(sample(1:ns, n, replace=T)) ## call C++ code to draw sample posterior <- .C("MCMCregressChange", betaout = as.double(rep(0.0, nstore*ns*k)), Sigmaout = as.double(rep(0.0, nstore*ns)), Pout 
= as.double(rep(0.0, nstore*ns*ns)), psout = as.double(rep(0.0, n*ns)), sout = as.double(rep(0.0, nstore*n)), Ydata = as.double(y), Yrow = as.integer(nrow(y)), Ycol = as.integer(ncol(y)), Xdata = as.double(X), Xrow = as.integer(nrow(X)), Xcol = as.integer(ncol(X)), m = as.integer(m), burnin = as.integer(burnin), mcmc = as.integer(mcmc), thin = as.integer(thin), verbose = as.integer(verbose), lecuyer=as.integer(lecuyer), seedarray=as.integer(seed.array), lecuyerstream=as.integer(lecuyer.stream), betastart = as.double(betastart), Sigmastart = as.double(Sigmastart), Pstart = as.double(Pstart), statestart = as.integer(statestart), a = as.double(a), b = as.double(b), b0data = as.double(b0), B0data = as.double(B0), c0 = as.double(c0), d0 = as.double(d0), A0data = as.double(A0), logmarglikeholder = as.double(0.0), loglikeholder = as.double(0.0), chib = as.integer(chib)) ## get marginal likelihood if Chib95 if (marginal.likelihood == "Chib95"){ logmarglike <- posterior$logmarglikeholder loglike <- posterior$loglikeholder } ## pull together matrix and build MCMC object to return beta.holder <- matrix(posterior$betaout, nstore, ns*k) Sigma.holder <- matrix(posterior$Sigmaout, nstore, ns) P.holder <- matrix(posterior$Pout, nstore, ) s.holder <- matrix(posterior$sout, nstore, ) ps.holder <- matrix(posterior$psout, n, ) output1 <- mcmc(data=beta.holder, start=burnin+1, end=burnin + mcmc, thin=thin) varnames(output1) <- sapply(c(1:ns), function(i){ paste(c(xnames), "_regime", i, sep = "") }) output2 <- mcmc(data=Sigma.holder, start=burnin+1, end=burnin + mcmc, thin=thin) varnames(output2) <- sapply(c(1:ns), function(i){ paste(c("sigma2"), "_regime", i, sep = "") }) output <- as.mcmc(cbind(output1, output2)) attr(output, "title") <- "MCMCregressChange Posterior Sample" attr(output, "formula") <- formula attr(output, "y") <- y attr(output, "X") <- X attr(output, "m") <- m attr(output, "call") <- cl attr(output, "logmarglike") <- logmarglike attr(output, "loglik") <- loglik attr(output, "prob.state") <- ps.holder/nstore attr(output, "s.store") <- s.holder } } return(output) }## end of MCMC function MCMCpack/R/MCMCregress.R0000644000176000001440000001110312133644103014307 0ustar ripleyusers########################################################################## ## MCMCregress.R samples from the posterior distribution of a Gaussian ## linear regression model in R using linked C++ code in Scythe ## ## Original written by ADM and KQ 5/21/2002 ## Updated with helper functions ADM 5/28/2004 ## Modified to meet new developer specification 6/18/2004 KQ ## Modified for new Scythe and rngs 7/22/2004 ADM ## Modified to handle marginal likelihood calculation 1/26/2006 KQ ## ## This software is distributed under the terms of the GNU GENERAL ## PUBLIC LICENSE Version 2, June 1991. See the package LICENSE ## file for more information. ## ## Copyright (C) 2003-2007 Andrew D. Martin and Kevin M. Quinn ## Copyright (C) 2007-present Andrew D. Martin, Kevin M. Quinn, ## and Jong Hee Park ########################################################################## "MCMCregress" <- function(formula, data=NULL, burnin = 1000, mcmc = 10000, thin=1, verbose = 0, seed = NA, beta.start = NA, b0 = 0, B0 = 0, c0 = 0.001, d0 = 0.001, sigma.mu = NA, sigma.var = NA, marginal.likelihood = c("none", "Laplace", "Chib95"), ...) 
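## Illustrative usage sketch (not part of the original source; the data
## frame 'df' and covariates 'x1', 'x2' below are hypothetical):
##   post <- MCMCregress(y ~ x1 + x2, data = df, burnin = 1000, mcmc = 10000,
##                       b0 = 0, B0 = 0.001, marginal.likelihood = "Laplace")
##   summary(post)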
{ ## checks check.offset(list(...)) check.mcmc.parameters(burnin, mcmc, thin) cl <- match.call() ## seeds seeds <- form.seeds(seed) lecuyer <- seeds[[1]] seed.array <- seeds[[2]] lecuyer.stream <- seeds[[3]] ## form response and model matrices holder <- parse.formula(formula, data=data) Y <- holder[[1]] X <- holder[[2]] xnames <- holder[[3]] K <- ncol(X) # number of covariates ## starting values and priors beta.start <- coef.start(beta.start, K, formula, family=gaussian, data) mvn.prior <- form.mvn.prior(b0, B0, K) b0 <- mvn.prior[[1]] B0 <- mvn.prior[[2]] if (is.na(sigma.mu)|is.na(sigma.var)) { check.ig.prior(c0, d0) } else { c0 <- 4 + 2 *(sigma.mu^2/sigma.var) d0 <- 2*sigma.mu *(c0/2 - 1) } ## get marginal likelihood argument marginal.likelihood <- match.arg(marginal.likelihood) B0.eigenvalues <- eigen(B0)$values if (min(B0.eigenvalues) < 0){ stop("B0 is not positive semi-definite.\nPlease respecify and call again.\n") } if (isTRUE(all.equal(min(B0.eigenvalues), 0))){ if (marginal.likelihood != "none"){ warning("Cannot calculate marginal likelihood with improper prior\n") marginal.likelihood <- "none" } } logmarglike <- NULL chib <- 0 if (marginal.likelihood == "Chib95"){ chib <- 1 } ## define holder for posterior sample sample <- matrix(data=0, mcmc/thin, K+1) posterior <- NULL ## call C++ code to draw sample auto.Scythe.call(output.object="posterior", cc.fun.name="MCMCregress", sample.nonconst=sample, Y=Y, X=X, burnin=as.integer(burnin), mcmc=as.integer(mcmc), thin=as.integer(thin), lecuyer=as.integer(lecuyer), seedarray=as.integer(seed.array), lecuyerstream=as.integer(lecuyer.stream), verbose=as.integer(verbose), betastart=beta.start, b0=b0, B0=B0, c0=as.double(c0), d0=as.double(d0), logmarglikeholder.nonconst=as.double(0.0), chib=as.integer(chib)) ## get marginal likelihood if Chib95 if (marginal.likelihood == "Chib95"){ logmarglike <- posterior$logmarglikeholder } ## marginal likelihood calculation if Laplace if (marginal.likelihood == "Laplace"){ theta.start <- c(beta.start, log(0.5*var(Y))) optim.out <- optim(theta.start, logpost.regress, method="BFGS", control=list(fnscale=-1), hessian=TRUE, y=Y, X=X, b0=b0, B0=B0, c0=c0, d0=d0) theta.tilde <- optim.out$par beta.tilde <- theta.tilde[1:K] sigma2.tilde <- exp(theta.tilde[K+1]) Sigma.tilde <- solve(-1*optim.out$hessian) logmarglike <- (length(theta.tilde)/2)*log(2*pi) + log(sqrt(det(Sigma.tilde))) + logpost.regress(theta.tilde, Y, X, b0, B0, c0, d0) } ## pull together matrix and build MCMC object to return output <- form.mcmc.object(posterior, names=c(xnames, "sigma2"), title="MCMCregress Posterior Sample", y=Y, call=cl, logmarglike=logmarglike ) return(output) } MCMCpack/R/MCMCquantreg.R0000644000176000001440000000442412133644103014473 0ustar ripleyusers"MCMCquantreg" <- function(formula, data=NULL, tau=0.5, burnin = 1000, mcmc = 10000, thin=1, verbose = 0, seed = sample(1:1000000,1), beta.start = NA, b0 = 0, B0 = 0, ...) { ## checks check.offset(list(...)) check.mcmc.parameters(burnin, mcmc, thin) cl <- match.call() if (tau<=0 || tau>=1){ stop("tau must be in (0,1). 
\nPlease respecify and call again.\n") } ## seeds seeds <- form.seeds(seed) lecuyer <- seeds[[1]] seed.array <- seeds[[2]] lecuyer.stream <- seeds[[3]] ## form response and model matrices holder <- parse.formula(formula, data=data) Y <- holder[[1]] X <- holder[[2]] xnames <- holder[[3]] K <- ncol(X) # number of covariates ## starting values and priors ols.fit <- lm(formula, data=data) defaults <- matrix(coef(ols.fit),K,1) defaults[1] <- defaults[1]+summary(ols.fit)$sigma*qnorm(tau) beta.start <- coef.start(beta.start, K, formula, family=gaussian, data, defaults=defaults) mvn.prior <- form.mvn.prior(b0, B0, K) b0 <- mvn.prior[[1]] B0 <- mvn.prior[[2]] B0.eigenvalues <- eigen(B0)$values if (min(B0.eigenvalues) < 0){ stop("B0 is not positive semi-definite.\nPlease respecify and call again.\n") } ## define holder for posterior sample sample <- matrix(data=0, mcmc/thin, K) posterior <- NULL ## call C++ code to draw samples auto.Scythe.call(output.object="posterior", cc.fun.name="MCMCquantreg", sample.nonconst=sample, tau=as.double(tau), Y=Y, X=X, burnin=as.integer(burnin), mcmc=as.integer(mcmc), thin=as.integer(thin), lecuyer=as.integer(lecuyer), seedarray=as.integer(seed.array), lecuyerstream=as.integer(lecuyer.stream), verbose=as.integer(verbose), betastart=beta.start, b0=b0, B0=B0, package="MCMCpack") ## pull together matrix and build MCMC object to return output <- form.mcmc.object(posterior, names=xnames, title="MCMCquantreg Posterior Sample", y=Y, call=cl) return(output) } MCMCpack/R/MCMCprobitChange.R0000644000176000001440000001171512133644103015253 0ustar ripleyusers######################################################### ## ## sample from the posterior distribution ## of a probit regression model with multiple changepoints ## ## JHP 07/01/2007 ## JHP 03/03/2009 ######################################################### "MCMCprobitChange"<- function(formula, data=parent.frame(), m = 1, burnin = 10000, mcmc = 10000, thin = 1, verbose = 0, seed = NA, beta.start = NA, P.start = NA, b0 = NULL, B0 = NULL, a = NULL, b = NULL, marginal.likelihood = c("none", "Chib95"), ...){ ## form response and model matrices holder <- parse.formula(formula, data) y <- holder[[1]] X <- holder[[2]] xnames <- holder[[3]] k <- ncol(X) n <- length(y) ns <- m + 1 ## check iteration parameters check.mcmc.parameters(burnin, mcmc, thin) totiter <- mcmc + burnin cl <- match.call() nstore <- mcmc/thin ## seeds seeds <- form.seeds(seed) lecuyer <- seeds[[1]] seed.array <- seeds[[2]] lecuyer.stream <- seeds[[3]] if(!is.na(seed)) set.seed(seed) ## prior mvn.prior <- form.mvn.prior(b0, B0, k) b0 <- mvn.prior[[1]] B0 <- mvn.prior[[2]] chib <- 0 if (marginal.likelihood == "Chib95"){ chib <- 1 } if (m == 0){ if (marginal.likelihood == "Chib95"){ output <- MCMCprobit(formula=Y~X-1, burnin = burnin, mcmc = mcmc, thin = thin, verbose = verbose, b0 = b0, B0 = B0, marginal.likelihood = "Laplace") cat("\n Chib95 method is not yet available for m = 0. 
Laplace method is used instead.") } else { output <- MCMCprobit(formula=Y~X-1, burnin = burnin, mcmc = mcmc, thin = thin, verbose = verbose, b0 = b0, B0 = B0) } attr(output, "y") <- y } else{ A0 <- trans.mat.prior(m=m, n=n, a=a, b=b) Pstart <- check.P(P.start, m, a=a, b=b) betastart <- beta.change.start(beta.start, ns, k, formula, family=binomial, data) ## call C++ code to draw sample posterior <- .C("MCMCprobitChange", betaout = as.double(rep(0.0, nstore*ns*k)), Pout = as.double(rep(0.0, nstore*ns*ns)), psout = as.double(rep(0.0, n*ns)), sout = as.double(rep(0.0, nstore*n)), Ydata = as.double(y), Yrow = as.integer(nrow(y)), Ycol = as.integer(ncol(y)), Xdata = as.double(X), Xrow = as.integer(nrow(X)), Xcol = as.integer(ncol(X)), m = as.integer(m), burnin = as.integer(burnin), mcmc = as.integer(mcmc), thin = as.integer(thin), verbose = as.integer(verbose), lecuyer=as.integer(lecuyer), seedarray=as.integer(seed.array), lecuyerstream=as.integer(lecuyer.stream), betastart = as.double(betastart), Pstart = as.double(Pstart), a = as.double(a), b = as.double(b), b0data = as.double(b0), B0data = as.double(B0), A0data = as.double(A0), logmarglikeholder = as.double(0.0), loglikeholder = as.double(0.0), chib = as.integer(chib)) ## get marginal likelihood if Chib95 if (chib==1){ logmarglike <- posterior$logmarglikeholder loglike <- posterior$loglikeholder } else{ logmarglike <- loglike <- 0 } ## pull together matrix and build MCMC object to return beta.holder <- matrix(posterior$betaout, nstore, ns*k) P.holder <- matrix(posterior$Pout, nstore, ) s.holder <- matrix(posterior$sout, nstore, ) ps.holder <- matrix(posterior$psout, n, ) output <- mcmc(data=beta.holder, start=burnin+1, end=burnin + mcmc, thin=thin) varnames(output) <- sapply(c(1:ns), function(i){ paste(c(xnames), "_regime", i, sep = "") }) attr(output, "title") <- "MCMCprobitChange Posterior Sample" attr(output, "formula") <- formula attr(output, "y") <- y attr(output, "X") <- X attr(output, "m") <- m attr(output, "call") <- cl attr(output, "logmarglike") <- logmarglike attr(output, "loglike") <- loglike attr(output, "prob.state") <- ps.holder/nstore attr(output, "s.store") <- s.holder } return(output) }## end of MCMC function MCMCpack/R/MCMCprobit.R0000644000176000001440000001546112133644103014147 0ustar ripleyusers########################################################################## ## sample from the posterior distribution of a probit ## model in R using linked C++ code in Scythe ## ## ADM and KQ 5/21/2002 ## Modified to meet new developer specification 7/26/2004 KQ ## Modified for new Scythe and rngs 7/26/2004 KQ ## Modified to handle marginal likelihood calculation 1/27/2006 KQ ## ## This software is distributed under the terms of the GNU GENERAL ## PUBLIC LICENSE Version 2, June 1991. See the package LICENSE ## file for more information. ## ## Copyright (C) 2003-2007 Andrew D. Martin and Kevin M. Quinn ## Copyright (C) 2007-present Andrew D. Martin, Kevin M. Quinn, ## and Jong Hee Park ########################################################################## "MCMCprobit" <- function(formula, data=NULL, burnin = 1000, mcmc = 10000, thin = 1, verbose = 0, seed = NA, beta.start = NA, b0 = 0, B0 = 0, bayes.resid=FALSE, marginal.likelihood = c("none", "Laplace", "Chib95"), ...) 
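## Illustrative usage sketch (not part of the original source; 'df' and the
## binary response 'y' are hypothetical):
##   post <- MCMCprobit(y ~ x1 + x2, data = df, b0 = 0, B0 = 0.01,
##                      marginal.likelihood = "Laplace")
##   summary(post)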
{ ## checks check.offset(list(...)) check.mcmc.parameters(burnin, mcmc, thin) cl <- match.call() ## seeds seeds <- form.seeds(seed) lecuyer <- seeds[[1]] seed.array <- seeds[[2]] lecuyer.stream <- seeds[[3]] ## form response and model matrices holder <- parse.formula(formula, data=data) Y <- holder[[1]] X <- holder[[2]] xnames <- holder[[3]] K <- ncol(X) # number of covariates ## starting values and priors beta.start <- coef.start(beta.start, K, formula, family=binomial(link=probit), data) mvn.prior <- form.mvn.prior(b0, B0, K) b0 <- mvn.prior[[1]] B0 <- mvn.prior[[2]] ## get marginal likelihood argument marginal.likelihood <- match.arg(marginal.likelihood) B0.eigenvalues <- eigen(B0)$values if (min(B0.eigenvalues) < 0){ stop("B0 is not positive semi-definite.\nPlease respecify and call again.\n") } if (isTRUE(all.equal(min(B0.eigenvalues), 0))){ if (marginal.likelihood != "none"){ warning("Cannot calculate marginal likelihood with improper prior\n") marginal.likelihood <- "none" } } logmarglike <- NULL ## residuals setup resvec <- NULL if (is.logical(bayes.resid) && bayes.resid==TRUE){ resvec <- matrix(1:length(Y), length(Y), 1) } else if (!is.logical(bayes.resid)){ resvec <- matrix(bayes.resid, length(bayes.resid), 1) if (min(resvec %in% 1:length(Y)) == 0){ cat("Elements of bayes.resid are not valid row numbers.\n") stop("Check data and call MCMCprobit() again.\n") } } ## y \in {0, 1} error checking if (sum(Y!=0 & Y!=1) > 0) { cat("Elements of Y equal to something other than 0 or 1.\n") stop("Check data and call MCMCprobit() again.\n") } ## if Chib95 is true chib <- 0 if (marginal.likelihood == "Chib95"){ chib <- 1 } posterior <- NULL if (is.null(resvec)){ ## define holder for posterior density sample sample <- matrix(data=0, mcmc/thin, dim(X)[2] ) ## call C++ code to draw sample auto.Scythe.call(output.object="posterior", cc.fun.name="MCMCprobit", sample.nonconst=sample, Y=Y, X=X, burnin=as.integer(burnin), mcmc=as.integer(mcmc), thin=as.integer(thin), lecuyer=as.integer(lecuyer), seedarray=as.integer(seed.array), lecuyerstream=as.integer(lecuyer.stream), verbose=as.integer(verbose), betastart=beta.start, b0=b0, B0=B0, logmarglikeholder.nonconst = as.double(0.0), chib = as.integer(chib)) ## get marginal likelihood if Chib95 if (marginal.likelihood == "Chib95"){ logmarglike <- posterior$logmarglikeholder } ## marginal likelihood calculation if Laplace if (marginal.likelihood == "Laplace"){ theta.start <- beta.start optim.out <- optim(theta.start, logpost.probit, method="BFGS", control=list(fnscale=-1), hessian=TRUE, y=Y, X=X, b0=b0, B0=B0) theta.tilde <- optim.out$par beta.tilde <- theta.tilde[1:K] Sigma.tilde <- solve(-1*optim.out$hessian) logmarglike <- (length(theta.tilde)/2)*log(2*pi) + log(sqrt(det(Sigma.tilde))) + logpost.probit(theta.tilde, Y, X, b0, B0) } ## put together matrix and build MCMC object to return output <- form.mcmc.object(posterior, names=xnames, title="MCMCprobit Posterior Sample", y=Y, call=cl, logmarglike=logmarglike) } else{ # define holder for posterior density sample sample <- matrix(data=0, mcmc/thin, dim(X)[2]+length(resvec) ) ## call C++ code to draw sample auto.Scythe.call(output.object="posterior", cc.fun.name="MCMCprobitres", sample.nonconst=sample, Y=Y, X=X, resvec=resvec, burnin=as.integer(burnin), mcmc=as.integer(mcmc), thin=as.integer(thin), lecuyer=as.integer(lecuyer), seedarray=as.integer(seed.array), lecuyerstream=as.integer(lecuyer.stream), verbose=as.integer(verbose), betastart=beta.start, b0=b0, B0=B0, logmarglikeholder.nonconst= 
as.double(0.0), chib = as.integer(chib)) ## get marginal likelihood if Chib95 if (marginal.likelihood == "Chib95"){ logmarglike <- posterior$logmarglikeholder } ## marginal likelihood calculation if Laplace if (marginal.likelihood == "Laplace"){ theta.start <- beta.start optim.out <- optim(theta.start, logpost.probit, method="BFGS", control=list(fnscale=-1), hessian=TRUE, y=Y, X=X, b0=b0, B0=B0) theta.tilde <- optim.out$par beta.tilde <- theta.tilde[1:K] Sigma.tilde <- solve(-1*optim.out$hessian) logmarglike <- (length(theta.tilde)/2)*log(2*pi) + log(sqrt(det(Sigma.tilde))) + logpost.probit(theta.tilde, Y, X, b0, B0) } ## put together matrix and build MCMC object to return xnames <- c(xnames, paste("epsilonstar", as.character(resvec), sep="") ) output <- form.mcmc.object(posterior, names=xnames, title="MCMCprobit Posterior Sample", y=Y, call=cl, logmarglike=logmarglike) } return(output) } MCMCpack/R/MCMCpoissonChange.R0000644000176000001440000001566312140060635015454 0ustar ripleyusers################################ ## Poisson Changepoint Model ## ## 07/14/2009 Jong Hee Park ################################ "MCMCpoissonChange"<- function(formula, data = parent.frame(), m = 1, b0 = 0, B0 = 1, a = NULL, b = NULL, c0 = NA, d0 = NA, lambda.mu = NA, lambda.var = NA, burnin = 1000, mcmc = 1000, thin = 1, verbose = 0, seed = NA, beta.start = NA, P.start = NA, ## offset = NA, marginal.likelihood = c("none", "Chib95"), ...) { ## form response and model matrices holder <- parse.formula(formula, data) y <- holder[[1]] X <- holder[[2]] xnames <- holder[[3]] k <- ncol(X) n <- length(y) n.arrival<- y + 1 NT <- max(n.arrival) tot.comp <- n + sum(y) ns <- m + 1 ## check iteration parameters check.mcmc.parameters(burnin, mcmc, thin) totiter <- mcmc + burnin cl <- match.call() nstore <- mcmc/thin ## seeds seeds <- form.seeds(seed) lecuyer <- seeds[[1]] seed.array <- seeds[[2]] lecuyer.stream <- seeds[[3]] if(!is.na(seed)) set.seed(seed) if (k==1){ if (!is.na(lambda.mu) && !is.na(lambda.var)) { c0 <- lambda.mu^2/lambda.var d0 <- lambda.mu/lambda.var } if ((is.na(c0)||is.na(d0))&&((is.na(lambda.mu)||is.na(lambda.var)))){ stop("You have to provide a prior for lambda (c0 and d0 or lambda.mu and lambda.var) when there is no covariate.\n") } } else{ c0 <- d0 <- 0 mvn.prior <- form.mvn.prior(b0, B0, k) b0 <- mvn.prior[[1]] B0 <- mvn.prior[[2]] } ## get marginal likelihood argument marginal.likelihood <- match.arg(marginal.likelihood) logmarglike <- NULL chib <- 0 if (marginal.likelihood == "Chib95"){ chib <- 1 } if (m == 0){ if (marginal.likelihood == "Chib95"){ if (is.na(b0)||is.na(B0)) stop("You have to have a prior for beta (b0 and B0) when m = 0.\n") else{ output <- MCMCpoisson(formula, burnin = burnin, mcmc = mcmc, thin = thin, verbose = verbose, b0 = b0, B0 = B0, marginal.likelihood = "Laplace") cat("\n Chib95 method is not yet available for m = 0. 
Laplace method is used instead.") } } else { output <- MCMCpoisson(formula, burnin = burnin, mcmc = mcmc, thin = thin, verbose = verbose, b0 = b0, B0 = B0) } } else { ## prior A0 <- trans.mat.prior(m=m, n=n, a=a, b=b) ## get initial values of tau from observed y Pstart <- check.P(P.start, m, a=a, b=b) betastart <- beta.change.start(beta.start, ns, k, formula, family=poisson, data) if (k == 1){ betastart <- exp(betastart) } taustart <- tau.initial(y, tot.comp) componentstart <- round(runif(tot.comp, 1, 5)) ## if(is.na(offset)){ ## logoffset <- rep(0, length(y)) ## } ## else{ ## if(length(offset) == length(y)){ ## logoffset <- log(offset) ## } ## else{ ## stop("\n The length of offset is not matched with y.") ## } ## } ## print(offset) ## normal mixture weights wr <- c(0.2294, 0.2590, 0.2480, 0.1525, 0.0472) mr <- c(0.0982, -1.5320, -0.7433, 0.8303, -3.1428) sr <- sqrt(c(0.2401, 1.1872, 0.3782, 0.1920, 3.2375)) ## call C++ code to draw sample posterior <- .C("MCMCpoissonChange", betaout = as.double(rep(0.0, nstore*ns*k)), Pout = as.double(rep(0.0, nstore*ns*ns)), psout = as.double(rep(0.0, n*ns)), sout = as.double(rep(0.0, nstore*n)), Ydata = as.double(y), Yrow = as.integer(nrow(y)), Ycol = as.integer(ncol(y)), Xdata = as.double(X), Xrow = as.integer(nrow(X)), Xcol = as.integer(ncol(X)), ## logoffset = as.double(logoffset), m = as.integer(m), burnin = as.integer(burnin), mcmc = as.integer(mcmc), thin = as.integer(thin), verbose = as.integer(verbose), betastart = as.double(betastart), Pstart = as.double(Pstart), taustart = as.double(taustart), componentstart = as.double(componentstart), a = as.double(a), b = as.double(b), c0 = as.double(c0), d0 = as.double(d0), lecuyer = as.integer(lecuyer), seedarray = as.integer(seed.array), lecuyerstream = as.integer(lecuyer.stream), b0data = as.double(b0), B0data = as.double(B0), A0data = as.double(A0), logmarglikeholder = as.double(0.0), loglikeholder = as.double(0.0), wrin = as.double(wr), mrin = as.double(mr), srin = as.double(sr), chib = as.integer(chib), PACKAGE="MCMCpack") ## get marginal likelihood if Chib95 if (marginal.likelihood == "Chib95"){ logmarglike <- posterior$logmarglikeholder loglike <- posterior$loglikeholder } else { logmarglike <- 0 loglike <- 0 } ## pull together matrix and build MCMC object to return beta.holder <- matrix(posterior$betaout, nstore, ) P.holder <- matrix(posterior$Pout, nstore, ) s.holder <- matrix(posterior$sout, nstore, ) ps.holder <- matrix(posterior$psout, n, ) output <- mcmc(data=beta.holder, start=burnin+1, end=burnin + mcmc, thin=thin) varnames(output) <- sapply(c(1:ns), function(i) { paste(xnames, "_regime", i, sep = "")}) attr(output, "title") <- "MCMCpoissonChange Posterior Sample" attr(output, "formula") <- formula attr(output, "y") <- y attr(output, "X") <- X attr(output, "m") <- m attr(output, "call") <- cl attr(output, "logmarglike") <- logmarglike attr(output, "loglike") <- loglike attr(output, "prob.state") <- ps.holder/nstore attr(output, "s.store") <- s.holder attr(output, "P.store") <- P.holder } return(output) } MCMCpack/R/MCMCpoisson.R0000644000176000001440000001004612133644103014334 0ustar ripleyusers########################################################################## ## sample from the posterior distribution of a Poisson regression ## model in R using linked C++ code in Scythe ## ## ADM 1/24/2003 ## KQ 3/17/2003 [bug fix] ## Modified to meet new developer specification 7/15/2004 KQ ## Modified for new Scythe and rngs 7/26/2004 KQ ## Modified to handle marginal likelihood calculation 
1/27/2006 KQ ## ## This software is distributed under the terms of the GNU GENERAL ## PUBLIC LICENSE Version 2, June 1991. See the package LICENSE ## file for more information. ## ## Copyright (C) 2003-2007 Andrew D. Martin and Kevin M. Quinn ## Copyright (C) 2007-present Andrew D. Martin, Kevin M. Quinn, ## and Jong Hee Park ########################################################################## "MCMCpoisson" <- function(formula, data=NULL, burnin = 1000, mcmc = 10000, thin=1, tune=1.1, verbose = 0, seed = NA, beta.start = NA, b0 = 0, B0 = 0, marginal.likelihood = c("none", "Laplace"),...) { ## checks check.offset(list(...)) check.mcmc.parameters(burnin, mcmc, thin) cl <- match.call() ## seeds seeds <- form.seeds(seed) lecuyer <- seeds[[1]] seed.array <- seeds[[2]] lecuyer.stream <- seeds[[3]] ## form response and model matrices holder <- parse.formula(formula, data=data) Y <- holder[[1]] X <- holder[[2]] xnames <- holder[[3]] K <- ncol(X) # number of covariates ## starting values and priors beta.start <- coef.start(beta.start, K, formula, family=poisson, data) mvn.prior <- form.mvn.prior(b0, B0, K) b0 <- mvn.prior[[1]] B0 <- mvn.prior[[2]] ## get marginal likelihood argument marginal.likelihood <- match.arg(marginal.likelihood) B0.eigenvalues <- eigen(B0)$values if (min(B0.eigenvalues) < 0){ stop("B0 is not positive semi-definite.\nPlease respecify and call again.\n") } if (isTRUE(all.equal(min(B0.eigenvalues), 0))){ if (marginal.likelihood != "none"){ warning("Cannot calculate marginal likelihood with improper prior\n") marginal.likelihood <- "none" } } logmarglike <- NULL ## form the tuning parameter tune <- vector.tune(tune, K) V <- vcov(glm(formula=formula, data=data, family=poisson)) ## test y non-negative if (sum(Y < 0) > 0) { cat("\n Elements of Y negative. ") stop("\n Check data and call MCMCpoisson() again. \n") } ## define holder for posterior sample sample <- matrix(data=0, mcmc/thin, dim(X)[2] ) ## marginal likelihood calculation if Laplace if (marginal.likelihood == "Laplace"){ theta.start <- beta.start optim.out <- optim(theta.start, logpost.poisson, method="BFGS", control=list(fnscale=-1), hessian=TRUE, y=Y, X=X, b0=b0, B0=B0) theta.tilde <- optim.out$par beta.tilde <- theta.tilde[1:K] Sigma.tilde <- solve(-1*optim.out$hessian) logmarglike <- (length(theta.tilde)/2)*log(2*pi) + log(sqrt(det(Sigma.tilde))) + logpost.poisson(theta.tilde, Y, X, b0, B0) } posterior <- NULL ## call C++ code to draw sample auto.Scythe.call(output.object="posterior", cc.fun.name="MCMCpoisson", sample.nonconst=sample, Y=Y, X=X, burnin=as.integer(burnin), mcmc=as.integer(mcmc), thin=as.integer(thin), tune=tune, lecuyer=as.integer(lecuyer), seedarray=as.integer(seed.array), lecuyerstream=as.integer(lecuyer.stream), verbose=as.integer(verbose), betastart=beta.start, b0=b0, B0=B0, V=V) ## put together matrix and build MCMC object to return output <- form.mcmc.object(posterior, names=xnames, title="MCMCpoisson Posterior Sample", y=Y, call=cl, logmarglike=logmarglike) return(output) } MCMCpack/R/MCMCordfactanal.R0000644000176000001440000003354012133644103015124 0ustar ripleyusers########################################################################### ## sample from the posterior distribution of a factor analysis model ## model in R using linked C++ code in Scythe. 
## ## The model is: ## ## x*_i = \Lambda \phi_i + \epsilon_i, \epsilon_i \sim N(0, I) ## ## \lambda_{ij} \sim N(l0_{ij}, L0^{-1}_{ij}) ## \phi_i \sim N(0,I) ## ## and x*_i is the latent variable formed from the observed ordinal ## variable in the usual (Albert and Chib, 1993) way. ## ## Andrew D. Martin ## Washington University ## ## Kevin M. Quinn ## Harvard University ## ## This software is distributed under the terms of the GNU GENERAL ## PUBLIC LICENSE Version 2, June 1991. See the package LICENSE ## file for more information. ## ## May 12, 2003 ## Revised to accommodate new spec 7/13/2004 ## ## Copyright (C) 2003-2007 Andrew D. Martin and Kevin M. Quinn ## Copyright (C) 2007-present Andrew D. Martin, Kevin M. Quinn, ## and Jong Hee Park ########################################################################## "MCMCordfactanal" <- function(x, factors, lambda.constraints=list(), data=parent.frame(), burnin = 1000, mcmc = 20000, thin=1, tune=NA, verbose = 0, seed = NA, lambda.start = NA, l0=0, L0=0, store.lambda=TRUE, store.scores=FALSE, drop.constantvars=TRUE, ... ) { ## check for MCMCirtKd special case, this is used to tell the R ## and C++ code what to echo (1 if regular, 2 if MCMCirtKd) ## the test is based on the existence of model="MCMCirtKd" ## passed through ... args <- list(...) if (length(args$model) > 0){ # if model arg is passed if (args$model=="MCMCirtKd"){ case.switch <- 2 echo.name <- "MCMCirtKd" } ## could allow for other possibities here but not clear what they ## would be } else { # if model arg not passed then assume MCMCordfactanal case.switch <- 1 echo.name <- "MCMCordfactanal" } # extract X and variable names from the model formula and frame if (is.matrix(x)){ if (drop.constantvars==TRUE){ x.col.var <- apply(x, 2, var, na.rm=TRUE) keep.inds <- x.col.var>0 keep.inds[is.na(keep.inds)] <- FALSE x <- x[,keep.inds] } X <- as.data.frame(x) xvars <- dimnames(X)[[2]] xobs <- dimnames(X)[[1]] N <- nrow(X) # number of observations K <- ncol(X) # number of manifest variables ncat <- matrix(NA, K, 1) # vector of number of categ. in each man. var. for (i in 1:K){ X[,i] <- factor(X[,i], ordered=TRUE) ncat[i] <- nlevels(X[,i]) X[,i] <- as.integer(X[,i]) X[is.na(X[,i]), i] <- -999 } X <- as.matrix(X) } else { call <- match.call() mt <- terms(x, data=data) if (attr(mt, "response") > 0) stop("Response not allowed in formula in ", echo.name, "().\n") if(missing(data)) data <- sys.frame(sys.parent()) mf <- match.call(expand.dots = FALSE) mf$factors <- mf$lambda.constraints <- mf$burnin <- mf$mcmc <- NULL mf$thin <- mf$tune <- mf$verbose <- mf$seed <- NULL mf$lambda.start <- mf$l0 <- mf$L0 <- NULL mf$store.lambda <- mf$store.scores <- mf$drop.constantvars <- NULL mf$... <- NULL mf$drop.unused.levels <- TRUE mf[[1]] <- as.name("model.frame") mf$na.action <- 'na.pass' mf <- eval(mf, sys.frame(sys.parent())) attributes(mt)$intercept <- 0 Xterm.length <- length(attr(mt, "variables")) X <- subset(mf, select=as.character(attr(mt, "variables"))[2:Xterm.length]) if (drop.constantvars==TRUE){ x.col.var <- apply(X, 2, var, na.rm=TRUE) X <- X[,x.col.var!=0] } N <- nrow(X) # number of observations K <- ncol(X) # number of manifest variables ncat <- matrix(NA, K, 1) # vector of number of categ. in each man. var. 
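## Each manifest variable is coerced to an ordered factor, its number of
## categories is recorded in ncat, and missing values are recoded to -999
## so the C++ sampler can identify them.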
for (i in 1:K){ X[,i] <- factor(X[,i], ordered=TRUE) ncat[i] <- nlevels(X[,i]) X[,i] <- as.integer(X[,i]) X[is.na(X[,i]), i] <- -999 } X <- as.matrix(X) xvars <- dimnames(X)[[2]] # X variable names xobs <- dimnames(X)[[1]] # observation names } ## take care of the case where X has no row names if (is.null(xobs)){ xobs <- 1:N } check.offset(list(...)) check.mcmc.parameters(burnin, mcmc, thin) ## setup constraints on Lambda holder <- build.factor.constraints(lambda.constraints, X, K, factors+1) Lambda.eq.constraints <- holder[[1]] Lambda.ineq.constraints <- holder[[2]] X.names <- holder[[3]] ## setup prior on Lambda holder <- form.factload.norm.prior(l0, L0, K, factors+1, X.names) Lambda.prior.mean <- holder[[1]] Lambda.prior.prec <- holder[[2]] if (case.switch==2){# if MCMCirtKD make it a prior on the diff. param. Lambda.prior.mean[,1] <- Lambda.prior.mean[,1] * -1 } # seeds seeds <- form.seeds(seed) lecuyer <- seeds[[1]] seed.array <- seeds[[2]] lecuyer.stream <- seeds[[3]] ## Starting values for Lambda Lambda <- matrix(0, K, factors+1) if (is.na(lambda.start)){# sets Lambda to equality constraints & 0s for (i in 1:K){ for (j in 1:(factors+1)){ if (Lambda.eq.constraints[i,j]==-999){ if(Lambda.ineq.constraints[i,j]==0){ if (j==1){ if (ncat[i] == 2){ probit.out <- glm(as.factor(X[X[,i]!=-999,i])~1, family=binomial(link=probit)) probit.beta <- coef(probit.out) Lambda[i,j] <- probit.beta[1] } if (ncat[i] > 2){ polr.out <- polr(ordered(X[X[,i]!=-999,i])~1) Lambda[i,j] <- -polr.out$zeta[1]*.588 } } } if(Lambda.ineq.constraints[i,j]>0){ Lambda[i,j] <- 1.0 } if(Lambda.ineq.constraints[i,j]<0){ Lambda[i,j] <- -1.0 } } else Lambda[i,j] <- Lambda.eq.constraints[i,j] } } } else if (is.matrix(lambda.start)){ if (nrow(lambda.start)==K && ncol(lambda.start)==(factors+1)) Lambda <- lambda.start else { cat("Starting values not of correct size for model specification.\n") stop("Please respecify and call ", echo.name, "() again\n") } } else if (length(lambda.start)==1 && is.numeric(lambda.start)){ Lambda <- matrix(lambda.start, K, factors+1) for (i in 1:K){ for (j in 1:(factors+1)){ if (Lambda.eq.constraints[i,j] != -999) Lambda[i,j] <- Lambda.eq.constraints[i,j] } } } else { cat("Starting values neither NA, matrix, nor scalar.\n") stop("Please respecify and call ", echo.name, "() again\n") } ## check MH tuning parameter if (is.na(tune)){ tune <- matrix(NA, K, 1) for (i in 1:K){ tune[i] <- 0.05/ncat[i] } } else if (is.double(tune)){ tune <- matrix(tune/ncat, K, 1) } if(min(tune) < 0) { cat("Tuning parameter is negative.\n") stop("Please respecify and call ", echo.name, "() again\n") } ## starting values for gamma (note: not changeable by user) gamma <- matrix(0, max(ncat)+1, K) for (i in 1:K){ if (ncat[i]<=2){ gamma[1,i] <- -300 gamma[2,i] <- 0 gamma[3,i] <- 300 } if(ncat[i] > 2) { polr.out <- polr(ordered(X[X[,i]!=-999,i])~1) gamma[1,i] <- -300 gamma[2,i] <- 0 gamma[3:ncat[i],i] <- (polr.out$zeta[2:(ncat[i]-1)] - polr.out$zeta[1])*.588 gamma[ncat[i]+1,i] <- 300 } } ## define holder for posterior sample if (store.scores == FALSE && store.lambda == FALSE){ sample <- matrix(data=0, mcmc/thin, length(gamma)) } else if (store.scores == TRUE && store.lambda == FALSE){ sample <- matrix(data=0, mcmc/thin, (factors+1)*N + length(gamma)) } else if(store.scores == FALSE && store.lambda == TRUE) { sample <- matrix(data=0, mcmc/thin, K*(factors+1)+length(gamma)) } else { # store.scores==TRUE && store.lambda==TRUE sample <- matrix(data=0, mcmc/thin, K*(factors+1)+(factors+1)*N + length(gamma)) } accepts <- matrix(0, K, 
1) ## Call the C++ code to do the real work posterior <- .C("ordfactanalpost", samdata = as.double(sample), samrow = as.integer(nrow(sample)), samcol = as.integer(ncol(sample)), X = as.integer(X), Xrow = as.integer(nrow(X)), Xcol = as.integer(ncol(X)), burnin = as.integer(burnin), mcmc = as.integer(mcmc), thin = as.integer(thin), tune = as.double(tune), lecuyer = as.integer(lecuyer), seedarray = as.integer(seed.array), lecuyerstream = as.integer(lecuyer.stream), verbose = as.integer(verbose), Lambda = as.double(Lambda), Lambdarow = as.integer(nrow(Lambda)), Lambdacol = as.integer(ncol(Lambda)), gamma = as.double(gamma), gammarow = as.integer(nrow(gamma)), gammacol = as.integer(ncol(gamma)), ncat = as.integer(ncat), ncatrow = as.integer(nrow(ncat)), ncatcol = as.integer(ncol(ncat)), Lameq = as.double(Lambda.eq.constraints), Lameqrow = as.integer(nrow(Lambda.eq.constraints)), Lameqcol = as.integer(ncol(Lambda.ineq.constraints)), Lamineq = as.double(Lambda.ineq.constraints), Lamineqrow = as.integer(nrow(Lambda.ineq.constraints)), Lamineqcol = as.integer(ncol(Lambda.ineq.constraints)), Lampmean = as.double(Lambda.prior.mean), Lampmeanrow = as.integer(nrow(Lambda.prior.mean)), Lampmeancol = as.integer(ncol(Lambda.prior.prec)), Lampprec = as.double(Lambda.prior.prec), Lampprecrow = as.integer(nrow(Lambda.prior.prec)), Lamppreccol = as.integer(ncol(Lambda.prior.prec)), storelambda = as.integer(store.lambda), storescores = as.integer(store.scores), accepts = as.integer(accepts), acceptsrow = as.integer(nrow(accepts)), acceptscol = as.integer(ncol(accepts)), outswitch = as.integer(case.switch), PACKAGE="MCMCpack" ) if(case.switch==1) { accepts <- matrix(posterior$accepts, posterior$acceptsrow, posterior$acceptscol, byrow=FALSE) rownames(accepts) <- X.names colnames(accepts) <- "" cat("\n\nAcceptance rates:\n") print(t(accepts) / (posterior$burnin+posterior$mcmc), digits=2, width=6) } # put together matrix and build MCMC object to return sample <- matrix(posterior$samdata, posterior$samrow, posterior$samcol, byrow=FALSE) output <- mcmc(data=sample,start=1, end=mcmc, thin=thin) par.names <- NULL if (store.lambda==TRUE){ if(case.switch==1) { Lambda.names <- paste(paste("Lambda", rep(X.names, each=(factors+1)), sep=""), rep(1:(factors+1),K), sep=".") } if(case.switch==2) { alpha.hold <- paste("alpha", X.names, sep=".") beta.hold <- paste("beta", X.names, sep = ".") beta.hold <- rep(beta.hold, factors, each=factors) beta.hold <- paste(beta.hold, 1:factors, sep=".") Lambda.names <- t(cbind(matrix(alpha.hold, K, 1), matrix(beta.hold,K,factors,byrow=TRUE))) dim(Lambda.names) <- NULL } par.names <- c(par.names, Lambda.names) } gamma.names <- paste(paste("gamma", rep(0:(nrow(gamma)-1), each=K), sep=""), rep(X.names, nrow(gamma)), sep=".") par.names <- c(par.names, gamma.names) if (store.scores==TRUE){ if(case.switch==1) { phi.names <- paste(paste("phi", rep(xobs, each=(factors+1)), sep="."), rep(1:(factors+1),(factors+1)), sep=".") par.names <- c(par.names, phi.names) } if(case.switch==2) { phi.names <- paste(paste("theta", rep(xobs, each=(factors+1)), sep="."), rep(0:factors,(factors+1)), sep=".") par.names <- c(par.names, phi.names) } } varnames(output) <- par.names # get rid of columns for constrained parameters output.df <- as.data.frame(as.matrix(output)) output.var <- diag(var(output.df)) output.df <- output.df[,output.var != 0] output <- mcmc(as.matrix(output.df), start=burnin+1, end=burnin+mcmc, thin=thin) # add constraint info so this isn't lost attr(output, "constraints") <- lambda.constraints 
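## In addition to the constraints, the problem dimensions and the
## Metropolis-Hastings acceptance rates are attached as attributes so that
## downstream code can recover them from the returned mcmc object.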
attr(output, "n.manifest") <- K attr(output, "n.factors") <- factors attr(output, "accept.rates") <- t(accepts) / (posterior$burnin+posterior$mcmc) if(case.switch==1) { attr(output,"title") <- "MCMCpack Ordinal Data Factor Analysis Posterior Sample" } if(case.switch==2) { attr(output,"title") <- "MCMCpack K-Dimensional Item Response Theory Model Posterior Sample" } return(output) } MCMCpack/R/MCMCoprobitChange.R0000644000176000001440000001732312133644103015433 0ustar ripleyusers######################################################### ## ## sample from the posterior distribution ## of ordinal probit changepoint regression model ## using a linear Gaussian approximation ## ## JHP 07/01/2007 ## JHP 03/03/2009 ## JHP 09/08/2010 ######################################################### "MCMCoprobitChange"<- function(formula, data=parent.frame(), m = 1, burnin = 1000, mcmc = 1000, thin = 1, tune = NA, verbose = 0, seed = NA, beta.start = NA, gamma.start = NA, P.start = NA, b0 = NULL, B0 = NULL, a = NULL, b = NULL, marginal.likelihood = c("none", "Chib95"), gamma.fixed=0, ...){ ## checks check.offset(list(...)) check.mcmc.parameters(burnin, mcmc, thin) cl <- match.call() nstore <- mcmc/thin ## seeds seeds <- form.seeds(seed) lecuyer <- seeds[[1]] seed.array <- seeds[[2]] lecuyer.stream <- seeds[[3]] totiter <- mcmc+burnin holder <- parse.formula(formula, data=data) y <- holder[[1]] X <- holder[[2]] xnames <- holder[[3]] K <- ncol(X) Y <- factor(y, ordered = TRUE) ncat <- nlevels(Y) cat <- levels(Y) ns <- m + 1 N <- nrow(X) gk <- ncat + 1 if(sum(is.na(tune))==1) { stop("Please specify a tune parameter and call MCMCoprobitChange() again.\n") } else if (length(tune)==1){ tune <- rep(tune, ns) } else if(length(tune)>1&length(tune) 0) { new.X <- X[, -xint, drop = FALSE] } else warning("An intercept is needed and assumed in MCMCoprobitChange()\n.") if (ncol(new.X) == 0) { polr.out <- polr(ordered(Y) ~ 1) } else { polr.out <- polr(ordered(Y) ~ new.X) } ## prior for transition matrix A0 <- trans.mat.prior(m=m, n=N, a=a, b=b) ## prior for beta error checking if(is.null(dim(b0))) { b0 <- b0 * matrix(1,K,1) } if((dim(b0)[1] != K) || (dim(b0)[2] != 1)) { cat("N(b0,B0) prior b0 not conformable.\n") stop("Please respecify and call MCMCoprobitChange() again.\n") } if(is.null(dim(B0))) { B0 <- B0 * diag(K) } if((dim(B0)[1] != K) || (dim(B0)[2] != K)) { cat("N(b0,B0) prior B0 not conformable.\n") stop("Please respecify and call MCMCoprobitChange() again.\n") } marginal.likelihood <- match.arg(marginal.likelihood) B0.eigenvalues <- eigen(B0)$values if (isTRUE(all.equal(min(B0.eigenvalues), 0))){ if (marginal.likelihood != "none"){ warning("Cannot calculate marginal likelihood with improper prior\n") marginal.likelihood <- "none" } } chib <- 0 if (marginal.likelihood == "Chib95"){ chib <- 1 } ## to save time B0inv <- solve(B0) gamma.start <- matrix(NA, ncat + 1, 1) gamma.start[1] <- -300 gamma.start[2] <- 0 gamma.start[3:ncat] <- (polr.out$zeta[2:(ncat - 1)] - polr.out$zeta[1]) * 0.588 gamma.start[ncat + 1] <- 300 ## initial values mle <- polr(Y ~ X[,-1]) beta <- matrix(rep(c(mle$zeta[1], coef(mle)), ns), ns, , byrow=TRUE) ols <- lm(as.double(Y) ~ X-1) betalinearstart <- matrix(rep(coef(ols), ns), ns, , byrow=TRUE) P <- trans.mat.prior(m=m, n=N, a=0.9, b=0.1) Sigmastart <- summary(ols)$sigma if (gamma.fixed==1){ gamma <- gamma.start gamma.storage <-rep(0.0, nstore*gk) } else { gamma <- matrix(rep(gamma.start, ns), ns, , byrow=T) gamma.storage <- rep(0.0, nstore*ns*gk) } ## call C++ code to draw sample posterior <- 
.C("MCMCoprobitChange", betaout = as.double(rep(0.0, nstore*ns*K)), betalinearout = as.double(rep(0.0, nstore*ns*K)), gammaout = as.double(gamma.storage), Pout = as.double(rep(0.0, nstore*ns*ns)), psout = as.double(rep(0.0, N*ns)), sout = as.double(rep(0.0, nstore*N)), Ydata = as.double(Y), Xdata = as.double(X), Xrow = as.integer(nrow(X)), Xcol = as.integer(ncol(X)), m = as.integer(m), ncat = as.integer(ncat), burnin = as.integer(burnin), mcmc = as.integer(mcmc), thin = as.integer(thin), verbose = as.integer(verbose), tunedata = as.double(tune), lecuyer=as.integer(lecuyer), seedarray=as.integer(seed.array), lecuyerstream=as.integer(lecuyer.stream), betastart = as.double(beta), betalinearstart = as.double(betalinearstart), gammastart = as.double(gamma), Pstart = as.double(P), sigmastart = as.double(Sigmastart), a = as.double(a), b = as.double(b), b0data = as.double(b0), B0data = as.double(B0), A0data = as.double(A0), logmarglikeholder = as.double(0.0), loglikeholder = as.double(0.0), chib = as.integer(chib), gammafixed= as.integer(gamma.fixed)) ## get marginal likelihood if Chib95 if (chib==1){ logmarglike <- posterior$logmarglikeholder loglike <- posterior$loglikeholder } else{ logmarglike <- loglike <- 0 } ## pull together matrix and build MCMC object to return beta.holder <- mcmc(matrix(posterior$betaout, nstore, ns*K)) if (gamma.fixed==1){ gamma.holder <- mcmc(matrix(posterior$gammaout, nstore, gk)) } else { gamma.holder <- mcmc(matrix(posterior$gammaout, nstore, ns*gk)) } P.holder <- matrix(posterior$Pout, nstore, ) s.holder <- matrix(posterior$sout, nstore, ) ps.holder <- matrix(posterior$psout, N, ) varnames(beta.holder) <- sapply(c(1:ns), function(i){ paste(c(xnames), "_regime", i, sep = "") }) ## betalinear betalinear.holder <- mcmc(matrix(posterior$betalinearout, nstore, ns*K)) varnames(betalinear.holder) <- sapply(c(1:ns), function(i){ paste(c(xnames), "_regime", i, sep = "") }) gamma.holder <- gamma.holder[, as.vector(sapply(1:ns, function(i){gk*(i-1) + (3:(gk-1))}))] gamma.names <- paste("gamma", 3:(gk-1), sep="") varnames(gamma.holder) <- sapply(c(1:ns), function(i){ paste(gamma.names, "_regime", i, sep = "") }) output <- mcmc(cbind(beta.holder, gamma.holder)) attr(output, "title") <- "MCMCoprobitChange Posterior Sample" ## attr(output, "betalinear") <- mcmc(betalinear.holder) attr(output, "formula") <- formula attr(output, "y") <- Y attr(output, "X") <- X attr(output, "m") <- m attr(output, "call") <- cl attr(output, "logmarglike") <- logmarglike attr(output, "loglike") <- loglike attr(output, "prob.state") <- ps.holder/nstore attr(output, "s.store") <- s.holder return(output) }## end of MCMC function MCMCpack/R/MCMCoprobit.R0000644000176000001440000001460612133644103014326 0ustar ripleyusers########################################################################## ## sample from the posterior distribution of an ordered probit model ## via the data augmentation approach of Cowles (1996) ## ## This software is distributed under the terms of the GNU GENERAL ## PUBLIC LICENSE Version 2, June 1991. See the package LICENSE ## file for more information. ## ## KQ 1/25/2003 ## Modified to meet new developer specification 7/26/2004 KQ ## Modified for new Scythe and rngs 7/26/2004 KQ ## ## Copyright (C) 2003-2007 Andrew D. Martin and Kevin M. Quinn ## Copyright (C) 2007-present Andrew D. Martin, Kevin M. 
Quinn, ## and Jong Hee Park ########################################################################## "MCMCoprobit" <- function(formula, data = parent.frame(), burnin = 1000, mcmc = 10000, thin = 1, tune = NA, tdf = 1, verbose = 0, seed = NA, beta.start = NA, b0 = 0, B0 = 0, a0 = 0, A0 = 0, mcmc.method = c("Cowles", "AC"), ...) { ## checks check.offset(list(...)) check.mcmc.parameters(burnin, mcmc, thin) ## seeds seeds <- form.seeds(seed) lecuyer <- seeds[[1]] seed.array <- seeds[[2]] lecuyer.stream <- seeds[[3]] ## extract X, Y, and variable names from the model formula and frame call <- match.call() mt <- terms(formula, data=data) if(missing(data)) data <- sys.frame(sys.parent()) mf <- match.call(expand.dots = FALSE) mf$burnin <- mf$mcmc <- mf$b0 <- mf$B0 <- mf$a0 <- mf$A0 <- NULL mf$thin <- mf$... <- mf$tune <- mf$tdf <- mf$verbose <- mf$seed <- NULL mf$beta.start <- mf$mcmc.method <- NULL mf$drop.unused.levels <- TRUE mf[[1]] <- as.name("model.frame") mf <- eval(mf, sys.frame(sys.parent())) vars <- as.character(attr(mt, "variables"))[-1] # y varname and x varnames ## null model support X <- if (!is.empty.model(mt)) model.matrix(mt, mf, contrasts)# else NULL X.names <- dimnames(X)[[2]] Y <- model.response(mf, "numeric") Y <- factor(Y, ordered=TRUE) ncat <- nlevels(Y) # number of categories of y cat <- levels(Y) # values of categories of y N <- nrow(X) # number of observations K <- ncol(X) # number of covariates if (length(Y) != N){ cat("X and Y do not have same number of rows.\n") stop("Please respecify and call MCMCoprobit() again.\n") } ## convert data to matrices to be passed Y <- as.matrix(as.integer(Y)) X <- as.matrix(X) ## check tuning parameter if (is.na(tune)){ tune <- 0.05/ncat } xint <- match("(Intercept)", colnames(X), nomatch=0) if (xint > 0){ new.X <- X[, -xint, drop=FALSE] } else warning("An intercept is needed and assumed in MCMCoprobit()\n.") if (ncol(new.X) == 0){ polr.out <- polr(ordered(Y)~1) } else { polr.out <- polr(ordered(Y)~new.X) } ## starting values for beta error checking if (is.na(beta.start)){ beta.start <- matrix(0, K, 1) beta.start[1] <- -.588 * polr.out$zeta[1] if( ncol(new.X) > 0){ beta.start[2:K] <- .588 * coef(polr.out) } } else if(is.null(dim(beta.start))) { beta.start <- beta.start * matrix(1,K,1) } else if((dim(beta.start)[1] != K) || (dim(beta.start)[2] != 1)) { cat("Starting value for beta not conformable.\n") stop("Please respecify and call MCMCoprobit() again.\n") } ## prior for beta error checking if(is.null(dim(b0))) { b0 <- b0 * matrix(1,K,1) } if((dim(b0)[1] != K) || (dim(b0)[2] != 1)) { cat("N(b0,B0) prior b0 not conformable.\n") stop("Please respecify and call MCMCoprobit() again.\n") } if(is.null(dim(B0))) { B0 <- B0 * diag(K) } if((dim(B0)[1] != K) || (dim(B0)[2] != K)) { cat("N(b0,B0) prior B0 not conformable.\n") stop("Please respecify and call MCMCoprobit() again.\n") } ## prior for alpha error checking if(is.null(dim(a0))) { a0 <- a0 * matrix(1, ncat-1, 1) } if((dim(a0)[1] != ncat-1) || (dim(a0)[2] != 1)) { cat("N(a0,A0) prior a0 not conformable.\n") stop("Please respecify and call MCMCoprobit() again.\n") } if(is.null(dim(A0))) { A0 <- A0 + diag(ncat - 1) } if((dim(A0)[1] != ncat - 1) || (dim(A0)[2] != ncat - 1)) { cat("N(a0, A0) prior A0 not conformable.\n") stop("Please respecify and call MCMCoprobit() again.\n") } ## form gamma starting values (note: not changeable) gamma <- matrix(NA,ncat+1,1) gamma[1] <- -300 gamma[2] <- 0 gamma[3:ncat] <- (polr.out$zeta[2:(ncat-1)] - polr.out$zeta[1])*.588 gamma[ncat+1] <- 300 ## posterior 
sample sample <- matrix(data=0, mcmc/thin, K + ncat + 1) ## call C++ code to draw sample nY <- as.matrix(as.numeric(Y)) ## mcmc.method cowles <- as.integer(1) if(mcmc.method[1]=="AC") {cowles <- as.integer(0)} ## form the tuning parameter tune <- vector.tune(tune, ncat-1) posterior <- NULL auto.Scythe.call(output.object="posterior", cc.fun.name="MCMCoprobit", sample.nonconst=sample, Y=as.integer(Y), nY=nY, X=X, burnin=as.integer(burnin), mcmc=as.integer(mcmc), thin=as.integer(thin), tune=tune, tdf=as.double(tdf), lecuyer=as.integer(lecuyer), seedarray=as.integer(seed.array), lecuyerstream=as.integer(lecuyer.stream), verbose=as.integer(verbose), beta=beta.start, gamma=gamma, b0=b0, B0=B0, a0=a0, A0=A0, cowles=as.integer(cowles)) ## put together matrix and build MCMC object to return sample <- matrix(posterior$sampledata, posterior$samplerow, posterior$samplecol, byrow=FALSE) if(mcmc.method[1]=="AC"){ sample[ , 1] <- sample[, 1] - sample[, K+2] ## post-MCMC normalization sample[ , (K+2):(K+ncat)] <- sample[ , (K+2):(K+ncat)] - sample[, K+2] ## post-MCMC normalization } sample <- sample[, c(1:K, (K+3):(K+ncat))] output <- mcmc(data=sample, start=burnin+1, end=burnin+mcmc, thin=thin) xnames <- c(X.names, paste("gamma", 2:(ncat-1), sep="")) varnames(output) <- xnames attr(output, "title") <- "MCMCoprobit Posterior Sample" return(output) } MCMCpack/R/MCMCmnl.R0000644000176000001440000003121712133644103013433 0ustar ripleyusers########################################################################## ## MCMCmnl.R samples from the posterior distribution of a multinomial ## logit model using Metropolis-Hastings. ## ## This software is distributed under the terms of the GNU GENERAL ## PUBLIC LICENSE Version 2, June 1991. See the package LICENSE ## file for more information. ## ## KQ 12/22/2004 ## ## Copyright (C) 2003-2007 Andrew D. Martin and Kevin M. Quinn ## Copyright (C) 2007-present Andrew D. Martin, Kevin M. 
Quinn, ## and Jong Hee Park ########################################################################## ## parse formula and return a list that contains the model response ## matrix as element one, the model matrix as element two, ## the column names of X as element three, the rownames of ## X and y as element four, and the number of choices in the largest ## choice set in element five "parse.formula.mnl" <- function(formula, data, baseline=NULL, intercept=TRUE){ ## extract Y, X, and variable names for model formula and frame mt <- terms(formula, data=data) if(missing(data)) data <- sys.frame(sys.parent()) mf <- match.call(expand.dots = FALSE) mf$intercept <- mf$baseline <- NULL mf$drop.unused.levels <- TRUE mf[[1]] <- as.name("model.frame") mf <- eval(mf, sys.frame(sys.parent())) if (!intercept){ attributes(mt)$intercept <- 0 } ## deal with Y Y <- as.matrix(model.response(mf, "numeric")) # Y matrix if (ncol(Y)==1){ Y <- factor(Y) number.choices <- length(unique(Y)) choice.names <- sort(unique(Y)) Ymat <- matrix(NA, length(Y), number.choices) colnames(Ymat) <- choice.names for (i in 1:(number.choices)){ Ymat[,i] <- as.numeric(Y==choice.names[i]) } } else{ ## this block will allow for nonconstant choice sets number.choices <- ncol(Y) Ytemp <- Y Ytemp[Y== -999] <- NA if ( min(unique(array(Y)) %in% c(-999,0,1))==0 || min(apply(Ytemp, 1, sum, na.rm=TRUE) == rep(1, nrow(Y)))==0){ stop("Y is a matrix but it is not composed of 0/1/-999 values\n and/or rows do not sum to 1\n") } Ymat <- Y choice.names <- colnames(Y) } colnames(Ymat) <- choice.names rownames(Ymat) <- 1:nrow(Ymat) #Y.long <- matrix(t(Ymat), length(Ymat), 1) #colnames(Y.long) <- "Y" #rownames(Y.long) <- rep(1:nrow(Ymat), rep(length(choice.names), nrow(Ymat))) #rownames(Y.long) <- paste(rownames(Y.long), choice.names, sep=".") #group.id <- rep(1:nrow(Ymat), rep(ncol(Ymat), nrow(Ymat))) ## deal with X ## null model support X <- if (!is.empty.model(mt)) model.matrix(mt, mf, contrasts) X <- as.matrix(X) # X matrix xvars <- dimnames(X)[[2]] # X variable names xobs <- dimnames(X)[[1]] # X observation names if (is.null(baseline)){ baseline <- choice.names[1] } if (! baseline %in% choice.names){ stop("'baseline' not consistent with observed choice levels in y\n") } ## deal with choice specific covariates choicevar.indic <- rep(FALSE, length(xvars)) # indicators for choice # specific variables choicevar.indic[grep("^choicevar\\(", xvars)] <- TRUE if (sum(choicevar.indic) > 0){ cvarname1.vec <- rep(NA, sum(choicevar.indic)) cvarname2.vec <- rep(NA, sum(choicevar.indic)) counter <- 0 for (i in 1:length(xvars)){ if (choicevar.indic[i]){ counter <- counter + 1 vn2 <- strsplit(xvars[i], ",") vn3 <- strsplit(vn2[[1]], "\\(") vn4 <- strsplit(vn3[[3]], "=") cvarname1 <- vn3[[2]][1] cvarname1 <- strsplit(cvarname1, "\"")[[1]] cvarname1 <- cvarname1[length(cvarname1)] cvarname2 <- vn4[[1]][length(vn4[[1]])] cvarname2 <- strsplit(cvarname2, "\"")[[1]][2] if (! 
cvarname2 %in% choice.names){ stop("choicelevel that was set in choicevar() not consistent with\n observed choice levels in y") } cvarname1.vec[counter] <- cvarname1 cvarname2.vec[counter] <- cvarname2 xvars[i] <- paste(cvarname1, cvarname2, sep=".") } } X.cho <- X[, choicevar.indic] X.cho.mat <- matrix(NA, length(choice.names)*nrow(X), length(unique(cvarname1.vec))) rownames(X.cho.mat) <- rep(rownames(X), rep(length(choice.names), nrow(X))) rownames(X.cho.mat) <- paste(rownames(X.cho.mat), choice.names, sep=".") colnames(X.cho.mat) <- unique(cvarname1.vec) choice.names.n <- rep(choice.names, nrow(X)) for (j in 1:length(unique(cvarname1.vec))){ for (i in 1:length(cvarname2.vec)){ if (colnames(X.cho.mat)[j] == cvarname1.vec[i]){ X.cho.mat[choice.names.n==cvarname2.vec[i], j] <- X.cho[,i] } } } } ## deal with individual specific covariates xvars.ind.mat <- rep(xvars[!choicevar.indic], rep(length(choice.names), sum(!choicevar.indic))) xvars.ind.mat <- paste(xvars.ind.mat, choice.names, sep=".") if (sum(!choicevar.indic) > 0){ X.ind.mat <- X[,!choicevar.indic] %x% diag(length(choice.names)) colnames(X.ind.mat) <- xvars.ind.mat rownames(X.ind.mat) <- rep(rownames(X), rep(length(choice.names), nrow(X))) rownames(X.ind.mat) <- paste(rownames(X.ind.mat), choice.names, sep=".") ## delete columns correpsonding to the baseline choice ivarname1 <- strsplit(xvars.ind.mat, "\\.") ivar.keep.indic <- rep(NA, ncol(X.ind.mat)) for (i in 1:ncol(X.ind.mat)){ ivar.keep.indic[i] <- ivarname1[[i]][length(ivarname1[[i]])] != baseline } X.ind.mat <- X.ind.mat[,ivar.keep.indic] } if (sum(choicevar.indic) > 0 & sum(!choicevar.indic) > 0){ X <- cbind(X.cho.mat, X.ind.mat) } else if (sum(!choicevar.indic) > 0){ X <- X.ind.mat } else if (sum(choicevar.indic) > 0){ X <- X.cho.mat } else { stop("X matrix appears to have neither choice-specific nor individual-specific variables.\n") } #Y <- Y.long xvars <- colnames(X) xobs <- rownames(X) return(list(Ymat, X, xvars, xobs, number.choices)) } ## dummy function used to handle choice-specific covariates "choicevar" <- function(var, varname, choicelevel){ junk1 <- varname junk2 <- choicelevel return(var) } ## MNL log-posterior function (used to get starting values) ## vector Y without NAs "mnl.logpost.noNA" <- function(beta, new.Y, X, b0, B0){ nobs <- length(new.Y) ncat <- nrow(X) / nobs Xb <- X %*% beta Xb <- matrix(Xb, byrow=TRUE, ncol=ncat) indices <- cbind(1:nobs, new.Y) Xb.reform <- Xb[indices] eXb <- exp(Xb) #denom <- log(apply(eXb, 1, sum)) z <- rep(1, ncat) denom <- log(eXb %*% z) log.prior <- 0.5 * t(beta - b0) %*% B0 %*% (beta - b0) return(sum(Xb.reform - denom) + log.prior) } ## MNL log-posterior function (used to get starting values) ## matrix Y with NAs "mnl.logpost.NA" <- function(beta, Y, X, b0, B0){ k <- ncol(X) numer <- exp(X %*% beta) numer[Y== -999] <- NA numer.mat <- matrix(numer, nrow(Y), ncol(Y), byrow=TRUE) denom <- apply(numer.mat, 1, sum, na.rm=TRUE) choice.probs <- numer.mat / denom Yna <- Y Yna[Y == -999] <- NA log.like.mat <- log(choice.probs) * Yna log.like <- sum(apply(log.like.mat, 1, sum, na.rm=TRUE)) log.prior <- 0.5 * t(beta - b0) %*% B0 %*% (beta - b0) return(log.like + log.prior) } "MCMCmnl" <- function(formula, baseline=NULL, data=NULL, burnin = 1000, mcmc = 10000, thin=1, mcmc.method = c("IndMH", "RWM", "slice"), tune = 1.0, tdf=6, verbose = 0, seed = NA, beta.start = NA, b0 = 0, B0 = 0, ...) 
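## Illustrative usage sketch (not part of the original source; 'df', the
## response 'choice', and the alternative-specific cost variables are
## hypothetical):
##   post <- MCMCmnl(choice ~ age + choicevar(cost.bus, "cost", "bus")
##                   + choicevar(cost.car, "cost", "car"),
##                   baseline = "car", data = df, mcmc.method = "IndMH")
##   summary(post)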
{ ## checks check.offset(list(...)) check.mcmc.parameters(burnin, mcmc, thin) if (tdf <= 0){ stop("degrees of freedom for multivariate-t proposal must be positive.\n Respecify tdf and try again.\n") } ## seeds seeds <- form.seeds(seed) lecuyer <- seeds[[1]] seed.array <- seeds[[2]] lecuyer.stream <- seeds[[3]] ## form response and model matrix holder <- parse.formula.mnl(formula=formula, baseline=baseline, data=data) Y <- holder[[1]] ## check to make sure baseline category is always available in choiceset if (is.null(baseline)){ if (max(Y[,1] == -999) == 1){ stop("Baseline choice not available in all choicesets.\n Respecify baseline category and try again.\n") } } else{ if (max(Y[,baseline] == -999) == 1){ stop("Baseline choice not available in all choicesets.\n Respecify baseline category and try again.\n") } } X <- holder[[2]] xnames <- holder[[3]] xobs <- holder[[4]] number.choices <- holder[[5]] K <- ncol(X) # number of covariates ## form the tuning parameter tune <- vector.tune(tune, K) ## priors and starting values mvn.prior <- form.mvn.prior(b0, B0, K) b0 <- mvn.prior[[1]] B0 <- mvn.prior[[2]] beta.init <- rep(0, K) cat("Calculating MLEs and large sample var-cov matrix.\n") cat("This may take a moment...\n") if (max(is.na(Y))){ optim.out <- optim(beta.init, mnl.logpost.NA, method="BFGS", control=list(fnscale=-1), hessian=TRUE, Y=Y, X=X, b0=b0, B0=B0) } else{ new.Y <- apply(Y==1, 1, which) optim.out <- optim(beta.init, mnl.logpost.noNA, method="BFGS", control=list(fnscale=-1), hessian=TRUE, new.Y=new.Y, X=X, b0=b0, B0=B0) } cat("Inverting Hessian to get large sample var-cov matrix.\n") ##V <- solve(-1*optim.out$hessian) V <- chol2inv(chol(-1*optim.out$hessian)) beta.mode <- matrix(optim.out$par, K, 1) if (is.na(beta.start) || is.null(beta.start)){ beta.start <- matrix(optim.out$par, K, 1) } else if(is.null(dim(beta.start))) { beta.start <- matrix(beta.start, K, 1) } else if (length(beta.start != K)){ stop("beta.start not of appropriate dimension\n") } ## define holder for posterior sample sample <- matrix(data=0, mcmc/thin, dim(X)[2] ) posterior <- NULL if (mcmc.method=="RWM"){ ## call C++ code to draw sample auto.Scythe.call(output.object="posterior", cc.fun.name="MCMCmnlMH", sample.nonconst=sample, Y=Y, X=X, burnin=as.integer(burnin), mcmc=as.integer(mcmc), thin=as.integer(thin), tune=tune, lecuyer=as.integer(lecuyer), seedarray=as.integer(seed.array), lecuyerstream=as.integer(lecuyer.stream), verbose=as.integer(verbose), betastart=beta.start, betamode=beta.mode, b0=b0, B0=B0, V=V, RW=as.integer(1), tdf=as.double(tdf)) ## put together matrix and build MCMC object to return output <- form.mcmc.object(posterior, names=xnames, title="MCMCmnl Posterior Sample") } else if (mcmc.method=="IndMH"){ auto.Scythe.call(output.object="posterior", cc.fun.name="MCMCmnlMH", sample.nonconst=sample, Y=Y, X=X, burnin=as.integer(burnin), mcmc=as.integer(mcmc), thin=as.integer(thin), tune=tune, lecuyer=as.integer(lecuyer), seedarray=as.integer(seed.array), lecuyerstream=as.integer(lecuyer.stream), verbose=as.integer(verbose), betastart=beta.start, betamode=beta.mode, b0=b0, B0=B0, V=V, RW=as.integer(0), tdf=as.double(tdf)) ## put together matrix and build MCMC object to return output <- form.mcmc.object(posterior, names=xnames, title="MCMCmnl Posterior Sample") } else if (mcmc.method=="slice"){ ## call C++ code to draw sample auto.Scythe.call(output.object="posterior", cc.fun.name="MCMCmnlslice", sample.nonconst=sample, Y=Y, X=X, burnin=as.integer(burnin), mcmc=as.integer(mcmc), thin=as.integer(thin), 
lecuyer=as.integer(lecuyer), seedarray=as.integer(seed.array), lecuyerstream=as.integer(lecuyer.stream), verbose=as.integer(verbose), betastart=beta.start, b0=b0, B0=B0, V=V) ## put together matrix and build MCMC object to return output <- form.mcmc.object(posterior, names=xnames, title="MCMCmnl Posterior Sample") } return(output) } MCMCpack/R/MCMCmixfactanal.R0000644000176000001440000003257412133644103015143 0ustar ripleyusers########################################################################## ## sample from the posterior distribution of a factor analysis model ## model in R using linked C++ code in Scythe. ## ## The model is: ## ## x*_i = \Lambda \phi_i + \epsilon_i, \epsilon_i \sim N(0, \Psi) ## ## \lambda_{ij} \sim N(l0_{ij}, L0^{-1}_{ij}) ## \phi_i \sim N(0,I) ## ## and x*_i is the latent variable formed from the observed ordinal ## variable in the usual (Albert and Chib, 1993) way and is equal to ## x_i when x_i is continuous. When x_j is ordinal \Psi_jj is assumed ## to be 1. ## ## Andrew D. Martin ## Washington University ## ## Kevin M. Quinn ## Harvard University ## ## This software is distributed under the terms of the GNU GENERAL ## PUBLIC LICENSE Version 2, June 1991. See the package LICENSE ## file for more information. ## ## 12/2/2003 ## Revised to accommodate new spec 7/20/2004 ## Minor bug fix regarding std.mean 6/25/2004 ## ## Copyright (C) 2003-2007 Andrew D. Martin and Kevin M. Quinn ## Copyright (C) 2007-present Andrew D. Martin, Kevin M. Quinn, ## and Jong Hee Park ########################################################################## "MCMCmixfactanal" <- function(x, factors, lambda.constraints=list(), data=parent.frame(), burnin = 1000, mcmc = 20000, thin=1, tune=NA, verbose = 0, seed = NA, lambda.start = NA, psi.start=NA, l0=0, L0=0, a0=0.001, b0=0.001, store.lambda=TRUE, store.scores=FALSE, std.mean=TRUE, std.var=TRUE, ... ) { call <- match.call() echo.name <- NULL mt <- terms(x, data=data) if (attr(mt, "response") > 0) stop("Response not allowed in formula in MCMCmixfactanal().\n") if(missing(data)) data <- sys.frame(sys.parent()) mf <- match.call(expand.dots = FALSE) mf$factors <- mf$lambda.constraints <- mf$burnin <- mf$mcmc <- NULL mf$thin <- mf$tune <- mf$verbose <- mf$seed <- NULL mf$lambda.start <- mf$l0 <- mf$L0 <- mf$a0 <- mf$b0 <- NULL mf$store.lambda <- mf$store.scores <- mf$std.mean <- NULL mf$std.var <- mf$... <- NULL mf$drop.unused.levels <- TRUE mf[[1]] <- as.name("model.frame") mf$na.action <- 'na.pass' mf <- eval(mf, sys.frame(sys.parent())) attributes(mt)$intercept <- 0 Xterm.length <- length(attr(mt, "variables")) X <- subset(mf, select=as.character(attr(mt, "variables"))[2:Xterm.length]) N <- nrow(X) # number of observations K <- ncol(X) # number of manifest variables ncat <- matrix(NA, K, 1) # vector of number of categ. in each man. var. 
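## Illustrative usage sketch for the MCMCmixfactanal() sampler being defined
## here (added comment; not from the original sources, not run). The data
## frame `df`, its variable names, and the single "+" loading constraint are
## hypothetical; the formula carries no response, and manifest variables are
## numeric or ordered factors, as checked above.
##   df <- data.frame(cont = rnorm(200),
##                    ord1 = ordered(sample(1:3, 200, replace = TRUE)),
##                    ord2 = ordered(sample(1:4, 200, replace = TRUE)))
##   post <- MCMCmixfactanal(~cont + ord1 + ord2, factors = 1, data = df,
##                           lambda.constraints = list(ord1 = list(2, "+")),
##                           burnin = 1000, mcmc = 5000, thin = 5,
##                           verbose = 500)
##   summary(post)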
for (i in 1:K){ if (is.numeric(X[,i])){ ncat[i] <- -999 X[is.na(X[,i]), i] <- -999 } else if (is.ordered(X[, i])) { ncat[i] <- nlevels(X[, i]) temp <- as.integer(X[,i]) temp <- ifelse(is.na(X[,i]) | (X[,i] == ""), -999, temp) X[, i] <- temp } else { stop("Manifest variable ", dimnames(X)[[2]][i], " neither ordered factor nor numeric variable.\n") } } X <- as.matrix(X) xvars <- dimnames(X)[[2]] # X variable names xobs <- dimnames(X)[[1]] # observation names if (is.null(xobs)){ xobs <- 1:N } # standardize X if (std.mean){ for (i in 1:K){ if (ncat[i] == -999){ X[,i] <- X[,i]-mean(X[,i]) } } } if (std.var){ for (i in 1:K){ if (ncat[i] == -999){ X[,i] <- (X[,i] - mean(X[,i]))/sd(X[,i]) + mean(X[,i]) } } } n.ord.ge3 <- 0 for (i in 1:K) if (ncat[i] >= 3) n.ord.ge3 <- n.ord.ge3 + 1 check.mcmc.parameters(burnin, mcmc, thin) ## setup constraints on Lambda holder <- build.factor.constraints(lambda.constraints, X, K, factors+1) Lambda.eq.constraints <- holder[[1]] Lambda.ineq.constraints <- holder[[2]] X.names <- holder[[3]] ## if subtracting out the mean of continuous X then constrain ## the mean parameter to 0 for (i in 1:K){ if (ncat[i] < 2 && std.mean==TRUE){ if ((Lambda.eq.constraints[i,1] == -999 || Lambda.eq.constraints[i,1] == 0.0) && Lambda.ineq.constraints[i,1] == 0.0){ Lambda.eq.constraints[i,1] <- 0.0 } else { cat("Constraints on Lambda are logically\ninconsistent with std.mean==TRUE.\n") stop("Please respecify and call MCMCmixfactanal() again\n") } } } ## setup and check prior on Psi holder <- form.ig.diagmat.prior(a0, b0, K) a0 <- holder[[1]] b0 <- holder[[2]] ## setup prior on Lambda holder <- form.factload.norm.prior(l0, L0, K, factors+1, X.names) Lambda.prior.mean <- holder[[1]] Lambda.prior.prec <- holder[[2]] # seeds seeds <- form.seeds(seed) lecuyer <- seeds[[1]] seed.array <- seeds[[2]] lecuyer.stream <- seeds[[3]] # Starting values for Lambda Lambda <- matrix(0, K, factors+1) if (is.na(lambda.start)){# sets Lambda to equality constraints & 0s for (i in 1:K){ for (j in 1:(factors+1)){ if (Lambda.eq.constraints[i,j]==-999){ if(Lambda.ineq.constraints[i,j]==0){ if (j==1){ if (ncat[i] < 2){ Lambda[i,j] <- mean(X[,i]!=-999) } if (ncat[i] == 2){ probit.out <- glm(as.factor(X[X[,i]!=-999,i])~1, family=binomial(link=probit)) probit.beta <- coef(probit.out) Lambda[i,j] <- probit.beta[1] } if (ncat[i] > 2){ polr.out <- polr(ordered(X[X[,i]!=-999,i])~1) Lambda[i,j] <- -polr.out$zeta[1]*.588 } } } if(Lambda.ineq.constraints[i,j]>0){ Lambda[i,j] <- 1.0 } if(Lambda.ineq.constraints[i,j]<0){ Lambda[i,j] <- -1.0 } } else Lambda[i,j] <- Lambda.eq.constraints[i,j] } } } else if (is.matrix(lambda.start)){ if (nrow(lambda.start)==K && ncol(lambda.start)==(factors+1)) Lambda <- lambda.start else { cat("Starting values not of correct size for model specification.\n") stop("Please respecify and call ", echo.name, "() again\n") } } else if (length(lambda.start)==1 && is.numeric(lambda.start)){ Lambda <- matrix(lambda.start, K, factors+1) for (i in 1:K){ for (j in 1:(factors+1)){ if (Lambda.eq.constraints[i,j] != -999) Lambda[i,j] <- Lambda.eq.constraints[i,j] } } } else { cat("Starting values neither NA, matrix, nor scalar.\n") stop("Please respecify and call ", echo.name, "() again\n") } # check MH tuning parameter if (is.na(tune)){ tune <- matrix(NA, K, 1) for (i in 1:K){ tune[i] <- abs(0.05/ncat[i]) } } else if (is.double(tune)){ tune <- matrix(abs(tune/ncat), K, 1) } # starting values for gamma (note: not changeable by user) if (max(ncat) <= 2){ gamma <- matrix(0, 3, K) } else { gamma <- matrix(0, 
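## Note (added comment, an interpretation): the 0.588 factor applied to the
## polr() cutpoints above and in the gamma starting values below appears to
## be the usual logistic-to-probit rescaling (roughly 1/1.7), since polr()
## fits on the logit scale while the latent-variable model here is probit.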
max(ncat)+1, K) } for (i in 1:K){ if (ncat[i]<=2){ gamma[1,i] <- -300 gamma[2,i] <- 0 gamma[3,i] <- 300 } if(ncat[i] > 2) { polr.out <- polr(ordered(X[X[,i]!=-999,i])~1) gamma[1,i] <- -300 gamma[2,i] <- 0 gamma[3:ncat[i],i] <- (polr.out$zeta[2:(ncat[i]-1)] - polr.out$zeta[1])*.588 gamma[ncat[i]+1,i] <- 300 } } ## starting values for Psi Psi <- factuniqueness.start(psi.start, X) for (i in 1:K){ if (ncat[i] >= 2){ Psi[i,i] <- 1.0 } } # define holder for posterior sample if (store.scores == FALSE && store.lambda == FALSE){ sample <- matrix(data=0, mcmc/thin, length(gamma)+K) } else if (store.scores == TRUE && store.lambda == FALSE){ sample <- matrix(data=0, mcmc/thin, (factors+1)*N + length(gamma)+K) } else if(store.scores == FALSE && store.lambda == TRUE) { sample <- matrix(data=0, mcmc/thin, K*(factors+1)+length(gamma)+K) } else { # store.scores==TRUE && store.lambda==TRUE sample <- matrix(data=0, mcmc/thin, K*(factors+1)+(factors+1)*N + length(gamma)+K) } accepts <- matrix(0, K, 1) # Call the C++ code to do the real work posterior <- NULL posterior <- .C("mixfactanalpost", samdata = as.double(sample), samrow = as.integer(nrow(sample)), samcol = as.integer(ncol(sample)), X = as.double(X), Xrow = as.integer(nrow(X)), Xcol = as.integer(ncol(X)), burnin = as.integer(burnin), mcmc = as.integer(mcmc), thin = as.integer(thin), tune = as.double(tune), lecuyer = as.integer(lecuyer), seedarray = as.integer(seed.array), lecuyerstream = as.integer(lecuyer.stream), verbose = as.integer(verbose), Lambda = as.double(Lambda), Lambdarow = as.integer(nrow(Lambda)), Lambdacol = as.integer(ncol(Lambda)), gamma = as.double(gamma), gammarow = as.integer(nrow(gamma)), gammacol = as.integer(ncol(gamma)), Psi = as.double(Psi), Psirow = as.integer(nrow(Psi)), Psicol = as.integer(ncol(Psi)), ncat = as.integer(ncat), ncatrow = as.integer(nrow(ncat)), ncatcol = as.integer(ncol(ncat)), Lameq = as.double(Lambda.eq.constraints), Lameqrow = as.integer(nrow(Lambda.eq.constraints)), Lameqcol = as.integer(ncol(Lambda.ineq.constraints)), Lamineq = as.double(Lambda.ineq.constraints), Lamineqrow = as.integer(nrow(Lambda.ineq.constraints)), Lamineqcol = as.integer(ncol(Lambda.ineq.constraints)), Lampmean = as.double(Lambda.prior.mean), Lampmeanrow = as.integer(nrow(Lambda.prior.mean)), Lampmeancol = as.integer(ncol(Lambda.prior.prec)), Lampprec = as.double(Lambda.prior.prec), Lampprecrow = as.integer(nrow(Lambda.prior.prec)), Lamppreccol = as.integer(ncol(Lambda.prior.prec)), a0 = as.double(a0), a0row = as.integer(nrow(a0)), a0col = as.integer(ncol(a0)), b0 = as.double(b0), b0row = as.integer(nrow(b0)), b0col = as.integer(ncol(b0)), storelambda = as.integer(store.lambda), storescores = as.integer(store.scores), accepts = as.integer(accepts), acceptsrow = as.integer(nrow(accepts)), acceptscol = as.integer(ncol(accepts)), PACKAGE="MCMCpack" ) accepts <- matrix(posterior$accepts, posterior$acceptsrow, posterior$acceptscol, byrow=TRUE) rownames(accepts) <- X.names colnames(accepts) <- "" cat("\n\nAcceptance rates:\n") print(t(accepts) / (posterior$burnin+posterior$mcmc), digits=2, width=6) # put together matrix and build MCMC object to return sample <- matrix(posterior$samdata, posterior$samrow, posterior$samcol, byrow=FALSE) output <- mcmc(data=sample,start=1, end=mcmc, thin=thin) par.names <- NULL if (store.lambda==TRUE){ Lambda.names <- paste(paste("Lambda", rep(X.names, each=(factors+1)), sep=""), rep(1:(factors+1),K), sep=".") par.names <- c(par.names, Lambda.names) } gamma.names <- paste(paste("gamma", rep(0:(nrow(gamma)-1), 
each=K), sep=""), rep(X.names, nrow(gamma)), sep=".") par.names <- c(par.names, gamma.names) if (store.scores==TRUE){ phi.names <- paste(paste("phi", rep(xobs, each=(factors+1)), sep="."), rep(1:(factors+1),(factors+1)), sep=".") par.names <- c(par.names, phi.names) } Psi.names <- paste("Psi", X.names, sep=".") par.names <- c(par.names, Psi.names) varnames(output) <- par.names # get rid of columns for constrained parameters output.df <- as.data.frame(as.matrix(output)) output.var <- diag(var(output.df)) output.df <- output.df[,output.var != 0] output <- mcmc(as.matrix(output.df), start=burnin+1, end=burnin+mcmc, thin=thin) # add constraint info so this isn't lost attr(output, "constraints") <- lambda.constraints attr(output, "n.manifest") <- K attr(output, "n.factors") <- factors attr(output, "accept.rates") <- t(accepts) / (posterior$burnin+posterior$mcmc) attr(output,"title") <- "MCMCpack Mixed Data Factor Analysis Posterior Sample" return(output) } MCMCpack/R/MCMCmetrop1R.R0000644000176000001440000001120412133644103014350 0ustar ripleyusers########################################################################## ## samples from a user-written posterior coded in R using a ## random walk Metropolis algorithm ## ## KQ 6/24/2004 ## ## This software is distributed under the terms of the GNU GENERAL ## PUBLIC LICENSE Version 2, June 1991. See the package LICENSE ## file for more information. ## ## modified to work with non-invertible Hessian KQ 6/28/2005 ## ## changed the method used to pass additional arguments to the user-defined ## function KQ 8/15/2005 ## ## changed to allow more user control of optim KQ 6/18/2006 ## ## Copyright (C) 2003-2007 Andrew D. Martin and Kevin M. Quinn ## Copyright (C) 2007-present Andrew D. Martin, Kevin M. Quinn, ## and Jong Hee Park ########################################################################## "MCMCmetrop1R" <- function(fun, theta.init, burnin=500, mcmc=20000, thin=1, tune=1, verbose=0, seed=NA, logfun=TRUE, force.samp=FALSE, V=NULL, optim.method="BFGS", optim.lower= -Inf, optim.upper= Inf, optim.control=list(fnscale=-1, trace=0, REPORT=10, maxit=500), ...){ ## error checking here check.offset(list(...)) check.mcmc.parameters(burnin, mcmc, thin) ## form the tuning vector tune <- vector.tune(tune, length(theta.init)) ## form seed seeds <- form.seeds(seed) lecuyer <- seeds[[1]] seed.array <- seeds[[2]] lecuyer.stream <- seeds[[3]] ## setup the environment so that fun can see the things passed as ... userfun <- function(ttt) fun(ttt, ...) my.env <- environment(fun = userfun) ## setup function for maximization based on value of logfun if (logfun){ maxfun <- fun } else if (logfun==FALSE){ maxfun <- function(ttt, ...) log(fun(ttt, ...)) } else{ cat("logfun not a logical value.\n") stop("Respecifiy and call MCMCmetrop1R() again. \n", call.=FALSE) } if (is.null(V)){ ## find approx mode and Hessian using optim() opt.out <- optim(theta.init, maxfun, control=optim.control, lower=optim.lower, upper=optim.upper, method=optim.method, hessian=TRUE, ...) if(opt.out$convergence!=0){ warning("Mode and Hessian were not found with call to optim().\nSampling proceeded anyway. 
\n") } CC <- NULL try(CC <- chol(-1*opt.out$hessian), silent=TRUE) hess.new <- opt.out$hessian hess.flag <- 0 if (force.samp==TRUE){ if (max(diag(opt.out$hessian)==0)){ for (i in 1:nrow(hess.new)){ if (hess.new[i,i] == 0){ hess.new[i,i] <- -1e-6 } } } while (is.null(CC)){ hess.flag <- 1 hess.new <- hess.new - diag(diag(0.01 * abs(opt.out$hessian))) try(CC <- chol(-1*hess.new), silent=TRUE) } } else{ if (is.null(CC)){ hess.flag <- 2 } } if (hess.flag==1){ warning("Hessian from call to optim() not negative definite.\nSampling proceeded after enforcing negative definiteness. \n") } if (hess.flag==2){ cat("Hessian from call to optim() not negative definite.\n") cat("Sampling (as specified) cannot proceed.\n") stop("Check data and fun() and call MCMCmetrop1R() again. \n", call.=FALSE) } V <- tune %*% solve(-1*hess.new) %*% tune } else{ ## V is non NULL if (nrow(V) != ncol(V) || nrow(V) != length(theta.init)){ cat("V not of appropriate dimension.\n") stop("Check V and theta.init and call MCMCmetrop1R() again. \n", call.=FALSE) } CC <- NULL try(CC <- chol(V), silent=TRUE) if (is.null(CC)){ cat("V not positive definite.\n") stop("Check V and call MCMCmetrop1R() again. \n", call.=FALSE) } V <- tune %*% V %*% tune } ## Call the C++ function to do the MCMC sampling sample <- .Call("MCMCmetrop1R_cc", userfun, as.double(theta.init), my.env, as.integer(burnin), as.integer(mcmc), as.integer(thin), as.integer(verbose), lecuyer=as.integer(lecuyer), seedarray=as.integer(seed.array), lecuyerstream=as.integer(lecuyer.stream), as.logical(logfun), as.matrix(V), PACKAGE="MCMCpack") ## turn sample into an mcmc object sample <- mcmc(data=sample, start=burnin+1, end=burnin+mcmc, thin=thin) return(sample) } MCMCpack/R/MCMClogit.R0000644000176000001440000001712112133644103013761 0ustar ripleyusers########################################################################## ## sample from the posterior distribution of a logistic regression ## model in R using linked C++ code in Scythe ## ## KQ 1/23/2003 ## ## This software is distributed under the terms of the GNU GENERAL ## PUBLIC LICENSE Version 2, June 1991. See the package LICENSE ## file for more information. ## ## Modified to meet new developer specification 7/15/2004 KQ ## Modified for new Scythe and rngs 7/25/2004 KQ ## note: B0 is now a precision ## Modified to allow user-specified prior density 8/17/2005 KQ ## Modified to handle marginal likelihood calculation 1/27/2006 KQ ## ## Copyright (C) 2003-2007 Andrew D. Martin and Kevin M. Quinn ## Copyright (C) 2007-present Andrew D. Martin, Kevin M. Quinn, ## and Jong Hee Park ########################################################################## "MCMClogit" <- function(formula, data=NULL, burnin = 1000, mcmc = 10000, thin=1, tune=1.1, verbose = 0, seed = NA, beta.start = NA, b0 = 0, B0 = 0, user.prior.density=NULL, logfun=TRUE, marginal.likelihood = c("none", "Laplace"), ...) 
{ ## checks check.offset(list(...)) check.mcmc.parameters(burnin, mcmc, thin) cl <- match.call() ## seeds seeds <- form.seeds(seed) lecuyer <- seeds[[1]] seed.array <- seeds[[2]] lecuyer.stream <- seeds[[3]] ## form response and model matrices holder <- parse.formula(formula, data=data) Y <- holder[[1]] X <- holder[[2]] xnames <- holder[[3]] K <- ncol(X) # number of covariates ## starting values and priors beta.start <- coef.start(beta.start, K, formula, family=binomial, data) mvn.prior <- form.mvn.prior(b0, B0, K) b0 <- mvn.prior[[1]] B0 <- mvn.prior[[2]] ## get marginal likelihood argument marginal.likelihood <- match.arg(marginal.likelihood) B0.eigenvalues <- eigen(B0)$values if (min(B0.eigenvalues) < 0){ stop("B0 is not positive semi-definite.\nPlease respecify and call again.\n") } if (isTRUE(all.equal(min(B0.eigenvalues), 0))){ if (marginal.likelihood != "none"){ warning("Cannot calculate marginal likelihood with improper prior\n") marginal.likelihood <- "none" } } logmarglike <- NULL ## setup the environment so that fun can see the things passed as ... userfun <- function(ttt) user.prior.density(ttt, ...) my.env <- environment(fun = userfun) ## check to make sure user.prior.density returns a numeric scalar and ## starting values have positive prior mass if (is.function(user.prior.density)){ funval <- userfun(beta.start) if (length(funval) != 1){ cat("user.prior.density does not return a scalar.\n") stop("Respecify and call MCMClogit() again. \n") } if (!is.numeric(funval)){ cat("user.prior.density does not return a numeric value.\n") stop("Respecify and call MCMClogit() again. \n") } if (identical(funval, Inf)){ cat("user.prior.density(beta.start) == Inf.\n") stop("Respecify and call MCMClogit() again. \n") } if (logfun){ if (identical(funval, -Inf)){ cat("user.prior.density(beta.start) == -Inf.\n") stop("Respecify and call MCMClogit() again. \n") } } else{ if (funval <= 0){ cat("user.prior.density(beta.start) <= 0.\n") stop("Respecify and call MCMClogit() again. \n") } } } else if (!is.null(user.prior.density)){ cat("user.prior.density is neither a NULL nor a function.\n") stop("Respecify and call MCMClogit() again. \n") } ## form the tuning parameter tune <- vector.tune(tune, K) V <- vcov(glm(formula=formula, data=data, family=binomial)) ## y \in {0, 1} error checking if (sum(Y!=0 & Y!=1) > 0) { cat("Elements of Y equal to something other than 0 or 1.\n") stop("Check data and call MCMClogit() again. 
\n") } propvar <- tune %*% V %*% tune posterior <- NULL ## call C++ code to draw sample if (is.null(user.prior.density)){ ## define holder for posterior density sample sample <- matrix(data=0, mcmc/thin, dim(X)[2] ) auto.Scythe.call(output.object="posterior", cc.fun.name="MCMClogit", sample.nonconst=sample, Y=Y, X=X, burnin=as.integer(burnin), mcmc=as.integer(mcmc), thin=as.integer(thin), tune=tune, lecuyer=as.integer(lecuyer), seedarray=as.integer(seed.array), lecuyerstream=as.integer(lecuyer.stream), verbose=as.integer(verbose), betastart=beta.start, b0=b0, B0=B0, V=V) ## marginal likelihood calculation if Laplace if (marginal.likelihood == "Laplace"){ theta.start <- beta.start optim.out <- optim(theta.start, logpost.logit, method="BFGS", control=list(fnscale=-1), hessian=TRUE, y=Y, X=X, b0=b0, B0=B0) theta.tilde <- optim.out$par beta.tilde <- theta.tilde[1:K] Sigma.tilde <- solve(-1*optim.out$hessian) logmarglike <- (length(theta.tilde)/2)*log(2*pi) + log(sqrt(det(Sigma.tilde))) + logpost.logit(theta.tilde, Y, X, b0, B0) } ## put together matrix and build MCMC object to return output <- form.mcmc.object(posterior, names=xnames, title="MCMClogit Posterior Sample", y=Y, call=cl, logmarglike=logmarglike) } else { sample <- .Call("MCMClogituserprior_cc", userfun, as.integer(Y), as.matrix(X), as.double(beta.start), my.env, as.integer(burnin), as.integer(mcmc), as.integer(thin), as.integer(verbose), lecuyer=as.integer(lecuyer), seedarray=as.integer(seed.array), lecuyerstream=as.integer(lecuyer.stream), as.logical(logfun), as.matrix(propvar), PACKAGE="MCMCpack") ## marginal likelihood calculation if Laplace if (marginal.likelihood == "Laplace"){ theta.start <- beta.start optim.out <- optim(theta.start, logpost.logit.userprior, method="BFGS", control=list(fnscale=-1), hessian=TRUE, y=Y, X=X, userfun=userfun, logfun=logfun, my.env=my.env) theta.tilde <- optim.out$par beta.tilde <- theta.tilde[1:K] Sigma.tilde <- solve(-1*optim.out$hessian) logmarglike <- (length(theta.tilde)/2)*log(2*pi) + log(sqrt(det(Sigma.tilde))) + logpost.logit.userprior(theta.tilde, Y, X, userfun=userfun, logfun=logfun, my.env=my.env) } output <- mcmc(data=sample, start=burnin+1, end=burnin+mcmc, thin=thin) varnames(output) <- as.list(xnames) attr(output, "title") <- "MCMClogit Posterior Sample" attr(output, "y") <- Y attr(output, "call") <- cl attr(output, "logmarglike") <- logmarglike } return(output) } ########################################################################## MCMCpack/R/MCMCirtKdRob.R0000644000176000001440000003551512133644103014372 0ustar ripleyusers########################################################################## ## sample from a K-dimensional two-parameter item response model with ## logit link that has been rescaled so that the inverse link is: ## ## \delta0 + (1 - \delta0 - \delta1)*\Phi(.) ## ## where \delta0 \in (0, k0) and \delta1 \in (0, k1) ## ## priors for deltas are rescaled beta with parameters c0, d0, and c1, d1 ## ## ## datamatrix is assumed to be nsubjects by nitems ## ## Andrew D. Martin ## Washington University ## ## Kevin M. Quinn ## Harvard University ## ## This software is distributed under the terms of the GNU GENERAL ## PUBLIC LICENSE Version 2, June 1991. See the package LICENSE ## file for more information. ## ## Feb. 17, 2005 ## ## Copyright (C) 2003-2007 Andrew D. Martin and Kevin M. Quinn ## Copyright (C) 2007-present Andrew D. Martin, Kevin M. 
Quinn, ## and Jong Hee Park ########################################################################## "MCMCirtKdRob" <- function(datamatrix, dimensions, item.constraints=list(), ability.constraints=list(), burnin = 500, mcmc = 5000, thin=1, interval.method="step", theta.w=0.5, theta.mp=4, alphabeta.w=1.0, alphabeta.mp=4, delta0.w=NA, delta0.mp=3, delta1.w=NA, delta1.mp=3, verbose = FALSE, seed = NA, theta.start=NA, alphabeta.start = NA, delta0.start = NA, delta1.start = NA, b0=0, B0=0, k0=.1, k1=.1, c0=1, d0=1, c1=1, d1=1, store.item=TRUE, store.ability=FALSE, drop.constant.items=TRUE, ... ) { ## set X up if (drop.constant.items==TRUE){ x.col.var <- apply(datamatrix, 2, var, na.rm=TRUE) keep.inds <- x.col.var>0 keep.inds[is.na(keep.inds)] <- FALSE datamatrix <- datamatrix[,keep.inds] } X <- as.data.frame(datamatrix) xvars <- dimnames(X)[[2]] xobs <- dimnames(X)[[1]] N <- nrow(X) # number of subjects K <- ncol(X) # number of items for (i in 1:K){ X[is.na(X[,i]), i] <- -999 } if(sum(datamatrix==1 | datamatrix==0 | is.na(datamatrix)) != (N*K)) { cat("Error: Data matrix contains elements other than 0, 1 or NA.\n") stop("Please check data and try MCMCirtKdRob() again.\n", call.=FALSE) } X <- as.matrix(X) ## take care of the case where X has no row names if (is.null(xobs)){ xobs <- 1:N } check.offset(list(...)) check.mcmc.parameters(burnin, mcmc, thin) ## check slice sampling parameters if (!(interval.method %in% c("step", "doubling"))){ cat("Error: interval.method not equal to 'step' or 'doubling'.\n") stop("Please check data and try MCMCirtKdRob() again.\n", call.=FALSE) } method.step <- 0 if (interval.method == "step"){ method.step <- 1 } if (theta.w <= 0 ){ cat("Error: theta.w not > 0.\n") stop("Please check data and try MCMCirtKdRob() again.\n", call.=FALSE) } if (theta.mp < 1 ){ cat("Error: theta.mp not >= 1.\n") stop("Please check data and try MCMCirtKdRob() again.\n", call.=FALSE) } if (alphabeta.w <= 0 ){ cat("Error: alphabeta.w not > 0.\n") stop("Please check data and try MCMCirtKdRob() again.\n", call.=FALSE) } if (alphabeta.mp < 1 ){ cat("Error: alphabeta.mp not >= 1.\n") stop("Please check data and try MCMCirtKdRob() again.\n", call.=FALSE) } if (is.na(delta0.w)){ delta0.w <- 0.25*k0 } if (delta0.w <= 0 ){ cat("Error: delta0.w not > 0.\n") stop("Please check data and try MCMCirtKdRob() again.\n", call.=FALSE) } if (delta0.mp < 1 ){ cat("Error: delta0.mp not >= 1.\n") stop("Please check data and try MCMCirtKdRob() again.\n", call.=FALSE) } if (is.na(delta1.w)){ delta1.w <- 0.25*k1 } if (delta1.w <= 0 ){ cat("Error: delta1.w not > 0.\n") stop("Please check data and try MCMCirtKdRob() again.\n", call.=FALSE) } if (delta1.mp < 1 ){ cat("Error: delta1.mp not >= 1.\n") stop("Please check data and try MCMCirtKdRob() again.\n", call.=FALSE) } ## error check the prior parameters for delta if (k0 < 0 | k0 > 0.5){ cat("Error: k0 not in (0, 0.5).\n") stop("Please check data and try MCMCirtKdRob() again.\n", call.=FALSE) } if (k1 < 0 | k1 > 0.5){ cat("Error: k1 not in (0, 0.5).\n") stop("Please check data and try MCMCirtKdRob() again.\n", call.=FALSE) } if (c0 < 0){ cat("Error: c0 < 0.\n") stop("Please check data and try MCMCirtKdRob() again.\n", call.=FALSE) } if (c1 < 0){ cat("Error: c1 < 0.\n") stop("Please check data and try MCMCirtKdRob() again.\n", call.=FALSE) } if (d0 < 0){ cat("Error: d0 < 0.\n") stop("Please check data and try MCMCirtKdRob() again.\n", call.=FALSE) } if (d1 < 0){ cat("Error: d1 < 0.\n") stop("Please check data and try MCMCirtKdRob() again.\n", call.=FALSE) } ## setup 
constraints on Lambda = (alpha, beta) holder <- build.factor.constraints(item.constraints, X, K, dimensions+1) Lambda.eq.constraints <- holder[[1]] Lambda.ineq.constraints <- holder[[2]] X.names <- holder[[3]] ## setup constraints on theta holder <- build.factor.constraints(ability.constraints, t(X), N, dimensions) theta.eq.constraints <- holder[[1]] theta.ineq.constraints <- holder[[2]] ## setup prior on Lambda holder <- form.factload.norm.prior(b0, B0, K, dimensions+1, X.names) Lambda.prior.mean <- holder[[1]] Lambda.prior.prec <- holder[[2]] # seeds seeds <- form.seeds(seed) lecuyer <- seeds[[1]] seed.array <- seeds[[2]] lecuyer.stream <- seeds[[3]] ## Starting values for delta0 and delta1 if (is.na(delta0.start)){ delta0.start <- 0.5 * k0; } if (is.na(delta1.start)){ delta1.start <- 0.5 * k1; } if (delta0.start < 0 | delta0.start > k0){ cat("Error: delta0 not in (0, k0).\n") stop("Please check data and try MCMCirtKdRob() again.\n", call.=FALSE) } if (delta1.start < 0 | delta1.start > k1){ cat("Error: delta1 not in (0, k1).\n") stop("Please check data and try MCMCirtKdRob() again.\n", call.=FALSE) } ## Starting values for Lambda Lambda <- matrix(0, K, dimensions+1) if (is.na(alphabeta.start)){# sets Lambda to equality constraints & 0s for (i in 1:K){ for (j in 1:(dimensions+1)){ if (Lambda.eq.constraints[i,j]==-999){ if(Lambda.ineq.constraints[i,j]==0){ if (j==1){ probit.out <- glm(as.factor(X[X[,i]!=-999,i])~1, family=binomial(link=logit)) probit.beta <- coef(probit.out) Lambda[i,j] <- -1 * probit.beta[1] } } if(Lambda.ineq.constraints[i,j]>0){ Lambda[i,j] <- 1.0 } if(Lambda.ineq.constraints[i,j]<0){ Lambda[i,j] <- -1.0 } } else Lambda[i,j] <- Lambda.eq.constraints[i,j] } } } else if (is.matrix(alphabeta.start)){ if (nrow(alphabeta.start)==K && ncol(alphabeta.start)==(dimensions+1)) Lambda <- alphabeta.start else { cat("Starting values not of correct size for model specification.\n") stop("Please respecify and call MCMCirtKdRob() again\n", call.=FALSE) } } else if (length(alphabeta.start)==1 && is.numeric(alphabeta.start)){ Lambda <- matrix(alphabeta.start, K, dimensions+1) for (i in 1:K){ for (j in 1:(dimensions+1)){ if (Lambda.eq.constraints[i,j] != -999) Lambda[i,j] <- Lambda.eq.constraints[i,j] } } } else { cat("Starting values for alpha & beta neither NA, matrix, nor scalar.\n") stop("Please respecify and call MCMCirtKdRob() again\n", call.=FALSE) } for (i in 1:K){ lam.sqdist <- sum(Lambda[i,]^2) while (lam.sqdist > 100){ Lambda[i,] <- Lambda[i,] * 0.95 lam.sqdist <- sum(Lambda[i,]^2) } } ## Starting values for theta if (is.na(theta.start)){ theta <- matrix(0, N, dimensions) } else if(is.null(dim(theta.start)) & is.numeric(theta.start)){ theta <- matrix(theta.start, N, dimensions) } else if(nrow(theta.start)==N & ncol(theta.start)==dimensions){ theta <- theta.start } else{ cat("Starting values for theta neither NA, matrix, nor scalar.\n") stop("Please respecify and call MCMCirtKdRob() again\n", call.=FALSE) } for (i in 1:N){ for (j in 1:dimensions){ if (theta.eq.constraints[i,j]==-999){ if(theta.ineq.constraints[i,j]>0){ theta[i,j] <- 0.5 } if(theta.ineq.constraints[i,j]<0){ theta[i,j] <- -0.5 } } else theta[i,j] <- theta.eq.constraints[i,j] } } ## define holder for posterior sample if (store.ability == FALSE && store.item == FALSE){ cat("You need to store either the ability or item parameters.\n") stop("Please respecify and call MCMCirtKdRob() again\n", call.=FALSE) } else if (store.ability == TRUE && store.item == FALSE){ sample <- matrix(data=0, mcmc/thin, (dimensions+1)*N+2) } 
else if(store.ability == FALSE && store.item == TRUE) { sample <- matrix(data=0, mcmc/thin, K*(dimensions+1)+2) } else { # store.ability==TRUE && store.item==TRUE sample <- matrix(data=0, mcmc/thin, K*(dimensions+1)+(dimensions+1)*N+2) } ## Call the C++ code to do the real work posterior <- .C("irtKdRobpost", samdata = as.double(sample), samrow = as.integer(nrow(sample)), samcol = as.integer(ncol(sample)), X = as.integer(X), Xrow = as.integer(nrow(X)), Xcol = as.integer(ncol(X)), burnin = as.integer(burnin), mcmc = as.integer(mcmc), thin = as.integer(thin), lecuyer = as.integer(lecuyer), seedarray = as.integer(seed.array), lecuyerstream = as.integer(lecuyer.stream), verbose = as.integer(verbose), method.step = as.integer(method.step), theta.w = as.double(theta.w), theta.mp = as.integer(theta.mp), ab.w = as.double(alphabeta.w), ab.mp = as.integer(alphabeta.mp), delta0.w = as.double(delta0.w), delta0.mp = as.integer(delta0.mp), delta1.w = as.double(delta1.w), delta1.mp = as.integer(delta1.mp), delta0 = as.double(delta0.start), delta1 = as.double(delta1.start), Lambda = as.double(Lambda), Lambdarow = as.integer(nrow(Lambda)), Lambdacol = as.integer(ncol(Lambda)), theta = as.double(theta), thetarow = as.integer(nrow(theta)), thetacol = as.integer(ncol(theta)), Lameq = as.double(Lambda.eq.constraints), Lameqrow = as.integer(nrow(Lambda.eq.constraints)), Lameqcol = as.integer(ncol(Lambda.ineq.constraints)), Lamineq = as.double(Lambda.ineq.constraints), Lamineqrow = as.integer(nrow(Lambda.ineq.constraints)), Lamineqcol = as.integer(ncol(Lambda.ineq.constraints)), theteq = as.double(theta.eq.constraints), theteqrow = as.integer(nrow(theta.eq.constraints)), theteqcol = as.integer(ncol(theta.ineq.constraints)), thetineq = as.double(theta.ineq.constraints), thetineqrow = as.integer(nrow(theta.ineq.constraints)), thetineqcol = as.integer(ncol(theta.ineq.constraints)), Lampmean = as.double(Lambda.prior.mean), Lampmeanrow = as.integer(nrow(Lambda.prior.mean)), Lampmeancol = as.integer(ncol(Lambda.prior.prec)), Lampprec = as.double(Lambda.prior.prec), Lampprecrow = as.integer(nrow(Lambda.prior.prec)), Lamppreccol = as.integer(ncol(Lambda.prior.prec)), k0 = as.double(k0), k1 = as.double(k1), c0 = as.double(c0), c1 = as.double(c1), d0 = as.double(d0), d1 = as.double(d1), storeitem = as.integer(store.item), storesability = as.integer(store.ability), PACKAGE="MCMCpack" ) ## put together matrix and build MCMC object to return sample <- matrix(posterior$samdata, posterior$samrow, posterior$samcol, byrow=FALSE) output <- mcmc(data=sample,start=1, end=mcmc, thin=thin) par.names <- NULL if (store.item==TRUE){ alpha.hold <- paste("alpha", X.names, sep=".") beta.hold <- paste("beta", X.names, sep = ".") beta.hold <- rep(beta.hold, dimensions, each=dimensions) beta.hold <- paste(beta.hold, 1:dimensions, sep=".") Lambda.names <- t(cbind(matrix(alpha.hold, K, 1), matrix(beta.hold,K,dimensions,byrow=TRUE))) dim(Lambda.names) <- NULL par.names <- c(par.names, Lambda.names) } if (store.ability==TRUE){ phi.names <- paste(paste("theta", rep(xobs, each=(dimensions+1)), sep="."), rep(0:dimensions,(dimensions+1)), sep=".") par.names <- c(par.names, phi.names) } par.names <- c("delta0", "delta1", par.names) varnames(output) <- par.names ## get rid of columns for constrained parameters output.df <- as.data.frame(as.matrix(output)) output.var <- diag(var(output.df)) output.df <- output.df[,output.var != 0] output <- mcmc(as.matrix(output.df), start=1, end=mcmc, thin=thin) ## add constraint info so this isn't lost attr(output, 
"constraints") <- item.constraints attr(output, "n.items") <- K attr(output, "n.dimensions") <- dimensions attr(output,"title") <- "MCMCpack Robust K-Dimensional Item Response Theory Model Posterior Sample" return(output) } MCMCpack/R/MCMCirtKdHet.R0000644000176000001440000002147212133644103014365 0ustar ripleyusersMCMCirtKdHet <- function(datamatrix, dimensions, item.constraints=list(), burnin = 1000, mcmc = 1000, thin=1, verbose = 0, seed = NA, alphabeta.start = NA, b0 = 0, B0=0.04, c0=0, d0=0, store.item = FALSE, store.ability=TRUE,store.sigma=TRUE, drop.constant.items=TRUE) { echo.name <- "MCMCirtKdHet" # translate variable names to MCMCordfactanal naming convention x <- as.matrix(datamatrix) factors <- dimensions lambda.constraints <- item.constraints lambda.start <- alphabeta.start l0 <- b0 L0 <- B0 sigma.c0 <- c0 sigma.d0 <- d0 store.lambda <- store.item store.scores <- store.ability drop.constantvars <- drop.constant.items # extract X and variable names from the model formula and frame if (is.matrix(x)){ if (drop.constantvars==TRUE){ x.col.var <- apply(x, 2, var, na.rm=TRUE) keep.inds <- x.col.var>0 keep.inds[is.na(keep.inds)] <- FALSE x <- x[,keep.inds] } X <- as.data.frame(x) xvars <- dimnames(X)[[2]] xobs <- dimnames(X)[[1]] N <- nrow(X) # number of observations K <- ncol(X) # number of manifest variables for (i in 1:K){ X[,i] <- as.integer(X[,i]) if (sum(X[,i] != 0 & X[,i] != 1,na.rm=TRUE) != 0){ stop("Data must be 0, 1, and NA only.\n") } X[is.na(X[,i]), i] <- -999 } X <- as.matrix(X) } else { stop("Please provide data as a matrix.\n") } ## take care of the case where X has no row names if (is.null(xobs)){ xobs <- 1:N } #check.offset(list(...)) check.mcmc.parameters(burnin, mcmc, thin) ## setup constraints on Lambda holder <- build.factor.constraints(lambda.constraints, X, K, factors+1) Lambda.eq.constraints <- holder[[1]] Lambda.ineq.constraints <- holder[[2]] X.names <- holder[[3]] ## setup prior on Lambda holder <- form.factload.norm.prior(l0, L0, K, factors+1, X.names) Lambda.prior.mean <- holder[[1]] Lambda.prior.prec <- holder[[2]] Lambda.prior.mean[,1] <- Lambda.prior.mean[,1] * -1 # seeds seeds <- form.seeds(seed) lecuyer <- seeds[[1]] seed.array <- seeds[[2]] lecuyer.stream <- seeds[[3]] ## Starting values for Lambda Lambda <- matrix(0, K, factors+1) if (is.na(lambda.start)){# sets Lambda to equality constraints & 0s for (i in 1:K){ for (j in 1:(factors+1)){ if (Lambda.eq.constraints[i,j]==-999){ if(Lambda.ineq.constraints[i,j]==0){ if (j==1){ probit.out <- glm(as.factor(X[X[,i]!=-999,i])~1, family=binomial(link=probit)) probit.beta <- coef(probit.out) Lambda[i,j] <- probit.beta[1] } } if(Lambda.ineq.constraints[i,j]>0){ Lambda[i,j] <- 1.0 } if(Lambda.ineq.constraints[i,j]<0){ Lambda[i,j] <- -1.0 } } else Lambda[i,j] <- Lambda.eq.constraints[i,j] } } } else if (is.matrix(lambda.start)){ if (nrow(lambda.start)==K && ncol(lambda.start)==(factors+1)) Lambda <- lambda.start else { cat("Starting values not of correct size for model specification.\n") stop("Please respecify and call ", echo.name, "() again\n") } } else if (length(lambda.start)==1 && is.numeric(lambda.start)){ Lambda <- matrix(lambda.start, K, factors+1) for (i in 1:K){ for (j in 1:(factors+1)){ if (Lambda.eq.constraints[i,j] != -999) Lambda[i,j] <- Lambda.eq.constraints[i,j] } } } else { cat("Starting values neither NA, matrix, nor scalar.\n") stop("Please respecify and call ", echo.name, "() again\n") } ## define holder for posterior sample if (store.scores == FALSE && store.lambda == FALSE && store.sigma 
== FALSE){ stop("Please specify parameters to be stored.\n") } else if (store.scores == TRUE && store.lambda == FALSE && store.sigma == FALSE){ sample <- matrix(data=0, mcmc/thin, (factors+1)*N) } else if(store.scores == FALSE && store.lambda == TRUE && store.sigma == FALSE) { sample <- matrix(data=0, mcmc/thin, K*(factors+1)) } else if(store.scores==TRUE && store.lambda==TRUE && store.sigma == FALSE) { sample <- matrix(data=0, mcmc/thin, K*(factors+1)+(factors+1)*N) } else if (store.scores == FALSE && store.lambda == FALSE && store.sigma == TRUE){ sample <- matrix(data=0, mcmc/thin, N) } else if (store.scores == TRUE && store.lambda == FALSE && store.sigma == TRUE){ sample <- matrix(data=0, mcmc/thin, (factors+1)*N + N) } else if(store.scores == FALSE && store.lambda == TRUE && store.sigma == TRUE) { sample <- matrix(data=0, mcmc/thin, K*(factors+1)+N) } else if(store.scores==TRUE && store.lambda==TRUE && store.sigma == TRUE) { sample <- matrix(data=0, mcmc/thin, K*(factors+1)+(factors+1)*N + N) } ## create templates posterior <- .C("irtKdHetpost", samdata = as.double(sample), samrow = as.integer(nrow(sample)), samcol = as.integer(ncol(sample)), Xdata = as.integer(X), Xrow = as.integer(nrow(X)), Xcol = as.integer(ncol(X)), burnin = as.integer(burnin), mcmc = as.integer(mcmc), thin = as.integer(thin), lecuyer = as.integer(lecuyer), seedarray = as.integer(seed.array), lecuyerstream = as.integer(lecuyer.stream), verbose = as.integer(verbose), Lamstartdata = as.double(Lambda), Lamstartrow = as.integer(nrow(Lambda)), Lamstartcol = as.integer(ncol(Lambda)), Lameqdata = as.double(Lambda.eq.constraints), Lameqrow = as.integer(nrow(Lambda.eq.constraints)), Lameqcol = as.integer(ncol(Lambda.ineq.constraints)), Lamineqdata = as.double(Lambda.ineq.constraints), Lamineqrow = as.integer(nrow(Lambda.ineq.constraints)), Lamineqcol = as.integer(ncol(Lambda.ineq.constraints)), Lampmeandata = as.double(Lambda.prior.mean), Lampmeanrow = as.integer(nrow(Lambda.prior.mean)), Lampmeancol = as.integer(ncol(Lambda.prior.prec)), Lampprecdata = as.double(Lambda.prior.prec), Lampprecrow = as.integer(nrow(Lambda.prior.prec)), Lamppreccol = as.integer(ncol(Lambda.prior.prec)), storelambda = as.integer(store.lambda), storescores = as.integer(store.scores), storesigma = as.integer(store.sigma), sigmapriorc = as.double(sigma.c0), sigmapriord = as.double(sigma.d0), package="MCMCpack" ) output <- mcmc(data=matrix(posterior$samdata, posterior$samrow, posterior$samcol,byrow=FALSE),start=1, end=mcmc, thin=thin) par.names <- NULL if (store.lambda==TRUE){ alpha.hold <- paste("alpha", X.names, sep=".") beta.hold <- paste("beta", X.names, sep = ".") beta.hold <- rep(beta.hold, factors, each=factors) beta.hold <- paste(beta.hold, 1:factors, sep=".") Lambda.names <- t(cbind(matrix(alpha.hold, K, 1), matrix(beta.hold,K,factors,byrow=TRUE))) dim(Lambda.names) <- NULL par.names <- c(par.names, Lambda.names) } if (store.scores==TRUE){ phi.names <- paste(paste("theta", rep(xobs, each=(factors+1)), sep="."), rep(0:factors,(factors+1)), sep=".") par.names <- c(par.names, phi.names) } if (store.sigma==TRUE){ sigma.names <- paste("sigma", rep(xobs), sep=".") par.names <- c(par.names, sigma.names) } varnames(output) <- par.names # get rid of columns for constrained parameters output.df <- as.data.frame(as.matrix(output)) output.var <- sd(output.df) output.df <- output.df[,output.var != 0] output <- mcmc(as.matrix(output.df), start=burnin+1, end=burnin+mcmc, thin=thin) # add constraint info so this isn't lost attr(output, "constraints") <- 
lambda.constraints attr(output, "n.manifest") <- K attr(output, "n.factors") <- factors attr(output, "title") <- "MCMCpack Heteroskedastic K-Dimensional Item Response Theory Model Posterior Sample" return(output) } MCMCpack/R/MCMCirtKd.R0000644000176000001440000000316112133644103013717 0ustar ripleyusers########################################################################## ## sample from a K-dimensional two-parameter item response model with ## probit link. This is just a wrapper function that calls ## MCMCordfactanal. ## ## Andrew D. Martin ## Washington University ## ## Kevin M. Quinn ## Harvard University ## ## This software is distributed under the terms of the GNU GENERAL ## PUBLIC LICENSE Version 2, June 1991. See the package LICENSE ## file for more information. ## ## June 8, 2003 ## ## Copyright (C) 2003-2007 Andrew D. Martin and Kevin M. Quinn ## Copyright (C) 2007-present Andrew D. Martin, Kevin M. Quinn, ## and Jong Hee Park ########################################################################## "MCMCirtKd" <- function(datamatrix, dimensions, item.constraints=list(), burnin = 1000, mcmc = 10000, thin=1, verbose = 0, seed = NA, alphabeta.start = NA, b0=0, B0=0, store.item=FALSE, store.ability=TRUE, drop.constant.items=TRUE, ... ) { datamatrix <- as.matrix(datamatrix) post <- MCMCordfactanal(x=datamatrix, factors=dimensions, lambda.constraints=item.constraints, burnin=burnin, mcmc=mcmc, thin=thin, tune=NA, verbose=verbose, seed=seed, lambda.start=alphabeta.start, l0=b0, L0=B0, store.lambda=store.item, store.scores=store.ability, drop.constantvars=drop.constant.items, model="MCMCirtKd") return(post) } MCMCpack/R/MCMCirtHier1d.R0000644000176000001440000002424312133644103014501 0ustar ripleyusers## sample from the posterior distribution of a one-dimensional item ## response theory model in R using linked C++ code in Scythe. ## ## ADM and KQ 1/23/2003 ## updated extensively ADM & KQ 7/28/2004 ## store.ability arg added KQ 1/27/2006 ## Hierarchical Subject Parameters MJM 2007-11-07 ## Parameter Expansion 2008-11-18 ## Grouped Subject Parameters (Wdata) started MJM,YS 2008-11 "MCMCirtHier1d" <- function(datamatrix, Xjdata, burnin = 1000, mcmc = 20000, thin=1, verbose = 0, seed = NA, theta.start = NA, a.start = NA, b.start = NA, beta.start=NA, b0=0, B0=.01, c0=.001, d0=.001, ab0=0, AB0=.25, store.item = FALSE, store.ability=TRUE, drop.constant.items=TRUE, marginal.likelihood=c("none","Chib95"), px=TRUE, px_a0 = 10, px_b0=10, ... 
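## Illustrative usage sketch for the MCMCirtKd() wrapper defined just above
## (added comment; not from the original sources, not run). The matrix
## `votes` is hypothetical; the data must be 0, 1, or NA, and the call is
## simply handed on to MCMCordfactanal() as shown above.
##   votes <- matrix(rbinom(50 * 20, 1, 0.5), nrow = 50, ncol = 20)
##   post <- MCMCirtKd(votes, dimensions = 1, burnin = 1000, mcmc = 5000,
##                     thin = 5, store.item = TRUE, store.ability = FALSE)
##   summary(post)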
) { ## checks check.offset(list(...)) check.mcmc.parameters(burnin, mcmc, thin) ## check vote matrix and convert to work with C++ code if (drop.constant.items==TRUE){ x.col.var <- apply(datamatrix, 2, var, na.rm=TRUE) keep.inds <- x.col.var>0 keep.inds[is.na(keep.inds)] <- FALSE datamatrix <- datamatrix[,keep.inds] } datamatrix <- as.matrix(datamatrix) K <- ncol(datamatrix) # cases, bills, items, etc J <- nrow(datamatrix) # justices, legislators, subjects, etc L <- ncol(Xjdata) # predictors on theta Xj if(sum(datamatrix==1 | datamatrix==0 | is.na(datamatrix)) != (J*K)) { cat("Error: Data matrix contains elements other than 0, 1 or NA.\n") stop("Please check data and call ", calling.function(), " again.\n", call.=FALSE) } datamatrix[is.na(datamatrix)] <- 9 item.names <- colnames(as.data.frame(datamatrix)) subject.names <- rownames(as.data.frame(datamatrix)) beta.names <- c(names(as.data.frame(Xjdata)),"sigmasq") ## names item.names <- colnames(datamatrix) if (is.null(item.names)){ item.names <- paste("item", 1:K, sep="") } ## check Xj matrix and set up betastart Xjdata <- as.matrix(Xjdata) if(nrow(Xjdata) != nrow(datamatrix)) { cat("Error: subject covariates not of same length as datamatrix\n") stop("Please check data and try ",calling.function()," again.\n",call.=FALSE) } ## prior for (alpha, beta) holder <- form.mvn.prior(ab0, AB0, 2) ab0 <- holder[[1]] AB0 <- holder[[2]] ## starting values for theta error checking ## could use factor.score.start.check EXCEPT ## We have done away with eq and ineq constraints. if (max(is.na(theta.start))==1) { theta.start <- factor.score.eigen.start(agree.mat(datamatrix), 1) } else if(is.numeric(theta.start) & length(theta.start) == J ) { theta.start <- theta.start * matrix(1, J, 1) } else { cat("Inappropriate value of theta.start passed.\n") stop("Please respecify and call", calling.function(), " again.\n", call.=FALSE) } ## starting values for (a, b) ab.starts <- matrix(NA, K, 2) for (i in 1:K){ local.y <- datamatrix[,i] local.y[local.y==9] <- NA if (var(na.omit(local.y))==0){ ab.starts[i,] <- c(0,10) } else { ab.starts[i,] <- coef(suppressWarnings(glm(local.y~theta.start, family=binomial(probit), control=glm.control( maxit=8, epsilon=1e-3) ))) } } ab.starts[,1] <- -1 * ab.starts[,1] # make this into a difficulty param ## starting values for a and b error checking if (is.na(a.start)) { a.start <- ab.starts[,1] } else if(is.null(dim(a.start))) { a.start <- a.start * matrix(1,K,1) } else if((dim(a.start)[1] != K) || (dim(a.start)[2] != 1)) { cat("Error: Starting value for a not conformable.\n") stop("Please respecify and call ", calling.function(), " again.\n", call.=FALSE) } if (is.na(b.start)) { b.start <- ab.starts[,2] } else if(is.null(dim(b.start))) { b.start <- b.start * matrix(1,K,1) } else if((dim(b.start)[1] != K) || (dim(b.start)[2] != 1)) { cat("Error: Starting value for b not conformable.\n") stop("Please respecify and call ", calling.function(), " again.\n", call.=FALSE) } cat("Generating starting values (glm) for hierarchical parameters:\n") ## starting values are regression of theta.start on Xj ## or passed vector, or same value all beta if (max(is.na(beta.start))==1) { # beta.start NA beta.start <- coef(suppressWarnings(glm.fit(Xjdata,theta.start))) } else if ( length(beta.start) == L ) { # beta.start vector beta.start <- matrix(beta.start,L,1) } else if ( length(beta.start) == 1 ) { # beta.start scalar beta.start <- beta.start * matrix(1,L,1) } else { cat("Error: Starting value for beta not conformable.\n") stop("Please respecify and call 
", calling.function(), " again.\n", call.=FALSE) } print(beta.start) ## prior for beta holder <- form.mvn.prior(b0, B0, L) b0 <- holder[[1]] B0 <- holder[[2]] check.ig.prior(c0, d0) ## get marginal likelihood argument marginal.likelihood <- match.arg(marginal.likelihood) B0.eigenvalues <- eigen(B0)$values if (min(B0.eigenvalues) < 0){ stop("B0 is not positive semi-definite.\nPlease respecify and call again.\n") } if (isTRUE(all.equal(min(B0.eigenvalues), 0))){ if (marginal.likelihood != "none"){ warning("Cannot calculate marginal likelihood with improper prior\n") marginal.likelihood <- "none" } } logmarglike <- NULL chib <- 0 if (marginal.likelihood == "Chib95"){ chib <- 1 } cat("setting up posterior holder\n" ) ## define holder for posterior sample if(store.item == FALSE & store.ability == TRUE) { sample <- matrix(data=0, mcmc/thin, J+L+1) } else if (store.item == TRUE & store.ability == FALSE){ sample <- matrix(data=0, mcmc/thin, L+1 + 2*K) } else if (store.item == TRUE & store.ability == TRUE){ sample <- matrix(data=0, mcmc/thin, L+1 + J + 2 * K) } else{ stop("Either store.item or store.ability should be true.\n") } ## seeds seeds <- form.seeds(seed) lecuyer <- seeds[[1]] seed.array <- seeds[[2]] lecuyer.stream <- seeds[[3]] # call C++ code to draw sample posterior <- .C("MCMCirtHier1d", sampledata = as.double(sample), samplerow = as.integer(nrow(sample)), samplecol = as.integer(ncol(sample)), Xdata = as.integer(datamatrix), Xrow = as.integer(nrow(datamatrix)), Xcol = as.integer(ncol(datamatrix)), burnin = as.integer(burnin), mcmc = as.integer(mcmc), thin = as.integer(thin), lecuyer = as.integer(lecuyer), seedarray = as.integer(seed.array), lecuyerstream = as.integer(lecuyer.stream), verbose = as.integer(verbose), thetastartdata = as.double(theta.start), thetastartrow = as.integer(nrow(theta.start)), thetastartcol = as.integer(ncol(theta.start)), astartdata = as.double(a.start), astartrow = as.integer(length(a.start)), astartcol = as.integer(1), bstartdata = as.double(b.start), bstartrow = as.integer(length(b.start)), bstartcol = as.integer(1), ab0data = as.double(ab0), ab0row = as.integer(nrow(ab0)), ab0col = as.integer(ncol(ab0)), AB0data = as.double(AB0), AB0row = as.integer(nrow(AB0)), AB0col = as.integer(ncol(AB0)), Xjdata = as.double(Xjdata), Xjrow = as.integer(nrow(Xjdata)), Xjcol = as.integer(ncol(Xjdata)), betastartdata = as.double(beta.start), betastartrow = as.integer(length(beta.start)), betastartcol = as.integer(1), b0data = as.double(b0), b0row = as.integer(length(b0)), b0col = as.integer(1), B0data = as.double(B0), B0row = as.integer(nrow(B0)), B0col = as.integer(ncol(B0)), c0 = as.double(c0), d0 = as.double(d0), storei = as.integer(store.item), storea = as.integer(store.ability), logmarglikeholder = as.double(0.0), chib = as.integer(chib), px= as.integer(px), px_a0 = as.double(px_a0), px_b0 = as.double(px_b0), PACKAGE="MCMCpack" ) beta.names <- paste("beta.",beta.names,sep="") theta.names <- paste("theta.", subject.names, sep = "") alpha.beta.names <- paste(rep(c("a.","b."), K), rep(item.names, each = 2), sep = "") # put together matrix and build MCMC object to return sample <- matrix(posterior$sampledata, posterior$samplerow, posterior$samplecol, byrow=FALSE) output <- mcmc(data=sample, start=burnin+1, end=burnin+mcmc, thin=thin) if (marginal.likelihood == "Chib95"){ logmarglike <- posterior$logmarglikeholder } names <- NULL if(store.ability == TRUE) { names <- c(names, theta.names) } if (store.item == TRUE){ names <- c(names, alpha.beta.names) } names <- 
c(names,beta.names) try( varnames(output) <- names) attr(output,"title") <- "MCMCirtHier1d Posterior Sample" attr(output,"logmarglike") <- posterior$logmarglikeholder return(output) } MCMCpack/R/MCMCirt1d.R0000644000176000001440000002026412133644103013670 0ustar ripleyusers########################################################################## ## sample from the posterior distribution of a one-dimensional item ## response theory model in R using linked C++ code in Scythe. ## ## This software is distributed under the terms of the GNU GENERAL ## PUBLIC LICENSE Version 2, June 1991. See the package LICENSE ## file for more information. ## ## ADM and KQ 1/23/2003 ## updated extensively ADM & KQ 7/28/2004 ## store.ability arg added KQ 1/27/2006 ## ## Copyright (C) 2003-2007 Andrew D. Martin and Kevin M. Quinn ## Copyright (C) 2007-present Andrew D. Martin, Kevin M. Quinn, ## and Jong Hee Park ########################################################################## "MCMCirt1d" <- function(datamatrix, theta.constraints=list(), burnin = 1000, mcmc = 20000, thin=1, verbose = 0, seed = NA, theta.start = NA, alpha.start = NA, beta.start = NA, t0 = 0, T0 = 1, ab0=0, AB0=.25, store.item = FALSE, store.ability=TRUE, drop.constant.items=TRUE, ... ) { ## checks check.offset(list(...)) check.mcmc.parameters(burnin, mcmc, thin) ## check vote matrix and convert to work with C++ code if (drop.constant.items==TRUE){ x.col.var <- apply(datamatrix, 2, var, na.rm=TRUE) keep.inds <- x.col.var>0 keep.inds[is.na(keep.inds)] <- FALSE datamatrix <- datamatrix[,keep.inds] } datamatrix <- as.matrix(datamatrix) K <- ncol(datamatrix) # cases, bills, items, etc J <- nrow(datamatrix) # justices, legislators, subjects, etc if(sum(datamatrix==1 | datamatrix==0 | is.na(datamatrix)) != (J*K)) { cat("Error: Data matrix contains elements other than 0, 1 or NA.\n") stop("Please check data and try MCMCirt1d() again.\n", call.=FALSE) } datamatrix[is.na(datamatrix)] <- 9 item.names <- colnames(datamatrix) subject.names <- rownames(datamatrix) ## setup constraints on theta if(length(theta.constraints) != 0) { for (i in 1:length(theta.constraints)){ theta.constraints[[i]] <- list(as.integer(1), theta.constraints[[i]][1]) } } holder <- build.factor.constraints(theta.constraints, t(datamatrix), J, 1) theta.eq.constraints <- holder[[1]] theta.ineq.constraints <- holder[[2]] subject.names <- holder[[3]] ## names item.names <- colnames(datamatrix) if (is.null(item.names)){ item.names <- paste("item", 1:K, sep="") } ## prior for theta holder <- form.mvn.prior(t0, T0, 1) t0 <- holder[[1]] T0 <- holder[[2]] ## prior for (alpha, beta) holder <- form.mvn.prior(ab0, AB0, 2) ab0 <- holder[[1]] AB0 <- holder[[2]] ## starting values for theta error checking theta.start <- factor.score.start.check(theta.start, datamatrix, t0, T0, theta.eq.constraints, theta.ineq.constraints, 1) ## starting values for (alpha, beta) ab.starts <- matrix(NA, K, 2) for (i in 1:K){ local.y <- datamatrix[,i] local.y[local.y==9] <- NA if (var(na.omit(local.y))==0){ ab.starts[i,] <- c(0,10) } else { ab.starts[i,] <- coef(suppressWarnings(glm(local.y~theta.start, family=binomial(probit), control=glm.control( maxit=8, epsilon=1e-3) ))) } } ab.starts[,1] <- -1 * ab.starts[,1] # make this into a difficulty param ## starting values for alpha and beta error checking if (is.na(alpha.start)) { alpha.start <- ab.starts[,1] } else if(is.null(dim(alpha.start))) { alpha.start <- alpha.start * matrix(1,K,1) } else if((dim(alpha.start)[1] != K) || (dim(alpha.start)[2] != 1)) { 
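## Illustrative usage sketch for the MCMCirt1d() sampler begun above (added
## comment; not from the original sources, not run). The matrix `votes` and
## its row names are hypothetical; theta.constraints uses the named "+"/"-"
## form handled by the constraint-building code above.
##   votes <- matrix(rbinom(9 * 40, 1, 0.5), nrow = 9, ncol = 40,
##                   dimnames = list(paste0("subject", 1:9), NULL))
##   post <- MCMCirt1d(votes,
##                     theta.constraints = list(subject1 = "+", subject9 = "-"),
##                     burnin = 1000, mcmc = 10000, thin = 10,
##                     store.item = FALSE)
##   summary(post)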
cat("Error: Starting value for alpha not conformable.\n") stop("Please respecify and call MCMCirt1d() again.\n", call.=FALSE) } if (is.na(beta.start)) { beta.start <- ab.starts[,2] } else if(is.null(dim(beta.start))) { beta.start <- beta.start * matrix(1,K,1) } else if((dim(beta.start)[1] != K) || (dim(beta.start)[2] != 1)) { cat("Error: Starting value for beta not conformable.\n") stop("Please respecify and call MCMCirt1d() again.\n", call.=FALSE) } ## define holder for posterior sample if(store.item == FALSE & store.ability == TRUE) { sample <- matrix(data=0, mcmc/thin, J) } else if (store.item == TRUE & store.ability == FALSE){ sample <- matrix(data=0, mcmc/thin, 2*K) } else if (store.item == TRUE & store.ability == TRUE){ sample <- matrix(data=0, mcmc/thin, J + 2 * K) } else{ cat("Error: store.item == FALSE & store.ability == FALSE.\n") stop("Please respecify and call MCMCirt1d() again.\n", call.=FALSE) } ## seeds seeds <- form.seeds(seed) lecuyer <- seeds[[1]] seed.array <- seeds[[2]] lecuyer.stream <- seeds[[3]] # call C++ code to draw sample posterior <- .C("MCMCirt1d", sampledata = as.double(sample), samplerow = as.integer(nrow(sample)), samplecol = as.integer(ncol(sample)), Xdata = as.integer(datamatrix), Xrow = as.integer(nrow(datamatrix)), Xcol = as.integer(ncol(datamatrix)), burnin = as.integer(burnin), mcmc = as.integer(mcmc), thin = as.integer(thin), lecuyer = as.integer(lecuyer), seedarray = as.integer(seed.array), lecuyerstream = as.integer(lecuyer.stream), verbose = as.integer(verbose), thetastartdata = as.double(theta.start), thetastartrow = as.integer(nrow(theta.start)), thetastartcol = as.integer(ncol(theta.start)), astartdata = as.double(alpha.start), astartrow = as.integer(length(alpha.start)), astartcol = as.integer(1), bstartdata = as.double(beta.start), bstartrow = as.integer(length(beta.start)), bstartcol = as.integer(1), t0 = as.double(t0), T0 = as.double(T0), ab0data = as.double(ab0), ab0row = as.integer(nrow(ab0)), ab0col = as.integer(ncol(ab0)), AB0data = as.double(AB0), AB0row = as.integer(nrow(AB0)), AB0col = as.integer(ncol(AB0)), thetaeqdata = as.double(theta.eq.constraints), thetaeqrow = as.integer(nrow(theta.eq.constraints)), thetaeqcol = as.integer(ncol(theta.eq.constraints)), thetaineqdata = as.double(theta.ineq.constraints), thetaineqrow = as.integer(nrow(theta.ineq.constraints)), thetaineqcol = as.integer(ncol(theta.ineq.constraints)), storei = as.integer(store.item), storea = as.integer(store.ability), PACKAGE="MCMCpack" ) theta.names <- paste("theta.", subject.names, sep = "") alpha.beta.names <- paste(rep(c("alpha.","beta."), K), rep(item.names, each = 2), sep = "") # put together matrix and build MCMC object to return sample <- matrix(posterior$sampledata, posterior$samplerow, posterior$samplecol, byrow=FALSE) output <- mcmc(data=sample, start=burnin+1, end=burnin+mcmc, thin=thin) names <- NULL if(store.ability == TRUE) { names <- c(names, theta.names) } if (store.item == TRUE){ names <- c(names, alpha.beta.names) } varnames(output) <- names attr(output,"title") <- "MCMCirt1d Posterior Sample" return(output) } MCMCpack/R/MCMCintervention.R0000644000176000001440000002175612133644103015400 0ustar ripleyusers######################################################### ## Internvetion Analysis using Changepoint Model ######################################################### "MCMCintervention"<- function(y, data=parent.frame(), m = 1, intervention = 1, prediction.type=c("trend", "ar"), change.type = c("fixed", "random", "all"), b0 = 0, B0 = 0, c0 = 
0.001, d0 = 0.001, sigma.mu = NA, sigma.var = NA, a = NULL, b = NULL, mcmc = 1000, burnin = 1000, thin = 1, verbose = 0, seed = NA, beta.start = NA, P.start = NA, marginal.likelihood = c("none", "Chib95"), ...){ ## form response and model matrices y <- as.vector(y) n <- length(y) ns <- m + 1 # number of states if (prediction.type == "trend"){ X <- matrix(cbind(1, c(1:n)), n, 2) xnames <- c("constant", "trend") } else if (prediction.type == "ar"){ y1 <- y[1:(n/2)] ar1 <- arima(y1, c(1,0,0)) muy0 <- as.numeric(ar1$coef[2]) sigmay0 <- sqrt(as.numeric(as.numeric(ar1[2])/(1 - ar1$coef[1]^2))) y0 <- rnorm(1, muy0, sigmay0) X <- matrix(cbind(1, c(y0, y[-n])), n, 2) xnames <- c("constant", "lag.y") } else { X <- matrix(1, n, 1) xnames <- c("constant") } k <- ncol(X) # number of covariates ## check iteration parameters check.mcmc.parameters(burnin, mcmc, thin) totiter <- mcmc + burnin nstore <- mcmc/thin cl <- match.call() ## seeds seeds <- form.seeds(seed) lecuyer <- seeds[[1]] seed.array <- seeds[[2]] lecuyer.stream <- seeds[[3]] if(!is.na(seed)) set.seed(seed) ## prior mvn.prior <- form.mvn.prior(b0, B0, k) b0 <- mvn.prior[[1]] B0 <- mvn.prior[[2]] if (prediction.type == "ar"){ if (b0[2] > 1|b0[2] < -1){ stop("The prior of AR coefficient ",b0[2], " is outside the stationary region! \n") } } if (is.na(sigma.mu)|is.na(sigma.var)) { check.ig.prior(c0, d0) } else { d0 <- 2*(sigma.mu + sigma.mu^3/sigma.var) c0 <- 2*(1 + (d0/2)/sigma.mu) } ## get marginal likelihood argument marginal.likelihood <- match.arg(marginal.likelihood) ## following MCMCregress, set chib as binary logmarglike <- loglik <- NULL chib <- 0 if (marginal.likelihood == "Chib95"){ chib <- 1 } ## initial values Y <- matrix(y, n, 1) if (m == 0){ output <- MCMCregress(formula = Y ~ X-1, mcmc=mcmc, burnin=burnin, verbose=verbose, thin=thin, b0 = b0, B0 = solve(B0), c0 = c0, d0 = d0, marginal.likelihood = marginal.likelihood) attr(output, "y") <- y attr(output, "intervention") <- intervention yhatout <- output[, 1:2]%*%t(X) attr(output, "yhat") <- matrix(yhatout, nstore, n) ## X_j*beta_j attr(output, "yforepred") <- matrix(yhatout, nstore, n) ## yt|Y_t-1, theta attr(output, "ybackpred") <- matrix(yhatout, nstore, n) ## yt|Y_t+1, theta } else{ ## if m > 0 A0 <- trans.mat.prior(m=m, n=n, a=a, b=b) Pstart <- check.P(P.start, m, a=a, b=b) ols <- lm(y~X-1) bols <- coef(ols) betastart <- matrix(rep(bols, ns), ns, k, byrow = TRUE) Sigmastart <- rep(summary(ols)$sigma^2, ns) statestart <- sort(sample(1:ns, n, replace=T)) AR <- 0 if (prediction.type == "ar"){ AR <- 1 } change <- 0 betaout.N <- nstore*ns*k Sigmaout.N <- nstore*ns if (change.type == "fixed"){ change <- 1 betaout.N <- nstore*ns*k Sigmaout.N <- nstore } if (change.type == "random"){ change <- 2 betaout.N <- nstore*k Sigmaout.N <- nstore*ns } ## call C++ code to draw sample posterior <- .C("MCMCintervention", accept = as.double(0.0), betaout = as.double(rep(0.0, betaout.N)), Sigmaout = as.double(rep(0.0, Sigmaout.N)), Pout = as.double(rep(0.0, nstore*ns*ns)), psout = as.double(rep(0.0, n*ns)), sout = as.double(rep(0.0, nstore*n)), yhatout = as.double(rep(0.0, nstore*n)), yerrorout = as.double(rep(0.0, nstore*n)), yforepredout = as.double(rep(0.0, nstore*n)), ybackpredout = as.double(rep(0.0, nstore*n)), Ydata = as.double(Y), Yrow = as.integer(nrow(Y)), Ycol = as.integer(ncol(Y)), Xdata = as.double(X), Xrow = as.integer(nrow(X)), Xcol = as.integer(ncol(X)), m = as.integer(m), intervention = as.integer(intervention), burnin = as.integer(burnin), mcmc = as.integer(mcmc), thin = 
as.integer(thin), verbose = as.integer(verbose), lecuyer=as.integer(lecuyer), seedarray=as.integer(seed.array), lecuyerstream=as.integer(lecuyer.stream), betastart = as.double(betastart), Sigmastart = as.double(Sigmastart), Pstart = as.double(Pstart), statestart = as.integer(statestart), a = as.double(a), b = as.double(b), b0data = as.double(b0), B0data = as.double(B0), c0 = as.double(c0), d0 = as.double(d0), A0data = as.double(A0), logmarglikeholder = as.double(0.0), loglikeholder = as.double(0.0), ar = as.integer(AR), change = as.integer(change), chib = as.integer(chib)) ## get marginal likelihood if Chib95 if (marginal.likelihood == "Chib95"){ logmarglike <- posterior$logmarglikeholder loglike <- posterior$loglikeholder } ## pull together matrix and build MCMC object to return beta.holder <- matrix(posterior$betaout, nstore, ) Sigma.holder <- matrix(posterior$Sigmaout, nstore, ) P.holder <- matrix(posterior$Pout, nstore, ) s.holder <- matrix(posterior$sout, nstore, ) ps.holder <- matrix(posterior$psout, n, ) output1 <- mcmc(data=beta.holder, start=burnin+1, end=burnin + mcmc, thin=thin) if (change == 0){ varnames(output1) <- sapply(c(1:ns), function(i){ paste(c(xnames), "_regime", i, sep = "") }) output2 <- mcmc(data=Sigma.holder, start=burnin+1, end=burnin + mcmc, thin=thin) varnames(output2) <- sapply(c(1:ns), function(i){ paste(c("sigma2"), "_regime", i, sep = "") }) } else if (change == 1){ varnames(output1) <- sapply(c(1:ns), function(i){ paste(c(xnames), "_regime", i, sep = "") }) output2 <- mcmc(data=Sigma.holder, start=burnin+1, end=burnin + mcmc, thin=thin) names(output2) <- c("sigma2") } else{ output2 <- mcmc(data=Sigma.holder, start=burnin+1, end=burnin + mcmc, thin=thin) varnames(output2) <- sapply(c(1:ns), function(i){ paste(c("sigma2"), "_regime", i, sep = "") }) } ## To check the acceptance rate (if AR == 1) accept <- posterior$accept output <- as.mcmc(cbind(output1, output2)) attr(output, "title") <- "MCMCintervention Posterior Sample" attr(output, "intervention") <- intervention attr(output, "accept") <- accept attr(output, "y") <- y attr(output, "X") <- X attr(output, "m") <- m attr(output, "call") <- cl attr(output, "logmarglike") <- logmarglike attr(output, "loglik") <- loglik attr(output, "prob.state") <- ps.holder/nstore attr(output, "s.store") <- s.holder attr(output, "yhat") <- matrix(posterior$yhatout, nstore, n)## X_j*beta_j attr(output, "yerror") <- matrix(posterior$yerrorout, nstore, n)## y_j - X_j*beta_j attr(output, "yforepred") <- matrix(posterior$yforepredout, nstore, n)## yt|Y_t-1, theta attr(output, "ybackpred") <- matrix(posterior$ybackpredout, nstore, n)## yt|Y_t+1, theta } return(output) }## end of MCMC function MCMCpack/R/MCMChregress.R0000644000176000001440000001430312133644103014464 0ustar ripleyusers########################################################################## ## MCMChregress.R ## ## MCMChregress() samples from the posterior distribution of a ## Gaussian hierarchical linear regression model in R using linked C++ ## code in Scythe ## ## The code uses Algorithm 2 of Chib & Carlin (1999) for efficient ## inference of (\beta | Y, sigma^2, Vb). ## ## Chib, S. & Carlin, B. P. 
(1999) On MCMC sampling in hierarchical ## longitudinal models, Statistics and Computing, 9, 17-26 ## #################################################################### ## ## Original code by Ghislain Vieilledent, may 2011 ## CIRAD UR B&SEF ## ghislain.vieilledent@cirad.fr / ghislainv@gmail.com ## #################################################################### ## ## The initial version of this file was generated by the ## auto.Scythe.call() function in the MCMCpack R package ## written by: ## ## Andrew D. Martin ## Dept. of Political Science ## Washington University in St. Louis ## admartin@wustl.edu ## ## Kevin M. Quinn ## Dept. of Government ## Harvard University ## kevin_quinn@harvard.edu ## ## This software is distributed under the terms of the GNU GENERAL ## PUBLIC LICENSE Version 2, June 1991. See the package LICENSE ## file for more information. ## ## Copyright (C) 2011 Andrew D. Martin and Kevin M. Quinn ## #################################################################### ## ## Revisions: ## - G. Vieilledent, May 4 2011 [initial file] ## #################################################################### MCMChregress <- function (fixed, random, group, data, burnin=1000, mcmc=10000, thin=10, verbose=1, seed=NA, beta.start=NA, sigma2.start=NA, Vb.start=NA, mubeta=0, Vbeta=1.0E6, r, R, nu=0.001, delta=0.001, ...) { #======== # Basic checks #======== check.group.hmodels(group, data) check.mcmc.parameters.hmodels(burnin, mcmc, thin) check.verbose.hmodels(verbose) check.offset(list(...)) #======== # Seed #======== seed <- form.seeds.hmodels(seed) #======== # Form response and model matrices #======== mf.fixed <- model.frame(formula=fixed,data=data) X <- model.matrix(attr(mf.fixed,"terms"),data=mf.fixed) Y <- model.response(mf.fixed) mf.random <- model.frame(formula=random,data=data) W <- model.matrix(attr(mf.random,"terms"),data=mf.random) #======== # Model parameters #======== nobs <- nrow(X) IdentGroup <- as.numeric(as.factor(as.character(data[,names(data)==as.character(group)])))-1 LevelsGroup <- sort(unique(IdentGroup+1)) LevelsGroup.Name <- sort(unique(as.character(data[,names(data)==as.character(group)]))) ngroup <- length(LevelsGroup) np <- ncol(X) nq <- ncol(W) ngibbs <- mcmc+burnin nthin <- thin nburn <- burnin nsamp <- mcmc/thin #======== # Form and check starting parameters #======== beta.start <- form.beta.start.hmodels(fixed,data,beta.start,np,family="gaussian",defaults=NA) sigma2.start <- form.sigma2.start.hmodels(fixed,data,sigma2.start,family="gaussian") Vb.start <- form.Vb.start.hmodels(Vb.start,nq) #======== # Form priors #======== mvn.prior <- form.mvn.prior.hmodels(mubeta,Vbeta,np) mubeta <- mvn.prior[[1]] Vbeta <- mvn.prior[[2]] wishart.prior <- form.wishart.prior.hmodels(r,R,nq) r <- wishart.prior[[1]] R <- wishart.prior[[2]] check.ig.prior.hmodels(nu,delta) s1 <- nu s2 <- delta #======== # Parameters to save #======== beta_vect <- rep(c(beta.start),each=nsamp) Vb_vect <- rep(c(Vb.start),each=nsamp) b_vect <- rep(0,nq*ngroup*nsamp) V <- rep(sigma2.start,nsamp) Y_pred <- rep(0,nobs) Deviance <- rep(0,nsamp) #======== # call C++ code to draw sample #======== Sample <- .C("MCMChregress", #= Constants and data ngibbs=as.integer(ngibbs), nthin=as.integer(nthin), nburn=as.integer(nburn),## Number of iterations, burning and samples nobs=as.integer(nobs), ngroup=as.integer(ngroup), ## Constants np=as.integer(np), nq=as.integer(nq), ## Constants IdentGroup=as.integer(IdentGroup), Y_vect=as.double(c(Y)), ## Response variable X_vect=as.double(c(X)), ## Covariates 
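## Editorial note: the response and design matrices are handed to the C++
## sampler as column-major double vectors via c(Y), c(X) and c(W); the
## ".nonconst"-suffixed arguments below are output holders that the C++ code
## overwrites in place with the posterior draws.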
W_vect=as.double(c(W)), ## Covariates #= Parameters to save beta_vect.nonconst=as.double(beta_vect), ## Fixed parameters of the regression b_vect.nonconst=as.double(b_vect), ## Random effects on intercept and slope Vb_vect.nonconst=as.double(Vb_vect), ## Variance-covariance of random effects V.nonconst=as.double(V), ## Variance of residuals #= Defining priors mubeta_vect=as.double(c(mubeta)), Vbeta_vect=as.double(c(Vbeta)), r=as.double(r), R_vect=as.double(c(R)), s1_V=as.double(s1), s2_V=as.double(s2), #= Diagnostic Deviance.nonconst=as.double(Deviance), Y_pred.nonconst=as.double(Y_pred), ## Predictive posterior mean #= Seeds seed=as.integer(seed), #= Verbose verbose=as.integer(verbose), PACKAGE="MCMCpack") #= Matrix of MCMC samples Matrix <- matrix(NA,nrow=nsamp,ncol=np+nq*ngroup+nq*nq+2) names.fixed <- paste("beta.",colnames(X),sep="") names.random <- paste("b.",rep(colnames(W),each=ngroup),".",rep(LevelsGroup.Name,nq),sep="") names.variances <- c(paste("VCV.",colnames(W),".",rep(colnames(W),each=nq),sep=""),"sigma2") colnames(Matrix) <- c(names.fixed,names.random,names.variances,"Deviance") #= Filling-in the matrix Matrix[,c(1:np)] <- matrix(Sample[[12]],ncol=np) Matrix[,c((np+1):(np+nq*ngroup))] <- matrix(Sample[[13]],ncol=nq*ngroup) Matrix[,c((np+nq*ngroup+1):(np+nq*ngroup+nq*nq))] <- matrix(Sample[[14]],ncol=nq*nq) Matrix[,ncol(Matrix)-1] <- Sample[[15]] Matrix[,ncol(Matrix)] <- Sample[[22]] #= Transform Sample list in an MCMC object MCMC <- mcmc(Matrix,start=nburn+1,end=ngibbs,thin=nthin) #= Output return (list(mcmc=MCMC,Y.pred=Sample[[23]])) } #################################################################### ## END #################################################################### MCMCpack/R/MCMChpoisson.R0000644000176000001440000001454212133644103014511 0ustar ripleyusers########################################################################## ## MCMChpoisson.R ## ## MCMChpoisson() samples from the posterior distribution of a ## Poisson hierarchical linear regression model in R using linked C++ ## code in Scythe ## ## The code uses Algorithm 2 of Chib & Carlin (1999) for efficient ## inference of (\beta | Y, sigma^2, Vb). ## ## Chib, S. & Carlin, B. P. (1999) On MCMC sampling in hierarchical ## longitudinal models, Statistics and Computing, 9, 17-26 ## #################################################################### ## ## Original code by Ghislain Vieilledent, may 2011 ## CIRAD UR B&SEF ## ghislain.vieilledent@cirad.fr / ghislainv@gmail.com ## #################################################################### ## ## The initial version of this file was generated by the ## auto.Scythe.call() function in the MCMCpack R package ## written by: ## ## Andrew D. Martin ## Dept. of Political Science ## Washington University in St. Louis ## admartin@wustl.edu ## ## Kevin M. Quinn ## Dept. of Government ## Harvard University ## kevin_quinn@harvard.edu ## ## This software is distributed under the terms of the GNU GENERAL ## PUBLIC LICENSE Version 2, June 1991. See the package LICENSE ## file for more information. ## ## Copyright (C) 2011 Andrew D. Martin and Kevin M. Quinn ## #################################################################### ## ## Revisions: ## - G. 
Vieilledent, May 9 2011 [initial file] ## #################################################################### MCMChpoisson <- function (fixed, random, group, data, burnin=5000, mcmc=10000, thin=10, verbose=1, seed=NA, beta.start=NA, sigma2.start=NA, Vb.start=NA, mubeta=0, Vbeta=1.0E6, r, R, nu=0.001, delta=0.001, FixOD=0, ...) { #======== # Basic checks #======== check.group.hmodels(group, data) check.mcmc.parameters.hmodels(burnin, mcmc, thin) check.verbose.hmodels(verbose) check.FixOD.hmodels(FixOD) check.offset(list(...)) #======== # Seed #======== seed <- form.seeds.hmodels(seed) #======== # Form response and model matrices #======== mf.fixed <- model.frame(formula=fixed,data=data) X <- model.matrix(attr(mf.fixed,"terms"),data=mf.fixed) Y <- model.response(mf.fixed) check.Y.Poisson.hmodels(Y) mf.random <- model.frame(formula=random,data=data) W <- model.matrix(attr(mf.random,"terms"),data=mf.random) #======== # Model parameters #======== nobs <- nrow(X) IdentGroup <- as.numeric(as.factor(as.character(data[,names(data)==as.character(group)])))-1 LevelsGroup <- sort(unique(IdentGroup+1)) LevelsGroup.Name <- sort(unique(as.character(data[,names(data)==as.character(group)]))) ngroup <- length(LevelsGroup) np <- ncol(X) nq <- ncol(W) ngibbs <- mcmc+burnin nthin <- thin nburn <- burnin nsamp <- mcmc/thin #======== # Form and check starting parameters #======== beta.start <- form.beta.start.hmodels(fixed,data,beta.start,np,family="poisson",defaults=NA) sigma2.start <- form.sigma2.start.hmodels(fixed,data,sigma2.start,family="poisson") Vb.start <- form.Vb.start.hmodels(Vb.start,nq) #======== # Form priors #======== mvn.prior <- form.mvn.prior.hmodels(mubeta,Vbeta,np) mubeta <- mvn.prior[[1]] Vbeta <- mvn.prior[[2]] wishart.prior <- form.wishart.prior.hmodels(r,R,nq) r <- wishart.prior[[1]] R <- wishart.prior[[2]] check.ig.prior.hmodels(nu,delta) s1 <- nu s2 <- delta #======== # Parameters to save #======== beta_vect <- rep(c(beta.start),each=nsamp) Vb_vect <- rep(c(Vb.start),each=nsamp) b_vect <- rep(0,nq*ngroup*nsamp) V <- rep(sigma2.start,nsamp) lambda_pred <- rep(0.5,nobs) Deviance <- rep(0,nsamp) #======== # call C++ code to draw sample #======== Sample <- .C("MCMChpoisson", #= Constants and data ngibbs=as.integer(ngibbs), nthin=as.integer(nthin), nburn=as.integer(nburn),## Number of iterations, burning and samples nobs=as.integer(nobs), ngroup=as.integer(ngroup), ## Constants np=as.integer(np), nq=as.integer(nq), ## Constants IdentGroup=as.integer(IdentGroup), Y_vect=as.double(c(Y)), ## Response variable X_vect=as.double(c(X)), ## Covariates W_vect=as.double(c(W)), ## Covariates #= Parameters to save beta_vect.nonconst=as.double(beta_vect), ## Fixed parameters of the regression b_vect.nonconst=as.double(b_vect), ## Random effects on intercept and slope Vb_vect.nonconst=as.double(Vb_vect), ## Variance-covariance of random effects V.nonconst=as.double(V), ## Variance of residuals #= Defining priors mubeta_vect=as.double(c(mubeta)), Vbeta_vect=as.double(c(Vbeta)), r=as.double(r), R_vect=as.double(c(R)), s1_V=as.double(s1), s2_V=as.double(s2), #= Diagnostic Deviance.nonconst=as.double(Deviance), lambda_pred.nonconst=as.double(lambda_pred), ## Predictive posterior mean #= Seeds seed=as.integer(seed), #= Verbose verbose=as.integer(verbose), #= Overdispersion FixOD=as.integer(FixOD), PACKAGE="MCMCpack") #= Matrix of MCMC samples Matrix <- matrix(NA,nrow=nsamp,ncol=np+nq*ngroup+nq*nq+2) names.fixed <- paste("beta.",colnames(X),sep="") names.random <- 
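## Editorial note: in the returned mcmc matrix the fixed effects are labelled
## "beta.<covariate>", the random effects "b.<covariate>.<group level>" (built
## just below), the variance components "VCV.<covariate>.<covariate>" and
## "sigma2", with the model "Deviance" in the last column.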
paste("b.",rep(colnames(W),each=ngroup),".",rep(LevelsGroup.Name,nq),sep="") names.variances <- c(paste("VCV.",colnames(W),".",rep(colnames(W),each=nq),sep=""),"sigma2") colnames(Matrix) <- c(names.fixed,names.random,names.variances,"Deviance") #= Filling-in the matrix Matrix[,c(1:np)] <- matrix(Sample[[12]],ncol=np) Matrix[,c((np+1):(np+nq*ngroup))] <- matrix(Sample[[13]],ncol=nq*ngroup) Matrix[,c((np+nq*ngroup+1):(np+nq*ngroup+nq*nq))] <- matrix(Sample[[14]],ncol=nq*nq) Matrix[,ncol(Matrix)-1] <- Sample[[15]] Matrix[,ncol(Matrix)] <- Sample[[22]] #= Transform Sample list in an MCMC object MCMC <- mcmc(Matrix,start=nburn+1,end=ngibbs,thin=nthin) #= Output return (list(mcmc=MCMC,lambda.pred=Sample[[23]])) } #################################################################### ## END #################################################################### MCMCpack/R/MCMChlogit.R0000644000176000001440000001452212133644103014133 0ustar ripleyusers########################################################################## ## MCMChlogit.R ## ## MCMChlogit() samples from the posterior distribution of a ## Binomial hierarchical linear regression model in R using linked C++ ## code in Scythe ## ## The code uses Algorithm 2 of Chib & Carlin (1999) for efficient ## inference of (\beta | Y, sigma^2, Vb). ## ## Chib, S. & Carlin, B. P. (1999) On MCMC sampling in hierarchical ## longitudinal models, Statistics and Computing, 9, 17-26 ## #################################################################### ## ## Original code by Ghislain Vieilledent, may 2011 ## CIRAD UR B&SEF ## ghislain.vieilledent@cirad.fr / ghislainv@gmail.com ## #################################################################### ## ## The initial version of this file was generated by the ## auto.Scythe.call() function in the MCMCpack R package ## written by: ## ## Andrew D. Martin ## Dept. of Political Science ## Washington University in St. Louis ## admartin@wustl.edu ## ## Kevin M. Quinn ## Dept. of Government ## Harvard University ## kevin_quinn@harvard.edu ## ## This software is distributed under the terms of the GNU GENERAL ## PUBLIC LICENSE Version 2, June 1991. See the package LICENSE ## file for more information. ## ## Copyright (C) 2011 Andrew D. Martin and Kevin M. Quinn ## #################################################################### ## ## Revisions: ## - G. Vieilledent, May 9 2011 [initial file] ## #################################################################### MCMChlogit <- function (fixed, random, group, data, burnin=5000, mcmc=10000, thin=10, verbose=1, seed=NA, beta.start=NA, sigma2.start=NA, Vb.start=NA, mubeta=0, Vbeta=1.0E6, r, R, nu=0.001, delta=0.001, FixOD=0, ...) 
{ #======== # Basic checks #======== check.group.hmodels(group, data) check.mcmc.parameters.hmodels(burnin, mcmc, thin) check.verbose.hmodels(verbose) check.FixOD.hmodels(FixOD) check.offset(list(...)) #======== # Seed #======== seed <- form.seeds.hmodels(seed) #======== # Form response and model matrices #======== mf.fixed <- model.frame(formula=fixed,data=data) X <- model.matrix(attr(mf.fixed,"terms"),data=mf.fixed) Y <- model.response(mf.fixed) check.Y.Binomial.hmodels(Y) mf.random <- model.frame(formula=random,data=data) W <- model.matrix(attr(mf.random,"terms"),data=mf.random) #======== # Model parameters #======== nobs <- nrow(X) IdentGroup <- as.numeric(as.factor(as.character(data[,names(data)==as.character(group)])))-1 LevelsGroup <- sort(unique(IdentGroup+1)) LevelsGroup.Name <- sort(unique(as.character(data[,names(data)==as.character(group)]))) ngroup <- length(LevelsGroup) np <- ncol(X) nq <- ncol(W) ngibbs <- mcmc+burnin nthin <- thin nburn <- burnin nsamp <- mcmc/thin #======== # Form and check starting parameters #======== beta.start <- form.beta.start.hmodels(fixed,data,beta.start,np,family="binomial",defaults=NA) sigma2.start <- form.sigma2.start.hmodels(fixed,data,sigma2.start,family="binomial") Vb.start <- form.Vb.start.hmodels(Vb.start,nq) #======== # Form priors #======== mvn.prior <- form.mvn.prior.hmodels(mubeta,Vbeta,np) mubeta <- mvn.prior[[1]] Vbeta <- mvn.prior[[2]] wishart.prior <- form.wishart.prior.hmodels(r,R,nq) r <- wishart.prior[[1]] R <- wishart.prior[[2]] check.ig.prior.hmodels(nu,delta) s1 <- nu s2 <- delta #======== # Parameters to save #======== beta_vect <- rep(c(beta.start),each=nsamp) Vb_vect <- rep(c(Vb.start),each=nsamp) b_vect <- rep(0,nq*ngroup*nsamp) V <- rep(sigma2.start,nsamp) theta_pred <- rep(0.5,nobs) Deviance <- rep(0,nsamp) #======== # call C++ code to draw sample #======== Sample <- .C("MCMChlogit", #= Constants and data ngibbs=as.integer(ngibbs), nthin=as.integer(nthin), nburn=as.integer(nburn),## Number of iterations, burning and samples nobs=as.integer(nobs), ngroup=as.integer(ngroup), ## Constants np=as.integer(np), nq=as.integer(nq), ## Constants IdentGroup=as.integer(IdentGroup), Y_vect=as.double(c(Y)), ## Response variable X_vect=as.double(c(X)), ## Covariates W_vect=as.double(c(W)), ## Covariates #= Parameters to save beta_vect.nonconst=as.double(beta_vect), ## Fixed parameters of the regression b_vect.nonconst=as.double(b_vect), ## Random effects on intercept and slope Vb_vect.nonconst=as.double(Vb_vect), ## Variance-covariance of random effects V.nonconst=as.double(V), ## Variance of residuals #= Defining priors mubeta_vect=as.double(c(mubeta)), Vbeta_vect=as.double(c(Vbeta)), r=as.double(r), R_vect=as.double(c(R)), s1_V=as.double(s1), s2_V=as.double(s2), #= Diagnostic Deviance.nonconst=as.double(Deviance), theta_pred.nonconst=as.double(theta_pred), ## Predictive posterior mean #= Seeds seed=as.integer(seed), #= Verbose verbose=as.integer(verbose), #= Overdispersion FixOD=as.integer(FixOD), PACKAGE="MCMCpack") #= Matrix of MCMC samples Matrix <- matrix(NA,nrow=nsamp,ncol=np+nq*ngroup+nq*nq+2) names.fixed <- paste("beta.",colnames(X),sep="") names.random <- paste("b.",rep(colnames(W),each=ngroup),".",rep(LevelsGroup.Name,nq),sep="") names.variances <- c(paste("VCV.",colnames(W),".",rep(colnames(W),each=nq),sep=""),"sigma2") colnames(Matrix) <- c(names.fixed,names.random,names.variances,"Deviance") #= Filling-in the matrix Matrix[,c(1:np)] <- matrix(Sample[[12]],ncol=np) Matrix[,c((np+1):(np+nq*ngroup))] <- 
matrix(Sample[[13]],ncol=nq*ngroup) Matrix[,c((np+nq*ngroup+1):(np+nq*ngroup+nq*nq))] <- matrix(Sample[[14]],ncol=nq*nq) Matrix[,ncol(Matrix)-1] <- Sample[[15]] Matrix[,ncol(Matrix)] <- Sample[[22]] #= Transform Sample list in an MCMC object MCMC <- mcmc(Matrix,start=nburn+1,end=ngibbs,thin=nthin) #= Output return (list(mcmc=MCMC,theta.pred=Sample[[23]])) } #################################################################### ## END #################################################################### MCMCpack/R/MCMChierEI.R0000644000176000001440000001117512133644103014013 0ustar ripleyusers########################################################################## ## sample from the posterior distribution of Wakefield's hierarchical model ## for ecological inference in R using linked C++ code in Scythe ## ## This software is distributed under the terms of the GNU GENERAL ## PUBLIC LICENSE Version 2, June 1991. See the package LICENSE ## file for more information. ## ## KQ 10/22/2002 ## ## Copyright (C) 2003-2007 Andrew D. Martin and Kevin M. Quinn ## Copyright (C) 2007-present Andrew D. Martin, Kevin M. Quinn, ## and Jong Hee Park ########################################################################## "MCMChierEI" <- function(r0, r1, c0, c1, burnin=5000, mcmc=50000, thin=1, verbose=0, seed=NA, m0=0, M0=2.287656, m1=0, M1=2.287656, a0=0.825, b0=0.0105, a1=0.825, b1=0.0105, ...){ # Error checking if (length(r0) != length(r1)){ cat("length(r0) != length(r1).\n") stop("Please check data and try MCMChierEI() again.\n") } if (length(r0) != length(c0)){ cat("length(r0) != length(c0).\n") stop("Please check data and try MCMChierEI() again.\n") } if (length(r0) != length(c1)){ cat("length(r0) != length(c1).\n") stop("Please check data and try MCMChierEI() again.\n") } if (length(r1) != length(c0)){ cat("length(r1) != length(c0).\n") stop("Please check data and try MCMChierEI() again.\n") } if (length(r1) != length(c1)){ cat("length(r1) != length(c1).\n") stop("Please check data and try MCMChierEI() again.\n") } if (length(c0) != length(c1)){ cat("length(c0) != length(c1).\n") stop("Please check data and try MCMChierEI() again.\n") } if (min((r0+r1) == (c0+c1))==0){ cat("Rows and columns do not sum to same thing.\n") stop("Please check data and try MCMChierEI() again.\n") } check.mcmc.parameters(burnin, mcmc, thin) # seeds seeds <- form.seeds(seed) lecuyer <- seeds[[1]] seed.array <- seeds[[2]] lecuyer.stream <- seeds[[3]] if (M0 <= 0 ){ cat("Parameter M0 <= 0.\n") stop("Please respecify and try MCMChierEI() again.\n") } if (M1 <= 0 ){ cat("Parameter M1 <= 0.\n") stop("Please respecify and try MCMChierEI() again.\n") } if (a0 <= 0 ){ cat("Parameter a0 <= 0.\n") stop("Please respecify and try MCMChierEI() again.\n") } if (a1 <= 0 ){ cat("Parameter a1 <= 0.\n") stop("Please respecify and try MCMChierEI() again.\n") } if (b0 <= 0 ){ cat("Parameter b0 <= 0.\n") stop("Please respecify and try MCMChierEI() again.\n") } if (b1 <= 0 ){ cat("Parameter b1 <= 0.\n") stop("Please respecify and try MCMChierEI() again.\n") } # setup matrix to hold output from sampling ntables = length(r0) sample <- matrix(0, mcmc/thin, ntables*2+4) # call C++ code to do the sampling C.sample <- .C("hierEI", samdata = as.double(sample), samrow = as.integer(nrow(sample)), samcol = as.integer(ncol(sample)), r0 = as.double(r0), r1 = as.double(r1), c0 = as.double(c0), c1 = as.double(c1), ntables = as.integer(ntables), burnin = as.integer(burnin), mcmc = as.integer(mcmc), thin = as.integer(thin), mu0priormean = as.double(m0), 
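## Editorial note: here and below, m0/M0 and m1/M1 are the normal prior means
## and variances for the hierarchical locations mu0 and mu1, while a0/b0 and
## a1/b1 are the inverse-gamma shape/scale parameters for the corresponding
## variance parameters sigma^2.0 and sigma^2.1.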
mu0priorvar = as.double(M0), mu1priormean = as.double(m1), mu1priorvar = as.double(M1), a0 = as.double(a0), b0 = as.double(b0), a1 = as.double(a1), b1 = as.double(b1), verbose = as.integer(verbose), lecuyer = as.integer(lecuyer), seedarray = as.integer(seed.array), lecuyerstream = as.integer(lecuyer.stream), PACKAGE="MCMCpack" ) sample <- matrix(C.sample$samdata, C.sample$samrow, C.sample$samcol, byrow=TRUE) output <- mcmc(data=sample, start=burnin+1, end=burnin+mcmc, thin=thin) p0names <- paste("p0table", 1:ntables, sep="") p1names <- paste("p1table", 1:ntables, sep="") varnames(output) <- c(p0names, p1names, "mu0", "mu1", "sigma^2.0", "sigma^2.1") attr(output, "title") <- "MCMCpack Wakefield's Hierarchical EI Model Posterior Sample" return(output) } MCMCpack/R/MCMChierBetaBinom.R0000644000176000001440000001365712133644103015365 0ustar ripleyusers########################################################################### ## sample from the posterior distribution of a hierarchical beta binomial ## model in R using linked C++ code in Scythe ## ## y_{ij} ~ Binomial(s_{ij}, theta_{ij}) ## theta_{ij} ~ Beta(alpha_j, beta_j) ## alpha_j ~ Pareto(1, a) ## beta_j ~ Pareto(1, b) ## ## KQ 5/24/2011 - 6/25/2011 ## ## This software is distributed under the terms of the GNU GENERAL ## PUBLIC LICENSE Version 2, June 1991. See the package LICENSE ## file for more information. ## ## ## Copyright (C) 2003-2007 Andrew D. Martin and Kevin M. Quinn ## Copyright (C) 2007-present Andrew D. Martin, Kevin M. Quinn, ## and Jong Hee Park ########################################################################## "MCMChierBetaBinom" <- function(y, s, i.labels, j.labels, burnin=1000, mcmc=10000, thin=1, verbose=0, seed=NA, theta.start=NA, alpha.start=NA, beta.start=NA, a=0, b=0){ ## checks check.mcmc.parameters(burnin, mcmc, thin) max.length <- max(length(y), length(s), length(i.labels), length(j.labels)) min.length <- min(length(y), length(s), length(i.labels), length(j.labels)) if (max.length != min.length){ stop("y, s, i.labels, j.labels not all of same length\n") } na.indic <- is.na(y) na.indic <- na.indic + is.na(s) na.indic <- na.indic + is.na(i.labels) na.indic <- na.indic + is.na(j.labels) na.indic <- na.indic != 0 y <- y[!na.indic] s <- s[!na.indic] i.labels <- i.labels[!na.indic] j.labels <- j.labels[!na.indic] if (min(y) < 0){ stop("y cannot have an element less than 0\n") } if (max(y > s) > 0){ stop("y[i] cannot be greater than s[i] for any i\n") } ## more checking needed ## seeds seeds <- form.seeds(seed) lecuyer <- seeds[[1]] seed.array <- seeds[[2]] lecuyer.stream <- seeds[[3]] i.labels <- as.character(i.labels) j.labels <- as.character(j.labels) i.labels.unique <- unique(i.labels) j.labels.unique <- unique(j.labels) i.labels.num <- rep(NA, length(i.labels)) j.labels.num <- rep(NA, length(j.labels)) for (i in 1:length(i.labels)){ i.labels.num[i] <- which(i.labels.unique == i.labels[i]) } for (j in 1:length(j.labels)){ j.labels.num[j] <- which(j.labels.unique == j.labels[j]) } i.labels.num.unique <- unique(i.labels.num) j.labels.num.unique <- unique(j.labels.num) ## starting values if (length(theta.start) == 1){ if (is.numeric(theta.start)){ theta.start <- rep(theta.start, length(y)) } else if (is.na(theta.start)){ theta.start <- (y + 0.01) / (s + 0.02) } else{ theta.start <- rep(0.5, length(y)) } } if (length(theta.start) != length(y)){ stop("length(theta.start) != length(y)\n") } if (max(theta.start) >= 1.0){ stop("elements of theta.start must be less than 1.0\n") } if (min(theta.start) <= 0.0){ 
stop("elements of theta.start must be greater than 0.0\n") } if (length(alpha.start) == 1){ if (is.na(alpha.start)){ alpha.start <- rep(1.001, length(j.labels.unique)) } } if (length(alpha.start) != length(j.labels.unique)){ stop("length(alpha.start) != length(unique(j.labels))\n") } if (length(beta.start) == 1){ if (is.na(beta.start)){ beta.start <- rep(1.001, length(j.labels.unique)) } } if (length(beta.start) != length(j.labels.unique)){ stop("length(beta.start) != length(unique(j.labels))\n") } accepts <- rep(0, length(j.labels.unique)) ## get reasonable values for base.sigma base.sigma <- rep(1, length(j.labels.unique)) ## call C++ code to draw the sample sample <- matrix(data=0, mcmc/thin, (length(y) + 2*length(j.labels.unique)) ) posterior <- .C("hierBetaBinom", samdata = as.double(sample), samrow = as.integer(nrow(sample)), samcol = as.integer(ncol(sample)), y = as.integer(y), s = as.integer(s), theta = as.double(theta.start), alpha = as.double(alpha.start), beta = as.double(beta.start), a = as.double(a), b = as.double(b), ilabels = as.integer(i.labels.num), jlabels = as.integer(j.labels.num), ilabelsunique = as.integer(i.labels.num.unique), jlabelsunique = as.integer(j.labels.num.unique), n = as.integer(length(y)), ni = as.integer(length(i.labels.unique)), nj = as.integer(length(j.labels.unique)), burnin = as.integer(burnin), mcmc = as.integer(mcmc), thin = as.integer(thin), lecuyer = as.integer(lecuyer), seedarray = as.integer(seed.array), lecuyerstream = as.integer(lecuyer.stream), verbose = as.integer(verbose), accepts = as.integer(accepts), basesigma = as.double(base.sigma), PACKAGE="MCMCpack") sample <- matrix(posterior$samdata, posterior$samrow, posterior$samcol, byrow=FALSE) output <- mcmc(data=sample, start=burnin+1, end=burnin+mcmc, thin=thin) theta.names <- paste("theta.", i.labels, ".", j.labels, sep="") alpha.names <- paste("alpha.", j.labels.unique, sep="") beta.names <- paste("beta.", j.labels.unique, sep="") varnames(output) <- c(theta.names, alpha.names, beta.names) attr(output, "title") <- "MCMChierBetaBinom Posterior Sample" attr(output, "acceptance.rates") <- posterior$accepts / (posterior$mcmc + posterior$burnin) return(output) } ## end MCMChierBetaBinom MCMCpack/R/MCMCfactanal.R0000644000176000001440000001217312133644103014416 0ustar ripleyusers########################################################################## ## sample from the posterior distribution of a factor analysis model ## model in R using linked C++ code in Scythe. ## ## The model is: ## ## x_i = \Lambda \phi_i + \epsilon_i, \epsilon_i \sim N(0, \Psi) ## ## where \Psi is diagonal and the priors are: ## ## \lambda_{ij} \sim N(l_{ij}, L^{-1}_{ij}) ## \phi_i \sim N(0,I) ## \psi^2_{jj} \sim IG(a0_j/2, b0_j/2) ## ## ## Andrew D. Martin ## Washington University ## ## Kevin M. Quinn ## Harvard University ## ## This software is distributed under the terms of the GNU GENERAL ## PUBLIC LICENSE Version 2, June 1991. See the package LICENSE ## file for more information. ## ## May 7, 2003 ## revised to accomodate new spec 7/5/2004 KQ ## ## Copyright (C) 2003-2007 Andrew D. Martin and Kevin M. Quinn ## Copyright (C) 2007-present Andrew D. Martin, Kevin M. Quinn, ## and Jong Hee Park ########################################################################## "MCMCfactanal" <- function(x, factors, lambda.constraints=list(), data=NULL, burnin = 1000, mcmc = 20000, thin=1, verbose = 0, seed = NA, lambda.start = NA, psi.start = NA, l0=0, L0=0, a0=0.001, b0=0.001, store.scores = FALSE, std.var=TRUE, ... 
) { ## check for an offset check.offset(list(...)) ## get data matrix and associated constants if (is.matrix(x)){ X <- x xvars <- dimnames(X)[[2]] xobs <- dimnames(X)[[1]] N <- nrow(X) K <- ncol(X) } else { holder <- parse.formula(formula=x, data=data, intercept=FALSE, justX=TRUE) X <- holder[[2]] xvars <- holder[[3]] xobs <- holder[[4]] N <- nrow(X) K <- ncol(X) } ## standardize X if (std.var){ for (i in 1:K){ X[,i] <- (X[,i]-mean(X[,i]))/sd(X[,i]) } } else{ for (i in 1:K){ X[,i] <- X[,i]-mean(X[,i]) } } ## take care of the case where X has no row names if (is.null(xobs)){ xobs <- 1:N } ## check mcmc parameters check.mcmc.parameters(burnin, mcmc, thin) # seeds seeds <- form.seeds(seed) lecuyer <- seeds[[1]] seed.array <- seeds[[2]] lecuyer.stream <- seeds[[3]] ## setup constraints on Lambda holder <- build.factor.constraints(lambda.constraints, X, K, factors) Lambda.eq.constraints <- holder[[1]] Lambda.ineq.constraints <- holder[[2]] X.names <- holder[[3]] ## setup prior on Lambda holder <- form.factload.norm.prior(l0, L0, K, factors, X.names) Lambda.prior.mean <- holder[[1]] Lambda.prior.prec <- holder[[2]] ## setup and check prior on Psi holder <- form.ig.diagmat.prior(a0, b0, K) a0 <- holder[[1]] b0 <- holder[[2]] ## starting values for Lambda Lambda <- factload.start(lambda.start, K, factors, Lambda.eq.constraints, Lambda.ineq.constraints) ## starting values for Psi Psi <- factuniqueness.start(psi.start, X) ## define holder for posterior sample if(store.scores == FALSE) { sample <- matrix(data=0, mcmc/thin, K*factors+K) } else { sample <- matrix(data=0, mcmc/thin, K*factors+K+factors*N) } posterior <- NULL ## call C++ code to do the sampling auto.Scythe.call(output.object="posterior", cc.fun.name="MCMCfactanal", sample.nonconst=sample, X=X, burnin=as.integer(burnin), mcmc=as.integer(mcmc), thin=as.integer(thin), lecuyer=as.integer(lecuyer), seedarray=as.integer(seed.array), lecuyerstream=as.integer(lecuyer.stream), verbose=as.integer(verbose), Lambda=Lambda, Psi=Psi, Lameq=Lambda.eq.constraints, Lamineq=Lambda.ineq.constraints, Lampmean=Lambda.prior.mean, Lampprec=Lambda.prior.prec, a0=a0, b0=b0, storescores=as.integer(store.scores)) Lambda.names <- paste(paste("Lambda", rep(X.names, each=factors), sep=""), rep(1:factors,K), sep="_") Psi.names <- paste("Psi", X.names, sep="") par.names <- c(Lambda.names, Psi.names) if (store.scores==TRUE){ phi.names <- paste(paste("phi", rep(xobs, each=factors), sep="_"), rep(1:factors,factors), sep="_") par.names <- c(par.names, phi.names) } title <- "MCMCpack Factor Analysis Posterior Sample" output <- form.mcmc.object(posterior, par.names, title) ## get rid of columns for constrained parameters output.df <- as.data.frame(as.matrix(output)) output.sd <- apply(output.df, 2, sd) output.df <- output.df[,output.sd != 0] output <- mcmc(as.matrix(output.df), start=burnin+1, end=mcmc+burnin, thin=thin) ## add constraint info so this isn't lost attr(output, "constraints") <- lambda.constraints attr(output, "n.manifest") <- K attr(output, "n.factors") <- factors return(output) } MCMCpack/R/MCMCdynamicIRT1d.R0000644000176000001440000003160112133644103015072 0ustar ripleyusers########################################################################## ## samples from the posterior distribution of a dynamic 1d IRT model ## a la Martin and Quinn (2002) ## ## This software is distributed under the terms of the GNU GENERAL ## PUBLIC LICENSE Version 2, June 1991. See the package LICENSE ## file for more information. 
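## -------------------------------------------------------------------------
## Editorial note: a minimal, hypothetical usage sketch for MCMCdynamicIRT1d()
## (not original code; 'votes' is a placeholder subjects-by-items 0/1 matrix
## with subject rownames, and 'item.periods' gives the period 1..T of each item).
##   out <- MCMCdynamicIRT1d(votes, item.time.map = item.periods,
##                           theta.constraints = list(subjA = "+", subjB = "-"),
##                           burnin = 1000, mcmc = 5000, thin = 5, verbose = 500)
##   summary(out)
## -------------------------------------------------------------------------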
## ## Assumes a local level model for the evolution of theta ## ## y_{jkt}^* = -alpha_k + beta_k * theta_{jt} + epsilon_{jkt} ## theta_{jt} ~ N(theta_{j(t-1)}, tau^2) ## ## Kevin Quinn ## 1/28/2008 ## ## Copyright (C) 2003-2007 Andrew D. Martin and Kevin M. Quinn ## Copyright (C) 2007-present Andrew D. Martin, Kevin M. Quinn, ## and Jong Hee Park ########################################################################## "MCMCdynamicIRT1d" <- function(datamatrix, item.time.map, theta.constraints=list(), burnin=1000, mcmc=20000, thin=1, verbose=0, seed=NA, theta.start=NA, alpha.start=NA, beta.start=NA, tau2.start=1, a0=0, A0=.1, b0=0, B0=.1, c0=-1, d0=-1, e0=0, E0=1, store.ability=TRUE, store.item=TRUE, ... ){ datamatrix <- as.matrix(datamatrix) nitems <- ncol(datamatrix) nsubj <- nrow(datamatrix) ntime <- max(item.time.map) ## checks check.offset(list(...)) check.mcmc.parameters(burnin, mcmc, thin) if (nitems != length(item.time.map)){ cat("Number of rows of datamatrix not equal to length of item.time.map\n") stop("Please check data and try MCMCdynamicIRT1d() again.\n", call.=FALSE) } if (min(item.time.map) != 1){ cat("Minimum value in item.time.map not equal to 1\n") stop("Please check data and try MCMCdynamicIRT1d() again.\n", call.=FALSE) } if(sum(datamatrix==1 | datamatrix==0 | is.na(datamatrix)) != (nitems * nsubj)) { cat("Error: Data matrix contains elements other than 0, 1 or NA.\n") stop("Please check data and try MCMCdynamicIRT1d() again.\n", call.=FALSE) } if (A0 < 0){ cat("Error: A0 (prior precision for alpha) is less than 0).\n") stop("Please respecify and try MCMCdynamicIRT1d() again.\n") } if (B0 < 0){ cat("Error: B0 (prior precision for beta) is less than 0).\n") stop("Please respecify and try MCMCdynamicIRT1d() again.\n") } ## setup constraints on theta if(length(theta.constraints) != 0) { for (i in 1:length(theta.constraints)){ theta.constraints[[i]] <- list(as.integer(1), theta.constraints[[i]][1]) } } holder <- build.factor.constraints(theta.constraints, t(datamatrix), nsubj, 1) theta.eq.constraints <- holder[[1]] theta.ineq.constraints <- holder[[2]] ##subject.names <- holder[[3]] ## names item.names <- colnames(datamatrix) if (is.null(item.names)){ item.names <- paste("item", 1:nitems, sep="") } ## starting values for theta error checking theta.start <- factor.score.start.check(theta.start, datamatrix, matrix(0,1,1), matrix(1,1,1), theta.eq.constraints, theta.ineq.constraints, 1) ## starting values for (alpha, beta) ab.starts <- matrix(NA, nitems, 2) for (i in 1:nitems){ local.y <- datamatrix[,i] ##local.y[local.y==9] <- NA if (length(na.omit(local.y)) <= 1){ ab.starts[i,] <- c(0, 10) } else if (var(na.omit(local.y))==0){ ab.starts[i,] <- c(0,10) } else { ab.starts[i,] <- coef(suppressWarnings(glm(local.y~theta.start, family=binomial(probit), control=glm.control( maxit=8, epsilon=1e-3) ))) } } ab.starts[,1] <- -1 * ab.starts[,1] # make this into a difficulty param ## starting values for alpha and beta error checking if (is.na(alpha.start)) { alpha.start <- ab.starts[,1] } else if(is.null(dim(alpha.start))) { alpha.start <- alpha.start * matrix(1,nitems,1) } else if((dim(alpha.start)[1] != nitems) || (dim(alpha.start)[2] != 1)) { cat("Error: Starting value for alpha not conformable.\n") stop("Please respecify and call MCMCdynamicIRT1d() again.\n", call.=FALSE) } if (is.na(beta.start)) { beta.start <- ab.starts[,2] } else if(is.null(dim(beta.start))) { beta.start <- beta.start * matrix(1,nitems,1) } else if((dim(beta.start)[1] != nitems) || (dim(beta.start)[2] != 1)) { 
cat("Error: Starting value for beta not conformable.\n") stop("Please respecify and call MCMCdynamicIRT1d() again.\n", call.=FALSE) } ## generate time-specific theta information and create extended theta.start subject.names <- NULL theta.start.new <- NULL ## theta.info has: ## col1: subj ID, col2: #time periods, col3: offset (first term C indexing) theta.info <- matrix(NA, nsubj, 3) for (s in 1:nsubj){ namestub <- rownames(datamatrix)[s] theta.info[s,1] <- s count <- 0 holder <- NULL for (i in 1:nitems){ if (!is.na(datamatrix[s,i])){ holder <- c(holder, item.time.map[i]) } } minholder <- min(holder) maxholder <- max(holder) theta.info[s,2] <- maxholder - minholder + 1 theta.info[s,3] <- minholder - 1 theta.start.new <- c(theta.start.new, rep(theta.start[s], theta.info[s,2])) subject.names <- c(subject.names, paste(namestub, ".t", minholder:maxholder, sep="")) } nthetas <- length(subject.names) theta.start <- theta.start.new if (length(c0) < nsubj){ c0 <- array(c0, nsubj) } if (length(d0) < nsubj){ d0 <- array(d0, nsubj) } if (length(tau2.start) < nsubj){ tau2.start <- array(tau2.start, nsubj) } if (length(e0) < nsubj){ e0 <- array(e0, nsubj) } if (length(E0) < nsubj){ E0 <- array(E0, nsubj) } E0inv <- 1/E0 subject.names.short <- rownames(datamatrix) ## convert datamatrix into a sparse format where the missing values are ## not explicitly represented ## ## 1st column: subject index (C indexing) ## 2nd column: item index (C indexing) ## 3rd column: vote data.sparse.si <- NULL for (i in 1:nsubj){ for (j in 1:nitems){ if (!is.na(datamatrix[i,j])){ data.sparse.si <- rbind(data.sparse.si, c(i-1, j-1, datamatrix[i,j])) } } } ## 1st column: item index (C indexing) ## 2nd column: subject index (C indexing) ## 3rd column: vote ## data.sparse.is <- NULL ## for (i in 1:nitems){ ## for (j in 1:nsubj){ ## if (!is.na(datamatrix[j,i])){ ## data.sparse.is <- rbind(data.sparse.is, c(i-1, j-1, datamatrix[j,i])) ## } ## } ## } rm(datamatrix) ## define storage matrix for posterior theta draws thetadraws <- matrix(0, mcmc/thin, nthetas) if (store.ability != TRUE){ thetadraws <- matrix(0, 1, 1) } alphadraws <- matrix(1, mcmc/thin, nitems) betadraws <- matrix(2, mcmc/thin, nitems) if (store.item != TRUE){ alphadraws <- matrix(1, 1, 1) betadraws <- matrix(2, 1, 1) } tau2draws <- matrix(0, mcmc/thin, nsubj) ## seeds seeds <- form.seeds(seed) lecuyer <- seeds[[1]] seed.array <- seeds[[2]] lecuyer.stream <- seeds[[3]] ## print(theta.eq.constraints) ## print(theta.ineq.constraints) # return(list(theta=theta.start, alpha=alpha.start, beta=beta.start)) ## call C++ code to draw sample posterior <- .C("MCMCdynamicIRT1d", thetadata = as.double(thetadraws), thetarow = as.integer(nrow(thetadraws)), thetacol = as.integer(ncol(thetadraws)), alphadata = as.double(alphadraws), alpharow = as.integer(nrow(alphadraws)), alphacol = as.integer(ncol(alphadraws)), betadata = as.double(betadraws), betarow = as.integer(nrow(betadraws)), betacol = as.integer(ncol(betadraws)), tau2data = as.double(tau2draws), tau2row = as.integer(nrow(tau2draws)), tau2col = as.integer(ncol(tau2draws)), nsubj = as.integer(nsubj), nitems = as.integer(nitems), ntime = as.integer(ntime), Ydata = as.integer(data.sparse.si), Yrow = as.integer(nrow(data.sparse.si)), Ycol = as.integer(ncol(data.sparse.si)), IT = as.integer(item.time.map-1), ITlength = as.integer(length(item.time.map)), burnin = as.integer(burnin), mcmc = as.integer(mcmc), thin = as.integer(thin), lecuyer = as.integer(lecuyer), seedarray = as.integer(seed.array), lecuyerstream = 
as.integer(lecuyer.stream), verbose = as.integer(verbose), thetastartdata = as.double(theta.start), thetastartlength = as.integer(length(theta.start)), thetainfo = as.integer(theta.info), thetainforow = as.integer(nrow(theta.info)), thetainfocol = as.integer(ncol(theta.info)), astartdata = as.double(alpha.start), astartlength = as.integer(length(alpha.start)), bstartdata = as.double(beta.start), bstartlength = as.integer(length(beta.start)), tau2data = as.double(tau2.start), tau2length = as.integer(length(tau2.start)), c0data = as.double(c0), c0length = as.integer(length(c0)), d0data = as.double(d0), d0length = as.integer(length(d0)), a0data = as.double(a0), A0data = as.double(A0), b0data = as.double(b0), B0data = as.double(B0), e0data = as.double(e0), E0invdata = as.double(E0inv), thetaeqdata = as.double(theta.eq.constraints), thetaeqrow = as.integer(nrow(theta.eq.constraints)), thetaeqcol = as.integer(ncol(theta.eq.constraints)), thetaineqdata = as.double(theta.ineq.constraints), thetaineqrow = as.integer(nrow(theta.ineq.constraints)), thetaineqcol = as.integer(ncol(theta.ineq.constraints)), storei = as.integer(store.item), storea = as.integer(store.ability), PACKAGE="MCMCpack" ) tau2samp <- matrix(posterior$tau2data, posterior$tau2row, posterior$tau2col) colnames(tau2samp) <- paste("tau2.", subject.names.short, sep="") if (store.item & store.ability){ thetasamp <- matrix(posterior$thetadata, posterior$thetarow, posterior$thetacol) colnames(thetasamp) <- paste("theta.", subject.names, sep="") alphasamp <- matrix(posterior$alphadata, posterior$alpharow, posterior$alphacol) colnames(alphasamp) <- paste("alpha.", item.names, sep="") betasamp <- matrix(posterior$betadata, posterior$betarow, posterior$betacol) colnames(betasamp) <- paste("beta.", item.names, sep="") outmat <- mcmc(cbind(thetasamp, alphasamp, betasamp, tau2samp), start=1, end=mcmc, thin=thin) } else if (store.item){ alphasamp <- matrix(posterior$alphadata, posterior$alpharow, posterior$alphacol) colnames(alphasamp) <- paste("alpha.", item.names, sep="") betasamp <- matrix(posterior$betadata, posterior$betarow, posterior$betacol) colnames(betasamp) <- paste("beta.", item.names, sep="") outmat <- mcmc(cbind(alphasamp, betasamp, tau2samp), start=1, end=mcmc, thin=thin) } else if (store.ability){ thetasamp <- matrix(posterior$thetadata, posterior$thetarow, posterior$thetacol) colnames(thetasamp) <- paste("theta.", subject.names, sep="") outmat <- mcmc(cbind(thetasamp, tau2samp), start=1, end=mcmc, thin=thin) } else { outmat <- mcmc(tau2samp, start=1, end=mcmc, thin=thin) } return(outmat) } MCMCpack/R/MCMCdynamicIRT1d-b.R0000644000176000001440000003173112133644103015315 0ustar ripleyusers########################################################################## ## samples from the posterior distribution of a dynamic 1d IRT model ## a la Martin and Quinn (2002) ## ## This software is distributed under the terms of the GNU GENERAL ## PUBLIC LICENSE Version 2, June 1991. See the package LICENSE ## file for more information. ## ## Assumes a local level model for the evolution of theta ## ## y_{jkt}^* = -alpha_k + beta_k * theta_{jt} + epsilon_{jkt} ## theta_{jt} ~ N(theta_{j(t-1)}, tau^2) ## ## Kevin Quinn ## 1/28/2008 ## ## Copyright (C) 2003-2007 Andrew D. Martin and Kevin M. Quinn ## Copyright (C) 2007-present Andrew D. Martin, Kevin M. 
Quinn, ## and Jong Hee Park ########################################################################## ## prior for alpha and beta that coincides with a uniform prior on the ## cutpoints "MCMCdynamicIRT1d_b" <- function(datamatrix, item.time.map, theta.constraints=list(), burnin=1000, mcmc=20000, thin=1, verbose=0, seed=NA, theta.start=NA, alpha.start=NA, beta.start=NA, tau2.start=1, a0=0, A0=.1, b0=0, B0=.1, c0=-1, d0=-1, e0=0, E0=1, store.ability=TRUE, store.item=TRUE, ... ){ datamatrix <- as.matrix(datamatrix) nitems <- ncol(datamatrix) nsubj <- nrow(datamatrix) ntime <- max(item.time.map) ## checks check.offset(list(...)) check.mcmc.parameters(burnin, mcmc, thin) if (nitems != length(item.time.map)){ cat("Number of rows of datamatrix not equal to length of item.time.map\n") stop("Please check data and try MCMCdynamicIRT1d() again.\n", call.=FALSE) } if (min(item.time.map) != 1){ cat("Minimum value in item.time.map not equal to 1\n") stop("Please check data and try MCMCdynamicIRT1d() again.\n", call.=FALSE) } if(sum(datamatrix==1 | datamatrix==0 | is.na(datamatrix)) != (nitems * nsubj)) { cat("Error: Data matrix contains elements other than 0, 1 or NA.\n") stop("Please check data and try MCMCdynamicIRT1d() again.\n", call.=FALSE) } if (A0 < 0){ cat("Error: A0 (prior precision for alpha) is less than 0).\n") stop("Please respecify and try MCMCdynamicIRT1d() again.\n") } if (B0 < 0){ cat("Error: B0 (prior precision for beta) is less than 0).\n") stop("Please respecify and try MCMCdynamicIRT1d() again.\n") } ## setup constraints on theta if(length(theta.constraints) != 0) { for (i in 1:length(theta.constraints)){ theta.constraints[[i]] <- list(as.integer(1), theta.constraints[[i]][1]) } } holder <- build.factor.constraints(theta.constraints, t(datamatrix), nsubj, 1) theta.eq.constraints <- holder[[1]] theta.ineq.constraints <- holder[[2]] ##subject.names <- holder[[3]] ## names item.names <- colnames(datamatrix) if (is.null(item.names)){ item.names <- paste("item", 1:nitems, sep="") } ## starting values for theta error checking theta.start <- factor.score.start.check(theta.start, datamatrix, matrix(0,1,1), matrix(1,1,1), theta.eq.constraints, theta.ineq.constraints, 1) ## starting values for (alpha, beta) ab.starts <- matrix(NA, nitems, 2) for (i in 1:nitems){ local.y <- datamatrix[,i] ##local.y[local.y==9] <- NA if (length(na.omit(local.y)) <= 1){ ab.starts[i,] <- c(0, 10) } else if (var(na.omit(local.y))==0){ ab.starts[i,] <- c(0,10) } else { ab.starts[i,] <- coef(suppressWarnings(glm(local.y~theta.start, family=binomial(probit), control=glm.control( maxit=8, epsilon=1e-3) ))) } } ab.starts[,1] <- -1 * ab.starts[,1] # make this into a difficulty param ## starting values for alpha and beta error checking if (is.na(alpha.start)) { alpha.start <- ab.starts[,1] } else if(is.null(dim(alpha.start))) { alpha.start <- alpha.start * matrix(1,nitems,1) } else if((dim(alpha.start)[1] != nitems) || (dim(alpha.start)[2] != 1)) { cat("Error: Starting value for alpha not conformable.\n") stop("Please respecify and call MCMCdynamicIRT1d() again.\n", call.=FALSE) } if (is.na(beta.start)) { beta.start <- ab.starts[,2] } else if(is.null(dim(beta.start))) { beta.start <- beta.start * matrix(1,nitems,1) } else if((dim(beta.start)[1] != nitems) || (dim(beta.start)[2] != 1)) { cat("Error: Starting value for beta not conformable.\n") stop("Please respecify and call MCMCdynamicIRT1d() again.\n", call.=FALSE) } ## generate time-specific theta information and create extended theta.start subject.names <- NULL 
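## Editorial note: apart from the alternative prior on (alpha, beta) noted in
## the header, the data preparation and sampler interface below mirror
## MCMCdynamicIRT1d() above.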
theta.start.new <- NULL ## theta.info has: ## col1: subj ID, col2: #time periods, col3: offset (first term C indexing) theta.info <- matrix(NA, nsubj, 3) for (s in 1:nsubj){ namestub <- rownames(datamatrix)[s] theta.info[s,1] <- s count <- 0 holder <- NULL for (i in 1:nitems){ if (!is.na(datamatrix[s,i])){ holder <- c(holder, item.time.map[i]) } } minholder <- min(holder) maxholder <- max(holder) theta.info[s,2] <- maxholder - minholder + 1 theta.info[s,3] <- minholder - 1 theta.start.new <- c(theta.start.new, rep(theta.start[s], theta.info[s,2])) subject.names <- c(subject.names, paste(namestub, ".t", minholder:maxholder, sep="")) } nthetas <- length(subject.names) theta.start <- theta.start.new if (length(c0) < nsubj){ c0 <- array(c0, nsubj) } if (length(d0) < nsubj){ d0 <- array(d0, nsubj) } if (length(tau2.start) < nsubj){ tau2.start <- array(tau2.start, nsubj) } if (length(e0) < nsubj){ e0 <- array(e0, nsubj) } if (length(E0) < nsubj){ E0 <- array(E0, nsubj) } E0inv <- 1/E0 subject.names.short <- rownames(datamatrix) ## convert datamatrix into a sparse format where the missing values are ## not explicitly represented ## ## 1st column: subject index (C indexing) ## 2nd column: item index (C indexing) ## 3rd column: vote data.sparse.si <- NULL for (i in 1:nsubj){ for (j in 1:nitems){ if (!is.na(datamatrix[i,j])){ data.sparse.si <- rbind(data.sparse.si, c(i-1, j-1, datamatrix[i,j])) } } } ## 1st column: item index (C indexing) ## 2nd column: subject index (C indexing) ## 3rd column: vote ## data.sparse.is <- NULL ## for (i in 1:nitems){ ## for (j in 1:nsubj){ ## if (!is.na(datamatrix[j,i])){ ## data.sparse.is <- rbind(data.sparse.is, c(i-1, j-1, datamatrix[j,i])) ## } ## } ## } rm(datamatrix) ## define storage matrix for posterior theta draws thetadraws <- matrix(0, mcmc/thin, nthetas) if (store.ability != TRUE){ thetadraws <- matrix(0, 1, 1) } alphadraws <- matrix(1, mcmc/thin, nitems) betadraws <- matrix(2, mcmc/thin, nitems) if (store.item != TRUE){ alphadraws <- matrix(1, 1, 1) betadraws <- matrix(2, 1, 1) } tau2draws <- matrix(0, mcmc/thin, nsubj) ## seeds seeds <- form.seeds(seed) lecuyer <- seeds[[1]] seed.array <- seeds[[2]] lecuyer.stream <- seeds[[3]] ## print(theta.eq.constraints) ## print(theta.ineq.constraints) # return(list(theta=theta.start, alpha=alpha.start, beta=beta.start)) ## call C++ code to draw sample posterior <- .C("MCMCdynamicIRT1d_b", thetadata = as.double(thetadraws), thetarow = as.integer(nrow(thetadraws)), thetacol = as.integer(ncol(thetadraws)), alphadata = as.double(alphadraws), alpharow = as.integer(nrow(alphadraws)), alphacol = as.integer(ncol(alphadraws)), betadata = as.double(betadraws), betarow = as.integer(nrow(betadraws)), betacol = as.integer(ncol(betadraws)), tau2data = as.double(tau2draws), tau2row = as.integer(nrow(tau2draws)), tau2col = as.integer(ncol(tau2draws)), nsubj = as.integer(nsubj), nitems = as.integer(nitems), ntime = as.integer(ntime), Ydata = as.integer(data.sparse.si), Yrow = as.integer(nrow(data.sparse.si)), Ycol = as.integer(ncol(data.sparse.si)), IT = as.integer(item.time.map-1), ITlength = as.integer(length(item.time.map)), burnin = as.integer(burnin), mcmc = as.integer(mcmc), thin = as.integer(thin), lecuyer = as.integer(lecuyer), seedarray = as.integer(seed.array), lecuyerstream = as.integer(lecuyer.stream), verbose = as.integer(verbose), thetastartdata = as.double(theta.start), thetastartlength = as.integer(length(theta.start)), thetainfo = as.integer(theta.info), thetainforow = as.integer(nrow(theta.info)), thetainfocol = 
as.integer(ncol(theta.info)), astartdata = as.double(alpha.start), astartlength = as.integer(length(alpha.start)), bstartdata = as.double(beta.start), bstartlength = as.integer(length(beta.start)), tau2data = as.double(tau2.start), tau2length = as.integer(length(tau2.start)), c0data = as.double(c0), c0length = as.integer(length(c0)), d0data = as.double(d0), d0length = as.integer(length(d0)), a0data = as.double(a0), A0data = as.double(A0), b0data = as.double(b0), B0data = as.double(B0), e0data = as.double(e0), E0invdata = as.double(E0inv), thetaeqdata = as.double(theta.eq.constraints), thetaeqrow = as.integer(nrow(theta.eq.constraints)), thetaeqcol = as.integer(ncol(theta.eq.constraints)), thetaineqdata = as.double(theta.ineq.constraints), thetaineqrow = as.integer(nrow(theta.ineq.constraints)), thetaineqcol = as.integer(ncol(theta.ineq.constraints)), storei = as.integer(store.item), storea = as.integer(store.ability), PACKAGE="MCMCpack" ) tau2samp <- matrix(posterior$tau2data, posterior$tau2row, posterior$tau2col) colnames(tau2samp) <- paste("tau2.", subject.names.short, sep="") if (store.item & store.ability){ thetasamp <- matrix(posterior$thetadata, posterior$thetarow, posterior$thetacol) colnames(thetasamp) <- paste("theta.", subject.names, sep="") alphasamp <- matrix(posterior$alphadata, posterior$alpharow, posterior$alphacol) colnames(alphasamp) <- paste("alpha.", item.names, sep="") betasamp <- matrix(posterior$betadata, posterior$betarow, posterior$betacol) colnames(betasamp) <- paste("beta.", item.names, sep="") outmat <- mcmc(cbind(thetasamp, alphasamp, betasamp, tau2samp), start=1, end=mcmc, thin=thin) } else if (store.item){ alphasamp <- matrix(posterior$alphadata, posterior$alpharow, posterior$alphacol) colnames(alphasamp) <- paste("alpha.", item.names, sep="") betasamp <- matrix(posterior$betadata, posterior$betarow, posterior$betacol) colnames(betasamp) <- paste("beta.", item.names, sep="") outmat <- mcmc(cbind(alphasamp, betasamp, tau2samp), start=1, end=mcmc, thin=thin) } else if (store.ability){ thetasamp <- matrix(posterior$thetadata, posterior$thetarow, posterior$thetacol) colnames(thetasamp) <- paste("theta.", subject.names, sep="") outmat <- mcmc(cbind(thetasamp, tau2samp), start=1, end=mcmc, thin=thin) } else { outmat <- mcmc(tau2samp, start=1, end=mcmc, thin=thin) } return(outmat) } MCMCpack/R/MCMCdynamicEI.R0000644000176000001440000001061312133644103014504 0ustar ripleyusers########################################################################## ## sample from the posterior of Quinn's dynamic ecological inference model ## in R using linked C++ code in Scythe ## ## This software is distributed under the terms of the GNU GENERAL ## PUBLIC LICENSE Version 2, June 1991. See the package LICENSE ## file for more information. ## ## KQ 10/25/2002 ## ## Copyright (C) 2003-2007 Andrew D. Martin and Kevin M. Quinn ## Copyright (C) 2007-present Andrew D. Martin, Kevin M. 
Quinn, ## and Jong Hee Park ########################################################################## "MCMCdynamicEI" <- function(r0, r1, c0, c1, burnin=5000, mcmc=50000, thin=1, verbose=0, seed=NA, W=0, a0=0.825, b0=0.0105, a1=0.825, b1=0.0105, ...){ # Error checking if (length(r0) != length(r1)){ cat("length(r0) != length(r1).\n") stop("Please check data and try MCMCdynamicEI() again.\n") } if (length(r0) != length(c0)){ cat("length(r0) != length(c0).\n") stop("Please check data and try MCMCdynamicEI() again.\n") } if (length(r0) != length(c1)){ cat("length(r0) != length(c1).\n") stop("Please check data and try MCMCdynamicEI() again.\n") } if (length(r1) != length(c0)){ cat("length(r1) != length(c0).\n") stop("Please check data and try MCMCdynamicEI() again.\n") } if (length(r1) != length(c1)){ cat("length(r1) != length(c1).\n") stop("Please check data and try MCMCdynamicEI() again.\n") } if (length(c0) != length(c1)){ cat("length(c0) != length(c1).\n") stop("Please check data and try MCMCdynamicEI() again.\n") } if (min((r0+r1) == (c0+c1))==0){ cat("Rows and columns do not sum to same thing.\n") stop("Please check data and try MCMCdynamicEI() again.\n") } check.mcmc.parameters(burnin, mcmc, thin) # seeds seeds <- form.seeds(seed) lecuyer <- seeds[[1]] seed.array <- seeds[[2]] lecuyer.stream <- seeds[[3]] if (a0 <= 0 ){ cat("Parameter a0 <= 0.\n") stop("Please respecify and try MCMCdynamicEI() again.\n") } if (b0 <= 0 ){ cat("Parameter b0 <= 0.\n") stop("Please respecify and try MCMCdynamicEI() again.\n") } if (a1 <= 0 ){ cat("Parameter a1 <= 0.\n") stop("Please respecify and try MCMCdynamicEI() again.\n") } if (b1 <= 0 ){ cat("Parameter b1 <= 0.\n") stop("Please respecify and try MCMCdynamicEI() again.\n") } ntables = length(r0) if (W==0){ # construct weight matrix for a simple random walk assuming # tables are temporally ordered and 1 time unit apart W <- matrix(0, ntables, ntables) for (i in 2:(ntables)){ W[i,i-1] <- 1 W[i-1,i] <- 1 } } # setup matrix to hold output from sampling sample <- matrix(0, mcmc/thin, ntables*2+2) # call C++ code to do the sampling C.sample <- .C("dynamicEI", samdata = as.double(sample), samrow = as.integer(nrow(sample)), samcol = as.integer(ncol(sample)), r0 = as.double(r0), r1 = as.double(r1), c0 = as.double(c0), c1 = as.double(c1), ntables = as.integer(ntables), burnin = as.integer(burnin), mcmc = as.integer(mcmc), thin = as.integer(thin), W = as.double(W), a0 = as.double(a0), b0 = as.double(b0), a1 = as.double(a1), b1 = as.double(b1), verbose = as.integer(verbose), lecuyer = as.integer(lecuyer), seedarray = as.integer(seed.array), lecuyerstream = as.integer(lecuyer.stream), PACKAGE="MCMCpack" ) sample <- matrix(C.sample$samdata, C.sample$samrow, C.sample$samcol, byrow=TRUE) output <- mcmc(data=sample, start=(burnin+1), end=burnin+mcmc, thin=thin) p0names <- paste("p0table", 1:ntables, sep="") p1names <- paste("p1table", 1:ntables, sep="") varnames(output) <- c(p0names, p1names, "sigma^2_0", "sigma^2_1") attr(output, "title") <- "MCMCpack Quinn's Dynamic EI Model Posterior Sample" return(output) } MCMCpack/R/MCMCbinaryChange.R0000755000176000001440000000774012133644103015246 0ustar ripleyusers## sample from the posterior distribution ## of a binary model with multiple changepoints ## using linked C++ code in Scythe ## ## Written by JHP 07/01/2007 ## Revised by JHP 07/16/2009 "MCMCbinaryChange"<- function(data, m = 1, c0 = 1, d0 = 1, a = NULL, b = NULL, burnin = 10000, mcmc = 10000, thin = 1, verbose = 0, seed = NA, phi.start = NA, P.start = NA, 
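## -------------------------------------------------------------------------
## Editorial note: a minimal, hypothetical usage sketch for MCMCbinaryChange()
## (not original code; 'y01' is a placeholder binary time series).
##   out <- MCMCbinaryChange(y01, m = 2, c0 = 1, d0 = 1,
##                           burnin = 5000, mcmc = 5000, thin = 1,
##                           marginal.likelihood = "Chib95")
##   plotState(out)
##   plotChangepoint(out)
## -------------------------------------------------------------------------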
marginal.likelihood = c("none", "Chib95"), ...) { ## check iteration parameters check.mcmc.parameters(burnin, mcmc, thin) totiter <- mcmc + burnin cl <- match.call() ## seeds seeds <- form.seeds(seed) lecuyer <- seeds[[1]] seed.array <- seeds[[2]] lecuyer.stream <- seeds[[3]] if(!is.na(seed)) set.seed(seed) ## sample size y <- as.matrix(data) n <- nrow(y) ns <- m+1 ## get marginal likelihood argument marginal.likelihood <- match.arg(marginal.likelihood) ## following MCMCregress, set chib as binary logmarglike <- NULL chib <- 0 if (marginal.likelihood == "Chib95"){ chib <- 1 } nstore <- mcmc/thin if (m == 0){ b0 <- c0/(c0 + d0) B0 <- c0*d0/(c0 + d0)^2*(c0 + d0 + 1) output <- MCMCprobit(y~1, burnin = burnin, mcmc = mcmc, thin = thin, verbose = verbose, b0 = b0, B0 = B0, marginal.likelihood = marginal.likelihood) attr(output, "y") <- y } else { ## prior for transition matrix A0 <- trans.mat.prior(m=m, n=n, a=a, b=b) Pstart <- check.P(P.start, m=m, a=a, b=b) phistart <- check.theta(phi.start, ns, y, min=0, max=1) ## call C++ code to draw sample posterior <- .C("MCMCbinaryChange", phiout = as.double(rep(0.0, nstore*ns)), Pout = as.double(rep(0.0, nstore*ns*ns)), psout = as.double(rep(0.0, n*ns)), sout = as.double(rep(0.0, nstore*n)), Ydata = as.double(y), Yrow = as.integer(nrow(y)), Ycol = as.integer(ncol(y)), m = as.integer(m), burnin = as.integer(burnin), mcmc = as.integer(mcmc), thin = as.integer(thin), verbose = as.integer(verbose), lecuyer=as.integer(lecuyer), seedarray=as.integer(seed.array), lecuyerstream=as.integer(lecuyer.stream), phistart = as.double(phistart), Pstart = as.double(Pstart), a = as.double(a), b = as.double(b), c0 = as.double(c0), d0 = as.double(d0), A0data = as.double(A0), logmarglikeholder = as.double(0.0), chib = as.integer(chib)) ## get marginal likelihood if Chib95 if (marginal.likelihood == "Chib95"){ logmarglike <- posterior$logmarglikeholder } ## pull together matrix and build MCMC object to return phi.holder <- matrix(posterior$phiout, nstore, ) P.holder <- matrix(posterior$Pout, nstore, ) s.holder <- matrix(posterior$sout, nstore, ) ps.holder <- matrix(posterior$psout, n, ) output <- mcmc(data=phi.holder, start=burnin+1, end=burnin + mcmc, thin=thin) varnames(output) <- paste("phi.", 1:ns, sep = "") attr(output,"title") <- "MCMCbinaryChange Posterior Sample" attr(output, "y") <- y attr(output, "m") <- m attr(output, "call") <- cl attr(output, "logmarglike") <- logmarglike attr(output, "prob.state") <- ps.holder/nstore attr(output, "s.store") <- s.holder } return(output) }## end of MCMC function MCMCpack/R/make.breaklist.R0000644000176000001440000000125112133644103015074 0ustar ripleyusers"make.breaklist" <- function(BF, threshold=3){ eBF <- exp(BF) N <- nrow(BF) out <- rep(NA, N) for (i in 1:N){ order.i <- order(eBF[i,], decreasing=TRUE) if(sum(is.na(eBF[i,]))>0){ out[i] <- 1 } else{ if(eBF[i, order.i[1]] / eBF[i, order.i[2]] > threshold){ out[i] <- order.i[1] } else if (eBF[i, order.i[1]] / eBF[i, order.i[2]] < threshold & order.i[1]<=order.i[2]){ out[i] <- order.i[1] } else if (eBF[i, order.i[1]] / eBF[i, order.i[2]] < threshold & order.i[1]>order.i[2]){ out[i] <- order.i[2] } else{ cat("\n Error occurs at i = ", i) } } } return(out-1) } MCMCpack/R/HMMpanelRE.R0000644000176000001440000002056312133644103014077 0ustar ripleyusers#################################################################### ## HMM Gaussian Panel Random Effects Model ## y_it = x_it'*beta + w_it*bi + e_it, ## e_it ~ N(0, sigma2) ## ## bi ~ N(0, D) : random effects coefficient ## D ~ IW(r0, 
R0) : covariance matrix for multiple random effects ## beta ~ N(b0, B0) : fixed effect coefficient ## sigma2 ~ IG(c0/2, d0/2) : random error ## ## written by Jong Hee Park 03/2009 ## modified and integrated with other codes on 09/2011 ###################################################################### "HMMpanelRE" <- function(subject.id, time.id, y, X, W, m=1, mcmc=1000, burnin=1000, thin=1, verbose=0, b0=0, B0=0.001, c0 = 0.001, d0 = 0.001, r0, R0, a = NULL, b = NULL, seed = NA, beta.start = NA, sigma2.start = NA, D.start= NA, P.start = NA, marginal.likelihood = c("none", "Chib95"), ...){ cl <- match.call() ## seeds seeds <- form.seeds(seed) lecuyer <- seeds[[1]] seed.array <- seeds[[2]] lecuyer.stream <- seeds[[3]] ## Data ns <- m + 1 Y <- as.matrix(y) X <- as.matrix(X) W <- as.matrix(W) formula <- Y ~ X-1 ols <- lm(formula) N <- nrow(Y) K <- ncol(X) Q <- ncol(W) nobs <- nrow(X) ## Sort Data based on time.id oldTSCS <- cbind(time.id, subject.id, y, X, W) newTSCS <- oldTSCS[order(oldTSCS[,1]),] YT <- as.matrix(newTSCS[,3]) XT <- as.matrix(newTSCS[,4:(4+K-1)]) WT <- as.matrix(newTSCS[,(4+K):(4+K+Q-1)]) mvn.prior <- form.mvn.prior(b0, B0, K) b0 <- mvn.prior[[1]] B0 <- mvn.prior[[2]] R0 <- as.matrix(R0) nstore <- mcmc/thin nsubj <- length(unique(subject.id)) if (unique(subject.id)[1] != 1){ stop("subject.id should start 1!") } ## subject.offset is the obs number from which a new subject unit starts subject.offset <- c(0, which(diff(sort(subject.id))==1)[-nsubj]) ## col1: subj ID, col2: offset (C indexing), col3: #time periods in each subject nsubject.vec <- rep(NA, nsubj) for (i in 1:nsubj){ nsubject.vec[i] <- sum(subject.id==unique(subject.id)[i]) } subject.groupinfo <- cbind(unique(subject.id), subject.offset, nsubject.vec) ## time.groupinfo ## col1: time ID, col2: offset (C indexing), col3: # subjects in each time if(unique(time.id)[1] != 1){ time.id <- time.id - unique(time.id)[1] + 1 cat("time.id does not start from 1. 
So it is modified by subtracting the first unit of time.") } ntime <- max(nsubject.vec)## maximum time length ntime.vec <- rep(NA, ntime) for (i in 1:ntime){ ntime.vec[i] <- sum(time.id==unique(time.id)[i]) } ## time.offset is the obs number from which a new time unit starts when we stack data by time.id time.offset <- c(0, which(diff(sort(time.id))==1)[-ntime]) time.groupinfo <- cbind(unique(time.id), time.offset, ntime.vec) ## prior inputs if (m > 0){ P0 <- trans.mat.prior(m=m, n=ntime, a=a, b=b) ## initial values Pstart <- check.P(P.start, m, a=a, b=b) } else { Pstart <- P0 <- matrix(1, 1, 1) } if (is.na(beta.start[1])) { betastart <- coef(ols) } else{ betastart <- beta.start } if (is.na(sigma2.start[1])) { sigma2start <- summary(ols)$sigma^2 } else{ sigma2start <- sigma2.start } betadraws <- matrix(data=0, nstore, ns*K) sigmadraws <- matrix(data=0, nstore, ns) Ddraws <- matrix(data=0, nstore, ns*Q*Q) psdraws <- matrix(data=0, ntime, ns) sdraws <- matrix(data=0, nstore, ntime*ns) ## get marginal likelihood argument marginal.likelihood <- match.arg(marginal.likelihood) ## following MCMCregress, set chib as binary logmarglike <- loglik <- NULL chib <- 0 if (marginal.likelihood == "Chib95"){ chib <- 1 } ## call C++ code to draw sample posterior <- .C("HMMpanelRE", betadata = as.double(betadraws), betarow = as.integer(nrow(betadraws)), betacol = as.integer(ncol(betadraws)), sigmadata = as.double(sigmadraws), Ddata = as.double(Ddraws), psout = as.double(psdraws), sout = as.double(sdraws), nsubj = as.integer(nsubj), ntime = as.integer(ntime), m = as.integer(m), nobs = as.integer(nobs), subjectid = as.integer(subject.id), timeid = as.integer(time.id), Ydata = as.double(Y), Yrow = as.integer(nrow(Y)), Ycol = as.integer(ncol(Y)), Xdata = as.double(X), Xrow = as.integer(nrow(X)), Xcol = as.integer(ncol(X)), Wdata = as.double(W), Wrow = as.integer(nrow(W)), Wcol = as.integer(ncol(W)), YTdata = as.double(YT), XTdata = as.double(XT), WTdata = as.double(WT), burnin = as.integer(burnin), mcmc = as.integer(mcmc), thin = as.integer(thin), verbose = as.integer(verbose), lecuyer = as.integer(lecuyer), seedarray = as.integer(seed.array), lecuyerstream = as.integer(lecuyer.stream), betastartdata = as.double(betastart), sigma2start = as.double(sigma2start), Pstart = as.double(Pstart), b0data = as.double(b0), B0data = as.double(B0), c0 = as.double(c0), d0 = as.double(d0), r0 = as.integer(r0), R0data = as.double(R0), subject_groupinfodata = as.double(subject.groupinfo), time_groupinfodata = as.double(time.groupinfo), logmarglikeholder = as.double(0), loglikeholder = as.double(0), chib = as.integer(chib), PACKAGE="MCMCpack" ) ## pull together matrix and build MCMC object to return beta.samp <- matrix(posterior$betadata, posterior$betarow, posterior$betacol) ## stored by the order of (11, 12, 13, 21, 22, 23) sigma.samp <- matrix(posterior$sigmadata, posterior$betarow, ns) D.samp <- matrix(posterior$Ddata, posterior$betarow, Q*Q*ns) xnames <- sapply(c(1:K), function(i){paste("beta", i, sep = "")}) Dnames <- sapply(c(1:(Q*Q)), function(i){paste("D", i, sep = "")}) if (m == 0){ output <- as.mcmc(cbind(beta.samp, sigma.samp, D.samp)) names <- c(xnames, "sigma2", Dnames) varnames(output) <- as.list(names) } else{ output1 <- mcmc(data=beta.samp, start=burnin+1, end=burnin + mcmc, thin=thin) output2 <- mcmc(data=sigma.samp, start=burnin+1, end=burnin + mcmc, thin=thin) output3 <- mcmc(data=D.samp, start=burnin+1, end=burnin + mcmc, thin=thin) varnames(output1) <- sapply(c(1:ns), function(i){ paste(xnames, "_regime", i, 
sep = "") }) varnames(output2) <- sapply(c(1:ns), function(i){ paste("sigma2_regime", i, sep = "") }) varnames(output3) <- sapply(c(1:ns), function(i){ paste(Dnames, "_regime", i, sep = "") }) output <- as.mcmc(cbind(output1, output2, output3)) ps.holder <- matrix(posterior$psout, ntime, ns) s.holder <- matrix(posterior$sout, nstore, ) } attr(output, "title") <- "HMMpanelRE Posterior Sample" attr(output, "call") <- cl attr(output, "y") <- y[1:ntime] attr(output, "X") <- X[1:ntime, ] attr(output, "m") <- m attr(output, "nsubj") <- nsubj attr(output, "ntime") <- ntime if (m > 0){ attr(output, "s.store") <- s.holder attr(output, "prob.state") <- ps.holder/nstore } attr(output, "logmarglike") <- posterior$logmarglikeholder attr(output, "loglike") <- posterior$loglikeholder return(output) } MCMCpack/R/HMMpanelFE.R0000644000176000001440000001411712133644103014061 0ustar ripleyusers#################################################################### ## HMM Gaussian Panel with Time Varying Intercepts ## ## first written by Jong Hee Park on 02/2009 ## revised for MCMCpack inclusion on 09/2011 ###################################################################### "HMMpanelFE" <- function(subject.id, y, X, m, mcmc=1000, burnin=1000, thin=1, verbose=0, b0=0, B0=0.001, c0 = 0.001, d0 = 0.001, delta0=0, Delta0=0.001, a = NULL, b = NULL, seed = NA, ...){ ## m should be a vector with a number of breaks for each group ## id is a numeric list ## p is a lag order ## offset is the first time period from which each group starts ## seeds seeds <- form.seeds(seed) lecuyer <- seeds[[1]] seed.array <- seeds[[2]] lecuyer.stream <- seeds[[3]] ## Data Y <- as.matrix(y); X <- as.matrix(cbind(1, X)) ## the intercept is not reported N <- nrow(Y); K <- ncol(X) mvn.prior <- form.mvn.prior(b0, B0, K) b0 <- mvn.prior[[1]] B0 <- mvn.prior[[2]] mvn.prior <- form.mvn.prior(delta0, Delta0, 1) delta0 <- mvn.prior[[1]] Delta0 <- mvn.prior[[2]] nstore <- mcmc/thin nsubj <- length(unique(subject.id)) ## groupinfo matrix ## col1: subj ID, col2: offset (first time C indexing), col3: #time periods if (unique(subject.id)[1] != 1){ stop("subject.id should start 1!") } subject.offset <- c(0, which(diff(sort(subject.id))==1)[-nsubj]) ## col1: subj ID, col2: offset (C indexing), col3: #time periods in each subject nsubject.vec <- rep(NA, nsubj) for (i in 1:nsubj){ nsubject.vec[i] <- sum(subject.id==unique(subject.id)[i]) } subject.groupinfo <- cbind(unique(subject.id), subject.offset, nsubject.vec) ## maximum time length ntime <- max(nsubject.vec) m.max <- max(m) m.min <- min(m) ## prior inputs P0data <- NULL Pstart <- NULL for (i in 1:nsubj){ if(m[i] == 0){ P0current <- 1 Pstartcurrent <- 1 } else{ P0current <- trans.mat.prior(m=m[i], n=nsubject.vec[i], a=a, b=b) Pstartcurrent <- trans.mat.prior(m=m[i], n=nsubject.vec[i], a=.9, b=.1) } P0data <- c(P0data, P0current) Pstart <- c(Pstart, Pstartcurrent) } ## starting values ols <- lm(Y~X-1) beta.start <- coef(ols) sigma2.start <- summary(ols)$sigma^2 deltastart <- NULL Ytilde <- Y - X%*%beta.start deltaformula <- Ytilde ~ 1 ## without intercept for (i in 1:nsubj){ deltacurrent <- rep(as.vector(coef(lm(Ytilde ~ 1))), m[i] + 1) deltastart <- c(deltastart, deltacurrent) } ## Storage totalstates0 <- sum(m+1) betadraws0 <- matrix(0, nstore, K) deltadraws0 <- matrix(data=0, nstore, totalstates0) sigmadraws0 <- matrix(data=0, nstore, totalstates0) statedraws0 <- matrix(data=0, nstore, totalstates0) ## call C++ code to draw sample posterior <- .C("HMMpanelFE", deltadraws = as.double(deltadraws0), 
sigmadraws = as.double(sigmadraws0), statedraws = as.double(statedraws0), betadraws = as.double(betadraws0), betarow = as.integer(nrow(betadraws0)), betacol = as.integer(ncol(betadraws0)), totalstates = as.integer(totalstates0), nsubj = as.integer(nsubj), ntime = as.integer(ntime), nobs = as.integer(N), subjectid = as.integer(subject.id), m = as.integer(m), mmax = as.integer(m.max), mmin = as.integer(m.min), Ydata = as.double(Y), Yrow = as.integer(nrow(Y)), Ycol = as.integer(ncol(Y)), Xdata = as.double(X), Xrow = as.integer(nrow(X)), Xcol = as.integer(ncol(X)), burnin = as.integer(burnin), mcmc = as.integer(mcmc), thin = as.integer(thin), verbose = as.integer(verbose), lecuyer = as.integer(lecuyer), seedarray = as.integer(seed.array), lecuyerstream = as.integer(lecuyer.stream), betastartdata = as.double(beta.start), sigma2start = as.double(sigma2.start), deltastartdata = as.double(deltastart), deltastartrow = as.integer(length(deltastart)), b0data = as.double(b0), B0data = as.double(B0), delta0data = as.double(delta0), Delta0data = as.double(Delta0), c0 = as.double(c0), d0 = as.double(d0), P0data = as.double(P0data), P0row = as.integer(length(P0data)), Pstartdata = as.double(Pstart), subject_groupinfodata = as.double(subject.groupinfo), PACKAGE="MCMCpack") ## pull together matrix and build MCMC object to return betadraws <- matrix(posterior$betadraws, posterior$betarow, posterior$betacol) sigma.samp <- as.mcmc(matrix(posterior$sigmadraws, nstore, totalstates0)) delta.samp <- as.mcmc(matrix(posterior$deltadraws, nstore, totalstates0)) state.samp <- as.mcmc(matrix(posterior$statedraws, nstore, totalstates0)) ## output <- mcmc(betadraws, start=burnin+1, end=burnin+mcmc, thin=thin) output <- as.mcmc(betadraws[, -1])## drop the intercept attr(output, "title") <- "HMMpanelFE Sample" attr(output, "m") <- m attr(output, "sigma") <- sigma.samp attr(output, "state") <- state.samp attr(output, "delta") <- delta.samp return(output) } MCMCpack/R/hidden.R0000644000176000001440000006444212133644103013446 0ustar ripleyusers########## hidden functions to help in model implementation ########## ## NOTE: these are not exported to the user and should always be ## used in model functions. As such, fixing problems here ## fixes them in all functions simultaneously. 
## ## updated by ADM 7/22/04 ## re-organized (alphabetical) by ADM 7/28/04 ## added a number of functions for teaching models by ADM 1/25/2006 ## create an agreement score matrix from a vote matrix ## subjects initially on rows and items on cols of X ## note: treats missing votes as category for agreement / might be ## more principled to treat them in another fashion "agree.mat" <- function(X){ X <- t(X) # put subjects on columns n <- ncol(X) X[is.na(X)] <- -999 A <- matrix(NA, n, n) for (i in 1:n){ A[i,] <- apply(X[,i] == X, 2, sum) } return(A/nrow(X)) } ## create constraints for measurement models "build.factor.constraints" <- function(lambda.constraints, X, K, factors){ ## build initial constraint matrices and assign var names Lambda.eq.constraints <- matrix(NA, K, factors) Lambda.ineq.constraints <- matrix(0, K, factors) if (is.null(colnames(X))){ X.names <- paste("V", 1:ncol(X), sep="") } else { X.names <- colnames(X) } rownames(Lambda.eq.constraints) <- X.names rownames(Lambda.ineq.constraints) <- X.names ## setup the equality and inequality contraints on Lambda if (length(lambda.constraints) != 0){ constraint.names <- names(lambda.constraints) for (i in 1:length(constraint.names)){ name.i <- constraint.names[i] lambda.constraints.i <- lambda.constraints[[i]] col.index <- lambda.constraints.i[[1]] replace.element <- lambda.constraints.i[[2]] if (is.numeric(replace.element)){ Lambda.eq.constraints[rownames(Lambda.eq.constraints)==name.i, col.index] <- replace.element } if (replace.element=="+"){ Lambda.ineq.constraints[rownames(Lambda.ineq.constraints)==name.i, col.index] <- 1 } if (replace.element=="-"){ Lambda.ineq.constraints[rownames(Lambda.ineq.constraints)==name.i, col.index] <- -1 } } } testmat <- Lambda.ineq.constraints * Lambda.eq.constraints if (min(is.na(testmat))==0){ if ( min(testmat[!is.na(testmat)]) < 0){ cat("Constraints on factor loadings are logically inconsistent.\n") stop("Please respecify and call ", calling.function(), " again.\n") } } Lambda.eq.constraints[is.na(Lambda.eq.constraints)] <- -999 return( list(Lambda.eq.constraints, Lambda.ineq.constraints, X.names)) } # return name of the calling function "calling.function" <- function(parentheses=TRUE) { calling.function <- strsplit(toString(sys.call(which=-3)),",")[[1]][1] if (parentheses){ calling.function <- paste(calling.function, "()", sep="") } return(calling.function) } # check inverse Gamma prior "check.ig.prior" <- function(c0, d0) { if(c0 <= 0) { cat("Error: IG(c0/2,d0/2) prior c0 less than or equal to zero.\n") stop("Please respecify and call ", calling.function(), " again.\n", call.=FALSE) } if(d0 <= 0) { cat("Error: IG(c0/2,d0/2) prior d0 less than or equal to zero.\n") stop("Please respecify and call ", calling.function(), " again.\n", call.=FALSE) } return(0) } # check beta prior "check.beta.prior" <- function(alpha, beta) { if(alpha <= 0) { cat("Error: Beta(alpha,beta) prior alpha less than or equal to zero.\n") stop("Please respecify and call ", calling.function(), " again.\n", call.=FALSE) } if(beta <= 0) { cat("Error: Beta(alpha,beta) prior beta less than or equal to zero.\n") stop("Please respecify and call ", calling.function(), " again.\n", call.=FALSE) } return(0) } # check Gamma prior # ADM 1/25/2006 "check.gamma.prior" <- function(alpha, beta) { if(alpha <= 0) { cat("Error: Gamma(alpha,beta) prior alpha less than or equal to zero.\n") stop("Please respecify and call ", calling.function(), " again.\n", call.=FALSE) } if(alpha <= 0) { cat("Error: Gamma(alpha,beta) prior beta less than or 
equal to zero.\n") stop("Please respecify and call ", calling.function(), " again.\n", call.=FALSE) } return(0) } # check Normal prior # ADM 1/26/2006 "check.normal.prior" <- function(mu, sigma2) { if(sigma2 <= 0) { cat("Error: Normal(mu0,tau20) prior sigma2 less than or equal to zero.\n") stop("Please respecify and call ", calling.function(), " again.\n", call.=FALSE) } } # check mc parameter # ADM 1/25/2006 "check.mc.parameter" <- function(mc) { if(mc < 0) { cat("Error: Monte Carlo iterations negative.\n") stop("Please respecify and call ", calling.function(), " again.", call.=FALSE) } return(0) } # check mcmc parameters "check.mcmc.parameters" <- function(burnin, mcmc, thin) { if(mcmc %% thin != 0) { cat("Error: MCMC iterations not evenly divisible by thinning interval.\n") stop("Please respecify and call ", calling.function(), " again.", call.=FALSE) } if(mcmc < 0) { cat("Error: MCMC iterations negative.\n") stop("Please respecify and call ", calling.function(), " again.", call.=FALSE) } if(burnin < 0) { cat("Error: Burnin iterations negative.\n") stop("Please respecify and call ", calling.function(), " again.", call.=FALSE) } if(thin < 0) { cat("Error: Thinning interval negative.\n") stop("Please respecify and call ", calling.function(), " again.", call.=FALSE) } return(0) } # check to see if an offset is passed "check.offset" <- function(args) { if(sum(names(args)=="offset")==1) { cat("Error: Offsets are currently not supported in MCMCpack.\n") stop("Please respecify and call ", calling.function(), " again.\n", call.=FALSE) } return(0) } # put together starting values for coefficients # NOTE: This can be used for any GLM model by passing the right family # or for another model by passing default starting values to # the function "coef.start" <- function(beta.start, K, formula, family, data=NULL, defaults=NA) { if (is.na(beta.start)[1] & is.na(defaults)[1]){ # use GLM estimates beta.start <- matrix(coef(glm(formula, family=family, data=data)), K, 1) } else if(is.na(beta.start)[1] & !is.na(defaults)[1]){ # use passed values beta.start <- matrix(defaults,K,1) } else if(is.null(dim(beta.start))) { beta.start <- beta.start * matrix(1,K,1) } else if(!all(dim(beta.start) == c(K,1))) { cat("Error: Starting value for beta not conformable.\n") stop("Please respecify and call ", calling.function(), " again.\n", call.=FALSE) } return(beta.start) } ## generate starting values for a factor loading matrix "factload.start" <- function(lambda.start, K, factors, Lambda.eq.constraints, Lambda.ineq.constraints){ Lambda <- matrix(0, K, factors) if (any(is.na(lambda.start))){# sets Lambda to equality constraints & 0s for (i in 1:K){ for (j in 1:factors){ if (Lambda.eq.constraints[i,j]==-999){ if(Lambda.ineq.constraints[i,j]==0){ Lambda[i,j] <- 0 } if(Lambda.ineq.constraints[i,j]>0){ Lambda[i,j] <- .5 } if(Lambda.ineq.constraints[i,j]<0){ Lambda[i,j] <- -.5 } } else Lambda[i,j] <- Lambda.eq.constraints[i,j] } } } else if (is.matrix(lambda.start)){ if (nrow(lambda.start)==K && ncol(lambda.start)==factors) Lambda <- lambda.start else { cat("lambda.start not of correct size for model specification.\n") stop("Please respecify and call ", calling.function(), " again.\n") } } else if (length(lambda.start)==1 && is.numeric(lambda.start)){ Lambda <- matrix(lambda.start, K, factors) for (i in 1:K){ for (j in 1:factors){ if (Lambda.eq.constraints[i,j] != -999) Lambda[i,j] <- Lambda.eq.constraints[i,j] } } } else { cat("lambda.start neither NA, matrix, nor scalar.\n") stop("Please respecify and call ", 
calling.function, " again.\n") } return(Lambda) } ## based on code originally written by Keith Poole ## takes a subject by subject agreement score matrix as input "factor.score.eigen.start" <- function(A, factors){ A <- (1 - A)^2 AA <- A arow <- matrix(NA, nrow(A), 1) acol <- matrix(NA, ncol(A), 1) for (i in 1:nrow(A)){ arow[i] <- mean(A[i,]) } for (i in 1:ncol(A)){ acol[i] <- mean(A[,i]) } matrixmean <- mean(acol) for (i in 1:nrow(A)){ for (j in 1:ncol(A)){ AA[i,j] <- (A[i,j]-arow[i]-acol[j]+matrixmean)/(-2) } } ev <- eigen(AA) scores <- matrix(NA, nrow(A), factors) for (i in 1:factors){ scores[,i] <- ev$vec[,i]*sqrt(ev$val[i]) scores[,i] <- (scores[,i] - mean(scores[,i]))/sd(scores[,i]) } return(scores) } ## check starting values of factor scores or ability parameters ## subjects on rows of X "factor.score.start.check" <- function(theta.start, X, prior.mean, prior.prec, eq.constraints, ineq.constraints, factors){ N <- nrow(X) ## set value of theta.start if (max(is.na(theta.start))==1) { theta.start <- factor.score.eigen.start(agree.mat(X), 1) for (i in 1:factors){ theta.start[,i] <- prior.mean[i] + theta.start[,i] * sqrt(1/prior.prec[i,i]) # make sure these are consistent with hard and soft constraints for (j in 1:nrow(theta.start)){ if (eq.constraints[j,i] != -999){ if (theta.start[j,i] * eq.constraints[j,i] < 0){ theta.start[,i] <- -1*theta.start[,i] } } if (theta.start[j,i] * ineq.constraints[j,i] < 0){ theta.start[,i] <- -1*theta.start[,i] } } theta.start[eq.constraints[,i]!=-999,i] <- eq.constraints[eq.constraints[,i]!=-999,i] theta.start[ineq.constraints[,i]!=0,i] <- abs(theta.start[ineq.constraints[,i]!=0,i]) * ineq.constraints[ineq.constraints[,i]!=0,i] } } else if(is.numeric(theta.start) && is.null(dim(theta.start))) { theta.start <- theta.start * matrix(1, N, 1) } else if((dim(theta.start)[1] != N) || (dim(theta.start)[2] != factors)) { cat("Starting value for theta not appropriately sized.\n") stop("Please respecify and call", calling.function(), " again.\n", call.=FALSE) } else { cat("Inappropriate value of theta.start passed.\n") stop("Please respecify and call", calling.function(), " again.\n", call.=FALSE) } ## check value of theta.start prev.bind.constraints <- rep(0, factors) for (i in 1:N){ for (j in 1:factors){ if (eq.constraints[i,j]==-999){ if(ineq.constraints[i,j]>0 && theta.start[i,j] < 0){ if (prev.bind.constraints[j]==0){ theta.start[,j] <- -1*theta.start[,j] } else { cat("Parameter constraints logically inconsistent.\n") stop("Please respecify and call ", calling.function(), " again.", call.=FALSE) } prev.bind.constraints[j] <- prev.bind.constraints[j] + 1 } if(ineq.constraints[i,j]<0 && theta.start[i,j] > 0){ if (prev.bind.constraints[j]==0){ theta.start[,j] <- -1*theta.start[,j] } else { cat("Parameter constraints logically inconsistent.\n") stop("Please respecify and call ", calling.function(), " again.", call.=FALSE) } prev.bind.constraints[j] <- prev.bind.constraints[j] + 1 } } else { if ((theta.start[i,j] * eq.constraints[i,j]) > 0){ theta.start[i,j] <- eq.constraints[i,j] } else { if (prev.bind.constraints[j]==0){ theta.start[,j] <- -1*theta.start[,j] theta.start[i,j] <- eq.constraints[i,j] } else { cat("Parameter constraints logically inconsistent.\n") stop("Please respecify and call ", calling.function(), " again.", call.=FALSE) } prev.bind.constraints[j] <- prev.bind.constraints[j] + 1 } } } } return(theta.start) } ## get starting values for factor uniqueness matrix (Psi) "factuniqueness.start" <- function(psi.start, X){ K <- ncol(X) if 
(any(is.na(psi.start))){ Psi <- 0.5 * diag(diag(var(X))) } else if (is.double(psi.start) && (length(psi.start==1) || length(psi.start==K))){ Psi <- diag(K) * psi.start } else { cat("psi.start neither NA, double. nor appropriately sized matrix.\n") stop("Please respecify and call ", calling.function, " again.\n") } if (nrow(Psi) != K || ncol(Psi) != K){ cat("Psi starting value not K by K matrix.\n") stop("Please respecify and call ", calling.function, " again.\n") } return(Psi) } ## form the ind. normal prior for a factor loading matrix "form.factload.norm.prior" <- function(l0, L0, K, factors, X.names){ ## prior means if (is.matrix(l0)){ # matrix input for l0 if (nrow(l0)==K && ncol(l0)==factors){ Lambda.prior.mean <- l0 rownames(Lambda.prior.mean) <- X.names } else { cat("l0 not of correct size for model specification.\n") stop("Please respecify and call ", calling.function(), " again.\n") } } else if (is.list(l0)){ # list input for l0 Lambda.prior.mean <- matrix(0, K, factors) rownames(Lambda.prior.mean) <- X.names l0.names <- names(l0) for (i in 1:length(l0.names)){ name.i <- l0.names[i] l0.i <- l0[[i]] col.index <- l0.i[[1]] replace.element <- l0.i[[2]] if (is.numeric(replace.element)){ Lambda.prior.mean[rownames(Lambda.prior.mean)==name.i, col.index] <- replace.element } } } else if (length(l0)==1 && is.numeric(l0)){ # scalar input for l0 Lambda.prior.mean <- matrix(l0, K, factors) rownames(Lambda.prior.mean) <- X.names } else { cat("l0 neither matrix, list, nor scalar.\n") stop("Please respecify and call ", calling.function(), " again.\n") } ## prior precisions if (is.matrix(L0)){ # matrix input for L0 if (nrow(L0)==K && ncol(L0)==factors){ Lambda.prior.prec <- L0 rownames(Lambda.prior.prec) <- X.names } else { cat("L0 not of correct size for model specification.\n") stop("Please respecify and call ", calling.function(), " again.\n") } } else if (is.list(L0)){ # list input for L0 Lambda.prior.prec <- matrix(0, K, factors) rownames(Lambda.prior.prec) <- X.names L0.names <- names(L0) for (i in 1:length(L0.names)){ name.i <- L0.names[i] L0.i <- L0[[i]] col.index <- L0.i[[1]] replace.element <- L0.i[[2]] if (is.numeric(replace.element)){ Lambda.prior.prec[rownames(Lambda.prior.prec)==name.i, col.index] <- replace.element } } } else if (length(L0)==1 && is.numeric(L0)){ # scalar input for L0 Lambda.prior.prec <- matrix(L0, K, factors) rownames(Lambda.prior.prec) <- X.names } else { cat("L0 neither matrix, list, nor scalar.\n") stop("Please respecify and call ", calling.function(), " again.\n") } if (min(Lambda.prior.prec) < 0) { cat("L0 contains negative elements.\n") stop("Please respecify and call ", calling.function(), " again.\n") } return( list(Lambda.prior.mean, Lambda.prior.prec)) } ## form ind. inv. gamma prior for a diagonal var. cov. 
matrix "form.ig.diagmat.prior" <- function(a0, b0, K){ ## setup prior for diag(Psi) if (length(a0)==1 && is.double(a0)) a0 <- matrix(a0, K, 1) else if (length(a0) == K && is.double(a0)) a0 <- matrix(a0, K, 1) else { cat("a0 not properly specified.\n") stop("Please respecify and call ", calling.function, " again.\n") } if (length(b0)==1 && is.double(b0)) b0 <- matrix(b0, K, 1) else if (length(b0) == K && is.double(b0)) b0 <- matrix(b0, K, 1) else { cat("b0 not properly specified.\n") stop("Please respecify and call ", calling.function(), " again.\n") } ## prior for Psi error checking if(min(a0) <= 0) { cat("IG(a0/2,b0/2) prior parameter a0 less than or equal to zero.\n") stop("Please respecify and call ", calling.function, " again.\n") } if(min(b0) <= 0) { cat("IG(a0/2,b0/2) prior parameter b0 less than or equal to zero.\n") stop("Please respecify and call ", calling.function(), " again.\n") } return(list(a0, b0) ) } # pull together the posterior density sample "form.mcmc.object" <- function(posterior.object, names, title, ...) { holder <- matrix(posterior.object$sampledata, posterior.object$samplerow, posterior.object$samplecol, byrow=FALSE) output <- mcmc(data=holder, start=(posterior.object$burnin+1), end=(posterior.object$burnin+posterior.object$mcmc), thin=posterior.object$thin) varnames(output) <- as.list(names) attr(output,"title") <- title attribs <- list(...) K <- length(attribs) attrib.names <- names(attribs) if (K>0){ for (i in 1:K){ attr(output, attrib.names[i]) <- attribs[[i]] } } return(output) } # form multivariate Normal prior "form.mvn.prior" <- function(b0, B0, K) { # prior mean if(is.null(dim(b0))) { b0 <- b0 * matrix(1,K,1) } if((dim(b0)[1] != K) || (dim(b0)[2] != 1)) { cat("Error: N(b0,B0^-1) prior b0 not conformable.\n") stop("Please respecify and call ", calling.function(), " again.", call.=FALSE) } # prior precision if(is.null(dim(B0))) { if (length(B0) > K){ stop("B0 was passed as a vector longer than K.\nB0 must be either a scalar or a matrix.\nPlease respecify and call ", calling.function(), " again.\n", call.=FALSE) } B0 <- B0 * diag(K) } if((dim(B0)[1] != K) || (dim(B0)[2] != K)) { cat("Error: N(b0,B0^-1) prior B0 not conformable.\n") stop("Please respecify and call ", calling.function(), " again.\n", call.=FALSE) } ## check B0 for symmetry symproblem <- FALSE for (i in 1:K){ for (j in i:K){ if (B0[i,j] != B0[j,i]){ symproblem <- TRUE } } } if (symproblem){ cat("B0 is not symmetric.\n") stop("Please respecify and call ", calling.function(), " again.\n", call.=FALSE) } return(list(b0,B0)) } # parse the passed seeds # 1] if a scalar is passed, it is used by Mersennse twister # 2] if a list of length two is passed, a parallel-friendly stream is # created using L'Ecuyer "form.seeds" <- function(seed) { if(length(seed)==1) { if(is.na(seed)) seed <- 12345 seed <- as.integer(seed) if(seed < 0) { cat("Error: Mersenne seed negative.\n") stop("Please respecify and call ", calling.function(), " again.", call.=FALSE) } seeds <- list(0, rep(seed,6), 0) } if(length(seed)==2) { if(!is.list(seed)) { cat("Error: List must be passed to use L'Ecuyer.\n") stop("Please respecify and call ", calling.function(), " again.", call.=FALSE) } lec.seed <- seed[[1]] lec.substream <- as.integer(seed[[2]]) if(is.na(lec.seed[1])) lec.seed <- rep(12345, 6) if(length(lec.seed) != 6) { cat("Error: L'Ecuyer seed not of length six.\n") stop("Please respecify and call ", calling.function(), " again.", call.=FALSE) } if(!all(lec.seed >= 0)) { cat("Error: At least one L'Ecuyer seed negative.\n") 
stop("Please respecify and call ", calling.function(), " again.", call.=FALSE) } if( max(lec.seed[1:3]) >= 4294967087){ cat("Error: At least one of first three L'Ecuyer seeds\n") cat(" greater than or equal to 4294967087\n") stop("Please respecify and call ", calling.function(), " again.", call.=FALSE) } if( all(lec.seed[1:3] == 0 )){ cat("Error: first three L'Ecuyer seeds == 0\n") stop("Please respecify and call ", calling.function(), " again.", call.=FALSE) } if( max(lec.seed[4:6]) >= 4294944443){ cat("Error: At least one of last three L'Ecuyer seeds\n") cat(" greater than or equal to 4294944443\n") stop("Please respecify and call ", calling.function(), " again.", call.=FALSE) } if( all(lec.seed[4:6] == 0 )){ cat("Error: last three L'Ecuyer seeds == 0\n") stop("Please respecify and call ", calling.function(), " again.", call.=FALSE) } if(lec.substream < 1) { cat("Error: L'Ecuyer substream number not positive.\n") stop("Please respecify and call ", calling.function(), " again.", call.=FALSE) } seeds <- list(1, lec.seed, lec.substream) } if(length(seed)>2) { cat("Error: Seed passed as length greater than two.\n") stop("Please respecify and call ", calling.function(), " again.", call.=FALSE) } return(seeds) } # form Wishart prior "form.wishart.prior" <- function(v, S, K) { # check to see if degrees of freedom produces proper prior if(v < K) { cat("Error: Wishart(v,S) prior v less than or equal to K.\n") stop("Please respecify and call ", calling.function(), " again.\n") } # form the prior scale matrix if(is.null(dim(S))) { S <- S * diag(K) } if((dim(S)[1] != K) | (dim(S)[2] != K)) { cat("Error: Wishart(v,S) prior S not comformable [K times K].\n") stop("Please respecify and call ", calling.function(), " again.\n") } return(list(v,S)) } # parse formula and return a list that contains the model response # matrix as element one, and the model matrix as element two "parse.formula" <- function(formula, data=NULL, intercept=TRUE, justX=FALSE) { # extract Y, X, and variable names for model formula and frame mf <- match.call(expand.dots = FALSE) mf$intercept <- mf$justX <- NULL mf$drop.unused.levels <- TRUE mf[[1]] <- as.name("model.frame") mf <- eval(mf, sys.frame(sys.parent())) mt <- attr(mf, "terms") if (!intercept){ attributes(mt)$intercept <- 0 } # null model support X <- if (!is.empty.model(mt)) model.matrix(mt, mf, contrasts) X <- as.matrix(X) # X matrix xvars <- dimnames(X)[[2]] # X variable names xobs <- dimnames(X)[[1]] # X observation names if (justX){ Y <- NULL } else { Y <- as.matrix(model.response(mf, "numeric")) # Y matrix } return(list(Y, X, xvars, xobs)) } # setup tuning constant for scalar parameter "scalar.tune" <- function(mcmc.tune){ if (max(is.na(mcmc.tune))){ cat("Error: Scalar tuning parameter cannot contain NAs.\n") stop("Please respecify and call ", calling.function(), " again.", call.=FALSE) } if (length(mcmc.tune) != 1){ cat("Error: Scalar tuning parameter does not have length = 1.\n") stop("Please respecify and call ", calling.function(), " again.", call.=FALSE) } if (mcmc.tune <= 0) { cat("Error: Scalar tuning parameter not positive.\n") stop("Please respecify and call ", calling.function(), " again.", call.=FALSE) } return(mcmc.tune) } # put together starting values for sigma2 "sigma2.start" <- function(sigma2.start, formula, data) { if(is.na(sigma2.start)){ # use MLE lm.out <- lm(formula, data=data) sigma2.start <- var(residuals(lm.out)) } else if(sigma2.start <= 0) { cat("Error: Starting value for sigma2 negative.\n") stop("Please respecify and call ", 
calling.function(), " again.\n", call.=FALSE) } else if (length(sigma2.start) != 1){ cat("Error: Starting value for sigma2 not a scalar.\n") stop("Please respecify and call ", calling.function(), " again.\n", call.=FALSE) } else if (!is.numeric(sigma2.start)){ cat("Error: Starting value for sigma2 neither numeric nor NA.\n") stop("Please respecify and call ", calling.function(), " again.\n", call.=FALSE) } return(sigma2.start) } ## setup diagonal tuning matrix for vector parameters "vector.tune" <- function(mcmc.tune, K){ if (max(is.na(mcmc.tune))){ cat("Error: Vector tuning parameter cannot contain NAs.\n") stop("Please respecify and call ", calling.function(), " again.", call.=FALSE) } if (length(mcmc.tune) == 1){ mcmc.tune <- rep(mcmc.tune, K) } if (length(mcmc.tune) != K){ cat("Error: length(vector tuning parameter) != length(theta) or 1.\n") stop("Please respecify and call ", calling.function(), " again.", call.=FALSE) } if (sum(mcmc.tune <= 0) != 0) { cat("Error: Vector tuning parameter cannot contain negative values.\n") stop("Please respecify and call ", calling.function(), " again.", call.=FALSE) } if (length(mcmc.tune)==1){ return(matrix(mcmc.tune, 1, 1)) } else{ return(diag(as.double(mcmc.tune))) } } MCMCpack/R/hidden-hmodels.R0000644000176000001440000002400712133644103015070 0ustar ripleyusers######################################################################### ## ## Following functions perform checks for MCMCpack hierarchical models ## (denoted MCMCh...). ## ghislain.vieilledent@cirad.fr, May 5 2011 ## ######################################################################### ##======================================================================= ## ## Check group ## ##======================================================================= check.group.hmodels <- function(group,data) { if (!(group %in% colnames(data))) { cat("Error: group must be a string which gives the name of the grouping variable in data \n") stop("Please respecify and call ", calling.function(), " again.", call.=FALSE) } return(0) } ##======================================================================= ## ## Check mcmc parameters ## ##======================================================================= check.mcmc.parameters.hmodels <- function(burnin, mcmc, thin) { if(mcmc %% thin != 0) { cat("Error: MCMC iterations not evenly divisible by thinning interval.\n") stop("Please respecify and call ", calling.function(), " again.", call.=FALSE) } if(mcmc <= 0) { cat("Error: MCMC iterations must be strictly positive.\n") stop("Please respecify and call ", calling.function(), " again.", call.=FALSE) } if(burnin < 0) { cat("Error: Burnin iterations negative.\n") stop("Please respecify and call ", calling.function(), " again.", call.=FALSE) } if(((burnin+mcmc) %% 10 != 0) || (burnin+mcmc)<100) { cat("Error: Value 'burnin+mcmc' should be divisible by 10 and >= 100.\n") stop("Please respecify and call ", calling.function(), " again.", call.=FALSE) } if(thin < 1) { cat("Error: Thinning interval must be superior or equal to 1.\n") stop("Please respecify and call ", calling.function(), " again.", call.=FALSE) } return(0) } ##======================================================================= ## ## Check verbose ## ##======================================================================= check.verbose.hmodels <- function (verbose) { if (!(verbose%in%c(0,1))) { cat("Error: verbose must take value 0 or 1.\n") stop("Please respecify and call ", calling.function(), " again.", call.=FALSE) } return(0) } 
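##=======================================================================
##
## Illustration only -- not part of the original package source.
## A minimal sketch of how the validators defined above in this file
## behave, assuming the file has been sourced so the check functions are
## available.  Wrapped in `if (FALSE)` so nothing is executed when the
## package is loaded.
##
##=======================================================================
if (FALSE) {
  ## accepted: mcmc is divisible by thin, and burnin + mcmc = 2000 is
  ## divisible by 10 and at least 100, so the check returns 0
  check.mcmc.parameters.hmodels(burnin = 1000, mcmc = 1000, thin = 10)

  ## rejected: burnin + mcmc = 155 is not divisible by 10, so the
  ## check signals an error
  check.mcmc.parameters.hmodels(burnin = 100, mcmc = 55, thin = 1)

  ## verbose must be exactly 0 or 1
  check.verbose.hmodels(1)   # returns 0
  check.verbose.hmodels(2)   # signals an error
}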
##======================================================================= ## ## Check FixOD ## ##======================================================================= check.FixOD.hmodels <- function (FixOD) { if (!(FixOD%in%c(0,1))) { cat("Error: FixOD must take value 0 or 1.\n") stop("Please respecify and call ", calling.function(), " again.", call.=FALSE) } return(0) } ##======================================================================= ## ## Check Y in c(0,1) for Binomial process ## ##======================================================================= check.Y.Binomial.hmodels <- function (Y) { if (sum(!(c(Y)%in%c(0,1)))>0) { cat("Error: Response variable must take value 0 or 1.\n") stop("Please respecify and call ", calling.function(), " again.", call.=FALSE) } return(0) } ##======================================================================= ## ## Check Y is a positive integer for Poisson process ## ##======================================================================= check.Y.Poisson.hmodels <- function (Y) { if (sum(!(c(Y)>=0 && c(Y)%%1==0)>0)) { cat("Error: Response variable must be a positive integer.\n") stop("Please respecify and call ", calling.function(), " again.", call.=FALSE) } return(0) } ##======================================================================= ## ## Check and form seed ## ##======================================================================= form.seeds.hmodels <- function(seed) { if(length(seed)!=1) { cat("Error: Mersenne seed should be of length 1.\n") stop("Please respecify and call ", calling.function(), " again.", call.=FALSE) } if(length(seed)==1) { if(is.na(seed)) { seed <- 12345 } else { seed <- as.integer(seed) } if(seed < 0) { cat("Error: Mersenne seed negative.\n") stop("Please respecify and call ", calling.function(), " again.", call.=FALSE) } } return(seed) } ##======================================================================= ## ## Check and form starting values ## ##======================================================================= #=============================== # form beta.start #=============================== form.beta.start.hmodels <- function (fixed,data,beta.start,np,family,defaults=NA) { if (is.na(beta.start)[1] & is.na(defaults)[1]){ # use GLM estimates beta.start <- matrix(coef(glm(fixed, family=family, data=data)), np, 1) } else if(is.na(beta.start)[1] & !is.na(defaults)[1]){ # use passed values beta.start <- matrix(defaults,np,1) } else if(is.null(dim(beta.start))) { beta.start <- beta.start * matrix(1,np,1) } else if(!all(dim(beta.start) == c(np,1))) { cat("Error: beta.start not conformable.\n") stop("Please respecify and call ", calling.function(), " again.\n", call.=FALSE) } return(beta.start) } #=============================== # form sigma2.start #=============================== form.sigma2.start.hmodels <- function (fixed,data,sigma2.start,family) { if (is.na(sigma2.start)[1]){ # use GLM estimates sigma2.start <- var(residuals(glm(fixed, family=family, data=data))) } else { sigma2.start <- as.integer(sigma2.start[1]) } if (sigma2.start<=0) { cat("Error: Starting value for sigma2 negative.\n") stop("Please respecify and call ", calling.function(), " again.", call.=FALSE) } return(sigma2.start) } #=============================== # form Vb.start #=============================== form.Vb.start.hmodels <- function (Vb.start,nq) { if (is.na(Vb.start)[1]) { Vb.start <- diag(1,nq) } else if(is.null(dim(Vb.start))) { Vb.start <- Vb.start * diag(nq) } if ((dim(Vb.start)[1] != nq) || (dim(Vb.start)[2] != 
nq)) { cat("Error: Vb.start not conformable.\n") stop("Please respecify and call ", calling.function(), " again.", call.=FALSE) } if (sum(diag(Vb.start)>0)!=nq) { cat("Error: Vb.start should have positive values on the diagonal.\n") stop("Please respecify and call ", calling.function(), " again.", call.=FALSE) } return(Vb.start) } ##======================================================================= ## ## Check and form priors ## ##======================================================================= #=============================== # form multivariate Normal prior #=============================== form.mvn.prior.hmodels <- function(mubeta, Vbeta, np) { # prior mean if(is.null(dim(mubeta))) { mubeta <- mubeta * matrix(1,nrow=np,ncol=1) } if((dim(mubeta)[1] != np) || (dim(mubeta)[2] != 1)) { cat("Error: in N(mubeta,Vbeta) prior, mubeta not conformable.\n") stop("Please respecify and call ", calling.function(), " again.", call.=FALSE) } # prior variance if(is.null(dim(Vbeta))) { if (length(Vbeta) > np){ cat("Vbeta was passed as a vector longer than np. Vbeta must be either a scalar or a matrix.") stop("Please respecify and call ", calling.function(), " again.\n", call.=FALSE) } Vbeta <- Vbeta * diag(np) } if((dim(Vbeta)[1] != np) || (dim(Vbeta)[2] != np)) { cat("Error: in N(mubeta,Vbeta), prior Vbeta not conformable [p times p].\n") stop("Please respecify and call ", calling.function(), " again.\n", call.=FALSE) } # check Vbeta for symmetry symproblem <- FALSE for (i in 1:np){ for (j in i:np){ if (Vbeta[i,j] != Vbeta[j,i]){ symproblem <- TRUE } } } if (symproblem){ cat("Vbeta is not symmetric.\n") stop("Please respecify and call ", calling.function(), " again.\n", call.=FALSE) } # check Vbeta for positive number on the diagonal if (sum(diag(Vbeta)>0)!=np) { cat("Error: Vbeta should have positive values on the diagonal.\n") stop("Please respecify and call ", calling.function(), " again.\n", call.=FALSE) } return(list(mubeta,Vbeta)) } #=================== # form Wishart prior #=================== form.wishart.prior.hmodels <- function(r, R, nq) { # check to see if degrees of freedom produces proper prior if(r < nq) { cat("Error: in Wishart(r,rR) prior, r less than q.\n") stop("Please respecify and call ", calling.function(), " again.\n") } # form the prior scale matrix if(is.null(dim(R))) { R <- R * diag(nq) } if((dim(R)[1] != nq) | (dim(R)[2] != nq)) { cat("Error: in Wishart(r,rR) prior, R not comformable [q times q].\n") stop("Please respecify and call ", calling.function(), " again.\n") } # check R for symmetry symproblem <- FALSE for (i in 1:nq){ for (j in i:nq){ if (R[i,j] != R[j,i]){ symproblem <- TRUE } } } if (symproblem){ cat("R is not symmetric.\n") stop("Please respecify and call ", calling.function(), " again.\n", call.=FALSE) } # check R for positive number on the diagonal if (sum(diag(R)>0)!=nq) { cat("Error: R should have positive values on the diagonal.\n") stop("Please respecify and call ", calling.function(), " again.\n", call.=FALSE) } return(list(r,R)) } #========================== # check inverse Gamma prior #========================== check.ig.prior.hmodels <- function(nu, delta) { if(nu <= 0) { cat("Error: in IG(nu,delta) prior, nu less than or equal to zero.\n") stop("Please respecify and call ", calling.function(), " again.\n", call.=FALSE) } if(delta <= 0) { cat("Error: in IG(nu,delta) prior, delta less than or equal to zero.\n") stop("Please respecify and call ", calling.function(), " again.\n", call.=FALSE) } return(0) } #========================== # END 
#========================== MCMCpack/R/distn.R0000644000176000001440000003072612133644103013332 0ustar ripleyusers########################################################################## ## Density Functions and Random Number Generators ## ## This software is distributed under the terms of the GNU GENERAL ## PUBLIC LICENSE Version 2, June 1991. See the package LICENSE ## file for more information. ## ## Copyright (C) 2003-2007 Andrew D. Martin and Kevin M. Quinn ## Copyright (C) 2007-present Andrew D. Martin, Kevin M. Quinn, ## and Jong Hee Park ########################################################################## ## ## Wishart ## # rwish delivers a pseudo-random Wishart deviate # # USAGE: # # A <- rwish(v, S) # # INPUT: # # v degrees of freedom # # S Scale matrix # # OUTPUT: # # A a pseudo-random Wishart deviate # # Based on code originally posted by Bill Venables to S-news # on 6/11/1998 # # KQ on 2/5/2001 "rwish" <- function(v, S) { if (!is.matrix(S)) S <- matrix(S) if (nrow(S) != ncol(S)) { stop(message="S not square in rwish().\n") } if (v < nrow(S)) { stop(message="v is less than the dimension of S in rwish().\n") } p <- nrow(S) CC <- chol(S) Z <- matrix(0, p, p) diag(Z) <- sqrt(rchisq(p, v:(v-p+1))) if(p > 1) { pseq <- 1:(p-1) Z[rep(p*pseq, pseq) + unlist(lapply(pseq, seq))] <- rnorm(p*(p-1)/2) } return(crossprod(Z %*% CC)) } # dwish evaluations the Wishart pdf at positive definite matrix W. # note: uses the Gelman, et. al. parameterization. # # USAGE: # # x <- dwish(W, v, S) # # INPUT: # # W positive definite matrix at which to evaluate PDF # # v degrees of freedom # # S Scale matrix # # OUTPUT: # # x the PDF evaluated (scalar) # # ADM 8/16/2002 "dwish" <- function(W, v, S) { if (!is.matrix(S)) S <- matrix(S) if (nrow(S) != ncol(S)){ stop(message="W not square in dwish()\n\n") } if (!is.matrix(W)) W <- matrix(W) if (nrow(W) != ncol(W)){ stop(message="W not square in dwish()\n\n") } if(nrow(S) != ncol(W)){ stop(message="W and X of different dimensionality in dwish()\n\n") } if (v < nrow(S)){ stop(message="v is less than the dimension of S in dwish()\n\n") } k <- nrow(S) # denominator gammapart <- 1 for(i in 1:k) { gammapart <- gammapart * gamma((v + 1 - i)/2) } denom <- gammapart * 2^(v * k / 2) * pi^(k*(k-1)/4) # numerator detS <- det(S) detW <- det(W) hold <- solve(S) %*% W tracehold <- sum(hold[row(hold) == col(hold)]) num <- detS^(-v/2) * detW^((v - k - 1)/2) * exp(-1/2 * tracehold) return(num / denom) } ## ## Inverse Wishart ## # riwish generates a draw from the inverse Wishart distribution # (using the Wishart generator) "riwish" <- function(v, S) { return(solve(rwish(v,solve(S)))) } # diwish evaluates the inverse Wishart pdf at positive definite # matrix W. note: uses the Gelman, et. al. parameterization. 
# # USAGE: # # x <- diwish(W, v, S) # # INPUT: # # W positive definite matrix at which to evaluate PDF # # v degrees of freedom # # S Scale matrix # # OUTPUT: # # x the PDF evaluated (scalar) # # ADM 8/16/2002 "diwish" <- function(W, v, S) { if (!is.matrix(S)) S <- matrix(S) if (nrow(S) != ncol(S)){ stop("W not square in diwish().\n") } if (!is.matrix(W)) S <- matrix(W) if (nrow(W) != ncol(W)){ stop("W not square in diwish().\n") } if(nrow(S) != ncol(W)){ stop("W and X of different dimensionality in diwish().\n") } if (v < nrow(S)){ stop("v is less than the dimension of S in diwish().\n") } k <- nrow(S) # denominator gammapart <- 1 for(i in 1:k) { gammapart <- gammapart * gamma((v + 1 - i)/2) } denom <- gammapart * 2^(v * k / 2) * pi^(k*(k-1)/4) # numerator detS <- det(S) detW <- det(W) hold <- S %*% solve(W) tracehold <- sum(hold[row(hold) == col(hold)]) num <- detS^(v/2) * detW^(-(v + k + 1)/2) * exp(-1/2 * tracehold) return(num / denom) } ## ## Inverse Gamma ## ## evaluate the inverse gamma density ## ## Kevin Rompala 5/6/2003 ## fixed KQ 3/8/2005 "dinvgamma" <- function(x, shape, scale = 1) { # error checking if(shape <= 0 | scale <=0) { stop("Shape or scale parameter negative in dinvgamma().\n") } alpha <- shape beta <- scale # done on log scale to allow for large alphas and betas log.density <- alpha * log(beta) - lgamma(alpha) - (alpha + 1) * log(x) - (beta/x) return(exp(log.density)) } ## generate draws from the inverse gamma density (using ## the gamma simulator) ## ## Kevin Rompala 5/6/2003 ## fixed KQ 3/8/2005 ## shape and rate made explicit 5/25/2010 (KQ) "rinvgamma" <- function(n, shape, scale = 1) { return(1 / rgamma(n=n, shape=shape, rate=scale)) } ## ## Dirichlet (Multivariate Beta) ## # ddirichlet evaluates the density of the Dirichlet at # vector x given shape parameter vector (or matrix) # alpha. # # note: this code is taken verbatim from the R-package # "Greg's Miscellaneous Functions" maintained by # Gregory R. Warnes # # Kevin Rompala 5/6/2003 "ddirichlet" <- function(x, alpha) { dirichlet1 <- function(x, alpha) { logD <- sum(lgamma(alpha)) - lgamma(sum(alpha)) s <- sum((alpha-1)*log(x)) exp(sum(s)-logD) } # make sure x is a matrix if(!is.matrix(x)) if(is.data.frame(x)) x <- as.matrix(x) else x <- t(x) if(!is.matrix(alpha)) alpha <- matrix( alpha, ncol=length(alpha), nrow=nrow(x), byrow=TRUE) if( any(dim(x) != dim(alpha)) ) stop("Mismatch between dimensions of x and alpha in ddirichlet().\n") pd <- vector(length=nrow(x)) for(i in 1:nrow(x)) pd[i] <- dirichlet1(x[i,],alpha[i,]) # Enforce 0 <= x[i,j] <= 1, sum(x[i,]) = 1 pd[ apply( x, 1, function(z) any( z <0 | z > 1)) ] <- 0 pd[ apply( x, 1, function(z) all.equal(sum( z ),1) !=TRUE) ] <- 0 return(pd) } # rdirichlet generates n random draws from the Dirichlet at # vector x given shape parameter vector (or matrix) # alpha. # # note: this code is taken verbatim from the R-package # "Greg's Miscellaneous Functions" maintained by # Gregory R. Warnes # # Kevin Rompala 5/6/2003 "rdirichlet" <- function(n, alpha) { l <- length(alpha) x <- matrix(rgamma(l*n,alpha),ncol=l,byrow=TRUE) sm <- x%*%rep(1,l) return(x/as.vector(sm)) } ## ## Non-Central Hypergeometric ## # code to evaluate the noncentral hypergeometric density (at a single point # or at all defined points). 
# # parameters: # # n1, n2 -- number of subjects in group 1 and 2 # # Y1, Y2 -- number of subjects with positive outcome [unobserved] # # psi -- odds ratio # # m1 -- sum of observed values of Y1 and Y2 (Y1 + Y2) # # output: # # pi -- Pr(Y1 = x | Y1 + Y2 = m1) x=ll,...,uu # # for ll = max(0, m1-n2) and uu = min(n1, m1) # # if x is NA, then a matrix is returned, with the first column the possible # values of x, and the second columns the probabilities. if x is a scalar, # the density is evaluated at that point. # # ADM on 5/8/2003 # # note: code adapted from R code published in conjunction with: # # Liao, J.G. And Rosen, O. (2001) Fast and Stable Algorithms for Computing and # Sampling from the Noncentral Hypergeometric Distribution. The American # Statistician 55, 366-369. # "dnoncenhypergeom" <- function (x = NA, n1, n2, m1, psi) { ## ## AUXILIARY FUNCTIONS ## mode.compute <- function(n1, n2, m1, psi, ll, uu) { a <- psi - 1 b <- -( (m1+n1+2)*psi + n2-m1 ) c <- psi*(n1+1)*(m1+1) q <- b + sign(b)*sqrt(b*b-4*a*c) q <- -q/2 mode <- trunc(c/q) if(uu>=mode && mode>=ll) return(mode) else return( trunc(q/a) ) } r.function <- function(n1, n2, m1, psi, i) { (n1-i+1)*(m1-i+1)/i/(n2-m1+i)*psi } ## ## MAIN FUNCTION ## # upper and lower limits for density evaluation ll <- max(0, m1-n2) uu <- min(n1, m1) # check parameters if(n1 < 0 | n2 < 0) { stop("n1 or n2 negative in dnoncenhypergeom().\n") } if(m1 < 0 | m1 > (n1 + n2)) { stop("m1 out of range in dnoncenhypergeom().\n") } if(psi <=0) { stop("psi [odds ratio] negative in dnoncenhypergeom().\n") } if(!is.na(x) & (x < ll | x > uu)) { stop("x out of bounds in dnoncenhypergeom().\n") } if(!is.na(x) & length(x) > 1) { stop("x neither missing or scalar in dnoncenhypergeom().\n") } # evaluate density using recursion (from mode) mode <- mode.compute(n1, n2, m1, psi, ll, uu) pi <- array(1, uu-ll+1) shift <- 1-ll if(modell) { r1 <- 1/r.function( n1, n2, m1, psi, mode:(ll+1) ) pi[(mode-1 + shift):(ll + shift)] <- cumprod(r1) } pi <- pi/sum(pi) if(is.na(x)) return(cbind(ll:uu,pi)) else return(pi[x + shift]) } # code to generate random deviates from the noncentral hypergeometric density # # parameters: # # n -- the number of draws to make # # n1, n2 -- number of subjects in group 1 and 2 # # Y1, Y2 -- number of subjects with positive outcome [unobserved] # # psi -- odds ratio # # m1 -- sum of observed values of Y1 and Y2 (Y1 + Y2) # # output: # # output -- a list of length n of random deviates # # # ADM on 5/9/2003 # # note: code adapted from R code published in conjunction with: # # Liao, J.G. And Rosen, O. (2001) Fast and Stable Algorithms for Computing and # Sampling from the Noncentral Hypergeometric Distribution. The American # Statistician 55, 366-369. 
# "rnoncenhypergeom" <- function (n, n1, n2, m1, psi) { ## ## AUXILIARY FUNCTIONS ## mode.compute <- function(n1, n2, m1, psi, ll, uu) { a <- psi - 1 b <- -( (m1+n1+2)*psi + n2-m1 ) c <- psi*(n1+1)*(m1+1) q <- b + sign(b)*sqrt(b*b-4*a*c) q <- -q/2 mode <- trunc(c/q) if(uu>=mode && mode>=ll) return(mode) else return( trunc(q/a) ) } sample.low.to.high <- function(lower.end, ran, pi, shift) { for(i in lower.end:uu) { if(ran <= pi[i+shift]) return(i) ran <- ran - pi[i+shift] } } sample.high.to.low <- function(upper.end, ran, pi, shift) { for(i in upper.end:ll) { if(ran <= pi[i+shift]) return(i) ran <- ran - pi[i+shift] } } single.draw <- function(n1, n2, m1, psi, ll, uu, mode, pi) { ran <- runif(1) shift <- 1-ll if(mode==ll) return(sample.low.to.high(ll, ran, pi, shift)) if(mode==uu) return(sample.high.to.low(uu, ran, pi, shift)) if(ran < pi[mode+shift]) return(mode) ran <- ran - pi[mode+shift] lower <- mode - 1 upper <- mode + 1 repeat { if(pi[upper + shift] >= pi[lower + shift]) { if(ran < pi[upper+shift]) return(upper) ran <- ran - pi[upper+shift] if(upper == uu) return( sample.high.to.low(lower, ran, pi, shift) ) upper <- upper + 1 } else { if(ran < pi[lower+shift]) return(lower) ran <- ran - pi[lower+shift] if(lower == ll) return( sample.low.to.high(upper, ran, pi, shift) ) lower <- lower - 1 } } } ## ## MAIN FUNCTION ## # upper and lower limits for density evaluation ll <- max(0, m1-n2) uu <- min(n1, m1) # check parameters if(n1 < 0 | n2 < 0) { stop("n1 or n2 negative in rnoncenhypergeom().\n") } if(m1 < 0 | m1 > (n1 + n2)) { stop("m1 out of range in rnoncenhypergeom().\n") } if(psi <=0) { stop("psi [odds ratio] negative in rnoncenhypergeom().\n") } # get density and other parameters mode <- mode.compute(n1, n2, m1, psi, ll, uu) pi <- dnoncenhypergeom(NA, n1, n2, m1, psi)[,2] output <- array(0,n) for(i in 1:n) output[i] <- single.draw(n1, n2, m1, psi, ll, uu, mode, pi) return(output) } MCMCpack/R/btsutil.R0000644000176000001440000003150612133644103013674 0ustar ripleyusers########################################################################## ## Utility Functions for Bayesian Times Series Models ## ## written and maintained by: ## Jong Hee Park ## Department of Political Science ## University of Chicago ## jhp@uchicago.edu ## ## Revised on 09/12/2007 JHP ## ## NOTE: only the plot functions are documented and exported in the ## NAMESPACE. ## ## This software is distributed under the terms of the GNU GENERAL ## PUBLIC LICENSE Version 2, June 1991. See the package LICENSE ## file for more information. ## ## Copyright (C) 2003-2007 Andrew D. Martin and Kevin M. Quinn ## Copyright (C) 2007-present Andrew D. Martin, Kevin M. 
Quinn, ## and Jong Hee Park ########################################################################## ############################################################## ## Helper functions for MCMCpoissonChange and MCMCbinaryChange() ############################################################## ## switch a state vector into a matrix containing the number of states "switchg" <- function(s1){ s <- max(s1) out <- matrix(0,s,s) ## all P(i,i+1) are 1 for (i in 1:(s-1)){ out[i,i+1] <- 1} ## diagonal elements is (the number of occurrence - 1) diag(out) <- table(s1)-1 return(out) } ## "trans.mat.prior" makes a transition matrix "trans.mat.prior" <- function(m, n, a=NULL, b=NULL){ if (!is.null(a)|!is.null(b)){ a <- a b <- b } else { expected.duration <- round(n/(m+1)) b <- 0.1 a <- b*expected.duration } trans <- diag(1, m+1) ## put a as diagonal elements except the last row diag(trans)[1:m]<-rep(a, m) ## put b in trans[i, i+1] for (i in 1:m){trans[i, i+1]<-b} return(trans) } ## "plotState" draws a plot of posterior distribution of states "plotState" <- function (mcmcout, main="Posterior Regime Probability", ylab=expression(paste("Pr(", S[t], "= k |", Y[t], ")")), legend.control = NULL, cex = 0.8, lwd = 1.2, start=1) { out <- attr(mcmcout, "prob.state") y <- attr(mcmcout, "y") m <- attr(mcmcout, "m") if (!is.ts(y)) y <- ts(y, start) time.frame <- as.vector(time(y)) plot(start, 0, xlim = range(time.frame), ylim = c(0, 1), type = "n", main = main, xlab = "Time", cex = cex, lwd = lwd, ylab = ylab, axes=F) axis(1, tick = FALSE, col="darkgrey") axis(2, tick = FALSE, col="darkgrey") for (i in 1:length(axTicks(1))) lines(c(axTicks(1)[i], axTicks(1)[i]), c(0,1), col="darkgrey") for (i in 1:length(axTicks(2))) lines(c(axTicks(2)[i], max(axTicks(1))), c(axTicks(2)[i], axTicks(2)[i]), col="darkgrey") for (i in 1:(m + 1)) points(time.frame, out[, i], type = "o", lty = i, lwd = lwd, col = i, cex = cex) if (!is.null(legend.control)) { if (length(legend.control) != 2) stop("You should specify x and y coordinate for a legend.") else legend(legend.control[1], legend.control[2], legend = paste("State",1:(m + 1), sep = ""), col = 1:(m + 1), lty = 1:(m + 1), lwd = rep(lwd, m + 1), pch = rep(1, m + 1), bty = "n") } } ## "plotChangepoint" draws a plot of posterior changepoint probability ## Thanks to Patrick Brandt for providing the idea of overlaying. 
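## Example (added for illustration; not part of the original source).  The
## state vector below is made up purely to show what the two helpers above
## return: switchg() converts a sampled state vector into a matrix of
## transition counts (diagonal = number of occurrences - 1, superdiagonal = 1),
## and trans.mat.prior() builds the corresponding prior transition matrix for
## m breaks over n periods.
## s1 <- c(1, 1, 1, 2, 2, 2, 2, 3, 3, 3)             # hypothetical state vector
## switchg(s1)                                       # 3 x 3 transition counts
## trans.mat.prior(m = 2, n = 100)                   # a, b from expected duration
## trans.mat.prior(m = 2, n = 100, a = 0.9, b = 0.1) # user-supplied a and b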
"plotChangepoint" <- function (mcmcout, main="Posterior Density of Regime Change Probabilities", xlab = "Time", ylab = "", verbose = FALSE, start=1, overlay=FALSE) { out <- attr(mcmcout, "prob.state") y <- attr(mcmcout, "y") m <- attr(mcmcout, "m") if(overlay==FALSE){ par(mfrow = c(m, 1), mar = c(2, 4, 1, 1)) } if (!is.ts(y)) y <- ts(y, start) time.frame <- as.vector(time(y)) if (m == 1) { pr.st <- c(0, diff(out[, (m + 1)])) pr.st[pr.st<0] <- 0 plot(time.frame, pr.st, type = "h", lwd=2, main = main, xlab = xlab, ylab = ylab, axes=F) axis(1, tick = FALSE, col="darkgrey") axis(2, tick = FALSE, col="darkgrey") for (i in 1:length(axTicks(1))) lines(c(axTicks(1)[i], axTicks(1)[i]), c(0, max(axTicks(2))), col="darkgrey") for (i in 1:length(axTicks(2))) lines(c(axTicks(2)[i], max(axTicks(1))), c(axTicks(2)[i], axTicks(2)[i]), col="darkgrey") cp <- which(cumsum(pr.st) > 0.5)[1] - 1 lines(c(cp + time.frame[1], cp + time.frame[1]), c(0, max(axTicks(2))), lty = 3, col = "red") } else { cp <- rep(NA, m) for (i in 2:m) { pr.st <- c(0, diff(out[, i])) pr.st <- ifelse(pr.st < 0, 0, pr.st) plot(time.frame, pr.st, type = "h", lwd=2, main = "", xlab = xlab, ylab = ylab, col="black", axes=FALSE) axis(1, tick = FALSE, col="darkgrey") axis(2, tick = FALSE, col="darkgrey") for (k in 1:length(axTicks(1))) {lines(c(axTicks(1)[k], axTicks(1)[k]), c(0, max(axTicks(2))), col="darkgrey")} for (k in 1:length(axTicks(2))) {lines(c(axTicks(2)[k], max(axTicks(1))), c(axTicks(2)[k], axTicks(2)[k]), col="darkgrey")} cp[i - 1] <- which(cumsum(pr.st) > 0.5)[1] - 1 lines(c(cp[i - 1] + time.frame[1], cp[i - 1] + time.frame[1]), c(0, max(axTicks(2))), lty = 3, col = "red") } pr.st <- c(0, diff(out[, (m + 1)])) pr.st[pr.st<0] <- 0 plot(time.frame, pr.st, type = "h", lwd=2, main = main, xlab = xlab, ylab = ylab, col="black", axes=FALSE) axis(1, tick = FALSE, col="darkgrey") axis(2, tick = FALSE, col="darkgrey") for (k in 1:length(axTicks(1))) {lines(c(axTicks(1)[k], axTicks(1)[k]), c(0, max(axTicks(2))), col="darkgrey")} for (k in 1:length(axTicks(2))) {lines(c(axTicks(2)[k], max(axTicks(1))), c(axTicks(2)[k], axTicks(2)[k]), col="darkgrey")} cp[m] <- which(cumsum(pr.st) > 0.5)[1] - 1 lines(c(cp[m] + time.frame[1], cp[m] + time.frame[1]), c(0, max(axTicks(2))), lty = 3, col = "red") } cp.means <- rep(NA, m + 1) cp.start <- c(1, cp + 1) cp.end <- c(cp, length(y)) if (verbose == TRUE){ cat("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n") cat("Expected changepoint(s) ", cp + time.frame[1], "\n") for (i in 1:(m + 1)) cp.means[i] <- mean(y[cp.start[i]:cp.end[i]]) cat("Local means for each regime are ", cp.means, "\n") cat("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n") } } ## prior check for transition matrix "check.P" <- function(P.start = NA, m=m, n=n, a=a, b=b){ if (is.na(P.start)[1]){ P <- trans.mat.prior(m=m, n=n, a=0.9, b=0.1)} else if ((dim(P.start)[1]==m+1)&(dim(P.start)[2]==m+1)){ if ((max(P.start)>1)||(min(P.start)<0)){ stop("Error: P starting values are out of unit range.\n") } else P <- P.start } else { stop("Error: P starting values are not conformable.\n") } return(P) } ## priro check for mean "check.theta" <- function(theta.start = NA, ns = ns, y = y, min, max){ if (is.na(theta.start)[1]){ # draw from uniform with range(y) theta <- runif(ns, min=min, max=max)} else if (length(theta.start)==ns) theta <- theta.start else if (length(theta.start)!=ns) { stop("Error: theta starting values are not conformable.\n") } return(theta) } ## initial values of tau in MCMCpoissonChange "tau.initial" 
<- function(y, tot.comp){ tau <- rep(NA, tot.comp) lambda.t <- 0.1 count <- 0 for (t in 1:length(y)){ nt <- y[t] if (nt==0) { taut <- 1 + rexp(1, lambda.t) count <- count + nt + 1 } else{ ut <- runif(nt) uorder <- c(0, sort(ut)) tau.tj <- diff(uorder) sum.tau.tj <- sum(tau.tj) tau.last<- 1 - sum.tau.tj + rexp(1, y[t]) count <- count + nt + 1 taut <- c(tau.tj, tau.last) } tau[(count-nt):count] <- taut } return(tau) } ## beta starting values in MCMCpoissonChange() "beta.change.start"<- function (beta.start, ns, k, formula, family, data){ ## if a user does not specify beta.start, use a coefficient vector from mle if (is.na(beta.start[1])) { b0 <- coef(glm(formula, family = family, data = data)) beta.start <- matrix(rep(b0, ns), ns, k, byrow=TRUE) } ## if beta.start is scalar or k by 1 vector, repeat this else if (is.null(dim(beta.start))&&length(beta.start)<=k) { beta.start <- beta.start * matrix(1, ns, k) ## this alternates beta.start if beta.start is not a scalar } ## if the length of beta.start is same to ns*k, make this as a matrix else if (is.null(dim(beta.start))&&length(beta.start)==ns*k) { beta.start <- matrix(beta.start, ns, k) } else if (is.null(dim(beta.start))&&length(beta.start)>=k) { cat("Error: Starting value for beta not conformable.\n") stop("Please respecify and call ", calling.function(), " again.\n", call. = FALSE) } ## else, report an error message and stop else if (!all(dim(beta.start) == c(ns, k))) { cat("Error: Starting value for beta not conformable.\n") stop("Please respecify and call ", calling.function(), " again.\n", call. = FALSE) } return(beta.start) } ## draw predicted outcomes of intervention analysis plotIntervention <- function(mcmcout, forward = TRUE, start = 1, alpha = 0.05, col = "red", main="", ylab="", xlab=""){ ## pull inputs y <- ts(attr(mcmcout, "y"), start=start) inter <- attr(mcmcout, "intervention") N <- length(y) ## draw a plot plot(y, main=main, ylab=ylab, xlab=xlab) abline(v = time(y)[inter], lty=2) if (forward == TRUE){ yfore <- attr(mcmcout, "yforepred") yfore.mu <- ts(apply(yfore, 2, mean), start=start); yfore.mu[1:(inter-1)] <- NA yfore.upper <- ts(apply(yfore, 2, quantile, probs=(1-alpha/2)), start=start); yfore.upper[1:(inter-1)] <- NA yfore.lower <- ts(apply(yfore, 2, quantile, probs=(alpha/2)), start=start); yfore.lower[1:(inter-1)] <- NA lines(yfore.mu, col=col, lwd=2) lines(yfore.upper, col=col, lty=3) lines(yfore.lower, col=col, lty=3) } else { yback <- attr(mcmcout, "ybackpred") yback.mu <- ts(apply(yback, 2, mean), start=start); yback.mu[(inter+1):N] <- NA yback.upper <- ts(apply(yback, 2, quantile, probs=(1-alpha/2)), start=start); yback.upper[(inter+1):N] <- NA yback.lower <- ts(apply(yback, 2, quantile, probs=(alpha/2)), start=start); yback.lower[(inter+1):N] <- NA lines(yback.mu, col=col, lwd=2) lines(yback.upper, col=col, lty=3) lines(yback.lower, col=col, lty=3) } } ## Example ## pdf(file="Nile_MCMCinter.pdf", width=12, height=4) ## par(mfrow=c(1,3)) ## plotState(ar1, start=1871, main="Hidden Regime Change") ## plotIntervention(ar1, start=1871, main="Forward Analysis", alpha= 0.5, ylab="Nile River flow", xlab="Year") ## plotIntervention(ar1, forward=FALSE, start=1871, main="Backward Analysis", alpha= 0.5, ylab="Nile River flow", xlab="Year") ## dev.off() ## when we compare models in a list BayesFactorList <- function (model.list){ oldM <- length(model.list) zero.marg <- rep(NA, oldM) for (j in 1:oldM) { zero.marg[j] <- ifelse(attr(model.list[[j]], "logmarglike") == 0, 1, 0) } new.model.list <- model.list[c(which(zero.marg == 
0))] M <- length(new.model.list) out <- matrix(NA, M, 2) BF <- rep(NA, M) for (j in 1:M) { BF[j] <- attr(new.model.list[[j]], "logmarglike") out[j, 1] <- BF[j] } if (sum(exp(BF) == 0)){ ## if log like is too small, add some constants BF <- BF + abs(BF[1]) } prob <- exp(BF)/sum(exp(BF)) out[, 2] <- prob marker <- which(zero.marg == 0) rownames(out) <- names(model.list)[marker] return(out) } ## consecutive geweke diag test geweke.test <- function(output.list, z.value=1.96){ n <- length(output.list) result <- rep(NA, n) cat("\n --------------------------------- ") for (i in 1:n){ if(sum(abs(geweke.diag(output.list[[i]])$z) > z.value)>0){ cat("\n Non-convergence for model ", i) result[i] <- "Fail" } else { result[i] <- "Pass" } } cat("\n --------------------------------- \n") return(result) } ## outputlist <- list(ar0, ar1a, ar2a, ar1f, ar2f, ar1r, ar2r, tr0, tr1a, tr2a, tr1f, tr2f, tr1r, tr2r) ## conv <- geweke.test(outputlist) ## consecutive heidel Heidelberger and Welch's convergence diagnostic test heidel.test <- function(output.list, p.value=0.05){ n <- length(output.list) result <- rep(NA, n) cat("\n --------------------------------- ") for (i in 1:n){ print(i) plist1 <- heidel.diag(output.list[[i]], pvalue=p.value)[,1] plist2 <- heidel.diag(output.list[[i]], pvalue=p.value)[,4] if(sum(c(plist1, plist2) == 0)>0){ cat("\n Non-convergence for model ", i) result[i] <- "Fail" } else { result[i] <- "Pass" } } cat("\n --------------------------------- \n") return(result) } ## outputlist <- list(ar0, ar1a, ar2a, ar1f, ar2f, ar1r, ar2r, tr0, tr1a, tr2a, tr1f, tr2f, tr1r, tr2r) ## conv <- geweke.test(outputlist) MCMCpack/R/BayesFactors.R0000644000176000001440000001721512133644103014574 0ustar ripleyusers########################################################################## ## BayesFactor.R contains functions useful for calculating and comparing ## marginal likelihoods ## ## This software is distributed under the terms of the GNU GENERAL ## PUBLIC LICENSE Version 2, June 1991. See the package LICENSE ## file for more information. ## ## Originally written by KQ 1/26/2006 ## ## Copyright (C) 2003-2007 Andrew D. Martin and Kevin M. Quinn ## Copyright (C) 2007-present Andrew D. Martin, Kevin M. 
Quinn, ## and Jong Hee Park ########################################################################## ## log densities "logdinvgamma" <- function(sigma2, a, b){ logf <- a * log(b) - lgamma(a) + -(a+1) * log(sigma2) + -b/sigma2 return(logf) } "logdmvnorm" <- function(theta, mu, Sigma){ d <- length(theta) logf <- -0.5*d * log(2*pi) - 0.5*log(det(Sigma)) - 0.5 * t(theta - mu) %*% solve(Sigma) %*% (theta - mu) return(logf) } ## log posterior densities "logpost.regress" <- function(theta, y, X, b0, B0, c0, d0){ n <- length(y) k <- ncol(X) beta <- theta[1:k] sigma2 <- exp(theta[k+1]) Sigma <- solve(B0) loglike <- sum(dnorm(y, X%*%beta, sqrt(sigma2), log=TRUE)) ## note the change to the prior for sigma2 b/c of the transformation logprior <- logdinvgamma(sigma2, c0/2, d0/2) + theta[k+1] + logdmvnorm(beta, b0, Sigma) return (loglike + logprior) } "logpost.probit" <- function(theta, y, X, b0, B0){ n <- length(y) k <- ncol(X) beta <- theta p <- pnorm(X %*% beta) Sigma <- solve(B0) loglike <- sum( y * log(p) + (1-y)*log(1-p) ) logprior <- logdmvnorm(beta, b0, Sigma) return (loglike + logprior) } "logpost.logit" <- function(theta, y, X, b0, B0){ n <- length(y) k <- ncol(X) beta <- theta eta <- X %*% beta p <- 1.0/(1.0+exp(-eta)) Sigma <- solve(B0) loglike <- sum( y * log(p) + (1-y)*log(1-p) ) logprior <- logdmvnorm(beta, b0, Sigma) return (loglike + logprior) } "logpost.logit.userprior" <- function(theta, y, X, userfun, logfun, my.env){ n <- length(y) k <- ncol(X) beta <- theta eta <- X %*% beta p <- 1.0/(1.0+exp(-eta)) loglike <- sum( y * log(p) + (1-y)*log(1-p) ) if (logfun){ logprior <- eval(userfun(theta), envir=my.env) } else{ logprior <- log(eval(userfun(theta), envir=my.env)) } return (loglike + logprior) } "logpost.poisson" <- function(theta, y, X, b0, B0){ n <- length(y) k <- ncol(X) beta <- theta eta <- X %*% beta lambda <- exp(eta) Sigma <- solve(B0) loglike <- sum(dpois(y, lambda, log=TRUE)) logprior <- logdmvnorm(beta, b0, Sigma) return (loglike + logprior) } ## functions for working with BayesFactor objects "BayesFactor" <- function(...){ model.list <- list(...) 
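  ## (comment added for clarity) model.list holds the mcmc objects passed via
  ## '...'; the code below recovers each argument's name by deparsing the
  ## matched call, so that the rows and columns of the Bayes factor matrix can
  ## be labelled with the model names the user supplied.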
M <- length(model.list) #model.names <- paste("model", 1:M, sep="") this.call <- match.call() this.call.string <- deparse(this.call) this.call.string <- strsplit(this.call.string, "BayesFactor\\(") this.call.string <- this.call.string[[1]][length(this.call.string[[1]])] this.call.string <- strsplit(this.call.string, ",") model.names <- NULL for (i in 1:M){ model.names <- c(model.names, this.call.string[[1]][i]) } model.names <- gsub(")", "", model.names) model.names <- gsub(" ", "", model.names) for (i in 1:M){ if (!is.mcmc(model.list[[i]])){ stop("argument not of class mcmc\n") } } BF.mat <- matrix(NA, M, M) BF.log.mat <- matrix(NA, M, M) rownames(BF.mat) <- colnames(BF.mat) <- rownames(BF.log.mat) <- colnames(BF.log.mat) <- model.names BF.logmarglike <- array(NA, M, dimnames=model.names) BF.call <- NULL for (i in 1:M){ BF.logmarglike[i] <- attr(model.list[[i]], "logmarglike") BF.call <- c(BF.call, attr(model.list[[i]], "call")) for (j in 1:M){ if (identical(attr(model.list[[i]], "y"), attr(model.list[[j]], "y"))){ BF.log.mat[i,j] <- attr(model.list[[i]], "logmarglike") - attr(model.list[[j]], "logmarglike") BF.mat[i,j] <- exp(BF.log.mat[i,j]) } else{ warning(paste(model.names[i], " and ", model.names[j], " do not have exactly identical y data.\nBayes factors are not defined.\n", sep="")) } } } return(structure(list(BF.mat=BF.mat, BF.log.mat=BF.log.mat, BF.logmarglike=BF.logmarglike, BF.call=BF.call), class="BayesFactor")) } "is.BayesFactor" <- function(BF){ return(class(BF) == "BayesFactor") } "print.BayesFactor" <- function(x, ...){ cat("The matrix of Bayes Factors is:\n") print(x$BF.mat, digits=3) cat("\nThe matrix of the natural log Bayes Factors is:\n") print(x$BF.log.mat, digits=3) M <- length(x$BF.call) for (i in 1:M){ cat("\n", rownames(x$BF.mat)[i], ":\n") cat(" call = \n") print(x$BF.call[[i]]) cat("\n log marginal likelihood = ", x$BF.logmarglike[i], "\n\n") } } "summary.BayesFactor" <- function(object, ...){ cat("The matrix of Bayes Factors is:\n") print(object$BF.mat, digits=3) cat("\nThe matrix of the natural log Bayes Factors is:\n") print(object$BF.log.mat, digits=3) BF.mat.NA <- object$BF.mat diag(BF.mat.NA) <- NA minvec <- apply(BF.mat.NA, 1, min, na.rm=TRUE) best.model <- which.max(minvec) if (minvec[best.model] > 150){ cat("\nThere is very strong evidence to support", rownames(object$BF.mat)[best.model], "over\nall other models considered.\n") } else if(minvec[best.model] > 20){ cat("\nThere is strong evidence or better to support", rownames(object$BF.mat)[best.model], "over\nall other models considered.\n") } else if(minvec[best.model] > 3){ cat("\nThere is positive evidence or better to support", rownames(object$BF.mat)[best.model], "over\nall other models considered.\n") } else { cat("\nThe evidence to support", rownames(object$BF.mat)[best.model], "over all\nother models considered is worth no more\n than a bare mention.\n") } cat("\n\nStrength of Evidence Guidelines\n(from Kass and Raftery, 1995, JASA)\n") cat("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n") cat("2log(BF[i,j]) BF[i,j] Evidence Against Model j\n") cat("------------------------------------------------------------\n") cat(" 0 to 2 1 to 3 Not worth more than a\n") cat(" bare mention\n") cat(" 2 to 6 3 to 20 Positive\n") cat(" 6 to 10 20 to 150 Strong\n") cat(" >10 >150 Very Strong\n") cat("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n\n") M <- length(object$BF.call) for (i in 1:M){ cat("\n", rownames(object$BF.mat)[i], ":\n") cat(" call = \n") 
print(object$BF.call[[i]]) cat("\n log marginal likelihood = ", object$BF.logmarglike[i], "\n\n") } } "PostProbMod" <- function(BF, prior.probs=1){ if (!is.BayesFactor(BF)){ stop("BF is not of class BayesFactor\n") } M <- length(BF$BF.call) if (min(prior.probs) <= 0){ stop("An element of prior.probs is non-positive\n") } prior.probs <- rep(prior.probs, M)[1:M] prior.probs <- prior.probs / sum(prior.probs) lognumer <- BF$BF.logmarglike + log(prior.probs) maxlognumer <- max(lognumer) logpostprobs <- array(NA, M) denom <- 0 for (i in 1:M){ denom <- denom + exp(lognumer[i]-maxlognumer) } logdenom <- log(denom) for (i in 1:M){ logpostprobs[i] <- (lognumer[i] - maxlognumer) - logdenom } postprobs <- exp(logpostprobs) names(postprobs) <- rownames(BF$BF.mat) return(postprobs) } MCMCpack/R/automate.R0000644000176000001440000003133512133644103014025 0ustar ripleyusers########################################################################## ## this function automates the Scythe C++ call making book-keeping ## much easier ## ## output.object: name of posterior sample that will be placed ## in the parent environment (string) ## ## cc.fun.name: name of the C++ function to be called (string) ## ## package: name of package (string, "MCMCpack" by default) ## ## developer: option that determines whether the R call and ## C++ template are echoed or whether the Scythe ## call is made (logical) ## ## help.file: option that determines whether a template of a ## helpfile for the calling R function should be ## generated (logical) ## ## cc.file: the file used to store the C++ template ## (string, output to the screen if "") ## ## R.file: the file used to store the clean R template ## (string, output to the screen if "") ## this is just the function call indented nicely ## ## ...: list of objects passed to C++ ## NOTE: this will only take integers (which have ## to be coerced), doubles, and matrices. They ## should all be of the form "X = X," with the first ## part the C++ name and the second part the R name. ## Remember that C++ names cannot have periods in them. ## Matrices will be, for example, Xdata, Xrow, and ## Xcol. ## ## Any objects that are changed in the C++ code ## need to have C++ names like sample.nonconst. All ## *.nonconst have the tag stripped and a pointer ## is used to change values. This is used in all models ## for the posterior density sample (sample.nonconst), ## and other quantities of interest. ## ## This also build a skeleton C++ template and clean R template ## for MCMCpack if developer=TRUE. ## ## This software is distributed under the terms of the GNU GENERAL ## PUBLIC LICENSE Version 2, June 1991. See the package LICENSE ## file for more information. ## ## Updated by ADM and KQ 1/25/2006 (to allow for multiple nonconsts) ## ## Copyright (C) 2003-2007 Andrew D. Martin and Kevin M. Quinn ## Copyright (C) 2007-present Andrew D. Martin, Kevin M. Quinn, ## and Jong Hee Park ########################################################################## "auto.Scythe.call" <- function(output.object, cc.fun.name, package="MCMCpack", developer=FALSE, help.file=FALSE, cc.file="", R.file="", ...) { # pull stuff from function call objects <- list(...) 
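  ## (comment added for clarity) 'objects' holds the name = value pairs that
  ## will be passed to .C(); per the header above, names ending in '.nonconst'
  ## flag quantities the C++ code is allowed to overwrite (e.g. the posterior
  ## sample), and that suffix is stripped from the C-side names below.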
K <- length(objects) c.names <- names(objects) nonconst.indic <- rep(FALSE, K) test <- grep("\\.nonconst$", c.names) if(length(test)==0) stop("something must be declared non-constant in auto.Scythe.call()\n") nonconst.indic[test] <- TRUE c.names <- sub("\\.nonconst$", "", c.names) if(length(unique(c.names)) != K) stop("non-unique nonconst names passed in auto.Scythe.call()\n") R.names <- strsplit(toString(match.call()), ",")[[1]] R.names <- R.names[(length(R.names)-K+1):length(R.names)] ## put default values in for burnin, mcmc, thin, and verbose ## if not explicitly supplied burnin.exist <- mcmc.exist <- thin.exist <- seed.exist <- verbose.exist <- FALSE for (k in 1:K){ if(c.names[[k]]=="burnin" & is.integer(objects[[k]])) burnin.exist <- TRUE if(c.names[[k]]=="mcmc" & is.integer(objects[[k]])) mcmc.exist <- TRUE if(c.names[[k]]=="thin" & is.integer(objects[[k]])) thin.exist <- TRUE if(c.names[[k]]=="verbose" & is.null(dim(objects[[k]]))) verbose.exist <- TRUE } if (!burnin.exist){ objects <- c(objects, burnin=as.integer(5000)) R.names[length(objects)] <- "as.integer(5000)" } if (!mcmc.exist){ objects <- c(objects, mcmc=as.integer(25000)) R.names[length(objects)] <- "as.integer(25000)" } if (!thin.exist){ objects <- c(objects, thin=as.integer(1)) R.names[length(objects)] <- "as.integer(1)" } if (!verbose.exist){ objects <- c(objects, verbose=as.integer(FALSE)) R.names[length(objects)] <- "as.integer(FALSE)" } ## check parameters check.mcmc.parameters(objects$burnin, objects$mcmc, objects$thin) ## write out a template R help file callfun <- strsplit(toString(sys.call(which=-1)),",")[[1]][1] if (help.file){ prompt.call <- paste("prompt(", callfun, ", file =\"", paste(callfun, ".template.Rd\"", sep=""), ")") eval(parse(text=prompt.call)) } ## ## pull together R call ## # strings for R call start <- paste(".C('", cc.fun.name, "',", sep="") end <- paste("PACKAGE='", package, "')", sep="") middle <- NULL for(k in 1:K) { if(is.double(objects[[k]]) & is.null(dim(objects[[k]]))) { if (regexpr("as.double", R.names[[k]])==-1){ holder <- paste(c.names[[k]], " = as.double(", R.names[[k]], "),", sep="") middle <- c(middle, holder) } else { holder <- paste(c.names[[k]], " =", R.names[[k]], ",", sep="") middle <- c(middle, holder) } } else if(is.integer(objects[[k]]) & is.null(dim(objects[[k]]))) { if (regexpr("as.integer", R.names[[k]])==-1){ holder <- paste(c.names[[k]], " = as.integer(", R.names[[k]], "),", sep="") middle <- c(middle, holder) } else { holder <- paste(c.names[[k]], " =", R.names[[k]], ",", sep="") middle <- c(middle, holder) } } else if(is.matrix(objects[[k]])) { holder.data <- paste(c.names[[k]], "data", " = as.double(", R.names[[k]], "),", sep="") holder.row <- paste(c.names[[k]], "row", " =", " nrow(", R.names[[k]], "),", sep="") holder.col <- paste(c.names[[k]], "col", " =", " ncol(", R.names[[k]], "),", sep="") middle <- c(middle, holder.data, holder.row, holder.col) } else { stop("Integers, doubles, or matrices only to auto.Scythe.call().") } } # clean up and return R call middle <- paste(middle, sep=" ", collapse=" ") call <- paste(start, middle, end, sep=" ") call <- gsub('\\( ', '\\(', call) ## ## pull together C++ call ## # strings for C++ call c.start <- paste("void ", cc.fun.name, "(", sep="") c.end <- ")" c.middle <- NULL together.call <- NULL for(k in 1:K) { if(is.double(objects[[k]]) & is.null(dim(objects[[k]]))) { holder <- paste("double *", c.names[[k]], ",", sep="") if(!nonconst.indic[k]) holder <- paste("const ", holder, sep="") c.middle <- c(c.middle, holder) } else 
if(is.integer(objects[[k]]) & is.null(dim(objects[[k]]))) { holder <- paste("int *", c.names[[k]], ",", sep="") if(!nonconst.indic[k]) holder <- paste("const ", holder, sep="") c.middle <- c(c.middle, holder) } else if(is.matrix(objects[[k]])) { holder.data <- paste("double *", c.names[[k]], "data,", sep="") if(!nonconst.indic[k]) holder.data <- paste("const ", holder.data, sep="") scythe <- paste(" Matrix ", c.names[[k]], " = r2scythe(*", c.names[[k]], "row, *", c.names[[k]], "col, ", c.names[[k]], "data);\n", sep="") together.call <- paste(together.call, scythe, sep="") holder.row <- paste("const int *", c.names[[k]], "row,", sep="") holder.col <- paste("const int *", c.names[[k]], "col,", sep="") c.middle <- c(c.middle, holder.data, holder.row, holder.col) } } # clean up and print C++ function call c.middle <- paste(c.middle, sep=" ", collapse=" ") c.call <- paste(c.start, c.middle, c.end, sep="") c.call <- gsub(',)', ')', c.call) # if developer dump Scythe code to file, R function to screen, and evaluate if(developer) { comment.block <- paste("// ", cc.file, " DESCRIPTION HERE\n//\n// The initial version of this file was generated by the\n// auto.Scythe.call() function in the MCMCpack R package\n// written by:\n//\n// Andrew D. Martin\n// Dept. of Political Science\n// Washington University in St. Louis\n// admartin@wustl.edu\n//\n// Kevin M. Quinn\n// Dept. of Government\n// Harvard University\n// kevin_quinn@harvard.edu\n// \n// This software is distributed under the terms of the GNU GENERAL\n// PUBLIC LICENSE Version 2, June 1991. See the package LICENSE\n// file for more information.\n//\n// Copyright (C) ", substring(date(),21,24), " Andrew D. Martin and Kevin M. Quinn\n// \n// This file was initially generated on ", date(), "\n// REVISION HISTORY\n\n", sep="") includes.block <- '#include "matrix.h"\n#include "distributions.h"\n#include "stat.h"\n#include "la.h"\n#include "ide.h"\n#include "smath.h"\n#include "MCMCrng.h"\n#include "MCMCfcds.h"\n\n#include // needed to use Rprintf()\n#include // needed to allow user interrupts\n\nusing namespace SCYTHE;\nusing namespace std;\n\n' main.block <- 'extern "C" {\n\n // BRIEF FUNCTION DESCRIPTION\n' function.call <- paste(' ', c.call, ' {\n', sep="") together.block <- " \n // pull together Matrix objects\n // REMEMBER TO ACCESS PASSED ints AND doubles PROPERLY\n" constants.block <- "\n // define constants\n const int tot_iter = *burnin + *mcmc; // total number of mcmc iterations\n const int nstore = *mcmc / *thin; // number of draws to store\n const int NUMBER_OF_PARAMETERS = ????; // YOU NEED TO FILL THIS IN\n" storage.block <- "\n // storage matrix or matrices\n Matrix STORAGEMATRIX(nstore, NUMBER_OF_PARAMETERS);\n" seed.block <- "\n // initialize rng stream\n rng *stream = MCMCpack_get_rng(*lecuyer, seedarray, *lecuyerstream);\n" startval.block <- "\n // set starting values\n PARAMETER_BLOCK1 = ????;\n PARAMETER_BLOCK2 = ????;\n ETC.;\n" sample.call <- paste('\n ///// MCMC SAMPLING OCCURS IN THIS FOR LOOP\n for(int iter = 0; iter < tot_iter; ++iter){\n\n // sample the parameters\n PARAMETER_BLOCK1 = ????;\n PARAMETER_BLOCK2 = ????;\n ETC;\n\n // store draws in storage matrix (or matrices)\n if(iter >= *burnin && (iter % *thin == 0)){\n // PUT DRAWS IN STORAGEMATRIX HERE\n }\n\n // print output to stdout\n if(*verbose == 1 && iter % 500 == 0){\n Rprintf("\\n\\n', cc.fun.name, ' iteration %i of %i \\n", (iter+1), tot_iter);\n // ADD ADDITIONAL OUTPUT HERE IF DESIRED\n }\n\n void R_CheckUserInterrupt(void); // allow user interrupts\n } 
// end MCMC loop\n\n delete stream; // clean up random number stream\n\n // load draws into sample array\n', sep="") end.block <- paste("\n } // end", cc.fun.name, '\n} // end extern "C"\n') if (cc.file == ""){ cat("\n\nThe C++ template file is:\n") } cat(comment.block, includes.block, main.block, function.call, together.block, together.call, constants.block, storage.block, seed.block, startval.block, sample.call, end.block, sep="", file=cc.file) if (cc.file != "") { cat("\nCreated file named '", cc.file, "'.\n", sep="") cat("Edit the file and move it to the appropriate directory.\n") } if (R.file == "") { cat("\n\nThe clean R template file is:\n") } dump(callfun, file=R.file) if (R.file != "") { cat("\nCreated file named '", R.file, "'.\n", sep="") cat("Edit the file and move it to the appropriate directory.\n") cat("Do not forget to edit the MCMCpack NAMESPACE file if\n") cat("installing new functions as part of MCMCpack.\n") } cat("\nThe call to .C is:\n") draw.sample.call <- parse(text=paste(output.object, " <- ", call)) print(draw.sample.call) cat("\nAUTOMATIC TEMPLATE FILE CREATION SUCCEEDED.\n") cat("All created templated files placed in ", getwd(), ".\n", sep="") invokeRestart("abort") } # if not developer evaluate call leaving output.object in # parent environment if(!developer) { draw.sample.call <- parse(text=paste(output.object, " <- ", call)) eval(draw.sample.call, envir=parent.frame(1)) } } MCMCpack/NAMESPACE0000644000176000001440000000306212133644110013073 0ustar ripleyusersuseDynLib(MCMCpack) import(coda) import(MASS) import(stats) export( BayesFactor, choicevar, ddirichlet, dinvgamma, diwish, dnoncenhypergeom, dtomogplot, dwish, HMMpanelFE, HMMpanelRE, MCbinomialbeta, MCpoissongamma, MCnormalnormal, MCmultinomdirichlet, MCMCbinaryChange, MCMCdynamicEI, MCMCdynamicIRT1d, MCMCfactanal, MCMChierEI, MCMChlogit, MCMChpoisson, MCMChregress, MCMCintervention, MCMCirt1d, MCMCirtHier1d, MCMCirtKd, MCMCirtKdHet, MCMCirtKdRob, MCMClogit, MCMCmetrop1R, MCMCmixfactanal, MCMCmnl, MCMCoprobit, MCMCoprobitChange, MCMCordfactanal, MCMCpoisson, MCMCpoissonChange, MCMCprobit, MCMCprobitChange, MCMCquantreg, MCMCregress, MCMCregressChange, MCMCSVDreg, MCMCtobit, make.breaklist, mptable, plotState, plotChangepoint, plotIntervention, PostProbMod, procrustes, rdirichlet, read.Scythe, rinvgamma, riwish, rnoncenhypergeom, rwish, SSVSquantreg, testpanelGroupBreak, testpanelSubjectBreak, tomogplot, topmodels, vech, write.Scythe, xpnd ) S3method(print, BayesFactor) S3method(summary, BayesFactor) S3method(plot, qrssvs) S3method(print, qrssvs) S3method(print, summary.qrssvs) S3method(summary, qrssvs) MCMCpack/man/0000755000176000001440000000000012140061656012434 5ustar ripleyusersMCMCpack/man/xpnd.Rd0000644000176000001440000000204712133644110013671 0ustar ripleyusers\name{xpnd} \alias{xpnd} \title{Expand a Vector into a Symmetric Matrix} \description{ This function takes a vector of appropriate length (typically created using \code{vech}) and creates a symmetric matrix. } \usage{ xpnd(x, nrow) } \arguments{ \item{x}{A list of elements to expand into symmetric matrix.} \item{nrow}{The number of rows (and columns) in the returned matrix. Look into the details.} } \value{ An \eqn{(nrows \times nrows)}{(nrows * nrows)} symmetric matrix. } \details{ This function is particularly useful when dealing with variance covariance matrices. Note that R stores matrices in column major order, and that the items in \code{x} will be recycled to fill the matrix if need be. 
The number of rows can be specified or automatically computed from the number of elements in a given object via \eqn{(-1 + \sqrt{(1 + 8 * length(x))}) / 2}. } \examples{ xpnd(c(1,2,3,4,4,5,6,7,8,9),4) xpnd(c(1,2,3,4,4,5,6,7,8,9)) } \keyword{manip} \concept{triangular} \seealso{\code{\link{vech}}} MCMCpack/man/writescythe.Rd0000644000176000001440000000203712133644110015271 0ustar ripleyusers\name{write.Scythe} \alias{write.Scythe} \title{Write a Matrix to a File to be Read by Scythe} \description{ This function writes a matrix to an ASCII file that can be read by the Sycthe Statistical Library. Scythe requires that input files contain the number of rows and columns in the first row, followed by the data. } \usage{ write.Scythe(outmatrix, outfile=NA, overwrite=FALSE) } \arguments{ \item{outmatrix}{The matrix to be written to a file.} \item{outfile}{The file to be written. This can include path information.} \item{overwrite}{A logical that determines whether an existing file should be over-written. By default, it protects the user from over-writing existing files.} } \value{ A zero if the file is properly written. } \examples{ \dontrun{ write.Scythe(mymatrix, "myfile.txt") } } \references{ Daniel Pemstein, Kevin M. Quinn, and Andrew D. Martin. 2007. \emph{Scythe Statistical Library 1.0.} \url{http://scythe.wustl.edu}. } \keyword{file} \seealso{\code{\link{write.Scythe}}} MCMCpack/man/wishart.Rd0000644000176000001440000000152612133644110014402 0ustar ripleyusers\name{Wishart} \alias{dwish} \alias{rwish} \alias{Wishart} \title{The Wishart Distribution} \description{ Density function and random generation from the Wishart distribution. } \usage{ dwish(W, v, S) rwish(v, S) } \arguments{ \item{W}{Positive definite matrix W \eqn{(p \times p)}{(p x p)}.} \item{v}{Degrees of freedom (scalar).} \item{S}{Inverse scale matrix \eqn{(p \times p)}{(p x p)}.} } \value{ \code{dwish} evaluates the density at positive definite matrix W. \code{rwish} generates one random draw from the distribution. } \details{ The mean of a Wishart random variable with \code{v} degrees of freedom and inverse scale matrix \code{S} is \eqn{vS}{vS}. } \examples{ density <- dwish(matrix(c(2,-.3,-.3,4),2,2), 3, matrix(c(1,.3,.3,1),2,2)) draw <- rwish(3, matrix(c(1,.3,.3,1),2,2)) } \keyword{distribution} MCMCpack/man/vech.Rd0000644000176000001440000000135312133644110013644 0ustar ripleyusers\name{vech} \alias{vech} \title{Extract Lower Triangular Elements from a Symmetric Matrix} \description{ This function takes a symmetric matrix and extracts a list of all lower triangular elements. } \usage{ vech(x) } \arguments{ \item{x}{A symmetric matrix.} } \value{ A list of the lower triangular elements. } \details{ This function checks to make sure the matrix is square, but it does not check for symmetry (it just pulls the lower triangular elements). The elements are stored in column major order. The original matrix can be restored using the \code{xpnd} command. 
} \examples{ symmat <- matrix(c(1,2,3,4,2,4,5,6,3,5,7,8,4,6,8,9),4,4) vech(symmat) } \keyword{manip} \seealso{\code{\link{xpnd}}} MCMCpack/man/topmodels.Rd0000644000176000001440000000236312133644110014727 0ustar ripleyusers\name{topmodels} \alias{topmodels} \title{Shows an ordered list of the most frequently visited models sampled during quantile regression stochastic search variable selection (QR-SSVS).} \description{Given output from quantile regression stochastic search variable selection, this function returns a table of the 'best' models together with their associated empirical posterior probability.} \usage{ topmodels(qrssvs, nmodels=5, abbreviate=FALSE, minlength=3) } \arguments{ \item{qrssvs}{An object of class \code{qrssvs}. Typically this will be the \code{gamma} component of the list returned by \code{SSVSquantreg}.} \item{nmodels}{The number of models to tabulate.} \item{abbreviate}{Logical: should the names of the predictors be abbreviated?} \item{minlength}{If \code{abbreviate} is set to \code{TRUE}, the minimum length of the abbreviations. } } \value{A table with the models and their associated posterior probability. The models are arranged in descending order of probability.} \author{Craig Reed} \examples{ \dontrun{ set.seed(1) epsilon<-rnorm(100) set.seed(2) x<-matrix(rnorm(1000),100,10) y<-x[,1]+x[,10]+epsilon qrssvs<-SSVSquantreg(y~x) topmodels(qrssvs$gamma) } } \keyword{models} \seealso{ \code{\link[MCMCpack]{SSVSquantreg}}} MCMCpack/man/tomog.Rd0000644000176000001440000000467712133644110014060 0ustar ripleyusers\name{tomogplot} \alias{tomogplot} \title{Tomography Plot} \description{ tomogplot is used to produce a tomography plot (see King, 1997) for a series of partially observed 2 x 2 contingency tables. } \usage{ tomogplot(r0, r1, c0, c1, xlab="fraction of r0 in c0 (p0)", ylab="fraction of r1 in c0 (p1)", bgcol="white", ...) } \arguments{ \item{r0}{An \eqn{(ntables \times 1)}{(ntables * 1)} vector of row sums from row 0.} \item{r1}{An \eqn{(ntables \times 1)}{(ntables * 1)} vector of row sums from row 1.} \item{c0}{An \eqn{(ntables \times 1)}{(ntables * 1)} vector of column sums from column 0.} \item{c1}{An \eqn{(ntables \times 1)}{(ntables * 1)} vector of column sums from column 1.} \item{xlab}{The x axis label for the plot.} \item{ylab}{The y axis label for the plot.} \item{bgcol}{The background color for the plot.} \item{...}{further arguments to be passed} } \details{ Consider the following partially observed 2 by 2 contingency table:\cr \cr \tabular{llll}{ \tab | \eqn{Y=0} \tab | \eqn{Y=1} \tab | \cr - - - - - \tab - - - - - \tab - - - - - \tab - - - - - \cr \eqn{X=0} \tab | \eqn{Y_0}{Y0} \tab | \tab | \eqn{r_0}{r0}\cr - - - - - \tab - - - - - \tab - - - - - \tab - - - - - \cr \eqn{X=1} \tab | \eqn{Y_1}{Y1} \tab | \tab | \eqn{r_1}{r1}\cr - - - - - \tab - - - - - \tab - - - - - \tab - - - - - \cr \tab | \eqn{c_0}{c0} \tab | \eqn{c_1}{c1} \tab | \eqn{N}\cr } where \eqn{r_0}{r0}, \eqn{r_1}{r1}, \eqn{c_0}{c0}, \eqn{c_1}{c1}, and \eqn{N} are non-negative integers that are observed. The interior cell entries are not observed. It is assumed that \eqn{Y_0|r_0 \sim \mathcal{B}inomial(r_0, p_0)}{Y0|r0 ~ Binomial(r0, p0)} and \eqn{Y_1|r_1 \sim \mathcal{B}inomial(r_1, p_1)}{Y1|r1 ~ Binomial(r1,p1)}. This function plots the bounds on the maximum likelihood estimatess for (p0, p1). } \keyword{hplot} \references{ Gary King, 1997. \emph{A Solution to the Ecological Inference Problem}. Princeton: Princeton University Press. Jonathan C. Wakefield. 2004. 
``Ecological Inference for 2 x 2 Tables.'' \emph{Journal of the Royal Statistical Society, Series A}. 167(3): 385445. } \examples{ r0 <- rpois(100, 500) r1 <- rpois(100, 200) c0 <- rpois(100, 100) c1 <- (r0 + r1) - c0 tomogplot(r0, r1, c0, c1) } \seealso{\code{\link{MCMChierEI}}, \code{\link{MCMCdynamicEI}}, \code{\link{dtomogplot}} } MCMCpack/man/testpanelSubjectBreak.Rd0000644000176000001440000001467312133644110017214 0ustar ripleyusers\name{testpanelSubjectBreak} \alias{testpanelSubjectBreak} \title{A Test for the Subject-level Break using a Unitivariate Linear Regression Model with Breaks} \description{testpanelSubjectBreak fits a unitivariate linear regression model with parametric breaks using panel residuals to test the existence of subject-level breaks in panel residuals. The details are discussed in Park (2011).} \usage{testpanelSubjectBreak(subject.id, time.id, resid, max.break=2, minimum = 10, mcmc=1000, burnin=1000, thin=1, verbose=0, b0, B0, c0, d0, a = NULL, b = NULL, seed = NA, Time = NULL, ps.out = FALSE)} \arguments{ \item{subject.id}{A numeric vector indicating the group number. It should start from 1.} \item{time.id}{A numeric vector indicating the time unit. It should start from 1.} \item{resid}{A vector of panel residuals.} \item{max.break}{An upper bound of break numbers for the test.} \item{minimum}{A minimum length of time series for the test. The test will skip a subject with a time series shorter than this.} \item{mcmc}{The number of MCMC iterations after burn-in.} \item{burnin}{The number of burn-in iterations for the sampler.} \item{thin}{The thinning interval used in the simulation. The number of MCMC iterations must be divisible by this value.} \item{verbose}{A switch which determines whether or not the progress of the sampler is printed to the screen. If \code{verbose} is greater than 0, the iteration number and the posterior density samples are printed to the screen every \code{verbose}th iteration.} \item{b0}{The prior mean of the residual mean.} \item{B0}{The prior precision of the residual variance} \item{c0}{\eqn{c_0/2}{c0/2} is the shape parameter for the inverse Gamma prior on \eqn{\sigma^2}{sigma^2}. The amount of information in the inverse Gamma prior is something like that from \eqn{c_0}{c0} pseudo-observations.} \item{d0}{\eqn{d_0/2}{d0/2} is the scale parameter for the inverse Gamma prior on \eqn{\sigma^2}{sigma^2}.} \item{a}{\eqn{a}{a} is the shape1 beta prior for transition probabilities. By default, the expected duration is computed and corresponding a and b values are assigned. The expected duration is the sample period divided by the number of states.} \item{b}{\eqn{b}{b} is the shape2 beta prior for transition probabilities. By default, the expected duration is computed and corresponding a and b values are assigned. The expected duration is the sample period divided by the number of states.} \item{seed}{The seed for the random number generator. If NA, current R system seed is used.} \item{Time}{Times of the observations. This will be used to find the time of the first observations in panel residuals. } \item{ps.out}{If ps.out == TRUE, state probabilities are exported. If the number of panel subjects is huge, users can turn it off to save memory.} \item{...}{further arguments to be passed} } \details{ \code{testpanelSubjectBreak} fits a univariate linear regression model for subject-level residuals from a panel model. The details are discussed in Park (2011). 
The model takes the following form: \deqn{e_{it} = \alpha_{im} + \varepsilon_{it}\;\; m = 1, \ldots, M}{y_it = alpha_im + epsilon_it, m = 1,...,M.} The errors are assumed to be time-varying at the subject level: \deqn{\varepsilon_{it} \sim \mathcal{N}(0, \sigma^2_{im})}{epsilon_it ~ N(0, sigma^2_im)} We assume standard, semi-conjugate priors: \deqn{\beta \sim \mathcal{N}(b_0,B_0^{-1})}{beta ~ N(b0,B0^(-1))} And: \deqn{\sigma^{-2} \sim \mathcal{G}amma(c_0/2, d_0/2)}{sigma^(-2) ~ Gamma(c0/2, d0/2)} Where \eqn{\beta}{beta} and \eqn{\sigma^{-2}}{sigma^(-2)} are assumed \emph{a priori} independent. And: \deqn{p_{mm} \sim \mathcal{B}eta(a, b),\;\; m = 1, \ldots, M}{p_mm ~ Beta(a, b), m = 1,...,M.} Where \eqn{M}{M} is the number of states. OLS estimates are used for starting values. } \author{Jong Hee Park, \email{jhp@uchicago.edu}, \url{http://home.uchicago.edu/~jhp/}.} \value{ The returned object is a matrix containing log marginal likelihoods for all HMMs. The dimension of the returned object is the number of panel subjects by max.break + 1. If psout == TRUE, the returned object has an array attribute \code{psout} containing state probabilities for all HMMs. } \references{ Jong Hee Park, 2011. ``A Unified Method for Dynamic and Cross-Sectional Heterogeneity: Introducing Hidden Markov Panel Models." Working Paper. Siddhartha Chib. 1998. ``Estimation and comparison of multiple change-point models.'' \emph{Journal of Econometrics}. 86: 221-241. } \examples{ \dontrun{ set.seed(1974) N <- 30 T <- 80 NT <- N*T ## true parameter values true.beta <- c(1, 1) true.sigma <- 3 x1 <- rnorm(NT) x2 <- runif(NT, 2, 4) ## group-specific breaks break.point = rep(T/2, N); break.sigma=c(rep(1, N)); break.list <- rep(1, N) X <- as.matrix(cbind(x1, x2), NT, ); y <- rep(NA, NT) id <- rep(1:N, each=NT/N) K <- ncol(X); true.beta <- as.matrix(true.beta, K, 1) ## compute the break probability ruler <- c(1:T) W.mat <- matrix(NA, T, N) for (i in 1:N){ W.mat[, i] <- pnorm((ruler-break.point[i])/break.sigma[i]) } Weight <- as.vector(W.mat) ## draw time-varying individual effects and sample y j = 1 true.sigma.alpha <- 30 true.alpha1 <- true.alpha2 <- rep(NA, N) for (i in 1:N){ Xi <- X[j:(j+T-1), ] true.mean <- Xi \%*\% true.beta weight <- Weight[j:(j+T-1)] true.alpha1[i] <- rnorm(1, 0, true.sigma.alpha) true.alpha2[i] <- -1*true.alpha1[i] y[j:(j+T-1)] <- ((1-weight)*true.mean + (1-weight)*rnorm(T, 0, true.sigma) + (1-weight)*true.alpha1[i]) + (weight*true.mean + weight*rnorm(T, 0, true.sigma) + weight*true.alpha2[i]) j <- j + T } ## extract the standardized residuals from the OLS with fixed-effects FEols <- lm(y ~ X + as.factor(id) -1 ) resid.all <- rstandard(FEols) time.id <- rep(1:80, N) ## model fitting G <- 1000 BF <- testpanelSubjectBreak(subject.id=id, time.id=time.id, resid= resid.all, max.break=3, minimum = 10, mcmc=G, burnin = G, thin=1, verbose=G, b0=0, B0=1/100, c0=2, d0=2, Time = time.id) ## estimated break numbers ## thresho estimated.breaks <- make.breaklist(BF, threshold=3) ## print all posterior model probabilities print(attr(BF, "model.prob")) } } \keyword{models} MCMCpack/man/testpanelGroupBreak.Rd0000644000176000001440000001572312133644110016706 0ustar ripleyusers\name{testpanelGroupBreak} \alias{testpanelGroupBreak} \title{A Test for the Group-level Break using a Multivariate Linear Regression Model with Breaks} \description{testpanelGroupBreak fits a multivariate linear regression model with parametric breaks using panel residuals to test the existence of group-level breaks in panel residuals. 
The details are discussed in Park (2011).} \usage{ testpanelGroupBreak(subject.id, time.id, resid, m=1, mcmc=1000, burnin=1000, thin=1, verbose=0, b0, B0, c0, d0, a = NULL, b = NULL, seed = NA, marginal.likelihood = c("none", "Chib95"), ...)} \arguments{ \item{subject.id}{A numeric vector indicating the group number. It should start from 1.} \item{time.id}{A numeric vector indicating the time unit. It should start from 1.} \item{resid}{A vector of panel residuals} \item{m}{The number of changepoints.} \item{mcmc}{The number of MCMC iterations after burn-in.} \item{burnin}{The number of burn-in iterations for the sampler.} \item{thin}{The thinning interval used in the simulation. The number of MCMC iterations must be divisible by this value.} \item{verbose}{A switch which determines whether or not the progress of the sampler is printed to the screen. If \code{verbose} is greater than 0, the iteration number and the posterior density samples are printed to the screen every \code{verbose}th iteration.} \item{b0}{The prior mean of the residual mean.} \item{B0}{The prior precision of the residual variance} \item{c0}{\eqn{c_0/2}{c0/2} is the shape parameter for the inverse Gamma prior on \eqn{\sigma^2}{sigma^2}. The amount of information in the inverse Gamma prior is something like that from \eqn{c_0}{c0} pseudo-observations.} \item{d0}{\eqn{d_0/2}{d0/2} is the scale parameter for the inverse Gamma prior on \eqn{\sigma^2}{sigma^2}.} \item{a}{\eqn{a}{a} is the shape1 beta prior for transition probabilities. By default, the expected duration is computed and corresponding a and b values are assigned. The expected duration is the sample period divided by the number of states.} \item{b}{\eqn{b}{b} is the shape2 beta prior for transition probabilities. By default, the expected duration is computed and corresponding a and b values are assigned. The expected duration is the sample period divided by the number of states.} \item{seed}{The seed for the random number generator. If NA, current R system seed is used.} \item{marginal.likelihood}{How should the marginal likelihood be calculated? Options are: \code{none} in which case the marginal likelihood will not be calculated and \code{Chib95} in which case the method of Chib (1995) is used.} \item{...}{further arguments to be passed} } \details{ \code{testpanelGroupBreak} fits a multivariate linear regression model with parametric breaks using panel residuals to detect the existence of system-level breaks in unobserved factors as discussed in Park (2011). The model takes the following form: \deqn{e_{i} \sim \mathcal{N}(\beta_{m}, \sigma^2_m I)\;\; m = 1, \ldots, M}{epsilon_i ~ N(beta_m, sigma^2_m I_{k_i}), m = 1,..., M.} We assume standard, semi-conjugate priors: \deqn{\beta \sim \mathcal{N}(b0, B0)}{beta ~ N(b0, B0)} And: \deqn{\sigma^{-2} \sim \mathcal{G}amma(c_0/2, d_0/2)}{sigma^(-2) ~ Gamma(c0/2, d0/2)} Where \eqn{\beta}{beta} and \eqn{\sigma^{-2}}{sigma^(-2)} are assumed \emph{a priori} independent. And: \deqn{p_{mm} \sim \mathcal{B}eta(a, b),\;\; m = 1, \ldots, M}{p_mm ~ Beta(a, b), m = 1,...,M.} Where \eqn{M}{M} is the number of states. } \value{ An mcmc object that contains the posterior sample. This object can be summarized by functions provided by the coda package. The object contains an attribute \code{prob.state} storage matrix that contains the probability of \eqn{state_i}{state_i} for each period, and the log-marginal likelihood of the model (\code{logmarglike}). } \references{ Jong Hee Park, 2011. 
``A Unified Method for Dynamic and Cross-Sectional Heterogeneity: Introducing Hidden Markov Panel Models." Working Paper. Siddhartha Chib. 1998. ``Estimation and comparison of multiple change-point models.'' \emph{Journal of Econometrics}. 86: 221-241. } \author{Jong Hee Park, \email{jhp@uchicago.edu}, \url{http://home.uchicago.edu/~jhp/}.} \examples{ \dontrun{ ## data generating set.seed(1977) Q <- 3 true.beta1 <- c(1, 1, 1) ; true.beta2 <- c(1, -1, -1) true.sigma2 <- c(1, 3); true.D1 <- diag(.5, Q); true.D2 <- diag(2.5, Q) N=20; T=100; NT <- N*T x1 <- rnorm(NT) x2 <- runif(NT, 5, 10) X <- cbind(1, x1, x2); W <- X; y <- rep(NA, NT) ## true break numbers are one and at the center break.point = rep(T/2, N); break.sigma=c(rep(1, N)); break.list <- rep(1, N) id <- rep(1:N, each=NT/N) K <- ncol(X); ruler <- c(1:T) ## compute the weight for the break W.mat <- matrix(NA, T, N) for (i in 1:N){ W.mat[, i] <- pnorm((ruler-break.point[i])/break.sigma[i]) } Weight <- as.vector(W.mat) ## data generating by weighting two means and variances j = 1 for (i in 1:N){ Xi <- X[j:(j+T-1), ] Wi <- W[j:(j+T-1), ] true.V1 <- true.sigma2[1]*diag(T) + Wi\%*\%true.D1\%*\%t(Wi) true.V2 <- true.sigma2[2]*diag(T) + Wi\%*\%true.D2\%*\%t(Wi) true.mean1 <- Xi\%*\%true.beta1 true.mean2 <- Xi\%*\%true.beta2 weight <- Weight[j:(j+T-1)] y[j:(j+T-1)] <- (1-weight)*true.mean1 + (1-weight)*chol(true.V1)\%*\%rnorm(T) + weight*true.mean2 + weight*chol(true.V2)\%*\%rnorm(T) j <- j + T } ## model fitting subject.id <- c(rep(1:N, each=T)) time.id <- c(rep(1:T, N)) resid <- rstandard(lm(y ~X-1 + as.factor(subject.id))) G <- 100 out0 <- testpanelGroupBreak(subject.id, time.id, resid, m=0, mcmc=G, burnin=G, thin=1, verbose=G, b0=0, B0=1/100, c0=2, d0=2, marginal.likelihood = "Chib95") out1 <- testpanelGroupBreak(subject.id, time.id, resid, m=1, mcmc=G, burnin=G, thin=1, verbose=G, b0=0, B0=1/100, c0=2, d0=2, marginal.likelihood = "Chib95") out2 <- testpanelGroupBreak(subject.id, time.id, resid, m=2, mcmc=G, burnin=G, thin=1, verbose=G, b0=0, B0=1/100, c0=2, d0=2, marginal.likelihood = "Chib95") out3 <- testpanelGroupBreak(subject.id, time.id, resid, m=3, mcmc=G, burnin=G, thin=1, verbose=G, b0=0, B0=1/100, c0=2, d0=2, marginal.likelihood = "Chib95") ## Note that the code is for a hypothesis test of no break in panel residuals. ## When breaks exist, the estimated number of break in the mean and variance of panel residuals ## tends to be larger than the number of break in the data generating process. ## This is due to the difference in parameter space, not an error of the code. BayesFactor(out0, out1, out2, out3) ## In order to identify the number of breaks in panel parameters, ## use HMMpanelRE() instead. } } \keyword{models} MCMCpack/man/SupremeCourt.Rd0000644000176000001440000000174512133644110015361 0ustar ripleyusers\name{SupremeCourt} \alias{SupremeCourt} \title{ U.S. Supreme Court Vote Matrix } \description{ This dataframe contains a matrix votes cast by U.S. Supreme Court justices in all cases in the 2000 term. } \usage{ data(SupremeCourt) } \format{ The dataframe has contains data for justices Rehnquist, Stevens, O'Connor, Scalia, Kennedy, Souter, Thomas, Ginsburg, and Breyer for the 2000 term of the U.S. Supreme Court. It contains data from 43 non-unanimous cases. The votes are coded liberal (1) and conservative (0) using the protocol of Spaeth (2003). The unit of analysis is the case citation (ANALU=0). 
We are concerned with formally decided cases issued with written opinions, after full oral argument and cases decided by an equally divided vote (DECTYPE=1,5,6,7). } \source{ Harold J. Spaeth. 2005. \emph{Original United States Supreme Court Database: 1953-2004 Terms.} \url{http://www.as.uky.edu/polisci/ulmerproject/sctdata.htm}. } \keyword{datasets} MCMCpack/man/SSVSquantreg.Rd0000644000176000001440000001553212133644110015270 0ustar ripleyusers\name{SSVSquantreg} \alias{SSVSquantreg} \title{ Stochastic search variable selection for quantile regression } \description{ This function uses stochastic search to select promising regression models at a fixed quantile \eqn{\tau}{tau}. Indicator variables \eqn{\gamma}{gamma} are used to represent whether a predictor is included in the model or not. The user supplies the data and the prior distribution on the model size. A list is returned containing the posterior sample of \eqn{\gamma}{gamma} and the associated regression parameters \eqn{\beta}{beta}.} \usage{ SSVSquantreg(formula, data = NULL, tau = 0.5, include=NULL, burnin = 1000, mcmc = 10000, thin = 1, verbose = 0, seed = sample(1:1000000,1), pi0a0 = 1, pi0b0 = 1, ...) } \arguments{ \item{formula}{ Model formula. } \item{data}{ Data frame. } \item{tau}{The quantile of interest. Must be between 0 and 1. The default value of 0.5 corresponds to median regression model selection.} \item{include}{The predictor(s) that should definitely appear in the model. Can be specified by name, or their position in the formula (taking into account the intercept).} \item{burnin}{ The number of burn-in iterations for the sampler. } \item{mcmc}{ The number of MCMC iterations after burnin. } \item{thin}{The thinning interval used in the simulation. The number of MCMC iterations must be divisible by this value.} \item{verbose}{ A switch which determines whether or not the progress of the sampler is printed to the screen. If \code{verbose} is greater than 0 the iteration number, the most recently sampled values of \eqn{\gamma}{gamma} and the associated values of \eqn{\beta}{beta} are printed to the screen every \code{verbose}th iteration. } \item{seed}{ The seed for the random number generator. If NA, the Mersenne Twister generator is used with default seed 12345; if an integer is passed it is used to seed the Mersenne twister. The default value for this argument is a random integer between 1 and 1,000,000. This default value ensures that if the function is used again with a different value of \eqn{\tau}{tau}, it is extremely unlikely that the seed will be identical. The user can also pass a list of length two to use the L'Ecuyer random number generator, which is suitable for parallel computation. The first element of the list is the L'Ecuyer seed, which is a vector of length six or NA (if NA a default seed of \code{rep(12345,6)} is used). The second element of list is a positive substream number. See the MCMCpack specification for more details. } \item{pi0a0, pi0b0}{Hyperparameters of the beta prior on \eqn{\pi_0}{pi_0}, the prior probability of including a predictor. Default values of (1,1) are equivalent to a uniform distribution.} \item{\dots}{ Further arguments } } \value{ A list containing: \item{gamma}{The posterior sample of \eqn{\gamma}{gamma}. This has associated summary and plot methods.} \item{beta}{The posterior sample of the associated regression parameters \eqn{\beta}{beta}. 
This can be analysed with functions from the coda package.} } \details{ \code{SSVSquantreg} implements stochastic search variable selection over a set of potential predictors to obtain promising models. The models considered take the following form: \deqn{Q_{\tau}(y_i|x_{i\gamma}) = x_{i\gamma} ' \beta_{\gamma},}{Q_tau(y_i|x_igamma) = x_igamma'beta_gamma,} where \eqn{Q_{\tau}(y_i|x_{i\gamma})}{Q_tau(y_i|x_igamma)} denotes the conditional \eqn{\tau}{tau}th quantile of \eqn{y_i}{y_i} given \eqn{x_{i\gamma}}{x_igamma}, \eqn{x_{i\gamma} }{x_igamma} denotes \eqn{x_i}{x_i} with those predictors \eqn{x_{ij}}{x_ij} for which \eqn{\gamma_j=0}{gamma_j=0} removed and \eqn{\beta_{\gamma}}{beta_gamma} denotes the model specific regression parameters. The likelihood is formed based on the assumption of independent asymmetric Laplace distributions on the \eqn{y_i}{y_i} with skewness parameter \eqn{\tau}{tau} and location parameters \eqn{ x_{i\gamma} ' \beta_{\gamma}}{x_igamma'beta_gamma}. This assumption ensures that the likelihood function is maximised by the \eqn{\tau}{tau}th conditional quantile of the response variable. The prior on each \eqn{\beta_j}{beta_j} is \deqn{(1-\gamma_j)\delta_0+\gamma_j\mbox{Cauchy}(0,1),}{(1-gamma_j)delta_0+gamma_jCauchy(0,1),} where \eqn{\delta_0}{delta_0} denotes a degenerate distribution with all mass at 0. A standard Cauchy distribution is chosen conditional on \eqn{\gamma_j=1}{gamma_j=1}. This allows for a wider range of nonzero values of \eqn{\beta_j}{beta_j} than a standard Normal distribution, improving the robustness of the method. Each of the indicator variables \eqn{\gamma_j}{gamma_j} is independently assigned a Bernoulli prior, with prior probability of inclusion \eqn{\pi_0}{pi_0}. This in turn is assigned a beta distribution, resulting in a beta-binomial prior on the model size. The user can supply the hyperparameters for the beta distribution. Starting values are randomly generated from the prior distribution. It is recommended to standardise any non-binary predictors in order to compare these predictors on the same scale. This can be achieved using the \code{scale} function. If it is certain that a predictor should be included, all predictors specified are brought to the first positions for computational convenience. The regression parameters associated with these predictors are given independent improper priors. Users may notice a small speed advantage if they specify the predictors that they feel certain should appear in the model, particularly for large models with a large number of observations. } \author{ Craig Reed} \references{ Craig Reed, David B. Dunson and Keming Yu. 2010. "Bayesian Variable Selection for Quantile Regression" Technical Report. Daniel Pemstein, Kevin M. Quinn, and Andrew D. Martin. 2007. \emph{Scythe Statistical Library 1.2.} \url{http://scythe.wustl.edu}. Keming Yu and Jin Zhang. 2005. "A Three Parameter Asymmetric Laplace Distribution and it's extensions." \emph{Communications in Statistics - Theory and Methods}, 34, 1867-1879. Martyn Plummer, Nicky Best, Kate Cowles, and Karen Vines. 2002. \emph{Output Analysis and Diagnostics for MCMC (CODA)}. 
\url{http://www-fis.iarc.fr/coda/}.} \examples{ \dontrun{ set.seed(1) epsilon<-rnorm(100) set.seed(2) x<-matrix(rnorm(1000),100,10) y<-x[,1]+x[,10]+epsilon qrssvs<-SSVSquantreg(y~x) model.50pc<-SSVSquantreg(y~x) model.90pc<-SSVSquantreg(y~x,tau=0.9) summary(model.50pc) ## Intercept not in median probability model summary(model.90pc) ## Intercept appears in median probability model } } \keyword{models} \seealso{ \code{\link[MCMCpack]{MCMCquantreg}}, \code{\link[MCMCpack]{summary.qrssvs}}, \code{\link[MCMCpack]{plot.qrssvs}}, \code{\link[MCMCpack]{mptable}}, \code{\link[MCMCpack]{topmodels}}, \code{\link[base]{scale}}, \code{\link[quantreg]{rq}}} MCMCpack/man/Senate.Rd0000644000176000001440000000144612133644110014141 0ustar ripleyusers\name{Senate} \alias{Senate} \title{ 106th U.S. Senate Roll Call Vote Matrix } \description{ This dataframe contains a matrix of votes cast by U.S. Senators in the 106th Congress. } \usage{ data(Senate) } \format{ The dataframe contains roll call data for all Senators in the 106th Senate. The first column (id) is the ICPSR member ID number, the second column (statecode) is the ICPSR state code, the third column (party) is the member's political party, and the fourth column (member) is the member's name. This is followed by all roll call votes (including unanimous ones) in the 106th. Nay votes are coded 0, yea votes are coded 1, and NAs are missing votes. } \source{ Keith Poole. 2005. \emph{106th Roll Call Vote Data}. \url{http://voteview.uh.edu/}. } \keyword{datasets} MCMCpack/man/Rehnquist.Rd0000644000176000001440000000210412133644110014674 0ustar ripleyusers\name{Rehnquist} \alias{Rehnquist} \title{ U.S. Supreme Court Vote Matrix, Rehnquist Court (1994-2004) } \description{ This dataframe contains a matrix of votes cast by U.S. Supreme Court justices in all cases in the 1994-2004 terms. } \usage{ data(Rehnquist) } \format{ The dataframe contains data for justices Rehnquist, Stevens, O'Connor, Scalia, Kennedy, Souter, Thomas, Ginsburg, and Breyer for the 1994-2004 terms of the U.S. Supreme Court. The dataframe also contains the term of the case, and a time variable that counts from term 1 to 11. The votes are coded liberal (1) and conservative (0) using the protocol of Spaeth (2003). The unit of analysis is the case citation (ANALU=0). We are concerned with formally decided cases issued with written opinions, after full oral argument and cases decided by an equally divided vote (DECTYPE=1,5,6,7). } \source{ Harold J. Spaeth. 2005. \emph{Original United States Supreme Court Database: 1953-2004 Terms.} \url{http://www.as.uky.edu/polisci/ulmerproject/sctdata.htm}. } \keyword{datasets} MCMCpack/man/readscythe.Rd0000644000176000001440000000143112133644110015047 0ustar ripleyusers\name{read.Scythe} \alias{read.Scythe} \title{Read a Matrix from a File written by Scythe} \description{ This function reads a matrix from an ASCII file in the form produced by the Scythe Statistical Library. Scythe output files contain the number of rows and columns in the first row, followed by the data. } \usage{ read.Scythe(infile=NA) } \arguments{ \item{infile}{The file to be read. This can include path information.} } \value{ A matrix containing the data stored in the read file. } \examples{ \dontrun{ mymatrix <- read.Scythe("myfile.txt") } } \references{ Daniel Pemstein, Kevin M. Quinn, and Andrew D. Martin. 2007. \emph{Scythe Statistical Library 1.0.} \url{http://scythe.wustl.edu}.
} \keyword{file} \seealso{\code{\link{write.Scythe}}} MCMCpack/man/QRSSVSsummary.Rd0000644000176000001440000000266112133644110015401 0ustar ripleyusers\name{summary.qrssvs} \alias{summary.qrssvs} \alias{print.summary.qrssvs} \title{Summarising the results of quantile regression stochastic search variable selection (QR-SSVS).} \description{This function produces a table of predictors and their associated marginal posterior probability of inclusion. It also returns the median probability model (see the details section).} \usage{\method{summary}{qrssvs}(object, \dots)} \arguments{ \item{object}{An object of class \code{qrssvs}. Typically this will be the \code{gamma} component of the list returned by \code{SSVSquantreg}.} \item{\dots}{Further arguments.} } \details{The median probability model is defined to be the model that contains any predictor with marginal posterior probability greater than or equal to 0.5. If the goal is to select a single model e.g. for prediction, Barbieri and Berger (2004) recommend the median probability model. In some cases, this will coincide with the maximum probability model.} \author{Craig Reed} \references{ Maria M. Barbieri, and James O. Berger (2004). "Optimal predictive model selection". \emph{Annals of Statistics}, 32, 870-897. } \examples{ \dontrun{ set.seed(1) epsilon<-rnorm(100) set.seed(2) x<-matrix(rnorm(1000),100,10) y<-x[,1]+x[,10]+epsilon qrssvs<-SSVSquantreg(y~x) summary(qrssvs$gamma) } } \keyword{models} \seealso{ \code{\link[MCMCpack]{SSVSquantreg}}, \code{\link[MCMCpack]{mptable}}, \code{\link[MCMCpack]{topmodels}}} MCMCpack/man/QRSSVSplot.Rd0000644000176000001440000000254312133644110014661 0ustar ripleyusers\name{plot.qrssvs} \alias{plot.qrssvs} \title{Plot output from quantile regression stochastic search variable selection (QR-SSVS).} \description{This function produces a Trellis plot of the predictors on the y-axis versus the marginal posterior probability of inclusion on the x-axis.} \usage{ \method{plot}{qrssvs}(x, \dots) } \arguments{ \item{x}{An object of class \code{qrssvs}. Typically this will be the \code{gamma} component of the list returned by \code{SSVSquantreg}.} \item{\dots}{Further arguments} } \value{An object with class \code{"trellis"}. The associated \code{\link[lattice:update.trellis]{update}} and \code{\link[lattice:print.trellis]{print}} methods are documented in the "Lattice" package.} \author{Craig Reed} \references{ Deepayan Sarkar. 2008. \emph{lattice: Lattice Graphics.} R package version 0.17-17 } \examples{ \dontrun{ set.seed(1) epsilon<-rnorm(100) set.seed(2) x<-matrix(rnorm(1000),100,10) y<-x[,1]+x[,10]+epsilon qrssvs<-SSVSquantreg(y~x) plot(qrssvs$gamma) ## Modify the graph by increasing the fontsize on the axes qrssvsplot<-plot(qrssvs$gamma) update(qrssvsplot, scales=list(cex=3)) } } \keyword{models} \seealso{ \code{\link[MCMCpack]{SSVSquantreg}}, \code{\link[MCMCpack]{mptable}}, \code{\link[lattice:Lattice]{Lattice}} for a brief introduction to lattice displays and links to further documentation. } MCMCpack/man/procrust.Rd0000644000176000001440000000225512133644110014602 0ustar ripleyusers\name{procrustes} \alias{procrustes} \title{Procrustes Transformation} \description{ This function performs a Procrustes transformation on a matrix \code{X} to minimize the squared distance between \code{X} and another matrix \code{Xstar}. 
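A minimal sketch with simulated data (not taken from the package examples) illustrates the intended use; here a pure translation of \code{X} is recovered by the returned translation vector:
\preformatted{
## Sketch: recover a known translation (simulated data)
set.seed(1)
X <- matrix(rnorm(20), 10, 2)
Xstar <- X + 2                       ## target is X shifted by 2 in each column
fit <- procrustes(X, Xstar, translation = TRUE)
fit$tt                               ## should be close to c(2, 2)
max(abs(fit$X.new - Xstar))          ## should be essentially zero
}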
} \usage{ procrustes(X, Xstar, translation=FALSE, dilation=FALSE) } \arguments{ \item{X}{The matrix to be transformed.} \item{Xstar}{The target matrix.} \item{translation}{logical value indicating whether \code{X} should be translated.} \item{dilation}{logical value indicating whether \code{X} should be dilated.} } \value{ A list containing: \code{X.new} the matrix that is the Procrustes transformed version of \code{X}, \code{R} the rotation matrix, \code{tt} the translation vector, and \code{s} the scale factor. } \details{ \code{R}, \code{tt}, and \code{s} are chosen so that: \deqn{s X R + 1 tt' \approx X^*}{s X R + 1 tt' approximately Xstar} \code{X.new} is given by: \deqn{X_{new} = s X R + 1 tt'}{X.new = s X R + 1 tt'} } \references{ Borg and Groenen. 1997. \emph{Modern Multidimensional Scaling}. New York: Springer. pp. 340-342. } \seealso{\code{\link{MCMCirtKd}}} \keyword{manip} MCMCpack/man/PostProbMod.Rd0000644000176000001440000000405212133644110015126 0ustar ripleyusers\name{PostProbMod} \alias{PostProbMod} \title{Calculate Posterior Probability of Model} \description{ This function takes an object of class \code{BayesFactor} and calculates the posterior probability that each model under study is correct given that one of the models under study is correct. } \usage{ PostProbMod(BF, prior.probs=1) } \arguments{ \item{BF}{An object of class \code{BayesFactor}.} \item{prior.probs}{The prior probabilities that each model is correct. Can be either a scalar or array. Must be positive. If the sum of the prior probabilities is not equal to 1 prior.probs will be normalized so that it does sum to unity.} } \value{ An array holding the posterior probabilities that each model under study is correct given that one of the models under study is correct. } \examples{ \dontrun{ data(birthwt) post1 <- MCMCregress(bwt~age+lwt+as.factor(race) + smoke + ht, data=birthwt, b0=c(2700, 0, 0, -500, -500, -500, -500), B0=c(1e-6, .01, .01, 1.6e-5, 1.6e-5, 1.6e-5, 1.6e-5), c0=10, d0=4500000, marginal.likelihood="Chib95", mcmc=10000) post2 <- MCMCregress(bwt~age+lwt+as.factor(race) + smoke, data=birthwt, b0=c(2700, 0, 0, -500, -500, -500), B0=c(1e-6, .01, .01, 1.6e-5, 1.6e-5, 1.6e-5), c0=10, d0=4500000, marginal.likelihood="Chib95", mcmc=10000) post3 <- MCMCregress(bwt~as.factor(race) + smoke + ht, data=birthwt, b0=c(2700, -500, -500, -500, -500), B0=c(1e-6, 1.6e-5, 1.6e-5, 1.6e-5, 1.6e-5), c0=10, d0=4500000, marginal.likelihood="Chib95", mcmc=10000) BF <- BayesFactor(post1, post2, post3) mod.probs <- PostProbMod(BF) print(mod.probs) } } \concept{Bayes factor} \concept{model comparison} \seealso{\code{\link{MCMCregress}}} \keyword{models} MCMCpack/man/plotState.Rd0000755000176000001440000000203412133644110014676 0ustar ripleyusers\name{plotState} \alias{plotState} \title{Changepoint State Plot} \description{Plot the posterior probability that each time point is in each state.} \usage{ plotState(mcmcout, main="Posterior Regime Probability", ylab=expression(paste("Pr(", S[t], "= k |", Y[t], ")")), legend.control = NULL, cex = 0.8, lwd = 1.2, start=1) } \arguments{ \item{mcmcout}{The \code{mcmc} object containing the posterior density sample from a changepoint model. Note that this must have a \code{prob.state} attribute.} \item{main}{Title of the plot.} \item{ylab}{Label for the y-axis.} \item{legend.control}{Control the location of the legend. 
It is necessary to pass both the x and y locations; i.e., \code{c(x,y)}.} \item{cex}{Control point size.} \item{lwd}{Line width parameter.} \item{start}{The time of the first observation to be shown in the time series plot.} } \author{Jong Hee Park, \email{jhp@uchicago.edu}, \url{http://home.uchicago.edu/~jhp/}.} \keyword{hplot} \seealso{\code{\link{MCMCpoissonChange}}, \code{\link{MCMCbinaryChange}}} MCMCpack/man/plotIntervention.Rd0000644000176000001440000000217212133644110016302 0ustar ripleyusers\name{plotIntervention} \alias{plotIntervention} \title{Plot of intervention analysis} \description{Plot the results of changepoint intervention analysis.} \usage{ plotIntervention(mcmcout, forward = TRUE, start = 1, alpha = 0.05, col = "red", main="", ylab="", xlab="") } \arguments{ \item{mcmcout}{The \code{mcmcout} object containing the posterior density sample from changepoint intervention analysis.} \item{forward}{If TRUE, draw the result of forward prediction. If FALSE, draw the result of backward prediction.} \item{start}{The time of the first observation to be shown in the time series plot.} \item{alpha}{The size of the prediction error band in the plot. By default, alpha = 0.05, which means the 95 percent Bayesian credible interval.} \item{col}{The color of predicted data.} \item{main}{Title of the plot} \item{ylab}{Label for the y-axis.} \item{xlab}{Label for the x-axis.} } \references{ Jong Hee Park. 2012. "A Change-point Approach to Intervention Analysis Using Bayesian Inference" Presented at the 2012 Annual Meeting of Korean Statistical Society. } \seealso{\code{\link{MCMCintervention}}} MCMCpack/man/plotChangepoint.Rd0000755000176000001440000000227112133644110016060 0ustar ripleyusers\name{plotChangepoint} \alias{plotChangepoint} \title{Posterior Density of Regime Change Plot} \description{Plot the posterior density of regime change.} \usage{ plotChangepoint(mcmcout, main="Posterior Density of Regime Change Probabilities", xlab = "Time", ylab = "", verbose = FALSE, start=1, overlay=FALSE) } \arguments{ \item{mcmcout}{The \code{mcmc} object containing the posterior density sample from a changepoint model. Note that this must have a \code{prob.state} attribute.} \item{main}{Title of the plot} \item{xlab}{Label for the x-axis.} \item{ylab}{Label for the y-axis.} \item{verbose}{If verbose=TRUE, expected changepoints are printed.} \item{start}{The time of the first observation to be shown in the time series plot.} \item{overlay}{If overlay=TRUE, the probability of each regime change is drawn separately, which will be useful to draw multiple plots in one screen. See the example in \code{MCMCpoissonChange}. Otherwise, multiple plots of regime change probabilities will be drawn.} } \author{Jong Hee Park, \email{jhp@uchicago.edu}, \url{http://home.uchicago.edu/~jhp/}.} \keyword{hplot} \seealso{\code{\link{MCMCpoissonChange}}, \code{\link{MCMCbinaryChange}}} MCMCpack/man/PErisk.Rd0000644000176000001440000000424312133644110014115 0ustar ripleyusers\name{PErisk} \alias{PErisk} \docType{data} \title{Political Economic Risk Data from 62 Countries in 1987} \description{ Political Economic Risk Data from 62 Countries in 1987. } \usage{data(PErisk)} \format{ A data frame with 62 observations on the following 6 variables. All data points are from 1987. See Quinn (2004) for more details.
\describe{ \item{country}{a factor with levels \code{Argentina} through \code{Zimbabwe}} \item{courts}{an ordered factor with levels \code{0} < \code{1}. \code{courts} is an indicator of whether the country in question is judged to have an independent judiciary. From Henisz (2002).} \item{barb2}{a numeric vector giving the natural log of the black market premium in each country. The black market premium is coded as the black market exchange rate (local currency per dollar) divided by the official exchange rate minus 1. From Marshall, Gurr, and Harff (2002). } \item{prsexp2}{an ordered factor with levels \code{0} < \code{1} < \code{2} < \code{3} < \code{4} < \code{5}, giving the lack of expropriation risk. From Marshall, Gurr, and Harff (2002).} \item{prscorr2}{an ordered factor with levels \code{0} < \code{1} < \code{2} < \code{3} < \code{4} < \code{5}, measuring the lack of corruption. From Marshall, Gurr, and Harff (2002).} \item{gdpw2}{a numeric vector giving the natural log of real GDP per worker in 1985 international prices. From Alvarez et al. (1999).} } } \source{ Mike Alvarez, Jose Antonio Cheibub, Fernando Limongi, and Adam Przeworski. 1999. ``ACLP Political and Economic Database.'' \url{http://www.ssc.upenn.edu/~cheibub/data/}. Witold J. Henisz. 2002. ``The Political Constraint Index (POLCON) Dataset.'' \\ \url{http://www-management.wharton.upenn.edu/henisz/POLCON/ContactInfo.html}. Monty G. Marshall, Ted Robert Gurr, and Barbara Harff. 2002. ``State Failure Task Force Problem Set.'' \url{http://www.cidcm.umd.edu/inscr/stfail/index.htm}. } \references{ Kevin M. Quinn. 2004. ``Bayesian Factor Analysis for Mixed Ordinal and Continuous Response.'' \emph{Political Analysis}. 12: 338-353. } \keyword{datasets} MCMCpack/man/noncenhypergeom.Rd0000644000176000001440000000336412133644110016123 0ustar ripleyusers\name{NoncenHypergeom} \alias{NoncenHypergeom} \alias{rnoncenhypergeom} \alias{dnoncenhypergeom} \title{The Noncentral Hypergeometric Distribution} \description{ Evaluates the density at a single point or all points, and generates random draws from the Noncentral Hypergeometric distribution. } \usage{ dnoncenhypergeom(x=NA, n1, n2, m1, psi) rnoncenhypergeom(n, n1, n2, m1, psi) } \arguments{ \item{x}{The location to evaluate the density. If \code{x} is NA, then a matrix is returned with the density evaluated at all possible points.} \item{n}{The number of draws to make from the distribution.} \item{n1}{The size of group one.} \item{n2}{The size of group two.} \item{m1}{The observed number of positive outcomes (in both groups).} \item{psi}{Odds ratio.} } \value{ \code{dnoncenhypergeom} evaluates the density at point \code{x}, or a matrix with the first column containing the possible values of the random variable, and the second column containing the probabilities. \code{rnoncenhypergeom} returns a list of \code{n} random draws from the distribution. } \details{ The Noncentral Hypergeometric distribution is particularly useful for conditional inference for \eqn{(2 \times 2)}{(2 x 2)} tables. We use the parameterization and algorithms of Liao and Rosen (2001). The underlying R code is based on their published code. See their article for details of the parameterization. } \source{ J. G. Liao and Ori Rosen. 2001. ``Fast and Stable Algorithms for Computing and Sampling From the Noncentral Hypergeometric Distribution." \emph{The American Statistician.} 55: 366-369.
} \examples{ density <- dnoncenhypergeom(NA, 500, 500, 500, 6.0) draws <- rnoncenhypergeom(10, 500, 500, 500, 6.0) } \keyword{distribution} MCMCpack/man/Nethvote.Rd0000644000176000001440000000515312133644110014515 0ustar ripleyusers\name{Nethvote} \alias{Nethvote} \docType{data} \title{Dutch Voting Behavior in 1989} \description{ Dutch Voting Behavior in 1989. } \usage{data(Nethvote)} \format{ A data frame with 1754 observations and 11 variables from the 1989 Dutch Parliamentary Election Study (Anker and Oppenhuis, 1993). Each observation is a survey respondent. These data are a subset of one of five multiply imputed datasets used in Quinn and Martin (2002). For more information see Quinn and Martin (2002). \describe{ \item{vote}{A factor giving the self-reported vote choice of each respondent. The levels are CDA (Christen Democratisch Appel), D66 (Democraten 66), Pvda (Partij van de Arbeid), and VVD (Volkspartij voor Vrijheid en Democratie).} \item{distD66}{A numeric variable giving the squared ideological distance between the respondent and the D66. Larger values indicate ideological dissimilarity between the respondent and the party.} \item{distPvdA}{A numeric variable giving the squared ideological distance between the respondent and the PvdA. Larger values indicate ideological dissimilarity between the respondent and the party.} \item{distVVD}{A numeric variable giving the squared ideological distance between the respondent and the VVD. Larger values indicate ideological dissimilarity between the respondent and the party.} \item{distCDA}{A numeric variable giving the squared ideological distance between the respondent and the CDA. Larger values indicate ideological dissimilarity between the respondent and the party.} \item{relig}{An indicator variable equal to 0 if the respondent is not religious and 1 if the respondent is religious.} \item{class}{Social class of respondent. 0 is the lowest social class, 4 is the highest social class.} \item{income}{Income of respondent. 0 is lowest and 6 is highest.} \item{educ}{Education of respondent. 0 is lowest and 4 is highest.} \item{age}{Age category of respondent. 0 is lowest and 12 is highest.} \item{urban}{Indicator variable equal to 0 if the respondent is not a resident of an urban area and 1 if the respondent is a resident of an urban area.} } } \source{ H. Anker and E.V. Oppenhuis. 1993. ``Dutch Parliamentary Election Study.'' (computer file). Dutch Electoral Research Foundation and Netherlands Central Bureau of Statistics, Amsterdam. } \references{ Kevin M. Quinn and Andrew D. Martin. 2002. ``An Integrated Computational Model of Multiparty Electoral Competition.'' \emph{Statistical Science}. 17: 405-419. } \keyword{datasets} MCMCpack/man/mptable.Rd0000644000176000001440000000146112133644110014343 0ustar ripleyusers\name{mptable} \alias{mptable} \title{Calculate the marginal posterior probabilities of predictors being included in a quantile regression model.} \description{This function extracts the marginal probability table produced by \code{summary.qrssvs}.} \usage{mptable(qrssvs)} \arguments{ \item{qrssvs}{An object of class \code{qrssvs}. 
Typically this will be the \code{gamma} component of the list returned by \code{SSVSquantreg}.} } \value{A table with the predictors listed together with their posterior marginal posterior probability of inclusion.} \author{Craig Reed} \examples{ \dontrun{ set.seed(1) epsilon<-rnorm(100) set.seed(2) x<-matrix(rnorm(1000),100,10) y<-x[,1]+x[,10]+epsilon qrssvs<-SSVSquantreg(y~x) mptable(qrssvs$gamma) } } \keyword{models} \seealso{ \code{\link[MCMCpack]{SSVSquantreg}}} MCMCpack/man/MCpoissongamma.Rd0000644000176000001440000000312712133644110015635 0ustar ripleyusers\name{MCpoissongamma} \alias{MCpoissongamma} \title{Monte Carlo Simulation from a Poisson Likelihood with a Gamma Prior} \description{ This function generates a sample from the posterior distribution of a Poisson likelihood with a Gamma prior. } \usage{ MCpoissongamma(y, alpha, beta, mc=1000, ...) } \arguments{ \item{y}{A vector of counts (must be non-negative).} \item{alpha}{Gamma prior distribution shape parameter.} \item{beta}{Gamma prior distribution scale parameter.} \item{mc}{The number of Monte Carlo draws to make.} \item{...}{further arguments to be passed} } \value{ An mcmc object that contains the posterior sample. This object can be summarized by functions provided by the coda package. } \details{ \code{MCpoissongamma} directly simulates from the posterior distribution. This model is designed primarily for instructional use. \eqn{\lambda}{lambda} is the parameter of interest of the Poisson distribution. We assume a conjugate Gamma prior: \deqn{\lambda \sim \mathcal{G}amma(\alpha, \beta)}{lambda ~ Gamma(alpha, beta)} \eqn{y} is a vector of counts. } \examples{ \dontrun{ data(quine) posterior <- MCpoissongamma(quine$Days, 15, 1, 5000) summary(posterior) plot(posterior) grid <- seq(14,18,0.01) plot(grid, dgamma(grid, 15, 1), type="l", col="red", lwd=3, ylim=c(0,1.3), xlab="lambda", ylab="density") lines(density(posterior), col="blue", lwd=3) legend(17, 1.3, c("prior", "posterior"), lwd=3, col=c("red", "blue")) } } \keyword{models} \seealso{\code{\link[coda]{plot.mcmc}}, \code{\link[coda]{summary.mcmc}}} MCMCpack/man/MCnormalnormal.Rd0000644000176000001440000000322612133644110015641 0ustar ripleyusers\name{MCnormalnormal} \alias{MCnormalnormal} \title{Monte Carlo Simulation from a Normal Likelihood (with known variance) with a Normal Prior} \description{ This function generates a sample from the posterior distribution of a Normal likelihood (with known variance) with a Normal prior. } \usage{ MCnormalnormal(y, sigma2, mu0, tau20, mc=1000, ...) } \arguments{ \item{y}{The data.} \item{sigma2}{The known variance of y.} \item{mu0}{The prior mean of mu.} \item{tau20}{The prior variance of mu.} \item{mc}{The number of Monte Carlo draws to make.} \item{...}{further arguments to be passed} } \value{ An mcmc object that contains the posterior sample. This object can be summarized by functions provided by the coda package. } \details{ \code{MCnormalnormal} directly simulates from the posterior distribution. This model is designed primarily for instructional use. \eqn{\mu}{mu} is the parameter of interest of the Normal distribution. We assume a conjugate normal prior: \deqn{\mu \sim \mathcal{N}(\mu_0, \tau^2_0)}{mu ~ N(mu0, tau20)} \eqn{y} is a vector of observed data. 
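Because the prior is conjugate, the posterior is also normal, \deqn{\mu | y \sim \mathcal{N}(\mu_1, \tau^2_1), \quad \tau^2_1 = (1/\tau^2_0 + n/\sigma^2)^{-1}, \quad \mu_1 = \tau^2_1(\mu_0/\tau^2_0 + n\bar{y}/\sigma^2).}{mu | y ~ N(mu1, tau1^2), where tau1^2 = 1/(1/tau20 + n/sigma2) and mu1 = tau1^2 (mu0/tau20 + n*ybar/sigma2).} The following sketch (using the data from the example below) computes this closed form as a check on the Monte Carlo draws:
\preformatted{
y <- c(2.65, 1.80, 2.29, 2.11, 2.27, 2.61, 2.49, 0.96, 1.72, 2.40)
sigma2 <- 1; mu0 <- 0; tau20 <- 1
n <- length(y)
post.var  <- 1 / (1 / tau20 + n / sigma2)
post.mean <- post.var * (mu0 / tau20 + sum(y) / sigma2)
c(post.mean, sqrt(post.var))   ## compare with summary() of the posterior sample
}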
} \examples{ \dontrun{ y <- c(2.65, 1.80, 2.29, 2.11, 2.27, 2.61, 2.49, 0.96, 1.72, 2.40) posterior <- MCMCpack:::MCnormalnormal(y, 1, 0, 1, 5000) summary(posterior) plot(posterior) grid <- seq(-3,3,0.01) plot(grid, dnorm(grid, 0, 1), type="l", col="red", lwd=3, ylim=c(0,1.4), xlab="mu", ylab="density") lines(density(posterior), col="blue", lwd=3) legend(-3, 1.4, c("prior", "posterior"), lwd=3, col=c("red", "blue")) } } \keyword{models} \seealso{\code{\link[coda]{plot.mcmc}}, \code{\link[coda]{summary.mcmc}}} MCMCpack/man/MCmultinomdirichlet.Rd0000644000176000001440000000313712133644110016675 0ustar ripleyusers\name{MCmultinomdirichlet} \alias{MCmultinomdirichlet} \title{Monte Carlo Simulation from a Multinomial Likelihood with a Dirichlet Prior} \description{ This function generates a sample from the posterior distribution of a multinomial likelihood with a Dirichlet prior. } \usage{ MCmultinomdirichlet(y, alpha0, mc=1000, ...) } \arguments{ \item{y}{A vector of data (number of successes for each category).} \item{alpha0}{The vector of parameters of the Dirichlet prior.} \item{mc}{The number of Monte Carlo draws to make.} \item{...}{further arguments to be passed} } \value{ An mcmc object that contains the posterior sample. This object can be summarized by functions provided by the coda package. } \details{ \code{MCmultinomdirichlet} directly simulates from the posterior distribution. This model is designed primarily for instructional use. \eqn{\pi}{pi} is the parameter of interest of the multinomial distribution. It is of dimension \eqn{(d \times 1)}{(d x 1)}. We assume a conjugate Dirichlet prior: \deqn{\pi \sim \mathcal{D}irichlet(\alpha_0)}{pi ~ Dirichlet(alpha0)} \eqn{y} is a \eqn{(d \times 1)}{(d x 1)} vector of observed data. } \examples{ \dontrun{ ## Example from Gelman, et. al. (1995, p. 78) posterior <- MCmultinomdirichlet(c(727,583,137), c(1,1,1), mc=10000) bush.dukakis.diff <- posterior[,1] - posterior[,2] cat("Pr(Bush > Dukakis): ", sum(bush.dukakis.diff > 0) / length(bush.dukakis.diff), "\n") hist(bush.dukakis.diff) } } \keyword{models} \seealso{\code{\link[coda]{plot.mcmc}}, \code{\link[coda]{summary.mcmc}}} MCMCpack/man/MCMCtobit.Rd0000644000176000001440000001624612133644110014507 0ustar ripleyusers\name{MCMCtobit} \alias{MCMCtobit} \title{Markov Chain Monte Carlo for Gaussian Linear Regression with a Censored Dependent Variable} \description{ This function generates a sample from the posterior distribution of a linear regression model with Gaussian errors using Gibbs sampling (with a multivariate Gaussian prior on the beta vector, and an inverse Gamma prior on the conditional error variance). The dependent variable may be censored from below, from above, or both. The user supplies data and priors, and a sample from the posterior distribution is returned as an mcmc object, which can be subsequently analyzed with functions provided in the coda package. } \usage{ MCMCtobit(formula, data = NULL, below = 0, above = Inf, burnin = 1000, mcmc = 10000, thin = 1, verbose = 0, seed = NA, beta.start = NA, b0 = 0, B0 = 0, c0 = 0.001, d0 = 0.001, ...) } \arguments{ \item{formula}{A model formula.} \item{data}{A dataframe.} \item{below}{The point at which the dependent variable is censored from below. The default is zero. To censor from above only, specify that below = -Inf.} \item{above}{The point at which the dependent variable is censored from above. 
To censor from below only, use the default value of Inf.} \item{burnin}{The number of burn-in iterations for the sampler.} \item{mcmc}{The number of MCMC iterations after burnin.} \item{thin}{The thinning interval used in the simulation. The number of MCMC iterations must be divisible by this value.} \item{verbose}{A switch which determines whether or not the progress of the sampler is printed to the screen. If \code{verbose} is greater than 0 the iteration number, the \eqn{\beta}{beta} vector, and the error variance is printed to the screen every \code{verbose}th iteration.} \item{seed}{The seed for the random number generator. If NA, the Mersenne Twister generator is used with default seed 12345; if an integer is passed it is used to seed the Mersenne twister. The user can also pass a list of length two to use the L'Ecuyer random number generator, which is suitable for parallel computation. The first element of the list is the L'Ecuyer seed, which is a vector of length six or NA (if NA a default seed of \code{rep(12345,6)} is used). The second element of list is a positive substream number. See the MCMCpack specification for more details.} \item{beta.start}{The starting values for the \eqn{\beta}{beta} vector. This can either be a scalar or a column vector with dimension equal to the number of betas. The default value of of NA will use the OLS estimate of \eqn{\beta}{beta} as the starting value. If this is a scalar, that value will serve as the starting value mean for all of the betas.} \item{b0}{The prior mean of \eqn{\beta}{beta}. This can either be a scalar or a column vector with dimension equal to the number of betas. If this takes a scalar value, then that value will serve as the prior mean for all of the betas.} \item{B0}{The prior precision of \eqn{\beta}{beta}. This can either be a scalar or a square matrix with dimensions equal to the number of betas. If this takes a scalar value, then that value times an identity matrix serves as the prior precision of beta. Default value of 0 is equivalent to an improper uniform prior for beta.} \item{c0}{\eqn{c_0/2}{c0/2} is the shape parameter for the inverse Gamma prior on \eqn{\sigma^2}{sigma^2} (the variance of the disturbances). The amount of information in the inverse Gamma prior is something like that from \eqn{c_0}{c0} pseudo-observations.} \item{d0}{\eqn{d_0/2}{d0/2} is the scale parameter for the inverse Gamma prior on \eqn{\sigma^2}{sigma^2} (the variance of the disturbances). In constructing the inverse Gamma prior, \eqn{d_0}{d0} acts like the sum of squared errors from the \eqn{c_0}{c0} pseudo-observations.} \item{...}{further arguments to be passed} } \details{ \code{MCMCtobit} simulates from the posterior distribution using standard Gibbs sampling (a multivariate Normal draw for the betas, and an inverse Gamma draw for the conditional error variance). \code{MCMCtobit} differs from \code{MCMCregress} in that the dependent variable may be censored from below, from above, or both. The simulation proper is done in compiled C++ code to maximize efficiency. Please consult the coda documentation for a comprehensive list of functions that can be used to analyze the posterior sample. 
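For instance, an outcome censored from below at the default value of zero can be analyzed as follows (a sketch with simulated data and arbitrary coefficient values; the \code{tobin} example below gives a complete application):
\preformatted{
## Sketch: data censored from below at 0 (the default)
set.seed(42)
x <- rnorm(200)
ystar <- 0.5 + 1.5 * x + rnorm(200)   ## latent response
y <- pmax(ystar, 0)                   ## observed response, left-censored at 0
out <- MCMCtobit(y ~ x, below = 0, mcmc = 5000, burnin = 1000)
summary(out)
}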
The model takes the following form: \deqn{y_i = x_i ' \beta + \varepsilon_{i},}{y_i = x_i'beta + epsilon_i,} where the errors are assumed to be Gaussian: \deqn{\varepsilon_{i} \sim \mathcal{N}(0, \sigma^2).}{epsilon_i ~ N(0, sigma^2).} Let \eqn{c_1} and \eqn{c_2} be the two censoring points, and let \eqn{y_i^\ast}{y_i^star} be the partially observed dependent variable. Then, \deqn{y_i = y_i^{\ast} \texttt{ if } c_1 < y_i^{\ast} < c_2,}{y_i = y_i^star if c_1 < y_i^star < c_2,} \deqn{y_i = c_1 \texttt{ if } c_1 \geq y_i^{\ast},}{y_i = c_1 if c_1 >= y_i^star,} \deqn{y_i = c_2 \texttt{ if } c_2 \leq y_i^{\ast}.}{y_i = c_2 if c_2 <= y_i^star.} We assume standard, semi-conjugate priors: \deqn{\beta \sim \mathcal{N}(b_0,B_0^{-1}),}{beta ~ N(b0,B0^(-1)),} and: \deqn{\sigma^{-2} \sim \mathcal{G}amma(c_0/2, d_0/2),}{sigma^(-2) ~ Gamma(c0/2, d0/2),} where \eqn{\beta}{beta} and \eqn{\sigma^{-2}}{sigma^(-2)} are assumed \emph{a priori} independent. Note that only starting values for \eqn{\beta}{beta} are allowed because simulation is done using Gibbs sampling with the conditional error variance as the first block in the sampler. } \value{ An mcmc object that contains the posterior sample. This object can be summarized by functions provided by the coda package. } \references{ Andrew D. Martin, Kevin M. Quinn, and Jong Hee Park. 2011. ``MCMCpack: Markov Chain Monte Carlo in R.'', \emph{Journal of Statistical Software}. 42(9): 1-21. \url{http://www.jstatsoft.org/v42/i09/}. Daniel Pemstein, Kevin M. Quinn, and Andrew D. Martin. 2007. \emph{Scythe Statistical Library 1.0.} \url{http://scythe.wustl.edu}. Martyn Plummer, Nicky Best, Kate Cowles, and Karen Vines. 2002. \emph{Output Analysis and Diagnostics for MCMC (CODA)}. \url{http://www-fis.iarc.fr/coda/}. Siddhartha Chib. 1992. ``Bayes inference in the Tobit censored regression model.'' \emph{Journal of Econometrics}. 51:79-99. James Tobin. 1958. ``Estimation of relationships for limited dependent variables.'' \emph{Econometrica.} 26:24-36. } \author{Ben Goodrich, \email{goodrich.ben@gmail.com}, \url{http://www.people.fas.harvard.edu/~goodrich/}} \seealso{ \code{\link[coda]{plot.mcmc}}, \code{\link[coda]{summary.mcmc}}, \code{\link[survival]{survreg}}, \code{\link[MCMCpack]{MCMCregress}}} \examples{ \dontrun{ library(survival) example(tobin) summary(tfit) tfit.mcmc <- MCMCtobit(durable ~ age + quant, data=tobin, mcmc=30000, verbose=1000) plot(tfit.mcmc) raftery.diag(tfit.mcmc) summary(tfit.mcmc) } } \keyword{models} MCMCpack/man/MCMCSVDreg.Rd0000644000176000001440000001437112133644110014515 0ustar ripleyusers\name{MCMCSVDreg} \alias{MCMCSVDreg} \title{Markov Chain Monte Carlo for SVD Regression} \description{ This function generates a sample from the posterior distribution of a linear regression model with Gaussian errors in which the design matrix has been decomposed with singular value decomposition. The sampling is done via the Gibbs sampling algorithm. The user supplies data and priors, and a sample from the posterior distribution is returned as an mcmc object, which can be subsequently analyzed with functions provided in the coda package. } \usage{ MCMCSVDreg(formula, data=NULL, burnin = 1000, mcmc = 10000, thin=1, verbose = 0, seed = NA, tau2.start = 1, g0 = 0, a0 = 0.001, b0 = 0.001, c0=2, d0=2, w0=1, beta.samp=FALSE, intercept=TRUE, ...)} \arguments{ \item{formula}{Model formula.
Predictions are returned for elements of y that are coded as NA.} \item{data}{Data frame.} \item{burnin}{The number of burn-in iterations for the sampler.} \item{mcmc}{The number of MCMC iterations after burnin.} \item{thin}{The thinning interval used in the simulation. The number of MCMC iterations must be divisible by this value.} \item{verbose}{A switch which determines whether or not the progress of the sampler is printed to the screen. If \code{verbose} is greater than 0 the iteration number, the \eqn{\beta}{beta} vector, and the error variance are printed to the screen every \code{verbose}th iteration.} \item{seed}{The seed for the random number generator. If NA, the Mersenne Twister generator is used with default seed 12345; if an integer is passed it is used to seed the Mersenne twister. The user can also pass a list of length two to use the L'Ecuyer random number generator, which is suitable for parallel computation. The first element of the list is the L'Ecuyer seed, which is a vector of length six or NA (if NA a default seed of \code{rep(12345,6)} is used). The second element of list is a positive substream number. See the MCMCpack specification for more details.} \item{tau2.start}{The starting values for the \eqn{\tau^2}{tau^2} vector. Can be either a scalar or a vector. If a scalar is passed then that value will be the starting value for all elements of \eqn{\tau^2}{tau^2}.} \item{g0}{The prior mean of \eqn{\gamma}{gamma}. This can either be a scalar or a column vector with dimension equal to the number of gammas. If this takes a scalar value, then that value will serve as the prior mean for all of the betas.} \item{a0}{\eqn{a_0/2}{a0/2} is the shape parameter for the inverse Gamma prior on \eqn{\sigma^2}{sigma^2} (the variance of the disturbances). The amount of information in the inverse Gamma prior is something like that from \eqn{a_0}{a0} pseudo-observations.} \item{b0}{\eqn{b_0/2}{b0/2} is the scale parameter for the inverse Gamma prior on \eqn{\sigma^2}{sigma^2} (the variance of the disturbances). In constructing the inverse Gamma prior, \eqn{b_0}{b0} acts like the sum of squared errors from the \eqn{a_0}{a0} pseudo-observations.} \item{c0}{\eqn{c_0/2}{c0/2} is the shape parameter for the inverse Gamma prior on \eqn{\tau_i^2}{tau[i]^2}.} \item{d0}{\eqn{d_0/2}{d0/2} is the scale parameter for the inverse Gamma prior on \eqn{\tau_i^2}{tau[i]^2}.} \item{w0}{The prior probability that \eqn{\gamma_i = 0}{gamma[i] = 0}. Can be either a scalar or an \eqn{N}{N} vector where \eqn{N}{N} is the number of observations.} \item{beta.samp}{Logical indicating whether the sampled elements of beta should be stored and returned.} \item{intercept}{Logical indicating whether the original design matrix should include a constant term.} \item{...}{further arguments to be passed} } \value{ An mcmc object that contains the posterior sample. This object can be summarized by functions provided by the coda package. } \details{ The model takes the following form: \deqn{y = X \beta + \varepsilon}{y = X beta + epsilon} Where the errors are assumed to be iid Gaussian: \deqn{\varepsilon_{i} \sim \mathcal{N}(0, \sigma^2)}{epsilon_i ~ N(0, sigma^2)} Let \eqn{N}{N} denote the number of rows of \eqn{X}{X} and \eqn{P}{P} the number of columns of \eqn{X}{X}. Unlike the standard regression setup where \eqn{N >> P}{N >> P} here it is the case that \eqn{P >> N}{P >> N}. 
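The singular value decomposition introduced in the next paragraph can be inspected directly in R. The sketch below uses simulated dimensions; the identification of \eqn{A}{A}, \eqn{D}{D}, and \eqn{F}{F} with the components returned by \code{svd} is one way to read the notation, not code taken from the package:
\preformatted{
## Sketch: X' = A D F with A = s$u, D = diag(s$d), F = t(s$v)
N <- 10; P <- 200
X <- matrix(rnorm(N * P), N, P)
s <- svd(t(X))
recon <- tcrossprod(sweep(s$u, 2, s$d, "*"), s$v)   ## recon equals A D F
max(abs(t(X) - recon))                              ## numerically zero
}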
To deal with this problem a singular value decomposition of \eqn{X'}{X'} is performed: \eqn{X' = ADF}{X' = ADF} and the regression model becomes \deqn{y = F'D \gamma + \varepsilon}{y = F'D gamma + epsilon} where \eqn{\gamma = A' \beta}{gamma = A' beta}. We assume the following priors: \deqn{\sigma^{-2} \sim \mathcal{G}amma(a_0/2, b_0/2)}{sigma^(-2) ~ Gamma(a0/2, b0/2)} \deqn{\tau^{-2} \sim \mathcal{G}amma(c_0/2, d_0/2)}{tau^(-2) ~ Gamma(c0/2, d0/2)} \deqn{\gamma_i \sim w0_i \delta_0 + (1-w0_i) \mathcal{N}(g0_i, \sigma^2 \tau^2_i / d_i^2)}{ gamma[i] ~ w0[i] delta0 + (1-w0[i] N(g0[i], sigma^2 tau[i]^2/ d[i]^2)} where \eqn{\delta_0}{delta0} is a unit point mass at 0 and \eqn{d_i}{d[i]} is the \eqn{i}{i}th diagonal element of \eqn{D}{D}. } \references{ Mike West, Josheph Nevins, Jeffrey Marks, Rainer Spang, and Harry Zuzan. 2000. ``DNA Microarray Data Analysis and Regression Modeling for Genetic Expression Profiling." Duke ISDS working paper. Gottardo, Raphael, and Adrian Raftery. 2004. ``Markov chain Monte Carlo with mixtures of singular distributions.'' Statistics Department, University of Washington, Technical Report 470. Andrew D. Martin, Kevin M. Quinn, and Jong Hee Park. 2011. ``MCMCpack: Markov Chain Monte Carlo in R.'', \emph{Journal of Statistical Software}. 42(9): 1-21. \url{http://www.jstatsoft.org/v42/i09/}. Daniel Pemstein, Kevin M. Quinn, and Andrew D. Martin. 2007. \emph{Scythe Statistical Library 1.0.} \url{http://scythe.wustl.edu}. Martyn Plummer, Nicky Best, Kate Cowles, and Karen Vines. 2002. \emph{Output Analysis and Diagnostics for MCMC (CODA)}. \url{http://www-fis.iarc.fr/coda/}. } \keyword{models} \seealso{\code{\link[coda]{plot.mcmc}}, \code{\link[coda]{summary.mcmc}}, \code{\link[stats]{lm}}} MCMCpack/man/MCMCresidualBreakAnalysis.Rd0000644000176000001440000001465312133644110017647 0ustar ripleyusers\name{MCMCresidualBreakAnalysis} \alias{MCMCresidualBreakAnalysis} \title{Break Analysis of Univariate Time Series using Markov Chain Monte Carlo} \description{ This function performs a break analysis for univariate time series data using a linear Gaussian changepoint model. The code is written mainly for an internal use in \code{testpanelSubjectBreak}.} \usage{ MCMCresidualBreakAnalysis(resid, m = 1, b0 = 0, B0 = 0.001, c0 = 0.1, d0 = 0.1, a = NULL, b = NULL, mcmc = 1000, burnin = 1000, thin = 1, verbose = 0, seed = NA, beta.start = NA, P.start = NA, marginal.likelihood = c("none", "Chib95"), ...) } \arguments{ \item{resid}{Univariate time series} \item{m}{The number of breaks.} \item{b0}{The prior mean of \eqn{\beta}{beta}. This can either be a scalar or a column vector with dimension equal to the number of betas. If this takes a scalar value, then that value will serve as the prior mean for all of the betas.} \item{B0}{The prior precision of \eqn{\beta}{beta}. This can either be a scalar or a square matrix with dimensions equal to the number of betas. If this takes a scalar value, then that value times an identity matrix serves as the prior precision of beta. Default value of 0 is equivalent to an improper uniform prior for beta.} \item{c0}{\eqn{c_0/2}{c0/2} is the shape parameter for the inverse Gamma prior on \eqn{\sigma^2}{sigma^2} (the variance of the disturbances). The amount of information in the inverse Gamma prior is something like that from \eqn{c_0}{c0} pseudo-observations.} \item{d0}{\eqn{d_0/2}{d0/2} is the scale parameter for the inverse Gamma prior on \eqn{\sigma^2}{sigma^2} (the variance of the disturbances). 
In constructing the inverse Gamma prior, \eqn{d_0}{d0} acts like the sum of squared errors from the \eqn{c_0}{c0} pseudo-observations.} \item{a}{\eqn{a}{a} is the shape1 beta prior for transition probabilities. By default, the expected duration is computed and corresponding a and b values are assigned. The expected duration is the sample period divided by the number of states.} \item{b}{\eqn{b}{b} is the shape2 beta prior for transition probabilities. By default, the expected duration is computed and corresponding a and b values are assigned. The expected duration is the sample period divided by the number of states.} \item{burnin}{The number of burn-in iterations for the sampler.} \item{mcmc}{The number of MCMC iterations after burnin.} \item{thin}{The thinning interval used in the simulation. The number of MCMC iterations must be divisible by this value.} \item{verbose}{A switch which determines whether or not the progress of the sampler is printed to the screen. If \code{verbose} is greater than 0 the iteration number, the \eqn{\beta}{beta} vector, and the error variance are printed to the screen every \code{verbose}th iteration.} \item{seed}{The seed for the random number generator. If NA, the Mersenne Twister generator is used with default seed 12345; if an integer is passed it is used to seed the Mersenne twister. The user can also pass a list of length two to use the L'Ecuyer random number generator, which is suitable for parallel computation. The first element of the list is the L'Ecuyer seed, which is a vector of length six or NA (if NA a default seed of \code{rep(12345,6)} is used). The second element of list is a positive substream number. See the MCMCpack specification for more details.} \item{beta.start}{The starting values for the \eqn{\beta}{beta} vector. This can either be a scalar or a column vector with dimension equal to the number of betas. The default value of of NA will use the OLS estimate of \eqn{\beta}{beta} as the starting value. If this is a scalar, that value will serve as the starting value mean for all of the betas.} \item{P.start}{The starting values for the transition matrix. A user should provide a square matrix with dimension equal to the number of states. By default, draws from the \code{Beta(0.9, 0.1)} are used to construct a proper transition matrix for each raw except the last raw.} \item{marginal.likelihood}{How should the marginal likelihood be calculated? Options are: \code{none} in which case the marginal likelihood will not be calculated, and \code{Chib95} in which case the method of Chib (1995) is used.} \item{...}{further arguments to be passed} } \value{ An mcmc object that contains the posterior sample. This object can be summarized by functions provided by the coda package. } \details{ \code{MCMCresidualBreakAnalysis} simulates from the posterior distribution using standard Gibbs sampling (a multivariate Normal draw for the betas, and an inverse Gamma draw for the conditional error variance). The simulation proper is done in compiled C++ code to maximize efficiency. Please consult the coda documentation for a comprehensive list of functions that can be used to analyze the posterior sample. 
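For instance (a sketch with a simulated level shift; the example below shows the intended use on regression residuals):
\preformatted{
## Sketch: standardized residuals with one mean shift halfway through
set.seed(11)
resid <- c(rnorm(50, mean = -1), rnorm(50, mean = 1))
out <- MCMCresidualBreakAnalysis(resid, m = 1, burnin = 1000, mcmc = 2000)
plotState(out)    ## posterior regime probabilities should switch near t = 50
summary(out)
}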
The model takes the following form: \deqn{y_{i} \sim \mathcal{N}(\beta_{m}, \sigma^2_{m}) \;\; m = 1, \ldots, M}{y_i ~ N(beta_m, sigma^2_m), m = 1,...,M.} We assume standard, semi-conjugate priors: \deqn{\beta \sim \mathcal{N}(b_0,B_0^{-1})}{beta ~ N(b0,B0^(-1))} And: \deqn{\sigma^{-2} \sim \mathcal{G}amma(c_0/2, d_0/2)}{sigma^(-2) ~ Gamma(c0/2, d0/2)} Where \eqn{\beta}{beta} and \eqn{\sigma^{-2}}{sigma^(-2)} are assumed \emph{a priori} independent. And: \deqn{p_{mm} \sim \mathcal{B}eta(a, b),\;\; m = 1, \ldots, M}{p_mm ~ Beta(a, b), m = 1,...,M.} Where \eqn{M}{M} is the number of states. } \references{ Jong Hee Park, 2011. ``A Unified Method for Dynamic and Cross-Sectional Heterogeneity: Introducing Hidden Markov Panel Models." Working Paper. Siddhartha Chib. 1998. ``Estimation and comparison of multiple change-point models.'' \emph{Journal of Econometrics}. 86: 221-241. } \examples{ \dontrun{ line <- list(X = c(-2,-1,0,1,2), Y = c(1,3,3,3,5)) ols <- lm(Y~X) residual <- rstandard(ols) posterior <- MCMCresidualBreakAnalysis(residual, m = 1, data=line, mcmc=1000, verbose=200) plotState(posterior) summary(posterior) } } \keyword{models} \seealso{\code{\link[coda]{plot.mcmc}}, \code{\link[coda]{summary.mcmc}}, \code{\link[stats]{lm}}} MCMCpack/man/MCMCregressChange.Rd0000644000176000001440000002146312133644110016143 0ustar ripleyusers\name{MCMCregressChange} \alias{MCMCregressChange} \title{Markov Chain Monte Carlo for a linear Gaussian Multiple Changepoint Model} \description{ This function generates a sample from the posterior distribution of a linear Gaussian model with multiple changepoints. The function uses the Markov chain Monte Carlo method of Chib (1998). The user supplies data and priors, and a sample from the posterior distribution is returned as an mcmc object, which can be subsequently analyzed with functions provided in the coda package. } \usage{MCMCregressChange(formula, data=parent.frame(), m=1, b0=0, B0=0, c0=0.001, d0=0.001, sigma.mu=NA, sigma.var=NA, a=NULL, b=NULL, mcmc=1000, burnin=1000, thin=1, verbose=0, seed=NA, beta.start=NA, P.start=NA, marginal.likelihood=c("none", "Chib95"), ...)} \arguments{ \item{formula}{Model formula.} \item{data}{Data frame.} \item{m}{The number of changepoints.} \item{b0}{The prior mean of \eqn{\beta}{beta}. This can either be a scalar or a column vector with dimension equal to the number of betas. If this takes a scalar value, then that value will serve as the prior mean for all of the betas.} \item{B0}{The prior precision of \eqn{\beta}{beta}. This can either be a scalar or a square matrix with dimensions equal to the number of betas. If this takes a scalar value, then that value times an identity matrix serves as the prior precision of beta. Default value of 0 is equivalent to an improper uniform prior for beta.} \item{c0}{\eqn{c_0/2}{c0/2} is the shape parameter for the inverse Gamma prior on \eqn{\sigma^2}{sigma^2} (the variance of the disturbances). The amount of information in the inverse Gamma prior is something like that from \eqn{c_0}{c0} pseudo-observations.} \item{d0}{\eqn{d_0/2}{d0/2} is the scale parameter for the inverse Gamma prior on \eqn{\sigma^2}{sigma^2} (the variance of the disturbances). In constructing the inverse Gamma prior, \eqn{d_0}{d0} acts like the sum of squared errors from the \eqn{c_0}{c0} pseudo-observations.} \item{sigma.mu}{The mean of the inverse Gamma prior on \eqn{\sigma^2}{sigma^2}. 
\eqn{sigma.mu}{sigma.mu} and \eqn{sigma.var}{sigma.var} allow users to choose the inverse Gamma prior by choosing its mean and variance. } \item{sigma.var}{The variance of the inverse Gamma prior on \eqn{\sigma^2}{sigma^2}. \eqn{sigma.mu}{sigma.mu} and \eqn{sigma.var}{sigma.var} allow users to choose the inverse Gamma prior by choosing its mean and variance. } \item{a}{\eqn{a}{a} is the shape1 beta prior for transition probabilities. By default, the expected duration is computed and corresponding a and b values are assigned. The expected duration is the sample period divided by the number of states.} \item{b}{\eqn{b}{b} is the shape2 beta prior for transition probabilities. By default, the expected duration is computed and corresponding a and b values are assigned. The expected duration is the sample period divided by the number of states.} \item{burnin}{The number of burn-in iterations for the sampler.} \item{mcmc}{The number of MCMC iterations after burnin.} \item{thin}{The thinning interval used in the simulation. The number of MCMC iterations must be divisible by this value.} \item{verbose}{A switch which determines whether or not the progress of the sampler is printed to the screen. If \code{verbose} is greater than 0 the iteration number, the \eqn{\beta}{beta} vector, and the error variance are printed to the screen every \code{verbose}th iteration.} \item{seed}{The seed for the random number generator. If NA, the Mersenne Twister generator is used with default seed 12345; if an integer is passed it is used to seed the Mersenne twister. The user can also pass a list of length two to use the L'Ecuyer random number generator, which is suitable for parallel computation. The first element of the list is the L'Ecuyer seed, which is a vector of length six or NA (if NA a default seed of \code{rep(12345,6)} is used). The second element of list is a positive substream number. See the MCMCpack specification for more details.} \item{beta.start}{The starting values for the \eqn{\beta}{beta} vector. This can either be a scalar or a column vector with dimension equal to the number of betas. The default value of NA will use the MLE estimate of \eqn{\beta}{beta} as the starting value. If this is a scalar, that value will serve as the starting value mean for all of the betas.} \item{P.start}{The starting values for the transition matrix. A user should provide a square matrix with dimension equal to the number of states. By default, draws from the \code{Beta(0.9, 0.1)} are used to construct a proper transition matrix for each row except the last row.} \item{marginal.likelihood}{How should the marginal likelihood be calculated? Options are: \code{none} in which case the marginal likelihood will not be calculated, and \code{Chib95} in which case the method of Chib (1995) is used.} \item{...}{further arguments to be passed} } \value{ An mcmc object that contains the posterior sample. This object can be summarized by functions provided by the coda package. The object contains an attribute \code{prob.state} storage matrix that contains the probability of \eqn{state_i}{state_i} for each period, the log-likelihood of the model (\code{loglike}), and the log-marginal likelihood of the model (\code{logmarglike}). } \details{ \code{MCMCregressChange} simulates from the posterior distribution of a linear Gaussian model with multiple changepoints.
The model takes the following form: \deqn{y_t=x_t ' \beta_i + I(s_t=i)\varepsilon_{t},\;\; i=1, \ldots, k}{y_t=x_t'beta_i + I(s_t=i)epsilon_t, i=1,...,k.} Where \eqn{k}{k} is the number of states and \eqn{I(s_t=i)}{I(s_t=i)} is an indicator function that becomes 1 when a state at \eqn{t}{t} is \eqn{i}{i} and otherwise 0. The errors are assumed to be Gaussian in each regime: \deqn{I(s_t=i)\varepsilon_{t} \sim \mathcal{N}(0, \sigma^2_i)}{I(s_t=i)epsilon_t ~ N(0, sigma^2_i)} We assume standard, semi-conjugate priors: \deqn{\beta_i \sim \mathcal{N}(b_0,B_0^{-1}),\;\; i=1, \ldots, k}{beta_i ~ N(b0,B0^(-1)), i=1,...,k.} And: \deqn{\sigma^{-2}_i \sim \mathcal{G}amma(c_0/2, d_0/2),\;\; i=1, \ldots, k}{sigma^(-2)_i ~ Gamma(c0/2, d0/2), i=1,...,k.} Where \eqn{\beta_i}{beta_i} and \eqn{\sigma^{-2}_i}{sigma^(-2)_i} are assumed \emph{a priori} independent. The simulation proper is done in compiled C++ code to maximize efficiency. } \references{ Jong Hee Park. 2012. "A Change-point Approach to Intervention Analysis Using Bayesian Inference" Presented at the 2012 Annual Meeting of Korean Statistical Society. Siddhartha Chib. 1995. "Marginal Likelihood from the Gibbs Output." \emph{Journal of the American Statistical Association}. 90: 1313-1321. Siddhartha Chib. 1998. "Estimation and comparison of multiple change-point models." \emph{Journal of Econometrics}. 86: 221-241. Martyn Plummer, Nicky Best, Kate Cowles, and Karen Vines. 2002. \emph{Output Analysis and Diagnostics for MCMC (CODA)}. \url{http://www-fis.iarc.fr/coda/}. } \examples{ \dontrun{ set.seed(1119) n <- 100 x1 <- runif(n) true.beta1 <- c(2, -2) true.beta2 <- c(0, 2) true.Sigma <- c(1, 2) true.s <- rep(1:2, each=n/2) mu1 <- cbind(1, x1[true.s==1])%*%true.beta1 mu2 <- cbind(1, x1[true.s==2])%*%true.beta2 y <- as.ts(c(rnorm(n/2, mu1, sd=sqrt(true.Sigma[1])), rnorm(n/2, mu2, sd=sqrt(true.Sigma[2])))) formula=y ~ x1 ols1 <- lm(y[true.s==1] ~x1[true.s==1]) ols2 <- lm(y[true.s==2] ~x1[true.s==2]) ## prior b0 <- 0 B0 <- 1 sigma.mu=sd(y) sigma.var=var(y) ## models model1 <- MCMCregressChange(formula, m=1, b0=b0, B0=B0, sigma.mu=sigma.mu, sigma.var=sigma.var, marginal.likelihood="Chib95") model2 <- MCMCregressChange(formula, m=2, b0=b0, B0=B0, sigma.mu=sigma.mu, sigma.var=sigma.var, marginal.likelihood="Chib95") model3 <- MCMCregressChange(formula, m=3, b0=b0, B0=B0, sigma.mu=sigma.mu, sigma.var=sigma.var, marginal.likelihood="Chib95") model4 <- MCMCregressChange(formula, m=4, b0=b0, B0=B0, sigma.mu=sigma.mu, sigma.var=sigma.var, marginal.likelihood="Chib95") model5 <- MCMCregressChange(formula, m=5, b0=b0, B0=B0, sigma.mu=sigma.mu, sigma.var=sigma.var, marginal.likelihood="Chib95") print(BayesFactor(model1, model2, model3, model4, model5)) plotState(model1) plotChangepoint(model1) } } \keyword{models} \seealso{\code{\link{plotState}}, \code{\link{plotChangepoint}}} MCMCpack/man/MCMCregress.Rd0000644000176000001440000001571612133644110015041 0ustar ripleyusers\name{MCMCregress} \alias{MCMCregress} \title{Markov Chain Monte Carlo for Gaussian Linear Regression} \description{ This function generates a sample from the posterior distribution of a linear regression model with Gaussian errors using Gibbs sampling (with a multivariate Gaussian prior on the beta vector, and an inverse Gamma prior on the conditional error variance). The user supplies data and priors, and a sample from the posterior distribution is returned as an mcmc object, which can be subsequently analyzed with functions provided in the coda package. 
} \usage{ MCMCregress(formula, data = NULL, burnin = 1000, mcmc = 10000, thin = 1, verbose = 0, seed = NA, beta.start = NA, b0 = 0, B0 = 0, c0 = 0.001, d0 = 0.001, sigma.mu = NA, sigma.var = NA, marginal.likelihood = c("none", "Laplace", "Chib95"), ...) } \arguments{ \item{formula}{Model formula.} \item{data}{Data frame.} \item{burnin}{The number of burn-in iterations for the sampler.} \item{mcmc}{The number of MCMC iterations after burnin.} \item{thin}{The thinning interval used in the simulation. The number of MCMC iterations must be divisible by this value.} \item{verbose}{A switch which determines whether or not the progress of the sampler is printed to the screen. If \code{verbose} is greater than 0 the iteration number, the \eqn{\beta}{beta} vector, and the error variance are printed to the screen every \code{verbose}th iteration.} \item{seed}{The seed for the random number generator. If NA, the Mersenne Twister generator is used with default seed 12345; if an integer is passed it is used to seed the Mersenne twister. The user can also pass a list of length two to use the L'Ecuyer random number generator, which is suitable for parallel computation. The first element of the list is the L'Ecuyer seed, which is a vector of length six or NA (if NA a default seed of \code{rep(12345,6)} is used). The second element of list is a positive substream number. See the MCMCpack specification for more details.} \item{beta.start}{The starting values for the \eqn{\beta}{beta} vector. This can either be a scalar or a column vector with dimension equal to the number of betas. The default value of NA will use the OLS estimate of \eqn{\beta}{beta} as the starting value. If this is a scalar, that value will serve as the starting value mean for all of the betas.} \item{b0}{The prior mean of \eqn{\beta}{beta}. This can either be a scalar or a column vector with dimension equal to the number of betas. If this takes a scalar value, then that value will serve as the prior mean for all of the betas.} \item{B0}{The prior precision of \eqn{\beta}{beta}. This can either be a scalar or a square matrix with dimensions equal to the number of betas. If this takes a scalar value, then that value times an identity matrix serves as the prior precision of beta. Default value of 0 is equivalent to an improper uniform prior for beta.} \item{c0}{\eqn{c_0/2}{c0/2} is the shape parameter for the inverse Gamma prior on \eqn{\sigma^2}{sigma^2} (the variance of the disturbances). The amount of information in the inverse Gamma prior is something like that from \eqn{c_0}{c0} pseudo-observations.} \item{d0}{\eqn{d_0/2}{d0/2} is the scale parameter for the inverse Gamma prior on \eqn{\sigma^2}{sigma^2} (the variance of the disturbances). In constructing the inverse Gamma prior, \eqn{d_0}{d0} acts like the sum of squared errors from the \eqn{c_0}{c0} pseudo-observations.} \item{sigma.mu}{The mean of the inverse Gamma prior on \eqn{\sigma^2}{sigma^2}. \eqn{sigma.mu}{sigma.mu} and \eqn{sigma.var}{sigma.var} allow users to choose the inverse Gamma prior by choosing its mean and variance. } \item{sigma.var}{The variance of the inverse Gamma prior on \eqn{\sigma^2}{sigma^2}. \eqn{sigma.mu}{sigma.mu} and \eqn{sigma.var}{sigma.var} allow users to choose the inverse Gamma prior by choosing its mean and variance. } \item{marginal.likelihood}{How should the marginal likelihood be calculated?
Options are: \code{none} in which case the marginal likelihood will not be calculated, \code{Laplace} in which case the Laplace approximation (see Kass and Raftery, 1995) is used, and \code{Chib95} in which case the method of Chib (1995) is used.} \item{...}{further arguments to be passed} } \value{ An mcmc object that contains the posterior sample. This object can be summarized by functions provided by the coda package. } \details{ \code{MCMCregress} simulates from the posterior distribution using standard Gibbs sampling (a multivariate Normal draw for the betas, and an inverse Gamma draw for the conditional error variance). The simulation proper is done in compiled C++ code to maximize efficiency. Please consult the coda documentation for a comprehensive list of functions that can be used to analyze the posterior sample. The model takes the following form: \deqn{y_i = x_i ' \beta + \varepsilon_{i}}{y_i = x_i'beta + epsilon_i} Where the errors are assumed to be Gaussian: \deqn{\varepsilon_{i} \sim \mathcal{N}(0, \sigma^2)}{epsilon_i ~ N(0, sigma^2)} We assume standard, semi-conjugate priors: \deqn{\beta \sim \mathcal{N}(b_0,B_0^{-1})}{beta ~ N(b0,B0^(-1))} And: \deqn{\sigma^{-2} \sim \mathcal{G}amma(c_0/2, d_0/2)}{sigma^(-2) ~ Gamma(c0/2, d0/2)} Where \eqn{\beta}{beta} and \eqn{\sigma^{-2}}{sigma^(-2)} are assumed \emph{a priori} independent. Note that only starting values for \eqn{\beta}{beta} are allowed because simulation is done using Gibbs sampling with the conditional error variance as the first block in the sampler. } \references{ Andrew D. Martin, Kevin M. Quinn, and Jong Hee Park. 2011. ``MCMCpack: Markov Chain Monte Carlo in R.'', \emph{Journal of Statistical Software}. 42(9): 1-21. \url{http://www.jstatsoft.org/v42/i09/}. Siddhartha Chib. 1995. ``Marginal Likelihood from the Gibbs Output.'' \emph{Journal of the American Statistical Association}. 90: 1313-1321. Robert E. Kass and Adrian E. Raftery. 1995. ``Bayes Factors.'' \emph{Journal of the American Statistical Association}. 90: 773-795. Daniel Pemstein, Kevin M. Quinn, and Andrew D. Martin. 2007. \emph{Scythe Statistical Library 1.0.} \url{http://scythe.wustl.edu}. Martyn Plummer, Nicky Best, Kate Cowles, and Karen Vines. 2002. \emph{Output Analysis and Diagnostics for MCMC (CODA)}. \url{http://www-fis.iarc.fr/coda/}. } \examples{ \dontrun{ line <- list(X = c(-2,-1,0,1,2), Y = c(1,3,3,3,5)) posterior <- MCMCregress(Y~X, b0=0, B0 = 0.1, sigma.mu = 5, sigma.var = 25, data=line, verbose=1000) plot(posterior) raftery.diag(posterior) summary(posterior) } } \keyword{models} \seealso{\code{\link[coda]{plot.mcmc}}, \code{\link[coda]{summary.mcmc}}, \code{\link[stats]{lm}}} MCMCpack/man/MCMCquantreg.Rd0000644000176000001440000001511112133644110015202 0ustar ripleyusers\name{MCMCquantreg} \alias{MCMCquantreg} \title{ Bayesian quantile regression using Gibbs sampling } \description{ This function fits quantile regression models under Bayesian inference. The function samples from the posterior distribution using Gibbs sampling with data augmentation. A multivariate normal prior is assumed for \eqn{\beta}{beta}. The user supplies the prior parameters. A sample of the posterior distribution is returned as an mcmc object, which can then be analysed by functions in the coda package. } \usage{ MCMCquantreg(formula, data = NULL, tau=0.5, burnin = 1000, mcmc = 10000, thin = 1, verbose = 0, seed = sample(1:1000000,1), beta.start = NA, b0 = 0, B0 = 0, ...) } \arguments{ \item{formula}{ Model formula. } \item{data}{ Data frame. 
} \item{tau}{The quantile of interest. Must be between 0 and 1. The default value of 0.5 corresponds to median regression.} \item{burnin}{ The number of burn-in iterations for the sampler. } \item{mcmc}{ The number of MCMC iterations after burnin. } \item{thin}{ The thinning interval used in the simulation. The number of MCMC iterations must be divisible by this value. } \item{verbose}{ A switch which determines whether or not the progress of the sampler is printed to the screen. If \code{verbose} is greater than 0 the iteration number and the most recently sampled values of \eqn{\beta}{beta} and \eqn{\sigma}{sigma} are printed to the screen every \code{verbose}th iteration. } \item{seed}{ The seed for the random number generator. If NA, the Mersenne Twister generator is used with default seed 12345; if an integer is passed it is used to seed the Mersenne twister. The default value for this argument is a random integer between 1 and 1,000,000. This default value ensures that if the function is used again with a different value of \eqn{\tau}{tau}, it is extremely unlikely that the seed will be identical. The user can also pass a list of length two to use the L'Ecuyer random number generator, which is suitable for parallel computation. The first element of the list is the L'Ecuyer seed, which is a vector of length six or NA (if NA a default seed of \code{rep(12345,6)} is used). The second element of list is a positive substream number. See the MCMCpack specification for more details. } \item{beta.start}{ The starting values for \eqn{\beta}{beta}. This can either be a scalar or a column vector with dimension equal to the dimension of \eqn{\beta}{beta}. The default value of NA will use the OLS estimate \eqn{\hat{\beta}}{beta^hat} with \eqn{\hat{\sigma}\Phi^{-1}(\tau)}{sigma^hat*Phi^(-1)(tau)} added on to the first element of \eqn{\hat{\beta}}{beta^hat} as the starting value. (\eqn{\hat{\sigma}^2}{(sigma^hat)^2} denotes the usual unbiased estimator of \eqn{\sigma^2}{sigma^2} under ordinary mean regression and \eqn{\Phi^{-1}(\tau)}{Phi^(-1)(tau)} denotes the inverse of the cumulative density function of the standard normal distribution.) Note that the default value assume that an intercept is included in the model. If a scalar is given, that value will serve as the starting value for all \eqn{\beta}{beta}. } \item{b0}{ The prior mean of \eqn{\beta}{beta}. This can either be a scalar or a column vector with dimension equal to the dimension of \eqn{\beta}{beta}. If this takes a scalar value, then that value will serve as the prior mean for all \eqn{\beta}{beta}. } \item{B0}{ The prior precision of \eqn{\beta}{beta}. This can either be a scalar or a square matrix with dimensions equal to the number of betas. If this takes a scalar value, then that value times an identity matrix serves as the prior precision of \eqn{\beta}{beta}. Default value of 0 is equivalent to an improper uniform prior for \eqn{\beta}{beta}. } \item{\dots}{ further arguments to be passed } } \value{ An mcmc object that contains the posterior sample. This object can be summarised by functions provided by the coda package. } \details{ \code{MCMCquantreg} simulates from the posterior distribution using Gibbs sampling with data augmentation (see \url{http://people.brunel.ac.uk/~mastkky/}). \eqn{\beta}{beta} are drawn from a multivariate normal distribution. The augmented data are drawn conditionally from the inverse Gaussian distribution. The simulation is carried out in compiled C++ code to maximise efficiency. 
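The working likelihood targeted by this augmentation scheme is the asymmetric Laplace density described in the next paragraph; as a quick illustration (unit scale assumed; this is a sketch, not the package's code), its log-density is just the familiar check loss:
\preformatted{
## illustrative only: asymmetric Laplace log-density with unit scale
check <- function(u, tau) u * (tau - (u < 0))
ald.logdens <- function(y, mu, tau) log(tau * (1 - tau)) - check(y - mu, tau)
## maximising sum(ald.logdens(y, beta0 + beta1 * x, tau)) over (beta0, beta1)
## reproduces the usual tau-th quantile regression fit
}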
Please consult the coda documentation for a comprehensive list of functions that can be used to analyse the posterior sample. We assume the model \deqn{Q_{\tau}(y_i|x_i) = x_i'\beta}{Q_tau(y_i|x_i) = x_i'beta}, where \eqn{Q_{\tau}(y_i|x_i)}{Q_tau(y_i|x_i)} denotes the conditional \eqn{\tau}{tau}th quantile of \eqn{y_i}{y_i} given \eqn{x_i}{x_i}, and \eqn{\beta=\beta(\tau)}{beta=beta(tau)} are the regression parameters possibly dependent on \eqn{\tau}{tau}. The likelihood is formed based on assuming independent Asymmetric Laplace distributions on the \eqn{y_i}{y_i} with skewness parameter \eqn{\tau}{tau} and location parameters \eqn{x_i'\beta}{x_i'beta}. This assumption ensures that the likelihood function is maximised by the \eqn{\tau}{tau}th conditional quantile of the response variable. We assume standard, semi-conjugate priors on \eqn{\beta}{beta}: \deqn{\beta \sim \mathcal{N}(b_0,B_0^{-1})}{beta ~ N(b0,B0^(-1))}. Only starting values for \eqn{\beta}{beta} are allowed for this sampler. } \author{ Craig Reed} \references{ Daniel Pemstein, Kevin M. Quinn, and Andrew D. Martin. 2007. \emph{Scythe Statistical Library 1.2.} \url{http://scythe.wustl.edu}. Craig Reed and Keming Yu. 2009. ``An Efficient Gibbs Sampler for Bayesian Quantile Regression.'' Technical Report. Keming Yu and Jin Zhang. 2005. ``A Three Parameter Asymmetric Laplace Distribution and it's extensions.'' \emph{Communications in Statistics - Theory and Methods}, 34, 1867-1879. Martyn Plummer, Nicky Best, Kate Cowles, and Karen Vines. 2002. \emph{Output Analysis and Diagnostics for MCMC (CODA)}. \url{http://www-fis.iarc.fr/coda/}.} \examples{ \dontrun{ x<-rep(1:10,5) y<-rnorm(50,mean=x) posterior_50 <- MCMCquantreg(y~x) posterior_95 <- MCMCquantreg(y~x, tau=0.95, verbose=10000, mcmc=50000, thin=10, seed=2) plot(posterior_50) plot(posterior_95) raftery.diag(posterior_50) autocorr.plot(posterior_95) summary(posterior_50) summary(posterior_95) } } \keyword{models} \seealso{ \code{\link[MCMCpack]{MCMCregress}}, \code{\link[coda]{plot.mcmc}}, \code{\link[coda]{summary.mcmc}}, \code{\link[stats]{lm}}, \code{\link[quantreg]{rq}}} MCMCpack/man/MCMCprobitChange.Rd0000644000176000001440000001767312133644110016000 0ustar ripleyusers\name{MCMCprobitChange} \alias{MCMCprobitChange} \title{Markov Chain Monte Carlo for a linear Gaussian Multiple Changepoint Model} \description{ This function generates a sample from the posterior distribution of a linear Gaussian model with multiple changepoints. The function uses the Markov chain Monte Carlo method of Chib (1998). The user supplies data and priors, and a sample from the posterior distribution is returned as an mcmc object, which can be subsequently analyzed with functions provided in the coda package. } \usage{MCMCprobitChange(formula, data=parent.frame(), m = 1, burnin = 10000, mcmc = 10000, thin = 1, verbose = 0, seed = NA, beta.start = NA, P.start = NA, b0 = NULL, B0 = NULL, a = NULL, b = NULL, marginal.likelihood = c("none", "Chib95"), ...)} \arguments{ \item{formula}{Model formula.} \item{data}{Data frame.} \item{m}{The number of changepoints.} \item{burnin}{The number of burn-in iterations for the sampler.} \item{mcmc}{The number of MCMC iterations after burnin.} \item{thin}{The thinning interval used in the simulation. The number of MCMC iterations must be divisible by this value.} \item{verbose}{A switch which determines whether or not the progress of the sampler is printed to the screen. 
If \code{verbose} is greater than 0 the iteration number, the \eqn{\beta}{beta} vector, and the error variance are printed to the screen every \code{verbose}th iteration.} \item{seed}{The seed for the random number generator. If NA, the Mersenne Twister generator is used with default seed 12345; if an integer is passed it is used to seed the Mersenne twister. The user can also pass a list of length two to use the L'Ecuyer random number generator, which is suitable for parallel computation. The first element of the list is the L'Ecuyer seed, which is a vector of length six or NA (if NA a default seed of \code{rep(12345,6)} is used). The second element of list is a positive substream number. See the MCMCpack specification for more details.} \item{beta.start}{The starting values for the \eqn{\beta}{beta} vector. This can either be a scalar or a column vector with dimension equal to the number of betas. The default value of NA will use the MLE estimate of \eqn{\beta}{beta} as the starting value. If this is a scalar, that value will serve as the starting value mean for all of the betas.} \item{P.start}{The starting values for the transition matrix. A user should provide a square matrix with dimension equal to the number of states. By default, draws from the \code{Beta(0.9, 0.1)} distribution are used to construct a proper transition matrix for each row except the last row.} \item{b0}{The prior mean of \eqn{\beta}{beta}. This can either be a scalar or a column vector with dimension equal to the number of betas. If this takes a scalar value, then that value will serve as the prior mean for all of the betas.} \item{B0}{The prior precision of \eqn{\beta}{beta}. This can either be a scalar or a square matrix with dimensions equal to the number of betas. If this takes a scalar value, then that value times an identity matrix serves as the prior precision of beta. Default value of 0 is equivalent to an improper uniform prior for beta.} \item{a}{\eqn{a}{a} is the shape1 parameter of the Beta prior on the transition probabilities. By default, the expected duration is computed and corresponding a and b values are assigned. The expected duration is the sample period divided by the number of states.} \item{b}{\eqn{b}{b} is the shape2 parameter of the Beta prior on the transition probabilities. By default, the expected duration is computed and corresponding a and b values are assigned. The expected duration is the sample period divided by the number of states.} \item{marginal.likelihood}{How should the marginal likelihood be calculated? Options are: \code{none} in which case the marginal likelihood will not be calculated, and \code{Chib95} in which case the method of Chib (1995) is used.} \item{...}{further arguments to be passed} } \value{ An mcmc object that contains the posterior sample. This object can be summarized by functions provided by the coda package. The object contains an attribute \code{prob.state} storage matrix that contains the probability of \eqn{state_i}{state_i} for each period, the log-likelihood of the model (\code{loglike}), and the log-marginal likelihood of the model (\code{logmarglike}). } \details{ \code{MCMCprobitChange} simulates from the posterior distribution of a probit regression model with multiple parameter breaks. The simulation is based on Chib (1998) and Park (2011). The model takes the following form: \deqn{\Pr(y_t = 1) = \Phi(x_i'\beta_m) \;\; m = 1, \ldots, M}{ Pr(y_t = 1) = Phi(x_i'beta_m)} Where \eqn{M}{M} is the number of states, and \eqn{\beta_m}{beta_m} is the parameter vector when the state at \eqn{t}{t} is \eqn{m}{m}.
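For intuition about the \code{a} and \code{b} arguments: given a self-transition probability \eqn{p}{p}, a regime lasts \eqn{1/(1-p)}{1/(1-p)} periods on average, so a Beta prior with mean 0.9 corresponds roughly to ten-period regimes. A back-of-the-envelope reading (not the exact rule used internally):
\preformatted{
## illustrative only: reading a Beta(a, b) prior on the self-transition
## probability as a prior guess about regime length
shape1 <- 9; shape2 <- 1               # hypothetical choices for a and b
p.mean <- shape1 / (shape1 + shape2)   # prior mean of p is 0.9
1 / (1 - p.mean)                       # about 10 periods expected per regime
}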
We assume Gaussian distribution for prior of \eqn{\beta}{beta}: \deqn{\beta_m \sim \mathcal{N}(b_0,B_0^{-1}),\;\; m = 1, \ldots, M}{beta_m ~ N(b0,B0^(-1)), m = 1,...,M.} And: \deqn{p_{mm} \sim \mathcal{B}eta(a, b),\;\; m = 1, \ldots, M}{p_mm ~ Beta(a, b), m = 1,...,M.} Where \eqn{M}{M} is the number of states. } \author{Jong Hee Park, \email{jhp@uchicago.edu}, \url{http://home.uchicago.edu/~jhp/}.} \references{ Jong Hee Park. 2011. ``Changepoint Analysis of Binary and Ordinal Probit Models: An Application to Bank Rate Policy Under the Interwar Gold Standard." \emph{Political Analysis}. 19: 188-204. Andrew D. Martin, Kevin M. Quinn, and Jong Hee Park. 2011. ``MCMCpack: Markov Chain Monte Carlo in R.'', \emph{Journal of Statistical Software}. 42(9): 1-21. \url{http://www.jstatsoft.org/v42/i09/}. Siddhartha Chib. 1998. ``Estimation and comparison of multiple change-point models.'' \emph{Journal of Econometrics}. 86: 221-241. Albert, J. H. and S. Chib. 1993. ``Bayesian Analysis of Binary and Polychotomous Response Data.'' \emph{J. Amer. Statist. Assoc.} 88, 669-679 } \examples{ \dontrun{ set.seed(1973) x1 <- rnorm(300, 0, 1) true.beta <- c(-.5, .2, 1) true.alpha <- c(.1, -1., .2) X <- cbind(1, x1) ## set two true breaks at 100 and 200 true.phi1 <- pnorm(true.alpha[1] + x1[1:100]*true.beta[1]) true.phi2 <- pnorm(true.alpha[2] + x1[101:200]*true.beta[2]) true.phi3 <- pnorm(true.alpha[3] + x1[201:300]*true.beta[3]) ## generate y y1 <- rbinom(100, 1, true.phi1) y2 <- rbinom(100, 1, true.phi2) y3 <- rbinom(100, 1, true.phi3) Y <- as.ts(c(y1, y2, y3)) ## fit multiple models with a varying number of breaks out0 <- MCMCprobitChange(formula=Y~X-1, data=parent.frame(), m=0, mcmc=1000, burnin=1000, thin=1, verbose=1000, b0 = 0, B0 = 10, a = 1, b = 1, marginal.likelihood = c("Chib95")) out1 <- MCMCprobitChange(formula=Y~X-1, data=parent.frame(), m=1, mcmc=1000, burnin=1000, thin=1, verbose=1000, b0 = 0, B0 = 10, a = 1, b = 1, marginal.likelihood = c("Chib95")) out2 <- MCMCprobitChange(formula=Y~X-1, data=parent.frame(), m=2, mcmc=1000, burnin=1000, thin=1, verbose=1000, b0 = 0, B0 = 10, a = 1, b = 1, marginal.likelihood = c("Chib95")) out3 <- MCMCprobitChange(formula=Y~X-1, data=parent.frame(), m=3, mcmc=1000, burnin=1000, thin=1, verbose=1000, b0 = 0, B0 = 10, a = 1, b = 1, marginal.likelihood = c("Chib95")) ## find the most reasonable one BayesFactor(out0, out1, out2, out3) ## draw plots using the "right" model plotState(out2) plotChangepoint(out2) } } \keyword{models} \seealso{\code{\link{plotState}}, \code{\link{plotChangepoint}}} MCMCpack/man/MCMCprobit.Rd0000644000176000001440000001412112133644110014653 0ustar ripleyusers\name{MCMCprobit} \alias{MCMCprobit} \title{Markov Chain Monte Carlo for Probit Regression} \description{ This function generates a sample from the posterior distribution of a probit regression model using the data augmentation approach of Albert and Chib (1993). The user supplies data and priors, and a sample from the posterior distribution is returned as an mcmc object, which can be subsequently analyzed with functions provided in the coda package. } \usage{ MCMCprobit(formula, data = NULL, burnin = 1000, mcmc = 10000, thin = 1, verbose = 0, seed = NA, beta.start = NA, b0 = 0, B0 = 0, bayes.resid = FALSE, marginal.likelihood=c("none", "Laplace", "Chib95"), ...) 
} \arguments{ \item{formula}{Model formula.} \item{data}{Data frame.} \item{burnin}{The number of burn-in iterations for the sampler.} \item{mcmc}{The number of Gibbs iterations for the sampler.} \item{thin}{The thinning interval used in the simulation. The number of Gibbs iterations must be divisible by this value.} \item{verbose}{A switch which determines whether or not the progress of the sampler is printed to the screen. If \code{verbose} is greater than 0 the iteration number and the betas are printed to the screen every \code{verbose}th iteration.} \item{seed}{The seed for the random number generator. If NA, the Mersenne Twister generator is used with default seed 12345; if an integer is passed it is used to seed the Mersenne twister. The user can also pass a list of length two to use the L'Ecuyer random number generator, which is suitable for parallel computation. The first element of the list is the L'Ecuyer seed, which is a vector of length six or NA (if NA a default seed of \code{rep(12345,6)} is used). The second element of list is a positive substream number. See the MCMCpack specification for more details.} \item{beta.start}{The starting value for the \eqn{\beta}{beta} vector. This can either be a scalar or a column vector with dimension equal to the number of betas. If this takes a scalar value, then that value will serve as the starting value for all of the betas. The default value of NA will use the maximum likelihood estimate of \eqn{\beta}{beta} as the starting value.} \item{b0}{The prior mean of \eqn{\beta}{beta}. This can either be a scalar or a column vector with dimension equal to the number of betas. If this takes a scalar value, then that value will serve as the prior mean for all of the betas.} \item{B0}{The prior precision of \eqn{\beta}{beta}. This can either be a scalar or a square matrix with dimensions equal to the number of betas. If this takes a scalar value, then that value times an identity matrix serves as the prior precision of \eqn{\beta}{beta}. Default value of 0 is equivalent to an improper uniform prior on \eqn{\beta}{beta}.} \item{bayes.resid}{Should latent Bayesian residuals (Albert and Chib, 1995) be returned? Default is FALSE meaning no residuals should be returned. Alternatively, the user can specify an array of integers giving the observation numbers for which latent residuals should be calculated and returned. TRUE will return draws of latent residuals for all observations.} \item{marginal.likelihood}{How should the marginal likelihood be calculated? Options are: \code{none} in which case the marginal likelihood will not be calculated, \code{Laplace} in which case the Laplace approximation (see Kass and Raftery, 1995) is used, or \code{Chib95} in which case Chib (1995) method is used.} \item{...}{further arguments to be passed} } \value{ An mcmc object that contains the posterior sample. This object can be summarized by functions provided by the coda package. } \details{ \code{MCMCprobit} simulates from the posterior distribution of a probit regression model using data augmentation. The simulation proper is done in compiled C++ code to maximize efficiency. Please consult the coda documentation for a comprehensive list of functions that can be used to analyze the posterior sample. 
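As a rough illustration of the data augmentation idea, a single Gibbs pass alternates between truncated normal draws for the latent utilities and a multivariate normal draw for \eqn{\beta}{beta}. The sketch below assumes \code{b0} is the prior mean vector and \code{B0} the prior precision matrix; the function name is hypothetical and this is not the package's compiled C++ code:
\preformatted{
probit.gibbs.step <- function(y, X, beta, b0, B0) {
  ## 1. latent utilities: z_i ~ N(x_i'beta, 1), truncated to be positive when
  ##    y_i = 1 and negative when y_i = 0 (naive inverse-CDF draw)
  mu <- drop(crossprod(t(X), beta))                    # X beta
  p0 <- pnorm(0, mean = mu)
  u  <- runif(length(y), min = ifelse(y == 1, p0, 0),
              max = ifelse(y == 1, 1, p0))
  z  <- qnorm(u, mean = mu)
  ## 2. beta | z ~ N(m, V) with V = (B0 + X'X)^(-1) and m = V (B0 b0 + X'z)
  V <- solve(B0 + crossprod(X))
  m <- drop(crossprod(t(V), crossprod(B0, b0) + crossprod(X, z)))
  drop(m + crossprod(chol(V), rnorm(ncol(X))))         # one draw from N(m, V)
}
}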
The model takes the following form: \deqn{y_i \sim \mathcal{B}ernoulli(\pi_i)}{y_i ~ Bernoulli(pi_i)} Where the inverse link function: \deqn{\pi_i = \Phi(x_i'\beta)}{pi_i = Phi(x_i'beta)} We assume a multivariate Normal prior on \eqn{\beta}{beta}: \deqn{\beta \sim \mathcal{N}(b_0,B_0^{-1})}{beta ~ N(b0,B0^(-1))} See Albert and Chib (1993) for estimation details. } \references{ Albert, J. H. and S. Chib. 1993. ``Bayesian Analysis of Binary and Polychotomous Response Data.'' \emph{J. Amer. Statist. Assoc.} 88, 669-679 Albert, J. H. and S. Chib. 1995. ``Bayesian Residual Analysis for Binary Response Regression Models.'' \emph{Biometrika.} 82, 747-759. Andrew D. Martin, Kevin M. Quinn, and Jong Hee Park. 2011. ``MCMCpack: Markov Chain Monte Carlo in R.'', \emph{Journal of Statistical Software}. 42(9): 1-21. \url{http://www.jstatsoft.org/v42/i09/}. Siddhartha Chib. 1995. ``Marginal Likelihood from the Gibbs Output.'' \emph{Journal of the American Statistical Association}. 90: 1313-1321. Daniel Pemstein, Kevin M. Quinn, and Andrew D. Martin. 2007. \emph{Scythe Statistical Library 1.0.} \url{http://scythe.wustl.edu}. Martyn Plummer, Nicky Best, Kate Cowles, and Karen Vines. 2002. \emph{Output Analysis and Diagnostics for MCMC (CODA)}. \url{http://www-fis.iarc.fr/coda/}. } \examples{ \dontrun{ data(birthwt) out1 <- MCMCprobit(low~as.factor(race)+smoke, data=birthwt, b0 = 0, B0 = 10, marginal.likelihood="Chib95") out2 <- MCMCprobit(low~age+as.factor(race), data=birthwt, b0 = 0, B0 = 10, marginal.likelihood="Chib95") out3 <- MCMCprobit(low~age+as.factor(race)+smoke, data=birthwt, b0 = 0, B0 = 10, marginal.likelihood="Chib95") BayesFactor(out1, out2, out3) plot(out3) summary(out3) } } \keyword{models} \seealso{\code{\link[coda]{plot.mcmc}},\code{\link[coda]{summary.mcmc}}, \code{\link[stats]{glm}}} MCMCpack/man/MCMCpoissonChange.Rd0000644000176000001440000002201112140060721016147 0ustar ripleyusers\name{MCMCpoissonChange} \alias{MCMCpoissonChange} \title{Markov Chain Monte Carlo for a Poisson Regression Changepoint Model} \description{ This function generates a sample from the posterior distribution of a Poisson regression model with multiple changepoints. The function uses the Markov chain Monte Carlo method of Chib (1998). The user supplies data and priors, and a sample from the posterior distribution is returned as an mcmc object, which can be subsequently analyzed with functions provided in the coda package. } \usage{MCMCpoissonChange( formula, data = parent.frame(), m = 1, b0 = 0, B0 = 1, a = NULL, b = NULL, c0 = NA, d0 = NA, lambda.mu = NA, lambda.var = NA, burnin = 1000, mcmc = 1000, thin = 1, verbose = 0, seed = NA, beta.start = NA, P.start = NA, marginal.likelihood = c("none", "Chib95"), ...)} \arguments{ \item{formula}{Model formula.} \item{data}{Data frame.} \item{m}{The number of changepoints.} \item{b0}{The prior mean of \eqn{\beta}{beta}. This can either be a scalar or a column vector with dimension equal to the number of betas. If this takes a scalar value, then that value will serve as the prior mean for all of the betas.} \item{B0}{The prior precision of \eqn{\beta}{beta}. This can either be a scalar or a square matrix with dimensions equal to the number of betas. If this takes a scalar value, then that value times an identity matrix serves as the prior precision of beta. Default value of 0 is equivalent to an improper uniform prior for beta.} \item{a}{\eqn{a}{a} is the shape1 beta prior for transition probabilities. 
By default, the expected duration is computed and corresponding a and b values are assigned. The expected duration is the sample period divided by the number of states.} \item{b}{\eqn{b}{b} is the shape2 beta prior for transition probabilities. By default, the expected duration is computed and corresponding a and b values are assigned. The expected duration is the sample period divided by the number of states.} \item{c0}{\eqn{c_0}{c0} is the shape parameter for the Gamma prior on \eqn{\lambda}{lambda} (the mean). When there is no covariate, this should be provided by users. No default value is provided.} \item{d0}{\eqn{d_0}{d0} is the scale parameter for the Gamma prior on \eqn{\lambda}{lambda} (the mean). When there is no covariate, this should be provided by users. No default value is provided.} \item{lambda.mu}{The mean of the Gamma prior on \eqn{\lambda}{lambda}. \eqn{lambda.mu}{lambda.mu} and \eqn{lambda.var}{lambda.var} allow users to choose the Gamma prior by choosing its mean and variance. } \item{lambda.var}{The variance of the Gamma prior on \eqn{\lambda}{lambda}. \eqn{lambda.mu}{lambda.mu} and \eqn{lambda.var}{lambda.var} allow users to choose the Gamma prior by choosing its mean and variance. } \item{burnin}{The number of burn-in iterations for the sampler.} \item{mcmc}{The number of MCMC iterations after burn-in.} \item{thin}{The thinning interval used in the simulation. The number of MCMC iterations must be divisible by this value.} \item{verbose}{A switch which determines whether or not the progress of the sampler is printed to the screen. If \code{verbose} is greater than 0, the iteration number and the posterior density samples are printed to the screen every \code{verbose}th iteration.} \item{seed}{The seed for the random number generator. If NA, the current R system seed is used.} \item{beta.start}{The starting values for the beta vector. This can either be a scalar or a column vector with dimension equal to the number of betas. The default value of NA will use draws from a Uniform distribution with the same bounds as the data as the starting values. If this is a scalar, that value will serve as the starting value mean for all of the betas. When there is no covariate, the log of the means should be used.} \item{P.start}{The starting values for the transition matrix. A user should provide a square matrix with dimension equal to the number of states. By default, draws from the \code{Beta(0.9, 0.1)} distribution are used to construct a proper transition matrix for each row except the last row.} \item{marginal.likelihood}{How should the marginal likelihood be calculated? Options are: \code{none} in which case the marginal likelihood will not be calculated, and \code{Chib95} in which case the method of Chib (1995) is used.} \item{...}{further arguments to be passed} } \value{ An mcmc object that contains the posterior sample. This object can be summarized by functions provided by the coda package. The object contains an attribute \code{prob.state} storage matrix that contains the probability of \eqn{state_i}{state_i} for each period, and the log-marginal likelihood of the model (\code{logmarglike}). } \details{ \code{MCMCpoissonChange} simulates from the posterior distribution of a Poisson regression model with multiple changepoints using the methods of Chib (1998) and Fruhwirth-Schnatter and Wagner (2006). The details of the model are discussed in Park (2010).
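In Chib's (1998) formulation the hidden state can only stay where it is or move forward to the next regime, so the implied transition matrix is upper triangular with an absorbing final state. A small sketch (the helper below is hypothetical, not a function in MCMCpack):
\preformatted{
## illustrative only: restricted transition matrix for m changepoints
## (m + 1 regimes), with stay-probability p in every non-terminal regime
make.P <- function(m, p = 0.95) {
  ns <- m + 1
  P <- diag(p, ns)
  for (i in seq_len(ns - 1)) P[i, i + 1] <- 1 - p
  P[ns, ns] <- 1                       # the final regime is absorbing
  P
}
make.P(2)
}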
The model takes the following form: \deqn{y_t \sim \mathcal{P}oisson(\mu_t)}{y_t ~ Poisson(mu_t)} \deqn{\mu_t = x_t ' \beta_m,\;\; m = 1, \ldots, M}{mu_t = x_t'beta_m, m = 1,...,M.} Where \eqn{M}{M} is the number of states and \eqn{\beta_m}{beta_m} is paramters when a state is \eqn{m}{m} at \eqn{t}{t}. We assume Gaussian distribution for prior of \eqn{\beta}{beta}: \deqn{\beta_m \sim \mathcal{N}(b_0,B_0^{-1}),\;\; m = 1, \ldots, M}{beta_m ~ N(b0,B0^(-1)), m = 1,...,M.} And: \deqn{p_{mm} \sim \mathcal{B}eta(a, b),\;\; m = 1, \ldots, M}{p_mm ~ Beta(a, b), m = 1,...,M.} Where \eqn{M}{M} is the number of states. } \author{Jong Hee Park, \email{jhp@uchicago.edu}, \url{http://home.uchicago.edu/~jhp/}.} \references{ Jong Hee Park. 2010. ``Structural Change in the U.S. Presidents' Use of Force Abroad.'' \emph{American Journal of Political Science} 54: 766-782. Sylvia Fruhwirth-Schnatter and Helga Wagner 2006. ``Auxiliary Mixture Sampling for Parameter-driven Models of Time Series of Counts with Applications to State Space Modelling.'' \emph{Biometrika}. 93:827--841. Siddhartha Chib. 1998. ``Estimation and comparison of multiple change-point models.'' \emph{Journal of Econometrics}. 86: 221-241. Andrew D. Martin, Kevin M. Quinn, and Jong Hee Park. 2011. ``MCMCpack: Markov Chain Monte Carlo in R.'', \emph{Journal of Statistical Software}. 42(9): 1-21. \url{http://www.jstatsoft.org/v42/i09/}. Siddhartha Chib. 1995. ``Marginal Likelihood from the Gibbs Output.'' \emph{Journal of the American Statistical Association}. 90: 1313-1321. } \examples{ \dontrun{ set.seed(11119) n <- 150 x1 <- runif(n, 0, 0.5) true.beta1 <- c(1, 1) true.beta2 <- c(1, -2) true.beta3 <- c(1, 2) ## set true two breaks at (50, 100) true.s <- rep(1:3, each=n/3) mu1 <- exp(1 + x1[true.s==1]*1) mu2 <- exp(1 + x1[true.s==2]*-2) mu3 <- exp(1 + x1[true.s==3]*2) y <- as.ts(c(rpois(n/3, mu1), rpois(n/3, mu2), rpois(n/3, mu3))) formula = y ~ x1 ## fit multiple models with a varying number of breaks model0 <- MCMCpoissonChange(formula, m=0, mcmc = 1000, burnin = 1000, verbose = 500, b0 = rep(0, 2), B0 = 5*diag(2), marginal.likelihood = "Chib95") model1 <- MCMCpoissonChange(formula, m=1, mcmc = 1000, burnin = 1000, verbose = 500, b0 = rep(0, 2), B0 = 5*diag(2), marginal.likelihood = "Chib95") model2 <- MCMCpoissonChange(formula, m=2, mcmc = 1000, burnin = 1000, verbose = 500, b0 = rep(0, 2), B0 = 5*diag(2), marginal.likelihood = "Chib95") model3 <- MCMCpoissonChange(formula, m=3, mcmc = 1000, burnin = 1000, verbose = 500, b0 = rep(0, 2), B0 = 5*diag(2), marginal.likelihood = "Chib95") model4 <- MCMCpoissonChange(formula, m=4, mcmc = 1000, burnin = 1000, verbose = 500, b0 = rep(0, 2), B0 = 5*diag(2), marginal.likelihood = "Chib95") model5 <- MCMCpoissonChange(formula, m=5, mcmc = 1000, burnin = 1000, verbose = 500, b0 = rep(0, 2), B0 = 5*diag(2), marginal.likelihood = "Chib95") ## find the most reasonable one print(BayesFactor(model0, model1, model2, model3, model4, model5)) ## draw plots using the "right" model par(mfrow=c(attr(model2, "m") + 1, 1), mai=c(0.4, 0.6, 0.3, 0.05)) plotState(model2, legend.control = c(1, 0.6)) plotChangepoint(model2, verbose = TRUE, ylab="Density", start=1, overlay=TRUE) ## No covariate case model2.1 <- MCMCpoissonChange(y ~ 1, m = 2, c0 = 2, d0 = 1, mcmc = 1000, burnin = 1000, verbose = 500, marginal.likelihood = "Chib95") print(BayesFactor(model2, model2.1)) } } \keyword{models} \seealso{\code{\link{MCMCbinaryChange}}, \code{\link{plotState}}, \code{\link{plotChangepoint}}} 
MCMCpack/man/MCMCpoisson.Rd0000644000176000001440000001327612133644110015060 0ustar ripleyusers\name{MCMCpoisson} \alias{MCMCpoisson} \title{Markov Chain Monte Carlo for Poisson Regression} \description{ This function generates a sample from the posterior distribution of a Poisson regression model using a random walk Metropolis algorithm. The user supplies data and priors, and a sample from the posterior distribution is returned as an mcmc object, which can be subsequently analyzed with functions provided in the coda package. } \usage{ MCMCpoisson(formula, data = NULL, burnin = 1000, mcmc = 10000, thin = 1, tune = 1.1, verbose = 0, seed = NA, beta.start = NA, b0 = 0, B0 = 0, marginal.likelihood = c("none", "Laplace"), ...) } \arguments{ \item{formula}{Model formula.} \item{data}{Data frame.} \item{burnin}{The number of burn-in iterations for the sampler.} \item{mcmc}{The number of Metropolis iterations for the sampler.} \item{thin}{The thinning interval used in the simulation. The number of mcmc iterations must be divisible by this value.} \item{tune}{Metropolis tuning parameter. Can be either a positive scalar or a \eqn{k}{k}-vector, where \eqn{k}{k} is the length of \eqn{\beta}{beta}.Make sure that the acceptance rate is satisfactory (typically between 0.20 and 0.5) before using the posterior sample for inference.} \item{verbose}{A switch which determines whether or not the progress of the sampler is printed to the screen. If \code{verbose} is greater than 0 the iteration number, the current beta vector, and the Metropolis acceptance rate are printed to the screen every \code{verbose}th iteration.} \item{seed}{The seed for the random number generator. If NA, the Mersenne Twister generator is used with default seed 12345; if an integer is passed it is used to seed the Mersenne twister. The user can also pass a list of length two to use the L'Ecuyer random number generator, which is suitable for parallel computation. The first element of the list is the L'Ecuyer seed, which is a vector of length six or NA (if NA a default seed of \code{rep(12345,6)} is used). The second element of list is a positive substream number. See the MCMCpack specification for more details.} \item{beta.start}{The starting value for the \eqn{\beta}{beta} vector. This can either be a scalar or a column vector with dimension equal to the number of betas. If this takes a scalar value, then that value will serve as the starting value for all of the betas. The default value of NA will use the maximum likelihood estimate of \eqn{\beta}{beta} as the starting value.} \item{b0}{The prior mean of \eqn{\beta}{beta}. This can either be a scalar or a column vector with dimension equal to the number of betas. If this takes a scalar value, then that value will serve as the prior mean for all of the betas.} \item{B0}{The prior precision of \eqn{\beta}{beta}. This can either be a scalar or a square matrix with dimensions equal to the number of betas. If this takes a scalar value, then that value times an identity matrix serves as the prior precision of \eqn{\beta}{beta}. Default value of 0 is equivalent to an improper uniform prior for beta.} \item{marginal.likelihood}{How should the marginal likelihood be calculated? Options are: \code{none} in which case the marginal likelihood will not be calculated or \code{Laplace} in which case the Laplace approximation (see Kass and Raftery, 1995) is used.} \item{\ldots}{further arguments to be passed} } \value{ An mcmc object that contains the posterior sample. 
This object can be summarized by functions provided by the coda package. } \details{\code{MCMCpoisson} simulates from the posterior distribution of a Poisson regression model using a random walk Metropolis algorithm. The simulation proper is done in compiled C++ code to maximize efficiency. Please consult the coda documentation for a comprehensive list of functions that can be used to analyze the posterior sample. The model takes the following form: \deqn{y_i \sim \mathcal{P}oisson(\mu_i)}{y_i ~ Poisson(mu_i)} Where the inverse link function is: \deqn{\mu_i = \exp(x_i'\beta)}{mu_i = exp(x_i'beta)} We assume a multivariate Normal prior on \eqn{\beta}{beta}: \deqn{\beta \sim \mathcal{N}(b_0,B_0^{-1})}{beta ~ N(b0,B0^(-1))} The Metropolis proposal distribution is centered at the current value of \eqn{\beta}{beta} and has variance-covariance \eqn{V = T (B_0 + C^{-1})^{-1} T }{V = T (B0 + C^{-1})^{-1} T}, where \eqn{T}{T} is the diagonal positive definite matrix formed from the \code{tune}, \eqn{B_0}{B0} is the prior precision, and \eqn{C}{C} is the large sample variance-covariance matrix of the MLEs. This last calculation is done via an initial call to \code{glm}. } \references{ Andrew D. Martin, Kevin M. Quinn, and Jong Hee Park. 2011. ``MCMCpack: Markov Chain Monte Carlo in R.'', \emph{Journal of Statistical Software}. 42(9): 1-21. \url{http://www.jstatsoft.org/v42/i09/}. Daniel Pemstein, Kevin M. Quinn, and Andrew D. Martin. 2007. \emph{Scythe Statistical Library 1.0.} \url{http://scythe.wustl.edu}. Martyn Plummer, Nicky Best, Kate Cowles, and Karen Vines. 2002. \emph{Output Analysis and Diagnostics for MCMC (CODA)}. \url{http://www-fis.iarc.fr/coda/}. } \examples{ \dontrun{ counts <- c(18,17,15,20,10,20,25,13,12) outcome <- gl(3,1,9) treatment <- gl(3,3) posterior <- MCMCpoisson(counts ~ outcome + treatment) plot(posterior) summary(posterior) } } \keyword{models} \seealso{\code{\link[coda]{plot.mcmc}},\code{\link[coda]{summary.mcmc}}, \code{\link[stats]{glm}}} MCMCpack/man/MCMCordfactanal.Rd0000644000176000001440000002374312133644110015644 0ustar ripleyusers\name{MCMCordfactanal} \alias{MCMCordfactanal} \title{Markov Chain Monte Carlo for Ordinal Data Factor Analysis Model} \description{ This function generates a sample from the posterior distribution of an ordinal data factor analysis model. Normal priors are assumed on the factor loadings and factor scores while improper uniform priors are assumed on the cutpoints. The user supplies data and parameters for the prior distributions, and a sample from the posterior distribution is returned as an mcmc object, which can be subsequently analyzed with functions provided in the coda package. } \usage{ MCMCordfactanal(x, factors, lambda.constraints=list(), data=parent.frame(), burnin = 1000, mcmc = 20000, thin=1, tune=NA, verbose = 0, seed = NA, lambda.start = NA, l0=0, L0=0, store.lambda=TRUE, store.scores=FALSE, drop.constantvars=TRUE, ... ) } \arguments{ \item{x}{Either a formula or a numeric matrix containing the manifest variables.} \item{factors}{The number of factors to be fitted.} \item{lambda.constraints}{List of lists specifying possible equality or simple inequality constraints on the factor loadings.
A typical entry in the list has one of three forms: \code{varname=list(d,c)} which will constrain the dth loading for the variable named varname to be equal to c, \code{varname=list(d,"+")} which will constrain the dth loading for the variable named varname to be positive, and \code{varname=list(d, "-")} which will constrain the dth loading for the variable named varname to be negative. If x is a matrix without column names defaults names of ``V1", ``V2", ... , etc will be used. Note that, unlike \code{MCMCfactanal}, the \eqn{\Lambda}{Lambda} matrix used here has \code{factors}+1 columns. The first column of \eqn{\Lambda}{Lambda} corresponds to negative item difficulty parameters and should generally not be constrained. } \item{data}{A data frame.} \item{burnin}{The number of burn-in iterations for the sampler.} \item{mcmc}{The number of iterations for the sampler.} \item{thin}{The thinning interval used in the simulation. The number of iterations must be divisible by this value.} \item{tune}{The tuning parameter for the Metropolis-Hastings sampling. Can be either a scalar or a \eqn{k}{k}-vector. Must be strictly positive.} \item{verbose}{A switch which determines whether or not the progress of the sampler is printed to the screen. If \code{verbose} is greater than 0 the iteration number and the Metropolis-Hastings acceptance rate are printed to the screen every \code{verbose}th iteration.} \item{seed}{The seed for the random number generator. If NA, the Mersenne Twister generator is used with default seed 12345; if an integer is passed it is used to seed the Mersenne twister. The user can also pass a list of length two to use the L'Ecuyer random number generator, which is suitable for parallel computation. The first element of the list is the L'Ecuyer seed, which is a vector of length six or NA (if NA a default seed of \code{rep(12345,6)} is used). The second element of list is a positive substream number. See the MCMCpack specification for more details.} \item{lambda.start}{Starting values for the factor loading matrix Lambda. If \code{lambda.start} is set to a scalar the starting value for all unconstrained loadings will be set to that scalar. If \code{lambda.start} is a matrix of the same dimensions as Lambda then the \code{lambda.start} matrix is used as the starting values (except for equality-constrained elements). If \code{lambda.start} is set to \code{NA} (the default) then starting values for unconstrained elements in the first column of Lambda are based on the observed response pattern, the remaining unconstrained elements of Lambda are set to , and starting values for inequality constrained elements are set to either 1.0 or -1.0 depending on the nature of the constraints.} \item{l0}{The means of the independent Normal prior on the factor loadings. Can be either a scalar or a matrix with the same dimensions as \code{Lambda}.} \item{L0}{The precisions (inverse variances) of the independent Normal prior on the factor loadings. Can be either a scalar or a matrix with the same dimensions as \code{Lambda}.} \item{store.lambda}{A switch that determines whether or not to store the factor loadings for posterior analysis. By default, the factor loadings are all stored.} \item{store.scores}{A switch that determines whether or not to store the factor scores for posterior analysis. \emph{NOTE: This takes an enormous amount of memory, so should only be used if the chain is thinned heavily, or for applications with a small number of observations}. 
By default, the factor scores are not stored.} \item{drop.constantvars}{A switch that determines whether or not manifest variables that have no variation should be deleted before fitting the model. Default = TRUE.} \item{...}{further arguments to be passed} } \value{ An mcmc object that contains the posterior sample. This object can be summarized by functions provided by the coda package. } \details{The model takes the following form: Let \eqn{i=1,\ldots,N}{1=1,...,n} index observations and \eqn{j=1,\ldots,K}{j=1,...,K} index response variables within an observation. The typical observed variable \eqn{x_{ij}}{x_ij} is ordinal with a total of \eqn{C_j}{C_j} categories. The distribution of \eqn{X}{X} is governed by a \eqn{N \times K}{N by K} matrix of latent variables \eqn{X^*}{Xstar} and a series of cutpoints \eqn{\gamma}{gamma}. \eqn{X^*}{Xstar} is assumed to be generated according to: \deqn{x^*_i = \Lambda \phi_i + \epsilon_i}{xstar_i = Lambda phi_i + epsilon_i} \deqn{\epsilon_i \sim \mathcal{N}(0,I)}{epsilon_i ~ N(0, I)} where \eqn{x^*_i}{xstar_i} is the \eqn{k}{k}-vector of latent variables specific to observation \eqn{i}{i}, \eqn{\Lambda}{Lambda} is the \eqn{k \times d}{k by d} matrix of factor loadings, and \eqn{\phi_i}{phi_i} is the \eqn{d}{d}-vector of latent factor scores. It is assumed that the first element of \eqn{\phi_i}{phi_i} is equal to 1 for all \eqn{i}{i}. The probability that the \eqn{j}{j}th variable in observation \eqn{i}{i} takes the value \eqn{c}{c} is: \deqn{ \pi_{ijc} = \Phi(\gamma_{jc} - \Lambda'_j\phi_i) - \Phi(\gamma_{j(c-1)} - \Lambda'_j\phi_i) }{ pi_ijc = pnorm(gamma_jc - Lambda'_j phi_i) - pnorm(gamma_j(c-1) - Lambda'_j phi_i) } The implementation used here assumes independent conjugate priors for each element of \eqn{\Lambda}{Lambda} and each \eqn{\phi_i}{phi_i}. More specifically we assume: \deqn{\Lambda_{ij} \sim \mathcal{N}(l_{0_{ij}}, L_{0_{ij}}^{-1}), i=1,\ldots,k, j=1,\ldots,d}{Lambda_ij ~ N(l0_ij, L0_ij^-1), i=1,...,k, j=1,...,d} \deqn{\phi_{i(2:d)} \sim \mathcal{N}(0, I), i=1,\dots,n}{phi_i(2:d) ~ N(0, I), i=1,...,n} The standard two-parameter item response theory model with probit link is a special case of the model sketched above. \code{MCMCordfactanal} simulates from the posterior distribution using a Metropolis-Hastings within Gibbs sampling algorithm. The algorithm employed is based on work by Cowles (1996). Note that the first element of \eqn{\phi_i}{phi_i} is a 1. As a result, the first column of \eqn{\Lambda}{Lambda} can be interpretated as item difficulty parameters. Further, the first element \eqn{\gamma_1}{gamma_1} is normalized to zero, and thus not returned in the mcmc object. The simulation proper is done in compiled C++ code to maximize efficiency. Please consult the coda documentation for a comprehensive list of functions that can be used to analyze the posterior sample. As is the case with all measurement models, make sure that you have plenty of free memory, especially when storing the scores. } \references{ Shawn Treier and Simon Jackman. 2008. ``Democracy as a Latent Variable." \emph{American Journal of Political Science}. 52: 201-217. Andrew D. Martin, Kevin M. Quinn, and Jong Hee Park. 2011. ``MCMCpack: Markov Chain Monte Carlo in R.'', \emph{Journal of Statistical Software}. 42(9): 1-21. \url{http://www.jstatsoft.org/v42/i09/}. M. K. Cowles. 1996. ``Accelerating Monte Carlo Markov Chain Convergence for Cumulative-link Generalized Linear Models." \emph{Statistics and Computing.} 6: 101-110. Valen E. Johnson and James H. Albert. 
1999. ``Ordinal Data Modeling." Springer: New York. Daniel Pemstein, Kevin M. Quinn, and Andrew D. Martin. 2007. \emph{Scythe Statistical Library 1.0.} \url{http://scythe.wustl.edu}. Martyn Plummer, Nicky Best, Kate Cowles, and Karen Vines. 2002. \emph{Output Analysis and Diagnostics for MCMC (CODA)}. \url{http://www-fis.iarc.fr/coda/}. } \examples{ \dontrun{ data(painters) new.painters <- painters[,1:4] cuts <- apply(new.painters, 2, quantile, c(.25, .50, .75)) for (i in 1:4){ new.painters[new.painters[,i]= 1 & z < 2] <- 2; y[z >= 2] <- 3; ## inputs formula <- y ~ x1 ## fit multiple models with a varying number of breaks out1 <- MCMCoprobitChange(formula, m=1, mcmc=1000, burnin=1000, thin=1, tune=c(.5, .5), verbose=1000, b0=0, B0=10, marginal.likelihood = "Chib95") out2 <- MCMCoprobitChange(formula, m=2, mcmc=1000, burnin=1000, thin=1, tune=c(.5, .5, .5), verbose=1000, b0=0, B0=10, marginal.likelihood = "Chib95") out3 <- MCMCoprobitChange(formula, m=3, mcmc=1000, burnin=1000, thin=1, tune=c(.5, .5, .5, .5), verbose=1000, b0=0, B0=10, marginal.likelihood = "Chib95") ## find the most reasonable one BayesFactor(out1, out2, out3) ## draw plots using the "right" model plotState(out1) plotChangepoint(out1) } \keyword{models} \seealso{\code{\link{plotState}}, \code{\link{plotChangepoint}}} MCMCpack/man/MCMCoprobit.Rd0000644000176000001440000001665012133644110015043 0ustar ripleyusers\name{MCMCoprobit} \alias{MCMCoprobit} \title{Markov Chain Monte Carlo for Ordered Probit Regression} \description{ This function generates a sample from the posterior distribution of an ordered probit regression model using the data augmentation approach of Cowles (1996). The user supplies data and priors, and a sample from the posterior distribution is returned as an mcmc object, which can be subsequently analyzed with functions provided in the coda package. } \usage{ MCMCoprobit(formula, data = parent.frame(), burnin = 1000, mcmc = 10000, thin=1, tune = NA, tdf = 1, verbose = 0, seed = NA, beta.start = NA, b0 = 0, B0 = 0, a0 = 0, A0 = 0, mcmc.method = c("Cowles", "AC"), ...) } \arguments{ \item{formula}{Model formula.} \item{data}{Data frame.} \item{burnin}{The number of burn-in iterations for the sampler.} \item{mcmc}{The number of MCMC iterations for the sampler.} \item{thin}{The thinning interval used in the simulation. The number of Gibbs iterations must be divisible by this value.} \item{tune}{The tuning parameter for the Metropolis-Hastings step. Default of NA corresponds to a choice of 0.05 divided by the number of categories in the response variable.} \item{tdf}{Degrees of freedom for the multivariate-t proposal distribution when \code{mcmc.method} is set to "IndMH". Must be positive. } \item{verbose}{A switch which determines whether or not the progress of the sampler is printed to the screen. If \code{verbose} is greater than 0 the iteration number, the beta vector, and the Metropolis-Hastings acceptance rate are printed to the screen every \code{verbose}th iteration.} \item{seed}{The seed for the random number generator. If NA, the Mersenne Twister generator is used with default seed 12345; if an integer is passed it is used to seed the Mersenne twister. The user can also pass a list of length two to use the L'Ecuyer random number generator, which is suitable for parallel computation. The first element of the list is the L'Ecuyer seed, which is a vector of length six or NA (if NA a default seed of \code{rep(12345,6)} is used). The second element of list is a positive substream number. 
See the MCMCpack specification for more details.} \item{beta.start}{The starting value for the \eqn{\beta}{beta} vector. This can either be a scalar or a column vector with dimension equal to the number of betas. If this takes a scalar value, then that value will serve as the starting value for all of the betas. The default value of NA will use rescaled estimates from an ordered logit model.} \item{b0}{The prior mean of \eqn{\beta}{beta}. This can either be a scalar or a column vector with dimension equal to the number of betas. If this takes a scalar value, then that value will serve as the prior mean for all of the betas.} \item{B0}{The prior precision of \eqn{\beta}{beta}. This can either be a scalar or a square matrix with dimensions equal to the number of betas. If this takes a scalar value, then that value times an identity matrix serves as the prior precision of \eqn{\beta}{beta}. Default value of 0 is equivalent to an improper uniform prior on \eqn{\beta}{beta}. } \item{a0}{The prior mean of \eqn{\gamma}{gamma}. This can either be a scalar or a column vector with dimension equal to the number of betas. If this takes a scalar value, then that value will serve as the prior mean for all of the betas.} \item{A0}{The prior precision of \eqn{\gamma}{gamma}. This can either be a scalar or a square matrix with dimensions equal to the number of betas. If this takes a scalar value, then that value times an identity matrix serves as the prior precision of \eqn{\gamma}{gamma}. Default value of 0 is equivalent to an improper uniform prior on \eqn{\gamma}{gamma}. } \item{mcmc.method}{Can be set to either "Cowles" (default) or "AC" to perform posterior sampling of cutpoints based on Cowles (1996) or Albert and Chib (2001) respectively.} \item{...}{further arguments to be passed} } \value{ An mcmc object that contains the posterior sample. This object can be summarized by functions provided by the coda package. } \details{ \code{MCMCoprobit} simulates from the posterior distribution of a ordered probit regression model using data augmentation. The simulation proper is done in compiled C++ code to maximize efficiency. Please consult the coda documentation for a comprehensive list of functions that can be used to analyze the posterior sample. The observed variable \eqn{y_i}{y_i} is ordinal with a total of \eqn{C}{C} categories, with distribution governed by a latent variable: \deqn{z_i = x_i'\beta + \varepsilon_i}{z_i = x_i'beta + epsilon_i} The errors are assumed to be from a standard Normal distribution. The probabilities of observing each outcome is governed by this latent variable and \eqn{C-1}{C-1} estimable cutpoints, which are denoted \eqn{\gamma_c}{gamma_c}. The probability that individual \eqn{i}{i} is in category \eqn{c}{c} is computed by: \deqn{ \pi_{ic} = \Phi(\gamma_c - x_i'\beta) - \Phi(\gamma_{c-1} - x_i'\beta) }{ pi_ic = Phi(gamma_c - x_i'beta) - Phi(gamma_(c-1) - x_i'beta) } These probabilities are used to form the multinomial distribution that defines the likelihoods. \code{MCMCoprobit} provides two ways to sample the cutpoints. Cowles (1996) proposes a sampling scheme that groups sampling of a latent variable with cutpoints. In this case, for identification the first element \eqn{\gamma_1}{gamma_1} is normalized to zero. Albert and Chib (2001) show that we can sample cutpoints indirectly without constraints by transforming cutpoints into real-valued parameters (\eqn{\alpha}{alpha}). } \references{ Albert, J. H. and S. Chib. 1993. 
``Bayesian Analysis of Binary and Polychotomous Response Data.'' \emph{J. Amer. Statist. Assoc.} 88, 669-679 M. K. Cowles. 1996. ``Accelerating Monte Carlo Markov Chain Convergence for Cumulative-link Generalized Linear Models." \emph{Statistics and Computing.} 6: 101-110. Andrew D. Martin, Kevin M. Quinn, and Jong Hee Park. 2011. ``MCMCpack: Markov Chain Monte Carlo in R.'', \emph{Journal of Statistical Software}. 42(9): 1-21. \url{http://www.jstatsoft.org/v42/i09/}. Valen E. Johnson and James H. Albert. 1999. \emph{Ordinal Data Modeling}. Springer: New York. Albert, James and Siddhartha Chib. 2001. ``Sequential Ordinal Modeling with Applications to Survival Data." \emph{Biometrics.} 57: 829-836. Daniel Pemstein, Kevin M. Quinn, and Andrew D. Martin. 2007. \emph{Scythe Statistical Library 1.0.} \url{http://scythe.wustl.edu}. Martyn Plummer, Nicky Best, Kate Cowles, and Karen Vines. 2002. \emph{Output Analysis and Diagnostics for MCMC (CODA)}. \url{http://www-fis.iarc.fr/coda/} } \examples{ \dontrun{ x1 <- rnorm(100); x2 <- rnorm(100); z <- 1.0 + x1*0.1 - x2*0.5 + rnorm(100); y <- z; y[z < 0] <- 0; y[z >= 0 & z < 1] <- 1; y[z >= 1 & z < 1.5] <- 2; y[z >= 1.5] <- 3; out1 <- MCMCoprobit(y ~ x1 + x2, tune=0.3) out2 <- MCMCoprobit(y ~ x1 + x2, tune=0.3, tdf=3, verbose=1000, mcmc.method="AC") summary(out1) summary(out2) plot(out1) plot(out2) } } \keyword{models} \seealso{\code{\link[coda]{plot.mcmc}},\code{\link[coda]{summary.mcmc}}} MCMCpack/man/MCMCmnl.Rd0000644000176000001440000002476112133644110014155 0ustar ripleyusers\name{MCMCmnl} \alias{MCMCmnl} \title{Markov Chain Monte Carlo for Multinomial Logistic Regression} \description{ This function generates a sample from the posterior distribution of a multinomial logistic regression model using either a random walk Metropolis algorithm or a slice sampler. The user supplies data and priors, and a sample from the posterior distribution is returned as an mcmc object, which can be subsequently analyzed with functions provided in the coda package. } \usage{ MCMCmnl(formula, baseline=NULL, data=NULL, burnin = 1000, mcmc = 10000, thin = 1, mcmc.method = c("IndMH", "RWM", "slice"), tune = 1, tdf=6, verbose = 0, seed = NA, beta.start = NA, b0 = 0, B0 = 0, ...) } \arguments{ \item{formula}{Model formula. If the choicesets do not vary across individuals, the \code{y} variable should be a factor or numeric variable that gives the observed choice of each individual. If the choicesets do vary across individuals, \code{y} should be a \eqn{n \times p}{n x p} matrix where \eqn{n}{n} is the number of individuals and \eqn{p}{p} is the maximum number of choices in any choiceset. Here each column of \code{y} corresponds to a particular observed choice and the elements of \code{y} should be either \code{0} (not chosen but available), \code{1} (chosen), or \code{-999} (not available). Choice-specific covariates have to be entered using the syntax: \code{choicevar(cvar, "var", "choice")} where \code{cvar} is the name of a variable in \code{data}, \code{"var"} is the name of the new variable to be created, and \code{"choice"} is the level of \code{y} that \code{cvar} corresponds to. Specifying each choice-specific covariate will typically require \eqn{p}{p} calls to the \code{choicevar} function in the formula. Individual specific covariates can be entered into the formula normally. See the examples section below to see the syntax used to fit various models.} \item{baseline}{The baseline category of the response variable. 
\code{baseline} should be set equal to one of the observed levels of the response variable. If left equal to \code{NULL} then the baseline level is set to the alpha-numerically first element of the response variable. If the choicesets vary across individuals, the baseline choice must be in the choiceset of each individual. } \item{data}{The data frame used for the analysis. Each row of the dataframe should correspond to an individual who is making a choice. } \item{burnin}{The number of burn-in iterations for the sampler.} \item{mcmc}{The number of iterations to run the sampler past burn-in. } \item{thin}{The thinning interval used in the simulation. The number of mcmc iterations must be divisible by this value. } \item{mcmc.method}{Can be set to either "IndMH" (default), "RWM", or "slice" to perform independent Metropolis-Hastings sampling, random walk Metropolis sampling or slice sampling respectively.} \item{tdf}{Degrees of freedom for the multivariate-t proposal distribution when \code{mcmc.method} is set to "IndMH". Must be positive. } \item{tune}{Metropolis tuning parameter. Can be either a positive scalar or a \eqn{k}{k}-vector, where \eqn{k}{k} is the length of \eqn{\beta}{beta}. Make sure that the acceptance rate is satisfactory (typically between 0.20 and 0.5) before using the posterior sample for inference. } \item{verbose}{A switch which determines whether or not the progress of the sampler is printed to the screen. If \code{verbose} is greater than 0 the iteration number, the current beta vector, and the Metropolis acceptance rate are printed to the screen every \code{verbose}th iteration. } \item{seed}{The seed for the random number generator. If NA, the Mersenne Twister generator is used with default seed 12345; if an integer is passed it is used to seed the Mersenne twister. The user can also pass a list of length two to use the L'Ecuyer random number generator, which is suitable for parallel computation. The first element of the list is the L'Ecuyer seed, which is a vector of length six or NA (if NA a default seed of \code{rep(12345,6)} is used). The second element of list is a positive substream number. See the MCMCpack specification for more details. } \item{beta.start}{The starting value for the \eqn{\beta}{beta} vector. This can either be a scalar or a column vector with dimension equal to the number of betas. If this takes a scalar value, then that value will serve as the starting value for all of the betas. The default value of NA will use the maximum likelihood estimate of \eqn{\beta}{beta} as the starting value. } \item{b0}{The prior mean of \eqn{\beta}{beta}. This can either be a scalar or a column vector with dimension equal to the number of betas. If this takes a scalar value, then that value will serve as the prior mean for all of the betas. } \item{B0}{The prior precision of \eqn{\beta}{beta}. This can either be a scalar or a square matrix with dimensions equal to the number of betas. If this takes a scalar value, then that value times an identity matrix serves as the prior precision of \eqn{\beta}{beta}. Default value of 0 is equivalent to an improper uniform prior for beta.} \item{\dots}{Further arguments to be passed. } } \value{ An mcmc object that contains the posterior sample. This object can be summarized by functions provided by the coda package. } \details{ \code{MCMCmnl} simulates from the posterior distribution of a multinomial logistic regression model using either a random walk Metropolis algorithm or a univariate slice sampler. 
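For intuition, each observation contributes a softmax over the alternatives actually offered to that individual (those not coded \code{-999} in the response matrix). A schematic (the helper name is hypothetical, not the package's internal code):
\preformatted{
## illustrative only: choice probabilities for a single individual
## eta: linear predictors x_ij'beta; available: which alternatives are offered
mnl.probs <- function(eta, available) {
  num <- ifelse(available, exp(eta), 0)
  num / sum(num)
}
mnl.probs(c(0.2, -1.0, 0.5, 0.0), available = c(TRUE, TRUE, TRUE, FALSE))
}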
The simulation proper is done in compiled C++ code to maximize efficiency. Please consult the coda documentation for a comprehensive list of functions that can be used to analyze the posterior sample. The model takes the following form: \deqn{y_i \sim \mathcal{M}ultinomial(\pi_i)}{y_i ~ Multinomial(pi_i)} where: \deqn{\pi_{ij} = \frac{\exp(x_{ij}'\beta)}{\sum_{k=1}^p\exp(x_{ik}'\beta)}}{pi_{ij} = exp(x_{ij}'beta) / [sum_{k=1}^p exp(x_{ik}'beta)]} We assume a multivariate Normal prior on \eqn{\beta}{beta}: \deqn{\beta \sim \mathcal{N}(b_0,B_0^{-1})}{beta ~ N(b0,B0^(-1))} The Metropolis proposal distribution is centered at the current value of \eqn{\beta}{beta} and has variance-covariance \eqn{V = T (B_0 + C^{-1})^{-1} T }{V = T (B0 + C^{-1})^{-1} T}, where \eqn{T}{T} is the diagonal positive definite matrix formed from \code{tune}, \eqn{B_0}{B0} is the prior precision, and \eqn{C}{C} is the large sample variance-covariance matrix of the MLEs. This last calculation is done via an initial call to \code{optim}. } \references{ Andrew D. Martin, Kevin M. Quinn, and Jong Hee Park. 2011. ``MCMCpack: Markov Chain Monte Carlo in R.'', \emph{Journal of Statistical Software}. 42(9): 1-21. \url{http://www.jstatsoft.org/v42/i09/}. Daniel Pemstein, Kevin M. Quinn, and Andrew D. Martin. 2007. \emph{Scythe Statistical Library 1.0.} \url{http://scythe.wustl.edu}. Radford Neal. 2003. ``Slice Sampling'' (with discussion). \emph{Annals of Statistics}, 31: 705-767. Martyn Plummer, Nicky Best, Kate Cowles, and Karen Vines. 2002. \emph{Output Analysis and Diagnostics for MCMC (CODA)}. \url{http://www-fis.iarc.fr/coda/}. Siddhartha Chib, Edward Greenberg, and Yuxin Chen. 1998. ``MCMC Methods for Fitting and Comparing Multinomial Response Models." } \seealso{\code{\link[coda]{plot.mcmc}},\code{\link[coda]{summary.mcmc}}, \code{\link[nnet]{multinom}}} \examples{ \dontrun{ data(Nethvote) ## just a choice-specific X var post1 <- MCMCmnl(vote ~ choicevar(distD66, "sqdist", "D66") + choicevar(distPvdA, "sqdist", "PvdA") + choicevar(distVVD, "sqdist", "VVD") + choicevar(distCDA, "sqdist", "CDA"), baseline="D66", mcmc.method="IndMH", B0=0, verbose=500, mcmc=100000, thin=10, tune=1.0, data=Nethvote) plot(post1) summary(post1) ## just individual-specific X vars post2 <- MCMCmnl(vote ~ relig + class + income + educ + age + urban, baseline="D66", mcmc.method="IndMH", B0=0, verbose=500, mcmc=100000, thin=10, tune=0.5, data=Nethvote) plot(post2) summary(post2) ## both choice-specific and individual-specific X vars post3 <- MCMCmnl(vote ~ choicevar(distD66, "sqdist", "D66") + choicevar(distPvdA, "sqdist", "PvdA") + choicevar(distVVD, "sqdist", "VVD") + choicevar(distCDA, "sqdist", "CDA") + relig + class + income + educ + age + urban, baseline="D66", mcmc.method="IndMH", B0=0, verbose=500, mcmc=100000, thin=10, tune=0.5, data=Nethvote) plot(post3) summary(post3) ## numeric y variable Nethvote$vote <- as.numeric(Nethvote$vote) post4 <- MCMCmnl(vote ~ choicevar(distD66, "sqdist", "2") + choicevar(distPvdA, "sqdist", "3") + choicevar(distVVD, "sqdist", "4") + choicevar(distCDA, "sqdist", "1") + relig + class + income + educ + age + urban, baseline="2", mcmc.method="IndMH", B0=0, verbose=500, mcmc=100000, thin=10, tune=0.5, data=Nethvote) plot(post4) summary(post4) ## Simulated data example with nonconstant choiceset n <- 1000 y <- matrix(0, n, 4) colnames(y) <- c("a", "b", "c", "d") xa <- rnorm(n) xb <- rnorm(n) xc <- rnorm(n) xd <- rnorm(n) xchoice <- cbind(xa, xb, xc, xd) z <- rnorm(n) for (i in 1:n){ ## randomly determine
choiceset (c is always in choiceset) choiceset <- c(3, sample(c(1,2,4), 2, replace=FALSE)) numer <- matrix(0, 4, 1) for (j in choiceset){ if (j == 3){ numer[j] <- exp(xchoice[i, j] ) } else { numer[j] <- exp(xchoice[i, j] - z[i] ) } } p <- numer / sum(numer) y[i,] <- rmultinom(1, 1, p) y[i,-choiceset] <- -999 } post5 <- MCMCmnl(y~choicevar(xa, "x", "a") + choicevar(xb, "x", "b") + choicevar(xc, "x", "c") + choicevar(xd, "x", "d") + z, baseline="c", verbose=500, mcmc=100000, thin=10, tune=.85) plot(post5) summary(post5) } } \keyword{models} MCMCpack/man/MCMCmixfactanal.Rd0000644000176000001440000003171012133644110015646 0ustar ripleyusers\name{MCMCmixfactanal} \alias{MCMCmixfactanal} \title{Markov Chain Monte Carlo for Mixed Data Factor Analysis Model} \description{ This function generates a sample from the posterior distribution of a mixed data (both continuous and ordinal) factor analysis model. Normal priors are assumed on the factor loadings and factor scores, improper uniform priors are assumed on the cutpoints, and inverse gamma priors are assumed for the error variances (uniquenesses). The user supplies data and parameters for the prior distributions, and a sample from the posterior distribution is returned as an mcmc object, which can be subsequently analyzed with functions provided in the coda package. } \usage{ MCMCmixfactanal(x, factors, lambda.constraints=list(), data=parent.frame(), burnin = 1000, mcmc = 20000, thin=1, tune=NA, verbose = 0, seed = NA, lambda.start = NA, psi.start=NA, l0=0, L0=0, a0=0.001, b0=0.001, store.lambda=TRUE, store.scores=FALSE, std.mean=TRUE, std.var=TRUE, ... ) } \arguments{ \item{x}{A one-sided formula containing the manifest variables. Ordinal (including dichotomous) variables must be coded as ordered factors. Each level of these ordered factors must be present in the data passed to the function. NOTE: data input is different in \code{MCMCmixfactanal} than in either \code{MCMCfactanal} or \code{MCMCordfactanal}.} \item{factors}{The number of factors to be fitted.} \item{lambda.constraints}{List of lists specifying possible equality or simple inequality constraints on the factor loadings. A typical entry in the list has one of three forms: \code{varname=list(d,c)} which will constrain the dth loading for the variable named varname to be equal to c, \code{varname=list(d,"+")} which will constrain the dth loading for the variable named varname to be positive, and \code{varname=list(d, "-")} which will constrain the dth loading for the variable named varname to be negative. If x is a matrix without column names defaults names of ``V1", ``V2", ... , etc will be used. Note that, unlike \code{MCMCfactanal}, the \eqn{\Lambda}{Lambda} matrix used here has \code{factors}+1 columns. The first column of \eqn{\Lambda}{Lambda} corresponds to negative item difficulty parameters for ordinal manifest variables and mean parameters for continuous manifest variables and should generally not be constrained directly by the user. } \item{data}{A data frame.} \item{burnin}{The number of burn-in iterations for the sampler.} \item{mcmc}{The number of iterations for the sampler.} \item{thin}{The thinning interval used in the simulation. The number of iterations must be divisible by this value.} \item{tune}{The tuning parameter for the Metropolis-Hastings sampling. Can be either a scalar or a \eqn{k}{k}-vector (where \eqn{k}{k} is the number of manifest variables). 
\code{tune} must be strictly positive.} \item{verbose}{A switch which determines whether or not the progress of the sampler is printed to the screen. If \code{verbose} is greater than 0 the iteration number and the Metropolis-Hastings acceptance rate are printed to the screen every \code{verbose}th iteration.} \item{seed}{The seed for the random number generator. If NA, the Mersenne Twister generator is used with default seed 12345; if an integer is passed it is used to seed the Mersenne twister. The user can also pass a list of length two to use the L'Ecuyer random number generator, which is suitable for parallel computation. The first element of the list is the L'Ecuyer seed, which is a vector of length six or NA (if NA a default seed of \code{rep(12345,6)} is used). The second element of the list is a positive substream number. See the MCMCpack specification for more details.} \item{lambda.start}{Starting values for the factor loading matrix Lambda. If \code{lambda.start} is set to a scalar the starting value for all unconstrained loadings will be set to that scalar. If \code{lambda.start} is a matrix of the same dimensions as Lambda then the \code{lambda.start} matrix is used as the starting values (except for equality-constrained elements). If \code{lambda.start} is set to \code{NA} (the default) then starting values for unconstrained elements in the first column of Lambda are based on the observed response pattern, the remaining unconstrained elements of Lambda are set to 0, and starting values for inequality constrained elements are set to either 1.0 or -1.0 depending on the nature of the constraints.} \item{psi.start}{Starting values for the error variance (uniqueness) matrix. If \code{psi.start} is set to a scalar then the starting value for all diagonal elements of \code{Psi} that represent error variances for continuous variables are set to this value. If \code{psi.start} is a \eqn{k}{k}-vector (where \eqn{k}{k} is the number of manifest variables) then the starting value of \code{Psi} has \code{psi.start} on the main diagonal with the exception that entries corresponding to error variances for ordinal variables are set to 1. If \code{psi.start} is set to \code{NA} (the default) the starting values of all the continuous variable uniquenesses are set to 0.5. Error variances for ordinal response variables are always constrained (regardless of the value of \code{psi.start}) to have an error variance of 1 in order to achieve identification.} \item{l0}{The means of the independent Normal prior on the factor loadings. Can be either a scalar or a matrix with the same dimensions as \code{Lambda}.} \item{L0}{The precisions (inverse variances) of the independent Normal prior on the factor loadings. Can be either a scalar or a matrix with the same dimensions as \code{Lambda}.} \item{a0}{Controls the shape of the inverse Gamma prior on the uniquenesses. The actual shape parameter is set to \code{a0/2}. Can be either a scalar or a \eqn{k}{k}-vector.} \item{b0}{Controls the scale of the inverse Gamma prior on the uniquenesses. The actual scale parameter is set to \code{b0/2}. Can be either a scalar or a \eqn{k}{k}-vector.} \item{store.lambda}{A switch that determines whether or not to store the factor loadings for posterior analysis. By default, the factor loadings are all stored.} \item{store.scores}{A switch that determines whether or not to store the factor scores for posterior analysis.
\emph{NOTE: This takes an enormous amount of memory, so should only be used if the chain is thinned heavily, or for applications with a small number of observations}. By default, the factor scores are not stored.} \item{std.mean}{If \code{TRUE} (the default) the continuous manifest variables are rescaled to have zero mean.} \item{std.var}{If \code{TRUE} (the default) the continuous manifest variables are rescaled to have unit variance.} \item{...}{further arguments to be passed} } \value{ An mcmc object that contains the posterior sample. This object can be summarized by functions provided by the coda package. } \details{The model takes the following form: Let \eqn{i=1,\ldots,N}{i=1,...,N} index observations and \eqn{j=1,\ldots,K}{j=1,...,K} index response variables within an observation. An observed variable \eqn{x_{ij}}{x_ij} can be either ordinal with a total of \eqn{C_j}{C_j} categories or continuous. The distribution of \eqn{X}{X} is governed by a \eqn{N \times K}{N by K} matrix of latent variables \eqn{X^*}{Xstar} and a series of cutpoints \eqn{\gamma}{gamma}. \eqn{X^*}{Xstar} is assumed to be generated according to: \deqn{x^*_i = \Lambda \phi_i + \epsilon_i}{xstar_i = Lambda phi_i + epsilon_i} \deqn{\epsilon_i \sim \mathcal{N}(0,\Psi)}{epsilon_i ~ N(0, Psi)} where \eqn{x^*_i}{xstar_i} is the \eqn{k}{k}-vector of latent variables specific to observation \eqn{i}{i}, \eqn{\Lambda}{Lambda} is the \eqn{k \times d}{k by d} matrix of factor loadings, and \eqn{\phi_i}{phi_i} is the \eqn{d}{d}-vector of latent factor scores. It is assumed that the first element of \eqn{\phi_i}{phi_i} is equal to 1 for all \eqn{i}{i}. If the \eqn{j}{j}th variable is ordinal, the probability that it takes the value \eqn{c}{c} in observation \eqn{i}{i} is: \deqn{ \pi_{ijc} = \Phi(\gamma_{jc} - \Lambda'_j\phi_i) - \Phi(\gamma_{j(c-1)} - \Lambda'_j\phi_i) }{ pi_ijc = pnorm(gamma_jc - Lambda'_j phi_i) - pnorm(gamma_j(c-1) - Lambda'_j phi_i) } If the \eqn{j}{j}th variable is continuous, it is assumed that \eqn{x^*_{ij} = x_{ij}}{xstar_{ij} = x_{ij}} for all \eqn{i}{i}. The implementation used here assumes independent conjugate priors for each element of \eqn{\Lambda}{Lambda} and each \eqn{\phi_i}{phi_i}. More specifically we assume: \deqn{\Lambda_{ij} \sim \mathcal{N}(l_{0_{ij}}, L_{0_{ij}}^{-1}), i=1,\ldots,k, j=1,\ldots,d}{Lambda_ij ~ N(l0_ij, L0_ij^-1), i=1,...,k, j=1,...,d} \deqn{\phi_{i(2:d)} \sim \mathcal{N}(0, I), i=1,\dots,n}{phi_i(2:d) ~ N(0, I), i=1,...,n} \code{MCMCmixfactanal} simulates from the posterior distribution using a Metropolis-Hastings within Gibbs sampling algorithm. The algorithm employed is based on work by Cowles (1996). Note that the first element of \eqn{\phi_i}{phi_i} is a 1. As a result, the first column of \eqn{\Lambda}{Lambda} can be interpreted as negative item difficulty parameters. Further, the first element \eqn{\gamma_1}{gamma_1} is normalized to zero, and thus not returned in the mcmc object. The simulation proper is done in compiled C++ code to maximize efficiency. Please consult the coda documentation for a comprehensive list of functions that can be used to analyze the posterior sample. As is the case with all measurement models, make sure that you have plenty of free memory, especially when storing the scores. } \references{ Kevin M. Quinn. 2004. ``Bayesian Factor Analysis for Mixed Ordinal and Continuous Responses.'' \emph{Political Analysis}. 12: 338-353. Andrew D. Martin, Kevin M. Quinn, and Jong Hee Park. 2011.
``MCMCpack: Markov Chain Monte Carlo in R.'', \emph{Journal of Statistical Software}. 42(9): 1-21. \url{http://www.jstatsoft.org/v42/i09/}. M. K. Cowles. 1996. ``Accelerating Monte Carlo Markov Chain Convergence for Cumulative-link Generalized Linear Models." \emph{Statistics and Computing.} 6: 101-110. Valen E. Johnson and James H. Albert. 1999. \emph{Ordinal Data Modeling}. Springer: New York. Daniel Pemstein, Kevin M. Quinn, and Andrew D. Martin. 2007. \emph{Scythe Statistical Library 1.0.} \url{http://scythe.wustl.edu}. Martyn Plummer, Nicky Best, Kate Cowles, and Karen Vines. 2002. \emph{Output Analysis and Diagnostics for MCMC (CODA)}. \url{http://www-fis.iarc.fr/coda/}. } \examples{ \dontrun{ data(PErisk) post <- MCMCmixfactanal(~courts+barb2+prsexp2+prscorr2+gdpw2, factors=1, data=PErisk, lambda.constraints = list(courts=list(2,"-")), burnin=5000, mcmc=1000000, thin=50, verbose=500, L0=.25, store.lambda=TRUE, store.scores=TRUE, tune=1.2) plot(post) summary(post) library(MASS) data(Cars93) attach(Cars93) new.cars <- data.frame(Price, MPG.city, MPG.highway, Cylinders, EngineSize, Horsepower, RPM, Length, Wheelbase, Width, Weight, Origin) rownames(new.cars) <- paste(Manufacturer, Model) detach(Cars93) # drop obs 57 (Mazda RX 7) b/c it has a rotary engine new.cars <- new.cars[-57,] # drop 3 cylinder cars new.cars <- new.cars[new.cars$Cylinders!=3,] # drop 5 cylinder cars new.cars <- new.cars[new.cars$Cylinders!=5,] new.cars$log.Price <- log(new.cars$Price) new.cars$log.MPG.city <- log(new.cars$MPG.city) new.cars$log.MPG.highway <- log(new.cars$MPG.highway) new.cars$log.EngineSize <- log(new.cars$EngineSize) new.cars$log.Horsepower <- log(new.cars$Horsepower) new.cars$Cylinders <- ordered(new.cars$Cylinders) new.cars$Origin <- ordered(new.cars$Origin) post <- MCMCmixfactanal(~log.Price+log.MPG.city+ log.MPG.highway+Cylinders+log.EngineSize+ log.Horsepower+RPM+Length+ Wheelbase+Width+Weight+Origin, data=new.cars, lambda.constraints=list(log.Horsepower=list(2,"+"), log.Horsepower=c(3,0), Weight=list(3,"+")), factors=2, burnin=5000, mcmc=500000, thin=100, verbose=500, L0=.25, tune=3.0) plot(post) summary(post) } } \keyword{models} \seealso{\code{\link[coda]{plot.mcmc}}, \code{\link[coda]{summary.mcmc}}, \code{\link[stats]{factanal}}, \code{\link[MCMCpack]{MCMCfactanal}}, \code{\link[MCMCpack]{MCMCordfactanal}}, \code{\link[MCMCpack]{MCMCirt1d}}, \code{\link[MCMCpack]{MCMCirtKd}}} MCMCpack/man/MCMCmetrop1R.Rd0000644000176000001440000002267512133644110015102 0ustar ripleyusers\name{MCMCmetrop1R} \alias{MCMCmetrop1R} \title{Metropolis Sampling from User-Written R function} \description{ This function allows a user to construct a sample from a user-defined continuous distribution using a random walk Metropolis algorithm. } \usage{ MCMCmetrop1R(fun, theta.init, burnin = 500, mcmc = 20000, thin = 1, tune = 1, verbose = 0, seed=NA, logfun = TRUE, force.samp = FALSE, V = NULL, optim.method = "BFGS", optim.lower = -Inf, optim.upper = Inf, optim.control = list(fnscale = -1, trace = 0, REPORT = 10, maxit=500), ...) } \arguments{ \item{fun}{The unnormalized (log)density of the distribution from which to take a sample. This must be a function defined in R whose first argument is a continuous (possibly vector) variable. This first argument is the point in the state space at which the (log)density is to be evaluated. Additional arguments can be passed to \code{fun()} by inserting them in the call to \code{MCMCmetrop1R()}.
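For instance, a minimal hypothetical sketch (the names \code{logpostfun} and \code{ydata} are illustrative and not part of the interface; the data vector \code{y} is simply forwarded to \code{fun()} through the call):

## unnormalized log-posterior for the mean of N(mu, 1) data under a flat prior;
## the extra argument y is passed through the MCMCmetrop1R() call to fun()
logpostfun <- function(mu, y) sum(dnorm(y, mean = mu, sd = 1, log = TRUE))
ydata <- rnorm(50, mean = 2)
post.samp <- MCMCmetrop1R(logpostfun, theta.init = 0, y = ydata,
                          burnin = 500, mcmc = 5000, logfun = TRUE)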
See the Details section and the examples below for more information.} \item{theta.init}{Starting values for the sampling. Must be of the appropriate dimension. It must also be the case that \code{fun(theta.init, ...)} is greater than \code{-Inf} if \code{fun()} is a logdensity or greater than 0 if \code{fun()} is a density.} \item{burnin}{The number of burn-in iterations for the sampler.} \item{mcmc}{The number of MCMC iterations after burnin.} \item{thin}{The thinning interval used in the simulation. The number of MCMC iterations must be divisible by this value.} \item{tune}{The tuning parameter for the Metropolis sampling. Can be either a positive scalar or a \eqn{k}{k}-vector, where \eqn{k}{k} is the length of \eqn{\theta}{theta}.} \item{verbose}{A switch which determines whether or not the progress of the sampler is printed to the screen. If \code{verbose} is greater than 0 the iteration number, the \eqn{\theta}{theta} vector, the function value, and the Metropolis acceptance rate are sent to the screen every \code{verbose}th iteration.} \item{seed}{The seed for the random number generator. If NA, the Mersenne Twister generator is used with default seed 12345; if an integer is passed it is used to seed the Mersenne twister. The user can also pass a list of length two to use the L'Ecuyer random number generator, which is suitable for parallel computation. The first element of the list is the L'Ecuyer seed, which is a vector of length six or NA (if NA a default seed of \code{rep(12345,6)} is used). The second element of list is a positive substream number. See the MCMCpack specification for more details.} \item{logfun}{Logical indicating whether \code{fun} returns the natural log of a density function (TRUE) or a density (FALSE).} \item{force.samp}{Logical indicating whether the sampling should proceed if the Hessian calculated from the initial call to \code{optim} routine to maximize the (log)density is not negative definite. If \code{force.samp==TRUE} and the Hessian from \code{optim} is non-negative definite, the Hessian is rescaled by subtracting small values from it's main diagonal until it is negative definite. Sampling proceeds using this rescaled Hessian in place of the original Hessian from \code{optim}. By default, if \code{force.samp==FALSE} and the Hessian from \code{optim} is non-negative definite, an error message is printed and the call to \code{MCMCmetrop1R} is terminated. \emph{Please note that a non-negative Hessian at the mode is often an indication that the function of interest is not a proper density. Thus, \code{force.samp} should only be set equal to \code{TRUE} with great caution.} } \item{V}{The variance-covariance matrix for the Gaussian proposal distribution. Must be a square matrix or \code{NULL}. If a square matrix, \code{V} must have dimension equal to the length of \code{theta.init}. If \code{NULL}, \code{V} is calculated from \code{tune} and an initial call to \code{optim}. See the Details section below for more information. Unless the log-posterior is expensive to compute it will typically be best to use the default \code{V = NULL}.} \item{optim.method}{The value of the \code{method} parameter sent to \code{optim} during an initial maximization of \code{fun}. See \code{optim} for more details.} \item{optim.lower}{The value of the \code{lower} parameter sent to \code{optim} during an initial maximization of \code{fun}. 
See \code{optim} for more details.} \item{optim.upper}{The value of the \code{upper} parameter sent to \code{optim} during an initial maximization of \code{fun}. See \code{optim} for more details.} \item{optim.control}{The value of the \code{control} parameter sent to \code{optim} during an initial maximization of \code{fun}. See \code{optim} for more details.} \item{\dots}{Additional arguments.} } \details{ MCMCmetrop1R produces a sample from a user-defined distribution using a random walk Metropolis algorithm with multivariate normal proposal distribution. See Gelman et al. (2003) and Robert & Casella (2004) for details of the random walk Metropolis algorithm. The proposal distribution is centered at the current value of \eqn{\theta}{theta} and has variance-covariance \eqn{V}{V}. If \eqn{V}{V} is specified by the user to be \code{NULL} then \eqn{V}{V} is calculated as: \eqn{V = T (-1\cdot H)^{-1} T }{V = T (-1*H)^{-1} T}, where \eqn{T}{T} is a the diagonal positive definite matrix formed from the \code{tune} and \eqn{H}{H} is the approximate Hessian of \code{fun} evaluated at its mode. This last calculation is done via an initial call to \code{optim}. } \value{ An mcmc object that contains the posterior sample. This object can be summarized by functions provided by the coda package. } \examples{ \dontrun{ ## logistic regression with an improper uniform prior ## X and y are passed as args to MCMCmetrop1R logitfun <- function(beta, y, X){ eta <- X \%*\% beta p <- 1.0/(1.0+exp(-eta)) sum( y * log(p) + (1-y)*log(1-p) ) } x1 <- rnorm(1000) x2 <- rnorm(1000) Xdata <- cbind(1,x1,x2) p <- exp(.5 - x1 + x2)/(1+exp(.5 - x1 + x2)) yvector <- rbinom(1000, 1, p) post.samp <- MCMCmetrop1R(logitfun, theta.init=c(0,0,0), X=Xdata, y=yvector, thin=1, mcmc=40000, burnin=500, tune=c(1.5, 1.5, 1.5), verbose=500, logfun=TRUE) raftery.diag(post.samp) plot(post.samp) summary(post.samp) ## ################################################## ## negative binomial regression with an improper unform prior ## X and y are passed as args to MCMCmetrop1R negbinfun <- function(theta, y, X){ k <- length(theta) beta <- theta[1:(k-1)] alpha <- exp(theta[k]) mu <- exp(X \%*\% beta) log.like <- sum( lgamma(y+alpha) - lfactorial(y) - lgamma(alpha) + alpha * log(alpha/(alpha+mu)) + y * log(mu/(alpha+mu)) ) } n <- 1000 x1 <- rnorm(n) x2 <- rnorm(n) XX <- cbind(1,x1,x2) mu <- exp(1.5+x1+2*x2)*rgamma(n,1) yy <- rpois(n, mu) post.samp <- MCMCmetrop1R(negbinfun, theta.init=c(0,0,0,0), y=yy, X=XX, thin=1, mcmc=35000, burnin=1000, tune=1.5, verbose=500, logfun=TRUE, seed=list(NA,1)) raftery.diag(post.samp) plot(post.samp) summary(post.samp) ## ################################################## ## sample from a univariate normal distribution with ## mean 5 and standard deviation 0.1 ## ## (MCMC obviously not necessary here and this should ## really be done with the logdensity for better ## numerical accuracy-- this is just an illustration of how ## MCMCmetrop1R works with a density rather than logdensity) post.samp <- MCMCmetrop1R(dnorm, theta.init=5.3, mean=5, sd=0.1, thin=1, mcmc=50000, burnin=500, tune=2.0, verbose=5000, logfun=FALSE) summary(post.samp) } } \references{ Siddhartha Chib; Edward Greenberg. 1995. ``Understanding the Metropolis-Hastings Algorithm." \emph{The American Statistician}, 49, 327-335. Andrew Gelman, John B. Carlin, Hal S. Stern, and Donald B. Rubin. 2003. \emph{Bayesian Data Analysis}. 2nd Edition. Boca Raton: Chapman & Hall/CRC. Andrew D. Martin, Kevin M. Quinn, and Jong Hee Park. 2011. 
``MCMCpack: Markov Chain Monte Carlo in R.'', \emph{Journal of Statistical Software}. 42(9): 1-21. \url{http://www.jstatsoft.org/v42/i09/}. Daniel Pemstein, Kevin M. Quinn, and Andrew D. Martin. 2007. \emph{Scythe Statistical Library 1.0.} \url{http://scythe.wustl.edu}. Martyn Plummer, Nicky Best, Kate Cowles, and Karen Vines. 2002. \emph{Output Analysis and Diagnostics for MCMC (CODA)}. \url{http://www-fis.iarc.fr/coda/}. Christian P. Robert and George Casella. 2004. \emph{Monte Carlo Statistical Methods}. 2nd Edition. New York: Springer. } \seealso{\code{\link[coda]{plot.mcmc}}, \code{\link[coda]{summary.mcmc}}, \code{\link[stats]{optim}}, \code{\link[mcmc]{metrop}}} \keyword{models} MCMCpack/man/MCMClogit.Rd0000644000176000001440000002016712133644110014501 0ustar ripleyusers\name{MCMClogit} \alias{MCMClogit} \title{Markov Chain Monte Carlo for Logistic Regression} \description{ This function generates a sample from the posterior distribution of a logistic regression model using a random walk Metropolis algorithm. The user supplies data and priors, and a sample from the posterior distribution is returned as an mcmc object, which can be subsequently analyzed with functions provided in the coda package. } \usage{ MCMClogit(formula, data=NULL, burnin = 1000, mcmc = 10000, thin=1, tune=1.1, verbose = 0, seed = NA, beta.start = NA, b0 = 0, B0 = 0, user.prior.density=NULL, logfun=TRUE, marginal.likelihood=c("none", "Laplace"), ...) } \arguments{ \item{formula}{Model formula.} \item{data}{Data frame.} \item{burnin}{The number of burn-in iterations for the sampler.} \item{mcmc}{The number of Metropolis iterations for the sampler.} \item{thin}{The thinning interval used in the simulation. The number of mcmc iterations must be divisible by this value.} \item{tune}{Metropolis tuning parameter. Can be either a positive scalar or a \eqn{k}{k}-vector, where \eqn{k}{k} is the length of \eqn{\beta}{beta}.Make sure that the acceptance rate is satisfactory (typically between 0.20 and 0.5) before using the posterior sample for inference.} \item{verbose}{A switch which determines whether or not the progress of the sampler is printed to the screen. If \code{verbose} is greater than 0 the iteration number, the current beta vector, and the Metropolis acceptance rate are printed to the screen every \code{verbose}th iteration.} \item{seed}{The seed for the random number generator. If NA, the Mersenne Twister generator is used with default seed 12345; if an integer is passed it is used to seed the Mersenne twister. The user can also pass a list of length two to use the L'Ecuyer random number generator, which is suitable for parallel computation. The first element of the list is the L'Ecuyer seed, which is a vector of length six or NA (if NA a default seed of \code{rep(12345,6)} is used). The second element of list is a positive substream number. See the MCMCpack specification for more details.} \item{beta.start}{The starting value for the \eqn{\beta}{beta} vector. This can either be a scalar or a column vector with dimension equal to the number of betas. If this takes a scalar value, then that value will serve as the starting value for all of the betas. The default value of NA will use the maximum likelihood estimate of \eqn{\beta}{beta} as the starting value.} \item{b0}{If \code{user.prior.density==NULL} \code{b0} is the prior mean of \eqn{\beta}{beta} under a multivariate normal prior. This can either be a scalar or a column vector with dimension equal to the number of betas. 
If this takes a scalar value, then that value will serve as the prior mean for all of the betas.} \item{B0}{If \code{user.prior.density==NULL} \code{B0} is the prior precision of \eqn{\beta}{beta} under a multivariate normal prior. This can either be a scalar or a square matrix with dimensions equal to the number of betas. If this takes a scalar value, then that value times an identity matrix serves as the prior precision of \eqn{\beta}{beta}. Default value of 0 is equivalent to an improper uniform prior for beta.} \item{user.prior.density}{If non-NULL, the prior (log)density up to a constant of proportionality. This must be a function defined in R whose first argument is a continuous (possibly vector) variable. This first argument is the point in the state space at which the prior (log)density is to be evaluated. Additional arguments can be passed to \code{user.prior.density()} by inserting them in the call to \code{MCMClogit()}. See the Details section and the examples below for more information. } \item{logfun}{Logical indicating whether \code{user.prior.density()} returns the natural log of a density function (TRUE) or a density (FALSE).} \item{marginal.likelihood}{How should the marginal likelihood be calculated? Options are: \code{none} in which case the marginal likelihood will not be calculated or \code{Laplace} in which case the Laplace approximation (see Kass and Raftery, 1995) is used.} \item{\ldots}{further arguments to be passed} } \value{ An mcmc object that contains the posterior sample. This object can be summarized by functions provided by the coda package. } \details{\code{MCMClogit} simulates from the posterior distribution of a logistic regression model using a random walk Metropolis algorithm. The simulation proper is done in compiled C++ code to maximize efficiency. Please consult the coda documentation for a comprehensive list of functions that can be used to analyze the posterior sample. The model takes the following form: \deqn{y_i \sim \mathcal{B}ernoulli(\pi_i)}{y_i ~ Bernoulli(pi_i)} where the inverse link function is: \deqn{\pi_i = \frac{\exp(x_i'\beta)}{1 + \exp(x_i'\beta)}}{pi_i = exp(x_i'beta) / [1 + exp(x_i'beta)]} By default, we assume a multivariate Normal prior on \eqn{\beta}{beta}: \deqn{\beta \sim \mathcal{N}(b_0,B_0^{-1})}{beta ~ N(b0,B0^(-1))} Additionally, arbitrary user-defined priors can be specified with the \code{user.prior.density} argument. If the default multivariate normal prior is used, the Metropolis proposal distribution is centered at the current value of \eqn{\beta}{beta} and has variance-covariance \eqn{V = T (B_0 + C^{-1})^{-1} T }{V = T (B0 + C^{-1})^{-1} T}, where \eqn{T}{T} is the diagonal positive definite matrix formed from \code{tune}, \eqn{B_0}{B0} is the prior precision, and \eqn{C}{C} is the large sample variance-covariance matrix of the MLEs. This last calculation is done via an initial call to \code{glm}. If a user-defined prior is used, the Metropolis proposal distribution is centered at the current value of \eqn{\beta}{beta} and has variance-covariance \eqn{V = T C T }{V = T C T}, where \eqn{T}{T} is the diagonal positive definite matrix formed from \code{tune} and \eqn{C}{C} is the large sample variance-covariance matrix of the MLEs. This last calculation is done via an initial call to \code{glm}. } \references{ Andrew D. Martin, Kevin M. Quinn, and Jong Hee Park. 2011. ``MCMCpack: Markov Chain Monte Carlo in R.'', \emph{Journal of Statistical Software}. 42(9): 1-21. \url{http://www.jstatsoft.org/v42/i09/}.
Daniel Pemstein, Kevin M. Quinn, and Andrew D. Martin. 2007. \emph{Scythe Statistical Library 1.0.} \url{http://scythe.wustl.edu}. Martyn Plummer, Nicky Best, Kate Cowles, and Karen Vines. 2002. \emph{Output Analysis and Diagnostics for MCMC (CODA)}. \url{http://www-fis.iarc.fr/coda/}. } \examples{ \dontrun{ ## default improper uniform prior data(birthwt) posterior <- MCMClogit(low~age+as.factor(race)+smoke, data=birthwt) plot(posterior) summary(posterior) ## multivariate normal prior data(birthwt) posterior <- MCMClogit(low~age+as.factor(race)+smoke, b0=0, B0=.001, data=birthwt) plot(posterior) summary(posterior) ## user-defined independent Cauchy prior logpriorfun <- function(beta){ sum(dcauchy(beta, log=TRUE)) } posterior <- MCMClogit(low~age+as.factor(race)+smoke, data=birthwt, user.prior.density=logpriorfun, logfun=TRUE) plot(posterior) summary(posterior) ## user-defined independent Cauchy prior with additional args logpriorfun <- function(beta, location, scale){ sum(dcauchy(beta, location, scale, log=TRUE)) } posterior <- MCMClogit(low~age+as.factor(race)+smoke, data=birthwt, user.prior.density=logpriorfun, logfun=TRUE, location=0, scale=10) plot(posterior) summary(posterior) } } \keyword{models} \seealso{\code{\link[coda]{plot.mcmc}},\code{\link[coda]{summary.mcmc}}, \code{\link[stats]{glm}}} MCMCpack/man/MCMCirtKdRob.Rd0000644000176000001440000003757212133644110015113 0ustar ripleyusers\name{MCMCirtKdRob} \alias{MCMCirtKdRob} \title{Markov Chain Monte Carlo for Robust K-Dimensional Item Response Theory Model} \description{ This function generates a posterior sample from a Robust K-dimensional item response theory (IRT) model with logistic link, independent standard normal priors on the subject abilities (ideal points), and independent normal priors on the item parameters. The user supplies data and priors, and a sample from the posterior distribution is returned as an mcmc object, which can be subsequently analyzed with functions provided in the coda package. } \usage{ MCMCirtKdRob(datamatrix, dimensions, item.constraints=list(), ability.constraints=list(), burnin = 500, mcmc = 5000, thin=1, interval.method="step", theta.w=0.5, theta.mp=4, alphabeta.w=1.0, alphabeta.mp=4, delta0.w=NA, delta0.mp=3, delta1.w=NA, delta1.mp=3, verbose = FALSE, seed = NA, theta.start = NA, alphabeta.start = NA, delta0.start = NA, delta1.start = NA, b0 = 0, B0=0, k0=.1, k1=.1, c0=1, d0=1, c1=1, d1=1, store.item=TRUE, store.ability=FALSE, drop.constant.items=TRUE, ... ) } \arguments{ \item{datamatrix}{The matrix of data. Must be 0, 1, or missing values. It is of dimensionality subjects by items.} \item{dimensions}{The number of dimensions in the latent space.} \item{item.constraints}{List of lists specifying possible equality or simple inequality constraints on the item parameters. A typical entry in the list has one of three forms: \code{rowname=list(d,c)} which will constrain the dth item parameter for the item named rowname to be equal to c, \code{rowname=list(d,"+")} which will constrain the dth item parameter for the item named rowname to be positive, and \code{rowname=list(d, "-")} which will constrain the dth item parameter for the item named rowname to be negative. If datamatrix is a matrix without row names defaults names of ``V1", ``V2", ... , etc will be used. 
In a \eqn{K}{K}-dimensional model, the first item parameter for item \eqn{i}{i} is the difficulty parameter (\eqn{\alpha_i}{alpha_i}), the second item parameter is the discrimation parameter on dimension 1 (\eqn{\beta_{i,1}}{beta_{i,1}}), the third item parameter is the discrimation parameter on dimension 2 (\eqn{\beta_{i,2}}{beta_{i,2}}), ..., and the \eqn{(K+1)}{(K+1)}th item parameter is the discrimation parameter on dimension \eqn{K}{K} (\eqn{\beta_{i,K}}{beta_{i,K}}). The item difficulty parameters (\eqn{\alpha}{alpha}) should generally not be constrained. } \item{ability.constraints}{List of lists specifying possible equality or simple inequality constraints on the ability parameters. A typical entry in the list has one of three forms: \code{colname=list(d,c)} which will constrain the dth ability parameter for the subject named colname to be equal to c, \code{colname=list(d,"+")} which will constrain the dth ability parameter for the subject named colname to be positive, and \code{colname=list(d, "-")} which will constrain the dth ability parameter for the subject named colname to be negative. If datamatrix is a matrix without column names defaults names of ``V1", ``V2", ... , etc will be used.} \item{burnin}{The number of burn-in iterations for the sampler.} \item{mcmc}{The number of iterations for the sampler after burn-in.} \item{thin}{The thinning interval used in the simulation. The number of iterations must be divisible by this value.} \item{interval.method}{Method for finding the slicing interval. Can be equal to either \code{step} in which case the stepping out algorithm of Neal (2003) is used or \code{doubling} in which case the doubling procedure of Neal (2003) is used. The stepping out method tends to be faster on a per-iteration basis as it typically requires few function calls. The doubling method expands the initial interval more quickly which makes the Markov chain mix somewhat more quickly-- at least in some situations. } \item{theta.w}{The initial width of the slice sampling interval for each ability parameter (the elements of \eqn{\theta}{theta})} \item{theta.mp}{The parameter governing the maximum possible width of the slice interval for each ability parameter (the elements of \eqn{\theta}{theta}). If \code{interval.method="step"} then the maximum width is \code{theta.w * theta.mp}. If \code{interval.method="doubling"} then the maximum width is \code{theta.w * 2^theta.mp}. } \item{alphabeta.w}{The initial width of the slice sampling interval for each item parameter (the elements of \eqn{\alpha}{alpha} and \eqn{\beta}{beta})} \item{alphabeta.mp}{ The parameter governing the maximum possible width of the slice interval for each item parameters (the elements of \eqn{\alpha}{alpha} and \eqn{\beta}{beta}). If \code{interval.method="step"} then the maximum width is \code{alphabeta.w * alphabeta.mp}. If \code{interval.method="doubling"} then the maximum width is \code{alphabeta.w * 2^alphabeta.mp}. } \item{delta0.w}{The initial width of the slice sampling interval for \eqn{\delta_0}{delta0}} \item{delta0.mp}{The parameter governing the maximum possible width of the slice interval for \eqn{\delta_0}{delta0}. If \code{interval.method="step"} then the maximum width is \code{delta0.w * delta0.mp}. If \code{interval.method="doubling"} then the maximum width is \code{delta0.w * 2^delta0.mp}. 
} \item{delta1.w}{The initial width of the slice sampling interval for \eqn{\delta_1}{delta1}} \item{delta1.mp}{The parameter governing the maximum possible width of the slice interval for \eqn{\delta_1}{delta1}. If \code{interval.method="step"} then the maximum width is \code{delta1.w * delta1.mp}. If \code{interval.method="doubling"} then the maximum width is \code{delta1.w * 2^delta1.mp}. } \item{verbose}{A switch which determines whether or not the progress of the sampler is printed to the screen. If verbose > 0, the iteration number with be printed to the screen every verbose'th iteration.} \item{seed}{The seed for the random number generator. If NA, the Mersenne Twister generator is used with default seed 12345; if an integer is passed it is used to seed the Mersenne twister. The user can also pass a list of length two to use the L'Ecuyer random number generator, which is suitable for parallel computation. The first element of the list is the L'Ecuyer seed, which is a vector of length six or NA (if NA a default seed of \code{rep(12345,6)} is used). The second element of list is a positive substream number. See the MCMCpack specification for more details.} \item{theta.start}{The starting values for the ability parameters \eqn{\theta}{theta}. Can be either a scalar or a matrix with number of rows equal to the number of subjects and number of columns equal to the dimension \eqn{K}{K} of the latent space. If \code{theta.start=NA} then starting values will be chosen that are 0 for unconstrained subjects, -0.5 for subjects with negative inequality constraints and 0.5 for subjects with positive inequality constraints. } \item{alphabeta.start}{The starting values for the \eqn{\alpha}{alpha} and \eqn{\beta}{beta} difficulty and discrimination parameters. If \code{alphabeta.start} is set to a scalar the starting value for all unconstrained item parameters will be set to that scalar. If \code{alphabeta.start} is a matrix of dimension \eqn{(K+1) \times items}{(K+1) x items} then the \code{alphabeta.start} matrix is used as the starting values (except for equality-constrained elements). If \code{alphabeta.start} is set to \code{NA} (the default) then starting values for unconstrained elements are set to values generated from a series of proportional odds logistic regression fits, and starting values for inequality constrained elements are set to either 1.0 or -1.0 depending on the nature of the constraints. } \item{delta0.start}{The starting value for the \eqn{\delta_0}{delta0} parameter.} \item{delta1.start}{The starting value for the \eqn{\delta_1}{delta1} parameter.} \item{b0}{The prior means of the \eqn{\alpha}{alpha} and \eqn{\beta}{beta} difficulty and discrimination parameters, stacked for all items. If a scalar is passed, it is used as the prior mean for all items.} \item{B0}{The prior precisions (inverse variances) of the independent Normal prior on the item parameters. Can be either a scalar or a matrix of dimension \eqn{(K+1) \times items}{(K+1) x items}.} \item{k0}{\eqn{\delta_0}{delta0} is constrained to lie in the interval between 0 and \code{k0}.} \item{k1}{\eqn{\delta_1}{delta1} is constrained to lie in the interval between 0 and \code{k1}.} \item{c0}{Parameter governing the prior for \eqn{\delta_0}{delta0}. \eqn{\delta_0}{delta0} divided by \code{k0} is assumed to be follow a beta distribution with first parameter \code{c0}.} \item{d0}{Parameter governing the prior for \eqn{\delta_0}{delta0}. 
\eqn{\delta_0}{delta0} divided by \code{k0} is assumed to be follow a beta distribution with second parameter \code{d0}.} \item{c1}{Parameter governing the prior for \eqn{\delta_1}{delta1}. \eqn{\delta_1}{delta1} divided by \code{k1} is assumed to be follow a beta distribution with first parameter \code{c1}.} \item{d1}{Parameter governing the prior for \eqn{\delta_1}{delta1}. \eqn{\delta_1}{delta1} divided by \code{k1} is assumed to be follow a beta distribution with second parameter \code{d1}.} \item{store.item}{A switch that determines whether or not to store the item parameters for posterior analysis. \emph{NOTE: This typically takes an enormous amount of memory, so should only be used if the chain is thinned heavily, or for applications with a small number of items}. By default, the item parameters are not stored.} \item{store.ability}{A switch that determines whether or not to store the subject abilities for posterior analysis. By default, the item parameters are all stored.} \item{drop.constant.items}{A switch that determines whether or not items that have no variation should be deleted before fitting the model. Default = TRUE.} \item{...}{further arguments to be passed} } \value{ An mcmc object that contains the posterior sample. This object can be summarized by functions provided by the coda package. } \details{ \code{MCMCirtKdRob} simulates from the posterior using the slice sampling algorithm of Neal (2003). The simulation proper is done in compiled C++ code to maximize efficiency. Please consult the coda documentation for a comprehensive list of functions that can be used to analyze the posterior sample. The model takes the following form. We assume that each subject has an subject ability (ideal point) denoted \eqn{\theta_j}{theta_j} \eqn{(K \times 1)}{(K x 1)}, and that each item has a scalar difficulty parameter \eqn{\alpha_i}{alpha_i} and discrimination parameter \eqn{\beta_i}{beta_i} \eqn{(K \times 1)}{(K x 1)}. The observed choice by subject \eqn{j}{j} on item \eqn{i}{i} is the observed data matrix which is \eqn{(I \times J)}{(I * J)}. The probability that subject \eqn{j}{j} answers item \eqn{i}{i} correctly is assumed to be: \deqn{\pi_{ij} = \delta_0 + (1 - \delta_0 - \delta_1) /(1+\exp(\alpha_i - \beta_i \theta_j))}{pi_{ij} = delta0 + (1 - delta0 - delta1) / (1 + exp(alpha_i - beta_i * theta_j))} This model was discussed in Bafumi et al. (2005). We assume the following priors. For the subject abilities (ideal points) we assume independent standard Normal priors: \deqn{\theta_{j,k} \sim \mathcal{N}(0,1)}{theta_j,k ~ N(0, 1)} These cannot be changed by the user. For each item parameter, we assume independent Normal priors: \deqn{\left[\alpha_i, \beta_i \right]' \sim \mathcal{N}_{(K+1)} (b_{0,i},B_{0,i})}{[alpha_i beta_i]' ~ N_(K+1) (b_0,i, B_0,i)} Where \eqn{B_{0,i}}{B_0,i} is a diagonal matrix. One can specify a separate prior mean and precision for each item parameter. We also assume \eqn{\delta_0 / k_0 \sim \mathcal{B}eta(c_0, d_0)}{delta0/k0 ~ Beta(c0, d0)} and \eqn{\delta_1 / k_1 \sim \mathcal{B}eta(c_1, d_1)}{delta1/k1 ~ Beta(c1, d1)}. The model is identified by constraints on the item parameters and / or ability parameters. See Rivers (2004) for a discussion of identification of IRT models. As is the case with all measurement models, make sure that you have plenty of free memory, especially when storing the item parameters. } \references{ James H. Albert. 1992. ``Bayesian Estimation of Normal Ogive Item Response Curves Using Gibbs Sampling." 
\emph{Journal of Educational Statistics}. 17: 251-269. Joseph Bafumi, Andrew Gelman, David K. Park, and Noah Kaplan. 2005. ``Practical Issues in Implementing and Understanding Bayesian Ideal Point Estimation.'' \emph{Political Analysis}. Joshua Clinton, Simon Jackman, and Douglas Rivers. 2004. ``The Statistical Analysis of Roll Call Data." \emph{American Political Science Review}. 98: 355-370. Simon Jackman. 2001. ``Multidimensional Analysis of Roll Call Data via Bayesian Simulation.'' \emph{Political Analysis.} 9: 227-241. Valen E. Johnson and James H. Albert. 1999. \emph{Ordinal Data Modeling}. Springer: New York. Andrew D. Martin, Kevin M. Quinn, and Jong Hee Park. 2011. ``MCMCpack: Markov Chain Monte Carlo in R.'', \emph{Journal of Statistical Software}. 42(9): 1-21. \url{http://www.jstatsoft.org/v42/i09/}. Daniel Pemstein, Kevin M. Quinn, and Andrew D. Martin. 2007. \emph{Scythe Statistical Library 1.0.} \url{http://scythe.wustl.edu}. Radford Neal. 2003. ``Slice Sampling'' (with discussion). \emph{Annals of Statistics}, 31: 705-767. Martyn Plummer, Nicky Best, Kate Cowles, and Karen Vines. 2002. \emph{Output Analysis and Diagnostics for MCMC (CODA)}. \url{http://www-fis.iarc.fr/coda/}. Douglas Rivers. 2004. ``Identification of Multidimensional Item-Response Models." Stanford University, typescript. } \examples{ \dontrun{ ## Court example with ability (ideal point) and ## item (case) constraints data(SupremeCourt) post1 <- MCMCirtKdRob(t(SupremeCourt), dimensions=1, burnin=500, mcmc=5000, thin=1, B0=.25, store.item=TRUE, store.ability=TRUE, ability.constraints=list("Thomas"=list(1,"+"), "Stevens"=list(1,-4)), item.constraints=list("1"=list(2,"-")), verbose=50) plot(post1) summary(post1) ## Senate example with ability (ideal point) constraints data(Senate) namestring <- as.character(Senate$member) namestring[78] <- "CHAFEE1" namestring[79] <- "CHAFEE2" namestring[59] <- "SMITH.NH" namestring[74] <- "SMITH.OR" rownames(Senate) <- namestring post2 <- MCMCirtKdRob(Senate[,6:677], dimensions=1, burnin=1000, mcmc=5000, thin=1, ability.constraints=list("KENNEDY"=list(1,-4), "HELMS"=list(1, 4), "ASHCROFT"=list(1,"+"), "BOXER"=list(1,"-"), "KERRY"=list(1,"-"), "HATCH"=list(1,"+")), B0=0.1, store.ability=TRUE, store.item=FALSE, verbose=5, k0=0.15, k1=0.15, delta0.start=0.13, delta1.start=0.13) plot(post2) summary(post2) } } \keyword{models} \seealso{\code{\link[coda]{plot.mcmc}},\code{\link[coda]{summary.mcmc}}, \code{\link[MCMCpack]{MCMCirt1d}}, \code{\link[MCMCpack]{MCMCirtKd}} } MCMCpack/man/MCMCirtKdHet.Rd0000644000176000001440000002206512133644110015100 0ustar ripleyusers\name{MCMCirtKdHet} \Rdversion{1.1} \alias{MCMCirtKdHet} \title{ Markov Chain Monte Carlo for Heteroskedastic K-Dimensional Item Response Theory Model} \description{ This function generates a sample from the posterior distribution of a heteroskedastic K-dimensional item response theory (IRT) model, with standard normal priors on the subject abilities (ideal points), normal priors on the item parameters, and inverse-gamma priors on subject error variances. To maintain identification and comparability with results of the homoskedastic estimator, the mean root subject error precision is constrained to one. 
The user supplies data and priors, and a sample from the posterior distribution is returned as an mcmc object, which can be subsequently analyzed with functions provided in the coda package.} \usage{ MCMCirtKdHet(datamatrix, dimensions, item.constraints = list(), burnin = 1000, mcmc = 1000, thin = 1, verbose = 0, seed = NA, alphabeta.start = NA, b0 = 0, B0 = 0.04, c0 = 0, d0 = 0, store.item = FALSE, store.ability = TRUE, store.sigma = TRUE, drop.constant.items = TRUE) } \arguments{ \item{datamatrix}{The matrix of data. Must be 0, 1, or NA. It is of dimensionality subjects by items.} \item{dimensions}{The number of dimensions in the latent space.} \item{item.constraints}{List of lists specifying possible equality or simple inequality constraints on the item parameters. A typical entry in the list has one of three forms: \code{rowname=list(d,c)} which will constrain the dth item parameter for the item named rowname to be equal to c, \code{rowname=list(d,"+")} which will constrain the dth item parameter for the item named rowname to be positive, and \code{rowname=list(d, "-")} which will constrain the dth item parameter for the item named rowname to be negative. If datamatrix is a matrix without row names, default names of ``V1", ``V2", ..., etc. will be used. In a K-dimensional model, the first item parameter for item \eqn{i}{i} is the difficulty parameter (\eqn{\alpha_i}{alpha_i}), the second item parameter is the discrimination parameter on dimension 1 (\eqn{\beta_{i,1}}{beta_{i,1}}), the third item parameter is the discrimination parameter on dimension 2 (\eqn{\beta_{i,2}}{beta_{i,2}}), ..., and the (K+1)th item parameter is the discrimination parameter on dimension K (\eqn{\beta_{i,K}}{beta_{i,K}}). The item difficulty parameters (\eqn{\alpha}{alpha}) should generally not be constrained. } \item{burnin}{The number of burn-in iterations for the sampler.} \item{mcmc}{The number of iterations for the sampler.} \item{thin}{The thinning interval used in the simulation. The number of iterations must be divisible by this value.} \item{verbose}{A switch which determines whether or not the progress of the sampler is printed to the screen. If \code{verbose} is greater than 0 then every \code{verbose}th iteration will be printed to the screen.} \item{seed}{The seed for the random number generator. If NA, the Mersenne Twister generator is used with default seed 12345; if an integer is passed it is used to seed the Mersenne twister. The user can also pass a list of length two to use the L'Ecuyer random number generator, which is suitable for parallel computation. The first element of the list is the L'Ecuyer seed, which is a vector of length six or NA (if NA a default seed of \code{rep(12345,6)} is used). The second element of the list is a positive substream number. See the MCMCpack specification for more details.} \item{alphabeta.start}{The starting values for the \eqn{\alpha}{alpha} and \eqn{\beta}{beta} difficulty and discrimination parameters. If \code{alphabeta.start} is set to a scalar the starting value for all unconstrained item parameters will be set to that scalar. If \code{alphabeta.start} is a matrix of dimension \eqn{(K+1) \times items}{(K+1) x items} then the \code{alphabeta.start} matrix is used as the starting values (except for equality-constrained elements).
If \code{alphabeta.start} is set to \code{NA} (the default) then starting values for unconstrained elements are set to values generated from a series of proportional odds logistic regression fits, and starting values for inequality constrained elements are set to either 1.0 or -1.0 depending on the nature of the constraints. } \item{b0}{The prior means of the \eqn{\alpha}{alpha} and \eqn{\beta}{beta} difficulty and discrimination parameters, stacked for all items. If a scalar is passed, it is used as the prior mean for all items.} \item{B0}{The prior precisions (inverse variances) of the independent normal prior on the item parameters. Can be either a scalar or a matrix of dimension \eqn{(K+1) \times items}{(K+1) x items}.} \item{c0}{The first parameter of the inverse gamma prior on the subject-specific variance parameters. This can be thought of as the number of bills that the prior information is equivalent to. This scalar value is common across all subjects (legislators) and defaults to an uninformative prior. NOTE: regardless of the value provided, identification is provided by a constraint on the mean root subject specific variance.} \item{d0}{The second parameter of the inverse gamma prior on the subject-specific variance parameters. This can be thought of as the sum of square error that the prior information is equivalent to. This scalar value is common across all subjects (legislators) and defaults to an uninformative prior. NOTE: regardless of the value provided, identification is provided by a constraint on the mean root subject specific variance.} \item{store.item}{A switch that determines whether or not to store the item parameters for posterior analysis. \emph{NOTE: In applications with many items this takes an enormous amount of memory. If you have many items and want to want to store the item parameters you may want to thin the chain heavily}. By default, the item parameters are not stored.} \item{store.ability}{A switch that determines whether or not to store the subject abilities for posterior analysis. \emph{NOTE: In applications with many subjects this takes an enormous amount of memory. If you have many subjects and want to want to store the ability parameters you may want to thin the chain heavily}. By default, the ability parameters are all stored.} \item{store.sigma}{A switch that determines whether or not to store the subject-specific variances for posterior analysis. \emph{NOTE: In applications with many subjects this takes an enormous amount of memory. If you have many subjects and want to want to store the ability parameters you may want to thin the chain heavily}. By default, the subject-specific variance parameters are all stored.} \item{drop.constant.items}{A switch that determines whether or not items that have no variation should be deleted before fitting the model. Default = TRUE.} } \value{ An mcmc object that contains the posterior sample. This object can be summarized by functions provided by the coda package. } \references{ Benjamin E. Lauderdale. 2010. ``Unpredictable Voters in Ideal Point Estimation'' \emph{Political Analysis.} 18: 151-171. } \author{ Benjamin E. Lauderdale, \email{blauderd@princeton.edu}, \url{http://www.princeton.edu/~blauderd/}. Modified from \code{\link[MCMCpack]{MCMCirtKd}} and \code{\link[MCMCpack]{MCMCordfactanal}}. Suggestions for additional options are welcome. 
} \seealso{\code{\link[coda]{plot.mcmc}},\code{\link[coda]{summary.mcmc}}, \code{\link[MCMCpack]{MCMCirtKd}} } \examples{ \dontrun{ data(Senate) Y <- as.matrix(Senate[,6:677]) Hompost <- MCMCirtKd(Y,1,b0=0,B0=0.04,burn=1000,mcmc=1000,thin=1,verbose=250) Hetpost <- MCMCirtKdHet(Y,1,b0=0,B0=0.04,burn=1000,mcmc=1000,thin=1,verbose=250) SenatorNames <- Senate[,5] HomoskedasticIdealPointEstimates <- colMeans(Hompost)[1:102] HeteroskedasticIdealPointEstimates <- colMeans(Hetpost)[1:102] HeteroskedasticSigmaEstimates <- colMeans(Hetpost)[103:204] plot(HomoskedasticIdealPointEstimates, HeteroskedasticIdealPointEstimates, cex= HeteroskedasticSigmaEstimates,xlab="Ideal Points (Homoskedastic)", ylab="Ideal Points (Heteroskedastic)", main="Comparison of Ideal Point Estimates for the 106th Senate", xlim=c(-2.5,2.5),ylim=c(-2.5,2.5)) for (i in 1:102){ if (rank(-HeteroskedasticSigmaEstimates)[i] <= 10){ text(HomoskedasticIdealPointEstimates[i], HeteroskedasticIdealPointEstimates[i],SenatorNames[i], pos=3-sign(HomoskedasticIdealPointEstimates[i]),cex=0.75) } } legend(x="topleft",legend=c("Point sizes proportional to estimated legislator", "variance under heteroskedastic model.","Some legislators with large variance have", "more extreme estimated ideal points under the","heteroskedastic model because their", "deviations from the party line are attributable","to idiosyncrasy rather than moderation."),cex=0.5) } } \keyword{models} MCMCpack/man/MCMCirtKd.Rd0000644000176000001440000002535612133644110014445 0ustar ripleyusers\name{MCMCirtKd} \alias{MCMCirtKd} \title{Markov Chain Monte Carlo for K-Dimensional Item Response Theory Model} \description{ This function generates a sample from the posterior distribution of a K-dimensional item response theory (IRT) model, with standard normal priors on the subject abilities (ideal points), and normal priors on the item parameters. The user supplies data and priors, and a sample from the posterior distribution is returned as an mcmc object, which can be subsequently analyzed with functions provided in the coda package. } \usage{ MCMCirtKd(datamatrix, dimensions, item.constraints=list(), burnin = 1000, mcmc = 10000, thin=1, verbose = 0, seed = NA, alphabeta.start = NA, b0 = 0, B0=0, store.item = FALSE, store.ability=TRUE, drop.constant.items=TRUE, ... ) } \arguments{ \item{datamatrix}{The matrix of data. Must be 0, 1, or missing values. It is of dimensionality subjects by items.} \item{dimensions}{The number of dimensions in the latent space.} \item{item.constraints}{List of lists specifying possible equality or simple inequality constraints on the item parameters. A typical entry in the list has one of three forms: \code{rowname=list(d,c)} which will constrain the dth item parameter for the item named rowname to be equal to c, \code{rowname=list(d,"+")} which will constrain the dth item parameter for the item named rowname to be positive, and\code{rowname=list(d, "-")} which will constrain the dth item parameter for the item named rowname to be negative. If x is a matrix without row names defaults names of ``V1", ``V2", ... , etc will be used. 
In a K-dimensional model, the first item parameter for item \eqn{i}{i} is the difficulty parameter (\eqn{\alpha_i}{alpha_i}), the second item parameter is the discrimination parameter on dimension 1 (\eqn{\beta_{i,1}}{beta_{i,1}}), the third item parameter is the discrimination parameter on dimension 2 (\eqn{\beta_{i,2}}{beta_{i,2}}), ..., and the (K+1)th item parameter is the discrimination parameter on dimension K (\eqn{\beta_{i,K}}{beta_{i,K}}). The item difficulty parameters (\eqn{\alpha}{alpha}) should generally not be constrained. } \item{burnin}{The number of burn-in iterations for the sampler.} \item{mcmc}{The number of iterations for the sampler.} \item{thin}{The thinning interval used in the simulation. The number of iterations must be divisible by this value.} \item{verbose}{A switch which determines whether or not the progress of the sampler is printed to the screen. If \code{verbose} is greater than 0 then every \code{verbose}th iteration will be printed to the screen.} \item{seed}{The seed for the random number generator. If NA, the Mersenne Twister generator is used with default seed 12345; if an integer is passed it is used to seed the Mersenne twister. The user can also pass a list of length two to use the L'Ecuyer random number generator, which is suitable for parallel computation. The first element of the list is the L'Ecuyer seed, which is a vector of length six or NA (if NA a default seed of \code{rep(12345,6)} is used). The second element of list is a positive substream number. See the MCMCpack specification for more details.} \item{alphabeta.start}{The starting values for the \eqn{\alpha}{alpha} and \eqn{\beta}{beta} difficulty and discrimination parameters. If \code{alphabeta.start} is set to a scalar, the starting value for all unconstrained item parameters will be set to that scalar. If \code{alphabeta.start} is a matrix of dimension \eqn{(K+1) \times items}{(K+1) x items} then the \code{alphabeta.start} matrix is used as the starting values (except for equality-constrained elements). If \code{alphabeta.start} is set to \code{NA} (the default) then starting values for unconstrained elements are set to values generated from a series of proportional odds logistic regression fits, and starting values for inequality constrained elements are set to either 1.0 or -1.0 depending on the nature of the constraints. } \item{b0}{The prior means of the \eqn{\alpha}{alpha} and \eqn{\beta}{beta} difficulty and discrimination parameters, stacked for all items. If a scalar is passed, it is used as the prior mean for all items.} \item{B0}{The prior precisions (inverse variances) of the independent normal prior on the item parameters. Can be either a scalar or a matrix of dimension \eqn{(K+1) \times items}{(K+1) x items}.} \item{store.item}{A switch that determines whether or not to store the item parameters for posterior analysis. \emph{NOTE: In applications with many items this takes an enormous amount of memory. If you have many items and want to store the item parameters, you may want to thin the chain heavily}. By default, the item parameters are not stored.} \item{store.ability}{A switch that determines whether or not to store the subject abilities for posterior analysis. \emph{NOTE: In applications with many subjects this takes an enormous amount of memory. If you have many subjects and want to store the ability parameters, you may want to thin the chain heavily}.
By default, the ability parameters are all stored.} \item{drop.constant.items}{A switch that determines whether or not items that have no variation should be deleted before fitting the model. Default = TRUE.} \item{...}{further arguments to be passed} } \value{ An mcmc object that contains the posterior sample. This object can be summarized by functions provided by the coda package. } \details{ \code{MCMCirtKd} simulates from the posterior distribution using standard Gibbs sampling using data augmentation (a normal draw for the subject abilities, a multivariate normal draw for the item parameters, and a truncated normal draw for the latent utilities). The simulation proper is done in compiled C++ code to maximize efficiency. Please consult the coda documentation for a comprehensive list of functions that can be used to analyze the posterior sample. The default number of burnin and mcmc iterations is much smaller than the typical default values in MCMCpack. This is because fitting this model is extremely computationally expensive. It does not mean that this small of a number of scans will yield good estimates. The priors of this model need to be proper for identification purposes. The user is asked to provide prior means and precisions \emph{(not variances)} for the item parameters and the subject parameters. The model takes the following form. We assume that each subject has an ability (ideal point) denoted \eqn{\theta_j}{theta_j} \eqn{(K \times 1)}{(K x 1)}, and that each item has a difficulty parameter \eqn{\alpha_i}{alpha_i} and discrimination parameter \eqn{\beta_i}{beta_i} \eqn{(K \times 1)}{(K x 1)}. The observed choice by subject \eqn{j}{j} on item \eqn{i}{i} is the observed data matrix which is \eqn{(I \times J)}{(I * J)}. We assume that the choice is dictated by an unobserved utility: \deqn{z_{i,j} = - \alpha_i + \beta_i' \theta_j + \varepsilon_{i,j}}{z_ij = - alpha_i + beta_i'*theta_j + epsilon_ij} Where the \eqn{\varepsilon_{i,j}}{epsilon_ij}s are assumed to be distributed standard normal. The parameters of interest are the subject abilities (ideal points) and the item parameters. We assume the following priors. For the subject abilities (ideal points) we assume independent standard normal priors: \deqn{\theta_{j,k} \sim \mathcal{N}(0,1)}{theta_j,k ~ N(0, 1)} These cannot be changed by the user. For each item parameter, we assume independent normal priors: \deqn{\left[\alpha_i, \beta_i \right]' \sim \mathcal{N}_{(K+1)} (b_{0,i},B_{0,i})}{[alpha_i beta_i]' ~ N_(K+1) (b_0,i, B_0,i)} Where \eqn{B_{0,i}}{B_0,i} is a diagonal matrix. One can specify a separate prior mean and precision for each item parameter. The model is identified by the constraints on the item parameters (see Jackman 2001). The user cannot place constraints on the subject abilities. This identification scheme differs from that in \code{MCMCirt1d}, which uses constraints on the subject abilities to identify the model. In our experience, using subject ability constraints for models in greater than one dimension does not work particularly well. As is the case with all measurement models, make sure that you have plenty of free memory, especially when storing the item parameters. } \references{ James H. Albert. 1992. ``Bayesian Estimation of Normal Ogive Item Response Curves Using Gibbs Sampling." \emph{Journal of Educational Statistics}. 17: 251-269. Joshua Clinton, Simon Jackman, and Douglas Rivers. 2004. ``The Statistical Analysis of Roll Call Data." \emph{American Political Science Review}. 98: 355-370. 
Simon Jackman. 2001. ``Multidimensional Analysis of Roll Call Data via Bayesian Simulation.'' \emph{Political Analysis.} 9: 227-241. Valen E. Johnson and James H. Albert. 1999. \emph{Ordinal Data Modeling}. Springer: New York. Andrew D. Martin, Kevin M. Quinn, and Jong Hee Park. 2011. ``MCMCpack: Markov Chain Monte Carlo in R.'', \emph{Journal of Statistical Software}. 42(9): 1-21. \url{http://www.jstatsoft.org/v42/i09/}. Daniel Pemstein, Kevin M. Quinn, and Andrew D. Martin. 2007. \emph{Scythe Statistical Library 1.0.} \url{http://scythe.wustl.edu}. Martyn Plummer, Nicky Best, Kate Cowles, and Karen Vines. 2002. \emph{Output Analysis and Diagnostics for MCMC (CODA)}. \url{http://www-fis.iarc.fr/coda/}. Douglas Rivers. 2004. ``Identification of Multidimensional Item-Response Models." Stanford University, typescript. } \examples{ \dontrun{ data(SupremeCourt) # note that the rownames (the item names) are "1", "2", etc posterior1 <- MCMCirtKd(t(SupremeCourt), dimensions=1, burnin=5000, mcmc=50000, thin=10, B0=.25, store.item=TRUE, item.constraints=list("1"=list(2,"-"))) plot(posterior1) summary(posterior1) data(Senate) Sen.rollcalls <- Senate[,6:677] posterior2 <- MCMCirtKd(Sen.rollcalls, dimensions=2, burnin=5000, mcmc=50000, thin=10, item.constraints=list(rc2=list(2,"-"), rc2=c(3,0), rc3=list(3,"-")), B0=.25) plot(posterior2) summary(posterior2) } } \keyword{models} \seealso{\code{\link[coda]{plot.mcmc}},\code{\link[coda]{summary.mcmc}}, \code{\link[MCMCpack]{MCMCirt1d}}, \code{\link[MCMCpack]{MCMCordfactanal}} } MCMCpack/man/MCMCirtHier1d.Rd0000644000176000001440000003405312133644110015215 0ustar ripleyusers\name{MCMCirtHier1d} \alias{MCMCirtHier1d} \title{Markov Chain Monte Carlo for Hierarchical One Dimensional Item Response Theory Model, Covariates Predicting Latent Ideal Point (Ability)} \description{ This function generates a sample from the posterior distribution of a one dimensional item response theory (IRT) model, with multivariate Normal priors on the item parameters, and a Normal-Inverse Gamma hierarchical prior on subject ideal points (abilities). The user supplies item-response data, subject covariates, and priors. Note that this identification strategy obviates the constraints used on theta in \code{\link[MCMCpack]{MCMCirt1d}}. A sample from the posterior distribution is returned as an mcmc object, which can be subsequently analyzed with functions provided in the coda package. If you are interested in fitting K-dimensional item response theory models, or would rather identify the model by placing constraints on the item parameters, please see \code{\link[MCMCpack]{MCMCirtKd}}. } \usage{ MCMCirtHier1d(datamatrix, Xjdata, burnin = 1000, mcmc = 20000, thin=1, verbose = 0, seed = NA, theta.start = NA, a.start = NA, b.start = NA, beta.start=NA, b0=0, B0=.01, c0=.001, d0=.001, ab0=0, AB0=.25, store.item = FALSE, store.ability=TRUE, drop.constant.items=TRUE, marginal.likelihood=c("none","Chib95"), px=TRUE,px_a0 = 10, px_b0=10, ... ) } \arguments{ \item{datamatrix}{The matrix of data. Must be 0, 1, or missing values. The rows of \code{datamatrix} correspond to subjects and the columns correspond to items.} \item{Xjdata}{A \code{data.frame} containing second-level predictor covariates for ideal points \eqn{\theta}{theta}. Predictors are modeled as a linear regression on the mean vector of \eqn{\theta}{theta}; the posterior sample contains regression coefficients \eqn{\beta}{beta} and common variance \eqn{\sigma^2}{sigma^2}. 
See Rivers (2003) for a thorough discussion of identification of IRT models.} \item{burnin}{The number of burn-in iterations for the sampler.} \item{mcmc}{The number of Gibbs iterations for the sampler.} \item{thin}{The thinning interval used in the simulation. The number of Gibbs iterations must be divisible by this value.} \item{verbose}{A switch which determines whether or not the progress of the sampler is printed to the screen. If \code{verbose} is greater than 0 then every \code{verbose}th iteration will be printed to the screen.} \item{seed}{The seed for the random number generator. If NA, the Mersenne Twister generator is used with default seed 12345; if an integer is passed it is used to seed the Mersenne twister. The user can also pass a list of length two to use the L'Ecuyer random number generator, which is suitable for parallel computation. The first element of the list is the L'Ecuyer seed, which is a vector of length six or NA (if NA a default seed of \code{rep(12345,6)} is used). The second element of list is a positive substream number. See the MCMCpack specification for more details.} \item{theta.start}{The starting values for the subject abilities (ideal points). This can either be a scalar or a column vector with dimension equal to the number of voters. If this takes a scalar value, then that value will serve as the starting value for all of the thetas. The default value of NA will choose the starting values based on an eigenvalue-eigenvector decomposition of the agreement score matrix formed from the \code{datamatrix}.} \item{a.start}{The starting values for the \eqn{a}{a} difficulty parameters. This can either be a scalar or a column vector with dimension equal to the number of items. If this takes a scalar value, then that value will serve as the starting value for all \eqn{a}{a}. The default value of NA will set the starting values based on a series of probit regressions that condition on the starting values of theta.} \item{b.start}{The starting values for the \eqn{b}{b} discrimination parameters. This can either be a scalar or a column vector with dimension equal to the number of items. If this takes a scalar value, then that value will serve as the starting value for all \eqn{b}{b}. The default value of NA will set the starting values based on a series of probit regressions that condition on the starting values of theta.} \item{beta.start}{The starting values for the \eqn{\beta}{beta} regression coefficients that predict the means of ideal points \eqn{\theta}{theta}. This can either be a scalar or a column vector with length equal to the number of covariates. If this takes a scalar value, then that value will serve as the starting value for all of the betas. The default value of NA will set the starting values based on a linear regression of the covariates on (either provided or generated) \code{theta.start}. } \item{b0}{The prior mean of \eqn{\beta}{beta}. Can be either a scalar or a vector of length equal to the number of subject covariates. If a scalar all means with be set to the passed value.} \item{B0}{The prior precision of \eqn{\beta}{beta}. This can either be a scalar or a square matrix with dimensions equal to the number of betas. If this takes a scalar value, then that value times an identity matrix serves as the prior precision of beta. A default proper but diffuse value of .01 ensures finite marginal likelihood for model comparison. 
A value of 0 is equivalent to an improper uniform prior for beta.} \item{c0}{\eqn{c_0/2}{c0/2} is the shape parameter for the inverse Gamma prior on \eqn{\sigma^2}{sigma^2} (the variance of \eqn{\theta}{theta}). The amount of information in the inverse Gamma prior is something like that from \eqn{c_0}{c0} pseudo-observations.} \item{d0}{\eqn{d_0/2}{d0/2} is the scale parameter for the inverse Gamma prior on \eqn{\sigma^2}{sigma^2} (the variance of \eqn{\theta}{theta}). In constructing the inverse Gamma prior, \eqn{d_0}{d0} acts like the sum of squared errors from the \eqn{c_0}{c0} pseudo-observations.} \item{ab0}{The prior mean of \code{(a, b)}. Can be either a scalar or a 2-vector. If a scalar, both means will be set to the passed value. The prior mean is assumed to be the same across all items.} \item{AB0}{The prior precision of \code{(a, b)}. This can either be a scalar or a 2 by 2 matrix. If this takes a scalar value, then that value times an identity matrix serves as the prior precision. The prior precision is assumed to be the same across all items.} \item{store.item}{A switch that determines whether or not to store the item parameters for posterior analysis. \emph{NOTE: In situations with many items storing the item parameters takes an enormous amount of memory, so \code{store.item} should only be \code{TRUE} if the chain is thinned heavily, or for applications with a small number of items}. By default, the item parameters are not stored.} \item{store.ability}{A switch that determines whether or not to store the ability parameters for posterior analysis. \emph{NOTE: In situations with many individuals storing the ability parameters takes an enormous amount of memory, so \code{store.ability} should only be \code{TRUE} if the chain is thinned heavily, or for applications with a small number of individuals}. By default, ability parameters are stored.} \item{drop.constant.items}{A switch that determines whether or not items that have no variation should be deleted before fitting the model. Default = TRUE.} \item{marginal.likelihood}{Should the marginal likelihood of the second-level model on ideal points be calculated using the method of Chib (1995)? It is stored as an attribute of the posterior \code{mcmc} object and is suitable for comparison using \code{\link[MCMCpack]{BayesFactor}}.} \item{px}{Use Parameter Expansion to reduce autocorrelation in the chain? PX introduces an unidentified parameter \eqn{alpha} for the residual variance in the latent data (Liu and Wu 1999). Default = TRUE.} \item{px_a0}{Prior shape parameter for the inverse-gamma distribution on \eqn{alpha}, the residual variance of the latent data. Default=10.} \item{px_b0}{Prior scale parameter for the inverse-gamma distribution on \eqn{alpha}, the residual variance of the latent data. Default = 10.} \item{...}{further arguments to be passed} } \value{ An \code{mcmc} object that contains the sample from the posterior distribution. This object can be summarized by functions provided by the coda package. If \code{marginal.likelihood = "Chib95"} the object will have attribute \code{logmarglike}. } \details{ \code{MCMCirtHier1d} simulates from the posterior distribution using standard Gibbs sampling with data augmentation (a Normal draw for the subject abilities, a multivariate Normal draw for (second-level) subject ability predictors, an Inverse-Gamma draw for the (second-level) variance of subject abilities, a multivariate Normal draw for the item parameters, and a truncated Normal draw for the latent utilities).
The simulation proper is done in compiled C++ code to maximize efficiency. Please consult the coda documentation for a comprehensive list of functions that can be used to analyze the posterior sample. The model takes the following form. We assume that each subject has a subject ability (ideal point) denoted \eqn{\theta_j}{theta_j} and that each item has a difficulty parameter \eqn{a_i}{a_i} and discrimination parameter \eqn{b_i}{b_i}. The observed choice by subject \eqn{j}{j} on item \eqn{i}{i} is the observed data matrix which is \eqn{(I \times J)}{(I * J)}. We assume that the choice is dictated by an unobserved utility: \deqn{z_{i,j} = -a_i + b_i \theta_j + \varepsilon_{i,j}}{z_ij = -a_i + b_i*theta_j + epsilon_ij} Where the errors are assumed to be distributed standard Normal. This constitutes the measurement or level-1 model. The subject abilities (ideal points) are modeled by a second level Normal linear predictor for subject covariates \code{Xjdata}, with common variance \eqn{\sigma^2}{sigma^2}. The parameters of interest are the subject abilities (ideal points), item parameters, and second-level coefficients. We assume the following priors. For the subject abilities (ideal points): \deqn{\theta_j \sim \mathcal{N}(\mu_{\theta}, T_{0}^{-1})}{theta_j ~ N(mu_theta, T0^{-1})} For the item parameters, the prior is: \deqn{\left[a_i, b_i \right]' \sim \mathcal{N}_2 (ab_{0},AB_{0}^{-1})}{[a_i b_i]' ~ N_2 (ab0, AB0^{-1})} The model is identified by the proper priors on the item parameters and constraints placed on the ability parameters. As is the case with all measurement models, make sure that you have plenty of free memory, especially when storing the item parameters. } \author{Michael Malecki, \email{malecki@wustl.edu}, \url{http://malecki.wustl.edu}.} \references{ James H. Albert. 1992. ``Bayesian Estimation of Normal Ogive Item Response Curves Using Gibbs Sampling." \emph{Journal of Educational Statistics}. 17: 251--269. Joshua Clinton, Simon Jackman, and Douglas Rivers. 2004. ``The Statistical Analysis of Roll Call Data." \emph{American Political Science Review} 98: 355--370. Valen E. Johnson and James H. Albert. 1999. ``Ordinal Data Modeling." Springer: New York. Liu, Jun S. and Ying Nian Wu. 1999. ``Parameter Expansion for Data Augmentation.'' \emph{Journal of the American Statistical Association} 94: 1264--1274. Andrew D. Martin, Kevin M. Quinn, and Jong Hee Park. 2011. ``MCMCpack: Markov Chain Monte Carlo in R.'', \emph{Journal of Statistical Software}. 42(9): 1-21. \url{http://www.jstatsoft.org/v42/i09/}. Daniel Pemstein, Kevin M. Quinn, and Andrew D. Martin. 2007. \emph{Scythe Statistical Library 1.0.} \url{http://scythe.wustl.edu}. Martyn Plummer, Nicky Best, Kate Cowles, and Karen Vines. 2002. \emph{Output Analysis and Diagnostics for MCMC (CODA)}. \url{http://www-fis.iarc.fr/coda/}. Douglas Rivers. 2004. ``Identification of Multidimensional Item-Response Models." Stanford University, typescript. } \examples{ \dontrun{ data(SupremeCourt) Xjdata <- data.frame(presparty= c(1,1,0,1,1,1,1,0,0), sex= c(0,0,1,0,0,0,0,1,0)) ## Parameter Expansion reduces autocorrelation. posterior1 <- MCMCirtHier1d(t(SupremeCourt), burnin=50000, mcmc=10000, thin=20, verbose=10000, Xjdata=Xjdata, marginal.likelihood="Chib95", px=TRUE) ## But, you can always turn it off.
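## A small illustrative check (a sketch, not part of the original example):
## when marginal.likelihood="Chib95", the Chib (1995) log-marginal
## likelihood is stored as the "logmarglike" attribute of the returned
## mcmc object, and coda's autocorr.diag() reports the autocorrelation
## that parameter expansion is intended to reduce.
attr(posterior1, "logmarglike")
autocorr.diag(posterior1)
## The next call refits the model with parameter expansion turned off (px=FALSE).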
posterior2 <- MCMCirtHier1d(t(SupremeCourt), burnin=50000, mcmc=10000, thin=20, verbose=10000, Xjdata=Xjdata, #marginal.likelihood="Chib95", px=FALSE) ## Note that the hierarchical model has greater autocorrelation than ## the naive IRT model. posterior0 <- MCMCirt1d(t(SupremeCourt), theta.constraints=list(Scalia="+", Ginsburg="-"), B0.alpha=.2, B0.beta=.2, burnin=50000, mcmc=100000, thin=100, verbose=10000, store.item=FALSE) ## Randomly 10% Missing -- this affects the expansion parameter, increasing ## the variance of the (unidentified) latent parameter alpha. scMiss <- SupremeCourt scMiss[matrix(as.logical(rbinom(nrow(SupremeCourt)*ncol(SupremeCourt), 1, .1)), dim(SupremeCourt))] <- NA posterior1.miss <- MCMCirtHier1d(t(scMiss), burnin=80000, mcmc=10000, thin=20, verbose=10000, Xjdata=Xjdata, marginal.likelihood="Chib95", px=TRUE) } } \keyword{models} \seealso{\code{\link[coda]{plot.mcmc}},\code{\link[coda]{summary.mcmc}}, \code{\link[MCMCpack]{MCMCirtKd}}} MCMCpack/man/MCMCirt1d.Rd0000644000176000001440000002436012133644110014405 0ustar ripleyusers\name{MCMCirt1d} \alias{MCMCirt1d} \title{Markov Chain Monte Carlo for One Dimensional Item Response Theory Model} \description{ This function generates a sample from the posterior distribution of a one dimensional item response theory (IRT) model, with Normal priors on the subject abilities (ideal points), and multivariate Normal priors on the item parameters. The user supplies data and priors, and a sample from the posterior distribution is returned as an mcmc object, which can be subsequently analyzed with functions provided in the coda package. If you are interested in fitting K-dimensional item response theory models, or would rather identify the model by placing constraints on the item parameters, please see \code{\link[MCMCpack]{MCMCirtKd}}. } \usage{ MCMCirt1d(datamatrix, theta.constraints=list(), burnin = 1000, mcmc = 20000, thin=1, verbose = 0, seed = NA, theta.start = NA, alpha.start = NA, beta.start = NA, t0 = 0, T0 = 1, ab0=0, AB0=.25, store.item = FALSE, store.ability = TRUE, drop.constant.items=TRUE, ... ) } \arguments{ \item{datamatrix}{The matrix of data. Must be 0, 1, or missing values. The rows of \code{datamatrix} correspond to subjects and the columns correspond to items.} \item{theta.constraints}{A list specifying possible simple equality or inequality constraints on the ability parameters. A typical entry in the list has one of three forms: \code{varname=c} which will constrain the ability parameter for the subject named \code{varname} to be equal to c, \code{varname="+"} which will constrain the ability parameter for the subject named \code{varname} to be positive, and \code{varname="-"} which will constrain the ability parameter for the subject named \code{varname} to be negative. If x is a matrix without row names defaults names of ``V1",``V2", ... , etc will be used. See Rivers (2003) for a thorough discussion of identification of IRT models.} \item{burnin}{The number of burn-in iterations for the sampler.} \item{mcmc}{The number of Gibbs iterations for the sampler.} \item{thin}{The thinning interval used in the simulation. The number of Gibbs iterations must be divisible by this value.} \item{verbose}{A switch which determines whether or not the progress of the sampler is printed to the screen. If \code{verbose} is greater than 0 then every \code{verbose}th iteration will be printed to the screen.} \item{seed}{The seed for the random number generator. 
If NA, the Mersenne Twister generator is used with default seed 12345; if an integer is passed it is used to seed the Mersenne twister. The user can also pass a list of length two to use the L'Ecuyer random number generator, which is suitable for parallel computation. The first element of the list is the L'Ecuyer seed, which is a vector of length six or NA (if NA a default seed of \code{rep(12345,6)} is used). The second element of list is a positive substream number. See the MCMCpack specification for more details.} \item{theta.start}{The starting values for the subject abilities (ideal points). This can either be a scalar or a column vector with dimension equal to the number of voters. If this takes a scalar value, then that value will serve as the starting value for all of the thetas. The default value of NA will choose the starting values based on an eigenvalue-eigenvector decomposition of the agreement score matrix formed from the \code{datamatrix}.} \item{alpha.start}{The starting values for the \eqn{\alpha}{alpha} difficulty parameters. This can either be a scalar or a column vector with dimension equal to the number of items. If this takes a scalar value, then that value will serve as the starting value for all of the alphas. The default value of NA will set the starting values based on a series of probit regressions that condition on the starting values of theta.} \item{beta.start}{The starting values for the \eqn{\beta}{beta} discrimination parameters. This can either be a scalar or a column vector with dimension equal to the number of items. If this takes a scalar value, then that value will serve as the starting value for all of the betas. The default value of NA will set the starting values based on a series of probit regressions that condition on the starting values of theta.} \item{t0}{A scalar parameter giving the prior mean of the subject abilities (ideal points).} \item{T0}{A scalar parameter giving the prior precision (inverse variance) of the subject abilities (ideal points).} \item{ab0}{The prior mean of \code{(alpha, beta)}. Can be either a scalar or a 2-vector. If a scalar, both means will be set to the passed value. The prior mean is assumed to be the same across all items.} \item{AB0}{The prior precision of \code{(alpha, beta)}. This can either be a scalar or a 2 by 2 matrix. If this takes a scalar value, then that value times an identity matrix serves as the prior precision. The prior precision is assumed to be the same across all items.} \item{store.item}{A switch that determines whether or not to store the item parameters for posterior analysis. \emph{NOTE: In situations with many items storing the item parameters takes an enormous amount of memory, so \code{store.item} should only be \code{TRUE} if the chain is thinned heavily, or for applications with a small number of items}. By default, the item parameters are not stored.} \item{store.ability}{A switch that determines whether or not to store the ability parameters for posterior analysis. \emph{NOTE: In situations with many individuals storing the ability parameters takes an enormous amount of memory, so \code{store.ability} should only be \code{TRUE} if the chain is thinned heavily, or for applications with a small number of individuals}. By default, the ability parameters are stored.} \item{drop.constant.items}{A switch that determines whether or not items that have no variation should be deleted before fitting the model.
Default = TRUE.} \item{...}{further arguments to be passed} } \value{ An mcmc object that contains the sample from the posterior distribution. This object can be summarized by functions provided by the coda package. } \details{ \code{MCMCirt1d} simulates from the posterior distribution using standard Gibbs sampling with data augmentation (a Normal draw for the subject abilities, a multivariate Normal draw for the item parameters, and a truncated Normal draw for the latent utilities). The simulation proper is done in compiled C++ code to maximize efficiency. Please consult the coda documentation for a comprehensive list of functions that can be used to analyze the posterior sample. The model takes the following form. We assume that each subject has a subject ability (ideal point) denoted \eqn{\theta_j}{theta_j} and that each item has a difficulty parameter \eqn{\alpha_i}{alpha_i} and discrimination parameter \eqn{\beta_i}{beta_i}. The observed choice by subject \eqn{j}{j} on item \eqn{i}{i} is the observed data matrix which is \eqn{(I \times J)}{(I * J)}. We assume that the choice is dictated by an unobserved utility: \deqn{z_{i,j} = -\alpha_i + \beta_i \theta_j + \varepsilon_{i,j}}{z_ij = -alpha_i + beta_i*theta_j + epsilon_ij} Where the errors are assumed to be distributed standard Normal. The parameters of interest are the subject abilities (ideal points) and the item parameters. We assume the following priors. For the subject abilities (ideal points): \deqn{\theta_j \sim \mathcal{N}(t_{0},T_{0}^{-1})}{theta_j ~ N(t0, T0^{-1})} For the item parameters, the prior is: \deqn{\left[\alpha_i, \beta_i \right]' \sim \mathcal{N}_2 (ab_{0},AB_{0}^{-1})}{[alpha_i beta_i]' ~ N_2 (ab0, AB0^{-1})} The model is identified by the proper priors on the item parameters and constraints placed on the ability parameters. As is the case with all measurement models, make sure that you have plenty of free memory, especially when storing the item parameters. } \references{ James H. Albert. 1992. ``Bayesian Estimation of Normal Ogive Item Response Curves Using Gibbs Sampling." \emph{Journal of Educational Statistics}. 17: 251-269. Joshua Clinton, Simon Jackman, and Douglas Rivers. 2004. ``The Statistical Analysis of Roll Call Data." \emph{American Political Science Review}. 98: 355-370. Valen E. Johnson and James H. Albert. 1999. ``Ordinal Data Modeling." Springer: New York. Andrew D. Martin, Kevin M. Quinn, and Jong Hee Park. 2011. ``MCMCpack: Markov Chain Monte Carlo in R.'', \emph{Journal of Statistical Software}. 42(9): 1-21. \url{http://www.jstatsoft.org/v42/i09/}. Daniel Pemstein, Kevin M. Quinn, and Andrew D. Martin. 2007. \emph{Scythe Statistical Library 1.0.} \url{http://scythe.wustl.edu}. Martyn Plummer, Nicky Best, Kate Cowles, and Karen Vines. 2002. \emph{Output Analysis and Diagnostics for MCMC (CODA)}. \url{http://www-fis.iarc.fr/coda/}. Douglas Rivers. 2004. ``Identification of Multidimensional Item-Response Models." Stanford University, typescript.
} \examples{ \dontrun{ ## US Supreme Court Example with inequality constraints data(SupremeCourt) posterior1 <- MCMCirt1d(t(SupremeCourt), theta.constraints=list(Scalia="+", Ginsburg="-"), B0.alpha=.2, B0.beta=.2, burnin=500, mcmc=100000, thin=20, verbose=500, store.item=TRUE) geweke.diag(posterior1) plot(posterior1) summary(posterior1) ## US Senate Example with equality constraints data(Senate) Sen.rollcalls <- Senate[,6:677] posterior2 <- MCMCirt1d(Sen.rollcalls, theta.constraints=list(KENNEDY=-2, HELMS=2), burnin=2000, mcmc=100000, thin=20, verbose=500) geweke.diag(posterior2) plot(posterior2) summary(posterior2) } } \keyword{models} \seealso{\code{\link[coda]{plot.mcmc}},\code{\link[coda]{summary.mcmc}}, \code{\link[MCMCpack]{MCMCirtKd}}} MCMCpack/man/MCMCintervention.Rd0000644000176000001440000002541512133644110016110 0ustar ripleyusers\name{MCMCintervention} \alias{MCMCintervention} \title{Markov Chain Monte Carlo for a linear Gaussian Multiple Changepoint Model} \description{ This function generates a sample from the posterior distribution of a linear Gaussian model with multiple changepoints. The function uses the Markov chain Monte Carlo method of Chib (1998). The user supplies data and priors, and a sample from the posterior distribution is returned as an mcmc object, which can be subsequently analyzed with functions provided in the coda package. } \usage{MCMCintervention(y, data=parent.frame(), m = 1, intervention, prediction.type=c("trend","ar"), change.type=c("fixed", "random", "all"), b0 = 0, B0 = 0, c0 = 0.001, d0 = 0.001, sigma.mu = NA, sigma.var = NA, a = NULL, b = NULL, mcmc = 1000, burnin = 1000, thin = 1, verbose = 0, seed = NA, beta.start = NA, P.start = NA, marginal.likelihood = c("none", "Chib95"), ...)} \arguments{ \item{y}{data.} \item{data}{Data frame.} \item{m}{The number of changepoints.} \item{intervention}{The timing of intervention measured by its place in the response vector. It should be larger than 0 and smaller than the length of the response vector. No default value is provided.} \item{prediction.type}{The type of local process. "trend" denotes the linear trend model and "ar" denotes the AR(1) process. By default, MCMCintervention uses the linear trend model.} \item{change.type}{The type of parametric breaks. "all" denotes that all parameters have breaks, "fixed" denotes that only the intercept and the slope have breaks, and "random" denotes that only the variance has breaks. By default, MCMCintervention assumes that all parameters have breaks.} \item{b0}{The prior mean of \eqn{\beta}{beta}. This can either be a scalar or a column vector with dimension equal to the number of betas. If this takes a scalar value, then that value will serve as the prior mean for all of the betas.} \item{B0}{The prior precision of \eqn{\beta}{beta}. This can either be a scalar or a square matrix with dimensions equal to the number of betas. If this takes a scalar value, then that value times an identity matrix serves as the prior precision of beta. Default value of 0 is equivalent to an improper uniform prior for beta.} \item{c0}{\eqn{c_0/2}{c0/2} is the shape parameter for the inverse Gamma prior on \eqn{\sigma^2}{sigma^2} (the variance of the disturbances). The amount of information in the inverse Gamma prior is something like that from \eqn{c_0}{c0} pseudo-observations.} \item{d0}{\eqn{d_0/2}{d0/2} is the scale parameter for the inverse Gamma prior on \eqn{\sigma^2}{sigma^2} (the variance of the disturbances).
In constructing the inverse Gamma prior, \eqn{d_0}{d0} acts like the sum of squared errors from the \eqn{c_0}{c0} pseudo-observations.} \item{sigma.mu}{The mean of the inverse Gamma prior on \eqn{\sigma^2}{sigma^2}. \eqn{sigma.mu}{sigma.mu} and \eqn{sigma.var}{sigma.var} allow users to choose the inverse Gamma prior by choosing its mean and variance. } \item{sigma.var}{The variance of the inverse Gamma prior on \eqn{\sigma^2}{sigma^2}. \eqn{sigma.mu}{sigma.mu} and \eqn{sigma.var}{sigma.var} allow users to choose the inverse Gamma prior by choosing its mean and variance. } \item{a}{\eqn{a}{a} is the shape1 parameter of the Beta prior on transition probabilities. By default, the expected duration is computed and corresponding a and b values are assigned. The expected duration is the sample period divided by the number of states.} \item{b}{\eqn{b}{b} is the shape2 parameter of the Beta prior on transition probabilities. By default, the expected duration is computed and corresponding a and b values are assigned. The expected duration is the sample period divided by the number of states.} \item{burnin}{The number of burn-in iterations for the sampler.} \item{mcmc}{The number of MCMC iterations after burnin.} \item{thin}{The thinning interval used in the simulation. The number of MCMC iterations must be divisible by this value.} \item{verbose}{A switch which determines whether or not the progress of the sampler is printed to the screen. If \code{verbose} is greater than 0 the iteration number, the \eqn{\beta}{beta} vector, and the error variance are printed to the screen every \code{verbose}th iteration.} \item{seed}{The seed for the random number generator. If NA, the Mersenne Twister generator is used with default seed 12345; if an integer is passed it is used to seed the Mersenne twister. The user can also pass a list of length two to use the L'Ecuyer random number generator, which is suitable for parallel computation. The first element of the list is the L'Ecuyer seed, which is a vector of length six or NA (if NA a default seed of \code{rep(12345,6)} is used). The second element of list is a positive substream number. See the MCMCpack specification for more details.} \item{beta.start}{The starting values for the \eqn{\beta}{beta} vector. This can either be a scalar or a column vector with dimension equal to the number of betas. The default value of NA will use the MLE estimate of \eqn{\beta}{beta} as the starting value. If this is a scalar, that value will serve as the starting value mean for all of the betas.} \item{P.start}{The starting values for the transition matrix. A user should provide a square matrix with dimension equal to the number of states. By default, draws from the \code{Beta(0.9, 0.1)} are used to construct a proper transition matrix for each row except the last row.} \item{marginal.likelihood}{How should the marginal likelihood be calculated? Options are: \code{none} in which case the marginal likelihood will not be calculated, and \code{Chib95} in which case the method of Chib (1995) is used.} \item{...}{further arguments to be passed} } \value{ An mcmc object that contains the posterior sample. This object can be summarized by functions provided by the coda package. The object contains an attribute \code{prob.state}, a storage matrix containing the probability of \eqn{state_i}{state_i} for each period, as well as the log-likelihood of the model (\code{loglike}) and the log-marginal likelihood of the model (\code{logmarglike}).
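As a brief sketch of how these stored quantities can be retrieved (assuming \code{out} is a fitted object such as \code{ar1fixed} in the examples below, and that the quantities are stored as attributes as described above), \code{attr(out, "prob.state")} returns the matrix of per-period state probabilities and \code{attr(out, "logmarglike")} returns the log-marginal likelihood used by \code{\link{BayesFactor}}.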
} \details{ \code{MCMCintervention} simulates from the posterior distribution of a binary model with multiple changepoints. The model takes the following form: \deqn{y_t = x_t ' \beta_i + I(s_t = i)\varepsilon_{t},\;\; i = 1, \ldots, k}{y_t = x_t'beta_i + I(s_t = i)epsilon_t, i = 1,...,k.} Where \eqn{k}{k} is the number of states and \eqn{I(s_t = i)}{I(s_t = i)} is an indicator function that becomes 1 when a state at \eqn{t}{t} is \eqn{i}{i} and otherwise 0. The errors are assumed to be Gaussian in each regime: \deqn{I(s_t = i)\varepsilon_{t} \sim \mathcal{N}(0, \sigma^2_i)}{I(s_t = i)epsilon_t ~ N(0, sigma^2_i)} We assume standard, semi-conjugate priors: \deqn{\beta_i \sim \mathcal{N}(b_0,B_0^{-1}),\;\; i = 1, \ldots, k}{beta_i ~ N(b0,B0^(-1)), i = 1,...,k.} And: \deqn{\sigma^{-2}_i \sim \mathcal{G}amma(c_0/2, d_0/2),\;\; i = 1, \ldots, k}{sigma^(-2)_i ~ Gamma(c0/2, d0/2), i = 1,...,k.} Where \eqn{\beta_i}{beta_i} and \eqn{\sigma^{-2}_i}{sigma^(-2)_i} are assumed \emph{a priori} independent. The simulation proper is done in compiled C++ code to maximize efficiency. } \references{ Jong Hee Park. 2012. "A Change-point Approach to Intervention Analysis Using Bayesian Inference" Presented at the 2012 Annual Meeting of Korean Statistical Society. Siddhartha Chib. 1998. "Estimation and comparison of multiple change-point models." \emph{Journal of Econometrics}. 86: 221-241. } \examples{ \dontrun{ Nile.std <- (Nile - mean(Nile))/sd(Nile) set.seed(1973) b0 <- matrix(c(0, 0), 2, 1); B0 <- diag(rep(0.25, 2)) c0 = 2; d0 = 1 ## Model Comparison ar0 <- MCMCintervention(Nile.std, m=0, prediction.type="ar", change.type = "all", b0=b0, B0=B0, c0=c0, d0=d0, mcmc = 1000, burnin = 1000, verbose = 500, intervention = 29, marginal.likelihood = "Chib95") ar1all <- MCMCintervention(Nile.std, m=1, prediction.type="ar", change.type = "all", b0=b0, B0=B0, c0=c0, d0=d0, mcmc = 1000, burnin = 1000, verbose = 500, intervention = 29, marginal.likelihood = "Chib95") ar1fixed <- MCMCintervention(Nile.std, m=1, prediction.type="ar", change.type = "fixed", b0=b0, B0=B0, c0=c0, d0=d0, mcmc = 1000, burnin = 1000, verbose = 500, intervention = 29, marginal.likelihood = "Chib95") ar1random <- MCMCintervention(Nile.std, m=1, prediction.type="ar", change.type = "random", b0=b0, B0=B0, c0=c0, d0=d0, mcmc = 1000, burnin = 1000, verbose = 500, intervention = 29, marginal.likelihood = "Chib95") tr0 <- MCMCintervention(Nile.std, m=0, prediction.type="trend", change.type = "all", b0=b0, B0=B0, c0=c0, d0=d0, mcmc = 1000, burnin = 1000, verbose = 500, intervention = 29, marginal.likelihood = "Chib95") tr1all <- MCMCintervention(Nile.std, m=1, prediction.type="trend", change.type = "all", b0=b0, B0=B0, c0=c0, d0=d0, mcmc = 1000, burnin = 1000, verbose = 500, intervention = 29, marginal.likelihood = "Chib95") tr1fixed <- MCMCintervention(Nile.std, m=1, prediction.type="trend", change.type = "fixed", b0=b0, B0=B0, c0=c0, d0=d0, mcmc = 1000, burnin = 1000, verbose = 500, intervention = 29, marginal.likelihood = "Chib95") tr1random <- MCMCintervention(Nile.std, m=1, prediction.type="trend", change.type = "random", b0=b0, B0=B0, c0=c0, d0=d0, mcmc = 1000, burnin = 1000, verbose = 500, intervention = 29, marginal.likelihood = "Chib95") BayesFactor(ar0, ar1all, ar1fixed, ar1random, tr0, tr1all, tr1fixed, tr1random) par(mfrow=c(1,3)) plotState(ar1fixed, start=1871, main="Hidden Regime Change") plotIntervention(ar1fixed, start=1871, main="Forward Analysis", alpha= 0.5, ylab="Nile River flow", xlab="Year") plotIntervention(ar1fixed, 
forward=FALSE, start=1871, main="Backward Analysis", alpha= 0.5, ylab="Nile River flow", xlab="Year") } } \keyword{models} \seealso{\code{\link{plotIntervention}}} MCMCpack/man/MCMChregress.Rd0000644000176000001440000002175412133644110015210 0ustar ripleyusers\name{MCMChregress} \alias{MCMChregress} \title{Markov Chain Monte Carlo for the Hierarchical Gaussian Linear Regression Model} \description{MCMChregress generates a sample from the posterior distribution of a Hierarchical Gaussian Linear Regression Model using Algorithm 2 of Chib and Carlin (1999). This model uses a multivariate Normal prior for the fixed effects parameters, an Inverse-Wishart prior on the random effects variance matrix, and an Inverse-Gamma prior on the residual error variance. The user supplies data and priors, and a sample from the posterior distribution is returned as an mcmc object, which can be subsequently analyzed with functions provided in the coda package.} \usage{MCMChregress(fixed, random, group, data, burnin=1000, mcmc=10000, thin=10, verbose=1, seed=NA, beta.start=NA, sigma2.start=NA, Vb.start=NA, mubeta=0, Vbeta=1.0E6, r, R, nu=0.001, delta=0.001, ...)} \arguments{ \item{fixed}{A two-sided linear formula of the form 'y~x1+...+xp' describing the fixed-effects part of the model, with the response on the left of a '~' operator and the p fixed terms, separated by '+' operators, on the right.} \item{random}{A one-sided formula of the form '~x1+...+xq' specifying the model for the random effects part of the model, with the q random terms, separated by '+' operators.} \item{group}{String indicating the name of the grouping variable in \code{data}, defining the hierarchical structure of the model.} \item{data}{A data frame containing the variables in the model.} \item{burnin}{The number of burnin iterations for the sampler.} \item{mcmc}{The number of Gibbs iterations for the sampler. Total number of Gibbs iterations is equal to \code{burnin+mcmc}. \code{burnin+mcmc} must be divisible by 10 and superior or equal to 100 so that the progress bar can be displayed.} \item{thin}{The thinning interval used in the simulation. The number of mcmc iterations must be divisible by this value.} \item{seed}{The seed for the random number generator. If NA, the Mersenne Twister generator is used with default seed 12345; if an integer is passed it is used to seed the Mersenne twister.} \item{verbose}{A switch (0,1) which determines whether or not the progress of the sampler is printed to the screen. Default is 1: a progress bar is printed, indicating the step (in \%) reached by the Gibbs sampler.} \item{beta.start}{The starting values for the \eqn{\beta}{beta} vector. This can either be a scalar or a p-length vector. The default value of NA will use the OLS \eqn{\beta}{beta} estimate of the corresponding Gaussian Linear Regression without random effects. If this is a scalar, that value will serve as the starting value mean for all of the betas.} \item{sigma2.start}{Scalar for the starting value of the residual error variance. The default value of NA will use the OLS estimates of the corresponding Gaussian Linear Regression without random effects.} \item{Vb.start}{The starting value for variance matrix of the random effects. This must be a square q-dimension matrix. Default value of NA uses an identity matrix.} \item{mubeta}{The prior mean of \eqn{\beta}{beta}. This can either be a scalar or a p-length vector. If this takes a scalar value, then that value will serve as the prior mean for all of the betas. 
The default value of 0 will use a vector of zeros for an uninformative prior.} \item{Vbeta}{The prior variance of \eqn{\beta}{beta}. This can either be a scalar or a square p-dimension matrix. If this takes a scalar value, then that value times an identity matrix serves as the prior variance of beta. Default value of 1.0E6 will use a diagonal matrix with very large variance for an uninformative flat prior.} \item{r}{The shape parameter for the Inverse-Wishart prior on variance matrix for the random effects. r must be superior or equal to q. Set r=q for an uninformative prior. See the NOTE for more details} \item{R}{The scale matrix for the Inverse-Wishart prior on variance matrix for the random effects. This must be a square q-dimension matrix. Use plausible variance regarding random effects for the diagonal of R. See the NOTE for more details} \item{nu}{The shape parameter for the Inverse-Gamma prior on the residual error variance. Default value is \code{nu=delta=0.001} for uninformative prior.} \item{delta}{The rate (1/scale) parameter for the Inverse-Gamma prior on the residual error variance. Default value is \code{nu=delta=0.001} for uninformative prior.} \item{...}{further arguments to be passed} } \details{ \code{MCMChregress} simulates from the posterior distribution sample using the blocked Gibbs sampler of Chib and Carlin (1999), Algorithm 2. The simulation is done in compiled C++ code to maximize efficiency. Please consult the coda documentation for a comprehensive list of functions that can be used to analyze the posterior sample. The model takes the following form: \deqn{y_i = X_i \beta + W_i b_i + \varepsilon_i}{y_i = X_i * beta + W_i * b_i + epsilon_i} Where each group \eqn{i} have \eqn{k_i} observations. Where the random effects: \deqn{b_i \sim \mathcal{N}_q(0,V_b)}{b_i ~ N_q(0,Vb)} And the errors: \deqn{\varepsilon_i \sim \mathcal{N}(0, \sigma^2 I_{k_i})}{epsilon_i ~ N(0, sigma^2 I_{k_i})} We assume standard, conjugate priors: \deqn{\beta \sim \mathcal{N}_p(\mu_{\beta},V_{\beta})}{beta ~ N_p(mubeta,Vbeta)} And: \deqn{\sigma^{2} \sim \mathcal{IG}amma(\nu, 1/\delta)}{sigma^2 ~ IGamma(nu, 1/delta)} And: \deqn{V_b \sim \mathcal{IW}ishart(r, rR)}{Vb ~ IWishart(r, rR)} See Chib and Carlin (1999) for more details. \emph{NOTE:} We do not provide default parameters for the priors on the precision matrix for the random effects. When fitting one of these models, it is of utmost importance to choose a prior that reflects your prior beliefs about the random effects. Using the \code{dwish} and \code{rwish} functions might be useful in choosing these values. } \value{ \item{mcmc}{An mcmc object that contains the posterior sample. This object can be summarized by functions provided by the coda package. The posterior sample of the deviance \eqn{D}{D}, with \eqn{D=-2\log(\prod_i P(y_i|\beta,b_i,\sigma^2))}{% D=-2log(prod_i P(y_i|beta,b_i,sigma^2))}, is also provided.} \item{Y.pred}{Predictive posterior mean for each observation.} } \references{ Siddhartha Chib and Bradley P. Carlin. 1999. ``On MCMC Sampling in Hierarchical Longitudinal Models.'' \emph{Statistics and Computing.} 9: 17-26. Daniel Pemstein, Kevin M. Quinn, and Andrew D. Martin. 2007. \emph{Scythe Statistical Library 1.0.} \url{http://scythe.wustl.edu}. Andrew D. Martin and Kyle L. Saunders. 2002. ``Bayesian Inference for Political Science Panel Data.'' Paper presented at the 2002 Annual Meeting of the American Political Science Association. Martyn Plummer, Nicky Best, Kate Cowles, and Karen Vines. 2002. 
\emph{Output Analysis and Diagnostics for MCMC (CODA)}. \url{http://www-fis.iarc.fr/coda/}. } \author{ Ghislain Vieilledent } \seealso{ \code{\link[coda]{plot.mcmc}}, \code{\link[coda]{summary.mcmc}} } \examples{ \dontrun{ #======================================== # Hierarchical Gaussian Linear Regression #======================================== #== Generating data # Constants nobs <- 1000 nspecies <- 20 species <- c(1:nspecies,sample(c(1:nspecies),(nobs-nspecies),replace=TRUE)) # Covariates X1 <- runif(n=nobs,min=0,max=10) X2 <- runif(n=nobs,min=0,max=10) X <- cbind(rep(1,nobs),X1,X2) W <- X # Target parameters # beta beta.target <- matrix(c(0.1,0.3,0.2),ncol=1) # Vb Vb.target <- c(0.5,0.2,0.1) # b b.target <- cbind(rnorm(nspecies,mean=0,sd=sqrt(Vb.target[1])), rnorm(nspecies,mean=0,sd=sqrt(Vb.target[2])), rnorm(nspecies,mean=0,sd=sqrt(Vb.target[3]))) # sigma2 sigma2.target <- 0.02 # Response Y <- vector() for (n in 1:nobs) { Y[n] <- rnorm(n=1, mean=X[n,]\%*\%beta.target+W[n,]\%*\%b.target[species[n],], sd=sqrt(sigma2.target)) } # Data-set Data <- as.data.frame(cbind(Y,X1,X2,species)) plot(Data$X1,Data$Y) #== Call to MCMChregress model <- MCMChregress(fixed=Y~X1+X2, random=~X1+X2, group="species", data=Data, burnin=1000, mcmc=1000, thin=1,verbose=1, seed=NA, beta.start=0, sigma2.start=1, Vb.start=1, mubeta=0, Vbeta=1.0E6, r=3, R=diag(c(1,0.1,0.1)), nu=0.001, delta=0.001) #== MCMC analysis # Graphics pdf("Posteriors-MCMChregress.pdf") plot(model$mcmc) dev.off() # Summary summary(model$mcmc) # Predictive posterior mean for each observation model$Y.pred # Predicted-Observed plot(Data$Y,model$Y.pred) abline(a=0,b=1) } } \keyword{models} \keyword{hierarchical models} \keyword{mixed models} \keyword{Gaussian} \keyword{MCMC} \keyword{bayesian} MCMCpack/man/MCMChpoisson.Rd0000644000176000001440000002425112133644110015223 0ustar ripleyusers\name{MCMChpoisson} \alias{MCMChpoisson} \title{Markov Chain Monte Carlo for the Hierarchical Poisson Linear Regression Model using the log link function} \description{MCMChpoisson generates a sample from the posterior distribution of a Hierarchical Poisson Linear Regression Model using the log link function and Algorithm 2 of Chib and Carlin (1999). This model uses a multivariate Normal prior for the fixed effects parameters, an Inverse-Wishart prior on the random effects variance matrix, and an Inverse-Gamma prior on the variance modelling over-dispersion. The user supplies data and priors, and a sample from the posterior distribution is returned as an mcmc object, which can be subsequently analyzed with functions provided in the coda package.} \usage{MCMChpoisson(fixed, random, group, data, burnin=5000, mcmc=10000, thin=10, verbose=1, seed=NA, beta.start=NA, sigma2.start=NA, Vb.start=NA, mubeta=0, Vbeta=1.0E6, r, R, nu=0.001, delta=0.001, FixOD=0, ...)} \arguments{ \item{fixed}{A two-sided linear formula of the form 'y~x1+...+xp' describing the fixed-effects part of the model, with the response on the left of a '~' operator and the p fixed terms, separated by '+' operators, on the right. 
Response variable y must be a non-negative integer count (Poisson process).} \item{random}{A one-sided formula of the form '~x1+...+xq' specifying the model for the random effects part of the model, with the q random terms, separated by '+' operators.} \item{group}{String indicating the name of the grouping variable in \code{data}, defining the hierarchical structure of the model.} \item{data}{A data frame containing the variables in the model.} \item{burnin}{The number of burnin iterations for the sampler.} \item{mcmc}{The number of Gibbs iterations for the sampler. Total number of Gibbs iterations is equal to \code{burnin+mcmc}. \code{burnin+mcmc} must be divisible by 10 and greater than or equal to 100 so that the progress bar can be displayed.} \item{thin}{The thinning interval used in the simulation. The number of mcmc iterations must be divisible by this value.} \item{seed}{The seed for the random number generator. If NA, the Mersenne Twister generator is used with default seed 12345; if an integer is passed it is used to seed the Mersenne twister.} \item{verbose}{A switch (0,1) which determines whether or not the progress of the sampler is printed to the screen. Default is 1: a progress bar is printed, indicating the step (in \%) reached by the Gibbs sampler.} \item{beta.start}{The starting values for the \eqn{\beta}{beta} vector. This can either be a scalar or a p-length vector. The default value of NA will use the OLS \eqn{\beta}{beta} estimate of the corresponding Gaussian Linear Regression without random effects. If this is a scalar, that value will serve as the starting value mean for all of the betas.} \item{sigma2.start}{Scalar for the starting value of the residual error variance. The default value of NA will use the OLS estimates of the corresponding Gaussian Linear Regression without random effects.} \item{Vb.start}{The starting value for variance matrix of the random effects. This must be a square q-dimension matrix. Default value of NA uses an identity matrix.} \item{mubeta}{The prior mean of \eqn{\beta}{beta}. This can either be a scalar or a p-length vector. If this takes a scalar value, then that value will serve as the prior mean for all of the betas. The default value of 0 will use a vector of zeros for an uninformative prior.} \item{Vbeta}{The prior variance of \eqn{\beta}{beta}. This can either be a scalar or a square p-dimension matrix. If this takes a scalar value, then that value times an identity matrix serves as the prior variance of beta. Default value of 1.0E6 will use a diagonal matrix with very large variance for an uninformative flat prior.} \item{r}{The shape parameter for the Inverse-Wishart prior on variance matrix for the random effects. r must be greater than or equal to q. Set r=q for an uninformative prior. See the NOTE for more details.} \item{R}{The scale matrix for the Inverse-Wishart prior on variance matrix for the random effects. This must be a square q-dimension matrix. Use plausible variance regarding random effects for the diagonal of R. See the NOTE for more details.} \item{nu}{The shape parameter for the Inverse-Gamma prior on the residual error variance. Default value is \code{nu=delta=0.001} for uninformative prior.} \item{delta}{The rate (1/scale) parameter for the Inverse-Gamma prior on the residual error variance. Default value is \code{nu=delta=0.001} for uninformative prior.} \item{FixOD}{A switch (0,1) which determines whether or not the variance for over-dispersion (sigma2) should be fixed (1) or not (0). Default is 0, parameter sigma2 is estimated.
If FixOD=1, sigma2 is fixed to the value provided for \code{sigma2.start}.} \item{...}{further arguments to be passed} } \details{ \code{MCMChpoisson} simulates from the posterior distribution sample using the blocked Gibbs sampler of Chib and Carlin (1999), Algorithm 2. The simulation is done in compiled C++ code to maximize efficiency. Please consult the coda documentation for a comprehensive list of functions that can be used to analyze the posterior sample. The model takes the following form: \deqn{y_i \sim \mathcal{P}oisson(\lambda_i)}{y_i ~ Poisson(lambda_i)} With latent variables \eqn{\phi(\lambda_i)}{phi(lambda)}, \eqn{\phi}{phi} being the log link function: \deqn{\phi(\lambda_i) = X_i \beta + W_i b_i + \varepsilon_i}{phi(lambda_i) = X_i * beta + W_i * b_i + epsilon_i} Where each group \eqn{i} have \eqn{k_i} observations. Where the random effects: \deqn{b_i \sim \mathcal{N}_q(0,V_b)}{b_i ~ N_q(0,Vb)} And the over-dispersion terms: \deqn{\varepsilon_i \sim \mathcal{N}(0, \sigma^2 I_{k_i})}{epsilon_i ~ N(0, sigma^2 I_{k_i})} We assume standard, conjugate priors: \deqn{\beta \sim \mathcal{N}_p(\mu_{\beta},V_{\beta})}{beta ~ N_p(mubeta,Vbeta)} And: \deqn{\sigma^{2} \sim \mathcal{IG}amma(\nu, 1/\delta)}{sigma^2 ~ IGamma(nu, 1/delta)} And: \deqn{V_b \sim \mathcal{IW}ishart(r, rR)}{Vb ~ IWishart(r, rR)} See Chib and Carlin (1999) for more details. \emph{NOTE:} We do not provide default parameters for the priors on the precision matrix for the random effects. When fitting one of these models, it is of utmost importance to choose a prior that reflects your prior beliefs about the random effects. Using the \code{dwish} and \code{rwish} functions might be useful in choosing these values. } \value{ \item{mcmc}{An mcmc object that contains the posterior sample. This object can be summarized by functions provided by the coda package. The posterior sample of the deviance \eqn{D}{D}, with \eqn{D=-2\log(\prod_i P(y_i|\lambda_i))}{% D=-2log(prod_i P(y_i|lambda_i))}, is also provided.} \item{lambda.pred}{Predictive posterior mean for the exponential of the latent variables. The approximation of Diggle et al. (2004) is used to marginalized with respect to over-dispersion terms: \deqn{E[\lambda_i|\beta,b_i,\sigma^2]=\phi^{-1}((X_i \beta+W_i b_i)+0.5 \sigma^2)}{% E[lambda_i|beta,b_i,sigma^2]=phi^(-1)((X_i*beta+W_i*b_i)+0.5*sigma^2)} } } \references{ Siddhartha Chib and Bradley P. Carlin. 1999. ``On MCMC Sampling in Hierarchical Longitudinal Models.'' \emph{Statistics and Computing.} 9: 17-26. Daniel Pemstein, Kevin M. Quinn, and Andrew D. Martin. 2007. \emph{Scythe Statistical Library 1.0.} \url{http://scythe.wustl.edu}. Andrew D. Martin and Kyle L. Saunders. 2002. ``Bayesian Inference for Political Science Panel Data.'' Paper presented at the 2002 Annual Meeting of the American Political Science Association. Martyn Plummer, Nicky Best, Kate Cowles, and Karen Vines. 2002. \emph{Output Analysis and Diagnostics for MCMC (CODA)}. \url{http://www-fis.iarc.fr/coda/}. 
} \author{ Ghislain Vieilledent } \seealso{ \code{\link[coda]{plot.mcmc}}, \code{\link[coda]{summary.mcmc}} } \examples{ \dontrun{ #======================================== # Hierarchical Poisson Linear Regression #======================================== #== Generating data # Constants nobs <- 1000 nspecies <- 20 species <- c(1:nspecies,sample(c(1:nspecies),(nobs-nspecies),replace=TRUE)) # Covariates X1 <- runif(n=nobs,min=-1,max=1) X2 <- runif(n=nobs,min=-1,max=1) X <- cbind(rep(1,nobs),X1,X2) W <- X # Target parameters # beta beta.target <- matrix(c(0.1,0.1,0.1),ncol=1) # Vb Vb.target <- c(0.05,0.05,0.05) # b b.target <- cbind(rnorm(nspecies,mean=0,sd=sqrt(Vb.target[1])), rnorm(nspecies,mean=0,sd=sqrt(Vb.target[2])), rnorm(nspecies,mean=0,sd=sqrt(Vb.target[3]))) # Response lambda <- vector() Y <- vector() for (n in 1:nobs) { lambda[n] <- exp(X[n,]\%*\%beta.target+W[n,]\%*\%b.target[species[n],]) Y[n] <- rpois(1,lambda[n]) } # Data-set Data <- as.data.frame(cbind(Y,lambda,X1,X2,species)) plot(Data$X1,Data$lambda) #== Call to MCMChpoisson model <- MCMChpoisson(fixed=Y~X1+X2, random=~X1+X2, group="species", data=Data, burnin=5000, mcmc=1000, thin=1,verbose=1, seed=NA, beta.start=0, sigma2.start=1, Vb.start=1, mubeta=0, Vbeta=1.0E6, r=3, R=diag(c(0.1,0.1,0.1)), nu=0.001, delta=0.001, FixOD=1) #== MCMC analysis # Graphics pdf("Posteriors-MCMChpoisson.pdf") plot(model$mcmc) dev.off() # Summary summary(model$mcmc) # Predictive posterior mean for each observation model$lambda.pred # Predicted-Observed plot(Data$lambda,model$lambda.pred) abline(a=0,b=1) ## #Not run ## #You can also compare with lme4 results ## #== lme4 resolution ## library(lme4) ## model.lme4 <- lmer(Y~X1+X2+(1+X1+X2|species),data=Data,family="poisson") ## summary(model.lme4) ## plot(fitted(model.lme4),model$lambda.pred,main="MCMChpoisson/lme4") ## abline(a=0,b=1) } } \keyword{models} \keyword{hierarchical models} \keyword{mixed models} \keyword{glmm} \keyword{Poisson} \keyword{MCMC} \keyword{bayesian} MCMCpack/man/MCMChlogit.Rd0000644000176000001440000002510112133644110014642 0ustar ripleyusers\name{MCMChlogit} \alias{MCMChlogit} \title{Markov Chain Monte Carlo for the Hierarchical Binomial Linear Regression Model using the logit link function} \description{MCMChlogit generates a sample from the posterior distribution of a Hierarchical Binomial Linear Regression Model using the logit link function and Algorithm 2 of Chib and Carlin (1999). This model uses a multivariate Normal prior for the fixed effects parameters, an Inverse-Wishart prior on the random effects variance matrix, and an Inverse-Gamma prior on the variance modelling over-dispersion. The user supplies data and priors, and a sample from the posterior distribution is returned as an mcmc object, which can be subsequently analyzed with functions provided in the coda package.} \usage{MCMChlogit(fixed, random, group, data, burnin=5000, mcmc=10000, thin=10, verbose=1, seed=NA, beta.start=NA, sigma2.start=NA, Vb.start=NA, mubeta=0, Vbeta=1.0E6, r, R, nu=0.001, delta=0.001, FixOD=0, ...)} \arguments{ \item{fixed}{A two-sided linear formula of the form 'y~x1+...+xp' describing the fixed-effects part of the model, with the response on the left of a '~' operator and the p fixed terms, separated by '+' operators, on the right. 
Response variable y must be 0 or 1 (Binomial process).} \item{random}{A one-sided formula of the form '~x1+...+xq' specifying the model for the random effects part of the model, with the q random terms, separated by '+' operators.} \item{group}{String indicating the name of the grouping variable in \code{data}, defining the hierarchical structure of the model.} \item{data}{A data frame containing the variables in the model.} \item{burnin}{The number of burnin iterations for the sampler.} \item{mcmc}{The number of Gibbs iterations for the sampler. Total number of Gibbs iterations is equal to \code{burnin+mcmc}. \code{burnin+mcmc} must be divisible by 10 and greater than or equal to 100 so that the progress bar can be displayed.} \item{thin}{The thinning interval used in the simulation. The number of mcmc iterations must be divisible by this value.} \item{seed}{The seed for the random number generator. If NA, the Mersenne Twister generator is used with default seed 12345; if an integer is passed it is used to seed the Mersenne twister.} \item{verbose}{A switch (0,1) which determines whether or not the progress of the sampler is printed to the screen. Default is 1: a progress bar is printed, indicating the step (in \%) reached by the Gibbs sampler.} \item{beta.start}{The starting values for the \eqn{\beta}{beta} vector. This can either be a scalar or a p-length vector. The default value of NA will use the OLS \eqn{\beta}{beta} estimate of the corresponding Gaussian Linear Regression without random effects. If this is a scalar, that value will serve as the starting value mean for all of the betas.} \item{sigma2.start}{Scalar for the starting value of the residual error variance. The default value of NA will use the OLS estimates of the corresponding Gaussian Linear Regression without random effects.} \item{Vb.start}{The starting value for the variance matrix of the random effects. This must be a square q-dimension matrix. Default value of NA uses an identity matrix.} \item{mubeta}{The prior mean of \eqn{\beta}{beta}. This can either be a scalar or a p-length vector. If this takes a scalar value, then that value will serve as the prior mean for all of the betas. The default value of 0 will use a vector of zeros for an uninformative prior.} \item{Vbeta}{The prior variance of \eqn{\beta}{beta}. This can either be a scalar or a square p-dimension matrix. If this takes a scalar value, then that value times an identity matrix serves as the prior variance of beta. Default value of 1.0E6 will use a diagonal matrix with very large variance for an uninformative flat prior.} \item{r}{The shape parameter for the Inverse-Wishart prior on the variance matrix for the random effects. r must be greater than or equal to q. Set r=q for an uninformative prior. See the NOTE for more details.} \item{R}{The scale matrix for the Inverse-Wishart prior on the variance matrix for the random effects. This must be a square q-dimension matrix. Use plausible variances for the random effects on the diagonal of R. See the NOTE for more details.} \item{nu}{The shape parameter for the Inverse-Gamma prior on the residual error variance. Default value is \code{nu=delta=0.001} for an uninformative prior.} \item{delta}{The rate (1/scale) parameter for the Inverse-Gamma prior on the residual error variance. Default value is \code{nu=delta=0.001} for an uninformative prior.} \item{FixOD}{A switch (0,1) which determines whether or not the variance for over-dispersion (sigma2) should be fixed (1) or not (0). Default is 0: the parameter sigma2 is estimated.
If FixOD=1, sigma2 is fixed to the value provided for \code{sigma2.start}.} \item{...}{further arguments to be passed} } \details{ \code{MCMChlogit} simulates a sample from the posterior distribution using the blocked Gibbs sampler of Chib and Carlin (1999), Algorithm 2. The simulation is done in compiled C++ code to maximize efficiency. Please consult the coda documentation for a comprehensive list of functions that can be used to analyze the posterior sample. The model takes the following form: \deqn{y_i \sim \mathcal{B}ernoulli(\theta_i)}{y_i ~ Bernoulli(theta_i)} With latent variables \eqn{\phi(\theta_i)}{phi(theta_i)}, \eqn{\phi}{phi} being the logit link function: \deqn{\phi(\theta_i) = X_i \beta + W_i b_i + \varepsilon_i}{phi(theta_i) = X_i * beta + W_i * b_i + epsilon_i} Where each group \eqn{i} has \eqn{k_i} observations. Where the random effects: \deqn{b_i \sim \mathcal{N}_q(0,V_b)}{b_i ~ N_q(0,Vb)} And the over-dispersion terms: \deqn{\varepsilon_i \sim \mathcal{N}(0, \sigma^2 I_{k_i})}{epsilon_i ~ N(0, sigma^2 I_{k_i})} We assume standard, conjugate priors: \deqn{\beta \sim \mathcal{N}_p(\mu_{\beta},V_{\beta})}{beta ~ N_p(mubeta,Vbeta)} And: \deqn{\sigma^{2} \sim \mathcal{IG}amma(\nu, 1/\delta)}{sigma^2 ~ IGamma(nu, 1/delta)} And: \deqn{V_b \sim \mathcal{IW}ishart(r, rR)}{Vb ~ IWishart(r, rR)} See Chib and Carlin (1999) for more details. \emph{NOTE:} We do not provide default parameters for the priors on the precision matrix for the random effects. When fitting one of these models, it is of utmost importance to choose a prior that reflects your prior beliefs about the random effects. Using the \code{dwish} and \code{rwish} functions might be useful in choosing these values. } \value{ \item{mcmc}{An mcmc object that contains the posterior sample. This object can be summarized by functions provided by the coda package. The posterior sample of the deviance \eqn{D}{D}, with \eqn{D=-2\log(\prod_i P(y_i|\theta_i))}{% D=-2log(prod_i P(y_i|theta_i))}, is also provided.} \item{theta.pred}{Predictive posterior mean for the inverse-logit of the latent variables. The approximation of Diggle et al. (2004) is used to marginalize with respect to the over-dispersion terms: \deqn{E[\theta_i|\beta,b_i,\sigma^2]=\phi^{-1}((X_i \beta+W_i b_i)/\sqrt{(16\sqrt{3}/(15\pi))^2\sigma^2+1})}{% E[theta_i|beta,b_i,sigma^2]=phi^(-1)((X_i*beta+W_i*b_i) /% sqrt((16*sqrt(3)/(15*pi))^2*sigma^2+1))} } } \references{ Siddhartha Chib and Bradley P. Carlin. 1999. ``On MCMC Sampling in Hierarchical Longitudinal Models.'' \emph{Statistics and Computing.} 9: 17-26. Daniel Pemstein, Kevin M. Quinn, and Andrew D. Martin. 2007. \emph{Scythe Statistical Library 1.0.} \url{http://scythe.wustl.edu}. Andrew D. Martin and Kyle L. Saunders. 2002. ``Bayesian Inference for Political Science Panel Data.'' Paper presented at the 2002 Annual Meeting of the American Political Science Association. Martyn Plummer, Nicky Best, Kate Cowles, and Karen Vines. 2002. \emph{Output Analysis and Diagnostics for MCMC (CODA)}. \url{http://www-fis.iarc.fr/coda/}. Diggle P., Heagerty P., Liang K., and Zeger S. 2004. ``Analysis of Longitudinal Data.'' \emph{Oxford University Press}, 2nd Edition.
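The approximation used for theta.pred can be evaluated directly in R. The sketch below is illustrative only; approx.theta is a hypothetical helper (not a function exported by the package) and plogis is used as the inverse-logit.
## Diggle et al. (2004) adjustment, as in the theta.pred formula above:
## theta = inverse-logit( linpred / sqrt((16*sqrt(3)/(15*pi))^2 * sigma2 + 1) )
approx.theta <- function(linpred, sigma2) {
  ## linpred = X_i beta + W_i b_i ; sigma2 = over-dispersion variance
  plogis(linpred / sqrt((16 * sqrt(3) / (15 * pi))^2 * sigma2 + 1))
}
approx.theta(linpred = 0.5, sigma2 = 1)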
} \author{ Ghislain Vieilledent } \seealso{ \code{\link[coda]{plot.mcmc}}, \code{\link[coda]{summary.mcmc}} } \examples{ \dontrun{ #======================================== # Hierarchical Binomial Linear Regression #======================================== #== inv.logit function inv.logit <- function(x, min=0, max=1) { p <- exp(x)/(1+exp(x)) p <- ifelse( is.na(p) & !is.na(x), 1, p ) # fix problems with +Inf return(p*(max-min)+min) } #== Generating data # Constants nobs <- 1000 nspecies <- 20 species <- c(1:nspecies,sample(c(1:nspecies),(nobs-nspecies),replace=TRUE)) # Covariates X1 <- runif(n=nobs,min=-10,max=10) X2 <- runif(n=nobs,min=-10,max=10) X <- cbind(rep(1,nobs),X1,X2) W <- X # Target parameters # beta beta.target <- matrix(c(0.3,0.2,0.1),ncol=1) # Vb Vb.target <- c(0.5,0.05,0.05) # b b.target <- cbind(rnorm(nspecies,mean=0,sd=sqrt(Vb.target[1])), rnorm(nspecies,mean=0,sd=sqrt(Vb.target[2])), rnorm(nspecies,mean=0,sd=sqrt(Vb.target[3]))) # Response theta <- vector() Y <- vector() for (n in 1:nobs) { theta[n] <- inv.logit(X[n,]\%*\%beta.target+W[n,]\%*\%b.target[species[n],]) Y[n] <- rbinom(n=1,size=1,prob=theta[n]) } # Data-set Data <- as.data.frame(cbind(Y,theta,X1,X2,species)) plot(Data$X1,Data$theta) #== Call to MCMChlogit model <- MCMChlogit(fixed=Y~X1+X2, random=~X1+X2, group="species", data=Data, burnin=5000, mcmc=1000, thin=1,verbose=1, seed=NA, beta.start=0, sigma2.start=1, Vb.start=1, mubeta=0, Vbeta=1.0E6, r=3, R=diag(c(1,0.1,0.1)), nu=0.001, delta=0.001, FixOD=1) #== MCMC analysis # Graphics pdf("Posteriors-MCMChlogit.pdf") plot(model$mcmc) dev.off() # Summary summary(model$mcmc) # Predictive posterior mean for each observation model$theta.pred # Predicted-Observed plot(Data$theta,model$theta.pred) abline(a=0,b=1) ## #Not run ## #You can also compare with lme4 results ## #== lme4 resolution ## library(lme4) ## model.lme4 <- lmer(Y~X1+X2+(1+X1+X2|species),data=Data,family="binomial") ## summary(model.lme4) ## plot(fitted(model.lme4),model$theta.pred,main="MCMChlogit/lme4") ## abline(a=0,b=1) } } \keyword{models} \keyword{hierarchical models} \keyword{mixed models} \keyword{glmm} \keyword{logit} \keyword{Binomial} \keyword{MCMC} \keyword{bayesian} MCMCpack/man/MCMChierEI.Rd0000644000176000001440000001624612133644110014533 0ustar ripleyusers\name{MCMChierEI} \alias{MCMChierEI} \title{Markov Chain Monte Carlo for Wakefield's Hierarchical Ecological Inference Model} \description{ `MCMChierEI' is used to fit Wakefield's hierarchical ecological inference model for partially observed 2 x 2 contingency tables. } \usage{ MCMChierEI(r0, r1, c0, c1, burnin=5000, mcmc=50000, thin=1, verbose=0, seed=NA, m0=0, M0=2.287656, m1=0, M1=2.287656, a0=0.825, b0=0.0105, a1=0.825, b1=0.0105, ...) } \arguments{ \item{r0}{\eqn{(ntables \times 1)}{(ntables * 1)} vector of row sums from row 0.} \item{r1}{\eqn{(ntables \times 1)}{(ntables * 1)} vector of row sums from row 1.} \item{c0}{\eqn{(ntables \times 1)}{(ntables * 1)} vector of column sums from column 0.} \item{c1}{\eqn{(ntables \times 1)}{(ntables * 1)} vector of column sums from column 1.} \item{burnin}{The number of burn-in scans for the sampler.} \item{mcmc}{The number of mcmc scans to be saved.} \item{thin}{The thinning interval used in the simulation. The number of mcmc iterations must be divisible by this value.} \item{verbose}{A switch which determines whether or not the progress of the sampler is printed to the screen. If \code{verbose} is greater than 0 then every \code{verbose}th iteration will be printed to the screen.
} \item{seed}{The seed for the random number generator. If NA, the Mersenne Twister generator is used with default seed 12345; if an integer is passed it is used to seed the Mersenne twister. The user can also pass a list of length two to use the L'Ecuyer random number generator, which is suitable for parallel computation. The first element of the list is the L'Ecuyer seed, which is a vector of length six or NA (if NA a default seed of \code{rep(12345,6)} is used). The second element of list is a positive substream number. See the MCMCpack specification for more details.} \item{m0}{Prior mean of the \eqn{\mu_0}{mu0} parameter.} \item{M0}{Prior variance of the \eqn{\mu_0}{mu0} parameter.} \item{m1}{Prior mean of the \eqn{\mu_1}{mu1} parameter.} \item{M1}{Prior variance of the \eqn{\mu_1}{mu1} parameter.} \item{a0}{\code{a0/2} is the shape parameter for the inverse-gamma prior on the \eqn{\sigma^2_0}{sigma^2_0} parameter.} \item{b0}{\code{b0/2} is the scale parameter for the inverse-gamma prior on the \eqn{\sigma^2_0}{sigma^2_0} parameter.} \item{a1}{\code{a1/2} is the shape parameter for the inverse-gamma prior on the \eqn{\sigma^2_1}{sigma^2_1} parameter.} \item{b1}{\code{b1/2} is the scale parameter for the inverse-gamma prior on the \eqn{\sigma^2_1}{sigma^2_1} parameter.} \item{...}{further arguments to be passed} } \value{ An mcmc object that contains the sample from the posterior distribution. This object can be summarized by functions provided by the coda package. } \details{ Consider the following partially observed 2 by 2 contingency table for unit \eqn{t} where \eqn{t=1,\ldots,ntables}:\cr \cr \tabular{llll}{ \tab | \eqn{Y=0} \tab | \eqn{Y=1} \tab | \cr - - - - - \tab - - - - - \tab - - - - - \tab - - - - - \cr \eqn{X=0} \tab | \eqn{Y_{0t}}{Y0[t]} \tab | \tab |\eqn{r_{0t}}{r0[t]}\cr - - - - - \tab - - - - - \tab - - - - - \tab - - - - - \cr \eqn{X=1} \tab | \eqn{Y_{1t}}{Y1[t]} \tab | \tab | \eqn{r_{1t}}{r1[t]}\cr - - - - - \tab - - - - - \tab - - - - - \tab - - - - - \cr \tab | \eqn{c_{0t}}{c0[t]} \tab | \eqn{c_{1t}}{c1[t]} \tab | \eqn{N_t}{N[t]}\cr } Where \eqn{r_{0t}}{r0[t]}, \eqn{r_{1t}}{r1[t]}, \eqn{c_{0t}}{c0[t]}, \eqn{c_{1t}}{c1[t]}, and \eqn{N_t}{N[t]} are non-negative integers that are observed. The interior cell entries are not observed. It is assumed that \eqn{Y_{0t}|r_{0t} \sim \mathcal{B}inomial(r_{0t}, p_{0t})}{Y0[t]|r0[t] ~ Binomial(r0[t], p0[t])} and \eqn{Y_{1t}|r_{1t} \sim \mathcal{B}inomial(r_{1t}, p_{1t})}{Y1[t]|r1[t] ~ Binomial(r1[t],p1[t])}. Let \eqn{\theta_{0t} = log(p_{0t}/(1-p_{0t}))}{theta0[t] = log(p0[t]/(1-p0[t]))}, and \eqn{\theta_{1t} = log(p_{1t}/(1-p_{1t}))}{theta1[t] = log(p1[t]/(1-p1[t]))}. The following prior distributions are assumed: \eqn{\theta_{0t} \sim \mathcal{N}(\mu_0, \sigma^2_0)}{\theta0[t] ~ Normal(mu0, sigma^2_0)}, \eqn{\theta_{1t} \sim \mathcal{N}(\mu_1, \sigma^2_1)}{\theta1[t] ~ Normal(mu1, sigma^2_1)}. \eqn{\theta_{0t}}{theta0[t]} is assumed to be a priori independent of \eqn{\theta_{1t}}{theta1[t]} for all t. In addition, we assume the following hyperpriors: \eqn{\mu_0 \sim \mathcal{N}(m_0, M_0)}{mu0 ~ Normal(m0, M0)}, \eqn{\mu_1 \sim \mathcal{N}(m_1, M_1)}{mu1 ~ Normal(m1, M1)}, \eqn{\sigma^2_0 \sim \mathcal{IG}(a_0/2, b_0/2)}{\sigma^2_0 ~ InvGamma(a0/2, b0/2)}, and \eqn{\sigma^2_1 \sim \mathcal{IG}(a_1/2, b_1/2)}{\sigma^2_1 ~ InvGamma(a1/2, b1/2)}. The default priors have been chosen to make the implied prior distribution for \eqn{p_{0}}{p0} and \eqn{p_{1}}{p1} \emph{approximately} uniform on (0,1). 
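The claim that the default priors imply an approximately uniform prior for p0 and p1 can be checked by simulation. The following sketch (not part of the package examples) draws from the prior using the default hyperparameter values shown in the usage section and the rinvgamma generator documented elsewhere in this package; whether the histogram looks flat is left to the reader.
library(MCMCpack)   # for rinvgamma()
set.seed(1)
mu0     <- rnorm(5000, mean = 0, sd = sqrt(2.287656))          # mu0 ~ N(m0, M0)
sigma20 <- rinvgamma(5000, shape = 0.825/2, scale = 0.0105/2)  # sigma^2_0 ~ IG(a0/2, b0/2)
theta0  <- rnorm(5000, mean = mu0, sd = sqrt(sigma20))         # theta0 ~ N(mu0, sigma^2_0)
p0      <- plogis(theta0)                                      # p0 = exp(theta0)/(1+exp(theta0))
hist(p0, breaks = 50)   # compare with a flat density on (0,1)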
Inference centers on \eqn{p_0}{p0}, \eqn{p_1}{p1}, \eqn{\mu_0}{mu0}, \eqn{\mu_1}{mu1}, \eqn{\sigma^2_0}{sigma^2_0}, and \eqn{\sigma^2_1}{sigma^2_1}. Univariate slice sampling (Neal, 2003) along with Gibbs sampling is used to sample from the posterior distribution. See Section 5.4 of Wakefield (2003) for discussion of the priors used here. \code{MCMChierEI} departs from the Wakefield model in that \code{mu0} and \code{mu1} are assumed here to be drawn from independent normal distributions whereas Wakefield assumes they are drawn from logistic distributions. } \references{ Jonathan C. Wakefield. 2004. ``Ecological Inference for 2 x 2 Tables.'' \emph{Journal of the Royal Statistical Society, Series A}. 167(3): 385-445. Radford Neal. 2003. ``Slice Sampling'' (with discussion). \emph{Annals of Statistics}, 31: 705-767. Andrew D. Martin, Kevin M. Quinn, and Jong Hee Park. 2011. ``MCMCpack: Markov Chain Monte Carlo in R.'', \emph{Journal of Statistical Software}. 42(9): 1-21. \url{http://www.jstatsoft.org/v42/i09/}. Daniel Pemstein, Kevin M. Quinn, and Andrew D. Martin. 2007. \emph{Scythe Statistical Library 1.0.} \url{http://scythe.wustl.edu}. Martyn Plummer, Nicky Best, Kate Cowles, and Karen Vines. 2002. \emph{Output Analysis and Diagnostics for MCMC (CODA)}. \url{http://www-fis.iarc.fr/coda/}. } \examples{ \dontrun{ ## simulated data example set.seed(3920) n <- 100 r0 <- round(runif(n, 400, 1500)) r1 <- round(runif(n, 100, 4000)) p0.true <- pnorm(rnorm(n, m=0.5, s=0.25)) p1.true <- pnorm(rnorm(n, m=0.0, s=0.10)) y0 <- rbinom(n, r0, p0.true) y1 <- rbinom(n, r1, p1.true) c0 <- y0 + y1 c1 <- (r0+r1) - c0 ## plot data tomogplot(r0, r1, c0, c1) ## fit exchangeable hierarchical model post <- MCMChierEI(r0,r1,c0,c1, mcmc=40000, thin=5, verbose=100, seed=list(NA, 1)) p0meanHier <- colMeans(post)[1:n] p1meanHier <- colMeans(post)[(n+1):(2*n)] ## plot truth and posterior means pairs(cbind(p0.true, p0meanHier, p1.true, p1meanHier)) } } \keyword{models} \seealso{\code{\link{MCMCdynamicEI}}, \code{\link[coda]{plot.mcmc}},\code{\link[coda]{summary.mcmc}}} MCMCpack/man/MCMCfactanal.Rd0000644000176000001440000002176012133644110015134 0ustar ripleyusers\name{MCMCfactanal} \alias{MCMCfactanal} \title{Markov Chain Monte Carlo for Normal Theory Factor Analysis Model} \description{ This function generates a sample from the posterior distribution of a normal theory factor analysis model. Normal priors are assumed on the factor loadings and factor scores while inverse Gamma priors are assumed for the uniquenesses. The user supplies data and parameters for the prior distributions, and a sample from the posterior distribution is returned as an mcmc object, which can be subsequently analyzed with functions provided in the coda package. } \usage{ MCMCfactanal(x, factors, lambda.constraints=list(), data=NULL, burnin = 1000, mcmc = 20000, thin=1, verbose = 0, seed = NA, lambda.start = NA, psi.start = NA, l0=0, L0=0, a0=0.001, b0=0.001, store.scores = FALSE, std.var=TRUE, ... ) } \arguments{ \item{x}{Either a formula or a numeric matrix containing the manifest variables.} \item{factors}{The number of factors to be fitted.} \item{lambda.constraints}{List of lists specifying possible simple equality or inequality constraints on the factor loadings.
A typical entry in the list has one of three forms: \code{varname=list(d,c)} which will constrain the dth loading for the variable named \code{varname} to be equal to c, \code{varname=list(d,"+")} which will constrain the dth loading for the variable named \code{varname} to be positive, and \code{varname=list(d, "-")} which will constrain the dth loading for the variable named \code{varname} to be negative. If x is a matrix without column names, default names of ``V1'', ``V2'', etc. will be used.} \item{data}{A data frame.} \item{burnin}{The number of burn-in iterations for the sampler.} \item{mcmc}{The number of iterations for the sampler.} \item{thin}{The thinning interval used in the simulation. The number of iterations must be divisible by this value.} \item{verbose}{A switch which determines whether or not the progress of the sampler is printed to the screen. If \code{verbose} is greater than 0 the iteration number and the factor loadings and uniquenesses are printed to the screen every \code{verbose}th iteration.} \item{seed}{The seed for the random number generator. If NA, the Mersenne Twister generator is used with default seed 12345; if an integer is passed it is used to seed the Mersenne twister. The user can also pass a list of length two to use the L'Ecuyer random number generator, which is suitable for parallel computation. The first element of the list is the L'Ecuyer seed, which is a vector of length six or NA (if NA a default seed of \code{rep(12345,6)} is used). The second element of the list is a positive substream number. See the MCMCpack specification for more details.} \item{lambda.start}{Starting values for the factor loading matrix Lambda. If \code{lambda.start} is set to a scalar the starting value for all unconstrained loadings will be set to that scalar. If \code{lambda.start} is a matrix of the same dimensions as Lambda then the \code{lambda.start} matrix is used as the starting values (except for equality-constrained elements). If \code{lambda.start} is set to \code{NA} (the default) then starting values for unconstrained elements are set to 0, and starting values for inequality constrained elements are set to either 0.5 or -0.5 depending on the nature of the constraints.} \item{psi.start}{Starting values for the uniquenesses. If \code{psi.start} is set to a scalar then the starting value for all diagonal elements of \code{Psi} are set to this value. If \code{psi.start} is a \eqn{k}{k}-vector (where \eqn{k}{k} is the number of manifest variables) then the starting value of \code{Psi} has \code{psi.start} on the main diagonal. If \code{psi.start} is set to \code{NA} (the default) the starting values of all the uniquenesses are set to 0.5.} \item{l0}{The means of the independent Normal prior on the factor loadings. Can be either a scalar or a matrix with the same dimensions as \code{Lambda}.} \item{L0}{The precisions (inverse variances) of the independent Normal prior on the factor loadings. Can be either a scalar or a matrix with the same dimensions as \code{Lambda}.} \item{a0}{Controls the shape of the inverse Gamma prior on the uniquenesses. The actual shape parameter is set to \code{a0/2}. Can be either a scalar or a \eqn{k}{k}-vector.} \item{b0}{Controls the scale of the inverse Gamma prior on the uniquenesses. The actual scale parameter is set to \code{b0/2}. Can be either a scalar or a \eqn{k}{k}-vector.} \item{store.scores}{A switch that determines whether or not to store the factor scores for posterior analysis.
\emph{NOTE: This takes an enormous amount of memory, so should only be used if the chain is thinned heavily, or for applications with a small number of observations}. By default, the factor scores are not stored.} \item{std.var}{If \code{TRUE} (the default) the manifest variables are rescaled to have zero mean and unit variance. Otherwise, the manifest variables are rescaled to have zero mean but retain their observed variances.} \item{...}{further arguments to be passed} } \value{ An mcmc object that contains the sample from the posterior distribution. This object can be summarized by functions provided by the coda package. } \details{The model takes the following form: \deqn{x_i = \Lambda \phi_i + \epsilon_i}{x_i = Lambda phi_i + epsilon_i} \deqn{\epsilon_i \sim \mathcal{N}(0,\Psi)}{epsilon_i ~ N(0, Psi)} where \eqn{x_i}{x_i} is the \eqn{k}{k}-vector of observed variables specific to observation \eqn{i}{i}, \eqn{\Lambda}{Lambda} is the \eqn{k \times d}{k by d} matrix of factor loadings, \eqn{\phi_i}{phi_i} is the \eqn{d}{d}-vector of latent factor scores, and \eqn{\Psi}{Psi} is a diagonal, positive definite matrix. Traditional factor analysis texts refer to the diagonal elements of \eqn{\Psi}{Psi} as uniquenesses. The implementation used here assumes independent conjugate priors for each element of \eqn{\Lambda}{Lambda}, each \eqn{\phi_i}{phi_i}, and each diagonal element of \eqn{\Psi}{Psi}. More specifically we assume: \deqn{\Lambda_{ij} \sim \mathcal{N}(l_{0_{ij}}, L_{0_{ij}}^{-1}), i=1,\ldots,k, j=1,\ldots,d}{Lambda_ij ~ N(l0_ij, L0_ij^-1), i=1,...,k, j=1,...,d} \deqn{\phi_i \sim \mathcal{N}(0, I), i=1,\dots,n}{phi_i ~ N(0, I), i=1,...,n} \deqn{\Psi_{ii} \sim \mathcal{IG}(a_{0_i}/2, b_{0_i}/2), i=1,\ldots,k}{Psi_ii ~ IG(a0_i/2, b0_i/2), i=1,...,k} \code{MCMCfactanal} simulates from the posterior distribution using standard Gibbs sampling. The simulation proper is done in compiled C++ code to maximize efficiency. Please consult the coda documentation for a comprehensive list of functions that can be used to analyze the posterior sample. As is the case with all measurement models, make sure that you have plenty of free memory, especially when storing the scores. } \references{ Andrew D. Martin, Kevin M. Quinn, and Jong Hee Park. 2011. ``MCMCpack: Markov Chain Monte Carlo in R.'', \emph{Journal of Statistical Software}. 42(9): 1-21. \url{http://www.jstatsoft.org/v42/i09/}. Daniel Pemstein, Kevin M. Quinn, and Andrew D. Martin. 2007. \emph{Scythe Statistical Library 1.0.} \url{http://scythe.wustl.edu}. Martyn Plummer, Nicky Best, Kate Cowles, and Karen Vines. 2002. \emph{Output Analysis and Diagnostics for MCMC (CODA)}. \url{http://www-fis.iarc.fr/coda/}. 
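For readers who want to see the data-generating process described above in code, the following sketch (not part of the package examples) simulates manifest variables from the factor model with a diagonal Psi; the resulting matrix could then be analyzed with MCMCfactanal.
## Simulate x_i = Lambda phi_i + epsilon_i, epsilon_i ~ N(0, Psi), Psi diagonal
set.seed(2)
n <- 200; k <- 5; d <- 2
Lambda <- matrix(rnorm(k * d), k, d)                  # factor loadings (k by d)
Psi    <- diag(runif(k, 0.2, 0.5))                    # diagonal uniquenesses
phi    <- matrix(rnorm(n * d), n, d)                  # latent factor scores, N(0, I)
eps    <- sweep(matrix(rnorm(n * k), n, k), 2, sqrt(diag(Psi)), "*")
X      <- tcrossprod(phi, Lambda) + eps               # n by k matrix of manifest variables
## X could then be passed to MCMCfactanal(X, factors = 2, ...)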
} \examples{ \dontrun{ ### An example using the formula interface data(swiss) posterior <- MCMCfactanal(~Agriculture+Examination+Education+Catholic +Infant.Mortality, factors=2, lambda.constraints=list(Examination=list(1,"+"), Examination=list(2,"-"), Education=c(2,0), Infant.Mortality=c(1,0)), verbose=0, store.scores=FALSE, a0=1, b0=0.15, data=swiss, burnin=5000, mcmc=50000, thin=20) plot(posterior) summary(posterior) ### An example using the matrix interface Y <- cbind(swiss$Agriculture, swiss$Examination, swiss$Education, swiss$Catholic, swiss$Infant.Mortality) colnames(Y) <- c("Agriculture", "Examination", "Education", "Catholic", "Infant.Mortality") post <- MCMCfactanal(Y, factors=2, lambda.constraints=list(Examination=list(1,"+"), Examination=list(2,"-"), Education=c(2,0), Infant.Mortality=c(1,0)), verbose=0, store.scores=FALSE, a0=1, b0=0.15, burnin=5000, mcmc=50000, thin=20) } } \keyword{models} \seealso{\code{\link[coda]{plot.mcmc}},\code{\link[coda]{summary.mcmc}},\code{\link[stats]{factanal}}} MCMCpack/man/MCMCdynamicIRT1d.Rd0000644000176000001440000003030112133644110015602 0ustar ripleyusers\name{MCMCdynamicIRT1d} \alias{MCMCdynamicIRT1d} \alias{MCMCdynamicIRT1d_b} \title{Markov Chain Monte Carlo for Dynamic One Dimensional Item Response Theory Model} \description{ This function generates a sample from the posterior distribution of a dynamic one dimensional item response theory (IRT) model, with Normal random walk priors on the subject abilities (ideal points), and multivariate Normal priors on the item parameters. The user supplies data and priors, and a sample from the posterior distribution is returned as an mcmc object, which can be subsequently analyzed with functions provided in the coda package. } \usage{ MCMCdynamicIRT1d(datamatrix, item.time.map, theta.constraints=list(), burnin = 1000, mcmc = 20000, thin = 1, verbose = 0, seed = NA, theta.start = NA, alpha.start = NA, beta.start = NA, tau2.start = 1, a0 = 0, A0 = 0.1, b0 = 0, B0 = 0.1, c0 = -1, d0 = -1, e0 = 0, E0 = 1, store.ability = TRUE, store.item = TRUE, ...) } \arguments{ \item{datamatrix}{The matrix of data. Must be 0, 1, or missing values. The rows of \code{datamatrix} correspond to subjects and the columns correspond to items.} \item{item.time.map}{A vector that relates each item to a time period. Each element of \code{item.time.map} gives the time period of the corresponding column of \code{datamatrix}. It is assumed that the minimum value of \code{item.time.map} is 1.} \item{theta.constraints}{ A list specifying possible simple equality or inequality constraints on the ability parameters. A typical entry in the list has one of three forms: \code{varname=c} which will constrain the ability parameter for the subject named \code{varname} to be equal to c, \code{varname="+"} which will constrain the ability parameter for the subject named \code{varname} to be positive, and \code{varname="-"} which will constrain the ability parameter for the subject named \code{varname} to be negative. If \code{datamatrix} is a matrix without row names, default names of ``V1'', ``V2'', etc. will be used. See Rivers (2003) for a thorough discussion of identification of IRT models. } \item{burnin}{ The number of burn-in iterations for the sampler. } \item{mcmc}{The number of Gibbs iterations for the sampler. } \item{thin}{The thinning interval used in the simulation. The number of Gibbs iterations must be divisible by this value. } \item{verbose}{ A switch which determines whether or not the progress of the sampler is printed to the screen.
If \code{verbose} is greater than 0 then every \code{verbose}th iteration will be printed to the screen. } \item{seed}{ The seed for the random number generator. If NA, the Mersenne Twister generator is used with default seed 12345; if an integer is passed it is used to seed the Mersenne twister. The user can also pass a list of length two to use the L'Ecuyer random number generator, which is suitable for parallel computation. The first element of the list is the L'Ecuyer seed, which is a vector of length six or NA (if NA a default seed of \code{rep(12345,6)} is used). The second element of the list is a positive substream number. See the MCMCpack specification for more details. } \item{theta.start}{ The starting values for the subject abilities (ideal points). This can either be a scalar or a column vector with dimension equal to the number of voters. If this takes a scalar value, then that value will serve as the starting value for all of the thetas. The default value of NA will choose the starting values based on an eigenvalue-eigenvector decomposition of the agreement score matrix formed from the \code{datamatrix}. } \item{alpha.start}{ The starting values for the \eqn{\alpha}{alpha} difficulty parameters. This can either be a scalar or a column vector with dimension equal to the number of items. If this takes a scalar value, then that value will serve as the starting value for all of the alphas. The default value of NA will set the starting values based on a series of probit regressions that condition on the starting values of theta. } \item{beta.start}{ The starting values for the \eqn{\beta}{beta} discrimination parameters. This can either be a scalar or a column vector with dimension equal to the number of items. If this takes a scalar value, then that value will serve as the starting value for all of the betas. The default value of NA will set the starting values based on a series of probit regressions that condition on the starting values of theta. } \item{tau2.start}{ The starting values for the evolution variances (the variance of the random walk increments for the ability parameters / ideal points). Order corresponds to the rows of \code{datamatrix}.} \item{a0}{ A vector containing the prior mean of each of the difficulty parameters \eqn{\alpha}{alpha}. Should have as many elements as items / roll calls. Order corresponds to the columns of \code{datamatrix}. If a scalar is passed it is assumed that all elements of \code{a0} are equal to the scalar. } \item{A0}{ A vector containing the prior precision (inverse variance) of each of the difficulty parameters \eqn{\alpha}{alpha}. Should have as many elements as items / roll calls. Order corresponds to the columns of \code{datamatrix}. If a scalar is passed it is assumed that all elements of \code{A0} are equal to the scalar. } \item{b0}{ A vector containing the prior mean of each of the discrimination parameters \eqn{\beta}{beta}. Should have as many elements as items / roll calls. Order corresponds to the columns of \code{datamatrix}. If a scalar is passed it is assumed that all elements of \code{b0} are equal to the scalar. } \item{B0}{ A vector containing the prior precision (inverse variance) of each of the discrimination parameters \eqn{\beta}{beta}. Should have as many elements as items / roll calls. Order corresponds to the columns of \code{datamatrix}. If a scalar is passed it is assumed that all elements of \code{B0} are equal to the scalar.
} \item{c0}{\eqn{c_0/2}{c0/2} is the shape parameter for the inverse Gamma prior on \eqn{\tau^2}{tau^2} (the variance of the random walk increments). The amount of information in the inverse Gamma prior is something like that from \eqn{c_0}{c0} pseudo-observations. \code{c0} can be either a vector with an element for each subject or a scalar. If \code{c0} is negative then \eqn{\tau^2}{tau^2} is not estimated-- the values in \code{tau2.start} are used throughout the sampling.} \item{d0}{ \eqn{d_0/2}{d0/2} is the scale parameter for the inverse Gamma prior on \eqn{\tau^2}{tau^2} (the variance of the random walk increments). In constructing the inverse Gamma prior, \eqn{d_0}{d0} acts like the sum of squared errors from the \eqn{c_0}{c0} pseudo-observations. \code{d0} can be either a vector with an element for each subject or a scalar. If \code{d0} is negative then \eqn{\tau^2}{tau^2} is not estimated-- the values in \code{tau2.start} are used throughout the sampling. } \item{e0}{ A vector containing the prior mean of the initial ability parameter / ideal point for each subject. Should have as many elements as subjects. Order corresponds to the rows of \code{datamatrix}. If a scalar is passed it is assumed that all elements of \code{e0} are equal to the scalar.} \item{E0}{ A vector containing the prior variance of the initial ability parameter / ideal point for each subject. Should have as many elements as subjects. Order corresponds to the rows of \code{datamatrix}. If a scalar is passed it is assumed that all elements of \code{E0} are equal to the scalar.} \item{store.ability}{ A switch that determines whether or not to store the ability parameters for posterior analysis. \emph{NOTE}: In situations with many individuals storing the ability parameters takes an enormous amount of memory, so \code{store.ability} should only be \code{TRUE} if the chain is thinned heavily, or for applications with a small number of individuals. By default, the ability parameters are stored. } \item{store.item}{A switch that determines whether or not to store the item parameters for posterior analysis. \emph{NOTE}: In situations with many items storing the item parameters takes an enormous amount of memory, so \code{store.item} should only be \code{TRUE} if the chain is thinned heavily, or for applications with a small number of items. By default, the item parameters are stored.} \item{\dots}{further arguments to be passed } } \details{ \code{MCMCdynamicIRT1d} simulates from the posterior distribution using the algorithm of Martin and Quinn (2002). The simulation proper is done in compiled C++ code to maximize efficiency. Please consult the coda documentation for a comprehensive list of functions that can be used to analyze the posterior sample. The model takes the following form. We assume that each subject has a subject ability (ideal point) denoted \eqn{\theta_{j,t}}{theta_jt} (where \eqn{j}{j} indexes subjects and \eqn{t}{t} indexes time periods) and that each item has a difficulty parameter \eqn{\alpha_i}{alpha_i} and discrimination parameter \eqn{\beta_i}{beta_i}. The observed choices by subjects on items form the observed data matrix, which is \eqn{(I \times J)}{(I * J)}. We assume that the choice is dictated by an unobserved utility: \deqn{z_{i,j,t} = -\alpha_i + \beta_i \theta_{j,t} + \varepsilon_{i,j,t}}{z_ijt = -alpha_i + beta_i*theta_jt + epsilon_ijt} Where the disturbances are assumed to be distributed standard Normal.
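As a concrete illustration of the observation equation just stated, the sketch below (not taken from the package) generates a single response, using the usual probit convention that the observed choice is 1 when the latent utility is positive.
set.seed(3)
theta <- 1.2                             # subject ability (ideal point) at time t
alpha <- 0.4                             # item difficulty
beta  <- 0.9                             # item discrimination
z <- -alpha + beta * theta + rnorm(1)    # latent utility with standard Normal disturbance
y <- as.numeric(z > 0)                   # observed 0/1 response (assumed probit convention)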
The parameters of interest are the subject abilities (ideal points) and the item parameters. We assume the following priors. For the subject abilities (ideal points): \deqn{\theta_{j,t} \sim \mathcal{N}(\theta_{j,t-1}, \tau^2_j)}{theta_jt ~ N(theta_j(t-1), tau^2)} with \deqn{\theta_{j,0} \sim \mathcal{N}(e0, E0)}{theta_j0 ~ N(e0, E0).} The evolution variance has the following prior: \deqn{\tau^2_j \sim \mathcal{IG}(c0/2, d0/2)}{tau^2_j ~ IG(c0/2, d0/2).} For the item parameters in the standard model, the prior is: \deqn{\alpha_i \sim \mathcal{N}(a0, A0^{-1})}{alpha_i ~ N(a0, A0^{-1})} and \deqn{\beta_i \sim \mathcal{N}(b0, B0^{-1})}{beta_i ~ N(b0, B0^{-1}).} The model is identified by the proper priors on the item parameters and constraints placed on the ability parameters. As is the case with all measurement models, make sure that you have plenty of free memory, especially when storing the item parameters. } \value{ An mcmc object that contains the posterior sample. This object can be summarized by functions provided by the coda package. } \references{ Andrew D. Martin and Kevin M. Quinn. 2002. "Dynamic Ideal Point Estimation via Markov Chain Monte Carlo for the U.S. Supreme Court, 1953-1999." \emph{Political Analysis.} 10: 134-153. Andrew D. Martin, Kevin M. Quinn, and Jong Hee Park. 2011. ``MCMCpack: Markov Chain Monte Carlo in R.'', \emph{Journal of Statistical Software}. 42(9): 1-21. \url{http://www.jstatsoft.org/v42/i09/}. } \author{Kevin M. Quinn } \seealso{\code{\link[coda]{plot.mcmc}},\code{\link[coda]{summary.mcmc}}, \code{\link[MCMCpack]{MCMCirt1d}} } \examples{ \dontrun{ data(Rehnquist) ## assign starting values theta.start <- rep(0, 9) theta.start[2] <- -3 ## Stevens theta.start[7] <- 2 ## Thomas out <- MCMCdynamicIRT1d(t(Rehnquist[,1:9]), item.time.map=Rehnquist$time, theta.start=theta.start, mcmc=50000, burnin=20000, thin=5, verbose=500, tau2.start=rep(0.1, 9), e0=0, E0=1, a0=0, A0=1, b0=0, B0=1, c0=-1, d0=-1, store.item=FALSE, theta.constraints=list(Stevens="-", Thomas="+")) summary(out) } } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{ models } MCMCpack/man/MCMCdynamicEI.Rd0000644000176000001440000002071312133644110015222 0ustar ripleyusers\name{MCMCdynamicEI} \alias{MCMCdynamicEI} \title{Markov Chain Monte Carlo for Quinn's Dynamic Ecological Inference Model} \description{ MCMCdynamicEI is used to fit Quinn's dynamic ecological inference model for partially observed 2 x 2 contingency tables. } \usage{ MCMCdynamicEI(r0, r1, c0, c1, burnin=5000, mcmc=50000, thin=1, verbose=0, seed=NA, W=0, a0=0.825, b0=0.0105, a1=0.825, b1=0.0105, ...) } \arguments{ \item{r0}{\eqn{(ntables \times 1)}{(ntables * 1)} vector of row sums from row 0.} \item{r1}{\eqn{(ntables \times 1)}{(ntables * 1)} vector of row sums from row 1.} \item{c0}{\eqn{(ntables \times 1)}{(ntables * 1)} vector of column sums from column 0.} \item{c1}{\eqn{(ntables \times 1)}{(ntables * 1)} vector of column sums from column 1.} \item{burnin}{The number of burn-in scans for the sampler.} \item{mcmc}{The number of mcmc scans to be saved.} \item{thin}{The thinning interval used in the simulation. The number of mcmc iterations must be divisible by this value.} \item{verbose}{A switch which determines whether or not the progress of the sampler is printed to the screen. If \code{verbose} is greater than 0 then every \code{verbose}th iteration will be printed to the screen.} \item{seed}{The seed for the random number generator. 
If NA, the Mersenne Twister generator is used with default seed 12345; if an integer is passed it is used to seed the Mersenne twister. The user can also pass a list of length two to use the L'Ecuyer random number generator, which is suitable for parallel computation. The first element of the list is the L'Ecuyer seed, which is a vector of length six or NA (if NA a default seed of \code{rep(12345,6)} is used). The second element of list is a positive substream number. See the MCMCpack specification for more details.} \item{W}{Weight (\emph{not precision}) matrix structuring the temporal dependence among elements of \eqn{\theta_{0}}{theta0} and \eqn{\theta_{1}}{theta1}. The default value of 0 will construct a weight matrix that corresponds to random walk priors for \eqn{\theta_{0}}{theta0} and \eqn{\theta_{1}}{theta1}. The default assumes that the tables are equally spaced throughout time and that the elements of \eqn{r0}, \eqn{r1}, \eqn{c0}, and \eqn{c1} are temporally ordered.} \item{a0}{\code{a0/2} is the shape parameter for the inverse-gamma prior on the \eqn{\sigma^2_0}{sigma^2_0} parameter.} \item{b0}{\code{b0/2} is the scale parameter for the inverse-gamma prior on the \eqn{\sigma^2_0}{sigma^2_0} parameter.} \item{a1}{\code{a1/2} is the shape parameter for the inverse-gamma prior on the \eqn{\sigma^2_1}{sigma^2_1} parameter.} \item{b1}{\code{b1/2} is the scale parameter for the inverse-gamma prior on the \eqn{\sigma^2_1}{sigma^2_1} parameter.} \item{...}{further arguments to be passed} } \value{ An mcmc object that contains the sample from the posterior distribution. This object can be summarized by functions provided by the coda package. } \details{ Consider the following partially observed 2 by 2 contingency table for unit \eqn{t} where \eqn{t=1,\ldots,ntables}{t=1,...,ntables}:\cr \cr \tabular{llll}{ \tab | \eqn{Y=0} \tab | \eqn{Y=1} \tab | \cr - - - - - \tab - - - - - \tab - - - - - \tab - - - - - \cr \eqn{X=0} \tab | \eqn{Y_{0t}}{Y0[t]} \tab | \tab | \eqn{r_{0t}}{r0[t]}\cr - - - - - \tab - - - - - \tab - - - - - \tab - - - - - \cr \eqn{X=1} \tab | \eqn{Y_{1t}}{Y1[t]} \tab | \tab | \eqn{r_{1t}}{r1[t]}\cr - - - - - \tab - - - - - \tab - - - - - \tab - - - - - \cr \tab | \eqn{c_{0t}}{c0[t]} \tab | \eqn{c_{1t}}{c1[t]} \tab | \eqn{N_t}{N[t]}\cr } Where \eqn{r_{0t}}{r0-t}, \eqn{r_{1t}}{r1[t]}, \eqn{c_{0t}}{c0[t]}, \eqn{c_{1t}}{c1[t]}, and \eqn{N_t}{N[t]} are non-negative integers that are observed. The interior cell entries are not observed. It is assumed that \eqn{Y_{0t}|r_{0t} \sim \mathcal{B}inomial(r_{0t}, p_{0t})}{Y0[t]|r0[t] ~ Binomial(r0[t], p0[t])} and \eqn{Y_{1t}|r_{1t} \sim \mathcal{B}inomial(r_{1t}, p_{1t})}{Y1[t]|r1[t] ~ Binomial(r1[t],p1[t])}. Let \eqn{\theta_{0t} = log(p_{0t}/(1-p_{0t}))}{theta0[t] = log(p0[t]/(1-p0[t]))}, and \eqn{\theta_{1t} = log(p_{1t}/(1-p_{1t}))}{theta1[t] = log(p1[t]/(1-p1[t]))}. The following prior distributions are assumed: \deqn{p(\theta_0|\sigma^2_0) \propto \sigma_0^{-ntables} \exp \left(-\frac{1}{2\sigma^2_0} \theta'_{0} P \theta_{0}\right)}{p(theta0|sigma^2_0) propto sigma^(-ntables)_0 exp(-1/(2*sigma^2_0) theta0' * P * theta0)} and \deqn{p(\theta_1|\sigma^2_1) \propto \sigma_1^{-ntables} \exp \left(-\frac{1}{2\sigma^2_1} \theta'_{1} P \theta_{1}\right)}{p(theta1|sigma^2_1) propto sigma^(-ntables)_1 exp(-1/(2*sigma^2_1) theta1' * P * theta1)} where \eqn{P_{ts}}{P[t,s]} = \eqn{-W_{ts}}{-W[t,s]} for \eqn{t} not equal to \eqn{s} and \eqn{P_{tt}}{P[t,t]} = \eqn{\sum_{s \ne t}W_{ts}}{sum(W[t,])}. 
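The relationship between W and the prior precision structure P can be written out in a few lines of R. The sketch below is illustrative only; the adjacent-period weights of 1 are an assumption standing in for the random-walk weight matrix that the default W=0 is said to construct internally.
ntables <- 5
W <- matrix(0, ntables, ntables)
W[abs(row(W) - col(W)) == 1] <- 1   # assumed weights: adjacent time periods only
P <- -W                             # P[t,s] = -W[t,s] for t not equal to s
diag(P) <- rowSums(W)               # P[t,t] = sum over s != t of W[t,s]
P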
The \eqn{\theta_{0t}}{theta0[t]} is assumed to be a priori independent of \eqn{\theta_{1t}}{theta1[t]} for all t. In addition, the following hyperpriors are assumed: \eqn{\sigma^2_0 \sim \mathcal{IG}(a_0/2, b_0/2)}{sigma^2_0 ~ InvGamma(a0/2, b0/2)}, and \eqn{\sigma^2_1 \sim \mathcal{IG}(a_1/2, b_1/2)}{sigma^2_1 ~ InvGamma(a1/2, b1/2)}. Inference centers on \eqn{p_0}{p0}, \eqn{p_1}{p1}, \eqn{\sigma^2_0}{sigma^2_0}, and \eqn{\sigma^2_1}{sigma^2_1}. Univariate slice sampling (Neal, 2003) together with Gibbs sampling is used to sample from the posterior distribution. } \references{ Kevin Quinn. 2004. ``Ecological Inference in the Presence of Temporal Dependence.'' In \emph{Ecological Inference: New Methodological Strategies}. Gary King, Ori Rosen, and Martin A. Tanner (eds.). New York: Cambridge University Press. Andrew D. Martin, Kevin M. Quinn, and Jong Hee Park. 2011. ``MCMCpack: Markov Chain Monte Carlo in R.'', \emph{Journal of Statistical Software}. 42(9): 1-21. \url{http://www.jstatsoft.org/v42/i09/}. Radford Neal. 2003. ``Slice Sampling'' (with discussion). \emph{Annals of Statistics}, 31: 705-767. Daniel Pemstein, Kevin M. Quinn, and Andrew D. Martin. 2007. \emph{Scythe Statistical Library 1.0.} \url{http://scythe.wustl.edu}. Martyn Plummer, Nicky Best, Kate Cowles, and Karen Vines. 2002. \emph{Output Analysis and Diagnostics for MCMC (CODA)}. \url{http://www-fis.iarc.fr/coda/}. Jonathan C. Wakefield. 2004. ``Ecological Inference for 2 x 2 Tables.'' \emph{Journal of the Royal Statistical Society, Series A}. 167(3): 385-445. } \examples{ \dontrun{ ## simulated data example 1 set.seed(3920) n <- 100 r0 <- rpois(n, 2000) r1 <- round(runif(n, 100, 4000)) p0.true <- pnorm(-1.5 + 1:n/(n/2)) p1.true <- pnorm(1.0 - 1:n/(n/4)) y0 <- rbinom(n, r0, p0.true) y1 <- rbinom(n, r1, p1.true) c0 <- y0 + y1 c1 <- (r0+r1) - c0 ## plot data dtomogplot(r0, r1, c0, c1, delay=0.1) ## fit dynamic model post1 <- MCMCdynamicEI(r0,r1,c0,c1, mcmc=40000, thin=5, verbose=100, seed=list(NA, 1)) ## fit exchangeable hierarchical model post2 <- MCMChierEI(r0,r1,c0,c1, mcmc=40000, thin=5, verbose=100, seed=list(NA, 2)) p0meanDyn <- colMeans(post1)[1:n] p1meanDyn <- colMeans(post1)[(n+1):(2*n)] p0meanHier <- colMeans(post2)[1:n] p1meanHier <- colMeans(post2)[(n+1):(2*n)] ## plot truth and posterior means pairs(cbind(p0.true, p0meanDyn, p0meanHier, p1.true, p1meanDyn, p1meanHier)) ## simulated data example 2 set.seed(8722) n <- 100 r0 <- rpois(n, 2000) r1 <- round(runif(n, 100, 4000)) p0.true <- pnorm(-1.0 + sin(1:n/(n/4))) p1.true <- pnorm(0.0 - 2*cos(1:n/(n/9))) y0 <- rbinom(n, r0, p0.true) y1 <- rbinom(n, r1, p1.true) c0 <- y0 + y1 c1 <- (r0+r1) - c0 ## plot data dtomogplot(r0, r1, c0, c1, delay=0.1) ## fit dynamic model post1 <- MCMCdynamicEI(r0,r1,c0,c1, mcmc=40000, thin=5, verbose=100, seed=list(NA, 1)) ## fit exchangeable hierarchical model post2 <- MCMChierEI(r0,r1,c0,c1, mcmc=40000, thin=5, verbose=100, seed=list(NA, 2)) p0meanDyn <- colMeans(post1)[1:n] p1meanDyn <- colMeans(post1)[(n+1):(2*n)] p0meanHier <- colMeans(post2)[1:n] p1meanHier <- colMeans(post2)[(n+1):(2*n)] ## plot truth and posterior means pairs(cbind(p0.true, p0meanDyn, p0meanHier, p1.true, p1meanDyn, p1meanHier)) } } \keyword{models} \seealso{\code{\link{MCMChierEI}}, \code{\link[coda]{plot.mcmc}},\code{\link[coda]{summary.mcmc}}} MCMCpack/man/MCMCbinaryChange.Rd0000755000176000001440000001424212133644110015755 0ustar ripleyusers\name{MCMCbinaryChange} \alias{MCMCbinaryChange} \title{Markov Chain Monte Carlo for a Binary Multiple Changepoint
Model} \description{ This function generates a sample from the posterior distribution of a binary model with multiple changepoints. The function uses the Markov chain Monte Carlo method of Chib (1998). The user supplies data and priors, and a sample from the posterior distribution is returned as an mcmc object, which can be subsequently analyzed with functions provided in the coda package. } \usage{MCMCbinaryChange(data, m = 1, c0 = 1, d0 = 1, a = NULL, b = NULL, burnin = 10000, mcmc = 10000, thin = 1, verbose = 0, seed = NA, phi.start = NA, P.start = NA, marginal.likelihood = c("none", "Chib95"), ...)} \arguments{ \item{data}{The data, a vector or time series of binary (0/1) observations.} \item{m}{The number of changepoints.} \item{c0}{\eqn{c_0}{c0} is the shape1 parameter for Beta prior on \eqn{\phi}{phi} (the mean).} \item{d0}{\eqn{d_0}{d0} is the shape2 parameter for Beta prior on \eqn{\phi}{phi} (the mean).} \item{a}{\eqn{a}{a} is the shape1 beta prior for transition probabilities. By default, the expected duration is computed and corresponding a and b values are assigned. The expected duration is the sample period divided by the number of states.} \item{b}{\eqn{b}{b} is the shape2 beta prior for transition probabilities. By default, the expected duration is computed and corresponding a and b values are assigned. The expected duration is the sample period divided by the number of states.} \item{burnin}{The number of burn-in iterations for the sampler.} \item{mcmc}{The number of MCMC iterations after burn-in.} \item{thin}{The thinning interval used in the simulation. The number of MCMC iterations must be divisible by this value.} \item{verbose}{A switch which determines whether or not the progress of the sampler is printed to the screen. If \code{verbose} is greater than 0, the iteration number and the posterior density samples are printed to the screen every \code{verbose}th iteration.} \item{seed}{The seed for the random number generator. If NA, current R system seed is used.} \item{phi.start}{The starting values for the mean. The default value of NA will use draws from the Uniform distribution.} \item{P.start}{The starting values for the transition matrix. A user should provide a square matrix with dimension equal to the number of states. By default, draws from the \code{Beta(0.9, 0.1)} are used to construct a proper transition matrix for each row except the last row.} \item{marginal.likelihood}{How should the marginal likelihood be calculated? Options are: \code{none} in which case the marginal likelihood will not be calculated, and \code{Chib95} in which case the method of Chib (1995) is used.} \item{...}{further arguments to be passed} } \value{ An mcmc object that contains the posterior sample. This object can be summarized by functions provided by the coda package. The object contains an attribute \code{prob.state} storage matrix that contains the probability of \eqn{state_i}{state_i} for each period, and the log-marginal likelihood of the model (\code{logmarglike}). } \details{ \code{MCMCbinaryChange} simulates from the posterior distribution of a binary model with multiple changepoints. The model takes the following form: \deqn{Y_t \sim \mathcal{B}ernoulli(\phi_i),\;\; i = 1, \ldots, k}{Y_t ~ Bernoulli(phi_i), i = 1,...,k.} Where \eqn{k}{k} is the number of states.
We assume Beta priors for \eqn{\phi_{i}}{phi_i} and for transition probabilities: \deqn{\phi_i \sim \mathcal{B}eta(c_0, d_0)}{phi_i ~ Beta(c0, d0)} And: \deqn{p_{mm} \sim \mathcal{B}eta(a, b),\;\; m = 1, \ldots, k}{p_mm ~ Beta(a, b), m = 1,...,k.} Where \eqn{k}{k} is the number of states. } \author{Jong Hee Park, \email{jhp@uchicago.edu}, \url{http://home.uchicago.edu/~jhp/}.} \references{ Jong Hee Park. 2011. ``Changepoint Analysis of Binary and Ordinal Probit Models: An Application to Bank Rate Policy Under the Interwar Gold Standard.'' \emph{Political Analysis}. 19: 188-204. Andrew D. Martin, Kevin M. Quinn, and Jong Hee Park. 2011. ``MCMCpack: Markov Chain Monte Carlo in R.'', \emph{Journal of Statistical Software}. 42(9): 1-21. \url{http://www.jstatsoft.org/v42/i09/}. Martyn Plummer, Nicky Best, Kate Cowles, and Karen Vines. 2002. \emph{Output Analysis and Diagnostics for MCMC (CODA)}. \url{http://www-fis.iarc.fr/coda/}. Siddhartha Chib. 1995. ``Marginal Likelihood from the Gibbs Output.'' \emph{Journal of the American Statistical Association}. 90: 1313-1321. } \examples{ \dontrun{ set.seed(19173) true.phi<- c(0.5, 0.8, 0.4) ## two breaks at c(80, 180) y1 <- rbinom(80, 1, true.phi[1]) y2 <- rbinom(100, 1, true.phi[2]) y3 <- rbinom(120, 1, true.phi[3]) y <- as.ts(c(y1, y2, y3)) model0 <- MCMCbinaryChange(y, m=0, c0=2, d0=2, mcmc=1000, burnin=1000, verbose=500, marginal.likelihood = "Chib95") model1 <- MCMCbinaryChange(y, m=1, c0=2, d0=2, mcmc=1000, burnin=1000, verbose=500, marginal.likelihood = "Chib95") model2 <- MCMCbinaryChange(y, m=2, c0=2, d0=2, mcmc=1000, burnin=1000, verbose=500, marginal.likelihood = "Chib95") model3 <- MCMCbinaryChange(y, m=3, c0=2, d0=2, mcmc=1000, burnin=1000, verbose=500, marginal.likelihood = "Chib95") model4 <- MCMCbinaryChange(y, m=4, c0=2, d0=2, mcmc=1000, burnin=1000, verbose=500, marginal.likelihood = "Chib95") model5 <- MCMCbinaryChange(y, m=5, c0=2, d0=2, mcmc=1000, burnin=1000, verbose=500, marginal.likelihood = "Chib95") print(BayesFactor(model0, model1, model2, model3, model4, model5)) ## plot two plots in one screen par(mfrow=c(attr(model2, "m") + 1, 1), mai=c(0.4, 0.6, 0.3, 0.05)) plotState(model2, legend.control = c(1, 0.6)) plotChangepoint(model2, verbose = TRUE, ylab="Density", start=1, overlay=TRUE) } } \keyword{models} \seealso{\code{\link{MCMCpoissonChange}},\code{\link{plotState}}, \code{\link{plotChangepoint}}} MCMCpack/man/MCbinomialbeta.Rd0000644000176000001440000000330012133644110015563 0ustar ripleyusers\name{MCbinomialbeta} \alias{MCbinomialbeta} \title{Monte Carlo Simulation from a Binomial Likelihood with a Beta Prior} \description{ This function generates a sample from the posterior distribution of a binomial likelihood with a Beta prior. } \usage{ MCbinomialbeta(y, n, alpha=1, beta=1, mc=1000, ...) } \arguments{ \item{y}{The number of successes in the independent Bernoulli trials.} \item{n}{The number of independent Bernoulli trials.} \item{alpha}{Beta prior distribution alpha parameter.} \item{beta}{Beta prior distribution beta parameter.} \item{mc}{The number of Monte Carlo draws to make.} \item{...}{further arguments to be passed} } \value{ An mcmc object that contains the posterior sample. This object can be summarized by functions provided by the coda package. } \details{ \code{MCbinomialbeta} directly simulates from the posterior distribution. This model is designed primarily for instructional use. \eqn{\pi}{pi} is the probability of success for each independent Bernoulli trial.
We assume a conjugate Beta prior: \deqn{\pi \sim \mathcal{B}eta(\alpha, \beta)}{pi ~ Beta(alpha, beta)} \eqn{y} is the number of successes in \eqn{n} trials. By default, a uniform prior is used. } \examples{ \dontrun{ posterior <- MCbinomialbeta(3,12,mc=5000) summary(posterior) plot(posterior) grid <- seq(0,1,0.01) plot(grid, dbeta(grid, 1, 1), type="l", col="red", lwd=3, ylim=c(0,3.6), xlab="pi", ylab="density") lines(density(posterior), col="blue", lwd=3) legend(.75, 3.6, c("prior", "posterior"), lwd=3, col=c("red", "blue")) } } \keyword{models} \seealso{\code{\link[coda]{plot.mcmc}}, \code{\link[coda]{summary.mcmc}}} MCMCpack/man/make.breaklist.Rd0000644000176000001440000000216012133644110015610 0ustar ripleyusers\name{make.breaklist} \alias{make.breaklist} \title{Vector of break numbers} \description{This function generates a vector of break numbers using the output of \code{testpanelSubjectBreak}. The function performs a pairwise comparison of models using Bayes Factors. } \usage{ make.breaklist(BF, threshold=3) } \arguments{ \item{BF}{output of \code{testpanelSubjectBreak}.} \item{threshold}{The Bayes Factor threshold to pick the best model. If a Bayes factor of two models is smaller than \code{threshold}, the model with a smaller number of break is chosen to avoid the over-identification problem. Users can change threshold into any positive number. The default value of 3 is chosen as it indicates the existence of "substantial evidence" in favor of the model in the numerator according to Jeffreys' scale.} } \references{ Jong Hee Park, 2011. ``A Unified Method for Dynamic and Cross-Sectional Heterogeneity: Introducing Hidden Markov Panel Models." Working Paper. Harold Jeffreys, 1961. The Theory of Probability. Oxford University Press. } \seealso{\code{\link{testpanelSubjectBreak}}} MCMCpack/man/iwishart.Rd0000644000176000001440000000161112133644110014546 0ustar ripleyusers\name{InvWishart} \alias{diwish} \alias{riwish} \alias{InvWishart} \title{The Inverse Wishart Distribution} \description{ Density function and random generation from the Inverse Wishart distribution. } \usage{ diwish(W, v, S) riwish(v, S) } \arguments{ \item{W}{Positive definite matrix W \eqn{(p \times p)}{(p x p)}.} \item{v}{Degrees of freedom (scalar).} \item{S}{Scale matrix \eqn{(p \times p)}{(p x p)}.}} \value{ \code{diwish} evaluates the density at positive definite matrix W. \code{riwish} generates one random draw from the distribution. } \details{ The mean of an inverse Wishart random variable with \code{v} degrees of freedom and scale matrix \code{S} is \eqn{(v-p-1)^{-1}S}{1/(v-p-1) S}. } \examples{ density <- diwish(matrix(c(2,-.3,-.3,4),2,2), 3, matrix(c(1,.3,.3,1),2,2)) draw <- riwish(3, matrix(c(1,.3,.3,1),2,2)) } \keyword{distribution} MCMCpack/man/invgamma.Rd0000644000176000001440000000236412133644110014521 0ustar ripleyusers\name{InvGamma} \alias{dinvgamma} \alias{rinvgamma} \alias{InvGamma} \title{The Inverse Gamma Distribution} \description{ Density function and random generation from the inverse gamma distribution. } \usage{ rinvgamma(n, shape, scale = 1) dinvgamma(x, shape, scale = 1) } \arguments{ \item{x}{Scalar location to evaluate density.} \item{n}{Number of draws from the distribution.} \item{shape}{Scalar shape parameter.} \item{scale}{Scalar scale parameter (default value one).} } \value{ \code{dinvgamma} evaluates the density at \code{x}. \code{rinvgamma} takes \code{n} draws from the inverse Gamma distribution. 
The parameterization is consistent with the Gamma Distribution in the stats package. } \details{ An inverse gamma random variable with shape \eqn{a}{a} and scale \eqn{b}{b} has mean \eqn{\frac{b}{a-1}}{b/(a-1)} (assuming \eqn{a>1}{a>1}) and variance \eqn{\frac{b^2}{(a-1)^2(a-2)}}{(b^2)/((a-1)^2 (a-2))} (assuming \eqn{a>2}{a>2}). } \references{ Andrew Gelman, John B. Carlin, Hal S. Stern, and Donald B. Rubin. 2004. \emph{Bayesian Data Analysis}. 2nd Edition. Boca Raton: Chapman & Hall. } \examples{ density <- dinvgamma(4.2, 1.1) draws <- rinvgamma(10, 3.2) } \keyword{distribution} \seealso{\code{\link[stats]{GammaDist}}} MCMCpack/man/HMMpanelRE.Rd0000644000176000001440000002352112133644110014610 0ustar ripleyusers\name{HMMpanelRE} \alias{HMMpanelRE} \title{Markov Chain Monte Carlo for the Hidden Markov Random-effects Model} \description{HMMpanelRE generates a sample from the posterior distribution of the hidden Markov random-effects model discussed in Park (2011). The code works for panel data with the same starting point. The sampling of panel parameters is based on Algorithm 2 of Chib and Carlin (1999). This model uses a multivariate Normal prior for the fixed effects parameters and varying individual effects, an Inverse-Wishart prior on the random-effects parameters, an Inverse-Gamma prior on the residual error variance, and Beta prior for transition probabilities. The user supplies data and priors, and a sample from the posterior distribution is returned as an mcmc object, which can be subsequently analyzed with functions provided in the coda package.} \usage{ HMMpanelRE(subject.id, time.id, y, X, W, m=1, mcmc=1000, burnin=1000, thin=1, verbose=0, b0=0, B0=0.001, c0 = 0.001, d0 = 0.001, r0, R0, a = NULL, b = NULL, seed = NA, beta.start = NA, sigma2.start = NA, D.start= NA, P.start = NA, marginal.likelihood = c("none", "Chib95"), ...)} \arguments{ \item{subject.id}{A numeric vector indicating the group number. It should start from 1.} \item{time.id}{A numeric vector indicating the time unit. It should start from 1.} \item{y}{The dependent variable} \item{X}{The model matrix of the fixed-effects} \item{W}{The model matrix of the random-effects. W should be a subset of X.} \item{m}{The number of changepoints.} \item{mcmc}{The number of MCMC iterations after burn-in.} \item{burnin}{The number of burn-in iterations for the sampler.} \item{thin}{The thinning interval used in the simulation. The number of MCMC iterations must be divisible by this value.} \item{verbose}{A switch which determines whether or not the progress of the sampler is printed to the screen. If \code{verbose} is greater than 0, the iteration number and the posterior density samples are printed to the screen every \code{verbose}th iteration.} \item{b0}{The prior mean of \eqn{\beta}{beta}. This can either be a scalar or a column vector with dimension equal to the number of betas. If this takes a scalar value, then that value will serve as the prior mean for all of the betas.} \item{B0}{The prior precision of \eqn{\beta}{beta}. This can either be a scalar or a square matrix with dimensions equal to the number of betas. If this takes a scalar value, then that value times an identity matrix serves as the prior precision of beta. Default value of 0 is equivalent to an improper uniform prior for beta.} \item{c0}{\eqn{c_0/2}{c0/2} is the shape parameter for the inverse Gamma prior on \eqn{\sigma^2}{sigma^2} (the variance of the disturbances). 
The amount of information in the inverse Gamma prior is something like that from \eqn{c_0}{c0} pseudo-observations.} \item{d0}{\eqn{d_0/2}{d0/2} is the scale parameter for the inverse Gamma prior on \eqn{\sigma^2}{sigma^2} (the variance of the disturbances). In constructing the inverse Gamma prior, \eqn{d_0}{d0} acts like the sum of squared errors from the \eqn{c_0}{c0} pseudo-observations.} \item{r0}{The shape parameter for the Inverse-Wishart prior on the variance matrix of the random effects. Set r0 = q for an uninformative prior, where q is the number of random effects.} \item{R0}{The scale matrix for the Inverse-Wishart prior on the variance matrix of the random effects. This must be a square matrix of dimension q. Use plausible variances for the random effects on the diagonal of R0.} \item{a}{\eqn{a}{a} is the shape1 parameter of the Beta prior on transition probabilities. By default, the expected duration is computed and corresponding a and b values are assigned. The expected duration is the sample period divided by the number of states.} \item{b}{\eqn{b}{b} is the shape2 parameter of the Beta prior on transition probabilities. By default, the expected duration is computed and corresponding a and b values are assigned. The expected duration is the sample period divided by the number of states.} \item{seed}{The seed for the random number generator. If NA, the current R system seed is used.} \item{beta.start}{The starting values for the beta vector. This can either be a scalar or a column vector with dimension equal to the number of betas. The default value of NA will use draws from a Uniform distribution over the range of the data as the starting values. If this is a scalar, that value will serve as the starting value for all of the betas. When there is no covariate, the log of the mean should be used.} \item{sigma2.start}{The starting values for \eqn{\sigma^2}{sigma^2}. This can either be a scalar or a column vector with dimension equal to the number of states.} \item{D.start}{The starting values for the random-effects variance matrix \eqn{D}{D}.} \item{P.start}{The starting values for the transition matrix. A user should provide a square matrix with dimension equal to the number of states. By default, draws from the \code{Beta(0.9, 0.1)} are used to construct a proper transition matrix for each row except the last row.} \item{marginal.likelihood}{How should the marginal likelihood be calculated? Options are: \code{none}, in which case the marginal likelihood will not be calculated, and \code{Chib95}, in which case the method of Chib (1995) is used.} \item{...}{further arguments to be passed} } \details{ \code{HMMpanelRE} simulates from the random-effect hidden Markov panel model introduced by Park (2011). The model takes the following form: \deqn{y_i = X_i \beta_m + W_i b_i + \varepsilon_i,\;\; m = 1, \ldots, M}{y_i = X_i * beta_m + W_i * b_i + epsilon_i, m = 1,..., M.} where each group \eqn{i} has \eqn{k_i} observations. Random-effects parameters are assumed to be time-varying at the system level: \deqn{b_i \sim \mathcal{N}_q(0, D_m)}{b_i ~ N_q(0, D_m)} And the errors: \deqn{\varepsilon_i \sim \mathcal{N}(0, \sigma^2_m I_{k_i})}{epsilon_i ~ N(0, sigma^2_m I_{k_i})} We assume standard, conjugate priors: \deqn{\beta \sim \mathcal{N}_p(b_0, B_0^{-1})}{beta ~ N_p(b0, B0^(-1))} And: \deqn{\sigma^{2} \sim \mathcal{IG}amma(c_0/2, d_0/2)}{sigma^2 ~ IGamma(c0/2, d0/2)} And: \deqn{D \sim \mathcal{IW}ishart(r_0, R_0)}{D ~ IWishart(r0, R0)} See Chib and Carlin (1999) for more details. And: \deqn{p_{mm} \sim \mathcal{B}eta(a, b),\;\; m = 1, \ldots, M}{p_mm ~ Beta(a, b), m = 1,...,M.} Where \eqn{M}{M} is the number of states. \emph{NOTE:} We do not provide default parameters for the priors on the precision matrix for the random effects. When fitting one of these models, it is of utmost importance to choose a prior that reflects your prior beliefs about the random effects. Using the \code{dwish} and \code{rwish} functions might be useful in choosing these values. } \value{ An mcmc object that contains the posterior sample. This object can be summarized by functions provided by the coda package. The object contains an attribute \code{prob.state} storage matrix that contains the probability of \eqn{state_i}{state_i} for each period, and the log-marginal likelihood of the model (\code{logmarglike}). } \references{ Jong Hee Park, 2011. ``A Unified Method for Dynamic and Cross-Sectional Heterogeneity: Introducing Hidden Markov Panel Models.'' Working Paper. Siddhartha Chib. 1998. ``Estimation and comparison of multiple change-point models.'' \emph{Journal of Econometrics}. 86: 221-241. } \author{Jong Hee Park, \email{jhp@uchicago.edu}, \url{http://home.uchicago.edu/~jhp/}.} \examples{ \dontrun{ ## data generating set.seed(1977) Q <- 3 true.beta1 <- c(1, 1, 1) ; true.beta2 <- c(-1, -1, -1) true.sigma2 <- c(2, 5); true.D1 <- diag(.5, Q); true.D2 <- diag(2.5, Q) N=30; T=100; NT <- N*T x1 <- runif(NT, 1, 2) x2 <- runif(NT, 1, 2) X <- cbind(1, x1, x2); W <- X; y <- rep(NA, NT) ## the true number of breaks is one, located at the center of the series break.point = rep(T/2, N); break.sigma=c(rep(1, N)); break.list <- rep(1, N) id <- rep(1:N, each=NT/N) K <- ncol(X); ruler <- c(1:T) ## compute the weight for the break W.mat <- matrix(NA, T, N) for (i in 1:N){ W.mat[, i] <- pnorm((ruler-break.point[i])/break.sigma[i]) } Weight <- as.vector(W.mat) ## data generating by weighting two means and variances j = 1 for (i in 1:N){ Xi <- X[j:(j+T-1), ] Wi <- W[j:(j+T-1), ] true.V1 <- true.sigma2[1]*diag(T) + Wi\%*\%true.D1\%*\%t(Wi) true.V2 <- true.sigma2[2]*diag(T) + Wi\%*\%true.D2\%*\%t(Wi) true.mean1 <- Xi\%*\%true.beta1 true.mean2 <- Xi\%*\%true.beta2 weight <- Weight[j:(j+T-1)] y[j:(j+T-1)] <- (1-weight)*true.mean1 + (1-weight)*chol(true.V1)\%*\%rnorm(T) + weight*true.mean2 + weight*chol(true.V2)\%*\%rnorm(T) j <- j + T } ## model fitting subject.id <- c(rep(1:N, each=T)) time.id <- c(rep(1:T, N)) ## priors and MCMC length G <- 100 b0 <- rep(0, K) ; B0 <- solve(diag(100, K)) c0 <- 2; d0 <- 2 r0 <- 5; R0 <- diag(c(1, 0.1, 0.1)) out1 <- HMMpanelRE(subject.id, time.id, y, X, W, m=1, mcmc=G, burnin=G, thin=1, verbose=G, b0=b0, B0=B0, c0=c0, d0=d0, r0=r0, R0=R0) ## latent state changes plotState(out1) ## print mcmc output summary(out1) } } \keyword{models} MCMCpack/man/HMMpanelFE.Rd0000644000176000001440000001741412133644110014600 0ustar ripleyusers\name{HMMpanelFE} \alias{HMMpanelFE} \title{Markov Chain Monte Carlo for the Hidden
Markov Fixed-effects Model} \description{HMMpanelFE generates a sample from the posterior distribution of the fixed-effects model with varying individual effects model discussed in Park (2011). The code works for both balanced and unbalanced panel data as long as there is no missing data in the middle of each group. This model uses a multivariate Normal prior for the fixed effects parameters and varying individual effects, an Inverse-Gamma prior on the residual error variance, and Beta prior for transition probabilities. The user supplies data and priors, and a sample from the posterior distribution is returned as an mcmc object, which can be subsequently analyzed with functions provided in the coda package.} \usage{HMMpanelFE(subject.id, y, X, m, mcmc=1000, burnin=1000, thin=1, verbose=0, b0=0, B0=0.001, c0 = 0.001, d0 = 0.001, delta0=0, Delta0=0.001, a = NULL, b = NULL, seed = NA, ...)} \arguments{ \item{subject.id}{A numeric vector indicating the group number. It should start from 1.} \item{y}{The response variable.} \item{X}{The model matrix excluding the constant.} \item{m}{A vector of break numbers for each subject in the panel.} \item{mcmc}{The number of MCMC iterations after burn-in.} \item{burnin}{The number of burn-in iterations for the sampler.} \item{thin}{The thinning interval used in the simulation. The number of MCMC iterations must be divisible by this value.} \item{verbose}{A switch which determines whether or not the progress of the sampler is printed to the screen. If \code{verbose} is greater than 0, the iteration number and the posterior density samples are printed to the screen every \code{verbose}th iteration.} \item{b0}{The prior mean of \eqn{\beta}{beta}. This can either be a scalar or a column vector with dimension equal to the number of betas. If this takes a scalar value, then that value will serve as the prior mean for all of the betas.} \item{B0}{The prior precision of \eqn{\beta}{beta}. This can either be a scalar or a square matrix with dimensions equal to the number of betas. If this takes a scalar value, then that value times an identity matrix serves as the prior precision of beta. Default value of 0 is equivalent to an improper uniform prior for beta.} \item{c0}{\eqn{c_0/2}{c0/2} is the shape parameter for the inverse Gamma prior on \eqn{\sigma^2}{sigma^2} (the variance of the disturbances). The amount of information in the inverse Gamma prior is something like that from \eqn{c_0}{c0} pseudo-observations.} \item{d0}{\eqn{d_0/2}{d0/2} is the scale parameter for the inverse Gamma prior on \eqn{\sigma^2}{sigma^2} (the variance of the disturbances). In constructing the inverse Gamma prior, \eqn{d_0}{d0} acts like the sum of squared errors from the \eqn{c_0}{c0} pseudo-observations.} \item{delta0}{The prior mean of \eqn{\alpha}{alpha}.} \item{Delta0}{The prior precision of \eqn{\alpha}{alpha}.} \item{a}{\eqn{a}{a} is the shape1 beta prior for transition probabilities. By default, the expected duration is computed and corresponding a and b values are assigned. The expected duration is the sample period divided by the number of states.} \item{b}{\eqn{b}{b} is the shape2 beta prior for transition probabilities. By default, the expected duration is computed and corresponding a and b values are assigned. The expected duration is the sample period divided by the number of states.} \item{seed}{The seed for the random number generator. 
If NA, current R system seed is used.} \item{...}{further arguments to be passed} } \details{ \code{HMMpanelFE} simulates from the fixed-effect hidden Markov panel model introduced by Park (2011). The model takes the following form: \deqn{y_{it} = x'_{it} \beta + \varepsilon_{it}\;\; m = 1, \ldots, M}{y_it = x'_it * beta + epsilon_it, m = 1,...,M.} Unlike conventional fixed-effects models, individual effects and variances are assumed to be time-varying at the subject level: \deqn{\varepsilon_{it} \sim \mathcal{N}(\alpha_{im}, \sigma^2_{im})}{epsilon_it ~ N(alpha_im, sigma^2_im)} We assume standard, semi-conjugate priors: \deqn{\beta \sim \mathcal{N}(b_0,B_0^{-1})}{beta ~ N(b0,B0^(-1))} And: \deqn{\sigma^{-2} \sim \mathcal{G}amma(c_0/2, d_0/2)}{sigma^(-2) ~ Gamma(c0/2, d0/2)} And: \deqn{\alpha \sim \mathcal{N}(delta_0,Delta_0^{-1})}{alpha ~ N(delta0, Delta0^(-1))} \eqn{\beta}{beta}, \eqn{\alpha}{alpha} and \eqn{\sigma^{-2}}{sigma^(-2)} are assumed \emph{a priori} independent. And: \deqn{p_{mm} \sim \mathcal{B}eta(a, b),\;\; m = 1, \ldots, M}{p_mm ~ Beta(a, b), m = 1,...,M.} Where \eqn{M}{M} is the number of states. OLS estimates are used for starting values. } \author{Jong Hee Park, \email{jhp@uchicago.edu}, \url{http://home.uchicago.edu/~jhp/}.} \value{ An mcmc object that contains the posterior sample. This object can be summarized by functions provided by the coda package. The object contains an attribute \code{sigma} storage matrix that contains time-varying residual variance, an attribute \code{state} storage matrix that contains posterior samples of hidden states, and an attribute \code{delta} storage matrix containing time-varying intercepts. } \references{ Jong Hee Park, 2011. ``A Unified Method for Dynamic and Cross-Sectional Heterogeneity: Introducing Hidden Markov Panel Models." Working Paper. Siddhartha Chib. 1998. ``Estimation and comparison of multiple change-point models.'' \emph{Journal of Econometrics}. 86: 221-241. 
} \examples{ \dontrun{ ## data generating set.seed(1974) N <- 30 T <- 80 NT <- N*T ## true parameter values true.beta <- c(1, 1) true.sigma <- 3 x1 <- rnorm(NT) x2 <- runif(NT, 2, 4) ## group-specific breaks break.point = rep(T/2, N); break.sigma=c(rep(1, N)); break.list <- rep(1, N) X <- as.matrix(cbind(x1, x2), NT, ); y <- rep(NA, NT) id <- rep(1:N, each=NT/N) K <- ncol(X); true.beta <- as.matrix(true.beta, K, 1) ## compute the break probability ruler <- c(1:T) W.mat <- matrix(NA, T, N) for (i in 1:N){ W.mat[, i] <- pnorm((ruler-break.point[i])/break.sigma[i]) } Weight <- as.vector(W.mat) ## draw time-varying individual effects and sample y j = 1 true.sigma.alpha <- 30 true.alpha1 <- true.alpha2 <- rep(NA, N) for (i in 1:N){ Xi <- X[j:(j+T-1), ] true.mean <- Xi \%*\% true.beta weight <- Weight[j:(j+T-1)] true.alpha1[i] <- rnorm(1, 0, true.sigma.alpha) true.alpha2[i] <- -1*true.alpha1[i] y[j:(j+T-1)] <- ((1-weight)*true.mean + (1-weight)*rnorm(T, 0, true.sigma) + (1-weight)*true.alpha1[i]) + (weight*true.mean + weight*rnorm(T, 0, true.sigma) + weight*true.alpha2[i]) j <- j + T } ## extract the standardized residuals from the OLS with fixed-effects FEols <- lm(y ~ X + as.factor(id) -1 ) resid.all <- rstandard(FEols) time.id <- rep(1:80, N) ## model fitting G <- 100 BF <- testpanelSubjectBreak(subject.id=id, time.id=time.id, resid= resid.all, max.break=3, minimum = 10, mcmc=G, burnin = G, thin=1, verbose=G, b0=0, B0=1/100, c0=2, d0=2, Time = time.id) ## get the estimated break numbers estimated.breaks <- make.breaklist(BF, threshold=3) ## model fitting out <- HMMpanelFE(subject.id = id, y, X=X, m = estimated.breaks, mcmc=G, burnin=G, thin=1, verbose=G, b0=0, B0=1/1000, c0=2, d0=2, delta0=0, Delta0=1/1000) ## print out the slope estimate ## true values are 1 and 1 summary(out) ## compare them with the result from the constant fixed-effects summary(FEols) } } \keyword{models} MCMCpack/man/dtomog.Rd0000644000176000001440000000733412133644110014215 0ustar ripleyusers\name{dtomogplot} \alias{dtomogplot} \title{Dynamic Tomography Plot} \description{ dtomogplot is used to produce a tomography plot (see King, 1997) for a series of temporally ordered, partially observed 2 x 2 contingency tables. } \usage{ dtomogplot(r0, r1, c0, c1, time.vec=NA, delay=0, xlab="fraction of r0 in c0 (p0)", ylab="fraction of r1 in c0 (p1)", color.palette=heat.colors, bgcol="black", ...) } \arguments{ \item{r0}{An \eqn{(ntables \times 1)}{(ntables * 1)} vector of row sums from row 0.} \item{r1}{An \eqn{(ntables \times 1)}{(ntables * 1)} vector of row sums from row 1.} \item{c0}{An \eqn{(ntables \times 1)}{(ntables * 1)} vector of column sums from column 0.} \item{c1}{An \eqn{(ntables \times 1)}{(ntables * 1)} vector of column sums from column 1.} \item{time.vec}{Vector of time periods that correspond to the elements of \eqn{r_0}{r0}, \eqn{r_1}{r1}, \eqn{c_0}{c0}, and \eqn{c_1}{c1}.} \item{delay}{Time delay in seconds between the plotting of the tomography lines. 
Setting a positive delay is useful for visualizing temporal dependence.} \item{xlab}{The x axis label for the plot.} \item{ylab}{The y axis label for the plot.} \item{color.palette}{Color palette to be used to encode temporal patterns.} \item{bgcol}{The background color for the plot.} \item{...}{further arguments to be passed} } \details{ Consider the following partially observed 2 by 2 contingency table:\cr \cr \tabular{llll}{ \tab | \eqn{Y=0} \tab | \eqn{Y=1} \tab | \cr - - - - - \tab - - - - - \tab - - - - - \tab - - - - - \cr \eqn{X=0} \tab | \eqn{Y_0}{Y0} \tab | \tab | \eqn{r_0}{r0}\cr - - - - - \tab - - - - - \tab - - - - - \tab - - - - - \cr \eqn{X=1} \tab | \eqn{Y_1}{Y1} \tab | \tab | \eqn{r_1}{r1}\cr - - - - - \tab - - - - - \tab - - - - - \tab - - - - - \cr \tab | \eqn{c_0}{c0} \tab | \eqn{c_1}{c1} \tab | \eqn{N}\cr } where \eqn{r_0}{r0}, \eqn{r_1}{r1}, \eqn{c_0}{c0}, \eqn{c_1}{c1}, and \eqn{N} are non-negative integers that are observed. The interior cell entries are not observed. It is assumed that \eqn{Y_0|r_0 \sim \mathcal{B}inomial(r_0, p_0)}{Y0|r0 ~ Binomial(r0, p0)} and \eqn{Y_1|r_1 \sim \mathcal{B}inomial(r_1, p_1)}{Y1|r1 ~ Binomial(r1,p1)}. This function plots the bounds on the maximum likelihood estimates for (p0, p1) and color codes them by the elements of time.vec. } \keyword{hplot} \references{ Gary King, 1997. \emph{A Solution to the Ecological Inference Problem}. Princeton: Princeton University Press. Jonathan C. Wakefield. 2004. ``Ecological Inference for 2 x 2 Tables.'' \emph{Journal of the Royal Statistical Society, Series A}. 167(3): 385445. Kevin Quinn. 2004. ``Ecological Inference in the Presence of Temporal Dependence." In \emph{Ecological Inference: New Methodological Strategies}. Gary King, Ori Rosen, and Martin A. Tanner (eds.). New York: Cambridge University Press. } \examples{ \dontrun{ ## simulated data example 1 set.seed(3920) n <- 100 r0 <- rpois(n, 2000) r1 <- round(runif(n, 100, 4000)) p0.true <- pnorm(-1.5 + 1:n/(n/2)) p1.true <- pnorm(1.0 - 1:n/(n/4)) y0 <- rbinom(n, r0, p0.true) y1 <- rbinom(n, r1, p1.true) c0 <- y0 + y1 c1 <- (r0+r1) - c0 ## plot data dtomogplot(r0, r1, c0, c1, delay=0.1) ## simulated data example 2 set.seed(8722) n <- 100 r0 <- rpois(n, 2000) r1 <- round(runif(n, 100, 4000)) p0.true <- pnorm(-1.0 + sin(1:n/(n/4))) p1.true <- pnorm(0.0 - 2*cos(1:n/(n/9))) y0 <- rbinom(n, r0, p0.true) y1 <- rbinom(n, r1, p1.true) c0 <- y0 + y1 c1 <- (r0+r1) - c0 ## plot data dtomogplot(r0, r1, c0, c1, delay=0.1) } } \seealso{\code{\link{MCMChierEI}}, \code{\link{MCMCdynamicEI}},\code{\link{tomogplot}} } MCMCpack/man/dirichlet.Rd0000644000176000001440000000213612133644110014666 0ustar ripleyusers\name{Dirichlet} \alias{Dirichlet} \alias{ddirichlet} \alias{rdirichlet} \title{The Dirichlet Distribution} \description{ Density function and random generation from the Dirichlet distribution. } \usage{ ddirichlet(x, alpha) rdirichlet(n, alpha) } \arguments{ \item{x}{A vector containing a single deviate or matrix containing one random deviate per row.} \item{n}{Number of random vectors to generate. } \item{alpha}{Vector of shape parameters, or matrix of shape parameters corresponding to the number of draw.} } \details{ The Dirichlet distribution is the multidimensional generalization of the beta distribution. } \value{ \code{ddirichlet} gives the density. \code{rdirichlet} returns a matrix with \code{n} rows, each containing a single Dirichlet random deviate. } \author{ Code is taken from Greg's Miscellaneous Functions (gregmisc). 
His code was based on code posted by Ben Bolker to R-News on 15 Dec 2000. } \seealso{ \code{\link[stats]{Beta}} } \examples{ density <- ddirichlet(c(.1,.2,.7), c(1,1,1)) draws <- rdirichlet(20, c(1,1,1) ) } \keyword{distribution} MCMCpack/man/choicevar.Rd0000644000176000001440000000117012133644110014657 0ustar ripleyusers\name{choicevar} \alias{choicevar} \title{Handle Choice-Specific Covariates in Multinomial Choice Models} \description{ This function handles choice-specific covariates in multinomial choice models. See the example for an example of useage. } \usage{ choicevar(var, varname, choicelevel) } \arguments{ \item{var}{The is the name of the variable in the dataframe.} \item{varname}{The name of the new variable to be created.} \item{choicelevel}{The level of \code{y} that the variable corresponds to.} } \value{ The new variable used by the \code{MCMCmnl()} function. } \seealso{\code{\link{MCMCmnl}}} \keyword{manip} MCMCpack/man/BayesFactor.Rd0000644000176000001440000000474612133644110015132 0ustar ripleyusers\name{BayesFactor} \alias{BayesFactor} \alias{is.BayesFactor} \title{Create an object of class BayesFactor from MCMCpack output} \description{ This function creates an object of class \code{BayesFactor} from MCMCpack output. } \usage{ BayesFactor(...) is.BayesFactor(BF) } \arguments{ \item{...}{MCMCpack output objects. These have to be of class \code{mcmc} and have a \code{logmarglike} attribute. In what follows, we let \code{M} denote the total number of models to be compared.} \item{BF}{An object to be checked for membership in class \code{BayesFactor}.} } \value{ An object of class \code{BayesFactor}. A \code{BayesFactor} object has four attributes. They are: \code{BF.mat} an \eqn{M \times M}{M by M} matrix in which element \eqn{i,j}{i,j} contains the Bayes factor for model \eqn{i}{i} relative to model \eqn{j}{j}; \code{BF.log.mat} an \eqn{M \times M}{M by M} matrix in which element \eqn{i,j}{i,j} contains the natural log of the Bayes factor for model \eqn{i}{i} relative to model \eqn{j}{j}; \code{BF.logmarglike} an \eqn{M}{M} vector containing the log marginal likelihoods for models 1 through \eqn{M}{M}; and \code{BF.call} an \eqn{M}{M} element list containing the calls used to fit models 1 through \eqn{M}{M}. } \examples{ \dontrun{ data(birthwt) model1 <- MCMCregress(bwt~age+lwt+as.factor(race) + smoke + ht, data=birthwt, b0=c(2700, 0, 0, -500, -500, -500, -500), B0=c(1e-6, .01, .01, 1.6e-5, 1.6e-5, 1.6e-5, 1.6e-5), c0=10, d0=4500000, marginal.likelihood="Chib95", mcmc=10000) model2 <- MCMCregress(bwt~age+lwt+as.factor(race) + smoke, data=birthwt, b0=c(2700, 0, 0, -500, -500, -500), B0=c(1e-6, .01, .01, 1.6e-5, 1.6e-5, 1.6e-5), c0=10, d0=4500000, marginal.likelihood="Chib95", mcmc=10000) model3 <- MCMCregress(bwt~as.factor(race) + smoke + ht, data=birthwt, b0=c(2700, -500, -500, -500, -500), B0=c(1e-6, 1.6e-5, 1.6e-5, 1.6e-5, 1.6e-5), c0=10, d0=4500000, marginal.likelihood="Chib95", mcmc=10000) BF <- BayesFactor(model1, model2, model3) print(BF) } } \concept{Bayes factor} \concept{model comparison} \seealso{\code{\link{MCMCregress}}} \keyword{models} MCMCpack/inst/0000755000176000001440000000000012140061656012636 5ustar ripleyusersMCMCpack/inst/CITATION0000644000176000001440000000140512133644110013765 0ustar ripleyuserscitHeader("To cite MCMCpack in publications use:") citEntry(entry = "Article", title = "{MCMCpack}: Markov Chain Monte Carlo in {R}", author = personList(as.person("Andrew D. Martin"), as.person("Kevin M. 
Quinn"), as.person("Jong Hee Park")), journal = "Journal of Statistical Software", year = "2011", volume = {42}, number = {9}, pages = {1--21}, url = {"http://www.jstatsoft.org/v42/i09/"}, textVersion = paste("Andrew D. Martin, Kevin M. Quinn, Jong Hee Park (2011).", "MCMCpack: Markov Chain Monte Carlo in R.", "Journal of Statistical Software. 42(9): 1-21.", "URL http://www.jstatsoft.org/v42/i09/.") ) MCMCpack/HISTORY0000644000176000001440000005175512140061627012760 0ustar ripleyusers// // Changes and Bug Fixes // 1.3-2 to 1.3-3 * fixed an error in MCMCpoissonChange: is.na(lambda.mu) and dropped offset argument (thanks to Brian Ripley) 1.3-1 to 1.3-2 * fixed two mistakes in marginal likelihood computation in MCMCregress (thanks to Sid Chib). In computing log_sigma2, - exp(digamma) -> digamma - averaging over tot_iter -> averaging over nstore * added MCMCintervention(), MCMCregressChange(), and plotIntervention() * sigma.mu and sigma.var are added in MCMCregress() * lambda.mu and lambda.sigma are added in MCMCpoissonChange() * fixed minor mistakes and change expressions for a recent version of g++ (thanks to Jeffrey Oldham) 1.2-4 to 1.3-1 * fixed many C++ issues related to clang and Solaris (many thanks to Brian Ripley for his significant contributions) 1.2-3 to 1.2-4 * fixed a bug in HMMmultivariateGaussian.cc (comparison of unsigned and singed integers) * cleaned up g++ 4.7 compatibility issues (thanks to Dan Pemstein and Chris Lawrence) 1.2-2 to 1.2-3 * fixed extraneous warnings in hidden.R (thanks to Jeffrey Arnold) * added make.breaklist.R * fixed example codes in HMMpanelFE.Rd and testpanelSubjectBreak.Rd * fixed testpanelSubjectBreak() to handle missing values in subject.id 1.2-1 to 1.2-2 * fixed bug in dwish() (thanks to Taráz E. Buck) * fixed .onAttach() to no longer require packages and use packageStartupMessage() (thanks to Brian Ripley) * updated to most recent version of Scythe library * cleaned up issues with cout, cerr, abort(), exit(), and pthread.h (thanks to Brian Ripley) 1.1-5 to 1.2-1 * added HMMpanelFE() * added HMMpanelRE() * added testpanelGroupBreak() * added testpanelSubjectBreak() * added MCMCresidualBreakAnalysis() * deleted #undef DO, #undef DS, #undef SO, #undef SS due to a symbol clash with Solaris' headers (thanks to Brian Ripley) 1.1-4 to 1.1-5 * fixed an error in MCMCregress: loglike should be dropped 1.1-3 to 1.1-4 * fixed an error in MCMChregress: NQ has to be replaced by NP (line 223) * in MCMChregress, MCMChpoisson, MCMChlogit, R_CheckUserInterrupt(void) is replaced by R_CheckUserInterrupt() * fixed an error in the marginal likelihood computation in MCMCproit.cc and MCMCregress.cc (thanks to Sid Chib) * changed B0 as prior precision in MCMCprobitChange, MCMCoprobitChange and MCMCpoissonChange * the acceptance rate is reported if verbose > 0 * added the reporting of marginal likelihood components if verbose > 0 1.0-11 to 1.1-2 * fixed an error in MCMCoprobit.R * changed the documentation of MCMCoprobit. * Added m=0 option to all changepoint models. Now all changepoint models allow users to set m=0 for marginal likelihood computation. * Fixed a bug in MCMCbinaryChange. I don't know why but log(dbeta) in Scythe often generates NaN results while lndbeta1() does not. 
1.0-10 to 1.0-11 * updated CITATION file in inst/ to coincide with JStatSoft publication * added reference to JStatSoft in references 1.0-9 to 1.0-10 * added CITATION in inst/ to coincide with JStatSoft publication * compressed two large data files (Senate and Nethvote) 1.0-8 to 1.0-9 * added MCMCprobitChange() * added MCMCoprobitChange() * modified MCMCprobit and MCMCprobitres for marginal likelihood estimation 1.0-7 to 1.0-8 * fixed some NAMESPACE issues [thanks to Shawn Treier] 1.0-6 to 1.0-7 * slight modifications to internals of rinvgamma to make parameterization more clear [thanks to Daniel Runcie and Richard Morey] * fixed some NAMESPACE issues with SSVSquantreg and its methods [written by Craig Reed] 1.0-5 to 1.0-6 * added SSVSquantreg [written by Craig Reed] * added functions to handle SSVSquantreg output [written by Craig Reed] * revisions to the MCMCquantreg function [written by Craig Reed] * revisions to the MCMCquantreg function [written by Craig Reed] * fixed parameterization issue with rinvgamma [thanks to Julian Stander] 1.0-4 to 1.0-5 * added heteroskedastic IRT model [written by Ben Lauderdale] * minor changes to error status in hierarchical IRT [written by Mike Malecki] 1.0-3 to 1.0-4 * little change to parameterization of BQR [contributed by Craig Reed] 1.0-2 to 1.0-3 * fix to hierarchical IRT documentation [written by Mike Malecki] * added Bayesian quantile regression [contributed by Craig Reed] 1.0-1 to 1.0-2 * added Poisson regression changepoint analysis MCMCpoissonChange() [JHP] * Old MCMCpoissonChangepoint() is deprecated [JHP] * added binary data changepoint model MCMCbinaryChange() [JHP] * plotState() function modified to support new models [JHP] * plotChangepoint() function modified to support new models * added a number of new helper functions in btsutil.R [JHP] 0.9-6 to 1.0-1 * added one-dimensional dynamic IRT model * added Rehnquist Court data to illustrate dynamic IRT model 0.9-5 to 0.9-6 * fixed bug in MCMCmetrop1R.R [thanks to many for spotting memory issue] * fixed by in Lecuyer seeds [thanks to Eduardo Leoni] 0.9-4 to 0.9-5 * changed how missing values are handled when calculating agreement scores for starting values in factor analysis and IRT models * added warning about incomparable models to BayesFactor * fixed bug with MCMCmnl example [thanks to Duncan Murdoch] * fixed formula interface bug for all models [thanks to Duncan Murdoch] * added hierarchical IRT code [written by Mike Malecki] 0.9-3 to 0.9-4 * fixed issue with MCMCmetrop1R.R [thanks to Jim Albert] * fixed Linux compilation issue [thanks to Chris Lawrence] 0.9-2 to 0.9-3 * added gcc 4.0 check in configure.ac and SystemRequirements * added verbose option to plotChangepoint() 0.9-1 to 0.9-2 * fixed Makevars per Ripley's email [PKG_CXXFLAGS to PKG_CPPFLAGS] * fixed encoding on some documentation files [thanks to Kurt Hornik] * added other estimation algorithms for MCMCoprobit * added other estimation algorithms for MCMCmnl * added MCMCpoissonChangepoint with estimation in compiled C++ * a variety of other minor fixes 0.8-2 to 0.9-1 * first release of JStatSoft version * full port to Scythe 1.0.2 (nearly all C++ has been radically changed) * MCMCpanel() deprecated * a number of minor fixes [thanks to anonymous reviewers] 0.8-1 to 0.8-2 * models with multivariate normal priors now check for symmetry and positive semi-definiteness of precision matrix * fixed bug in how models with marginal likelihood calculations check for prior propriety [thanks to Gary Rosner for spotting this] * fixed bug in 
how MCMCmetrop1R handled a singular Hessian [thanks to Piers Dunstan for spotting this] 0.7-4 to 0.8-1 * added MCMCPoissonChangepoint() model (authored by Jong Hee Park) * added two plot methods for changepoint models: plotPostChangepoint() and plotPostState() (authored by Jong Hee Park) * package cleaned up prior to submission of piece to JStatSoft, including major edit of documentation 0.7-3 to 0.7-4 * fixed minor bug in MCMCpoisson() that was causing the function to not work on Windows machines. * changed test <- grep("\.nonconst$", c.names) to test <- grep("\\.nonconst$", c.names) and c.names <- sub("\.nonconst$", "", c.names) to c.names <- sub("\\.nonconst$", "", c.names) in automate.R. [Thanks to Kurt Hornik for noticing this.] 0.7-2 to 0.7-3 * following posting by Radford Neal at: http://www.stat.columbia.edu/~cook/movabletype/archives/2006/03/ adaptive_metrop.html switched a < to a <= in the shrinkage procedures used in the various slice sampling implementations. * modified tomogplot() and dtomogplot() to handle situations in which r1[i] == 0. [thanks to David Hugh-Jones for making this suggestion]. * allowed for more user control of the initial call to optim in MCMCmetrop1R [thanks to Luca La Rocca for this suggestion]. * allowed users to pass the variance-covariance matrix of the Gaussian proposal directly without resorting to a call to optim [thanks to Luca La Rocca for this suggestion]. * fixed minor bug in developer mode in automate.R [thanks to Ben Goodrich]. * fixed a minor bug in the developer mode documentation template. * fixed minor bug in nonconst call in MCMCregress. 0.7-1 to 0.7-2 * added procrustes() to NAMESPACE so that it can be seen (function and documentation already there). * fixed the prior in the full conditional for theta_i in MCMCirtKdRob. Previously was uniform on the unit circle which was not consistent with the documentation and could cause problems given how the starting values were chosen. * fixed prior for lambda_j in MCMCirtKdRob (similar to point above) * fixed the the calculation of the full conditional for gamma[i] in MCMCSVDreg() * Fixed a bug in MCMCbetabinomial() [thanks to John Wood]. 0.6-6 to 0.7-1 * Added robust k-dimensional IRT model MCMCirtKdRob(). * Added SVD regression MCMCSVDreg(). * Updated auto.Scythe.call() to have any number of non-constants args; this is necessary to return other things from the C++ code besides the posterior density sample (including log-marginal likelihoods, acceptance rates, etc.). * Updated form.mcmc.object() to add additional attributes to an mcmc object to hold other quantities of interest (such as log-marginal likelihoods, acceptance rates, data, etc.). * For linear regression model, added option to compute log-marginal likelihood via the method of Chib (1995) or via the Laplace Approximation. * For logistic regression model, added option to compute log-marginal likelihood via the Laplace Approximation. * For probit regression model, added option to compute log-marginal likelihood via the Laplace Approximation. * For Poisson regression model, added option to compute log-marginal likelihood via the Laplace Approximation. * Added methods to calculate Bayes factors and posterior probabilities of models and handle log-marginal likelihoods. * Added teaching model MCbinomialbeta(). * Added teaching model MCpoissongamma(). * Added teaching model MCnormalnormal(). * Added teaching model MCmultinomdirichlet(). * patched xpnd() function to provide more functionality. Thanks to Gregor Gorjan for the patches. 
* added the function procrustes() that performs a Procrustes transformation of a matrix. * made some minor changes to MCMCirt1d() to help conserve memory when dealing with large datasets (MORE OPTIMIZATION NEEDS TO BE DONE IN TERMS OF BOTH SPEED AND MEMORY) 0.6-5 to 0.6-6 * fixed the std::accumulate problem pointed out by Kurt Hornik. Thanks to Dan Pemstein for tracking the problem down and making the fix. * fixed up how the force.samp option works with MCMCmetrop1R-- previously if a diagonal element of the Hessian was 0 the correction wouldn't work. * changed how additional arguments are passed to user-defined function in MCMCmetrop1R. This breaks old code but provides a more standard interface. * when logfun==FALSE in MCMCmetrop1R the initial call to optim() now maximizes the log of the user-defined function * cleaned up the *.Rd files for the model fitting functions a bit (more still needs to be done) * MCMClogit now optionally accepts a user-defined prior density 0.6-4 to 0.6-5 * added a check so one can run MCMCirt1d without passing constraints * fixed the Senate dataset so there are no duplicate names in the member variables, and modified examples accordingly * slightly edited DESCRIPTION file to get some important keywords in the first sentence * compute dinvgamma() on the log scale so it does not blow up for large shape and scale parameters * changed how MCMCmetrop1R handles a non-negative definite Hessian from optim--sampling can now proceed if the user flips a switch * fixed minor error in how std.mean was passed to MCMCmixfactanal * fixed the error in the beta full conditional in MCMCpanel that resulted from a typo in the Chib and Carlin paper * changed factor.score.eigen.start() in hidden.R to use squared distances rather than distances so as to be compatible with Poole, 2005. _Spatial Models of Parliamentary Voting_ * fixed a minor bug in how the verbose argument was handled in the C++ code for MCMCmetrop1R and MCMCfactanal [thanks to Lingji Chen] * fixed a minor documentation bug in the MCMCirtKd model [thanks to Guillermo Rosas] * fixed the text echoed at start-up so it does not have to be updated year to year 0.6-3 to 0.6-4 * fixed bug with verbose in MCMCmetrop1R pointed out by a referee for Rnews * added a little clarification to the lecuyer.h file about the dual- licensing scheme Pierre L'Ecuyer agreed to on 7 August 2004. 0.6-2 to 0.6-3 * cleaned up the docs for MCMCirtKd * MCMCirtKd now fits a model with a difficulty parameter rather than a negative difficulty (easiness ???) parameter * MCMCirtKd and MCMCordfactanal now do a better job of dropping variables that have 0 variance. * fixed Senate examples for MCMCirt1d and MCMCirtKd. EVENTUALLY THE SENATE DATA SHOULD BE FIXED DIRECTLY SO THAT THERE ARE NO DUPLICATE NAMES IN THE MEMBER VARIABLE. * users can interrupt all estimation algorithms now with CTL-C * changed the behavior of the verbose switch. Now if verbose is greater than 0 output is printed every verboseth iteration. * changed the settings when mcmc objects are created so that "start=burnin+1" and "end=mcmc+burnin" * fixed documentation of MCMCmetrop1R to make clear how data are passed to the log-posterior function * fixed factor.score.start.check to not return an error when a vector of starting values is passed * improved the way factor.score.start.check enforces constraints 0.6-1 to 0.6-2 * fixed documentation for rinvgamma() and dinvgamma() so that it is clear that these functions take shape and scale parameters as arguments. 
* fixed documentation and R code for riwish(), diwish(), rwish(), and dwish() so that it is more clear how these distributions are parameterized. * fixed line endings in MCMCtobit.cc * changed how MCMCordfactanal and MCMCmixfactanal report MH acceptance rates. They now report separate rates for each manifest variable. * fixed a bug with starting values in the IRT and factor models. If no starting values were passed, those created in factor.score.start.check() did not include the constraints (hard or soft), so the check failed. Now the constructed starting values meet the constraints. 0.5-2 to 0.6-1 * added a tobit model for a linear model with censoring in MCMCtobit() * added multinomial logit model in MCMCmnl() * added vote data from the Netherlands to illustrate MCMCmnl() * added choicevar() function to specify choice-specific variables in multinomial choice models * fixed some data-handling issues in MCMCmixfactanal() and MCMCordfactanal() [thanks to Ben Goodrich for isolating these and providing patches] * fixed the Metropolis-Hastings step for the Cowles algorithm for cutpoints in MCMCoprobit(), MCMCordfactanal(), and MCMCmixfactanal() [thanks to Alexander Raach for isolating the problem and providing a patch] * removed gcc specific compilation flags from Makevars.in (per the request of Brian Ripley and Kurt Hornik) * added informative message for templates created in auto.Scythe.call() * modified vector.tune() hidden function * fixed Neal's shrinkage procedure in MCMChierEI.cc * a number of editorial fixes, updating for new calendar year, etc. * removed functions acosh, asinh, atanh, and expm1 from smath.h and smath.cc so cross-compilation will work // // Old Changes and Bug Fixes // MCMCpack 0.5-1 was a major revision of MCMCpack. The entire package was been essentially re-written using the new development environment (documented in the MCMCpack specification) and the new Scythe Statistical Library 1.0. This following list summarizes major changes, but is by no means exhaustive. 0.5-1 to 0.5-2 * C++ code for truncated normal draws optimized for speed * with the permission of Pierre L'Ecuyer licensing of RngStream code changed to a dual license setup that is GPL compatible. Thanks to Chris Lawrence for bringing the licensing issues to our attention and drafting a new licensing statement and to Pierre L'Ecuyer for agreeing to use the new licensing statement for his RngStream code. * Fixed serious bugs in MCMChierEI() and MCMCdynamicEI() * Implemented a new sampling scheme based on slice sampling for MCMChierEI() and MCMCdynamicEI(). * Removed MCMCbaselineEI() * Added delay argument to dtomogplog() 0.4-8 to 0.5-1 * NAMESPACE implemented * hidden functions are now available to aid in development (see hidden.R) * a function is available to automate the C++ call and generate template C++ code for estimation (see automate.R) * all model functions have been updated to the new specification, and most use hidden functions and automate * added a general purpose Metropolis sampler that allows the user to sample from an arbitrary (log)-density. 
* C++ code now using Scythe 1.0 (now using the unedited Scythe codebase through IFDEFs) * support for arbitrary random number generators, including the L'Ecuyer RNG for parallel computation (the RNG helper functions are available in MCMCrng.cc) * many full conditional distributions are available in MCMfcnds.cc * documentation for density functions and RNGs have been made "R-like" * fixed some spelling errors and misnomers in the documentation * all documentation updated to reflect changes * MCMCirt1d() has a new interface with new types of constraints-- sampling for this model is also now much faster. 0.4-8 to 0.4-9 * Fixed a minor Scythe issue to fix error found by gcc 3.4. 0.4-7 to 0.4-8 * Repaired Scythe_Simulate.*, for which an outdated version was included in the last release. 0.4-6 to 0.4-7 * Fixed some Scythe bugs, including a problem with memory allocation for matrix multiplication. See http://sourceforge.net/projects/scythestat/ for the latest version of Scythe, which is now distributed with MCMCpack. The Scythe code differs slightly in the paths ../include and ../src are replace with the current path, and in pnorm2 the isnan() function is commented out to allow for cross-compilation. * Rolled out http://mcmcpack.wustl.edu website. * Mixed response factor code. * Fixed factanal. * Fixed irtKd. 0.4-5 to 0.4-6 * Fixed a bug in rnoncenhypergeom() [thanks to Tom LaFramboise] * Patched Scythe0.3 to fix an error in inv() [thanks to Donour Sizemore]. Note that this function is not called in MCMCpack, so was causing no explicit errors. 0.4-3 to 0.4-5 * Fixed a bug in xpnd() [thanks to Michael Man] * Fixed some inconsistencies in documentation [thanks to Kurt Hornik] 0.4-2 to 0.4-3 * Fixed bug in Scythe truncated Normal generators (which had been fixed before but sneaked into the last release) -- this fixes a problem with MCMCirt1d * Cleaned up MCMCbaselineDA.cc (eliminated unused arguments) * Cleaned up MCMCbaseline.R (tuning argument) * Set seed in MCMClogit.cc fixed * Set seed in MCMCpoisson.cc fixed * Fixed all examples such that they work out of the box 0.4-1 to 0.4-2 * Optimized some of the Scythe 0.4 code, which provides faster computation for most models. * Corrected a permissions problem on cleanup [thanks to Kurt Hornik] * Added explicit licensing information and a text echo when loading MCMCpack. 0.3-11 to 0.4-1 * Ported to Scythe Version 0.4 (which will soon be publicly available) * Cleaned up the codebase and documentation (changes will soon be part of the specification) * Added vech() and xpnd() utility functions * Included data file of 106th Senate roll call votes for the MCMCirt1d() and MCMCirtKd() models * Added Dirichlet, Noncentral Hypergeometric, and Inverse Gamma generators and densities [with contributions from Kevin Rompala] * Added read.Scythe() function to read matrices written by Scythe [contributed by Kevin Rompala] * Added helper functions to make coding easier [contributed by Kevin Rompala] * Added three models: a K-dimensional item response theory model (MCMCirtKd), a linear factor model (MCMCfactanal), and an ordinal item response theory model (MCMCordfactanal) * Added a pre-processor command to handle ininf() compilation issues on SGI [thanks to Dave Henderson] * All MCMC* functions now only allow starting values for the first simulated block of parameters and use check.parameters() function. * Range checking is turned off in the compiled C++ code, yielding significant speed gains for most models. 
0.3-10 to 0.3-11 * Fixed a bug in MCMCpoisson() re: non-negative counts * Included a data file of Supreme Court votes for the MCMCirt1d() model [thanks to Simon Jackman for the suggestion] * Fixed memory leak caused by Scythe_Matrix.cc [thanks to Dan Pemstein] MCMCpack/DESCRIPTION0000644000176000001440000000211512140120720013362 0ustar ripleyusersPackage: MCMCpack Version: 1.3-3 Date: 2013-5-1 Title: Markov chain Monte Carlo (MCMC) Package Author: Andrew D. Martin, Kevin M. Quinn, and Jong Hee Park Maintainer: Jong Hee Park Depends: R (>= 2.10.0), coda (>= 0.11-3), MASS, stats Description: This package contains functions to perform Bayesian inference using posterior simulation for a number of statistical models. Most simulation is done in compiled C++ written in the Scythe Statistical Library Version 1.0.3. All models return coda mcmc objects that can then be summarized using the coda package. MCMCpack also contains some useful utility functions, including some additional density functions and pseudo-random number generators for statistical distributions, a general purpose Metropolis sampling algorithm, and tools for visualization. License: GPL-3 SystemRequirements: gcc (>= 4.0) URL: http://mcmcpack.wustl.edu Packaged: 2013-05-01 00:37:02 UTC; parkjonghee NeedsCompilation: yes Repository: CRAN Date/Publication: 2013-05-01 07:02:08 MCMCpack/data/0000755000176000001440000000000012140061656012572 5ustar ripleyusersMCMCpack/data/SupremeCourt.rda0000644000176000001440000000073412133644107015723 0ustar ripleyusersMCMCpack/data/Senate.rda0000644000176000001440000006060212133644107014505 0ustar ripleyusers
[binary payload of the preceding archive member omitted]
MCMCpack/data/Rehnquist.rda0000644000176000001440000000324012133644107015243 0ustar ripleyusers
[binary .rda payload omitted]
MCMCpack/data/Nethvote.rda0000644000176000001440000020425312133644107015064 0ustar ripleyusers
[binary .rda payload omitted]
æåºúv½[þøäòòäæEâxvòæëÓõe‚úìÅõ‹4öÅåÅút}óæäÞñÅ‹uê|vsýö<ù|usr~ò&¼99{¨]®/O®v.Ï®¿Yß¼¸Ç{üæâòrGöÑÉëöÿÒÐÅÕõÝûu:»9¹x%:__§y/înNï£óòâí^­oN.æg§'ïÑG—w¯NÚ“–õííå:eäñùÉÍë£/No®ß]Š­Ü\·Ñx›òóùéÝÕÕÅU¢ôE›£ë«+±³Ó›õÉÝ÷Ö&7ë»]\®/Û¬%gn¯®ß¥­>ysñúîòþ`<¹=¹9=¹Zï_¯[ ‰ö£×ë››ÔùüäT¦âÑåú›Ý޵û}“œ|ñ®¥zûöúj}Ïèìüf—ëÏ.¯ß¾MNnÏÏn®_¦þg§×»Œ>>=¹;»Ûíãôîæ~Sç'¯Ö»plXîBýèôæý=ÔÍúâÅ=Çõ«Ó'·o.Þž÷*eïËË“»·ë«6 iÆ—o¯on.ÎÚ­ì‚Õ…W'÷göÉ‹ë7ë«‹³Ýø›ë÷Wç»ñÏoÏÎïÞìNàçëïZuÜoa}¹ Ùã6Á7'»¿¸¾yµsóøÅúÝÅ.–_|sÝÞo.Îv¹¸:¿~¹ uËçõå.ŸÛm^ïTðîý½ÌÚä_µ›¼{³ãûõúìíŽï“³ó“—/×믲þî'BnÖëÖÏ?œ¹W;Ü·çw7oîSùù‹“6—;–ß»>EæÑË›‹Û·÷K¯ß|-7G+Qüâcõéts`wÇ©=ow‘yò½õË—×"â—ë“ó÷;ö×§§)„m^®vû~üêúæíàq[KoNv«NßïêÛÏÝ\Ÿ½^¿\‹"ódS_]ßׇ××ç»ò·¾úA:)7{<¹ýXí¿óךÅ=Uý/õ‡?ûáwòÓ¶î·½¿ºùüÿU¸B}0®MCMCpack/data/Rehnquist.rda0000644000176000001440000000324012133644107015243 0ustar ripleyusers‹í\Ï‹\E~™¬&³AYЃ""dñàApÑ€‚‚²ñë˜<Ýì ™™ä¤ÿ‰àÿæÝ£'ñí03Ägʪúê«~/Ò;µ¯»~|ÝU]Ý=3»çwïx|ÿ¸išI3™v¯7»_&Ý˦9éšéy{±x|5_ošææk]ÃîçÕ®ÿ÷fGŸþјh/×—·êKòQ}ÔÛ?JÒ¼²ýkó6¶y°â²ö³ñz):¯}4žQ;V=4nèsv~³òÕ*—•ÏÙuˆM¥ê¾W?+άñŽuÍÚWXöÑxe¯—¬:Ç®lœh|Ñq³÷IÍ/«>hóÎÚ½¤­–ý¬u_úþcõÿóŸ×¯·~³>ïíôÛQy o©ó!þÛû²v?ÑÖGTO³‡ê÷ñ÷©/¿ÏG¶_‰[ýi¸Ù8Q?V½¬ñ”Î+¿¤¯ÙCý¡ú^<}Šæ{ܾ¬yÏÊ W)ÜYu¨´¿~; רìDíIzÞý³O,\V}¯ŸÒòÞñ UŸÆ¶¾£8údÕö—ÊãÒñ/·¬úæmKÊk~QBÏY8½q`Ç_ÃãõË>Çf­ÿ@ÝÿÇ}Ù ¿%­~[íeÕs/.¯‹²ö5«_ GÔ‹¢õ•ÏŠG©üÊZG¨zžAýi~£uŃâ`á-unû¿ÐØê»N—ÚoPªŽeé=߬{&*ï%ÖýU³]O¥Ï­¬sQÖ¾<•ª×¬s:ÿìø¢÷v¶œô¬µ[‰°o>÷ûØš}Mž5OÙï§xí±õúò¥óU»¯—ºÿXqhø¬ýÒ³†ÏJèºAýy Í›Òu,;žÑø£÷ôœå]ÑzjÅ“}/ÈŠ+Û¯õÙëÏ»?h8XûÚ/áɪƒYuÝØõ­ËÑótt?@óW–ž¦ŸUP»Ñs’d/«¾Hö†¶Ó·§ùAq•^ÏVûV;^(úÈu_Ηן§Ô¯ zŠÆA"?k¿´Ú*îV|’<Š+ºÎöÏÒ÷¯½óا¨½¬ºik¾Yõ­Ñù‰¶³êµæ—]²æß‹Cê÷¶³xôÿcHTªÞ±Ö¦‡WŸå¯oG¢¬ø°ìKxÑqXqjö£~Ѻɪg¥ð³ÖŸ×nGq°ôúTjœš_+n !ïþóûØ^²Öa©Ý©_“Q({hœ¤~6Þ쿳õʳÆW*?£~P}m}…¼u¨”k_. ïØâVš¼q`Û•ä­ç«Ý±Æy¨:fÕG÷ɱ¬Wï98ÚîÕg­+ë¹ÚK¥ÎÑs±ÕvóÒ‹f?+NÙy=ÏgÕO¢ç~¾¬ÝWîí®~$/Ù·>{ß'±â—üZý°âç=ŸEÏèù»†3šOÞd¯«¨ —fWêâ‰ÖQ¯¾½,ü¥Öiöº‹ÆEóg%öú.Ö8$};·}lùhõƒÊgÇYk/=ßc«OÑqIúÖö¨ÿèzŽâi”ïcKįùË®‹h\­ï'XýYçŸU_¬öXr±óÉw­Ÿµþ£úÞ8g­ÓèøX8½ö5V<š_ïúÓˆ5ßhÞ÷ë^Ô/+ߣqDë”UŽ•_,»¬üA×'j§Tü³æ­{Q^<¨?ÍnVÙÞòtY%ÈÔ/ƒqÕ¾7n%‚DEüžÏ\ Ñü—ž«!O½uÞƒ;ÒîÓUg׃¤såM룆öøàÀ'vÝG Ž…5Ëþ87«;ü]`ÐuzŠÃ³ÏŒõ\êÇúñ-o*Abcëðî´õ@×çÞÒ¶:$$îÏþ) çýë?~¸<ø$З‰Èú$oݳöa`¨Z&¦¥µ‚DÏöKß> ­‡»:{{ôüðf×Hv¶ÌhÙ ´GiÍÛ‡x ‹ëŸµkè¾Ú¡*ïIA}`WÅÇ@ÿÉX wâ<®KÖ{ ±K÷äg‚äœ5ÇŸ¸®4¸¿`Ý`èh{çãòÏ€¾<äÛ3ArìÈëì'À}¦÷bO¯ºØµ‹3~ó>Hþ.cL_vræ6omŠº^ÃîúÃ@ßÙÚ|¾ /·Aáà“ i(:ìæ¡cþ÷ ßÝ¿½§1ó.Î)ÝqÕXDNr“£«!ª&[Qã@³ÁAÄ@,šy„ˆL[-wºßÞ$C[ .¬A8¿¤‚ï^(OìÞWϽ¯ž{_=ÿ×_=ÿH·þ ìcfÕ-MCMCpack/data/Nethvote.rda0000644000176000001440000020425312133644107015064 0ustar ripleyusers‹”»y<•_÷7~ÎqÌó©éÈT×ÿ<‡L5§¡¢ýÏï¤?ÖRó¼ÔZjYÿ$ÂÿŸGâ2Ÿ›ÿMgžG^‚Žš‡¿ÉA-+‘°<¿Ôv§¶ÃRö^Šj½-§«å|êOÕü?kHT¿ýIKþcPŸO½f9]ÿ7ý3ä¿Ð/5_ÊK¥âãO¹þ”ûO¹¨ùY.–ò·¥âDXZ–åâ‘Z¾åÆr2“–Øg)û-Çÿßðz¾&Pó÷§î—“íoX°Íÿ&ö—ó™¿Ñ/'ïéùóMjùÿô¥|~)Ÿ¦ö'ê|Cíßÿ/1I-ï“i)lYn¯åâéϘÿ›Þ¨ã’“—ã‰Ú~Ô¹q©|KÍÛŸ6Z*†ÉK\£¡Úc9¿§¦YÊ'©cšoêø^.©±)›QëˆHXZ¶ÿ¬]ÊwI j|ýSÔ>±-çòó§ Kaµ­–òåtñçzê|AëË“Dª}–ÒëRqK­ÒûSË¿\\QûÍR¾¾Tÿc£¥æ®ýÎPŸ»”þÄ’åè©e¥¾FíÛKí·”,…×ÔqµTô·ø¡Ž ꚟڎ³ÙŸr,%Ïrº¤æú>ÈR9„Z¿þ¿ÅÙŸµõRvX®GZÊÇ–ó™å°æ»n)¼ý© —ª_–Óõµ¥t¾îþ·\¹”/Só´Üýûåî+.‡ƒÔµéßj¼åôôßp:&þŒŸ¥ê©¥üo)[þ OˆËÐ/Å35 uºŽR×K³”Þ—³-µÌKÉG¢:óoxK-ëŸô$ªý–Z÷oyþ?ÏãèlO9²u¤Ì8þMñÏU­í[ÿ3Ý®¬üï)y¿“Åÿ½lh¸j+ZsÛ“ŽÿÙéÿîoyÒüÂYÊl2ØþyÒ‡´Gï°} I]w¨%=FAs5ñº-Æ(èJ'? 
à ¥ëÚx«)@'ƆøN´‚Üf'øM|D_ÃÃ-¯zŸpÊ„öuÕϺk×ÀWþ”½ ,*8¸…‡èÁlœa¨x6Ò^çWûmÝ_Þ”D꽈OëZp í ‹³¼'}‘ÇVüm¼`a\>q|(ê~’ Ö„¼÷¹•7GÇBµïI—5O ¢l·lø¹uíÓT“¹fú}Rî KCïïîsço¬D“|¹GuéH »ýˆ¹ÆÆâ×kù¿A±*½íë㜑ƒQÞ¥—9žsçŸzž†„Sç-/D‹£Ä –®\Ln ú&é'åBøæ‘°ºY»ÖÂøï›)›´Ê‘ãeFéKŒÊyDˬ…ðcÍW«dîäÃ.ö2KiÒ¨ÿîÆ~å¼)®p.M~!`+’^¦=˜ƒæsU"U‘vtcŸÅ6˜[E~-ÿÎ |°3½gŠ\×¶ê#ÃŽà]R!,Üã4[(ƒ‰Õ'ZÈüëR-iè"&«¸èYÁÞr!Ì8ó &ôŸ'Û½f^íQ¸yuŒyõÏ?ŸGÞ'äF³R(Ý ›á{ײV¯%E¹@û«=RmÐ!Ûžþew6Œivº\½†Œu&ö³¦'£jÑ‘KÇyžu‘âÕ AƒÎ˽í'4ÈÓMÛŸ±‡Å÷ⳡH¼$ï—rc²oì½fɇz$È0þm·§š6rû„XH_Ü­EÙ¬i PWØ2\ e(°Pwïø¹1¨äY—Û™&‹·ÉquÚ÷”[heBvõeB¿Ï7蛊o²ý†,|E)ß*}`Î#Ûžu&‰Jñ±#èÃÔ™ò &´ nÓž@ÞKvœWõo!½¤ÿ“RÁÍÈÚðàêáS£0ÓsÒjºí8 ½Ï˜¸«„œïr7…Nö!ú”ÞH8´™Û+ËÚë‚ùt¬ÏB¨.Í |qPµYeÝ£O$$WÓ̵e"÷Z19õa–éÓ,®¯ûh'}²‘/ÎÊvâ¥)Ò½zR%™r é†yøÙÔ®Á”Gݦ!1Xé³fÑÓ/ù„D“‘ü1åBíÈ­7Í2Aöqúô€“kà¥v²^h ’¸ùö„¼ƒLîÕÊÚ'O@ó#¾…:ÁðóKíýš\¤›M=Ðø¹~X\)«ƒº&Úê†÷’>8·¦ÊSz¹%Dc>o€¾ )å›úHrkúkR Ð@÷aLf,÷æuÁ YI ÃÆo|d˜C²•ûàá®"˜ K[ë\€Ú[.mâ!#WT¥ÏÓ~#˜*WÝÚµ º¹~¿¹ê/_.}ôç<{i[Ù¹ƒÔ¦ <ÎqBxö 9Öw¹ÑhÀð3>ÞÞ™‡P£y™`û fv?rÙ)ê ÷¿Ÿû„·Ÿƒz$ÏÃè¡£ù±ç‚¡¯ýnÁŒòÌϥ߳ƒ8ÓnÚÆ¦P’}`Ô*J,iløA~×ûjj¤‘éì¾…ì:o_懾Ò8®c+ù‘øÄ€ØXÎ ­ìIÍ ­âP?ùq,ÁÁY݊Ϻ¬B‘U-¿_çô·’](Ÿ7¹ÅÇ­ v"­Rû…âCPcÐÂn°¾ÊÎsqL‡ÛèQe‰â}a(ù¹«ÅåäˆØâÔñ£P rZâjî²0|à_µ÷’½š§=¾|…bÿ©çFð[û ‰¦ÔU›F ÉeÝî[Ýð‡áâSÓè}’_êÝ&?Ýâ=í#÷w=Ù ëÃ^4üŽô}o|7s‡AãRÕ‹H³Z±’÷æ/äm|Û@Kƒœ‡0ÙpŒ>¿ôN¬ÔP†ÚW1Þ WA—¿´•‘+LŸží´õ†±ºñ;—C¡Is…›o7!F"SP°Êiw¾=±Y¬lk~|~9¾WeQ€™Ç= ŸI¡Ñíºj(Òdç}ѯAš(·“òƒDd?#Ô¡tðLUÅ›òhÂT¢CƒVÿc˜Ïn1º|¯Z/kcI•t±W:¥ õžär’t“Ë…Æ+Ÿ~¼]Vø†Î¥ùÉv²ÃTüã댑ÁÈ/±[éöeF·Ù™xèSÌ$U¦o;Ä÷N¥Ÿ\ñi}uKF!…üþËé‚d%É{­Ö}ˆìIoßöT p‰cuß9²×ûÉ}+Dršp˜Ž²6”è'´^bØãõ6[Ì4NÀÐ;Ú‘ÛºªPÂ|3ìnô7Èõj¿×²f54mz&Ÿ·­R÷7e®¬B†…Ü¢ÝÐú9Îpæ~$Šíaûˆôû²\÷*~†1½5¾phú.u\SX 2¾ì=Aü6]÷Ï”%GìýMÅ«ÊPЯPŒ6ÂJ šô;Ü+pðð­ë~fÈã8;ÝLbƒÜwèœÏ”‚oG?¦"‹·J›CmLÒ×é_-¨‚ÚuÍOš~ “3*¢Êmö-Ž¢ˆcŽYMÚ[¤1[7â´³òÏ~¹nEÂw6þ¡O(çɯ9B+õ÷ÚÝWÆÆ•aãÛµ*H˜}5˜y¾Ƶ½Âu8¼`h]ÛÓ¡ée°©–rŒš<Ïg¸RÓ^Ÿ3É0Ûb Wô Y#g³](L“ê¹ £;l2úœÕ¡ŽF´/’| y-DfZîÀÈmÐ˳¶‡ÔpgNÕ–‹ÐÕwMm¸[7ß/,= ù·Š~ Ñw#M;Sê‹VSävú.í$Ì“w‚ÅÊ«Ž#ƒöðXÄ YäÔߣæï´Y¦¯8¥¾çªïÖd„ÁäõQvÃD$–Ó¦3IôÂH ý#U9(à—jûu~’ÇL +¬‡ôçêGì`ôÈ!àOî0Äs6³^„ÌMöïŽý˜Grėܳ»4 ã‚Ì`F6´oQ0…Úd(j"‰m6*‚2“H¢l'<ß í¤t{¦3ŠDaœ-üa‹i Ûš;{‡gaJ3ÖA!*,™o CSn÷À¹$'22ÓTR‡ëŸ‚mα„ŸÓ,£„:´)÷¾Î\ ƒ6Öönfu0Eû¦gåx4t­t½|õk ÒMék²LÜ¡ÔOú›®«ïEq·ö›.“áÐ>-a§Ø°FÎveK=A‘oJ»,¢$æäví7òš(âN×éG¹W¯É_¥Ì¡yáK½†1r³¯zöäJ-4?fU ¼ùšù#¹DS·¹–¸mBú|o¹˜F ÐÊNcèàiÎ|ӎ—wg8mH‚Æ S¤(õÒÏMSO? 
"A-­·f;2þ¸®7µ¾'J ±.¯ß¤Ø Ží&léW®ëx·ÁXÚXØ|Ÿ 2ŠÑxG~ဉf–—.ç!-êjÞê2gè>úÅÏï;ôÛêß u-uŸމAS¼ªŽÝ¥($pؘf÷(Á˜p·§J dXwÔ¯ÔD®ÔÏÅç²5‘UŽ+¥ÿµ*ô+WÍt†™‡oÅÖM@žö\.59#"é1,…g_‚]#LK§,n…6ÎâUÒÅMIý°Y ;ˆùs{Ùx‘­Ó“çµá0]®ŽÙ­ú‰<†Dƒ$È×:{žq@§œ¬ä™&gèÌY›iu†ú+7‹€~¹_Œå<€´ž.Gƒ7ìDòhéïOG`|.j>å­ÒnÜ8çUøW¹ÊêµBššØ‘{‡à÷›ÍJD Ã輆l¤8<-N†ÕÞB¦Õ¢L¥éÕHZq;¼õ=Lí×fqö¦CúíÓ¯%l(ùõ„Hœá>dÙ·…£î'$·Æ›±3ÃhÖBg)s 4=º’ý4y_‰®~KBnf±‰S{¼¡ŒG"Woój¨et}'=¸†íÊ.5'÷Cž__ŒÆ†˜¿è+ÇR¨ UÌBY ó{ G…ñë“]waæÞõ­¤Å‡P!¬ïôÔR9“‹{vÃä[:‡¦d˜1Oºý-Ú®{="F #—¤qûÚ¶Õ0ÆïO[ÛÂÝŸ³'Ĥ[ö"íuw)$¥üO¯8XÙE“SY} ¥*0ÏɬžyÂG=+„Ÿb‘ÝŸM#KXwÞÆdZ“MŸôî5 <ÿšµ»¹9ý}EDÃÈÖ³^¡¾Ð¬Y˵©©ºUf\MÖ"£»†@MÆx»éIuˆ™($×…ª¿OÎGâ½¾¡Ã™ÐÒ¶ÙÅì€<ôŸ-Ù¹qßgd°~IOˆÜФÂU¡¹ª?`Jõñ5Å:è–ÿQ·­:ýy6¶ÖÃôÉ5‡*ÄÞ"GÃnM¶èZhâ“ýì¿&Ë]`FDTäœ3;´ÔïèˆâER1CúÙ0gè²ìí|ºî4ôT— =…\§G$å¡ÔNÞvïdÉÜ©òQ fÈ¿·^ä5„’µìº&AðuÿF1u×7HÒ(ݲº2¿TWÈgCÛÊÏ/E"Me gˆùA¨ÌÞ·.y'Æ7Š~öŽg_?Ǫ!wa¥ÃAÖ(SC÷¾ h~Ös©Ì ê¯t1X݆LÎي韰àóH¦çá!¤ ¬ÙyŸ3~x9id_‹ uÛmºÎ}Ⱦ-C_ü–'T¾8qÍoÃ9d?–”¢ C¹×ÕÈÃDoèÅ—aÁÚ鈸!ŒKITª„S"Ûå¯CB®I“ºU-´ _‘…0“|N̉¥i×Å‹EÖ_„^Ò%­›fNÈœlÒ»b… µ,<üv J;¤qÂÉ8ÍËî:0¼[ÑxÿòŒ¶‡¸ÉDÁŒ÷S«ñ/Fi`§˜ ôàJ¯'V,£ó³n¦Äô6¡³L '§TvÈõŠ£è¨ˆóí\ÈÇC;ß­ìøÃŠÐZ¶ÿ¬¼²ìTÝ÷ Où>nó»ÜÈSe¾è\ïóž‹™"ƒñ—Û+ó§¶ËȶÐz9³®è #Á쪙SŒ(ÇÞÉêXD†šYî JrÈsA[ÛÃþ!ÌD‰W§ Bkl£w߆~J¼¹Y±ÉqÃt‚QíµŠ”þ¹rÖ¹ÁY®û߬’…ªg/jüŒrÖls—5P™¨kè9J‘ÛûÒÙ—k˜ _!kö’± J¸ðî=nŒ´’žæî±PÜäùu‡0ÔUÛýX¼<‚ìÏ´áuVÝÁ6ÿ¬¯ô¤·<€Ì³}Y>§‘ðºVvÓÉ ÈõmA¶çôc˜Q«K¼,rš‚Z¯èÓ saÇþóï¡¶û×Å„dhù"¾^xÎRÝ="zjÝa.àÃà$t(îһ팂œ»ø¯êä Hž›Ê曃ð#%{•ík˜0‘}0Cc£é[f8J“ ÷L}舴ھܰcâ!tou3 ö€¹$Ídþ+°óRÅŒÃäÈ”vÚ³ì;ýžUÁéHóÌE$Ž#9G›•9ŽkCᦫ·Óí`üvnÿ¸†;’z(Õ¥ãWhò«¨lsƒ&óBe‡ÕÈ]#_o.Jƒ Í›G¿"«A÷ y³ LÏ«¸ ?(Eâ3ÆÌB‰»(À¹øE9ü 2dŸ͸ý z¬wšd™]„£{¢µô{aáä ÉÀÊ9¤—ÙÁt¯ c»Ìš³2žÐ²×‡ú¸YZé¿ÞkFÚÏg÷I"k‹Ü ¹:$…Yý–—A&Ö³ñ°(ÆcwQï$¥wعÏI’F‰¹W‘V¾ñä*zdãsY{ön)ŒÇG¼î P‚f+‰]/5‘çðŠŒð×&ÈxSêH³HÌ_ýâ*¸þ Ìù|¬x“ÞºãßÖOƒw²òØ ?‡yÝÏ{BÜWõžsœ(´NgµÛ–<È5Ñ`sl^ Êí5)ù‚HÞX¾w±cô«Ó_WBcëõ½Û² éÁ¦ÏÚëïAÏ篃“{ ãeW¿æÒ…Sí‘@kèøF89Ķ?ˆÜ£ãk÷»K–Ãâ]½Ñ›‚³¿ôŠgeXózü¡ ‰ÅÐî¥âÇå-l!Žr!érÖk¼™‚Â|_Ü…Fÿö4ÝO¡qËáC†ÏC 3°hÜIm”3ÜŽí4/Eáb¬\T™ ^û]†0i©VÑöò&Ï´Üs ‚ü‰–„á£Ìã„W(J­£‘x•«[º‘žIì¡ 2ÀØ=EGÁ2s=ÿAÇc'r´ ^-epBš¿{8Ý¡ZF,&ÕâŒ*öä˜Áø/NÞ’i˜|æIgñœ2mò]žÖªÂôuºâò ]Hw`ÂÄno3Ì.Ê„˜]@s— ÷~û`ðšÔT¿•$L©­TÑ|‡„£¿UÕ«‡‘M9éFƒ­"̱Î?Ÿ‹í†®ví¢@š­È¹ªxán LqèW©“˵ú¥‹˜Ó `쬇>c2 ®£µ=#!Ž Ç G+[K»hÆæ6jÚÙ"§ø×)-G¨¾drk¦yèÂj>|AþÐ(÷GÝÐ:]_÷ý?´rr4Ùˆä³q{{µO@ãæÃ_nLÙAá÷äÒ ^=$ªôè ”Ü†Þöˆä¸ûPºþtc¿Oô3žI:_Lت•¿¹Ã„•ÛbŠI+L¯ØÞ½E$^YxJXCfƒ™‰·™Ö(nÌ þ’-3†ÏÝ‚º‡‡ìöAÞÏÙwSM 3+t ü®2”±Z qìAÒ¦aaÓ÷Oã…hc[•´q§<³A:«G÷9ÄŽÂÌT¶ìªó0tLºÓ˜¯åv·‘%&¡íòä·NÙ\èhֽ¯«ƒü÷h/©jg 0½“dúÒè®:}¦ÜJí«ŸV¼€Òç»XÃTùHåîøô™ý陀ÓHßéµÓøk;ä2:ÆEyÀ`K&Ýì:ïõS~A‰7·7š±@åµwâ Û a”ø=çÝa|v¿U+ ÷ôQVÛbVˆ]ñäË‘KÐb¸JP­¥šX¯Äü°…a‚ÓUÓ,Ëjׇ‰¤›×®6¯€VˆÅEi(›d}ÄñºBÏ­S{Á•y9 i<`ÖGO¶nþr¬ï÷Ï Sä¡Ô»û-¶ØMWC¥Ø›HOo!hϵù¦ª@©6'˜fi&!G¡Œ‚›C,Žueí¯ºï…?žy+u2wUŽ/(@Ø!}w3-´˜´Yþ4ÙOñÛƒ;£ù,à±ÖByùª×HÜú°_׊÷;‹Í¤/Àìa÷3‘6·aôbY·WòJ|º-Èb¨û¼bÛ7äœËÓþôùÔîß}òœyX¬ì ÌÆ‘F*i›N¾Ì^ÿ.s² i¯8LiKÕŠ™Ÿ±E։Ǥ£«àÇ7Ùçã)Ð&¨ðªùm ÒÝjïyK^ “÷ÃVÌTþ‚¤‹¸¯gv!ÁZjFˆ…ÒרÜ!ÐÁ<ÏÔ.i¤}_ÙR¼ýüîßòn?²Ó Ûh ¡ï+Ãâï®hx ßÔ7"éãIûØf2r·_Ð}­ú ³ù#÷œ' uxÐó%ÿ¯êz:rÙ·íáò„!Þ\þFêp¹]àCŸMræ'­ ‚9ÓOÏŽð-e4Κߺ~øJ¤Ša&»nÁQ•™šæ½ãœï¸w½Üã“P²QÅSê|Œ¾Ïdè€2ÿze·­ÐõÚ»IôÞM¤Ób¸|Á³ 9ŽH[ŸÀÐøÜ¹Eh*tr«3@Ÿó‚o^ƒŸ“kü|:2‘,çhýÎú&²]Þ8û] ¹Ômˆ«ƒúÁÿîèç‹ïP`qóÆõièí½úCç±´}ûŸç.âŸO0¥~ÇšR"cŽ¼Ê®Á.hÙºž£»^R=¦ÂS½Ì¡Ør:nû‚$}! 
w¨7;zái º:•̇‚CçU®¤C¹¦NK—'’ÈiëëùŸCIÐÛߦnÙP*n«œ0ÒãÝ×ÄŸÍ@ïmÚûô_P z“xÆf)u½ï\üÊ ^ë´®z >£³Ý6~Ú\H÷AíèÓ0èŒô«¬ËæAâƒ>N¥§‘]&Bàr®ŒŽŠº°W¼:ì’…,Ý_™NKÃäÞ8ó”TwwÞb ãt Fß U ókô¢Ýñ9øúŒdÄè}æ1‹§d¡³Ê òyâ}˜ë"™}(BÈ,=3* Á„ŒµcW|¾WB­¹ç¡ïÅìPÙ¸’¶Û™-úyÊ õs¾ï7-ûFd¯fqZÏ@‰Ãg×ün†A›îé#èIˆÝØ´wì»EÁÉ=çWÜ`†æŒë*£Ÿv@±“èí’ 9¨ù6Ú¬úý!´>EdL…õ¿Ù%ä/@“Kêó…Pã^° êv ˜:4íuÆ;_Ã`÷¥Q÷'g‘¢r.6À&÷]éœ |júÇŒ‘ȵ¿Ïý9t\Õ?šáxI¹õîí€o¶± -`¨¥SBIúô„°¼ÂÕíö©†Ÿ@Ž+q-§¾èÀ@’Ô4Ö)ˆ°ãœ“mBÒþ"ÅßOM¡n*dW´p T3¾ ÷Z`Aâs›Ó2–´PâúRM&ˆ ,ºRI ò0_Ÿ!ªs² º%k*÷ý6†ÙüÔšàŒÓÈ »ãbÕ¡ék´›?9…[Œ¸[™Ë(ùH”Aï¦0,þëÆ5²k•OÉVG¢âÎE©GÈ7y^{4½Å€Ÿúì¸îÆë†Ê”{0{‘¯ ¬¡i[é¶{¡‹Îi¡ö¡s4š!üÔ0,L>´NU¸Š¤RÛô<õÝ0A¿Û:Aì:2¸NŽŸ½©ƒ"›Å_÷ÃLͪˆU*üÈ¿ïxBÇÜ Ò«%4¹!_/ß»tû_HJ³ýšþµÚî=wûÈ 7Ûâ´é†6.¡5lÖ%&v‰}E†¹=|Gè)ù7®¬@Djijx9ç  w[îç 0#²mb<@ µŒºvRêƒÀG3cRžH+?c7˜#Uso?Eïƒá¬ê÷Ç^lF↷[è~åC^öE²ygd¾9 ¦ ƒ?Ç/©CK­Û¡a=edïÊîT½ƒ,Ú7Ù¹ƒ "¿(îý]˜oIø¥·ãŒìð?pTjÜuôîªBž_ºßï9Ø@^»:J $4øl?dbæ ² “”aÄg#Göþó@¹­\M^+x@I™O]Zywi8ë—*!'ËBlHg=Òeú¬%3As½GÁs’ µ9§»ÆÓZ íÚ¿Þ€ª'è¬;‘ܽ¸ ½*ô’è˜ëc¡òèÕWßÏÁHÎDáBå&¤§ozÁõZn5(½©KFš+wEï¸!C@‹™ék$vç¯Zo: ³d'uh{]î1…ë6«; \E×Ná0RlÀ,ÄîÁ; J4«ÀXÇÅ ÆÐ;Ü•˜äùY+µÆv?®‚1ΗWŸ¾å‚Öo¦ŽŠ>!Ý¢%«NBç_®OVIÈ„–¯Ë³Ò ëÖ99–iU¤µbx“k› CÁÄG®¯`˜)Kƒû|à5sR¾Í©~-Ù ‘ï_Òì›õH“ó]’ëÀÄlªí§Ïq I–il”zqJ ), Š]æ ß™æÃ¤“.ó†{6ÐA(pú£Çdcã6rCKQÂ÷ì"i4MI/øaî·,Œú"[‘®Ñ­WúиÁds™ø˜È»àzý„ |ÒÙ}1ýé=˜´çUÎéî†ñÌ?øGNÁxÑ€A]‰²©ŸÛᤈl¡‘kaÀ!çÎ]&ÌÌœÛbèÒu:"¯¬)¸VUd¢·ÂN×NŽI @3mÊw¶È·¨àËѢĕâ¡ööð[°ð¼,`3¥>ë¨g¸×ô6‰tõ6М»<÷kd«¨DÄÍú çÓ•r£(!Z¼»^od‹æÉÔN$Á¿Ä»ñó w gåÎ:~(:A_4í URŽNC’–Ð5& ?ò›·GÍ™#QúÑ%ŒÞ Í)V&&IãP±¥¾òî©ùÒsá«Hkᓺ6KY9ãl+`ðã7áÅ”f(5imz¾ *¦7ÿ -+‡ÉIÂÇw–•zñk’ºûÙ=ïò‰$˜íhæ=².Fò´Ûè{  ýæ_šo ×·aãüñHÏžìúÂʾdéæŽBFÍþ›WÅ×ÃŒ•ÛŠßPžýÎlF³å$†4`4 ·¦ü:Ôm³ÏRxqê¹õ„qZÆX§‰ óñyµ{ǸTûd4ŒGÚí¯®oè¾£Ï6'çld‚ÖŸkEen ˜„»mv…î”2í“ ý0`ãEk‚/ðD”ÆCSˆÏ÷C6}ÈC6ÑùtÁ Wõ–ô‡ÌÇ@Uä£éóÁ”ú-Œ÷A½òŒîj'„Âx_S?ï6$®àKŽ–6ÆUÒ”.{«:ô„É™Òêí„ê÷»·Ž¤F@ù}ç+iê\[hà›ä‚δ³÷¤­‘5ÖÏ?ãév‘š+› PA®ÛO³¸~¬… ϵ¬§´ shÝé¤;(9ϳ_P”†'ï½/òþó,žoQ‡ïÀM[~ @»â÷U™7 Q’ž÷¾ÂÂ/Ìeà9‘âAÐñàêågº0Ý=ßîx/ 齿ÝïEN~Ÿ«­¯Ö!'C­¹µçjÈŽ¦O’+Øc/_™U$ËÃÄN3 ç5Ç¡¿Œ Ÿ´þ 2ð½|z¸ZlîïØŸï„„¼Ãn:F0²u‚­Ý‰zŸ–æi#¹-}WòÝ ä‰ß¥r4© ªáBãlY(’|gW"Í4ù\CÔMêî/¥ÔUó×Be‘°m‡~D× ˜KØös6&fõÂ{ÙªPT‰¬²?!@nÏÃÕÐÿ”†N0ÏÆZß½`nE–f:&ZÍëc|gÜaЙ¸ò4‹ü¥Ý r ×]HŒÉ‹[ï•Û· Aà3¨4Ö^ýrá4˜e do@Ò³°u-¾ÆÈjáËöáY/$¬h²Q½±Í[ZùÞy ÍRy S’jHzóz÷F²r] ›béƒnm›Òö"Kܧ7s¶0|QÓèÅJ¤°Ö É‚6ž´£ÅJfP[¡aI\ ½1_«Ã0ý¥i¥fsH–Î-|UJ©;êÖHÌN3A™âÂÃo—² .?ÁY`ȉl«ýç\8¸ßó ïe˜Xwœ×#ÿ&ätˆmc²ô†Dý•ö=¸ÃŸÆ Þ$À$ü¿Zaà»e²P=´ö×Ùê³~G^>òõÀ­HŽyÍ*þr#['ŒCƒâŸL¥™| DArׂ!=Ì~Æ÷ÚÓŒ°¸ApGQ¸ ´—äòÆ ½€ñj#±¼l3x9xH¥§tFO†Dçúy!ß…wèn” Èz9þÄEYh¿‚¬Ì†0ÆrNi”ȃÉ_O¶½ÞŽüõ¼n|^0%¾óž‹$8>¾u?¯ª›XR üaðæ ¶é¦Ho½wÍÊëªÐqè‡f­~8”ç^ñf¾AªEZª®)È|å k÷4‹ |÷¿¶]êzMÉ_Ç‘óPÿ™¨¬[Hל'&‚4¯ã8=ÍoÀhÞg¦V°è{°©·j2žŸª“jx M-*ƒ öÁŒÖï~Ûkží#[÷:i3ty™nÐ$£Ä²\ܯmŒ×Nj2†¹QËÕod‘–¹ôÇq¿HwÍd a{.ò]‹HŽ»WcCLÏ?#©fG隨fä<ÕÐw—脜)Ÿh¿È´AÈ5Å“7HÛ{ònÇeèñ(}wò’?ÒYˇ¿I9³ÇåâTG|öæÅËú’J0gÀSsãÉ8rÏdÑ–€‘M%ºÇ|VÁ'‡â£–绑ù¯÷ùPy½ðþÔÐKS©D73§W~`8‚Éuâû¥LaBøˆæG• (;¥yŠYĉa·Mïm(EÉœã[gœ"‡ëÛù¶G¡‡øðÀɳPãR°ÿÑ2M(¼¾iËߪJ=HAzg”æ®T(—㎸ă|!ubÎÖÈ¢¼ÍÅEÿ$LGøn7½=šŸ¾ Ü”…Š#?;ãôá…Ø%5+˜ŒººsÏ9(ÓËßr;Œ2n Jÿz‹lqil—­"»†ƒû|p8ò°þs;t z·$<7CáÈl…0éµéɧ­»pe“—Ò½7HÿéÒ §=÷‘õ˜–A·åIXø~úÖM¾P(^ë4{g†’f¾ˆ„I@ýFCó¶Ê¤û-¸AÚ9}úcƒÍŸ£D›(oŠ‹5´tò[—Ý“A4Ïž¼B®þYIÇ6¤;IRÖX}êj„µ ûA‘V)·ƒ"oju©}ÁftPœ^n;üVÎ66Ÿ–º’ÊÝóðâèCÕUîHÿ(KxÁ\Ù#߯_òÝó'ƒ™>6ŸFÚ_7ŒŽ'tBϹ>ÙO©¹0j¡µzе†]óÝr7~‚©–…Ì(vêÀ>^Ztd7k{fC×NÐgéòÓ雲¦B”ÖCo_åž²mÇaž¶‡qìâmÈSmn0lé ôwf+™~ÒÀø㩚}¶Ðמa¦C ÷:ib“¡ù.·Ç“µsH™Líj¸ =’UqûìaHª˜5·_ ª§¦,Á dÚÿÏ IÞ0aóŒÖ#`”“÷“äézJŸ;P~Â× …ž9–³* ×ÓÐíï=S iþžÛð**w‡ ô@& ó4ý£ðù@Ç÷¢w¡!+0[‰†|­>èJ@â÷­#\UŽ0»‰7n›Ü/ƒù«é[Ý`üª÷ÑîM-’Õ2¯ˆìER­Ž¿™¹Ù{"ßèàÎ"Ã^èö®Ëx°2¿ÔÜ[Ë7½ìÕ6››Sp˜3çš©"rÎõiH‰^EÂ'ºù]ZB0ú|ïñëmO`”éñš~–Ðò,,xø~4Çý0µ †?kœx\ o•âœüý™{‡åNå£ÄÚ1[=º1˜‰ÒZO¸œõÇ,ã7‰‚Tòc΢$eèM¸rîεad¸dFt©Ê€¡ˆ™¬ëm¹ØÅO²’9Ø¿JÞlЂ-y2q4AÐÊ);/è€DåW­òª+ '¹…qá t½þzÒ?¸½>ZÂBé»Ù¤”V®€2r¹BÿMxt€õ¨tj2ô×jØX쩃EA®È±áX{›Wýγ ¹õ|߀ß>4˜ís¡3zõ#•k¹0sæÆµ°Ð"$}Ú,ðP Ù‚ºƒÕbò`x<±q—krj»ìJ< ‹·?¬p*F&§aZ_5g¹{Rl-ã&h˜z7'µî 
Ôè·º|õ‡Åxº3#Ú5ì'Þe ÃŽÉÑ ²t7[ÊÉ׳—ÑûšõÌÈ­c÷àr¹L˜¦ˆïkAÆk>ùg\ç‘ߪåÈܯ@:dþPØ×2â?_?#Eü&|,(xš“¶9Ò7[±*¤žø§NëäØ>ŽÌÎ7ØÕw"é¶ûâN‰ZXì;›÷H9ÚJEøYš; ½RóÛÅǵ0E;~œ­°>*zØ{ù"£äø ¯ßN`â˜Eb âÉÁF$7šEoó`„Q'ß{ ’0é;.ó¹“Òo>™ÊZ©¤†œ®u4a×#ãªrÑ•kœåñ*¤X¨Êi$Þøe¾­-]ý½H3dñ#2©~¼ ×VËCÉÓO 8¾ÃÌ,%’öÁǯxGKÇ ÆüÃZy”¡LØçÎì™hÛ•a¹ÕÕ&·ç;§%Á`Ë€æFv$Þ¨>/>ŸœÝµôS#Ùb"s««ŸÅ=xMS f‘Ë"L0-Øúӷ꾈!!¯‘ñË|7Zd³¾üisEƒ ¡yì_ÿdÝ™_,ÕrHXGT÷{I 11^GõŒah¸t6e¢¹îýütp%r$ŸjWYÓã~ÆŸ-…ß\Ç{í¡çƒÐlu%L»T,¬ÔsóŒÆ#þõ0ö­³nÔÛ*MâÒGž€¦©Ö£'nQòÓÅRžM6гÆþ4lÌAî’»ÅQ3¦¸¦§Pg§ÒïI8ó“PÂz$K ŸA³é^‘Ô‰ÚÂ2Ó®ºˆd…¶{‹íHšVïær‡Û·#eaæÆùÅÉghÏ 8| Ÿ0ÅßWЇ›ªO]¦’æù«ÙwÎi˜àŠY•øÆÙŸ¿üð¦xw{Þë…™2Õó¦¶06,ÚÖˆùž:´­”yä ‚ïö+d"§âA·ÊP(0âOm½.€DË„5­ApËM¿–ò¾ëöÊ‹DNC+žv$?Sxû]™WjMÔ=ц<šu8‚d`ncw]ËJ(“PmõƒyKaCANJñ<ÿúÖÖ~™¹ ÷‹SsË!íÊÞWÆÈ0YqÄ\7™UB{Cl¡':-ªT8z ~ûLj“ ?êüg™jäšfÙ¡,~ *’oïPþ¥#t#f·½‘^-*ž‹#Ƽ%w¿YáCDÕí‰påIV.$ ‹°ËÀYÈ.;÷¦¥_DÖ‡'Rê¡É•Ô“åG&Õ"š†¸R$þ,Î{hŒâ_3$/‹çC?kë#–?˜O•8ú&WÿóÝ©›ÿ´ß<i[ :Z$sa,ê{Æ ½F$Y†G _Ãk¹ÇÅ)èã§T©® ÐDÖäoЂfæO\s¸a`%a˜ï=?ø­Žíl{«}¨ÛN[säg¬Œ¤ ´Þ¿‚í LœvÆã©0±úÐ*±Lu˜™“#¨1µA•Å fiäèÜ“õöÜ1¨.Ùæ—ú›‚ÿÿó~cû”÷È׉3Ðzy}·ì3ŠŽZѽ0t€q± *)D:BÒô܇4˜Ç+þBåÐûîÕúÊO·põ›¾µ_ÅÁ¾œÁ¡ eÖ’üóØn¤}ÛA’š²ƒf×!ïÀ¢!˜\óÏcKe»ò݃­¿¹9‡[ëïÞEº žyJ¼ÞµÿrªÑwhâ6þp%.>/¶\®½=46Vr&4Ï`œ\¹x(‰yþ¹ÝÍ‹ôñïß•Röcí»åªbÓ×$è~ô €¦à«§E¡T¡WFaqXU.”×}…S±÷gZ£¡OçjÍã¶ä9^Þ5KøM‹Ãõ®Äa4ÞÀÛá¡ t·®vûqã»Tn%ÃÄÉ—-bà—óºG'…õDŸ«pÇøŠÈ^¶õ*‰GtÏ}ú¿ÿ×Fz0ñNŠ4dÊrrè†Sê‡ÕW$å÷£ð®Í¯~E¨¿+š²òGq±ÈfOp?Nüì e˜£*¡™›d©œGñÓ1þïG`v½{¥ðnozx0ë ò(ö0Š&ŽÂBÔI]Á ÚÐðãq¦ry¸Ei󶃦O÷\^@ýUºWÉBpG,ÀÖü \þWýCŒŒ‹Ç·•Pz,. £äi]ï»,'`Œ´z‹k'ŠÒTÖjÝæEÂËâ“1™Û£éb·ïÚ0oxóéÁ²o(î!‚ï—Å/ô¸>Þ•…Ø™à¬0«)˜ß׳R¬kF÷¼0"d¬O³Ó´ôad¥pTéˆ;|&§/0Jm¦øßÇâwèõQºä%f Õ”^ïuâW\Á/ï>z÷5´Ëå‡UÇ2@[ñ›ÞulH>à½ðÊ<ù×ïŒÜ ³«NzÉU@å—ÇŸÓ#‹ÇýÀ#$lM~C”¡u«œˆl¢È2Ì/?ôª^ܺB˜ _|RÎüF¸39¼­Ý`ÂIäÀÙ”þÜŸ½”Vhy3s|Ë£Õ¡úçpvÌž}ã§zࢠô‰ëŽw­âƒf[N™ñp˜4OYuQéÙ7T?n|‹\ùû=\JZ‘ç e;§Õh{Ø­Üžµšcèž_ûhMñ»®ÎD/¤Is2;ÖgƒìaÍf?PòË•'K{ašv¥Mà*ä}±ÅZ(ø>´UÜVÜ›µùÀpÐ;”Úƒ‹ß^ƒFClW¶ªÃâ(gÍhå<´>köÀÆrÖÏš2¬µ‡A¯P³f§(dÌ 2ïN°€©ï•í7Ù‘&gÕàK_$ËÎ÷¯‚Ñè_Κ#æå}Ï&´‘^ñǰ²j2ýO?óaó.V›=Ðí*">LÉ÷\®¶ÌéÐëg§P­ü kéXî\…¦;ç_z9B"*ñ[#+”)‡§ÖíÜ†äæªÀßÑW/+[Woïê ïŽpì‚AA…N½Ã‘ÿÛ?¯CíBº¹ÖûÕç'‘¶7tü½%ŒÝ­erÝà]9|±AÍ\0m8Rk°éWÏÒÏ^€.}ÏhÉüjä@!ÿʯê|ŽQõ7Ô-vÉ9ÎQð+û„nâ+hù0ö†Ù=¾¤ZÕÈ7! w·á¹}¢È#Ãþœ”пU~—?Ùš8n…”BÇáÞÜo+šá£‹ƒ8c+,†T•´Þ§¹$Y›Î +X 'Ê:Ë<€±õýÓv[r`¸ÿ‡–ï>]$öQº"™k0a¯­¹ Y…ll=-#ç±ß…fE$˜+Ý3-\ Û<ôªcj .u^Ÿyò1ҜӒÞå²òÚÕàë{è‡ûöߦraÄqµï] ¨ ,Ü{Èq,¾¿Gö M…tÖ—†qß\añÚU&³#¾0\z*'.Qh·®ërF®6éö™ðüìb”«ã(ªÖ '矂je™ïÂ'AvSQû Ò‰Û3æT>G–[´6/Y¡ãWúª‚^$iÛ·(õ<‡ŽˆY‡5ÉsPÝè«$Âw™&8üÆ5(ýáqCº˜‰<èô"èÒ$©oÝn7€¤Cg„«òk¡´qq»Àñ,(þ¼u“k¥Ží>sšüê0d«¿ˆO!™ëíü?wÀlŒaâ ô—Ç\ÑéŸEºkƒñu0ÈþÚõØä”‹U¬Óòä€ùÌñi>Â(´O{J |…Lù¹wêJG¡=ÓbÌÉ„­ƒÏÇöØ=¡ü>Þe0ŠÄ‡;*‘\ü9¾×à Ø®û`ei ¹²º‡‘÷¶É}ù{›î”½@Ë>xÒÒ!d­[Û~ò¸°A{ò3{0Öšñ}ÅeóW<àݵöäÄgGüàÓ oƒÚÑt$?÷Ÿ”²EŽkéÏøW…|¶ÚÈÆa^Ÿ×÷ä×xdRÎ^ñ t+Ò:ö‰æõ­@b¦q¨‰×edζ]Ö S'÷FW.ÀÏ]Þ@þÏ0w1aÃ/)¨Ó™=(Ê-4?fsXÆ‘ KqwÇh$°mã/õ*NmÉ)ùÙ'UÛ)ãã›ïKïDãì'‹§^Á0f(¿ÌV°.do…w«8®_ÏlB:Á€½4©%n#1äãdŒÌ¥…²çCJ‘ÃZÀª¢T…íXõÏ›El¼=M L'|ûvÏxLf ^v<²†ãÂÕ®î.>¶¼ÛåIPºnä sÞgøò˜]ÂãHT¾=´®ºj 隙ڕ‘¶àhÖ´ã8Ô¿û†ÉÍ0øÔ¥¦Žô?y>ÿ²Þ ãléæ- б–Dbͧô›ï Û¿·TCËc‚\è³$„ˆ~ ´GZ'˜Úìgî2oPAÒŠXžC’ÁÈKƒê7>@áïq¾ìÇrcK¥S'²îûuzbs#ŒhxÿÔ]„V¥3¥'™O Ãéc›4ã¡>N.^Óh…ïîHûI‹Äucõ÷ÙÎSêÊÜ+D`!=2¬=Ð J’ÓØgÆÊ &óÒÍ,á$Š]Ý5=m \[²Ûå`ìŸ×ºoVÃ×§™w¢ MÑUècC ŒÍsŸéІkSwîÝßûþO×Hõµí½·È^B¡RçD„”´¨$BJVÒ0B¥¤aeUF*B ‘-ÊÞdï½÷º6ïí×û¾Þ{ÿ¸÷~¾ŸsžçùœsžÏF^ŤðF´½íœ½ä¨K&6á}z°,sI[\1zNˆgœ½€ì§©g2Ÿ ÁèÞL»[Î!­X¦ÁƒGânÎXG*jÙòkÃùðK4/êòéI$嬽¨Y’†ì1îÏÂù΂®äM÷F>¤ ,Ì}Ô€lÎg›çÆò~`K_¼•ì¼Z¶œRSaõÀ•{&rH·Ï]Uƒ¥éy~ÖoPP¢Ú=y©½©x~À¯ÚÀ“Ë>C0ïü5†™Æ«Æ}·_þD¦Ç¾È‰1$[Þ÷HJ¡ÏŒó†váL›'ž|t*«ö‰O‚fgÙE m|ñ‡þ§u¤Sk‘à€å#‡}ShŽÃ’X´¾ƒg82µ±]mcß ã“ŽzãÄõ° /Ñ©kƒ*¶Ã&9Â0ÔáfkÞ\|V9uTH=¦¡è•ñ)zgj÷œ5Fr“¯êc>á0{·LŽô=Ñßú ü°ùŒs|åó,½´šMÊ#Ãá¨2Êö?·ÔÙ ¥Þ «aPèWAz>rþb¹ùŒÉ6«C«G’{ufDjb'_[!E€PdÇJ6l„[×>ü¾€‚21ïUyaBÿ0•àv L¾¹N¾d„üRòRý÷. 
9¡´Ë¹ðúœ5U²©mý¬Ä¼ÇtË»%FÙ)vù\ÓL‚Ö¡ØÖpó"/0“iàA޲')O´O›W¥ÇÚù¾'?±»a “6µ/yáŽãÍ^”©‘Ð7t¡Ï±Jêúðͯ¥ÁºõÔ›ÊiO¤^¼r]Ó<2‚ùçê]àUÏKö6LZÄì‰õ_…‡šÚÊÀòrÈþÀ²b¤:½Ô‘Uûæ}w»3êñBµ"ý¹ÛŸFaïxöú‹Â¬DÚ^ýÀ!(9ê=sBÚâ}RìEO¦kÈö Ë5X¦5ë>rÃjÌÒãu†þ {òÁÖE7b>ò¤6¾ã;³åGD-a3'±Ã~3Ù—Ç?Ü!Ô?sÔ`s’ìo# ¬ŸˆY*³{c‘ÍÅ:BH;Ë×uáõ¼ýÕjO‘ÁÍÿáï€m4¤ HF.µquÖ¨%}œ™’QSoÃTxfG¡¿;×ý8m,Îx0Ï%ÔðCà"Rþz˜à÷BÉ®zÞ75Ÿ†¹å d£U¤Ì%ó~“Û£üjCÎý†Š3~JÙ\yPw@ïe¥ÇŒ”Ë>Y÷î Hæ@ÿ¬âðø3}3Ìã:B·jÄ>ù—éÈÿ¬„HÜî!Õq‹©‚«—ÕÜ„¦¼a'ÒÅzŸÑ\Q„mI½›*µ0;øŽpS¦ yÄ¢­µo+C) G%û~ $ú/^‘Õð^ÅÁ•&d9öì+ýKqXAÝCæa1jú˜CK 5yœ ÊôÕh³ÁeË-dN!M&¿nl²k|Ù Ðé7Àà0üHM°`ȆÒ9N†øoÀjœ»;‹;r1¦i¼¡¥<¢†*õ#Òˆß_Ø3ø i¾'“0Ú ‰ZO6óŘh\cóܧIü=&M÷mVQØ„œÒv‹Æ3ÜŸN‡ÃúÛ \°|yEõ…Y Jl½¼êìCg„íG4‘3©›ƒêR»ö<ìP^@›µ{Òä´°*§%—é"9.yû<9‘ôŽàñ¥èVdîwV8Ç¥é -g‰º+FMWi×* œv«’³Aö´·&š>#+Cð]ë“О¿vÞ±ÁÚ}C%OÞ÷ÙÅõ qlÐøùÁ÷îý HjUšBÁ Ý'­ HcŽÁ RZ×TŠl%…wØ:{B}î…DMÇ dGy°Þü•ðÁômyôeaüÌç[‹œÈze¼þmÀÌFÖjÇ1»Ã¬kXiÿ£#HÕž­z'ð RœbzÒX…_¤Ìý#a}úí~#)ûÏîjOÈÜWÏ3¿¡ úJcænþDøoþi™«îZfLàŽSµâ6ÑìÈ!J¾0ÙK•ÔO«T`œï£“o iS[ÖŽ¾@úÔ/¯C…y!1'åukJðr®ªˆD ËKv9QîÐ5¦vTjµfÅ^–EÚË{¢Ú7 ©¼ÓYËïý@x¿ÆÙ¡³Ì®™Ýd0r>K?Ôf6~îë}¡ˆlby3Ù*Q0AQœ; X‘êW¾KÙLÁz£“ÌqjKUh[~”8‹”ñEKV~WŸ§ç&Í7ûê¬YhÄ>ïÒ'¾^¯áõEZ‚׋‰K²°rï8]Á sh -¿ÒÞIqeÎo³íö\Àý«£²ð£ÐåÖ¡;èµ`œ{rh ©fo; @—þŽþö‡ðþH*Ó\Ékè‹ ˆÓF6‘”³ÑêÈ §Þ-Ñ ½ë§s-ïºAã]r·~©&¨¬òûí©Û·ƒ¬¤S¼ò’xljÁÏw§+¶¸ öeþCÚ¦ßÈò>çÌ–W3´Ré”ð'™#ýLA‚ÇÛ ˜z’¼3ÈßmÜŽ.n„šô°äHLÌ{gÞÙL„‘Ô¢Už$''‰æ‚nß,¡/S p\kßõõÒÈv²2Ú¹Ãô÷2\žB&Óèà‘ËD}`ñ™½)ªÖãTem—ÏBýÓ„‚ñ‡aVá¨ôŸƒ]ÏéŒï Xᜳ³)G2г…±©&0³‘ ÔÃ*<·&~d²›‡eÔ?Á’Öͳô³µÈö»ÍL&Ivm¨,šsù°ã%+¹iÏÜ2/û ë“oö•¼°F®Òtv½ndö“ ˜y(‹¬ƒyÆ\õŠä©¦ÔÖ&rɵι’ƒf3EÚoªaý‹±½MÔ5˜³Zñ¶Ì¶GʆÆS'«¦cZj‘“E‘øùík»vÃg¦m€;¬ó'?¿;<K$òuqb0s«¼¤k I?´?_¿½¥…Û‹¡Ð`tH¶«»je-Ô ÙÅì}eÏVÃ\¹/¹¹Y- Æ®È~ü|É´4^-êBYWúvÇÔèw½ÿðy 2<*Ñg„‹!jýK0ïv;kžEš„ýámùkHÎì¹Lo‘‰‰•ÄKA{d‹tªbúKýÐ{Ãñh5;ôï°ôIá>Òû@y¨É­}èšù[‘®0„#¾c’•ïðÿ´… ©%ûÚš£‘Dl§QóÅ (ŸÚÒ-—XGR­Œ ‘¤ÔHc¼ïjæ¥Mdiã0í刃ùÄ€H‘àó0ðp³Þ ¤ ÙÞL̺?ÊCjÞµ¥&òغõõíÖÎ8X.:Üvá#²ìýòØ2Ê>÷VÀ ¾¡6ƒ´uö3GÉ'>.á[0ü±?Ì÷Æ×IsõüÄ*4ýZÑE/ذ·d­aÖ~Y4óæß9Všsû¹‘\ôäŒ1;Ò›lÎӔ܇mW 1›Rh2%¯Ì}ˆL–l.°œ`k,:¥ £ý9~¯Ü |ü»å´–=ô“šdë‰B‘¤ñú·—ï ¦eËk×l>’éoìî?„äSß fó0äfAAÿà*Ì$þç“ÃNwÉóN6Á¢µÛz£ð9ä~õA*íì˜Ô‹<|&xøžßaºKQ¾Íó’ZÁÃO¿ÃÒµ²Øj“Bx£¤¢ÝS‰4·…|‡£Ÿ³éa¹ýs?à¥Ü $÷+ÞiW\Ÿ±ãC#.«0IkÿãíC!¤Ó°h‹´±BÒwGòÆ:éÌŸµÀèaU=½H¡]©;ùÊV½w4 ‡ùŸ)®”µŽ¿õ`.ú·bFÉAïÄõíc‹û³ÕÊÚÑ"ù~X?ÛËõ É~vxLÆ ÅF¥JPr…=vÅp,÷Àê9÷sÏìàã#ï ÃW¡HgüÜåÈ;ü‰w8øç°Ì#ìÜš~†|3âyR°™Û+n/~z¢ ’žñ\‡þ{÷Ö‡ØyV¬Ï\œú_Ç.¿üÈ 7ã[­4ç ã…ÕŸbÐ\d·æéK>þ¤ÖæÇQð¬Ë—вXwàýÉ-iåbòN£°°WŒ©ô-ŒþsªQÚ-\TÆ–OÃ`•ÝѶ#æ24½®õ0C† ï­ía»ù®{¿‰<9:ÝÁ0¤8nÄi]y û/Ûj¸œAZw¾¿ƒÐ°RØîÉž‚Œê±®»lè`´ðî_áÛܤùü½}°þO"ûñË>Úl?aÃ;õtœÆCäµ÷<¸öŠwx]ûE I’)`WŸ+¬»)Qåù˜Ã2…qÎﬓH]z¼Ï£js%S €Ù|oë§R¤Öz_sätîͳ„^‰>é?¹®0áÞmÅs™Ô*ŠplÁv]Âïôòs›mr½ü},d5·¯@Æ9—ÅÀA'h÷¾­~dzïî­ã22†Ï¬¯æf#‘êzÈËÈž³PþØÝïó–2l:Ÿ`¬­2†£#w8Ï5 '¿¹³K)2{eWÜ,€ì–í÷’” çÒujÝhäRýrKÑQ)þ;çƒõ¹ä”AöH#âþýt ÒïOQ°lx[1‘ºÔ/`ŠËg¿˜ê òH|ß©6Ã,k¨·Ãi蹬–S.ØÊ} €‰6×”?‚H©P½pȹ)þó_€µ¹s 2=Žîõ[t¥RÁ)ÕNî£r)´þˆ´õœ×Þ2‚¬NÅIÛã™qb3ôþ $£4=⣢‡<'X/…:!Iß½»T‘¦•æ^D‰>ôŽß`öe{€¬öEç)KØažÇ´öDò+h§$w½¤Í!¤&Ÿê‘ã™ã-ÒJ$ù/]œC²Èç®™÷#ÏásŠ–'‘UðNÊeŠŸ0»gª0ïB ¸ŸæYÜw9¹5øï³…íãbO܇”×t" áK0 ÊLÓéè [åköZÕHݤ_Ékw æ{§9ü¸¥'‚uâ ÜaÆ.›Œ·÷3T4SØ9'kÂLQà¾Ú¤uò±â·«Bö-ÅZSq Xâþ=8é®Ceù¥žßÃ¥Þ×7}%ÈîXV‘ ¶FÒ»O<êDfƒÄßšÏOyÕ.«ÖóvMÔ}5Gj„ò㢗”`¸†ÛòÞè6 ‹äØB¡ãýê-ж¯ÏWÐ:šžmဠҞ8Ær¥<f2·ŸœGæ­LJ›áð#£ùç:ÀBõä‡s»¶sK]1áž 2 Ü>³Y>(¤¶(Á†ôÄK‘·Èö`ÒWi/t¿‰R½±Us©¯Lc•(¡³™T@Uì tN3δü,„åûê\â”·atò̰‚pÒ73þ”ûd“_XÝ|Õ‰ÍÁªô´D¼âÓÈ+šgDÑ!öÁ¬ÓH±÷a»©’’0¾ÐâÍ/&òÐŽÖ6Ýç°4FðµRv€ŽÈÛ—ßf· pºÝ &âe0[¼þ,26v©=}À¢^?[°ˆ6BxΙ]²DÞ=^'ZÖ$‘)Ãq6ÓÂúòÞ\? ß§IϾnFFþTåÕ$¯ø”õLÏ–ø#.ò¸èB‰[é£>Øt¥«ÎºÔy=&Kß õb¬ï™f@R¬åæîË÷‘KÆ-±î™2ÊõVïý£Å?8 ýºÈ–z3‚šÁÚÇ(#N´IÂ(Ú. 
V¨T“Ž{Ïÿ Ö̼Ƀz‘†ßqóD$/th¯Ü —‚ÄÝ>•%jþШsJ†åÓRîî?|’÷t]ýYRû=U2û#ïú»¹‚$P¹yø-o¸’ 2F‰-4A³Õ) Ih\œŸ-’†Uƒ¯–'UaNàü÷ 5s¢>Å À°ìzVŸ!ñ„~ ö1rü+vƒJsQH¾1ž´o€Æ_˜ÒµDY ©®,õ{jG䜰ÒÉÙ»„bÌi”™Qtï??"ú{å†ÕDÜß`f‰gfK{]„å`é‹M%Å ¹­s¿ÊKÈÔë®ì‚a uú× 0E%®“ó¶Õˆ²,$jk¼ï=æ=… î»Ï•›Â›EHâC2è\¬Ò_;¬ ~Lüš)Èb¢L¶ãåmdõææwSñAZ1?Õ)oCOXãM³ƒAs£ÚµÆ¤¹›ù×`–©ÞH$²-@OËçË].ÐÔóÁ`u¾º%S@2Uz/ÞÞÙÖþI«ß¶^^ó†á«ÝÞÏ¡o_ó¹ÔkÐ8•îvæj<’ž‹ûm(Ò;4O…ò#YÁ'ëÄ…"Ø ¿Û±mŸAä’l?oy›,߯ÍkÐw±Çy%È I{çed$– €Êöw½=,öØgh“èÂHü”=ya;4?yJT‰¬$é¶L˰ |ï\ô,ù¯_) ¿ÜÏ—ó€f9ƒåË3ЩLe°¥¨nøÚ¢ Ê=-›o}€m½Åý«BlDà-ù™  ›×D4]`Û-TúŒ4Òž‘–ûœª ]Ê¿ÙÓQnÙ:Ý‹F’·o*|VG2®`é(æudÚq£µï%².nTÙµn —þ¿:RÛe[;[‡y”»]es6w–}yèý<"Í)¹xÓô5X³ûW«Éßeíi[ÍR®Ü'\,i_Ó§þ0ʳdêô)»£ôs¶ Pë~É%†!)ª ߀á‰'o•ŽyÂ2ƒ$¥¤·ŒÝ“ViéFjç#÷Þ Ùӯ—Þ^Bð?©¹Þÿ ’ñWu>|üY fy¢I„=,”™R®qRÀ Ë’fnu,2d–EÞè_@zóüß«LNPM¯ Lw°I²˜-küçaú.ë’îìahk˜Nk¤N@v*‚Ôp<7Qßø¬‡ ¦"™e¦vŠN6d¥ò.h)! ï»g9ʪ‹#”ª'Å‘íø?}×Fzl¡ç"güýâÈ–iÌvÜ)޵/y¹®Ã@¼cB¨m1ÌgåÞˆ; Sdï’ ¤Ÿ ;÷ØŒáÈN¨ËTÚ½Å<ŒÔ¥úŸ»¶×¡^t¯ºAT¥­¥ú Á2ßÎ{½ôvHS 9´Âµ  ‹·Û ÷ ÕÑ=d›tJ ÐØ÷î)ا©¡èP€­ºÅÚ÷jÐ7œÝ®ÀtdÔxz–Ê‘¬ï2E^c?,‹+ÏÉ6‰CJ¢ÓžÂ={€pÍ`ç.ÊT˜eñä´à<ƒt6í,ä4`ah¸¼†£zeø”ódì‘jSOB† ÏKýÂd²-RÍ{MŽ„þ –3‘4HMAZ´±sƵ*ä2jÌa±Eµ ’%s’‹“EaØÑU­_)yÂÅŒ>u!%¯t¼˜ï,zØt¾½‡,ó³iåÆ,È"˜M­’¾äYÚô°ÜB7>\‚ɤ–,‹aZ˜3¼{A$…zAÄ#%g 6µ¸Þs@’)Ó°RúBd°I“ àù -ÚÙk¬îaý¦µÌTQ”„J ðü| ;½îhJ¸Â"ÓÙ_«{÷á»O{Ïj¤‹m¡0Q Dzªš×woŒÃx_•™ñ÷ ž(ú6ýnã$ÙD‘ºx‚f™ « ¤çG¡_6Q{ó>4åõ?žVN@JiêO‡´&t%)>¦BtN'é›#Ý¥-wèæ³0??92Sv‰]‘Hs¬ý¹XŠ? \­;Nåë…uÏçÍ:óaŽê\¶Væq_¼Çž ¦ùŽóÌ›Ð+»Ù¡ã­JyóA—¡¬Sû ø!y÷·»ïN@gÛP’VãtI¦ŸòyݽK£6‹öko|Sz`–ôŒ÷µyÈž—÷nYߣ=éù¼g¡pJyí5yÒÞó ØzãV3Ü.´@xLÚ¥__‹´mmw rÒ ÖvàØ€ Œ\ ,³ßÁS¢S9rãŸ[/ÈX@€ÂAêƒÏBÃFrÝ#óù$ã4bÏÓ“€Íûã£XòwNÒ•{GÌ Ëùã»Ê7‡FïàV½ Pí1òUO´–å‚&Ä‘\)…£×¬ ú]³âÓAý!¶+нn›\nI¹©OüÙ1k]ݤqw¿ÂЕqŒet€ñÏkbå™sÐwlBÆ;ì27&¶†dsræ?îMm,އé¶l=íRnþ~¤Sõµ9<åç…Ô Þ B•õ&^½GС¶¹Eýw«EN •᳘s~ÈvýÛÇɶTØž#WOˆ"î#þâÌN¢.nÜf È\‡ÑÜ=ü,ä‘‚/×ìèÏ»PæùõÄ)ë?P-ËÀ¦H• k>Ô•±ÆBÐÒã@]Ûº©½R¬@r©%‹w/è¡a54VÿŠñûê9ç;`ÖæIZ¯Ö&ÒÍ,=óômW3[ÕÂ겇 w,²™¥Tæ{ÃèÕÎÇŸ˜F‘§ß"Wý  © ¬S%ê…:ÊÝϸƒþÆÆB'% Í^ÿI὆œ‡þÅ‹„ùÙþdó^èh½£Â”¶†tüG«2ZÝëY([s8,ðÈPø?s†EÁó]ÆV(ö$Ø#«†BjgmsŠ¡áØƒLµð1h8HAß-&H›ÅŒŸ~pIð…²ÜZ~e!¦óÏOrAV¡ßäšrݰh¥áÆãŒÚÿù€ HyãꯕË0™Gc©¹«¿þÒMwX{3käÅc (èò ]7 vù\jjôît$ûk¿÷|úXm-Ó˜†‘íÖ¿~è'‡S‚’Èj^Wr>€ Is]Ü"zO!•y1Sà5(lûÊS-c+§I®nœ|‡d§„œØkØaã祔w˜è"©7S€åw‡9TŠoÀ€Ä‰R.dMŽ+ÝK,´Â%¢–¦PdRiuk[fg+¯0=6€ùéß>¼>ã wx7R~Ü%ÿIÑÚOßµHW¥G2Å€b¿_ 0[q‹:džÈNPMw†"ù°ÏÓîdÿ^3tËšø…Ãï )Ãîmµ²ƒãЧØCÐXÙvšë„ôÄ þ2U„õ2A…E{Xn ;öœì¬qHlÝz|™Ê'Ã8÷Ú­ë+#Ý0%©ê.ô[VJéhBÝ‘I‹ø4iÅàÅzgdpI0ÒFmx²!Ó=­S †þaMz« _~ðW=­{ Um‹@ߟùÏ QHÆC2Þ÷¼‡óK·ÎÃÛ†Š3 , ÐBþGé0 /ÿçç…$oÏF°hE!kªŒˆƒÁŠçÇByL‘V=O®‚{&^sÙžý-†Ô¡×²@w£\ðø«а›“õ9/LìÙ§ÃD ³=F/îŒC©@ߔåË0ÞéóøøÚYèȼ.q‘ª ú’´ÞL<@òN›ýƒ[üHºRAS-Çs3{È*¸!{"È—›Â ¯ÈÞó¾K ó[ÿürÖÍ®¿]dH„q þÃÔL c„á‡R=ŽÈÎyå¥a@R|3~Lí ›¿¼lþÀ¼ yVÙ ”û¸²ßŠnC.«ï¨ŠÁöë ðöldÎ0xÁÈÔ‰ôŠbÜÊnB‰o}¾Kãw˜* I$‘*F²ùí‡g·•‘§Ó¥¡#­ʨ<8Ìú‘Méï1;’ÔQø¥d‡¾TÑrÅÁ ÈÀ§»'&{? 
øÕŸ™å–†Q‰«‡#—¡>ª{·æ·}Hb”[Ì]ƒd#´‡mEy‰8Çôùj2´Ý ü3H’èÚÉë0Ž,œ6r›¯`&ìðÞé%.(ˆÓ^´ £ƒ¹ Œ÷œ¾ÂäøÍx­+}HJÎB¹ç× ¤‘tþõr ƒ/ûÅíÚ…d²ÃÔ{îŒ#í“# †Ð&ô¨ÅTú6½˜5òË ph`‰ÚÝZÿº-ˆÞ‚å;ÊßX aCv[áãð Ñí¾ fâó™‹qið:¦‡Æ©±«QR'>"×ùÓL¯Dq'+¡ˆ›/úd–¸ÔéгFÄàÃ%ØúxøÒ“«È›ö ˜iŸÁZƒúTÎ…nøÉçeÝ­© M<ïZUÂôÙS®7÷ Å'µêsSÐüpr;B*xΚ6Áû2n]U=5df½'··$~ ž¬-…)uwëÃ*ÚÈ*¿ªK~Á©´ª‚í¯Û!ÉÌb”sâM˜»{_{!¨öPûRøîw0HD¶Z\ûb›¦`CÍ¿¨û×oÈ1[áŽè_ø¶s¯ÆYÔY=Õå Û¤V= Ö°r;¾)ü?C,I2RzÃs9[èŸjnçÃâ›mªŽ\¤LöèIÛ#€,-ÿx/§ «iGM™¾?¸a9Ü%ŒT–É;†L‚`P`}WkRßù¦’®Ÿ Ël_×ïÊz!Ù7ñ =X3ûæðKI 9j.F+k1ÂÒ ~'õ2Hðã½x펒왶¨¦UÂŒ{´#$‹`ˆ±çá“S°m1Ä`’ÊÜ×?ûhY,Ü.Á:¨œ©l:O£‹¶§†ùw¡ˆO¬ú’6²ÆÌÛ:hÁ(ãOÑÿ2hŸì¿›¸Çé1ÇøÝAª<²QÖª¤–®Ø¶U¢…*5³ÙçSäÈ饬økq72øÑ&?—EžO/qjÆÒWLó t ›]Ink;YZ›ÛܰàU¹Q¸7šDŒÏs«¶Íôl£íd<)vÿ‰^2…ý7…œÓ¶ ò1þ-p!ß±á½n=ú°r-ÑÜeõŠx‹ÕLšýDªÓJ{¾-Þùë"þù¬ ‰ûlµ€âÐ¥&¯¯'lgþ˜Ÿ«4ÌÍFd˜z E­xÐàÖ²=séšLú…‘gÔòÐCº«`:b7L„¯è?GvABHÅwDꫯ––¡¿J—Ÿl©F›úÞ@Ún5ƒ§01©®Drù6’}øHyF‚©Ÿ8o¿7I†ŽQ{¨mãíÊ#­¹Z6–|¼œ‰ü-Ät{‰rt¥KHÞ†!ÏÑû¢[ 0w0^ÔÅè%²ˆÕYo}†²¿ã(;a©ï~,½$ÌíäÂ4qeh­-ñÜÈF~ßá”îyÓjÅ—CäCNê±àà†¤û[}¿5޾!{Œ^Á²¾rcœ¹%LTߺBɉOªr¼E05°‹Žù¾R–rÑž/hù¹ï6…¥AKýSÁad GjC†óÐ$úpí—[&ŒðüäOÎ.CÝ´ÉNåjþ(KµÓõ̤·ûç ¿éï½ì7€YçææƒtaÐxÐõëΦx¸é`\#Aóÿù_!‰zÜöué—@ðO¹ÜñÀæžüï½7†b=©L`ÀÑæÍÓ“æ°m}$p ©çò?òD#©½Ô×R'Xj~‹Ïžê!ó•[]Sß`-èßœ.mn°Ù†ÓEÜÁþ×&OZµ\½’ãÏC§Ç Ûû_aËãå﹇™0Èñ|™Ã‰©Ï¨ß³%Àð=~&ŽIøLÎÄ¥Hg„lûfÛË™þÀÚ[òp›ï)Hó8ŒÐi]‡¼ŠRMô¦0§yÊf—4¹`Íܯ3JÕzΞºÈ×Î}¸wø LL¨]@ÒÛÏÏËg¸#Mò¶€Qò;¤´]Ô-à êT•ºSÃ0aò¶øz‚9 ÉÎ?òBÜ¡}NFon’¬[ºi®~_z¡°K:ד¸È¢‘ìdœÏ²¥$Òþ-7ªÀlAfÄMXïfðW8õb.ŠUÈZØšB´Ÿ Ö"C‘v´ƒÒ)I%å™vÖAûžÝ',M‘ ó‚bÏîx@½Ñ@lâönèÖÚaÚx^懲vw¶tÁÂUaÉÐEwØÌ‘ÝÎòAVw’¢ã Pö®;±¢aw¾’gûU‡,öÓ1qË0×ýNK<¹<Ù‡üÙp' ß¾á›íHqpïbãéhsó:·¿PÉ(_¨´PÃF}M’8?/‹¬Û ô€ñ"3k]ÈáÊô¶ú."ËsÒ{~˜Ð–>¨6ÝGì½'‡aǨ¤TegØüwî›ûþN:ÂÜ·) B/TÅHóyÛ o/³ yBýªwó‰2ž_½Ur³Y‹Ç¹MíšQœ0^fzþÒ9µ'ò™EÞ#³–0èjܺSeÊØúK”Î…!µ–'7kÐ$ÓûÎü]ì Œ˜OožÔ‚^›iä‡ä6=:+ (,~xfó±(ÄPú²Q„. ÝA÷/->ù°š}-,ç,(ó{çŠÀBÜÓkéÓ‡aõ9ǹ•‹¼°ü¼sê+Zy¢Y²^FrÙþR:NYdÿkCL u­æÎ»øˆï×tZì€÷ç&[• Æƒ'«W™™Ž&ÜœFÒonoŸ ¢hŽJ†K@þ…Þ’íoz°B[«ÆÖTuõŠÕÞßsaU°…d÷äç¼â>fMÔÝþù_ $a1¹tLõ Òè¿;¯xÈK²Çd-ƒÖIôãmyž»zèeƒ9 ºè🉄ºG®Ì00‡¬çy”lD¡G·K*Ûç ,’¯æ; ¤#…ÛåƒGaåHôóí0u “ÄÆ¦¤Ã–’жQ¤óß<ÅðO-ã‹åu0cT~%it©Î_4´°„Þä|‚Ë;Id:¶sømãn¨Ñ‘ÝÃ4| íÏô ÖÂŒ¡ßÁ·éHî÷÷%f(ÏÿT²ó©(ŒX>²÷Š€U6/=i—-dô¦òr÷¸Š$2¾×®$[£sMÌŒÔO¼ ; Õ¿`}÷àx]á˜)ï6]èC²Æ“ü”°øgQš—Ü_ú! ú:îµl+0™mØ´‹µ:çý‚ž14Âp`^F U9Ì mx™Â¹æÁ‚ÂŽ>YIðœü V˜~,Í~¥\­=_»a£Hû:`m×CÔ4X—ïyr :móSöX S|dÅ%²'ÐäÑAÖy™ýâ~ËøðAïHê!¹´H8ÄÖkh'üLž™ÂGwÂBWÝFo-$ï: Hª[rUßóŒNK]!MB‰°:1ázˆÙ²å`É7€l—K·â‘,A;nøPtü6 Vw„å$±ýnX¸¸;ƒGþrr9fz¡ ’çÿ*ºÄtE#œE¤«¡õ•_ø¡,(RçPá OC’òeÇÀ¢×@HwÿúMç,r(*ÒZy×"Íq“ ÿï0÷ú¾"MÝeRþúGä^ý;¦ÍîüN ôЄ˜¥zÊ(yy¤ùüD• …îÜyLõ²éÕn{ïUƒV[Ÿ;fb3ndíV€1gÚZHcѰ® }Ëâ‚¢)?f¡!PJYš&)xå]§_¢%i¿‘>‘ÿ*Éó’^øÿ¹6/ÿ…Ö¨²ßÃäˆìµºÓŽû‘çlngPÁ„2wn‘¿Ì‚¤OâÖæ!­£TÐí˜yú39ÁÐÒÕzî×í¿|ޤ 7äB ‰ø£ë¥J󄨷œÆ8[“. 
Sgý¬ˆÚMh{3×&TÈ‹|ƒkaóºvLÁÐrÊ)ËŠIôSê¥?Ͳ 7ÔÙÒ)v,™äžDrš¨“LžU(*~ºSs8æ¶Ó w8Ç3J¸i· ¸òaõÕTyxÈ'äÍé¶Ú.Bm‹ÖÒ¯`p‘ì}u ±dîe"ê ÓGB-,0¨L|(|™Y—iõLôqç ¸²î¼(CêÝ„gÞ«&0}S”÷ʇHíÃ^­fr¼†—ÏG]Ü‹´IÛÊ_-`~¹ýM#,I§Ñ$Ç#Ç ‘_•#ÓtGmjž~Ëô¾¡¾od³ÿê TIÃø8Œˆü4Lý¡W²VV¿Þ= öloŠwPAOáòŽmÚ>¨|mc4ý- æî“Qän‰ù•èí•;Çýˆsè»À0¤£ñe:Îo‚lo‹Êæõ² §-ÉG“1FX?­ÀìwKA6J°½!™úqÒ=ÍL>=óÎ1ó ÞWWŠ=ä@æOý*aü¿aNú(}Ù A˜±Š®-'ñCî†Ê»fO¡ÆÁÛ Ì™–CfÒ){'`©Þ<ªÿÙ2]|!a¡^ ‹žÖ ƒÈ,Ç»#Å© ·‰J6ÄÚ‹SZažù»FKÉ}Sâ/¶ä!—WìÞï~Hæ™!‚áqÈIùKÊô’f^¶T†ÞÏ g"­OS´÷§¨!;m¼Ò‰Ôö{ï'ú—ÂòOËTÕçøk-|­¦ö¦h*"IT¥¾ÀqM䟉ºòUÙûÊ=ça@X¾7ìÒWd©¸g+0¢‡¤âGŠï†"‹ó©/c[Ä0 ƒ´Ï}xÔ¹Äa.ð`tÈÏD!Îo/|Öß<—£›_4ï|°½‚ ¦û£u¯õ“EÎMWg›÷v¦ŒA²f·›/nœƒ-çZ¹4md.]Ô>غCuŸ?½ŒÜRú,4/ wLp,áŒ8”;­k¦BÝ.öì¡ÐûTz2/‘€,uË_¬R`ÊðîCò°•½ÑñÌà6¬æßK² :MùËá^òÏáÿnT¹%tæû¤}ª«àæ\ƒ$Ýlí5ܧ€y±Iõ+/ò_¿z—«+:"ß;T…Ãh.sŽÒ‚ÔL_¼&¹)-|JWeùp,—êËÄWd]»´ÝÁ ËW:"t¥ÔaE«¾dOý(KYÒ hsé}…ÍÞÈœqXÑ:Ý9½w^~ºžà snÞr‰ dQ7H“̆µMßSþõÀÕw9lŠE Ù}ÜÕÅ»#UæžS»¶¡1À„¾fbü"0fÖá{˜°m“AJémfPC2¶¿ífFÄx9ÿ-û5$Èß1÷2©@r ÎÕkÏ*‘¬ÊP©ÐuÊJ?÷¾†ÉOÇLRFRǒܯÅ–'ÞùÔD*KÍ!á~Èil.¬òLO\Ÿnî¯Cr–q%faX7~l4ädK–‰ädcù@ðó˜<{Ésþ¨©@Æ‹\29ö‰°p,ä»ÉËÐi!“Åe‚”bÔØ¥%oÆd¾—èÜ€¹¢ý–n—ÞÁ®½¢;ºÀvó+»OÂÌF ðÞôÁlÝÛMªHã(_v4w2Ì\È÷]&ÆÇ~)È–&æÿ£#´éÐäõg€Õ¸ÉÚ*Åw÷*á”À¥½m0ø¤E,ÿ9 )(/>kp†9aÙEyVdpzÎFÔS•ºœÕe0§cRžµ€B—äMJv"cˆkPe£‘‡íHñæ‡Ö?aÏVÌb¡âOÕ— ÂR °*«F+†õ›Wê…f}¯º¹A÷,üV±®3r$îÇnÏEoñ5d~üêxLºÓMèVdÞÃïe·mHi£x–E–÷9µûúaè½T¾ôp9¶yú£ÂGÛ é#ç mƒ:òÝ0Qðš™\§Š‚x%aõ5CF¤Õ$­Î›xæœ Îz¬‘ª0;ÿN"Mäkž» ò2¦º—îõí(À‘ùžº0C¬œ_ñêsM†Þĵڠ§`Äÿú*[Ý(D×+³3܃7Õ+¿»‘¥ÿ^8«gK†Û~ž€k¬Ðs I¥„“ìI„%_ä­|, I ñkî1P06Á™)ºÚZÕlB[k‘åÁ¦í˜<2“'("ËÚ¤.Ïú÷ÓåfË}{“å¨fYØÿ’£‘€¾¢Ç¡kÏø¡‘,„M<Üùb~g¼çèFRï¤_kt HQDóQ-ù9¬î<úTIŒ/ÿ›GçïãÜü •ŠA,’—õÿ½f_óF\Y΀‘~ݰ]_W`ìçyV/"NGmd»|U¯É\“õƒTf!åØIèüRí{¨¸¶*òÊE+zaùµÞëó>.Ð:ÁëþðÀi¤wâ9eD\jƒ—Ù¾TÈçÚ)Ñ9ðʈFs÷E$QËy{Çì+îxe0þíŠ(¬WÌ ›•ë ç»´‡ü ”4½ì¤yôïMaøÜ&‚,|?E†º¹`!騻÷Ü0ïk2U%Ô„\òžGÚòÅC¹âNû½dd'ôœÉd\…Š“*^¬ÏYì¾@…Í ý¹šÃpû8ib°‡R‡;–Fd Û¶LÏÃh^wþ©°m]Nw\ÝVaŠãÀQ§TФÐÚ{Þæ6,VÑÆdg­2öîò7$¯½›lúÄ BmÚÒÛzaµý¸¶çBWŸšÃíd7ȺÚösåcLX·RZòþø‹ÃŒm]°e*›Àdù‰V•¨ÒMüø¢Í]Š”ïµ‹o&¹Áú,¹IL±Èç7¥A޳]&ÏN@‹þ Îôh$ÛÝrþõó—°åúòñžègп&l—Q ë# öƬA¡·«Òf÷ud¥-Ö‹©$Aú™kßs=O"ÃÖõgèg˸A ½’ÅÑÏÏ4ê!õõÆ'0Ô|‹Å܆{mj¢$F{íh?Òµ“^ÿQ¹äW_$¨xÛ2÷}ÜߘÃÊãd"ϬØkwëCRL X×ÊTe@cå¹4Ö -¨ŒÖßñÖ”YÔ´’F²2¡šÀ’7jdU|­æ²Ë?/\µR$.6²{ÆÓG!…ÁÃ^p0‚MÍUš ÕØ$IbÎJAÒ+ªVúo‘å× ú^CLP&«Ú<¦'êÊ(7…¤øš¥À‚´;xÿ6œ õb¸YA )YÙØ_ÇcôÙÚ–0ºTtoì?r¶œbð®ÎG>¯ôìƒuõ LìÐŒ,É©ÆeÁpA9Ï+x“µ_âsÝn#˘Ô'óåehçýZôæƒ;¬|SPIv@Ž=¾Î ÃåH¸›ÿ§[Ì èTýùJ‡´? ×ÖèP ï•·ZT!ômNýò#eFš‚‡/´"%ódã+Xã! šÍƒÂi‰±ýÈ|\kÖゲÛ)¼ÂâBdxiyƒÅâ‘Gßl´p(DÖ/÷3› Æ2ò|‘2èeÚ·²fˆóbyøúS$¬,¶m˜ªêC#É3ÍÅs¸ƒ{hŸ³Iò4„ÍJØí‚Pª^O^ )˜òÏ tT$®¯x:µïx~Vƒdp¯8,¨ÿçà –AäBnÁúÁ#úîK0ö€¦|‹Å :;ØÞãçä¼8§yš7†žËÞú2Û|Fï><;=Õ¥‘~Þ#«bÉ=me$³¸”tÔ¡™* 'g¤Ö ®t¿C óLeµ©`)½ï«]¨%,f`E~(cƒ,‡õWÂÔI:»V¨±^Šl‹ü´ÇÉ.#ͯ6Õ‰¶Vd}unQêÒ ¤§NŽà 3=G*n¹C¹ê@•zÌ:þw‘’(œ0³€¦G¾ÅÍŒ¯ÁÓ2'èwíSXؤOLÝ Kî¹[‹f©Ûwm5‰¼Ž‰pˆ)x­Ár¡•”¬f*ôˆÛÍDL@ÿm¥û[°:qý’Òn¤ò÷fr{ûæGéî|*­„:u.ú‹0SÄ, 佺¹Ç "ßaéhÜÁ ž°üñÃä¶£ÌO0µ(ñCM«óëüì èá.™ã$òS…ù©4ÍH1e£ü¤¢9IÄM ÷…gʤPÒEì®b’ÖŸa2É—a™‡MÓ­¯Voý=¦¿OÔIC¿G!»öâ ×ÿݯŠ|¶±‹•3D>›}†»‰¦æVô¨£À1êâMèúÆÌ”ð¯Ïy”δJ;ìrYë0úÓ»Â|¢¦ìžìTXÛWeýùP)Ò*&rD²®‹Á×¿ÃæM¥ýcòí°5á~=ç‹,½ü&çÆq)*øf…|'‘äÎKš “X˜Ó¦ÎIã ê£Ñ ;NøÀvƒ÷µ¹¤;sËí¨À!ˆâ1²b…­Ù&ûàòÿ)ý"Ö‰TÌ!¨]lƒŒ$mfO@£ãÖ’ä³P:»<Øà^ˆä;]~YFʘ­ª5^e䚥ó)ÉË'òŠO”²×nñø¿{Ø`¤…¿Ôt“¨Soßt*rC6É:ƒ¢äùJBDz¶†l²*çZ>Œ#›kìœÃ2ZVmé.¡J>¾¡ýHGÏÇÕ[Á×äÕ‘Í-…Ûâ}r7üCš¿s«K´2+_a&+šä;lUN§Ë¼Cr¡÷mRPçG³À“‹Tt”…5˶Ô]Ç™÷~н¡ßuÆÎOo»}¸Ž\Z}ïüÇ#‰qÙÇÕ‘ƒBF?ì"âã?ü—I´5#½?òŸ¶ –!ë cСýPïÇ[ûŠÚ’%åíZ]‘Íçˆ$kî/ØHæIùœx™¯­Gø•!iýÈ¢ƒG1Ìý,1Y|´™£‡ãÇnC#2É™ÓD¾‘sêãÓGÐÞüU¾\ )xÙ¤P–BÙûtrªâPd¿”ÀWÚù¹ÖÆÃÙhÁ NËáUXb4qt òΆ77ïÞ‚)×°Í@˜k_ëz¨¿ ÍÊû\ûkí`Uç\-Ø6"mîÓNÉÔ0LÝý¥Ô.)ërÚ•t%vÃèËkdù>ðw`ºnMì é}„ä*>nœbg‘~MDpè~0,„†óÔ¶GÂL8»›Rândn ºvfn¶Þ,§)#YGžtRÆYe¯©9"ÃŽ·µÍ«È+¶ª³ûM.²2ü×·õ ç˜ C ©—ór^.’9õ¼Z9¬¿úL¦2KÍýº ¯_Â\ÖÛñ:åãÁaýsÈvìÅä|aCšÜ.…Œ§.š‹ Q#WÛ¹íÒ*X2ýž®;žêÿûãÚ{ï‘™ ¥()S(•’–=#Jˆ"iI’"{%$I¶B$QHöÞ{ï½Ç÷êó{üüsÿpï}¼ßïûzó|¾Î9Ïç 5•<ÄuÔΠG! ‚’¢Žú‹'?GõúÀÉQøq"î8WŒ! 
ÝZ”™ƒ‘Rý§wC%‰•ƒòJ²KIE«}{ -{•˜v³?D2÷†ãƒ— D™Uaè#/ÔéQ·Ú —ÃÖ¯±£¢ßa8ääæ-)Q˜p𓵒݀5ß¾a{•Ÿ°U±ÛÚ¯I=~ËØ·²"W©c å7ØšÜ.Çĉb¡£ óHâ=d.›„Wæd9\ý0*UåçthúòíIb-×`½i¼QË—VÖߎÿHÓCŠvn¢zÈ'‘ÙÄý”ã+–tZ0»K&¸pë,j²Üô~Íý;%¾8Ú"Uâ9“}0Çhã—̆Ü®¦~? sß¶—ËUh½öàõä™P}”l/¹Æmè«<#nɃçhmmß?†ñHÙ·—‰÷%ëí2ÖxIz3®ÂÚ_ ÙI˜u2oú+í +s›QI)5°þÝX äOÓqK±SIZu®PŒÕÂÂÓ€(Ã'Ø´j ÿü¹5œF+,ë¡(É <ÏË ùŸ%U`4C*zÞG:%³06F/ ’i%ô/âûå‘q)ååñ|˜-ø/¯ÿP~jõé ¨N¿3 ƒ¥ŸcI¦ïá§zk™.Ì´mËõrÂ÷Ú➬F2Íp÷ÇqÛ×ÓóÈ`%š$Äþ¤:…ìÙÚÑ®¿‘‡o……#´Ì*ïP†–32ÇÓja°uì•ö\T¿ãµ»Eä•)û†¶´a«ÿ«éÆ:/r¥ܺux†B;ò?Dš‚oÞÓG ãâkë[aкôH‘ÕÁ %¼‡Ù/LuÀ|«O´R,Iv3ÌÊÒ¢Pm©ºœ÷ r8oËcZ!]Z‚;ßÇ:è<™ Ÿºw?rë¼»ª~t fJÇ›Û*K¡³M©˜/Ö‹øDÂå@UšÏu9:h ñ(½zÛÙ¢åüšÉîfÍZ¦sPy°îº i6]"+n)JD®Æ#Râ+Ð o!¯¬ýš´_p²´‡¹È¨s‡~‚9©æ+Átg OÌ)Î]¶9ÎiŠR+ÃÐ_Ó_ÅÊ|P¿ã©­ùÈ èp½-ÍøzeºÚß®·AMªzå@âO¤L'=µ³=9Gø8Ûsµó€Í‰ºo;‘ÄלCúU dE|š!°F&ÏÔœ¨?0 ²•¼t¾Ù¥ü=k„ä>᳇­»‘Py½ù¡´LnωÀæùyIŶå Õ©Oõ—]¼‘¶SXàÙÊSdxÏ̱c¼/\;*ÛØ[L×ÌwK¨ ù)îN­]H1þõžÐ,#le Y5²LÃTqMãÙuwLÏËÕÏBöý§—í‚T¡ëÒñp—*"»'ð<žÀ„$x¸'V ¹ºWh¬r^"å·þÏÐÜD¢Wí õ¤£ßç\9äÚÑšÓܳB6…®ìƒùYÙâ³ êð7ê_¡–[uLívÀˆŒ´³IØLÓn·/Ð#é'¥Å¶fd‰)<¤†Ôy_=%‡Ô{ÊSn‡ßE¶¨Þ=d}4ÈxF[-È®L¹ÿ9—ó;gÆSêÁrêÓ oÏùÝÍ´ó…0~µböú·ÓPtémömäyóžüê‡FX KpµÙ•3Ÿ2dgØêa<À\ê“Û",Ÿøq*næWÅi݄Յ‹ú¿$äa¢eïøç<:$ß?-Vý;‹/~‘yÕñÃíËù“°¹ÞxFŒÙ G'ݶV…¡y‘ô¼W\;’fÍ„çȼ@&öG_U®ÃgÆZ< [úƒ/úæáSWy€l R2ÌSV܇#é7F–á0ËëN¸"ĉtù4ß~ë-‹Ò©¿JܪHDÊ1+²$›H+è×W˜ßW.%;?„¾a߉JgaX?CŠvÝ\°¸ûYÞ)eoäz¹äéíÌÓMâ{ögBN´±©•.'lÚ_Þ½úÄIî~§iGnFf½àF¨àlxêª ­:\þa¹QÐSÑüjø$Ò¹Ò¼÷³)oöm7 "¡tÝÿÚ¤‘íIy`ÔØø™Žèœé[6±gË0ÿÂø3ë h¤f6Û ¯„Ù½4_ó0@_³ôéo|Š_P9ÚÔŸÛTŽf"-Þ·ûc«j¼_ÍàŽâù½ ^°9˜ ûÅV·Ûºf¿Aÿ{òYmáOHøÊ<=BW #Ú÷Ü®ñ#ë‚Ýœ©i9ñþxŠͼµ*a±s ùãlÈ t„aÜæñþãÉÈlÍpT2îRK9Öp}‘„¶ˆP¥ú[ÐÃÄ2¨sɭØh ·Ê‚ìùí‡7a˜ï¡i<»¤*tÒÀH×ó«â" Ñ4´-Oÿ6R¼xu­Íêi¢®ÓŸ·F§­…zÈù·…ø›G3óä‘nÿâeÿ‹Pï“™ŸÃP ч¶ k K91ð€ý!:’S¿|Ói”ö´Þ7„iÉ}-ä¡-eãOè=t|w‘LQ‡5¡ß ´^0é>¢#ä² ½Î݇™O 'ßO në/0q#éù«Öó°Ä'䛿å)r,ƒ!/™ Àb÷I±ø'°`äe1'c G\ßÕ;š!+•Ñ[Ǹ`b6¼î¾4E‘’vÄeÚð½D–Ók'õÛbaÚ鱿±,¤a8ÿÛhÙ¸Cß»^–Äʃ]‘^#(°âöu‹Bé­;.·‰ÁТ´Ï|<ô¸Y 0¶øÀæíCÏ2ÀrY#S<|Ë1Ð: ëöa϶®‰@¿Sû‹”Y$";Þdí¾é\4…-››8è ¥-a3—¡û`˜Õƒ÷̰pÌøc–ròhçNjªBò;m¡S»¦u‹ò¸é».h7ˆ@KÍñå¦Ê…°Ò{•ä{ 0Rq®kœfƒ>×CßÛmI!–lßc‡9(=ÿûA2% oÒ›8sGœË¢ï›B™“³¢t¥ ¤³¨Ìz=£‡Yò×Û{Äx$¶mнÝ÷ªsàÞ’œ®H°Š‚!‘Ôþœ:GX¡r/¶€Ù(‡!ú‘9XÖ.nd-þŒtsçÏÿ|Ú³ Åãö@û>å( FRXÝWöüüä˜ZY¹ò³Î©DZÔÚY¡×W<¢$ú&tìüfs ®/õÃäah¬nò², Ö®¯qx#µæÈVŸó+­¸hÈ÷CÖÂÒUÞÔ‡9µ‰*ï`UôUÍ3[8Jš=”ôM"SÜšû`vU48ó;=rhÿÓë†Eý-³*;˜ž×qøŽ š.3­@*ŽL‹‡õ‘ãÅog1/˜Ûªz¥&õ%u&Ÿw:€Tg#Ì×3ùЗ=…‰qOu™sÃÞÙn}¤AšÁ:QIQy˜¼Ëà”ã,S3,’FÒýÍf©î?¡­)ï…œc·úÒTtñCÉÆ¸‡Oa]"–¿š×úYN^"®·…ؤº“`>>t®;#;5ÃÑ-³jðÛ³,Ò©V„,$‰ íH…”Ê^f!û.÷¹agúG¨ù£·E®õZ/ ê³o@Å«è¢.ÿó0«üi»p Óü÷¦Ö>À2IÆNʺ™6Çܦa"t”ôÛCPõµvX•&þŽãuušgaªMiD%>¶ôo+®š {ÌÆHÿ>Fb‡=üaø±O‹¶…Ψ‰$»Bû×ñ gŠE`KCÿþ£0cè­‘ ¨RoEÒªš«Q§)!ƒæ-íõcÈöu»Á¯êOsW|Vý“—¸2k=á÷ã³gH.†‰Ÿr#œ°L¢Ó>&I­¤ÚkÊÍ‘Ho|s¨æe;è¿LÃÀN¡÷ò»È`]EäÒPg4 Ÿ —{¯NÄýÆÝ®Ï6‘̸Î,Èí¬ï^èµ-?‹׫C½¡&0*gJÌxÿ¡²¾C¶ÓK:Û^0›c^Þw‡æ=vÉ}NY‚†ˆ·C&çba‚Á1â>µÜñW3æ’¸‡ãä WÞ±R›ºföÆÒí¿¶šb¦^YÿtF†ï3Ýn/Á0kÿ«v0˜lêuhG=Ì\º F^H…tÿÊRÂÈ~à„‚Wæi$d‰ê:ñ…#óãít¬ õÙêôjô]ÐU{sŸÛCXL«ü³Á˜ ‹á-Œ«6%Ðh±·Èë|Òdètåv!Íš,ËÚÄ8ÐÚ@ûÇžŒœDøÝµTÞ&¿•¶ùj¹l77écd ”ö<¬ QŸÄV\ÉiO™ß³@¹ìíÓû`óÞI똰2“j×7iÈ€/c/H^ɉÃF}ãÌH^)l¸m·åyàåm{6Y{y¼ƒWxÖVêoÕKCá#ìTó?©a’Ç]çú¹ïÈrUGóÓÄ3>Uûð5TŠ(÷JSª!Aøßy-wW®­3é!Û÷Á'늰@¼m 3þvY ÝàÌÈsؘÖd¤&AÒ&ç’“ ïN6u˜ûy$üb—¢œ¢†±ó;U …8Ný| ÙŠï©É¯Âjè¿8‰$é‘z¿Wˆ|£©ªÆŒîäθ+zúj#½|á±zM ÜŸªWÜA¡XÀ]«¬Ldó¯—¤í¡@β†:Š¿³HPIsûusi>%Ç€®î¨ÎËJ¬Ó†5ãŽbžËæ0¸ç\9ó·C0*œ÷üÙR"T©þ½ð·Ìfšž`¿÷ i¶âÍåt\R‹ø0ªˆ<ÝF•Ïz cÇêW»ŽŒOÉí— _ç-ÕX'D’/ß‚Q”’Ÿ3¼d†Ïß÷bÌe|†êCêþN¹¡˜ÔöxŽ ¬…AùÛ„÷Hqÿüôf›,ÙØ²e²Œ!_iï›}S/áç§s3Õ&ÈœÀö~ÙÞé¯û/Œ}¢†­ñý÷#ÀoöòFÙµ H>÷]]â4"åÃ<¶ãâ0X˜~ŠÉ‹ ÃÍË}|âõeNÒ2 øNmrî^ñ9XYó½<çŠ,O=mÿÞX„ªAò˜š $KÙ÷¾ï~º¨SZVÃô%c³_qYȺ³¸UåiÌmròÎ;ÁìV“‡Ï$Ùý¤:,+Võ—q² Ä?$KE¾ßǦ¶šò SðåsÆ=ÃHæ;aéŠäuB<"¯ÁØ× 5ÂÏFX,ø,ÍËþioØ~”ñ¬CÊŸïû«!}AªaÓ ž§7ð.'æß•KJ_a(jÕÏ•ÿ ôÔù*þ•fFò³B‰A³BÐt,Þ#Ð%Y‚CUhTÔ‘ƒJd¡ò\#j]}̼¾\ß0÷KvÁÂË·^Ö`ˆJç¶aV1’è®j~3ƒT-rÂé,´–Ý^Ÿ¸°3º×¯ÑY²A=§­O“2xöëp¨"ï ÙÇ ãn°zÊÖ¹WÐÙ팻”†C‘„˜“ŒË¿°€àÓ_F'w>ÃÌ#{Í«F`yƒÙ¾=Ça­C®‹¼ã 
¤m2²|º.ñ«ÕAûPµ(Aÿúìåä™›Ç`™ÿSâáFXWàrÿ¤BdÑÊÌ‹¥§¡ïÉv»ù1ð"u hØèWÔJö†™œ Ï'¹1°11x´[O~§º þ>i‡¼ü…ïVº÷ ‰·Ý¹3òÉHàµ[`êEÒ"F“‡sðW¥0>¥˜/6+Ÿ\¾³ˆT– QóA$Ô—}‹m0@jþªû÷Aw¸è­ Ï»ˆq8C?çØXju0IwCá?dÏ 7F/G¼¬ÿŠÜ¼ÿô¿Ñè__ ,s·¸vTFj5Éê–è˜ÃÃDO2¨ž,–ظBy˜Þs<ä‡ñöEÙ3âH*¼egV²fs¿µÓ¸¤ÃGÌΧ>À”O–\?+Ñ"å§&Ã%J}$-'üö4„áß{ím0N}uõq¨®23yì ŒŽ¾ÛîÈIšF#<”+£©Nd·`¹ý_ݹWúÿ–°Aoé{ê ­vê$~«¢? šì$̃:ÿ¾{”T ±-#ti°þZAæÛÎ+ }}‚„G¬‡%g Âìãyr.t•꺑ÆÜB6³o1;;‘“ëÙö ÔòÚ³Ü4Ž€>+kéË˰ÑjÊéàs™Kë º=†`¸úMÙ©—úP{—û'=ôDîU}Ó~Ö®ñQèF–œ›¬Eš¢°@—TINnc;‚G'Ê †¥—Öñƒ3’³>ÿ ¥ÁÃNìÔa&0bÙ%%‡ ùôn¿ç#ù¹øÔ ¿7Èè4×e|N ÉvÚ^|»ÿÜP›l?.½}ä%¯—Èaú"×¶2’ÈŽçÔ ªB[ âûŸ½0ÿô”ð†KŠôm·GɼeìrˆT:RsYÖjÚÑ#3iO¬À‡V¤<°\p;r9D§ƒ“öÁÌ­æ‚e°>÷™§²S™*?+G¦Te¾N·f¨ê ³ÕzðéÉzèÔöGÁê–I`ЇÍJ·­ÕEH%TÃæ~ÓW™ŠO¾«„u£>•Ϋ¦È ;gI¦êiÔÑŒPWLÚŸc^MO‰qj5mC)©ÖœÉWÌÈ_Ÿ×«‚ _NÜÑêa¤1“Š„P'ýý{7¡×Šë‚d–¬rT|.ޤF¾Ø#—üÅéQÇ´ªyºFŠî>¥³†ºxÉIï}¤¸@¿«eR6»ÄòÅo"¡7LSãÑLèNZ>š(D®B¹2zI)¤”}g'¶÷’EÅ”rÁj«NˆpÙHnì0N µ3›ïA HþÂøíµ(þš®#à ÝÒŒKl¥rÈPzÅ+ê‚?ÒeW„>¸ž€ô•‚#Õ¬µÈ8É+fs¹;=‚X׉øŸý\@çiÜÅóµ^‰ŒT?—ýÅäü‰ño—É ñzíÿùVÂH‹ÒÊ—Zd°àä,ü&Ó²ßìµäíÅåÂ4]RÿHR±;Í<·ey=y ×"<_ÄÀ¼€èç$ûdL,[Œ’Í'a2£Uœ©0Ø©¤³€TEWÒsÖ™‰|^"dzC6žnËöEæû%íˆû^…“I~c&.ºMÊÓ†|.ÝòÏ"£Š±˜4ÅLë ^zæ ãnjQ®LrP[ý‘QÂK I]yLã)*þ£Çïx€yýócBo÷Àš÷?]¨á”b¹2\ãù]ó%ßCa³žÖpgÝShî× YéqGÑÜÞK'+g`¨M*ï컣ÐÌÈîàÖ·3JKïedùq$7xF–50eÎ )Äv~Ó‘*„é@™êŠ>$u˜ ý°†ÕꔩýÕ‘äÈñnA®èêØàX¿½u2ŠÎ MûÙôIQnC¬‘Ta/ uÆþîpV›EEÉ\Z©O0›þ2åÃYXn}òñêß°|¬óáò}Q˜¾ðú ûékö•¦öHù ¤{˜w¾Å úé^RgÁ²¦‹ùþ@$s¶•"kA.Ù}hÈË¢¿SMù: û(G !Ë%ýÆÐw˜í ‡DéÓÆÂ`Õé§ÒŽi¯¹<ÃM‘ ŸX?ÞßgƒTÂà“éE³ZÅÝ«®G ý#óë•7PtòÞó˜†Hžãp7(I vïä]\G¶¡¨æ{ëд¶ÒCưÞãa§`-üÛý¾Y$å|”ô©4y„n$°Pô"Çþ`)RfèSþç·‚¤L§dÈ9ÀÜmW-÷»P5.µÒ¹sI>S|»,¦ ‘µ'VKF‘­t5Pn EĬO¬Ü¯'ÆyߦK‡¡: É1\ jwûçu?N-!WwµÈ(=öüþ×8èže‰ðaG ïÁmB†,¿*.‘{ÕBÍ'}ö‹s' ˜C¾=õ·’<»ñáe‰(Ìœœ²%³„Ú>±;¡OýoÁÅŸÐÿ-¶›Ç#ª¸õÆÍ&¡Ê:›]{ÖâËû`>%³3ôѬ,yP1xs—½^ëf´ƒœ‘»ÀU˜~ýÖ†=ÕÆÂÕ;$Íb‘„çÓ¼Üt,ÒÑu’ÔdA{­MeçOd·¢L¸IÄ7¶¤.Œ¬â¶ÊëuQDÓ)P‰ÍfgFÑ€¦ÞžÅ%n¤V°:vôĂϘ&#ågvú Xe:µ†ÎgŦkêÃÈö}Ü¥Dñ;Ì„žzÂûæ"RÕy±3k+!³ÕËs.y— /“õY î L-gkŸ ¤^Ëñ'w{"Ó%]^IXŒ—ºßöJéwº;#û±#GB `ápfüQž—HÈSˆéÌË!âD7÷GNa>[A±e: /÷ •B…1ïl™² ]ÝàƒþG?òÓ€åµÏ+oÃÆaêWÊkHÚôÂÓ”.œ†Â¡ózÝ«z[¤KI£­ºÞ†LÏÒÿŒŸÊCö†eæSŽ0ýÎPE{Ábî÷tð0Ñ ÉÚü¡»=Äû¡–»:Ÿl‰"¯åÊ}¯ÁÌv¹`a²£xD§Ã²jLÓÞ A Ù£‘³î‹Þ=ìn6äH`½¸xŠ' F½·<»© … Ê«¸3ÈårA%w&2Hì> £Øcšöeÿ5䦙aXÈ„q½Ç¾[ÁbÈXD¹kàsŠ™=iב-CÊo–ïeÔXanÛý•ã!R'YW1ßGÚãúWO왇åE™ßƒ»`m¿ YÞd\XB¥N¤Í×Iy› kGuBvBw÷fß‹$Xz¤=³¿†)©cË4üçö„d;.6ã%IÃä÷¥Ð-c³ÓyÆÂÞÆŸñ„ɇB´ú&ÈûYz§Ë½¸3ÐäÜrš¬ËhçS’‚W­m£G“`X~øô§É&¨ò7¸yãŠñÿü“ö×Ã|¦thбp(¸7w|wa4R’HéÖt$Còµ‰³î‘#¬å\¼ç¡0øŸ ²éŒsÚÄÁDÅV$!ÍÉŽÄäSx%!a}²˜¯™Èg„þùÚÁR¬ÑWµõ?ÈJG˜rúÈd}‘uö,²û·Þ7È™‚¶ÙQ¡‡`.͆_éœ%té§µ§ÙA÷/6ÞÃ,*ÈŒñ¦)+Ȱ<$Üû± …,ßL|=$‹rq¢GˆyoßÝ£,[0Š»ÙkîyËNAiâ~˜Œ¼¿M¬: Vÿxf{xÃj§“ Ëä'$ëÍ!¯k?ýSûŸð{®!NÁëôoíù•0,o¥ÛÈ#ç¡Ç”jOÑÅ 0šµÇ¹™Iæ²Ò½Plñ±PÁë°–©-ÅBB@’<ýŽ _#k<Û’ô ˜1!\à ùƒìÕöüOï#!é–sF0h–ªHÌ­^ц™ãû`µ«¨ª’†ºªïß·Œ•±ýÒ0J·‡åÁGk$_ß[,ùé¶f*"7ÐJÉ<:nþ§Ã6¼º7oß);è¯ø2ýýø(’¾$9éØÞˆÌÒsFT—‘„2‘<Ζ~7 + …A¯ÈåUXK ª3c»­ž@~ýŒ—í§Nh$›¯Yñ´‚š?¿Ó4Ba]šú³ãØ4R}LyUiöFmc{f–I`v%ºÄüé˜õêýˆ„Ë€¾µ¤õú&›!;}á˦ÑCЕ~Bv3ÉIÚßÖ ý€¥å½³Y{aöóC£ò)Y N´X>؉¤ý¼87ȉŒ+õ¤w鸸âÊg´°ïôÄúí æ]VÝî ] Y½{i’¡—òúö2 .¹Sâ£o9¥}‰8w:ÛÌhævJóŽ)Õ Éô⽟dȤ·›×ã» ²Zº½Ó®†ÑS]Žö±’ò5ïæ.vdÔ®.'®úx^ÞK¹Š²ExÛ‰ñ,âÖBù;-$ãì>R„ ù¼n¦ªÈaËðÙwB*†ÍÜzìÎ@†ªä—î°2éÅ>ÄU « ÿ|Ë‘Wß!õ™ê.˜—Zs”v/AÒ´ßœ ¡vLýG4JAÁq*JoS˜ÜGQ •žß/*ªðIsxQ@ÂÙ=/oÇ!Uâéß'ÿ®§¼'vþº-’nj•Û¦äÂjÖd½á”ä"Ý¿w§k…¬Ò—‘ºùΟuØÒöÿ­rBVÝykÙ ‘^t«xÿ²Íqs‹åÌ#áó3SI˜¹f#“öë’×R×õs+#å¼ñï£_üaååƒK0oˆ'˜li ¿?¦Iwú( ¶%u9!ï¤Ü×>ñ0ç·îZ|L –¼4¢œŸîA†®Ç3¶aVdvÛ˜¹zÜîôI$BOìm{÷FFð<'óî'/ ¤|êãf-F!ég_-)‘y3¸£)‹o_’Óu~‡$íç÷Z›F‚Yq~`­9ø‹òi¶4FÙCÏ¥¼Êa¨ÏcjîȦL]xÒ„6Ÿßãzå·…œ›+ïÊ ï@‘RS-lp²M̽"CN~.ßWï!aõuÜjõ8ü!YÈøûgyÕ%ù½EÞð`3ùùHqÙõ²¤ãq˜ð‘£â"@¯àæðfZ4 ˜EG-kßBZ÷†r;Xô‚­² |H»^ç+ëüT•z:y¤É‹wÐèÎÁæylÈ×GZ=éÈ’uwXY¶,¹ñõüzS–>.A@ac¦÷ÒȦÁ#'³‚,ܾ·×ÆÞqûÉ3RFÈ{·"ßì²lŒsÈܺÝ®KÏ2¿yÂRõÅd‘ïa° ºláqF‹È§eŒÍÃ͉¼Ù²÷x7’Õn˯ìA6¥]W£`ãÑçý+š¾H? 
vö” lÝcJ;7¦ŠÌÍÃô¿DÃBŽ?zódoL>ðÔï/òV2‡&‚a.ÖvkWPóüÊQX³~Lffxy-´(Þ]œÆöwÇVã–8¥#ei(îþOßÙbÒ¼ó‡A®ñAûÜHÐ-ëD[俽•xò¦’?)ÔïÕA¶QÑËç‰ñ/ãï]ªÃHIõk[°VÚ6®6_…U•#™trC(@ó¬íÂ<#rüi%ñÿßûƒc(¬÷ß9ÉèkÓÎÙHS®JféK„‚ÏÆýt÷.óEŠ·ÿÍ?W¸ÎPZ¬À:¿¾å×XHÓe«ôµ†.B¾x 32d7×|¯¸ k5ÓJìÈ1¸-‡h~’&®…ïƒ-¥í² ÒÿómEj~Š{ ª…0tN|Næ«:4Ý>}ìÐ9X¹¶d‘H ›Z™k‚ÓÈѾ‘ÐÓ Æ­”t·®Ð°î1•LErÈ‘gà“ÐA:óGb“åµÈ¤•º˜t{)¾eÇUê€ a)Ui½d¤ßûÄ®ZbæcnÏ=¹·úeF„‰<>òÀ±™$-ä2`0»å'Šì!Û©¾0#Y¾l•E be J–‚:nä€\Ìï>%˹r6nÙ7Фïƒ!‡¯¹–ŽÈôtüÊ3'r¾r+.æÈH›ñ; l'Ì*ž7êºöšӶ޳Dª´ÀK9¹N0H²¯ƒ+2<"¥cª¡îIÕ¹6èŽÌuNª†…Áå$q‹Päì~ÿs¬ô¤¿¸JâÔ>‹<¶EÅþ•P]ÿº?góR>ø¯¿Žô»‘¨0 _Õ¿úr³™õñ usѶð?4O”G'çŽ!—Ú•ìÐÃ"°©’˜:wñÒ6›Æ1 ƒŸÔ‰×Zw`åF$aàçùeï`'-˜Üý#âPrŒM½ùñ%›šÈõfÛ›9h`ñÙá\úÔa(1;þÜûɬûO‰áì|óSßy˜yºîuqõ>ÒThlWô‘„¥Ù6䨨úÑ4S´Á‹Â»XŸC¡Ë÷½)µ»a©Íÿ‡9…²PkÏÒD* ð—¾"Âþ•³?ÐúR+RV|ÛtÉbÞ½r1îCа.¶%ä9¶ïû!䳫sÍ„ž¾ÓãûO”BÿïÁ:¯Ý0mÛ¤¬vY©8«ÜÈÓ‡…¨¹éMòGÐ"þÏ8Z­8Ï;’ÿ@šbûµw‚SÐÞLöb>)Ù/e:|),“ïÎΈ!Ÿ´TšÂ¬/’L¿²Ÿ‘£®ò§³Û¡¯óïoš/ÈxD­“uÔ6)Æœ÷ ò²ª ¨ ù#W¼ µe 2m¯•#}Á?D(­ž=kòè4rDP¯:{"oÙR¤î”"Ò”þÃá°¢´¯w±õ;Ì/²4°‡ŽAËtpAnº³Eê3g÷u3?ËÉ©lwÄ"3Çß~[—ŸÈ’|ëÓ¹“Dþuçì³!&¤Oú\IÕc:ñ|ô­HAw¯`÷FÒô”±RÝp…ùA†¿a‘Pt¥î^Oö¬ï:9æØ¶9ëw™:#—¦1ÿ»ÓY0ô|ÖG¼°ƒÈ¯^Ž&CgÖ­ýÉ’ü0¦%›²¦Û€œî?K/ŽÁì.ÏvÒyX0P*ͶBÁõá×Þ9ÎÁv2iöE“)Ç !uÏÉà 7èz|Ãâ^’|g²íò‡š#Î †X‡­ù÷¢îT¤ÍÝHÚá#H^¼¨¹;` fä±1ÅÃpÒ>\ƒ¯–ä¡|Ì5¤¸»ævžø"‹d—HÎ| ËGÖ"UII¥ƒæ0°OJ²²•ìv˜uûÁ ÿ¡ü ™ ˜ùQþ® –crkj‹éPôµIÖ§õXu¸X*:ýY=‡Îk_ŒGrwÓÊ=vÄü¢§$òNi™õ„c†ãËÈ¿Û$Ü‘~[ñÛAX¾ÿ´ÑaœˆwëGj .Â×þ;ÆýÞ‚°I¶m }¹¯Çrò d#ÕÍ‹%çdé¹üÚΉ§uÐ$sÝ®r õiÙw/È!ÉÅ*꡽°^ØÝsˆo¨Ê–º+aìù&Ç1[¤e\ôñulC²œÓ Zžðõr-\|pÉÞqÉðL¡àñŸ[l·¡ñÀ³…:{Èû½øn(’‡Q›TjÊ"S/k€–â}$“`_½FÍ€Â?ìØá¢8’ºí¬–Œî†Åÿ›Ï¤Ê!Âèšdha5kwò3iÈæÅB¤^ùÓí‡ì-’‡É¥_"áѲ@†­ ’Í{Н¼ª€ºtÚ-ŸưFû|o²¹ Ȫ¡ÀËÂèÈ\Sàòjo ,§SÛÿ6Y$þó§,I˜0Ÿ KÆíÜa»s`Ö¸C7Çkùž|v­ûtÙZùõ\t´`^–¸-F—aöµEÒ+eh?VvfHgÙÄn÷Û|"ƒn#Ëw¯Fú¦üÜ×dHrª”#iH&eæúòˆà‚[ªbžýð¤˸ctGivÎ+VuÅÃêG¢éT’”ý;7FÞ™Éw”İ}ÇÆ/¤¾ãª¢pL©÷åÄ®ñ#ãu<¢coŠÌ»þéÓ ¾XxAî)(Íu8¸­û~õT’R®:2Gµ¨Q\Ô€Ž*¡CЉ|uTDm—’ì5ÈX…-®Ûª7‚¡FZé¶ÈFlÝz,)FGĉßm¢xéùpß?î¢<´4æjëÉ¿÷ýVr´e8Tð³`“^Òˆe™’>lFê¯iÎÆS2°HV•Ï]߇¬\,ä5KHR8ûqËr4•žÖøtHù‹¢uIN#%Ï¿ù%äXûK*l‰$3ØÃE°þéZqª l8}“ åóBáîÇóµqD¾D±ú×'ýR1.hw0ÂðàeëÈœî÷àû  ´Úþì÷3 †ÑXfóº_×`šòoÕP*’.×é>)s\Iæ[ Äõ³Û&1E•˜çåo‘t7‘M¥+ÄÕ*……¤Æf®Àt¬§Ž¸¼ 4¬qgŸm$òI³†[ÉOàï¦`µëRÈ1é=}íS, ŸÏ.Ë:¬çÓzþ ^’˜t%ÆK…ÏÌ&ÜAH£_?!•‡d½­äE™BÈzòKV\@Š_Î=§’Ù‘ðþoNüU>¤nyÊ€º×R³à`ÞåTä û¯o±ç]7ÉG$¹øIßè¡ò_|]?ŸóÉ¿K޶6 Ãss¾>ï+ÈÐB„]мéø}Én–†^Ó–ÂŒŸM@=ŠßógWÑEÑ1^÷ÃȬuè‡×ä¤Èh‹×=Šä·/ š#?Gnrì/&wñ{Ñd„n7dùIóaºE7 šþ Lq¶v«Ûr%ºÈ!w5É;ÅóêU-X ‘»ú •k ÎÑšgúB—å ”w–G’«}“L*ˆñÿBñÏédŸÿ§Sˆ;ß:„}­¦C~ ©øùã²He4Óà 2îßcÜØjìÁ›®Îù}(â8Ï$‹;žp|¹3šKÞwˤ÷çˇÎï{‰8zËþȳHw5FáýË£ÈÚrâT¬Tr 5ÙŸ8š‹¼‘¬i¦P2Áá,EV5xÈ„Ù˾ËBæTE‚,ý0ô­º¨PsÅ#EÕÁ§;¼ý@¾÷áïs É·ó i?™`¡Åäô¾î[ξF +±^<ãØ‹/¼[ÐÕiÏ÷@/YŠj*uË#ÇÜn3­kÜqú`éÈ‘oª'uÌ}„LµV÷ÿ¾E’eIóûMúH±S3n€ô´ìyiEÕ]k ®vOãÃïæåOaf÷&(.©×Á‹HDïêA$‰¦Ü¨Hö¨:Õ“oj$‚Ï+þuG&6÷†ÞErdsÎ/Ýyö-2¯2„ª~-‚Zû_"O#{uäÕï–¯…ƒ:®` ¶ÿÍ Ðûú)jxyIïN{¹êB˜ÉVÙKXyT¾ññ¯"Ç^BÌÙ©":ÏY-!T wú6n®ÂÊßÃQ›90ž\°S„¬ YSóæY¾È!«´ÞüœþSd¾S±M wYÓ0Øœ©:ÄöìCkÝp-ê5˜{‘Ýö”ñÌ%ÚÌÈCFŸ[-~ ÿdÅDÌÃøiG2¿íßPUŒD²9ßmcuHoK|Ù#†ÂŒŽ£'$C‘oxûåBô¼l oBdF =ý³Ë0Vxÿ¨Éaôq=ñâÒ9™{*_)@º YƒÛQ0ñh€Zj™"Ærã øQÌõŸ.î¸5/‘4Š|OW·KP"͸ډWÓŠ5¤œw!éÜH¦ÙŽ#ÈdòŠ–ÇÐ Ü6jéK‡‘Ñ [Ãâ¤1LÝø×ç‹Ì_wÛ‡ij /ý½£÷žø#¯Èžµ}>HÇY<Ö{¤ ºëb¶†`™kíéí£ HùÃcëyO.²… o<]CÖé;~öÈ7¯x:~$i|¾­L—!{vþreÒ Øîÿ^hƒ\u§c³I|2@P«¥ø=Ë¿RJZÔ`nO'ä6¥#C¤®^d9,yd=2 ÚÁªMI™eáïŸ$ØE·y:g 3ï}yG ɭîAÊ/#´ÓóeÈ}¢××ý¶)’DÅ©_QFö—2¦f‘š'ë!YJ'Òy…ôP"’W„Ú^­F®+¯oZ%Ç!çרQŽôiÓhÙô‚˜O'­NÃ,ëúßÔª„|ÌH‘Mc/ëÑR:Ðnüóu$›pû"#u ™OÓEݾ³…í*BÞdz#gjº‡È(3ÌçÝÏ8£RŠÂùÝÌ¡Q£lŽé™Äû“¹¨hC>%–]ßo"Ýñw#SsÊHYíé©fx ¶ÌÏ>v÷ ‰É­­o~i@JMy†r;/ÒœÍW¹Ss²kÕá×l¶Ýf· %r©0öÎÔ"£H]É›Id_ó·9ÿÙ˜òyä2uêKxŽ"¿N‰k?€Îo·6Œ¯7ú†]F òn†\uþ…̬þbéÐs"{ϧÍd_õ?S¶e”ÜÞ—õâ‘J„TJcT¹J}bÆ.ñ ¬Œ±Ñqè”'ݲ>²-I•Öi€‰ܬ3§uúÒ3õ/¹ °êIAŸ¢8Ž$£OØ38n¡•ªA³ˆJþ›g ‡³u…H`ú7?€dÓBÙR{š‰qÿàM‡cÅÐÑ~ñCÁüC(¿4Îl8Û€"…o±ô2²^nq”|«¶’Ç- WºÏäZ©tEØôШ¢0×Ëþ'vÈÂ{cÕ6òRh¼!ý“€;[›š ˈxHŸí³‡L ­¹ùôë"õ£sû³s¦‘-bo-ßq0}j>õ²• I4G šÎ~†ù ]&«%Äëî‹w·€ÖMïÆ>MA$HøÐ‚ (²Ý±¯õ¸cMŠâƒ 
èšÒ¹kîæ‡$‚r´<ß"…SÍõ4&XRc·*©B²!ó{—ˆë!b}/Rt)Dl=ùˆ,l$>¿î#9£ñWQ%~ÑÑðÔ,º…4¼C÷ì7Å ÄuøÅÄnèÚE üëò\²ð÷a$ò«KM“–[ÈË“:yz :‡iæÿÚ<†1A¯­¹ dxû/ÀÊç»êÍ‹ñÈ—qŸóÖ%&$$_¸µ —”©Ý* ²˜Ÿiü>¤…ܬ'‰@¨&ož;gžƒ“|ãàGö:ÿ“¡Ç=`M&}"Xøåbê;Y Õæ5Bi‚È»Šx/¾A®|K+Q3/¨òn« KBòmõÒ}Hîš}½_ñ#ò¨Wõ§AÁVjÁô;°5ÔM/ŠŒw÷l *¡°`A:½– 0ÁÄ•– H&¼¶-ô óê¬}²þ;Pôºðç²hØ4,½Wç(‡ÌÔŽSg>p"ɮ͙>Ê¿ÐÏ!ñÈú?RsŒ½Þ›¡¿âT©çB‘‰+y[¡fv®òÒϸ €ÂN&7IØ8YË™° æWs…RÆÈ›™¾êŸâ³.‰;>…Ý[â÷5ÿĒªÌÕËï¬T‘TZû‚¨ ôì=|~·®=l%Xþ™X8Š$¥[Ý‹Íå]÷—¬˜cdäy{é¾\Í-Å«GÖ÷ú!éuÏrºÑСSêHgÒp¢\ÆŒæRý´‘t=ð”˜k4ä‡f¯ìÓF–Á»g÷Ü{„43Aªš'F01ÊüÉ~/òjDdìò AÚ°ªJKé°|î˜Äç£rÈVàðóÜÑ2˜áL~tõÖ<²×ó²¬lx"•TVqâ™dø¸}¤ŒB·¢Ùh£pçôÖ~篡•áÐî~qXYösürƒ€d Û´6²‘[°Ùǽ>,±3À²ê¿º!’VLÒg;ìDî`³'¡‘0OšjS*—t%ÒÏÎR%âöóÒ/#ˆíékKÈÉw$ûèl5‘/pES:˜#ÇõQžþd˜Q©kõÌIƒb¿w•‘ û‰ y‘yÈq+-‘úê’ß¹6LÙ¿Sé·xãËQ¼®•acÊÉLI_(çGê2}'t¡³ßâÑð+A˜a2ýÅëøæ›{D~ïDêº{Û/ȶ=F爔òv³®y¶âVêùad{YTzüläi 6>m‡^¾¦¤›ÈòQ1òÅ^ˈöFR£¾‹g4R`` k×=û¸kUå¸ÓíË0ª¡IØ$®û¹Æ)óÇ4ÈG¹âwa!¦özŒ2]lAòÿ|M‘åÉöŒ!’å\É+º‰”‡Gö¶ô!ËÔ\{;£Ò…ù   €&•mÁD$ýk.zû”¬nR§BÎýÒ!“HLUOÓ4 Ù/ìd43ùß}¾óçÿ«»P¨o4WA¡W¬[O"3A0j§;¬…™üžøò¹ƒÞ”Lb.²Ñ–´ þ†šÊ4œ äH}öªï^Ÿ—Ðþ:I?Œ¦ êOn]aTC2‹ÅŒo%‘ûÁÝm!wœ°¥½ßø é<ÏéhH¤À Ê¶œ»~v»½<Ø+‹ñ~ =(4Àwó‡‘çˆß¶°“4Aîa9i™ƒ(Úq¨9Ó$Y“ó'˜€‘Íãʅ±H‘–ziú„²>›¿$êï›zé’ÍžÐÿ8ïÕu¹ï0ëš!ùµÙ–ö‰ßPJƒåÝÛ²¤¶°‹–¦«E6Çàƒ¬tȘã-¼¯vR‘j¦Å>‡•ÍCÚz=‘²D~Ç%F(U½}HèC? ª¹.=ã ½>ûW$"–‘-uÚ¯bN étl~‘­¤"ázôüQRdâ¹áÑ|IÊ×Õ'¸#‘ÊèI;k;òéÜדOÍC ÃûûùˆxðIãýù˜ f»÷™†é͆kóª‘îë´‡¯7Lÿµ×ÉIù†\ŽŸÃð#ÆIñþ¹(äºîÄâˆÈÜŸOh—ö‡-™¦À¨ÃÈp’Ý5Xå ½Ý|®—<-ÏêÖ,aîð¿W$³ña^à-CR‰Ã]®¤q,;ÙýZé" Ÿ  (ÉM‹‡ƒmH–˾òRÍ E‚¶Ã-¶ÕzøsôÄþ­Ôþ®§&-Á‡åo«t§÷ŧ¤uÁÈA8žyÅÚÜLk(‚B`ÈèøÔµ¸`¤¤lÌhÔG£ñp÷ŒdFOTo#5©´D1X̼üp‹iYý]D?TDRo^çF$5®~]5~ÙéÉ||æ`Ô_µií +Ò«¤ugåG!®a-ËH¤ ð$9Ü ëùZóá±Èj¾ûóƒyk¤6|ðËÛ èg LÞŽTn‰=aßö ÙeZ†¨µ X,±¿é¯KŒïùЦµH“²Ö)uuÒWò¦‘êaö¶ð…Ÿ{â2 ©ß_Ÿ¼£Jäq»|ùO\…aY]ù+1§a‰Pýã]€12.:½¸¢½Œ‚“Tü—æ"!tÉtÏá $ß3_$I “»>i ·#Gg¸ú2‰¬Åȉœ|0€´KY翞¼ž>;Ã5£Þ|Û ùFÒ5—”C‘å«´žÛÿÚ;x›ª÷ÿŸë^3×½®Ë5_C2DΞOªµŠJ™+MæPR¦JD2…I‘)ÊPÉB† …22»æ)ó<ý¯röóxvÏíÃõíûûý¬×«ÖÙ{¯µ>ÏšÞëYkŸ{l$3,^^§Q¦]²ÀåÏ·Y‘^¬™´rC•rd®GcÒW¾ÿYYdà´ +Ž –鮜_qÿàD™õÙ®ëʵ{Nœì(YñLìþeƒ’¡Y°ü¦/ßm4V|»-Ü9#æÉ¸ñWN'«Ì4²ÞÂlúA™3>ïˆìíwÈŒ/÷æÊ´z2G¨på!£»‹S£§Ì¾OWñsˉçǯ])Ç—Ý–R²¶8zªÎgz—q Þ=20Kœo5#Ь\v™ïá,e»´üMÆUÙÑüÞèYbò/_tÙðÎûbGö [:þ2Rfl¹µÙ•Cdt;>Ð9uý­´gí´O˸‘?žõJO™õ`«cÕ%ŠÝƒßo–õ­2}ƒ{–ß!v­˜Ôu_‡s2g…W¿ÿ.±½@©?$¬‘q¯u¨ùðÉÉâàð¬kýd¥øvb¥-¬ì&cîiójÜ®Ú2gRH¿³í ™0,”çõù½eàË¿þýQqô»ýå':)Ž^.:©\ùA2¦ù°Ac? ÓeûûýëšÕSŒ‡îŠÛÞüªÃ€^Åù<½ZáëΩñC{[lY(3ŽÑŸ½ðìF¸·ãêLíÅÁ¬NÎÄ2/Èt¯v|(߬J2ÃìO÷üÒ&·LÖ?¼¿ëC9d\ç,­?«RM&žþmôúí—dTãÜöûWɤ:és~?KFøûû q]þØZö‘2ãè[TsRüôk­Â)Ã[Ëh£N½*Í'‹ËÛîí3ªwM1«ð[Ç,-#ã©ZüúY Ó¨¡¿ yWæÉõÖ¶jß~ ½[eìŸÇöˆ%;K ¾¸[f.|ïþAO·–y*Ïœ1fÕf™!þÁe÷ÿ"¶õx³èÓ'æËÀþÓ«ºå:!³MŸïXÁX™nÖO9ª—|FfùªÉcßéoÈ䘯*ÿÙ£¾,ôÅë]ó(ºm¼tîŽõMÅÑ%ѾðYQ3ðÇ#åÎ4–Ñ/ÜW¹V]ñNévõ:YFz5ë¦uudbÓ&?³Ü–9¶öø`ÜSOÈÄu³;ž>ÖX\9Y#Ëwïï“—ÿú=Cq²üÞ]]™!³Uß(k®k&Î×ïYºLÕi2v|ûcËwýžê‡ŽXðöÃA™çÛ˜cë+×yz®î´gÚש×Yò§|÷±L0ãöN\ÙH\ˆ¹ê†—‡µþatÞӲБ…Éu¶o‘Ó›­Ú¯“q1/¼8ï=5ç×?¿¬Ð\Æ7>õg¹‡Ë„Üs¿o´v–LŸoBñoÏ“I]'}ðÄP»ë³»òÇ]’ÑÇ2=ùQâR±³~ß6‰íŸ— gÿèqäµâÀÞ†F§>ùeÂ`ë§/¿,³·ê8îð¯mÄîáå_ú¶ÎBÓðò-!sní“wLâI±oÏÎ1µ§ö•QToÝehª¿µÔN÷[ßú2CB®JO¾Kæýb¶'׬¿OX>Òúø'™kÐC­«~j‰û­½eì™ó»LÅf-#Ž'=ünå²húÄUçžy]Fÿûï·Îg 6HÝAËüUÛÏ:5×™'6¬ükÛ²bÌ€¡çZW’Ñ'Ç/[XñC™°ì®ªý‹/MÕ]Ÿå¾ÖcĉSænª:_웵<÷¹yŸË¼æÕŸCh sô¼3y]èYqrõ/Çë-e†vó'ù?{K·½ëlþâtµW¸Œq1)áøEeüýï3d¼¸É^2d¿Œm·oÑï¹;NݽºË }2±Ô_ß'Ë>üÀ¦ú¡_þPìÏü«e`x¹¸ïº•g²›9|é6qé«Kvø“8[òï¿ï‹z¢É÷÷Ý_]&¯îquá“9Vls"÷N™iPßû›UÜ,vîÌÝ:úd@Ÿ1w[ç.!±dò}±kËI™wG¡¾S£ªˆ#½B-Êß"s7ï~õôåI™ºgžû™Ø¶¿léIïÌ•I«ÿø}ÉÕÏþ·|{ÌZ#RçÝÏoµoS\æ«õ÷9}–…Wn¯LÞ~)ý³º§öóô¨FßT—kÙoXÊZ±õ݄ߊ—ûõ¯>pV±9ý{£§t°Ä…«¿â“aŒØRõñy%¯Œ#õuŸ)[Rf›²àÊðÉAq¦ä‡Õ^í”_Æ™ðKü¢ñ2~íýÁ§ç‹óå¨IsŸ'²}úãåʭŹE½÷¶ýs®,Ræø¬*ÃßÛsv8v%k#™³Oé"fL|úØœB2qr¹WÞNØ,c_Ì4 Ýô2{ÕWj,z혌ӫÉÖ#YëœråþP™µÔ=Ÿ>u^\© ——hž,ãG)2eÃݲPŸ¥Wζ©$ΞÊPfà]²ø®2±£ºµ‘yr×¾sD×{dÑÆ}ÅýÑ¯ÊØß_ÿõÐÙY2ö£A3J£Ãïç_;+Î?Vzäì7•±Ú©O†Ë#“ªô}ódtó“OOª>W|z½55_[ÛæêqlCyGîÜg[dî-.^Õ Ö^%Ž}›ù›Ë_ÉB¿¥ŸÓ=¥¬Ì]çÁŒ)US÷Ë×åj[æ1™>ùÁ^÷,í. 
×ïð^Ǥ/Ä©; gúcA™í·~Á¯«´—ISl˜æ<#v'üdp…übÏ™‚û«dÈ,.$<°ö¦óâÀÇ:¹ººŒoy¬ð€—Þ–¹ôUW7\b{Ý•} güFÚ“êf ØžÊÅ Õ²T›ë¼8V—‰­//Ê·¹¸ÌÐ4ÇÕ(Ml߸¥AÞ‘MdÉû¾p¸Ë(±«bºn³w—‡·¬~rìŽÍ²ÀòßWLX6BœÞ÷m׆-ï–ÙV¶þªÑ/oÈÄQ½†ºÔ–±5“'.-ÿ‹Ì3hĵFÌ”±Ö=Ïx¡·ÌÐ÷çá1 ›ˆ«:ö²†,Ç·|²ôÀ•9"å™ÒÃvÖ©žê§þà´ž©~r¦gE?qXÞ‘o÷¬ž›Êˆ£Y^k<ªEe±uî+ ,œ'³nês¤ïASf¯Ô \¹Åò‡ÚXûשbã¯ïïyòkqñÉ¢ë«%.°Ú<5ð¨ŒÑž-š/ý2¿ÖxZꌒÅÅš»>7DƽÕw`òɼ_gü‰=$uqêÔéFʤ¯³dØ%»85ôêÏ#¼)ÓOÐíÂç£eBÿ åJm=#›?u¯¸Ø{†uxõz™)¹ÂMõ 2úîìY»Þ÷ŽÌÚ­øÑ7Ÿ¸"ã[Lùów³†˜ü}»¹óóN—Yf~ôìÑö…ÅÅ2ÿ>ꉡõ[7/4NÌ(·³ûø.2îó3ßQQæl÷ìÖ /f§“^ݳ¬T‚LÜrî©«‹ywoK\12^ÿãçZõ•é”9}yEg±·b“ó-ž(.½w¤a±,‹dTŸb{ç$s%´[÷ÐñZb[ί†wÞ4N¯—üÍ©WƒbWÝ uæuz_ü4Ïùô|³dñãÆÁ!yZÉ — ZÕï’Œß\ñ‹Wïʤ/3íú*Ç.Ûýå™v:#¶Ì)ÛÐÙxoj»|º¶ÆÅªb]ÇBÕFŒ¬$Î>Ún÷ó-~Në½ñ¹MeÞ÷~,0*¹ƒX6-sJ׫ĹèÏ4X* 6Ÿ›.ÓŸ³DÊóíê&5锺¿Ÿúz³£dúÙý®•ŒË]¤×žGÄåþÅ;fî3D擽·¼?µß¾iµ<áä÷qì÷o•l-Þ[6¼Û®2ÑÙØýø‰qbãžVÍ…¾XfÿýOù³..ì~£FñióeúêmçþÜéqåôšöТ÷d¾& l}6IÆÿ0F{cÅJ™ë⌦sÞIœ,õcõ¥¿ü)‹^ª|ç÷—Åï ×þ6z—ŒZ»èÍNmGŠ5O.YÙ*O™ycû¥9ì¯Äºù•fÚ#£36­ßù« b_Î[»OÞ$s}Ú?w¯ ]eþ/§f¨Yêèúd\:¨\úLñþwÉÀûNF-¼(?òÍ ÛeTµ•_Œhð´ÌÈÝ(im6±÷¥¬å·ë/Ö/•ƒ>{î˜Ì=$ù‰ Ï“ñJå];ûq¶Ö½–7\!£â—M*2´¾¸0bÊôjô”Içæ™so%N4›Zs\Å®â|á?V+\YæËqϯ˜"ÓÝß'ùéœCe®ÜQ/ÅýZK v»û7±wÊ©vÓ_Ü'Îïùë}ºøñ¥ÂÓן­.s=5þéõ³Š£—æÿQ.¦°8´´þÖÇï\%Î\¾{Ò¢©½d‘¤Cƒæ}HfœŸÐ2÷Šêbe•+ýš??H&œ{;iךÚ2¾÷ìÇ×,n!¶,×26.Ú\¯U·Õ¨á‰}Ãó¶´¢Oˆ“õìèÏ;*–-¹óªc).Õ{¥Wµ:™$öܾ©êàgÅùµY^¿òK!ÿÎßßgÛa\ÒbÌ.±«T³lµF6Çò†?Ø^ìN‰ïTúì¥T?¢õæ¬å‚2fPT‰ÖßÄË@…<•óY(º;ûÎÔbåd¹)WëÓ¹š8x¼†œùôS2cÅWÊN«tLF;ç4ù<¿L?7~kµßŸ…¾U}ó%d ݉×jEO•¹KO¿”íîâXñ‚M¬íȸƒ­wè Ûç­l?mHÝ?ΫݣEüa±¿ß¬¦W8!V½÷À¡Þ]Å¡ô{³Ü3ï5±¿ÿ£µ62dÆèÏÛê»É„¡wN÷Ľ2ﬕÛWÉ ý°[•‘±9²Nì}p¬,Ú´q~­O+YäáÞù£Ñw2Ó'w^ýýÔy»¿eó˜/eàÀg_~Tk›L,v~ôˆ¯ÿ;~=½¤J_)z5ÌùìGm»‹mÓWL¨úsqàU‘mÓݳĸ¯fgœ²j…L|À¸0^f˜“!Ç®7ÈrÏgéÒ÷§elþÊ_µx«½87¦qÙ±é±pȲ#æ—yíÎM^øz¿Ì±fw³å¥ú£jn-ôäf1~MåuŸ×l!3ÛzB\.(³œéî7Z¤®‡CÖäÍS²‚L¨Qù¨¾±¨ÌóðÂéeê$‰=±_¼wßäx™¼gtùãqµdRBÁ·íkgžŸ]åÜèTÿ¤nÞ}™ÈlL:yå¸ÌQuäŠ?ºOæØ,³ŒêÜá÷bSŹv¿U|~UqæÔŒ÷VŽ;/Æ/É\ymLu™;qí‘I—RýËÓùú/¿¶Ìü辦=^'ŽT(ýGëËt?]jôr¿ÏebÍ'ꕱ³ØÕûÃ…wV§wþ~glƒ2k®Fú›&3u]¿édÏK²Xþ±w´©'‹gÉÒæLåQ2ëOKè_Gl¬u Û¹~#e Soø>‡ŒoÓ2.oÍDÊô‡äªõÕdüªÝ— ¿yT\ì×±ÑãßhâÊœ7¯þ0Œûù«u†Þ'v5Öªå÷ËL©Þz±]YÅ÷oßS.K“¡ââþŸú/•ºï*ùüÙ÷óÿ ó{3xê¡2¡MŸÓîÝ, u6°Ø³kdrÙõyëîÏ/cfnÈ·uÜ`qöÐw»^ŸyI\ ô ä*rZÆÏþhÔÎ\¶Lúnï²èZ¯‰ãGþú1Y´ÅVKÝçÌž÷–5¥’˜²"ßÞo+!sv+~IÝd™«èÇ)w£d iæW?YòÌø[ï–ÕîúNìŽz¬÷¡;{È¢”ÌôÌ/²H‘œ5sæï#Rç¿ú‡yâÀ í±º?u¿æYgv}@WžÿþÓwGî’‰Û?ª½~NŒî·²F—ÙÄÑç7ì¬ÿnq ëÏ]V[fiqpa¦ ‡eæ;ŸZŸõ Ì|ô‹æ“Û¾#c;Åÿñp½52¡i·-¯L‘²ÄéémÞhû‚¸x¶äÐR³ȲÉþAJ™§ó3}ò×$ãŠÔÍúEõIâ|þ’“о–_–~»ÒÕ/`ˆ#m&Û?H/cžx58´ÛÛ©í³wTãæ–Ì{ôž?sž’º<['nM?([êýV drbJéëDÊ0³]½Ÿw‹c[?_}YX2óîú½È+ã«ç}u^ö·d W‹ ÉÃÊÉü×_ý ™+ÇŠ¢ûÖ‘Lß\¼³lK±¶Çù77îýD\¸´³ø¥=UÄΖ—ôã¿Í”…ú6é¤=ý‹Ì7Rçé/-ª½.»uZ.Ž6ì²x_…92¶Ôßÿ®lô\x ös2þD¦?hÜL¬ü}~óE­ˆsFYQ¾¶LÚ–mç}5†ˆÓåGÔ^Ø©LŸ)ýÕ?ç~»³çùTÝ5°Ù¯j2Ûôª—¶}ô„Ì=løŽõ§—Êl_8;o©-óY)æaŸÈÄœe¿m2u†L®X'¶zµ²ð©Å“§å­)³½¾wÈÄ—ú‹]kžKq>]!v>ñLJ_ßS_F÷;ùèþ'eÆ3SÆÜ5d ,f-mÚîím2þá¾ñdžd“Ù*þ¸cþå<âtÃÙo×8òƒL|8jk‰Ë,ƑѶH–—æ~Ü­ñí¼}b]‰œâ©âµÛÚ,öÚ9ó‹tÑ2þP‰ËÖ2aËÕ?Kh+N¶¨;4uE•ñkÞÞþÞ‘©2Þ®>ÚñÅî½³Mì_X;ÐcÔ+O“Q²ç·}«”3W^ùáé_ÇÊü_gi9pé*qtøÓ¿úæ±ñô/O”?’"ãf¦¿R°ë1™ôaâ¡UÕd|±Ó¾éÕX&\š¹ñ™¨Þ²p©GÛ·\RœzhùÇË”‘wd\Þö¥ÍdTÝ% §.Ë ½ ÖéøÌ'bùk%F¿³ª«˜?ùBÏÁ“eÆ‚cç~/ÕÏû6Cw?^#ær¦—ýð@™7o£²Kçí–jn]ÿác‡eº-ûö7Jß[,βuв O‰í1û)1)Iß1u×ð7'È„è¿~KæêôÕ»Æ}.ó4Ñ'Ôè0_Æ–ŸèôËQYœšô×ïÃÊ„Ñ'~~>߃2G¦çû•«QUœúã”%‰½Äîs©«Z»âb»ES&òQj»åYôYJ1™x¥S¦æõ³§¶SáþûÄQÏ›­·5-+ãGØk¾ÚUXæb¥o÷Þ"‘R}×ù–‰£S÷3¯ {xY%±¯ôø`|¥32×¼Ë'œxNf‹Ù9êB³ƒ2>ªW³˽!ïzîïÏ+õ×ú&‹h—Ï̯¸Ef˜—½Lƒo†Èœ[ï?þnîŽ2×j3akt]q¹D­~ž$.l8>áë½µeÂÜêæbg¿8š}QR½ÝCDÇÒ…ÏΣ‰•uþH/žÎ$ çÙS7ËòdŽO÷J”ÅßZiÚySyÖþÒ8ë`uqôý饯~jË<]*ƾÖû=1çÉ)ÃGWœ%3Oû9aÈÞbɆõò4k(ÓoÈu¥e™»ØÓùî\ÒFœðõä15Ÿ)ïÖé?¾Ë|qªR✉%òÊœ/,‰N˜ôƒÌ¼¸Ð¾¢©ûÇå{ŽX?JfŽëÓî@³Ôu5c«“¼(Ö>Z{X…ÐlqtÆ®•2½¦M»X9x@üY)É)_°”Ì»ãF©­Ì¦_}½ÖRغ¯ç}í»…ÿ®-p-ˆ£×Çô~¤AUžª\•´œHíæžsåÞ¨^¤åÝlÌéªìò›+GU¾Êޛͯ²Ão¹ªt7k_¤óéVÏK¿å§U}ü–çwüúÍ£å¨Òû½³¼Ñyy£\üOqJÒzÞ¥U¾HÓqé#µSU^Zµ—_´âÓ­â°ßt·ºßn•}î7ýÍ–ë·œÿ4OþSéUùoô9 iÅS¿éÓº¾ª|ªzGjÇÍΛ´*?­y 
ºŸV¼¹Ñö¹Q;nõ¸ô[•=‘¶K¤÷Óz>ùå‚*D:~è}.ÝÍrЯ½7Úi=noÕøM«qx£å§5‡Óêy¤v¦5¯n4Ü(¯"½ŸVãV"í'•N¤ëa¤ùÓjÞøµ+­òÝì:z³ýi¸Ùù«²ãf¹•ÖëÕÖ¦‹TçV¯?*;ý–Ë¥÷[®ßrü†[5ÎTöÝìxò›þfûéfu¸k¿÷ot©Òß(Ój<Þèz{£óåVó0Rûo´Ü[=oÕ:p«Ú?­øz«ëÁéùµËïý´âÉ­âpZq:­ìýOÙ©n¤vùMï×Õ}U9\¸YΧÕ8Œ´\.½ßrnU»¦õ8ŒÔ¾m/îyZÓÿ4gþm>úÕ£énÔÎHóÑÖåßl?qz‘Ú¡*ǯ¥“Öú*´ÖK«ñt«æÕ­Ò¹Ùú¤Uÿr×*ûTéüæOëñ¨ÒM«~P…HÛýfçE¤œð«ï·¼[ͳµÿVsÂo>¿åÒt~¯UéÒJçfç)n57Tבò-R{U!RùͯһYîø-Ÿ{~³öDÊãÕãÊ»Õüäìôþ­ž·i•ïFÛ5R½Hç»ßkNÏoù~õÿ­õ1­ÆiZ•w³íà·Üåî¿ÕÞ7kG¤ù¸ôªr8;U÷Ójœ£pÝßµÉc±ÿʉí)Ô_û.'Ó?§§×a{=ºä¾_ÝpPõÕáìàò±q¦.Ïo~ξp¹J}Eyô¾Ê.ÏsjG&æþ ÆbSOS>šVÎÕ*4åÞËïǹ×d\Qû<ö†ËUŒsÕaÆ;Wn¤vqúîý8rMu.¿rìùZU'ºãóÌ€>ö|ß³“ÓwËgúÓ§åøÕ¡íÃåãbw~ï씵àÁÇ[zÊÉT`¡:u†¹÷ã˜(çÿŸŸÍç/¿Œþ9ó[]Þ-Ì•ã)7Âçªô~ué5תt‘®ÿªça7fÚÙcoy>vWÒ3“Ow’Î/ÿâèuÊÆ›åUæct8;U1µ;Øö¢úŠõÞ÷èú}î“«áXÅ ÷>W®O¿Å½¦ó7ðÏÓ—éë¼”oÓãŸR;iÌ•#.=2iꇫòqA;^¬Û€EIÊuˆ›/Û7ï*Ò©d·¼-ý×E•õçóÜ/ïÃ1Ç}ΞHÛ›}N¹öNtÌXogâ.¿<¥A•žêÑrX{}ö§ïû ÷];¢Î.ü¼þ¶ œ}ªkN7Òõ˜+WyŸÎ»s_Niy¶ý¿å¸1Üúá¸äì¤öÐöâøÁ¥§Ï¹û4¸éi¿Ñqs©Ù³E*><––#¶µY]ø‘¤×%O¾Ó‹‹ÑíS¦½ôgW>W/öü>Â÷…*;ÄÁÍ;8÷kÏH×Q.'vv|{M¡Ó~¿/«*³GuŸ}îs>)×iÅy˜'ª2îû¹GÏV…èsOùÜ5¹ïû{ÈŠ}°›ÎcoËÕç³w˜îÛN¿þ¡"öœÇÑúú|/AƒrŸî÷¼E±NQ=Õ÷ô¹ò9}¿óÊïy# ‘~/‡OzŸíê÷=§ãwbãHǯj¾GøþBéßxæýA½aÙüóUåråÑà×ÿuŸ+Æ;ÍÏ~Jå'3ÜUÚCtØòI̧›Ô¯i÷Öü÷T8?Ö'×#>—öË=æ¹ßûÊ}‚âý=ËkŸ÷=Ï9»ý¾Œð½e¤<0éèý}¿ã÷{.\¹7|sƒÏ¹tá ªÏͶàÚßµùý;7?µ'‰èåbò]»ïñÃ÷½öý³ÞµXl<š±ñëÕ‡¹ü —“ôÏåÓüû¸÷+Ì÷8XN‡íI"×Ôî{¹®/—æçï˜Ñ°ïìœ4=÷=÷:]µj)³ŠÖgŸçNW<:WÏ‘ž“º×áú†ËãúCq®ËÏËŒMI9<òÖ¯&ííY_¹÷ŽÜúĬˮµ›öÓ±^•O6ù$‹;?ÎW:óæŒšß}~2¥Úý5Ú6sëAtið´+oœØT®ÿ ´¯½[.3¾Øüt2û6d†âº|…§}“HÌ´¯[Ñ÷Œ_ÒÞžç ¯¨Ýsww/ÌI¿ÝMWàäWŸ­zæm·¼ôcšÏzæÞØúpþ£ÇS÷¹ÂßfÓÑúrãŒò™±ÃÃ:îNÒvbËåÖYE:ºÞº÷þÑkå:ά4ˆË[Þ7§WžÌ­ƒÜ¸uÓ1íIƒkµ‹Œ7=åW_Út\ÐùJÚ}/ÎñÊG5ÊÉîeªÑq噿±† ûbðï½ðsŽ*;hÌðHì¯xoáAÇ[»írêÍ”èAßï¦Ï)w‰)ÇË{ú‡ö+±—†¹ÇóΩºþÑ’26îð‚ÙO §÷Œf<ʹæúSµ¯Pñš/~ÏÍ<õáü=jm_ÎNŽË Øó^ªOìçν<ó&üœ™áÀî)ßɺáɧ§믤×tÝVð˜ÕçüÆï¡í¨ZG\;¨=î89ÒUn),wé<#œâöS칉¢¾4°ó6çGϵŸ;G"1Ç6é?åzóh½B¿þÒе3\NÖoì¹²"µÛMÇì/Ýç4æÞP]f¿Ážw^»ÿSLJé1GÍï>CõވݟQû™ö¦:Ô^Õ¾Šåg'·O¥ãˆ±—ã;»¿÷ë„Ósû+fÝäÆ{N˥ϼRù‡®Ý\y~×wnþf™óõÓ¯í±ƒy¯HëçÚÇÙÅðÝÍ ×´}¸þfüMZ®'ø~†W–6é‹êÜ~TéwQ{.qï—e¶Á½ \ý†Ç.Æ?õ<¿vÍúŸœ=´Ÿ™z¸éý¶+£œnz…ýìø¦éh{3~…òœžyoľ'¡þ3nÝôëï+ÏÃ1sN¦Úç*ÏQçŸ4(÷OÜ<æú©·G—ó_h:NŸÛ©êËÝçúMåq\˾<}tÛ+=<ëN8ÿñzÁ¿·­ù_Èx1f©Ìô[›­'wRé³ãPq.ã9/ºØjãør Øy§xïÉžW3ç¢ìùlÂæÉb\ß·Ür˜q.ŽŽöb}µ‡ë'f]óÔCõÞ‹Ûw0þ{>ÇùÕ´Ì|f×ÎoãÞƒPÞ…õ‰n€Üg÷3´<î<[µžù}Å{„Ÿsþ £G/*?ÊS¾Ïó&v= ÇÙ”cý0Õ9+·O¡íøçÀžÃ,›ë×<ž«‘r•ç"”Çt|§{n–ì÷þv¿—kâW[ÅÜç{¹¥Ú…8™ mð‡¿ÿšåå9÷ÆÕèɉsò½µ§WVÙÉמøµûÒMuÆzÎ5Žå[;ø³æOÒ|Ü>Ç“Žá¯û<¡î€”_ôºâ|¶\ãzÚ’Ú¥:7b·Y¾rvqþ‰Ùó f½¥é¨2@îsûIšŽŒs÷~æÅ¥2ôîqIåï²çÓ?³nÙ1sÈÐ"l»…ó]ì]ᑵöy¸ŽcÚ-¯ÿàC§ÝrñꜟÜwÓQ{¹u”9‡sËQí·9?˜Øç–ç×/ãüÝô‹†æ]úÔvv¥Z§¸rI~ö½)m‡ô‡º;_£…ß÷€®žbÆ…°Ý®góö)R~uyn^‰õ曟}ÿ­¹;,صuÙªý!ëO3±Ç>îHá_¹ºvôzè;a‡¯çîù*YÜ3î5v}dΫ9޲ߣ õrŸ+Æ“§ákÕ{aR.WÎOsŸsΔ촬OˆõS¹uŒÛ¯pç&Üùb?Ì–ËÔ‡-Ÿãˆ«?jùÖÂ1ã$xíLhóyÿ¬£Øu–;¦×̾–æcùÜÿ³ó–±—õ¸÷MªóësÍ—2~‘Çnî}Bø:çû¹Ò%v„Y'Xž1ã[œ_šaøÛS^”4=­‡êœ‰ÔÇS?Rž‡Ü{ŸïµT~˜’9j¶š»ªÂ"nœxÊåö…Šss¤ÿ×ßµyÊUÅ\=¯Åt<ßûí•+'óôMÛÞ_‡c¶œÀ?§gýhb/ÍGõ<å2ùTé9®péhy~í¡åsúìþŽÄžrÉúCíâê§<·cúÝo}<öÒqEòqv©Ú™«7—Ï3~LÌ­ŒW®rÜÑû>÷ý~Ç‹ÇR/v<Upåøæ±Óoûº±bxìPpY©O×)ŸÜUµW¤íKíçÆÇ'/˜z(Ï¡H:®Þ4=WOvÜþÿq¤ˆ”ÛʘYG8;UûNOzF‡¶›ŸîW×—£âjr~WÿHÇ1ËA†×ܼRí[Yn+üOz®^ªõ•ɧš_\ÿøå §¯*‡®ï,¯H:–'\û0Üfצ\=•~c—r}\­â±ªÜxðËuÕ¸õ머Nï³ïÁ™~ôôÉÏrDñÜ·Á­ó*Î2õó\3ó†=_bêå™*?@òÑkE?©ü ¿Rg.ö;”ëW¯ð}Ò~ýv='ýÍ­CÜßþ¬‚cl¿¨Î×Xy®À¬+ª}„ßõ7ÒþV'Õ>…+_5T±êý;^8¿ŸÔ³W¹^2ùTíâ—»JPqÎí—~¹ªª}×?žöbüyû岪Ô~•ݪyèwÝöë7²út|Gú^†‹ /©ëŸ˜|œŸÅð˜-Wåç+8Ͷ¿bªÆ«ß}g·jŸæ±‹¤Sí•åùä7Û^>9©q}=v}ðiŸê¼Ï¯ÿr³í)§=õàêÃè©êÏž§+üÕþÌ﹟ßöòërù=zL=8N(÷ ?Uýàw]V¥ÿK󩏸çç~ÛÙ}ž)÷>óû=¿cuã^YN bÅþÑ÷ù;“_忨ÚÏ·BìUí×Uç œ?­ò“Øq¡ÿÇž1<Žtÿÿ]ówæ4^x~‡í,@ì-N®I=Øß÷¢å%å•$ùâ˜|ÔÞp9áüáòèß1%×%H>š.|?¬OëN—Lì ·WØZ$’Žû½êg…Ÿ_³+Ø÷uaýdrMÇ_¸¹Hºp¶3™Äáö ç§ëmWÚÞÜߟÒri¿Q;˜ßÙpï— ×árè8¦÷©=a½kõ O¿2÷ïÆd¼»éè|ësã„η8’ž›g\9Ìïqï=å+~ç˵«$‰iºp%»Âå‡õ$=}NÆM8¸õâþ¾œÚ¸>VžÓ$_ŸÞ“pƒýÊß×?÷Ôƒ¶#7¾i;=ã…ÌßppÛðQåÇ{Ö;îwJJ\ŸÏÃf]pí/@ìåÖ5Žt” 
ùh:zŸû½"2Ni;(ýxÚ^t¡:´}hûq¿[GyE×ÚOÜ8L&1MÏÌ3¶žtßDÛ=|M9@Ë×öå5å?á|ÔOPý)õèx¡íDýÒ/^“uÙsŸÖ‹ú-Ü8§vÐõû½º„ï‡õiùÔ)JÊ¥ã¶/éä>ç7Ñò©?DyLÇ_ØÞpùÔO ?çÖ¯p:ÂC¶ÿ×å¸d8ï™ïÉÿ\W‡ÎcZ÷{b?÷;F´ýé8áÆ­ÝoÐù 1ÇpLç7÷û,ô9·? ×'™\Óv¤þµƒrŸó‡)×ÏzNó“ñ®.ó;™ÔŸfÏû¹ý)ß½.ÎØÍýîÝ_ÐzÑqMû•êÓuÎcªGËM&ºÔNªGyªZÿ™ßÏ åïhsûjº®ÐyCýÞkõc߯0ë¤ç|‡î§éþQõ;x´ýh»ÓñJì Ößç£û_ZÚŽÜúMÛ‰®7´þ”Ìùûû„ô<€Ö›ŸŒŸÎ?yÎQ¸ý$mWnÇq†r–®+T‡ã¬ÂóøWô~8=õ7T¿ɳPÿóOI9{Â1åïtÜÑ~â~Oοs é9?&™)—á»g|…uTç4]ø:™\“þðÌ#j7íÕuhÿÐù`ž“ó¤pðð˜ò„¶k2IOìsË ÇÜùíÊ_•ŸFç=¯£ã'™¤£û¢p¹Ô çóë/S?ŸúmÔO¡~Õ£çÌ)êç?t„ëÅøû¬¿ÊìçÃå+Ç7j—êˆÛßPÿ-ÀÄÜ~ж£âßmPž#«Î¥¹ó&Úßáôd=õèÓý#­µ›Ù¯±ó’ÎGnÿ{M—öý%åÇœÿÅí ¹u#™äçxNû‹®ït>pëí/ÆŸ¤~E8xæo€ØË•ËûÐqÀí“H{èsŽ×dü³ï¥Âå…í§ë-í?nœ+Þ¯zÆI2©êwèUïui;HÌùy%‰n2£G÷Iô}O2“žö÷½j'Õáöt\Ðúû=ß\í)ãõ÷É>Ò³¿¥¼§þ(ålø>í_âw¸ù¹s,ÚÿÌ÷"<å0~š§¾Ü9N¸ÜûA®ÃåQnQ‹ÔÓc¿êßé_— õâüNÆŸ÷è’vvŸS¿‘{Çíó¸óXn|p~!w.ÎÍëë«¡^¯9¿”r^µ^Î+ýd:¾GÙó,º®rï[¨ßÁðÖã÷Ñ÷Žô|ã%©g8xêGíJ&õàü^j/×t<„cf_éñcè|åüDÕûÿð5åqø>ý^= ýz½œwý §+AîÓu—rót†'žù¶ÎwŽôœŠ´w8Ðöópãsî¥ÜŸ’ùäÚÁ'rë÷´éùÇ4å€ê܇òŒÎ[Z/âŸÓv–“ŒNÏ÷ÂÝ×’þpu¹yì+ž|Ô~jçß'“û$æöW®>Ý÷Ðy˼·÷ð™óé¸!û1ö¼’”Çž§PþÑyKÇ5·ÞϹól:$=wÞ›ÌèÒýD8æú‰Ž/ί'ýÏÖ‹òžÛ/Q»Ã×t_OÎwÙ÷fªsWb[izzŸúÅ̺Ϟ›p~-·¾2ßûbϹs3îür€óSh~î};m'j?]/¸ïS…í¡ã†Øìy"½ÏùWt}侇DôØïÉ1~™'?³_tëEù vqû>Ú¾ÔNºÞÈ}ªOŸ“yÈÅžþUqžØžqÆ}•ûþ!½¦ýMç;õ/¹ùÃù+t§ù$·œ¿Nç?MOü-î{³žyOû%@îÓvçÞÿ»×þ®¾ï½n‡Ûáv¸nmP­Ïÿéøvøßþíñu{œÞÿf¸=.o‡Ûáßç÷íùsk¿Ý·ãÿü¿5üÛíú­½o‡ÿÎp{üþw‡›G·9x;Üÿþ|øŸß·Ãíp;Üÿ³Â¿½nÜ^—n‡ÿKáßž·çÉíð1Üžÿáv{þÏ ÿÛçÉÿÖzÑp›‡ÿ-þÒ¿í/ÞªvúŸÒþáðßbïÿÔqÿmãðvøï·ÇGÚ†ÿëíóŸ«oLºÔÿE¥þ—9õ¿ô­¼Ü¤MꇸÔÿ²^»Óþ•¶M®}ÎØ¸y›¶•,ëÚe¦«—O´oü~\·n%|Y±Røiúך¼Ô¼Yø¢ÑK Ú´¹v‘¡y«F¯¼‰iÒ¸]£kŸ£4 ßNß Z‹3¿öJ‡rÈê ¯=ˆ †?házøƒþ`†?„+e‡?8á¡kÒË»Ÿ‚î'Íý¤»Ÿ ÷“é~²ÜO¶ûÉq?¹š«¡¹š«¡¹š«¡¹š«¡¹š«¡¹º«¡»º«¡»º«¡»º«¡»º«¡»†«a¸†«a¸†«a¸†«a¸†«a¸¦«aº¦«aº¦«aº¦«aº¦«aº–«a¹–«a¹–«a¹î,Ig¹–«a¹¶«a»¶«a»¶«a»¶«a»¶«a»Ž«á¸Ž«á¸Ž«á¸Ž«á¸Ž«á¸!W#äj„\«r5B®FÈÕ¹!W#Öˆ–/ƒðQƒ:|4࣠-øhÃG>‚ZÔ‚ µ ¨A-jAP ‚ZÔ‚ ¦šj¨i ¦šj¨i ¦šj:¨é ¦ƒšj:¨é ¦ƒšj:¨é f€šj¨ f€šj¨ f€šj&¨™ f‚š j&¨™ f‚š j&¨™ fšj¨Y fšj¨Y fšj6¨Ù fƒš j6¨Ù fƒš j6¨Ù æ€šj¨9 æ€šj¨9 æ€šj!P ZÔB µ¨…@-j!P–hÀ X¢K4`‰,Ñ€%°D–hÀ X¢K4`‰,Ñ€%°D–hÀ X¢K4`‰,Ñ€%°D–hÀ X¢K4`‰,Ñ€%°D–hÀ X¢K4`‰,Ñ€%°D–hÀ X¢K4`‰,Ñ€%°D–hÀ X¢K4`‰,Ñ€%°D–hÀ X¢K4`‰,Ñ€%°D–hÀ X¢K4`‰,Ñ€%°D–hÀ X¢K4`‰,Ñ€%°D–hÀ X¢K4`‰,Ñ€%°D–hÀ X¢K4`‰,Ñ€%°D–hÀ X¢K4`‰,Ñ%:°D–èÀX¢Kt`‰,Ñ%:°D–èÀX¢Kt`‰,Ñ%:°D–èÀX¢Kt`‰,Ñ%:°D–èÀX¢Kt`‰,Ñ%:°D–èÀX¢Kt`‰,Ñ%:°D–èÀX¢Kt`‰,Ñ%:°D–èÀX¢Kt`‰,Ñ%:°D–èÀX¢Kt`‰,Ñ%:°D–èÀX¢Kt`‰,Ñ%:°D–èÀX¢Kt`‰,Ñ%:°D–èÀX¢Kt`‰,Ñ%:°D–èÀX¢Kt`‰,Ñ%:°D–ÀXbK `‰,1€%°Ä–ÀXbK `‰,1€%°Ä–ÀXbK `‰,1€%°Ä–ÀXbK `‰,1€%°Ä–ÀXbK `‰,1€%°Ä–ÀXbK `‰,1€%°Ä–ÀXbK `‰,1€%°Ä–ÀXbK `‰,1€%°Ä–ÀXbK `‰,1€%°Ä–ÀXbK `‰,1€%°Ä–ÀXbK `‰,1€%°Ä–ÀXbK `‰,1€%°Ä–ÀXbK `‰ ,1%&°Ä–˜ÀXbKL`‰ ,1%&°Ä–˜ÀXbKL`‰ ,1%&°Ä–˜ÀXbKL`‰ ,1%&°Ä–˜ÀXbKL`‰ ,1%&°Ä–˜ÀXbKL`‰ ,1%&°Ä–˜ÀXbKL`‰ ,1%&°Ä–˜ÀXbKL`‰ ,1%&°Ä–˜ÀXbKL`‰ ,1%&°Ä–˜ÀXbKL`‰ ,1%&°Ä–˜ÀXbKL`‰ ,1%&°Ä–˜ÀXbKL`‰ ,1%&°Ä–˜ÀXbKL`‰ ,1%&°Ä–XÀ XbK,`‰,±€%°Ä–XÀ XbK,`‰,±€%°Ä–XÀ XbK,`‰,±€%°Ä–XÀ XbK,`‰,±€%°Ä–XÀ XbK,`‰,±€%°Ä–XÀ XbK,`‰,±€%°Ä–XÀ XbK,`‰,±€%°Ä–XÀ XbK,`‰,±€%°Ä–XÀ XbK,`‰,±€%°Ä–XÀ XbK,`‰,±€%°Ä–XÀ XbK,`‰,±€%°Ä–XÀ XbK,`‰,±€%°Ä–XÀ XbK,`‰ ,±%6°Ä–ØÀXbKl`‰ ,±%6°Ä–ØÀXbKl`‰ ,±%6°Ä–ØÀXbKl`‰ ,±%6°Ä–ØÀXbKl`‰ ,±%6°Ä–ØÀXbKl`‰ ,±%6°Ä–ØÀXbKl`‰ ,±%6°Ä–ØÀXbKl`‰ ,±%6°Ä–ØÀXbKl`‰ ,±%6°Ä–ØÀXbKl`‰ ,±%6°Ä–ØÀXbKl`‰ ,±%6°Ä–ØÀXbKl`‰ ,±%6°Ä–ØÀXbKl`‰ ,±%6°Ä–8ÀXâK`‰,q€%°Ä–8ÀXâK`‰,q€%°Ä–8ÀXâK`‰,q€%°Ä–8ÀXâK`‰,q€%°Ä–8ÀXâK`‰,q€%°Ä–8ÀXâK`‰,q€%°Ä–8ÀXâK`‰,q€%°Ä–8ÀXâK`‰,q€%°Ä–8ÀXâK`‰,q€%°Ä–8ÀXâK`‰,q€%°Ä–8ÀXâK`‰,q€%°Ä–8ÀXâK`‰,q€%°Ä–8ÀXâK`IX–„€%!`IX–„€%!`IX–„€%!`IX–„€%!`IX–„€%!`IX–„€%!`IX–„€%!`IX–„€%!`IX–„€%!`IX–„€%!`IX–„€%!`IX–„€%!`IX–„€%!`IX–„€%!`IX–„€%!`IX–„€%!`IX–„€%!`IX–„€%!`IX–„€%!`IX–„€%!`IX–„€%!`IX–„€%!`IX–„€%!`IX–„€%!`IX–„€%!—%1Áò.L®~¢Ïú¬£Ïúl¢Ïúl£ÏúŒtƒH7ˆtƒH7ˆtƒH7ˆtƒH7ˆtƒH7ˆt5¤«!] éjHWCºÒÕ®†t5¤«!]éêHWGº:ÒÕ‘®Žtu¤«#]éêH×@ºÒ5®t ¤k ]éH×@ºÒ5‘®‰tM¤k"]éšH×Dº&Ò5‘®‰t-¤k!] 
MCMCpack/configure.ac0000644000176000001440000000137412133644110014146 0ustar ripleyusers
AC_PREREQ(2.50)
AC_INIT([DESCRIPTION])

## Locate R and the C++ compiler R is configured to use.
: ${R_HOME=`R RHOME`}
if test -z "${R_HOME}"; then
  echo "could not determine R_HOME"
  exit 1
fi
CXX=`${R_HOME}/bin/R CMD config CXX`
AC_PROG_CXX

## Refuse GNU compilers older than g++ 4.0.
if test "${GXX}" = yes; then
  gxx_version=`${CXX} -v 2>&1 | grep "^.*g.. version" | \
    sed -e 's/^.*g.. version *//'`
  case ${gxx_version} in
    1.*|2.*|3.*)
      AC_MSG_WARN([Only g++ version 4.0 or greater can be used with MCMCpack.])
      AC_MSG_ERROR([Please use a different compiler.])
      ;;
  esac
fi

## Feature probes whose results are substituted into src/Makevars.
AC_CHECK_HEADERS(ieeefp.h, [MV_HAVE_IEEEFP_H="-DHAVE_IEEEFP_H"], [MV_HAVE_IEEEFP_H=""])
AC_CHECK_FUNCS(trunc, [MV_HAVE_TRUNC="-DHAVE_TRUNC"], [MV_HAVE_TRUNC=""])

AC_SUBST(MV_HAVE_IEEEFP_H)
AC_SUBST(MV_HAVE_TRUNC)

AC_CONFIG_FILES([src/Makevars])
AC_OUTPUT
MCMCpack/configure0000755000176000001440000041517512133644110013577 0ustar ripleyusers
#! /bin/sh
# Guess values for system-dependent variables and create Makefiles.
# Generated by GNU Autoconf 2.68.
#
#
# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001,
# 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software
# Foundation, Inc.
#
#
# This configure script is free software; the Free Software Foundation
# gives unlimited permission to copy, distribute and modify it.
## -------------------- ##
## M4sh Initialization. ##
## -------------------- ##

# Be more Bourne compatible
DUALCASE=1; export DUALCASE # for MKS sh
if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then :
  emulate sh
  NULLCMD=:
  # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
  # is contrary to our usage. Disable this feature.
  alias -g '${1+"$@"}'='"$@"'
  setopt NO_GLOB_SUBST
else
  case `(set -o) 2>/dev/null` in #(
  *posix*) :
    set -o posix ;; #(
  *) :
     ;;
esac
fi


as_nl='
'
export as_nl
# Printing a long string crashes Solaris 7 /usr/bin/printf.
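# A minimal sketch, not part of the generated configure script, of how the
# probes encoded in configure.ac above can be reproduced by hand on a
# Unix-alike with R on the PATH.  The conftest.cpp file name and the assigned
# flag values are illustrative assumptions only; the real configure runs the
# equivalent tests itself and substitutes the results into src/Makevars.
#
#   R_HOME=$(R RHOME)                        # same probe configure.ac uses
#   CXX=$("${R_HOME}/bin/R" CMD config CXX)  # C++ compiler R is configured to use
#   printf '#include <ieeefp.h>\nint main() { return 0; }\n' > conftest.cpp
#   ${CXX} conftest.cpp -o conftest 2>/dev/null \
#     && MV_HAVE_IEEEFP_H="-DHAVE_IEEEFP_H" || MV_HAVE_IEEEFP_H=""
#   printf '#include <math.h>\nint main() { return (int) trunc(1.5); }\n' > conftest.cpp
#   ${CXX} conftest.cpp -o conftest 2>/dev/null \
#     && MV_HAVE_TRUNC="-DHAVE_TRUNC" || MV_HAVE_TRUNC=""
#   rm -f conftest conftest.cpp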
as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo # Prefer a ksh shell builtin over an external printf program on Solaris, # but without wasting forks for bash or zsh. if test -z "$BASH_VERSION$ZSH_VERSION" \ && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='print -r --' as_echo_n='print -rn --' elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='printf %s\n' as_echo_n='printf %s' else if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' as_echo_n='/usr/ucb/echo -n' else as_echo_body='eval expr "X$1" : "X\\(.*\\)"' as_echo_n_body='eval arg=$1; case $arg in #( *"$as_nl"*) expr "X$arg" : "X\\(.*\\)$as_nl"; arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; esac; expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" ' export as_echo_n_body as_echo_n='sh -c $as_echo_n_body as_echo' fi export as_echo_body as_echo='sh -c $as_echo_body as_echo' fi # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then PATH_SEPARATOR=: (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || PATH_SEPARATOR=';' } fi # IFS # We need space, tab and new line, in precisely that order. Quoting is # there to prevent editors from complaining about space-tab. # (If _AS_PATH_WALK were called with IFS unset, it would disable word # splitting by setting IFS to empty value.) IFS=" "" $as_nl" # Find who we are. Look in the path if we contain no directory separator. as_myself= case $0 in #(( *[\\/]* ) as_myself=$0 ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break done IFS=$as_save_IFS ;; esac # We did not find ourselves, most probably we were run as `sh COMMAND' # in which case we are not to be found in the path. if test "x$as_myself" = x; then as_myself=$0 fi if test ! -f "$as_myself"; then $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 exit 1 fi # Unset variables that we do not need and which cause bugs (e.g. in # pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" # suppresses any "Segmentation fault" message there. '((' could # trigger a bug in pdksh 5.2.14. for as_var in BASH_ENV ENV MAIL MAILPATH do eval test x\${$as_var+set} = xset \ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : done PS1='$ ' PS2='> ' PS4='+ ' # NLS nuisances. LC_ALL=C export LC_ALL LANGUAGE=C export LANGUAGE # CDPATH. (unset CDPATH) >/dev/null 2>&1 && unset CDPATH if test "x$CONFIG_SHELL" = x; then as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which # is contrary to our usage. Disable this feature. 
alias -g '\${1+\"\$@\"}'='\"\$@\"' setopt NO_GLOB_SUBST else case \`(set -o) 2>/dev/null\` in #( *posix*) : set -o posix ;; #( *) : ;; esac fi " as_required="as_fn_return () { (exit \$1); } as_fn_success () { as_fn_return 0; } as_fn_failure () { as_fn_return 1; } as_fn_ret_success () { return 0; } as_fn_ret_failure () { return 1; } exitcode=0 as_fn_success || { exitcode=1; echo as_fn_success failed.; } as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; } as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; } as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; } if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then : else exitcode=1; echo positional parameters were not saved. fi test x\$exitcode = x0 || exit 1" as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" && test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1 test \$(( 1 + 1 )) = 2 || exit 1" if (eval "$as_required") 2>/dev/null; then : as_have_required=yes else as_have_required=no fi if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then : else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR as_found=false for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. as_found=: case $as_dir in #( /*) for as_base in sh bash ksh sh5; do # Try only shells that exist, to save several forks. as_shell=$as_dir/$as_base if { test -f "$as_shell" || test -f "$as_shell.exe"; } && { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then : CONFIG_SHELL=$as_shell as_have_required=yes if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then : break 2 fi fi done;; esac as_found=false done $as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } && { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then : CONFIG_SHELL=$SHELL as_have_required=yes fi; } IFS=$as_save_IFS if test "x$CONFIG_SHELL" != x; then : # We cannot yet assume a decent shell, so we have to provide a # neutralization value for shells without unset; and this also # works around shells that cannot unset nonexistent variables. # Preserve -v and -x to the replacement shell. BASH_ENV=/dev/null ENV=/dev/null (unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV export CONFIG_SHELL case $- in # (((( *v*x* | *x*v* ) as_opts=-vx ;; *v* ) as_opts=-v ;; *x* ) as_opts=-x ;; * ) as_opts= ;; esac exec "$CONFIG_SHELL" $as_opts "$as_myself" ${1+"$@"} fi if test x$as_have_required = xno; then : $as_echo "$0: This script requires a shell more modern than all" $as_echo "$0: the shells that I found on your system." if test x${ZSH_VERSION+set} = xset ; then $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should" $as_echo "$0: be upgraded to zsh 4.3.4 or later." else $as_echo "$0: Please tell bug-autoconf@gnu.org about your system, $0: including any error possibly output before this $0: message. Then install a modern shell, or manually run $0: the script under such a shell if you do have one." fi exit 1 fi fi fi SHELL=${CONFIG_SHELL-/bin/sh} export SHELL # Unset more variables known to interfere with behavior of common tools. 
CLICOLOR_FORCE= GREP_OPTIONS= unset CLICOLOR_FORCE GREP_OPTIONS ## --------------------- ## ## M4sh Shell Functions. ## ## --------------------- ## # as_fn_unset VAR # --------------- # Portably unset VAR. as_fn_unset () { { eval $1=; unset $1;} } as_unset=as_fn_unset # as_fn_set_status STATUS # ----------------------- # Set $? to STATUS, without forking. as_fn_set_status () { return $1 } # as_fn_set_status # as_fn_exit STATUS # ----------------- # Exit the shell with STATUS, even in a "trap 0" or "set -e" context. as_fn_exit () { set +e as_fn_set_status $1 exit $1 } # as_fn_exit # as_fn_mkdir_p # ------------- # Create "$as_dir" as a directory, including parents if necessary. as_fn_mkdir_p () { case $as_dir in #( -*) as_dir=./$as_dir;; esac test -d "$as_dir" || eval $as_mkdir_p || { as_dirs= while :; do case $as_dir in #( *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( *) as_qdir=$as_dir;; esac as_dirs="'$as_qdir' $as_dirs" as_dir=`$as_dirname -- "$as_dir" || $as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_dir" : 'X\(//\)[^/]' \| \ X"$as_dir" : 'X\(//\)$' \| \ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$as_dir" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` test -d "$as_dir" && break done test -z "$as_dirs" || eval "mkdir $as_dirs" } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" } # as_fn_mkdir_p # as_fn_append VAR VALUE # ---------------------- # Append the text in VALUE to the end of the definition contained in VAR. Take # advantage of any shell optimizations that allow amortized linear growth over # repeated appends, instead of the typical quadratic growth present in naive # implementations. if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : eval 'as_fn_append () { eval $1+=\$2 }' else as_fn_append () { eval $1=\$$1\$2 } fi # as_fn_append # as_fn_arith ARG... # ------------------ # Perform arithmetic evaluation on the ARGs, and store the result in the # global $as_val. Take advantage of shells that can avoid forks. The arguments # must be portable across $(()) and expr. if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : eval 'as_fn_arith () { as_val=$(( $* )) }' else as_fn_arith () { as_val=`expr "$@" || test $? -eq 1` } fi # as_fn_arith # as_fn_error STATUS ERROR [LINENO LOG_FD] # ---------------------------------------- # Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are # provided, also output the error to LOG_FD, referencing LINENO. Then exit the # script with STATUS, using 1 if that was 0. as_fn_error () { as_status=$1; test $as_status -eq 0 && as_status=1 if test "$4"; then as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 fi $as_echo "$as_me: error: $2" >&2 as_fn_exit $as_status } # as_fn_error if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr else as_expr=false fi if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then as_basename=basename else as_basename=false fi if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then as_dirname=dirname else as_dirname=false fi as_me=`$as_basename -- "$0" || $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ X"$0" : 'X\(//\)$' \| \ X"$0" : 'X\(/\)' \| . 
2>/dev/null || $as_echo X/"$0" | sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/ q } /^X\/\(\/\/\)$/{ s//\1/ q } /^X\/\(\/\).*/{ s//\1/ q } s/.*/./; q'` # Avoid depending upon Character Ranges. as_cr_letters='abcdefghijklmnopqrstuvwxyz' as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' as_cr_Letters=$as_cr_letters$as_cr_LETTERS as_cr_digits='0123456789' as_cr_alnum=$as_cr_Letters$as_cr_digits as_lineno_1=$LINENO as_lineno_1a=$LINENO as_lineno_2=$LINENO as_lineno_2a=$LINENO eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" && test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || { # Blame Lee E. McMahon (1931-1989) for sed's syntax. :-) sed -n ' p /[$]LINENO/= ' <$as_myself | sed ' s/[$]LINENO.*/&-/ t lineno b :lineno N :loop s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ t loop s/-\n.*// ' >$as_me.lineno && chmod +x "$as_me.lineno" || { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; } # Don't try to exec as it changes $[0], causing all sort of problems # (the dirname of $[0] is not the place where we might find the # original and so on. Autoconf is especially sensitive to this). . "./$as_me.lineno" # Exit status is that of the last command. exit } ECHO_C= ECHO_N= ECHO_T= case `echo -n x` in #((((( -n*) case `echo 'xy\c'` in *c*) ECHO_T=' ';; # ECHO_T is single tab character. xy) ECHO_C='\c';; *) echo `echo ksh88 bug on AIX 6.1` > /dev/null ECHO_T=' ';; esac;; *) ECHO_N='-n';; esac rm -f conf$$ conf$$.exe conf$$.file if test -d conf$$.dir; then rm -f conf$$.dir/conf$$.file else rm -f conf$$.dir mkdir conf$$.dir 2>/dev/null fi if (echo >conf$$.file) 2>/dev/null; then if ln -s conf$$.file conf$$ 2>/dev/null; then as_ln_s='ln -s' # ... but there are two gotchas: # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. # In both cases, we have to default to `cp -p'. ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || as_ln_s='cp -p' elif ln conf$$.file conf$$ 2>/dev/null; then as_ln_s=ln else as_ln_s='cp -p' fi else as_ln_s='cp -p' fi rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file rmdir conf$$.dir 2>/dev/null if mkdir -p . 2>/dev/null; then as_mkdir_p='mkdir -p "$as_dir"' else test -d ./-p && rmdir ./-p as_mkdir_p=false fi if test -x / >/dev/null 2>&1; then as_test_x='test -x' else if ls -dL / >/dev/null 2>&1; then as_ls_L_option=L else as_ls_L_option= fi as_test_x=' eval sh -c '\'' if test -d "$1"; then test -d "$1/."; else case $1 in #( -*)set "./$1";; esac; case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in #(( ???[sx]*):;;*)false;;esac;fi '\'' sh ' fi as_executable_p=$as_test_x # Sed expression to map a string onto a valid CPP name. as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" # Sed expression to map a string onto a valid variable name. as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" test -n "$DJDIR" || exec 7<&0 &1 # Name of the host. # hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status, # so uname gets run too. ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q` # # Initializations. # ac_default_prefix=/usr/local ac_clean_files= ac_config_libobj_dir=. LIBOBJS= cross_compiling=no subdirs= MFLAGS= MAKEFLAGS= # Identity of this package. PACKAGE_NAME= PACKAGE_TARNAME= PACKAGE_VERSION= PACKAGE_STRING= PACKAGE_BUGREPORT= PACKAGE_URL= ac_unique_file="DESCRIPTION" # Factoring default headers for most tests. 
ac_includes_default="\ #include #ifdef HAVE_SYS_TYPES_H # include #endif #ifdef HAVE_SYS_STAT_H # include #endif #ifdef STDC_HEADERS # include # include #else # ifdef HAVE_STDLIB_H # include # endif #endif #ifdef HAVE_STRING_H # if !defined STDC_HEADERS && defined HAVE_MEMORY_H # include # endif # include #endif #ifdef HAVE_STRINGS_H # include #endif #ifdef HAVE_INTTYPES_H # include #endif #ifdef HAVE_STDINT_H # include #endif #ifdef HAVE_UNISTD_H # include #endif" ac_subst_vars='LTLIBOBJS LIBOBJS MV_HAVE_TRUNC MV_HAVE_IEEEFP_H EGREP GREP CPP ac_ct_CC CFLAGS CC OBJEXT EXEEXT ac_ct_CXX CPPFLAGS LDFLAGS CXXFLAGS CXX target_alias host_alias build_alias LIBS ECHO_T ECHO_N ECHO_C DEFS mandir localedir libdir psdir pdfdir dvidir htmldir infodir docdir oldincludedir includedir localstatedir sharedstatedir sysconfdir datadir datarootdir libexecdir sbindir bindir program_transform_name prefix exec_prefix PACKAGE_URL PACKAGE_BUGREPORT PACKAGE_STRING PACKAGE_VERSION PACKAGE_TARNAME PACKAGE_NAME PATH_SEPARATOR SHELL' ac_subst_files='' ac_user_opts=' enable_option_checking ' ac_precious_vars='build_alias host_alias target_alias CXX CXXFLAGS LDFLAGS LIBS CPPFLAGS CCC CC CFLAGS CPP' # Initialize some variables set by options. ac_init_help= ac_init_version=false ac_unrecognized_opts= ac_unrecognized_sep= # The variables have the same names as the options, with # dashes changed to underlines. cache_file=/dev/null exec_prefix=NONE no_create= no_recursion= prefix=NONE program_prefix=NONE program_suffix=NONE program_transform_name=s,x,x, silent= site= srcdir= verbose= x_includes=NONE x_libraries=NONE # Installation directory options. # These are left unexpanded so users can "make install exec_prefix=/foo" # and all the variables that are supposed to be based on exec_prefix # by default will actually change. # Use braces instead of parens because sh, perl, etc. also accept them. # (The list follows the same order as the GNU Coding Standards.) bindir='${exec_prefix}/bin' sbindir='${exec_prefix}/sbin' libexecdir='${exec_prefix}/libexec' datarootdir='${prefix}/share' datadir='${datarootdir}' sysconfdir='${prefix}/etc' sharedstatedir='${prefix}/com' localstatedir='${prefix}/var' includedir='${prefix}/include' oldincludedir='/usr/include' docdir='${datarootdir}/doc/${PACKAGE}' infodir='${datarootdir}/info' htmldir='${docdir}' dvidir='${docdir}' pdfdir='${docdir}' psdir='${docdir}' libdir='${exec_prefix}/lib' localedir='${datarootdir}/locale' mandir='${datarootdir}/man' ac_prev= ac_dashdash= for ac_option do # If the previous option needs an argument, assign it. if test -n "$ac_prev"; then eval $ac_prev=\$ac_option ac_prev= continue fi case $ac_option in *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;; *=) ac_optarg= ;; *) ac_optarg=yes ;; esac # Accept the important Cygnus configure options, so we can diagnose typos. 
case $ac_dashdash$ac_option in --) ac_dashdash=yes ;; -bindir | --bindir | --bindi | --bind | --bin | --bi) ac_prev=bindir ;; -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) bindir=$ac_optarg ;; -build | --build | --buil | --bui | --bu) ac_prev=build_alias ;; -build=* | --build=* | --buil=* | --bui=* | --bu=*) build_alias=$ac_optarg ;; -cache-file | --cache-file | --cache-fil | --cache-fi \ | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) ac_prev=cache_file ;; -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*) cache_file=$ac_optarg ;; --config-cache | -C) cache_file=config.cache ;; -datadir | --datadir | --datadi | --datad) ac_prev=datadir ;; -datadir=* | --datadir=* | --datadi=* | --datad=*) datadir=$ac_optarg ;; -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \ | --dataroo | --dataro | --datar) ac_prev=datarootdir ;; -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \ | --dataroot=* | --dataroo=* | --dataro=* | --datar=*) datarootdir=$ac_optarg ;; -disable-* | --disable-*) ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? "invalid feature name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "enable_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval enable_$ac_useropt=no ;; -docdir | --docdir | --docdi | --doc | --do) ac_prev=docdir ;; -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*) docdir=$ac_optarg ;; -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv) ac_prev=dvidir ;; -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*) dvidir=$ac_optarg ;; -enable-* | --enable-*) ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? "invalid feature name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "enable_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval enable_$ac_useropt=\$ac_optarg ;; -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ | --exec | --exe | --ex) ac_prev=exec_prefix ;; -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ | --exec=* | --exe=* | --ex=*) exec_prefix=$ac_optarg ;; -gas | --gas | --ga | --g) # Obsolete; use --with-gas. 
with_gas=yes ;; -help | --help | --hel | --he | -h) ac_init_help=long ;; -help=r* | --help=r* | --hel=r* | --he=r* | -hr*) ac_init_help=recursive ;; -help=s* | --help=s* | --hel=s* | --he=s* | -hs*) ac_init_help=short ;; -host | --host | --hos | --ho) ac_prev=host_alias ;; -host=* | --host=* | --hos=* | --ho=*) host_alias=$ac_optarg ;; -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht) ac_prev=htmldir ;; -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \ | --ht=*) htmldir=$ac_optarg ;; -includedir | --includedir | --includedi | --included | --include \ | --includ | --inclu | --incl | --inc) ac_prev=includedir ;; -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ | --includ=* | --inclu=* | --incl=* | --inc=*) includedir=$ac_optarg ;; -infodir | --infodir | --infodi | --infod | --info | --inf) ac_prev=infodir ;; -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) infodir=$ac_optarg ;; -libdir | --libdir | --libdi | --libd) ac_prev=libdir ;; -libdir=* | --libdir=* | --libdi=* | --libd=*) libdir=$ac_optarg ;; -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ | --libexe | --libex | --libe) ac_prev=libexecdir ;; -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ | --libexe=* | --libex=* | --libe=*) libexecdir=$ac_optarg ;; -localedir | --localedir | --localedi | --localed | --locale) ac_prev=localedir ;; -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*) localedir=$ac_optarg ;; -localstatedir | --localstatedir | --localstatedi | --localstated \ | --localstate | --localstat | --localsta | --localst | --locals) ac_prev=localstatedir ;; -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \ | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*) localstatedir=$ac_optarg ;; -mandir | --mandir | --mandi | --mand | --man | --ma | --m) ac_prev=mandir ;; -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*) mandir=$ac_optarg ;; -nfp | --nfp | --nf) # Obsolete; use --without-fp. 
with_fp=no ;; -no-create | --no-create | --no-creat | --no-crea | --no-cre \ | --no-cr | --no-c | -n) no_create=yes ;; -no-recursion | --no-recursion | --no-recursio | --no-recursi \ | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) no_recursion=yes ;; -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \ | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \ | --oldin | --oldi | --old | --ol | --o) ac_prev=oldincludedir ;; -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \ | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \ | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*) oldincludedir=$ac_optarg ;; -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) ac_prev=prefix ;; -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) prefix=$ac_optarg ;; -program-prefix | --program-prefix | --program-prefi | --program-pref \ | --program-pre | --program-pr | --program-p) ac_prev=program_prefix ;; -program-prefix=* | --program-prefix=* | --program-prefi=* \ | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*) program_prefix=$ac_optarg ;; -program-suffix | --program-suffix | --program-suffi | --program-suff \ | --program-suf | --program-su | --program-s) ac_prev=program_suffix ;; -program-suffix=* | --program-suffix=* | --program-suffi=* \ | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*) program_suffix=$ac_optarg ;; -program-transform-name | --program-transform-name \ | --program-transform-nam | --program-transform-na \ | --program-transform-n | --program-transform- \ | --program-transform | --program-transfor \ | --program-transfo | --program-transf \ | --program-trans | --program-tran \ | --progr-tra | --program-tr | --program-t) ac_prev=program_transform_name ;; -program-transform-name=* | --program-transform-name=* \ | --program-transform-nam=* | --program-transform-na=* \ | --program-transform-n=* | --program-transform-=* \ | --program-transform=* | --program-transfor=* \ | --program-transfo=* | --program-transf=* \ | --program-trans=* | --program-tran=* \ | --progr-tra=* | --program-tr=* | --program-t=*) program_transform_name=$ac_optarg ;; -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd) ac_prev=pdfdir ;; -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*) pdfdir=$ac_optarg ;; -psdir | --psdir | --psdi | --psd | --ps) ac_prev=psdir ;; -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*) psdir=$ac_optarg ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil) silent=yes ;; -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) ac_prev=sbindir ;; -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ | --sbi=* | --sb=*) sbindir=$ac_optarg ;; -sharedstatedir | --sharedstatedir | --sharedstatedi \ | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ | --sharedst | --shareds | --shared | --share | --shar \ | --sha | --sh) ac_prev=sharedstatedir ;; -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \ | --sha=* | --sh=*) sharedstatedir=$ac_optarg ;; -site | --site | --sit) ac_prev=site ;; -site=* | --site=* | --sit=*) site=$ac_optarg ;; -srcdir | --srcdir | --srcdi | --srcd | --src | --sr) ac_prev=srcdir ;; -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) srcdir=$ac_optarg ;; 
-sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ | --syscon | --sysco | --sysc | --sys | --sy) ac_prev=sysconfdir ;; -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) sysconfdir=$ac_optarg ;; -target | --target | --targe | --targ | --tar | --ta | --t) ac_prev=target_alias ;; -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*) target_alias=$ac_optarg ;; -v | -verbose | --verbose | --verbos | --verbo | --verb) verbose=yes ;; -version | --version | --versio | --versi | --vers | -V) ac_init_version=: ;; -with-* | --with-*) ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? "invalid package name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "with_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval with_$ac_useropt=\$ac_optarg ;; -without-* | --without-*) ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? "invalid package name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "with_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval with_$ac_useropt=no ;; --x) # Obsolete; use --with-x. with_x=yes ;; -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \ | --x-incl | --x-inc | --x-in | --x-i) ac_prev=x_includes ;; -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \ | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*) x_includes=$ac_optarg ;; -x-libraries | --x-libraries | --x-librarie | --x-librari \ | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l) ac_prev=x_libraries ;; -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \ | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) x_libraries=$ac_optarg ;; -*) as_fn_error $? "unrecognized option: \`$ac_option' Try \`$0 --help' for more information" ;; *=*) ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` # Reject names that are not valid shell variable names. case $ac_envvar in #( '' | [0-9]* | *[!_$as_cr_alnum]* ) as_fn_error $? "invalid variable name: \`$ac_envvar'" ;; esac eval $ac_envvar=\$ac_optarg export $ac_envvar ;; *) # FIXME: should be removed in autoconf 3.0. $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2 expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2 : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}" ;; esac done if test -n "$ac_prev"; then ac_option=--`echo $ac_prev | sed 's/_/-/g'` as_fn_error $? "missing argument to $ac_option" fi if test -n "$ac_unrecognized_opts"; then case $enable_option_checking in no) ;; fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;; *) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;; esac fi # Check all directory arguments for consistency. 
for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ datadir sysconfdir sharedstatedir localstatedir includedir \ oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ libdir localedir mandir do eval ac_val=\$$ac_var # Remove trailing slashes. case $ac_val in */ ) ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'` eval $ac_var=\$ac_val;; esac # Be sure to have absolute directory names. case $ac_val in [\\/$]* | ?:[\\/]* ) continue;; NONE | '' ) case $ac_var in *prefix ) continue;; esac;; esac as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val" done # There might be people who depend on the old broken behavior: `$host' # used to hold the argument of --host etc. # FIXME: To remove some day. build=$build_alias host=$host_alias target=$target_alias # FIXME: To remove some day. if test "x$host_alias" != x; then if test "x$build_alias" = x; then cross_compiling=maybe $as_echo "$as_me: WARNING: if you wanted to set the --build type, don't use --host. If a cross compiler is detected then cross compile mode will be used" >&2 elif test "x$build_alias" != "x$host_alias"; then cross_compiling=yes fi fi ac_tool_prefix= test -n "$host_alias" && ac_tool_prefix=$host_alias- test "$silent" = yes && exec 6>/dev/null ac_pwd=`pwd` && test -n "$ac_pwd" && ac_ls_di=`ls -di .` && ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` || as_fn_error $? "working directory cannot be determined" test "X$ac_ls_di" = "X$ac_pwd_ls_di" || as_fn_error $? "pwd does not report name of working directory" # Find the source files, if location was not specified. if test -z "$srcdir"; then ac_srcdir_defaulted=yes # Try the directory containing this script, then the parent directory. ac_confdir=`$as_dirname -- "$as_myself" || $as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_myself" : 'X\(//\)[^/]' \| \ X"$as_myself" : 'X\(//\)$' \| \ X"$as_myself" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$as_myself" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` srcdir=$ac_confdir if test ! -r "$srcdir/$ac_unique_file"; then srcdir=.. fi else ac_srcdir_defaulted=no fi if test ! -r "$srcdir/$ac_unique_file"; then test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .." as_fn_error $? "cannot find sources ($ac_unique_file) in $srcdir" fi ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work" ac_abs_confdir=`( cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg" pwd)` # When building in place, set srcdir=. if test "$ac_abs_confdir" = "$ac_pwd"; then srcdir=. fi # Remove unnecessary trailing slashes from srcdir. # Double slashes in file names in object file debugging info # mess up M-x gdb in Emacs. case $srcdir in */) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;; esac for ac_var in $ac_precious_vars; do eval ac_env_${ac_var}_set=\${${ac_var}+set} eval ac_env_${ac_var}_value=\$${ac_var} eval ac_cv_env_${ac_var}_set=\${${ac_var}+set} eval ac_cv_env_${ac_var}_value=\$${ac_var} done # # Report the --help message. # if test "$ac_init_help" = "long"; then # Omit some internal or obsolete options to make the list less imposing. # This message is too long to be a string in the A/UX 3.1 sh. cat <<_ACEOF \`configure' configures this package to adapt to many kinds of systems. Usage: $0 [OPTION]... [VAR=VALUE]... To assign environment variables (e.g., CC, CFLAGS...), specify them as VAR=VALUE. 
See below for descriptions of some of the useful variables. Defaults for the options are specified in brackets. Configuration: -h, --help display this help and exit --help=short display options specific to this package --help=recursive display the short help of all the included packages -V, --version display version information and exit -q, --quiet, --silent do not print \`checking ...' messages --cache-file=FILE cache test results in FILE [disabled] -C, --config-cache alias for \`--cache-file=config.cache' -n, --no-create do not create output files --srcdir=DIR find the sources in DIR [configure dir or \`..'] Installation directories: --prefix=PREFIX install architecture-independent files in PREFIX [$ac_default_prefix] --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX [PREFIX] By default, \`make install' will install all the files in \`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify an installation prefix other than \`$ac_default_prefix' using \`--prefix', for instance \`--prefix=\$HOME'. For better control, use the options below. Fine tuning of the installation directories: --bindir=DIR user executables [EPREFIX/bin] --sbindir=DIR system admin executables [EPREFIX/sbin] --libexecdir=DIR program executables [EPREFIX/libexec] --sysconfdir=DIR read-only single-machine data [PREFIX/etc] --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] --localstatedir=DIR modifiable single-machine data [PREFIX/var] --libdir=DIR object code libraries [EPREFIX/lib] --includedir=DIR C header files [PREFIX/include] --oldincludedir=DIR C header files for non-gcc [/usr/include] --datarootdir=DIR read-only arch.-independent data root [PREFIX/share] --datadir=DIR read-only architecture-independent data [DATAROOTDIR] --infodir=DIR info documentation [DATAROOTDIR/info] --localedir=DIR locale-dependent data [DATAROOTDIR/locale] --mandir=DIR man documentation [DATAROOTDIR/man] --docdir=DIR documentation root [DATAROOTDIR/doc/PACKAGE] --htmldir=DIR html documentation [DOCDIR] --dvidir=DIR dvi documentation [DOCDIR] --pdfdir=DIR pdf documentation [DOCDIR] --psdir=DIR ps documentation [DOCDIR] _ACEOF cat <<\_ACEOF _ACEOF fi if test -n "$ac_init_help"; then cat <<\_ACEOF Some influential environment variables: CXX C++ compiler command CXXFLAGS C++ compiler flags LDFLAGS linker flags, e.g. -L if you have libraries in a nonstandard directory LIBS libraries to pass to the linker, e.g. -l CPPFLAGS (Objective) C/C++ preprocessor flags, e.g. -I if you have headers in a nonstandard directory CC C compiler command CFLAGS C compiler flags CPP C preprocessor Use these variables to override the choices made by `configure' or to help it to find libraries and programs with nonstandard names/locations. Report bugs to the package provider. _ACEOF ac_status=$? fi if test "$ac_init_help" = "recursive"; then # If there are subdirs, report their specific --help. for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue test -d "$ac_dir" || { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } || continue ac_builddir=. case "$ac_dir" in .) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` # A ".." for each directory in $ac_dir_suffix. ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` case $ac_top_builddir_sub in "") ac_top_builddir_sub=. 
ac_top_build_prefix= ;; *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; esac ;; esac ac_abs_top_builddir=$ac_pwd ac_abs_builddir=$ac_pwd$ac_dir_suffix # for backward compatibility: ac_top_builddir=$ac_top_build_prefix case $srcdir in .) # We are building in place. ac_srcdir=. ac_top_srcdir=$ac_top_builddir_sub ac_abs_top_srcdir=$ac_pwd ;; [\\/]* | ?:[\\/]* ) # Absolute name. ac_srcdir=$srcdir$ac_dir_suffix; ac_top_srcdir=$srcdir ac_abs_top_srcdir=$srcdir ;; *) # Relative name. ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix ac_top_srcdir=$ac_top_build_prefix$srcdir ac_abs_top_srcdir=$ac_pwd/$srcdir ;; esac ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix cd "$ac_dir" || { ac_status=$?; continue; } # Check for guested configure. if test -f "$ac_srcdir/configure.gnu"; then echo && $SHELL "$ac_srcdir/configure.gnu" --help=recursive elif test -f "$ac_srcdir/configure"; then echo && $SHELL "$ac_srcdir/configure" --help=recursive else $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2 fi || ac_status=$? cd "$ac_pwd" || { ac_status=$?; break; } done fi test -n "$ac_init_help" && exit $ac_status if $ac_init_version; then cat <<\_ACEOF configure generated by GNU Autoconf 2.68 Copyright (C) 2010 Free Software Foundation, Inc. This configure script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it. _ACEOF exit fi ## ------------------------ ## ## Autoconf initialization. ## ## ------------------------ ## # ac_fn_cxx_try_compile LINENO # ---------------------------- # Try to compile conftest.$ac_ext, and return whether this succeeded. ac_fn_cxx_try_compile () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack rm -f conftest.$ac_objext if { { ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compile") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { test -z "$ac_cxx_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_cxx_try_compile # ac_fn_c_try_compile LINENO # -------------------------- # Try to compile conftest.$ac_ext, and return whether this succeeded. ac_fn_c_try_compile () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack rm -f conftest.$ac_objext if { { ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compile") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { test -z "$ac_c_werror_flag" || test ! 
-s conftest.err } && test -s conftest.$ac_objext; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_compile # ac_fn_c_try_cpp LINENO # ---------------------- # Try to preprocess conftest.$ac_ext, and return whether this succeeded. ac_fn_c_try_cpp () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if { { ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } > conftest.i && { test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || test ! -s conftest.err }; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_cpp # ac_fn_c_check_header_mongrel LINENO HEADER VAR INCLUDES # ------------------------------------------------------- # Tests whether HEADER exists, giving a warning if it cannot be compiled using # the include files in INCLUDES and setting the cache variable VAR # accordingly. ac_fn_c_check_header_mongrel () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if eval \${$3+:} false; then : { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } else # Is the header compilable? { $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 usability" >&5 $as_echo_n "checking $2 usability... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 #include <$2> _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_header_compiler=yes else ac_header_compiler=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_compiler" >&5 $as_echo "$ac_header_compiler" >&6; } # Is the header present? { $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 presence" >&5 $as_echo_n "checking $2 presence... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <$2> _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : ac_header_preproc=yes else ac_header_preproc=no fi rm -f conftest.err conftest.i conftest.$ac_ext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_preproc" >&5 $as_echo "$ac_header_preproc" >&6; } # So? What about this header? case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in #(( yes:no: ) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&5 $as_echo "$as_me: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" 
>&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} ;; no:yes:* ) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: present but cannot be compiled" >&5 $as_echo "$as_me: WARNING: $2: present but cannot be compiled" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: check for missing prerequisite headers?" >&5 $as_echo "$as_me: WARNING: $2: check for missing prerequisite headers?" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: see the Autoconf documentation" >&5 $as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&5 $as_echo "$as_me: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else eval "$3=\$ac_header_compiler" fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_header_mongrel # ac_fn_c_try_run LINENO # ---------------------- # Try to link conftest.$ac_ext, and return whether this succeeded. Assumes # that executables *can* be run. ac_fn_c_try_run () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { ac_try='./conftest$ac_exeext' { { case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; }; then : ac_retval=0 else $as_echo "$as_me: program exited with status $ac_status" >&5 $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=$ac_status fi rm -rf conftest.dSYM conftest_ipa8_conftest.oo eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_run # ac_fn_c_check_header_compile LINENO HEADER VAR INCLUDES # ------------------------------------------------------- # Tests whether HEADER exists and can be compiled using the include files in # INCLUDES, setting the cache variable VAR accordingly. ac_fn_c_check_header_compile () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ $4 #include <$2> _ACEOF if ac_fn_c_try_compile "$LINENO"; then : eval "$3=yes" else eval "$3=no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_header_compile # ac_fn_c_try_link LINENO # ----------------------- # Try to link conftest.$ac_ext, and return whether this succeeded. ac_fn_c_try_link () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack rm -f conftest.$ac_objext conftest$ac_exeext if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would # interfere with the next link command; also delete a directory that is # left behind by Apple's compiler. We do this before executing the actions. rm -rf conftest.dSYM conftest_ipa8_conftest.oo eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_link # ac_fn_c_check_func LINENO FUNC VAR # ---------------------------------- # Tests whether FUNC exists, setting the cache variable VAR accordingly ac_fn_c_check_func () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Define $2 to an innocuous variant, in case declares $2. For example, HP-UX 11i declares gettimeofday. */ #define $2 innocuous_$2 /* System header to define __stub macros and hopefully few prototypes, which can conflict with char $2 (); below. Prefer to if __STDC__ is defined, since exists even on freestanding compilers. */ #ifdef __STDC__ # include #else # include #endif #undef $2 /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char $2 (); /* The GNU C library defines this for functions which it implements to always fail with ENOSYS. Some functions are actually named something starting with __ and the normal name is an alias. 
*/ #if defined __stub_$2 || defined __stub___$2 choke me #endif int main () { return $2 (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$3=yes" else eval "$3=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_func cat >config.log <<_ACEOF This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. It was created by $as_me, which was generated by GNU Autoconf 2.68. Invocation command line was $ $0 $@ _ACEOF exec 5>>config.log { cat <<_ASUNAME ## --------- ## ## Platform. ## ## --------- ## hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` uname -m = `(uname -m) 2>/dev/null || echo unknown` uname -r = `(uname -r) 2>/dev/null || echo unknown` uname -s = `(uname -s) 2>/dev/null || echo unknown` uname -v = `(uname -v) 2>/dev/null || echo unknown` /usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` /bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` /bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` /usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` /usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` /usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown` /bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` /usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` /bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` _ASUNAME as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. $as_echo "PATH: $as_dir" done IFS=$as_save_IFS } >&5 cat >&5 <<_ACEOF ## ----------- ## ## Core tests. ## ## ----------- ## _ACEOF # Keep a trace of the command line. # Strip out --no-create and --no-recursion so they do not pile up. # Strip out --silent because we don't want to record it for future runs. # Also quote any args containing shell meta-characters. # Make two passes to allow for proper duplicate-argument suppression. ac_configure_args= ac_configure_args0= ac_configure_args1= ac_must_keep_next=false for ac_pass in 1 2 do for ac_arg do case $ac_arg in -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil) continue ;; *\'*) ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; esac case $ac_pass in 1) as_fn_append ac_configure_args0 " '$ac_arg'" ;; 2) as_fn_append ac_configure_args1 " '$ac_arg'" if test $ac_must_keep_next = true; then ac_must_keep_next=false # Got value, back to normal. else case $ac_arg in *=* | --config-cache | -C | -disable-* | --disable-* \ | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ | -with-* | --with-* | -without-* | --without-* | --x) case "$ac_configure_args0 " in "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; esac ;; -* ) ac_must_keep_next=true ;; esac fi as_fn_append ac_configure_args " '$ac_arg'" ;; esac done done { ac_configure_args0=; unset ac_configure_args0;} { ac_configure_args1=; unset ac_configure_args1;} # When interrupted or exit'd, cleanup temporary files, and complete # config.log. We remove comments because anyway the quotes in there # would cause problems or look ugly. 
# WARNING: Use '\'' to represent an apostrophe within the trap. # WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug. trap 'exit_status=$? # Save into config.log some information that might help in debugging. { echo $as_echo "## ---------------- ## ## Cache variables. ## ## ---------------- ##" echo # The following way of writing the cache mishandles newlines in values, ( for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do eval ac_val=\$$ac_var case $ac_val in #( *${as_nl}*) case $ac_var in #( *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 $as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; esac case $ac_var in #( _ | IFS | as_nl) ;; #( BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( *) { eval $ac_var=; unset $ac_var;} ;; esac ;; esac done (set) 2>&1 | case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #( *${as_nl}ac_space=\ *) sed -n \ "s/'\''/'\''\\\\'\'''\''/g; s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p" ;; #( *) sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" ;; esac | sort ) echo $as_echo "## ----------------- ## ## Output variables. ## ## ----------------- ##" echo for ac_var in $ac_subst_vars do eval ac_val=\$$ac_var case $ac_val in *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; esac $as_echo "$ac_var='\''$ac_val'\''" done | sort echo if test -n "$ac_subst_files"; then $as_echo "## ------------------- ## ## File substitutions. ## ## ------------------- ##" echo for ac_var in $ac_subst_files do eval ac_val=\$$ac_var case $ac_val in *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; esac $as_echo "$ac_var='\''$ac_val'\''" done | sort echo fi if test -s confdefs.h; then $as_echo "## ----------- ## ## confdefs.h. ## ## ----------- ##" echo cat confdefs.h echo fi test "$ac_signal" != 0 && $as_echo "$as_me: caught signal $ac_signal" $as_echo "$as_me: exit $exit_status" } >&5 rm -f core *.core core.conftest.* && rm -f -r conftest* confdefs* conf$$* $ac_clean_files && exit $exit_status ' 0 for ac_signal in 1 2 13 15; do trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal done ac_signal=0 # confdefs.h avoids OS command line length limits that DEFS can exceed. rm -f -r conftest* confdefs.h $as_echo "/* confdefs.h */" > confdefs.h # Predefined preprocessor variables. cat >>confdefs.h <<_ACEOF #define PACKAGE_NAME "$PACKAGE_NAME" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_TARNAME "$PACKAGE_TARNAME" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_VERSION "$PACKAGE_VERSION" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_STRING "$PACKAGE_STRING" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_URL "$PACKAGE_URL" _ACEOF # Let the site file select an alternate cache file if it wants to. # Prefer an explicitly selected file to automatically selected ones. ac_site_file1=NONE ac_site_file2=NONE if test -n "$CONFIG_SITE"; then # We do not want a PATH search for config.site. 
case $CONFIG_SITE in #(( -*) ac_site_file1=./$CONFIG_SITE;; */*) ac_site_file1=$CONFIG_SITE;; *) ac_site_file1=./$CONFIG_SITE;; esac elif test "x$prefix" != xNONE; then ac_site_file1=$prefix/share/config.site ac_site_file2=$prefix/etc/config.site else ac_site_file1=$ac_default_prefix/share/config.site ac_site_file2=$ac_default_prefix/etc/config.site fi for ac_site_file in "$ac_site_file1" "$ac_site_file2" do test "x$ac_site_file" = xNONE && continue if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5 $as_echo "$as_me: loading site script $ac_site_file" >&6;} sed 's/^/| /' "$ac_site_file" >&5 . "$ac_site_file" \ || { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "failed to load site script $ac_site_file See \`config.log' for more details" "$LINENO" 5; } fi done if test -r "$cache_file"; then # Some versions of bash will fail to source /dev/null (special files # actually), so we avoid doing that. DJGPP emulates it as a regular file. if test /dev/null != "$cache_file" && test -f "$cache_file"; then { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5 $as_echo "$as_me: loading cache $cache_file" >&6;} case $cache_file in [\\/]* | ?:[\\/]* ) . "$cache_file";; *) . "./$cache_file";; esac fi else { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5 $as_echo "$as_me: creating cache $cache_file" >&6;} >$cache_file fi # Check that the precious variables saved in the cache have kept the same # value. ac_cache_corrupted=false for ac_var in $ac_precious_vars; do eval ac_old_set=\$ac_cv_env_${ac_var}_set eval ac_new_set=\$ac_env_${ac_var}_set eval ac_old_val=\$ac_cv_env_${ac_var}_value eval ac_new_val=\$ac_env_${ac_var}_value case $ac_old_set,$ac_new_set in set,) { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 $as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} ac_cache_corrupted=: ;; ,set) { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5 $as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} ac_cache_corrupted=: ;; ,);; *) if test "x$ac_old_val" != "x$ac_new_val"; then # differences in whitespace do not lead to failure. ac_old_val_w=`echo x $ac_old_val` ac_new_val_w=`echo x $ac_new_val` if test "$ac_old_val_w" != "$ac_new_val_w"; then { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5 $as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} ac_cache_corrupted=: else { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 $as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;} eval $ac_var=\$ac_old_val fi { $as_echo "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5 $as_echo "$as_me: former value: \`$ac_old_val'" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5 $as_echo "$as_me: current value: \`$ac_new_val'" >&2;} fi;; esac # Pass precious variables to config.status. if test "$ac_new_set" = set; then case $ac_new_val in *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; *) ac_arg=$ac_var=$ac_new_val ;; esac case " $ac_configure_args " in *" '$ac_arg' "*) ;; # Avoid dups. 
Use of quotes ensures accuracy. *) as_fn_append ac_configure_args " '$ac_arg'" ;; esac fi done if $ac_cache_corrupted; then { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5 $as_echo "$as_me: error: changes in the environment can compromise the build" >&2;} as_fn_error $? "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5 fi ## -------------------- ## ## Main body of script. ## ## -------------------- ## ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu : ${R_HOME=`R RHOME`} if test -z "${R_HOME}"; then echo "could not determine R_HOME" exit 1 fi CXX=`${R_HOME}/bin/R CMD config CXX` ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu if test -z "$CXX"; then if test -n "$CCC"; then CXX=$CCC else if test -n "$ac_tool_prefix"; then for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CXX+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CXX"; then ac_cv_prog_CXX="$CXX" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_CXX="$ac_tool_prefix$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CXX=$ac_cv_prog_CXX if test -n "$CXX"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXX" >&5 $as_echo "$CXX" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$CXX" && break done fi if test -z "$CXX"; then ac_ct_CXX=$CXX for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_CXX+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CXX"; then ac_cv_prog_ac_ct_CXX="$ac_ct_CXX" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_CXX="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CXX=$ac_cv_prog_ac_ct_CXX if test -n "$ac_ct_CXX"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CXX" >&5 $as_echo "$ac_ct_CXX" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$ac_ct_CXX" && break done if test "x$ac_ct_CXX" = x; then CXX="g++" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CXX=$ac_ct_CXX fi fi fi fi # Provide some information about the compiler. $as_echo "$as_me:${as_lineno-$LINENO}: checking for C++ compiler version" >&5 set X $ac_compile ac_compiler=$2 for ac_option in --version -v -V -qversion; do { { ac_try="$ac_compiler $ac_option >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compiler $ac_option >&5") 2>conftest.err ac_status=$? if test -s conftest.err; then sed '10a\ ... rest of stderr output deleted ... 10q' conftest.err >conftest.er1 cat conftest.er1 >&5 fi rm -f conftest.er1 conftest.err $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } done cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF ac_clean_files_save=$ac_clean_files ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out" # Try to create an executable without -o first, disregard a.out. # It will help us diagnose broken compilers, and finding out an intuition # of exeext. { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C++ compiler works" >&5 $as_echo_n "checking whether the C++ compiler works... " >&6; } ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` # The possible output files: ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*" ac_rmfiles= for ac_file in $ac_files do case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; * ) ac_rmfiles="$ac_rmfiles $ac_file";; esac done rm -f $ac_rmfiles if { { ac_try="$ac_link_default" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link_default") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then : # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. # So ignore a value of `no', otherwise this would lead to `EXEEXT = no' # in a Makefile. We should not override ac_cv_exeext if it was cached, # so that the user can short-circuit this test for compilers unknown to # Autoconf. for ac_file in $ac_files '' do test -f "$ac_file" || continue case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; [ab].out ) # We found the default executable, but exeext='' is most # certainly right. 
break;; *.* ) if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no; then :; else ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` fi # We set ac_cv_exeext here because the later test for it is not # safe: cross compilers may not add the suffix if given an `-o' # argument, so we may need to know it at that point already. # Even if this section looks crufty: it has the advantage of # actually working. break;; * ) break;; esac done test "$ac_cv_exeext" = no && ac_cv_exeext= else ac_file='' fi if test -z "$ac_file"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error 77 "C++ compiler cannot create executables See \`config.log' for more details" "$LINENO" 5; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for C++ compiler default output file name" >&5 $as_echo_n "checking for C++ compiler default output file name... " >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5 $as_echo "$ac_file" >&6; } ac_exeext=$ac_cv_exeext rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out ac_clean_files=$ac_clean_files_save { $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5 $as_echo_n "checking for suffix of executables... " >&6; } if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then : # If both `conftest.exe' and `conftest' are `present' (well, observable) # catch `conftest.exe'. For instance with Cygwin, `ls conftest' will # work properly (i.e., refer to `conftest.exe'), while it won't with # `rm'. for ac_file in conftest.exe conftest conftest.*; do test -f "$ac_file" || continue case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` break;; * ) break;; esac done else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot compute suffix of executables: cannot compile and link See \`config.log' for more details" "$LINENO" 5; } fi rm -f conftest conftest$ac_cv_exeext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5 $as_echo "$ac_cv_exeext" >&6; } rm -f conftest.$ac_ext EXEEXT=$ac_cv_exeext ac_exeext=$EXEEXT cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <stdio.h> int main () { FILE *f = fopen ("conftest.out", "w"); return ferror (f) || fclose (f) != 0; ; return 0; } _ACEOF ac_clean_files="$ac_clean_files conftest.out" # Check that the compiler produces executables we can run. If not, either # the compiler is broken, or we cross compile. { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5 $as_echo_n "checking whether we are cross compiling... 
" >&6; } if test "$cross_compiling" != yes; then { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } if { ac_try='./conftest$ac_cv_exeext' { { case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; }; then cross_compiling=no else if test "$cross_compiling" = maybe; then cross_compiling=yes else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot run C++ compiled programs. If you meant to cross compile, use \`--host'. See \`config.log' for more details" "$LINENO" 5; } fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5 $as_echo "$cross_compiling" >&6; } rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out ac_clean_files=$ac_clean_files_save { $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5 $as_echo_n "checking for suffix of object files... " >&6; } if ${ac_cv_objext+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF rm -f conftest.o conftest.obj if { { ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compile") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then : for ac_file in conftest.o conftest.obj conftest.*; do test -f "$ac_file" || continue; case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;; *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` break;; esac done else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot compute suffix of object files: cannot compile See \`config.log' for more details" "$LINENO" 5; } fi rm -f conftest.$ac_cv_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5 $as_echo "$ac_cv_objext" >&6; } OBJEXT=$ac_cv_objext ac_objext=$OBJEXT { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C++ compiler" >&5 $as_echo_n "checking whether we are using the GNU C++ compiler... " >&6; } if ${ac_cv_cxx_compiler_gnu+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { #ifndef __GNUC__ choke me #endif ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : ac_compiler_gnu=yes else ac_compiler_gnu=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_cxx_compiler_gnu=$ac_compiler_gnu fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_cxx_compiler_gnu" >&5 $as_echo "$ac_cv_cxx_compiler_gnu" >&6; } if test $ac_compiler_gnu = yes; then GXX=yes else GXX= fi ac_test_CXXFLAGS=${CXXFLAGS+set} ac_save_CXXFLAGS=$CXXFLAGS { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CXX accepts -g" >&5 $as_echo_n "checking whether $CXX accepts -g... " >&6; } if ${ac_cv_prog_cxx_g+:} false; then : $as_echo_n "(cached) " >&6 else ac_save_cxx_werror_flag=$ac_cxx_werror_flag ac_cxx_werror_flag=yes ac_cv_prog_cxx_g=no CXXFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : ac_cv_prog_cxx_g=yes else CXXFLAGS="" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : else ac_cxx_werror_flag=$ac_save_cxx_werror_flag CXXFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : ac_cv_prog_cxx_g=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cxx_werror_flag=$ac_save_cxx_werror_flag fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_g" >&5 $as_echo "$ac_cv_prog_cxx_g" >&6; } if test "$ac_test_CXXFLAGS" = set; then CXXFLAGS=$ac_save_CXXFLAGS elif test $ac_cv_prog_cxx_g = yes; then if test "$GXX" = yes; then CXXFLAGS="-g -O2" else CXXFLAGS="-g" fi else if test "$GXX" = yes; then CXXFLAGS="-O2" else CXXFLAGS= fi fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu if test "${GXX}" = yes; then gxx_version=`${CXX} -v 2>&1 | grep "^.*g.. version" | \ sed -e 's/^.*g.. version *//'` case ${gxx_version} in 1.*|2.*|3.*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Only g++ version 4.0 or greater can be used with MCMCpack." >&5 $as_echo "$as_me: WARNING: Only g++ version 4.0 or greater can be used with MCMCpack." >&2;} as_fn_error $? "Please use a different compiler." "$LINENO" 5 ;; esac fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. set dummy ${ac_tool_prefix}gcc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_CC="${ac_tool_prefix}gcc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_CC"; then ac_ct_CC=$CC # Extract the first word of "gcc", so it can be a program name with args. set dummy gcc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_CC="gcc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 $as_echo "$ac_ct_CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_CC" = x; then CC="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CC=$ac_ct_CC fi else CC="$ac_cv_prog_CC" fi if test -z "$CC"; then if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. set dummy ${ac_tool_prefix}cc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_CC="${ac_tool_prefix}cc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi fi if test -z "$CC"; then # Extract the first word of "cc", so it can be a program name with args. set dummy cc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else ac_prog_rejected=no as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then ac_prog_rejected=yes continue fi ac_cv_prog_CC="cc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS if test $ac_prog_rejected = yes; then # We found a bogon in the path, so make sure we never use it. set dummy $ac_cv_prog_CC shift if test $# != 0; then # We chose a different compiler from the bogus one. # However, it has the same basename, so the bogon will be chosen # first if we set CC to just the basename; use the full file name. shift ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" fi fi fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$CC"; then if test -n "$ac_tool_prefix"; then for ac_prog in cl.exe do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_CC="$ac_tool_prefix$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$CC" && break done fi if test -z "$CC"; then ac_ct_CC=$CC for ac_prog in cl.exe do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_CC="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 $as_echo "$ac_ct_CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$ac_ct_CC" && break done if test "x$ac_ct_CC" = x; then CC="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CC=$ac_ct_CC fi fi fi test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "no acceptable C compiler found in \$PATH See \`config.log' for more details" "$LINENO" 5; } # Provide some information about the compiler. $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 set X $ac_compile ac_compiler=$2 for ac_option in --version -v -V -qversion; do { { ac_try="$ac_compiler $ac_option >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compiler $ac_option >&5") 2>conftest.err ac_status=$? if test -s conftest.err; then sed '10a\ ... rest of stderr output deleted ... 10q' conftest.err >conftest.er1 cat conftest.er1 >&5 fi rm -f conftest.er1 conftest.err $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } done { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5 $as_echo_n "checking whether we are using the GNU C compiler... " >&6; } if ${ac_cv_c_compiler_gnu+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { #ifndef __GNUC__ choke me #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_compiler_gnu=yes else ac_compiler_gnu=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_c_compiler_gnu=$ac_compiler_gnu fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 $as_echo "$ac_cv_c_compiler_gnu" >&6; } if test $ac_compiler_gnu = yes; then GCC=yes else GCC= fi ac_test_CFLAGS=${CFLAGS+set} ac_save_CFLAGS=$CFLAGS { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 $as_echo_n "checking whether $CC accepts -g... " >&6; } if ${ac_cv_prog_cc_g+:} false; then : $as_echo_n "(cached) " >&6 else ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes ac_cv_prog_cc_g=no CFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_g=yes else CFLAGS="" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : else ac_c_werror_flag=$ac_save_c_werror_flag CFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_g=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_c_werror_flag=$ac_save_c_werror_flag fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 $as_echo "$ac_cv_prog_cc_g" >&6; } if test "$ac_test_CFLAGS" = set; then CFLAGS=$ac_save_CFLAGS elif test $ac_cv_prog_cc_g = yes; then if test "$GCC" = yes; then CFLAGS="-g -O2" else CFLAGS="-g" fi else if test "$GCC" = yes; then CFLAGS="-O2" else CFLAGS= fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5 $as_echo_n "checking for $CC option to accept ISO C89... " >&6; } if ${ac_cv_prog_cc_c89+:} false; then : $as_echo_n "(cached) " >&6 else ac_cv_prog_cc_c89=no ac_save_CC=$CC cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <stdarg.h> #include <stdio.h> #include <sys/types.h> #include <sys/stat.h> /* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */ struct buf { int x; }; FILE * (*rcsopen) (struct buf *, struct stat *, int); static char *e (p, i) char **p; int i; { return p[i]; } static char *f (char * (*g) (char **, int), char **p, ...) { char *s; va_list v; va_start (v,p); s = g (p, va_arg (v,int)); va_end (v); return s; } /* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has function prototypes and stuff, but not '\xHH' hex character constants. These don't provoke an error unfortunately, instead are silently treated as 'x'. The following induces an error, until -std is added to get proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an array size at least. It's necessary to write '\x00'==0 to get something that's true only with -std. */ int osf4_cc_array ['\x00' == 0 ? 1 : -1]; /* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters inside strings and character constants. */ #define FOO(x) 'x' int xlc6_cc_array[FOO(a) == 'x' ? 
1 : -1]; int test (int i, double x); struct s1 {int (*f) (int a);}; struct s2 {int (*f) (double a);}; int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); int argc; char **argv; int main () { return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; ; return 0; } _ACEOF for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" do CC="$ac_save_CC $ac_arg" if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_c89=$ac_arg fi rm -f core conftest.err conftest.$ac_objext test "x$ac_cv_prog_cc_c89" != "xno" && break done rm -f conftest.$ac_ext CC=$ac_save_CC fi # AC_CACHE_VAL case "x$ac_cv_prog_cc_c89" in x) { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 $as_echo "none needed" >&6; } ;; xno) { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 $as_echo "unsupported" >&6; } ;; *) CC="$CC $ac_cv_prog_cc_c89" { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 $as_echo "$ac_cv_prog_cc_c89" >&6; } ;; esac if test "x$ac_cv_prog_cc_c89" != xno; then : fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5 $as_echo_n "checking how to run the C preprocessor... " >&6; } # On Suns, sometimes $CPP names a directory. if test -n "$CPP" && test -d "$CPP"; then CPP= fi if test -z "$CPP"; then if ${ac_cv_prog_CPP+:} false; then : $as_echo_n "(cached) " >&6 else # Double quotes because CPP needs to be expanded for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp" do ac_preproc_ok=false for ac_c_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since # <limits.h> exists even on freestanding compilers. # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifdef __STDC__ # include <limits.h> #else # include <assert.h> #endif Syntax error _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : else # Broken: fails on valid input. continue fi rm -f conftest.err conftest.i conftest.$ac_ext # OK, works on sane cases. Now check whether nonexistent headers # can be detected and how. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <ac_nonexistent.h> _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : # Broken: success on invalid input. continue else # Passes both tests. ac_preproc_ok=: break fi rm -f conftest.err conftest.i conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. rm -f conftest.i conftest.err conftest.$ac_ext if $ac_preproc_ok; then : break fi done ac_cv_prog_CPP=$CPP fi CPP=$ac_cv_prog_CPP else ac_cv_prog_CPP=$CPP fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPP" >&5 $as_echo "$CPP" >&6; } ac_preproc_ok=false for ac_c_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since # <limits.h> exists even on freestanding compilers. 
# On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifdef __STDC__ # include <limits.h> #else # include <assert.h> #endif Syntax error _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : else # Broken: fails on valid input. continue fi rm -f conftest.err conftest.i conftest.$ac_ext # OK, works on sane cases. Now check whether nonexistent headers # can be detected and how. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <ac_nonexistent.h> _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : # Broken: success on invalid input. continue else # Passes both tests. ac_preproc_ok=: break fi rm -f conftest.err conftest.i conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. rm -f conftest.i conftest.err conftest.$ac_ext if $ac_preproc_ok; then : else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "C preprocessor \"$CPP\" fails sanity check See \`config.log' for more details" "$LINENO" 5; } fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu { $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5 $as_echo_n "checking for grep that handles long lines and -e... " >&6; } if ${ac_cv_path_GREP+:} false; then : $as_echo_n "(cached) " >&6 else if test -z "$GREP"; then ac_path_GREP_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in grep ggrep; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext" { test -f "$ac_path_GREP" && $as_test_x "$ac_path_GREP"; } || continue # Check for GNU ac_path_GREP and select it if it is found. # Check for GNU $ac_path_GREP case `"$ac_path_GREP" --version 2>&1` in *GNU*) ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;; *) ac_count=0 $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" $as_echo 'GREP' >> "conftest.nl" "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break as_fn_arith $ac_count + 1 && ac_count=$as_val if test $ac_count -gt ${ac_path_GREP_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_max=$ac_count fi # 10*(2^10) chars as input seems more than enough test $ac_count -gt 10 && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac $ac_path_GREP_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_GREP"; then as_fn_error $? "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 fi else ac_cv_path_GREP=$GREP fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5 $as_echo "$ac_cv_path_GREP" >&6; } GREP="$ac_cv_path_GREP" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5 $as_echo_n "checking for egrep... 
" >&6; } if ${ac_cv_path_EGREP+:} false; then : $as_echo_n "(cached) " >&6 else if echo a | $GREP -E '(a|b)' >/dev/null 2>&1 then ac_cv_path_EGREP="$GREP -E" else if test -z "$EGREP"; then ac_path_EGREP_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in egrep; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext" { test -f "$ac_path_EGREP" && $as_test_x "$ac_path_EGREP"; } || continue # Check for GNU ac_path_EGREP and select it if it is found. # Check for GNU $ac_path_EGREP case `"$ac_path_EGREP" --version 2>&1` in *GNU*) ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;; *) ac_count=0 $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" $as_echo 'EGREP' >> "conftest.nl" "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break as_fn_arith $ac_count + 1 && ac_count=$as_val if test $ac_count -gt ${ac_path_EGREP_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_max=$ac_count fi # 10*(2^10) chars as input seems more than enough test $ac_count -gt 10 && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac $ac_path_EGREP_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_EGREP"; then as_fn_error $? "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 fi else ac_cv_path_EGREP=$EGREP fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5 $as_echo "$ac_cv_path_EGREP" >&6; } EGREP="$ac_cv_path_EGREP" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 $as_echo_n "checking for ANSI C header files... " >&6; } if ${ac_cv_header_stdc+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include #include #include int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_header_stdc=yes else ac_cv_header_stdc=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test $ac_cv_header_stdc = yes; then # SunOS 4.x string.h does not declare mem*, contrary to ANSI. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "memchr" >/dev/null 2>&1; then : else ac_cv_header_stdc=no fi rm -f conftest* fi if test $ac_cv_header_stdc = yes; then # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "free" >/dev/null 2>&1; then : else ac_cv_header_stdc=no fi rm -f conftest* fi if test $ac_cv_header_stdc = yes; then # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. if test "$cross_compiling" = yes; then : : else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include #if ((' ' & 0x0FF) == 0x020) # define ISLOWER(c) ('a' <= (c) && (c) <= 'z') # define TOUPPER(c) (ISLOWER(c) ? 
'A' + ((c) - 'a') : (c)) #else # define ISLOWER(c) \ (('a' <= (c) && (c) <= 'i') \ || ('j' <= (c) && (c) <= 'r') \ || ('s' <= (c) && (c) <= 'z')) # define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c)) #endif #define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) int main () { int i; for (i = 0; i < 256; i++) if (XOR (islower (i), ISLOWER (i)) || toupper (i) != TOUPPER (i)) return 2; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : else ac_cv_header_stdc=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5 $as_echo "$ac_cv_header_stdc" >&6; } if test $ac_cv_header_stdc = yes; then $as_echo "#define STDC_HEADERS 1" >>confdefs.h fi # On IRIX 5.3, sys/types and inttypes.h are conflicting. for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ inttypes.h stdint.h unistd.h do : as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default " if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done for ac_header in ieeefp.h do : ac_fn_c_check_header_mongrel "$LINENO" "ieeefp.h" "ac_cv_header_ieeefp_h" "$ac_includes_default" if test "x$ac_cv_header_ieeefp_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_IEEEFP_H 1 _ACEOF MV_HAVE_IEEEFP_H="-DHAVE_IEEEFP_H" else MV_HAVE_IEEEFP_H="" fi done for ac_func in trunc do : ac_fn_c_check_func "$LINENO" "trunc" "ac_cv_func_trunc" if test "x$ac_cv_func_trunc" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_TRUNC 1 _ACEOF MV_HAVE_TRUNC="-DHAVE_TRUNC" else MV_HAVE_TRUNC="" fi done ac_config_files="$ac_config_files src/Makevars" cat >confcache <<\_ACEOF # This file is a shell script that caches the results of configure # tests run on this system so they can be shared between configure # scripts and configure runs, see configure's option --config-cache. # It is not useful on other systems. If it contains results you don't # want to keep, you may remove or edit it. # # config.status only pays attention to the cache file if you give it # the --recheck option to rerun configure. # # `ac_cv_env_foo' variables (set or unset) will be overridden when # loading this file, other *unset* `ac_cv_foo' will be assigned the # following values. _ACEOF # The following way of writing the cache mishandles newlines in values, # but we know of no workaround that is simple, portable, and efficient. # So, we kill variables containing newlines. # Ultrix sh set writes to stderr and can't be redirected directly, # and sets the high bit in the cache file unless we assign to the vars. ( for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do eval ac_val=\$$ac_var case $ac_val in #( *${as_nl}*) case $ac_var in #( *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 $as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; esac case $ac_var in #( _ | IFS | as_nl) ;; #( BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( *) { eval $ac_var=; unset $ac_var;} ;; esac ;; esac done (set) 2>&1 | case $as_nl`(ac_space=' '; set) 2>&1` in #( *${as_nl}ac_space=\ *) # `set' does not quote correctly, so add quotes: double-quote # substitution turns \\\\ into \\, and sed turns \\ into \. 
sed -n \ "s/'/'\\\\''/g; s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" ;; #( *) # `set' quotes correctly as required by POSIX, so do not add quotes. sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" ;; esac | sort ) | sed ' /^ac_cv_env_/b end t clear :clear s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ t end s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ :end' >>confcache if diff "$cache_file" confcache >/dev/null 2>&1; then :; else if test -w "$cache_file"; then if test "x$cache_file" != "x/dev/null"; then { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5 $as_echo "$as_me: updating cache $cache_file" >&6;} if test ! -f "$cache_file" || test -h "$cache_file"; then cat confcache >"$cache_file" else case $cache_file in #( */* | ?:*) mv -f confcache "$cache_file"$$ && mv -f "$cache_file"$$ "$cache_file" ;; #( *) mv -f confcache "$cache_file" ;; esac fi fi else { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5 $as_echo "$as_me: not updating unwritable cache $cache_file" >&6;} fi fi rm -f confcache test "x$prefix" = xNONE && prefix=$ac_default_prefix # Let make expand exec_prefix. test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' # Transform confdefs.h into DEFS. # Protect against shell expansion while executing Makefile rules. # Protect against Makefile macro expansion. # # If the first sed substitution is executed (which looks for macros that # take arguments), then branch to the quote section. Otherwise, # look for a macro that doesn't take arguments. ac_script=' :mline /\\$/{ N s,\\\n,, b mline } t clear :clear s/^[ ]*#[ ]*define[ ][ ]*\([^ (][^ (]*([^)]*)\)[ ]*\(.*\)/-D\1=\2/g t quote s/^[ ]*#[ ]*define[ ][ ]*\([^ ][^ ]*\)[ ]*\(.*\)/-D\1=\2/g t quote b any :quote s/[ `~#$^&*(){}\\|;'\''"<>?]/\\&/g s/\[/\\&/g s/\]/\\&/g s/\$/$$/g H :any ${ g s/^\n// s/\n/ /g p } ' DEFS=`sed -n "$ac_script" confdefs.h` ac_libobjs= ac_ltlibobjs= U= for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue # 1. Remove the extension, and $U if already installed. ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' ac_i=`$as_echo "$ac_i" | sed "$ac_script"` # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR # will be set to the directory where LIBOBJS objects are built. as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext" as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo' done LIBOBJS=$ac_libobjs LTLIBOBJS=$ac_ltlibobjs : "${CONFIG_STATUS=./config.status}" ac_write_fail=0 ac_clean_files_save=$ac_clean_files ac_clean_files="$ac_clean_files $CONFIG_STATUS" { $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5 $as_echo "$as_me: creating $CONFIG_STATUS" >&6;} as_write_fail=0 cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1 #! $SHELL # Generated by $as_me. # Run this file to recreate the current configuration. # Compiler output produced by configure, useful for debugging # configure, is in config.log if it exists. debug=false ac_cs_recheck=false ac_cs_silent=false SHELL=\${CONFIG_SHELL-$SHELL} export SHELL _ASEOF cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1 ## -------------------- ## ## M4sh Initialization. ## ## -------------------- ## # Be more Bourne compatible DUALCASE=1; export DUALCASE # for MKS sh if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. 
alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else case `(set -o) 2>/dev/null` in #( *posix*) : set -o posix ;; #( *) : ;; esac fi as_nl=' ' export as_nl # Printing a long string crashes Solaris 7 /usr/bin/printf. as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo # Prefer a ksh shell builtin over an external printf program on Solaris, # but without wasting forks for bash or zsh. if test -z "$BASH_VERSION$ZSH_VERSION" \ && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='print -r --' as_echo_n='print -rn --' elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='printf %s\n' as_echo_n='printf %s' else if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' as_echo_n='/usr/ucb/echo -n' else as_echo_body='eval expr "X$1" : "X\\(.*\\)"' as_echo_n_body='eval arg=$1; case $arg in #( *"$as_nl"*) expr "X$arg" : "X\\(.*\\)$as_nl"; arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; esac; expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" ' export as_echo_n_body as_echo_n='sh -c $as_echo_n_body as_echo' fi export as_echo_body as_echo='sh -c $as_echo_body as_echo' fi # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then PATH_SEPARATOR=: (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || PATH_SEPARATOR=';' } fi # IFS # We need space, tab and new line, in precisely that order. Quoting is # there to prevent editors from complaining about space-tab. # (If _AS_PATH_WALK were called with IFS unset, it would disable word # splitting by setting IFS to empty value.) IFS=" "" $as_nl" # Find who we are. Look in the path if we contain no directory separator. as_myself= case $0 in #(( *[\\/]* ) as_myself=$0 ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break done IFS=$as_save_IFS ;; esac # We did not find ourselves, most probably we were run as `sh COMMAND' # in which case we are not to be found in the path. if test "x$as_myself" = x; then as_myself=$0 fi if test ! -f "$as_myself"; then $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 exit 1 fi # Unset variables that we do not need and which cause bugs (e.g. in # pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" # suppresses any "Segmentation fault" message there. '((' could # trigger a bug in pdksh 5.2.14. for as_var in BASH_ENV ENV MAIL MAILPATH do eval test x\${$as_var+set} = xset \ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : done PS1='$ ' PS2='> ' PS4='+ ' # NLS nuisances. LC_ALL=C export LC_ALL LANGUAGE=C export LANGUAGE # CDPATH. (unset CDPATH) >/dev/null 2>&1 && unset CDPATH # as_fn_error STATUS ERROR [LINENO LOG_FD] # ---------------------------------------- # Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are # provided, also output the error to LOG_FD, referencing LINENO. Then exit the # script with STATUS, using 1 if that was 0. 
as_fn_error () { as_status=$1; test $as_status -eq 0 && as_status=1 if test "$4"; then as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 fi $as_echo "$as_me: error: $2" >&2 as_fn_exit $as_status } # as_fn_error # as_fn_set_status STATUS # ----------------------- # Set $? to STATUS, without forking. as_fn_set_status () { return $1 } # as_fn_set_status # as_fn_exit STATUS # ----------------- # Exit the shell with STATUS, even in a "trap 0" or "set -e" context. as_fn_exit () { set +e as_fn_set_status $1 exit $1 } # as_fn_exit # as_fn_unset VAR # --------------- # Portably unset VAR. as_fn_unset () { { eval $1=; unset $1;} } as_unset=as_fn_unset # as_fn_append VAR VALUE # ---------------------- # Append the text in VALUE to the end of the definition contained in VAR. Take # advantage of any shell optimizations that allow amortized linear growth over # repeated appends, instead of the typical quadratic growth present in naive # implementations. if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : eval 'as_fn_append () { eval $1+=\$2 }' else as_fn_append () { eval $1=\$$1\$2 } fi # as_fn_append # as_fn_arith ARG... # ------------------ # Perform arithmetic evaluation on the ARGs, and store the result in the # global $as_val. Take advantage of shells that can avoid forks. The arguments # must be portable across $(()) and expr. if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : eval 'as_fn_arith () { as_val=$(( $* )) }' else as_fn_arith () { as_val=`expr "$@" || test $? -eq 1` } fi # as_fn_arith if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr else as_expr=false fi if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then as_basename=basename else as_basename=false fi if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then as_dirname=dirname else as_dirname=false fi as_me=`$as_basename -- "$0" || $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ X"$0" : 'X\(//\)$' \| \ X"$0" : 'X\(/\)' \| . 2>/dev/null || $as_echo X/"$0" | sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/ q } /^X\/\(\/\/\)$/{ s//\1/ q } /^X\/\(\/\).*/{ s//\1/ q } s/.*/./; q'` # Avoid depending upon Character Ranges. as_cr_letters='abcdefghijklmnopqrstuvwxyz' as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' as_cr_Letters=$as_cr_letters$as_cr_LETTERS as_cr_digits='0123456789' as_cr_alnum=$as_cr_Letters$as_cr_digits ECHO_C= ECHO_N= ECHO_T= case `echo -n x` in #((((( -n*) case `echo 'xy\c'` in *c*) ECHO_T=' ';; # ECHO_T is single tab character. xy) ECHO_C='\c';; *) echo `echo ksh88 bug on AIX 6.1` > /dev/null ECHO_T=' ';; esac;; *) ECHO_N='-n';; esac rm -f conf$$ conf$$.exe conf$$.file if test -d conf$$.dir; then rm -f conf$$.dir/conf$$.file else rm -f conf$$.dir mkdir conf$$.dir 2>/dev/null fi if (echo >conf$$.file) 2>/dev/null; then if ln -s conf$$.file conf$$ 2>/dev/null; then as_ln_s='ln -s' # ... but there are two gotchas: # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. # In both cases, we have to default to `cp -p'. ln -s conf$$.file conf$$.dir 2>/dev/null && test ! 
-f conf$$.exe || as_ln_s='cp -p' elif ln conf$$.file conf$$ 2>/dev/null; then as_ln_s=ln else as_ln_s='cp -p' fi else as_ln_s='cp -p' fi rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file rmdir conf$$.dir 2>/dev/null # as_fn_mkdir_p # ------------- # Create "$as_dir" as a directory, including parents if necessary. as_fn_mkdir_p () { case $as_dir in #( -*) as_dir=./$as_dir;; esac test -d "$as_dir" || eval $as_mkdir_p || { as_dirs= while :; do case $as_dir in #( *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( *) as_qdir=$as_dir;; esac as_dirs="'$as_qdir' $as_dirs" as_dir=`$as_dirname -- "$as_dir" || $as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_dir" : 'X\(//\)[^/]' \| \ X"$as_dir" : 'X\(//\)$' \| \ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$as_dir" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` test -d "$as_dir" && break done test -z "$as_dirs" || eval "mkdir $as_dirs" } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" } # as_fn_mkdir_p if mkdir -p . 2>/dev/null; then as_mkdir_p='mkdir -p "$as_dir"' else test -d ./-p && rmdir ./-p as_mkdir_p=false fi if test -x / >/dev/null 2>&1; then as_test_x='test -x' else if ls -dL / >/dev/null 2>&1; then as_ls_L_option=L else as_ls_L_option= fi as_test_x=' eval sh -c '\'' if test -d "$1"; then test -d "$1/."; else case $1 in #( -*)set "./$1";; esac; case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in #(( ???[sx]*):;;*)false;;esac;fi '\'' sh ' fi as_executable_p=$as_test_x # Sed expression to map a string onto a valid CPP name. as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" # Sed expression to map a string onto a valid variable name. as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" exec 6>&1 ## ----------------------------------- ## ## Main body of $CONFIG_STATUS script. ## ## ----------------------------------- ## _ASEOF test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1 cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # Save the log message, to keep $0 and so on meaningful, and to # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" This file was extended by $as_me, which was generated by GNU Autoconf 2.68. Invocation command line was CONFIG_FILES = $CONFIG_FILES CONFIG_HEADERS = $CONFIG_HEADERS CONFIG_LINKS = $CONFIG_LINKS CONFIG_COMMANDS = $CONFIG_COMMANDS $ $0 $@ on `(hostname || uname -n) 2>/dev/null | sed 1q` " _ACEOF case $ac_config_files in *" "*) set x $ac_config_files; shift; ac_config_files=$*;; esac cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 # Files that config.status was made for. config_files="$ac_config_files" _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 ac_cs_usage="\ \`$as_me' instantiates files and other configuration actions from templates according to the current configuration. Unless the files and actions are specified as TAGs, all are instantiated by default. Usage: $0 [OPTION]... [TAG]... -h, --help print this help, then exit -V, --version print version number and configuration settings, then exit --config print configuration, then exit -q, --quiet, --silent do not print progress messages -d, --debug don't remove temporary files --recheck update $as_me by reconfiguring in the same conditions --file=FILE[:TEMPLATE] instantiate the configuration file FILE Configuration files: $config_files Report bugs to the package provider." 
_ACEOF
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
ac_cs_version="\\
config.status
configured by $0, generated by GNU Autoconf 2.68,
  with options \\"\$ac_cs_config\\"

Copyright (C) 2010 Free Software Foundation, Inc.
This config.status script is free software; the Free Software Foundation
gives unlimited permission to copy, distribute and modify it."

ac_pwd='$ac_pwd'
srcdir='$srcdir'
test -n "\$AWK" || AWK=awk
_ACEOF

cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
# The default lists apply if the user does not specify any file.
ac_need_defaults=:
while test $# != 0
do
  case $1 in
  --*=?*)
    ac_option=`expr "X$1" : 'X\([^=]*\)='`
    ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'`
    ac_shift=:
    ;;
  --*=)
    ac_option=`expr "X$1" : 'X\([^=]*\)='`
    ac_optarg=
    ac_shift=:
    ;;
  *)
    ac_option=$1
    ac_optarg=$2
    ac_shift=shift
    ;;
  esac

  case $ac_option in
  # Handling of the options.
  -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r)
    ac_cs_recheck=: ;;
  --version | --versio | --versi | --vers | --ver | --ve | --v | -V )
    $as_echo "$ac_cs_version"; exit ;;
  --config | --confi | --conf | --con | --co | --c )
    $as_echo "$ac_cs_config"; exit ;;
  --debug | --debu | --deb | --de | --d | -d )
    debug=: ;;
  --file | --fil | --fi | --f )
    $ac_shift
    case $ac_optarg in
    *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;;
    '') as_fn_error $? "missing file argument" ;;
    esac
    as_fn_append CONFIG_FILES " '$ac_optarg'"
    ac_need_defaults=false;;
  --he | --h | --help | --hel | -h )
    $as_echo "$ac_cs_usage"; exit ;;
  -q | -quiet | --quiet | --quie | --qui | --qu | --q \
  | -silent | --silent | --silen | --sile | --sil | --si | --s)
    ac_cs_silent=: ;;

  # This is an error.
  -*) as_fn_error $? "unrecognized option: \`$1'
Try \`$0 --help' for more information." ;;

  *) as_fn_append ac_config_targets " $1"
     ac_need_defaults=false ;;

  esac
  shift
done

ac_configure_extra_args=

if $ac_cs_silent; then
  exec 6>/dev/null
  ac_configure_extra_args="$ac_configure_extra_args --silent"
fi

_ACEOF
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
if \$ac_cs_recheck; then
  set X '$SHELL' '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion
  shift
  \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6
  CONFIG_SHELL='$SHELL'
  export CONFIG_SHELL
  exec "\$@"
fi

_ACEOF
cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
exec 5>>config.log
{
  echo
  sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX
## Running $as_me. ##
_ASBOX
  $as_echo "$ac_log"
} >&5

_ACEOF
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
_ACEOF

cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1

# Handling of arguments.
for ac_config_target in $ac_config_targets
do
  case $ac_config_target in
    "src/Makevars") CONFIG_FILES="$CONFIG_FILES src/Makevars" ;;

  *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;;
  esac
done

# If the user did not use the arguments to specify the items to instantiate,
# then the envvar interface is used. Set only those that are not.
# We use the long form for the default assignment because of an extremely
# bizarre bug on SunOS 4.1.3.
if $ac_need_defaults; then
  test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files
fi

# Have a temporary directory for convenience. Make it in the build tree
# simply because there is no reason against having it here, and in addition,
# creating and moving files from /tmp can sometimes cause problems.
# Hook for its removal unless debugging.
# Note that there is a small window in which the directory will not be cleaned:
# after its creation but before its name has been assigned to `$tmp'.
$debug ||
{
  tmp= ac_tmp=
  trap 'exit_status=$?
  : "${ac_tmp:=$tmp}"
  { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status
' 0
  trap 'as_fn_exit 1' 1 2 13 15
}
# Create a (secure) tmp directory for tmp files.

{
  tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` &&
  test -d "$tmp"
} ||
{
  tmp=./conf$$-$RANDOM
  (umask 077 && mkdir "$tmp")
} || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5
ac_tmp=$tmp

# Set up the scripts for CONFIG_FILES section.
# No need to generate them if there are no CONFIG_FILES.
# This happens for instance with `./config.status config.h'.
if test -n "$CONFIG_FILES"; then

ac_cr=`echo X | tr X '\015'`
# On cygwin, bash can eat \r inside `` if the user requested igncr.
# But we know of no other shell where ac_cr would be empty at this
# point, so we can use a bashism as a fallback.
if test "x$ac_cr" = x; then
  eval ac_cr=\$\'\\r\'
fi
ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' /dev/null`
if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then
  ac_cs_awk_cr='\\r'
else
  ac_cs_awk_cr=$ac_cr
fi

echo 'BEGIN {' >"$ac_tmp/subs1.awk" &&
_ACEOF

{
  echo "cat >conf$$subs.awk <<_ACEOF" &&
  echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' &&
  echo "_ACEOF"
} >conf$$subs.sh ||
  as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5
ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'`
ac_delim='%!_!# '
for ac_last_try in false false false false false :; do
  . ./conf$$subs.sh ||
    as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5

  ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X`
  if test $ac_delim_n = $ac_delim_num; then
    break
  elif $ac_last_try; then
    as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5
  else
    ac_delim="$ac_delim!$ac_delim _$ac_delim!! "
  fi
done
rm -f conf$$subs.sh

cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK &&
_ACEOF
sed -n '
h
s/^/S["/; s/!.*/"]=/
p
g
s/^[^!]*!//
:repl
t repl
s/'"$ac_delim"'$//
t delim
:nl
h
s/\(.\{148\}\)..*/\1/
t more1
s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/
p
n
b repl
:more1
s/["\\]/\\&/g; s/^/"/; s/$/"\\/
p
g
s/.\{148\}//
t nl
:delim
h
s/\(.\{148\}\)..*/\1/
t more2
s/["\\]/\\&/g; s/^/"/; s/$/"/
p
b
:more2
s/["\\]/\\&/g; s/^/"/; s/$/"\\/
p
g
s/.\{148\}//
t delim
' >$CONFIG_STATUS || ac_write_fail=1
rm -f conf$$subs.awk
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
_ACAWK
cat >>"\$ac_tmp/subs1.awk" <<_ACAWK &&
  for (key in S) S_is_set[key] = 1
  FS = ""

}
{
  line = $ 0
  nfields = split(line, field, "@")
  substed = 0
  len = length(field[1])
  for (i = 2; i < nfields; i++) {
    key = field[i]
    keylen = length(key)
    if (S_is_set[key]) {
      value = S[key]
      line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3)
      len += length(value) + length(field[++i])
      substed = 1
    } else
      len += 1 + keylen
  }

  print line
}

_ACAWK
_ACEOF
cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then
  sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g"
else
  cat
fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \
  || as_fn_error $? "could not setup config files machinery" "$LINENO" 5
_ACEOF

# VPATH may cause trouble with some makes, so we remove sole $(srcdir),
# ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and
# trailing colons and then remove the whole line if VPATH becomes empty
# (actually we leave an empty line to preserve line numbers).
if test "x$srcdir" = x.; then ac_vpsub='/^[ ]*VPATH[ ]*=[ ]*/{ h s/// s/^/:/ s/[ ]*$/:/ s/:\$(srcdir):/:/g s/:\${srcdir}:/:/g s/:@srcdir@:/:/g s/^:*// s/:*$// x s/\(=[ ]*\).*/\1/ G s/\n// s/^[^=]*=[ ]*$// }' fi cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 fi # test -n "$CONFIG_FILES" eval set X " :F $CONFIG_FILES " shift for ac_tag do case $ac_tag in :[FHLC]) ac_mode=$ac_tag; continue;; esac case $ac_mode$ac_tag in :[FHL]*:*);; :L* | :C*:*) as_fn_error $? "invalid tag \`$ac_tag'" "$LINENO" 5;; :[FH]-) ac_tag=-:-;; :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; esac ac_save_IFS=$IFS IFS=: set x $ac_tag IFS=$ac_save_IFS shift ac_file=$1 shift case $ac_mode in :L) ac_source=$1;; :[FH]) ac_file_inputs= for ac_f do case $ac_f in -) ac_f="$ac_tmp/stdin";; *) # Look for the file first in the build tree, then in the source tree # (if the path is not absolute). The absolute path cannot be DOS-style, # because $ac_f cannot contain `:'. test -f "$ac_f" || case $ac_f in [\\/$]*) false;; *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; esac || as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;; esac case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac as_fn_append ac_file_inputs " '$ac_f'" done # Let's still pretend it is `configure' which instantiates (i.e., don't # use $as_me), people would be surprised to read: # /* config.h. Generated by config.status. */ configure_input='Generated from '` $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' `' by configure.' if test x"$ac_file" != x-; then configure_input="$ac_file. $configure_input" { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 $as_echo "$as_me: creating $ac_file" >&6;} fi # Neutralize special characters interpreted by sed in replacement strings. case $configure_input in #( *\&* | *\|* | *\\* ) ac_sed_conf_input=`$as_echo "$configure_input" | sed 's/[\\\\&|]/\\\\&/g'`;; #( *) ac_sed_conf_input=$configure_input;; esac case $ac_tag in *:-:* | *:-) cat >"$ac_tmp/stdin" \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; esac ;; esac ac_dir=`$as_dirname -- "$ac_file" || $as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$ac_file" : 'X\(//\)[^/]' \| \ X"$ac_file" : 'X\(//\)$' \| \ X"$ac_file" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$ac_file" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` as_dir="$ac_dir"; as_fn_mkdir_p ac_builddir=. case "$ac_dir" in .) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` # A ".." for each directory in $ac_dir_suffix. ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` case $ac_top_builddir_sub in "") ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; esac ;; esac ac_abs_top_builddir=$ac_pwd ac_abs_builddir=$ac_pwd$ac_dir_suffix # for backward compatibility: ac_top_builddir=$ac_top_build_prefix case $srcdir in .) # We are building in place. ac_srcdir=. ac_top_srcdir=$ac_top_builddir_sub ac_abs_top_srcdir=$ac_pwd ;; [\\/]* | ?:[\\/]* ) # Absolute name. ac_srcdir=$srcdir$ac_dir_suffix; ac_top_srcdir=$srcdir ac_abs_top_srcdir=$srcdir ;; *) # Relative name. 
    ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix
    ac_top_srcdir=$ac_top_build_prefix$srcdir
    ac_abs_top_srcdir=$ac_pwd/$srcdir ;;
esac
ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix

  case $ac_mode in
  :F)
  #
  # CONFIG_FILE
  #

_ACEOF

cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
# If the template does not know about datarootdir, expand it.
# FIXME: This hack should be removed a few years after 2.60.
ac_datarootdir_hack=; ac_datarootdir_seen=
ac_sed_dataroot='
/datarootdir/ {
  p
  q
}
/@datadir@/p
/@docdir@/p
/@infodir@/p
/@localedir@/p
/@mandir@/p'
case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in
*datarootdir*) ac_datarootdir_seen=yes;;
*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*)
  { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5
$as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;}
_ACEOF
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
  ac_datarootdir_hack='
  s&@datadir@&$datadir&g
  s&@docdir@&$docdir&g
  s&@infodir@&$infodir&g
  s&@localedir@&$localedir&g
  s&@mandir@&$mandir&g
  s&\\\${datarootdir}&$datarootdir&g' ;;
esac
_ACEOF

# Neutralize VPATH when `$srcdir' = `.'.
# Shell code in configure.ac might set extrasub.
# FIXME: do we really want to maintain this feature?
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
  ac_sed_extra="$ac_vpsub
$extrasub
_ACEOF
cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
:t
/@[a-zA-Z_][a-zA-Z_0-9]*@/!b
s|@configure_input@|$ac_sed_conf_input|;t t
s&@top_builddir@&$ac_top_builddir_sub&;t t
s&@top_build_prefix@&$ac_top_build_prefix&;t t
s&@srcdir@&$ac_srcdir&;t t
s&@abs_srcdir@&$ac_abs_srcdir&;t t
s&@top_srcdir@&$ac_top_srcdir&;t t
s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t
s&@builddir@&$ac_builddir&;t t
s&@abs_builddir@&$ac_abs_builddir&;t t
s&@abs_top_builddir@&$ac_abs_top_builddir&;t t
$ac_datarootdir_hack
"
eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \
  >$ac_tmp/out || as_fn_error $? "could not create $ac_file" "$LINENO" 5

test -z "$ac_datarootdir_hack$ac_datarootdir_seen" &&
  { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } &&
  { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \
      "$ac_tmp/out"`; test -z "$ac_out"; } &&
  { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' which seems to be undefined. Please make sure it is defined" >&5
$as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' which seems to be undefined. Please make sure it is defined" >&2;}

  rm -f "$ac_tmp/stdin"
  case $ac_file in
  -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";;
  *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";;
  esac \
  || as_fn_error $? "could not create $ac_file" "$LINENO" 5
 ;;

  esac

done # for ac_tag

as_fn_exit 0
_ACEOF
ac_clean_files=$ac_clean_files_save

test $ac_write_fail = 0 || as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5

# configure is writing to config.log, and then calls config.status.
# config.status does its own redirection, appending to config.log.
# Unfortunately, on DOS this fails, as config.log is still kept open
# by configure, so config.status won't be able to write to it; its
# output is simply discarded. So we exec the FD to /dev/null,
# effectively closing config.log, so it can be properly (re)opened and
# appended to by config.status. When coming back to configure, we
# need to make the FD available again.
if test "$no_create" != yes; then ac_cs_success=: ac_config_status_args= test "$silent" = yes && ac_config_status_args="$ac_config_status_args --quiet" exec 5>/dev/null $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false exec 5>>config.log # Use ||, not &&, to avoid exiting from the if with $? = 1, which # would make configure fail if this is the last instruction. $ac_cs_success || as_fn_exit 1 fi if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5 $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;} fi MCMCpack/cleanup0000755000176000001440000000003512133644110013226 0ustar ripleyusers#! /bin/sh rm -f config.log