lmtest/NAMESPACE

import("stats", "graphics", "zoo")

useDynLib("lmtest")

export(
  ## generic tools
  "coefci",
  "coefci.default",
  "coeftest",
  "coeftest.default",
  "lrtest",
  "lrtest.default",
  "waldtest",
  "waldtest.default",
  ## linear regression model under test
  "bgtest",
  "bptest",
  "coxtest",
  "dwtest",
  "encomptest",
  "gqtest",
  "grangertest",
  "grangertest.default",
  "harvtest",
  "hmctest",
  "jtest",
  "petest",
  "raintest",
  "resettest",
  "reset"
)

S3method("coefci", "default")
S3method("coefci", "glm")
S3method("coefci", "mlm")
S3method("coefci", "survreg")

S3method("coeftest", "default")
S3method("coeftest", "glm")
S3method("coeftest", "mlm")
S3method("coeftest", "survreg")
S3method("coeftest", "breakpointsfull")

S3method("print", "coeftest")
S3method("coef", "coeftest")
S3method("df.residual", "coeftest")
S3method("nobs", "coeftest")
S3method("logLik", "coeftest")
S3method("confint", "coeftest")

S3method("grangertest", "default")
S3method("grangertest", "formula")

S3method("lrtest", "default")
S3method("lrtest", "formula")

S3method("waldtest", "default")
S3method("waldtest", "formula")
S3method("waldtest", "lm")

S3method("df.residual", "bgtest")
S3method("vcov", "bgtest")

lmtest/THANKS

The authors would like to thank the following people for their
help and assistance:

Roger Koenker for providing the chicken-and-egg data,

Walter Kraemer for providing free copies of "The Linear Regression
Model under Test",

Uwe Ligges for critically testing pre-release versions,

Marcel Mackiewicz for scanning the datasets,

Giovanni Millo for providing the initial versions of the nested and
non-nested model comparison tests and for most helpful discussions,

David Mitchell for providing the initial version of the code for the
Breusch-Godfrey test,

Andrea Peters for her comments on the documentation of `lmtest',

Jeffrey Racine for testing the package and providing helpful feedback
that led to considerable improvement of the code,

Mark W. Watson for providing the U.S. macroeconomic time series data.
lmtest/data/

[Binary R data files (.rda); their compressed contents cannot be rendered as text.
The directory contains: fyff.rda, jocci.rda, pw561.rda, valueofstocks.rda,
gmdc.rda, moneydemand.rda, USDistLag.rda, bondyield.rda, wages.rda,
growthofmoney.rda, ChickEgg.rda, unemployment.rda, ip.rda.]

lmtest/man/unemployment.Rd

\name{unemployment}
\alias{unemployment}
\docType{data}
\encoding{latin1}
\title{Unemployment Data}
\usage{data(unemployment)}
\description{
Unemployment Data.
} \format{ A multivariate yearly time series from 1890 to 1979 with variables \describe{ \item{UN}{unemployment rate,} \item{m}{broad money supply,} \item{p}{implicit deflator of Gross National Product,} \item{G}{real purchases of goods and services,} \item{x}{real exports.} } } \source{ The data was originally studied by Rea (1983), the data set is given in Krmer and Sonnberger (1986). Below we replicate a few examples from their book. Some of these results differ more or less seriously and are sometimes parameterized differently. } \references{ J.D. Rea (1983), The Explanatory Power of Alternative Theories of Inflation and Unemployment, 1895-1979. \emph{Review of Economics and Statistics} \bold{65}, 183--195 W. Krmer & H. Sonnberger (1986), \emph{The Linear Regression Model under Test}. Heidelberg: Physica } \examples{ data(unemployment) ## data transformation myunemployment <- window(unemployment, start=1895, end=1956) time <- 6:67 ## page 144, fit Rea OLS model ## last line in Table 6.12 modelRea <- UN ~ log(m/p) + log(G) + log(x) + time lm(modelRea, data = myunemployment) ## coefficients of logged variables differ by factor 100 ## page 143, fit test statistics in table 6.11 ############################################## if(require(strucchange, quietly = TRUE)) { ## Chow 1941 sctest(modelRea, point=c(1940,1), data=myunemployment, type="Chow") } ## Breusch-Pagan bptest(modelRea, data=myunemployment, studentize=FALSE) bptest(modelRea, data=myunemployment) ## RESET (a)-(b) reset(modelRea, data=myunemployment) reset(modelRea, power=2, type="regressor", data=myunemployment) ## Harvey-Collier harvtest(modelRea, order.by = ~ log(m/p), data=myunemployment) harvtest(modelRea, order.by = ~ log(G), data=myunemployment) harvtest(modelRea, order.by = ~ log(x), data=myunemployment) harvtest(modelRea, data=myunemployment) ## Rainbow raintest(modelRea, order.by = "mahalanobis", data=myunemployment) } \keyword{datasets} lmtest/man/moneydemand.Rd0000644000176200001440000000466213715303430015133 0ustar liggesusers\name{moneydemand} \alias{moneydemand} \docType{data} \encoding{latin1} \title{Money Demand} \usage{data(moneydemand)} \description{ Money Demand Data. } \format{ A multivariate yearly time series from 1879 to 1974 with variables \describe{ \item{logM}{logarithm of quantity of money,} \item{logYp}{logarithm of real permanent income,} \item{Rs}{short term interest rate,} \item{Rm}{rate of return on money,} \item{Rl}{not documented in the sources,} \item{logSpp}{logarithm of an operational measure of the variability of the rate of price changes.} } } \source{ The data was originally studied by Allen (1982), the data set is given in Krmer and Sonnberger (1986). Below we replicate a few examples from the book. Some of these results differ more or less seriously and are sometimes parameterized differently. } \references{ S.D. Allen (1982), Klein's Price Variability Terms in the U.S. Demand for Money. \emph{Journal of Money, Credit and Banking} \bold{14}, 525--530 W. Krmer & H. Sonnberger (1986), \emph{The Linear Regression Model under Test}. 
Heidelberg: Physica } \examples{ data(moneydemand) moneydemand <- window(moneydemand, start=1880, end=1972) ## page 125, fit Allen OLS model (and Durbin-Watson test), ## last line in Table 6.1 modelAllen <- logM ~ logYp + Rs + Rl + logSpp lm(modelAllen, data = moneydemand) dwtest(modelAllen, data = moneydemand) ## page 127, fit test statistics in Table 6.1 c) ################################################ ## Breusch-Pagan bptest(modelAllen, studentize = FALSE, data = moneydemand) bptest(modelAllen, studentize = TRUE, data = moneydemand) ## RESET reset(modelAllen, data = moneydemand) reset(modelAllen, power = 2, type = "regressor", data = moneydemand) reset(modelAllen, type = "princomp", data = moneydemand) ## Harvey-Collier tests (up to sign of the test statistic) harvtest(modelAllen, order.by = ~logYp, data = moneydemand) harvtest(modelAllen, order.by = ~Rs, data = moneydemand) harvtest(modelAllen, order.by = ~Rl, data = moneydemand) harvtest(modelAllen, order.by = ~logSpp, data = moneydemand) ## Rainbow test raintest(modelAllen, order.by = "mahalanobis", data = moneydemand) if(require(strucchange, quietly = TRUE)) { ## Chow (1913) sctest(modelAllen, point=c(1913,1), data = moneydemand, type = "Chow") } if(require(strucchange, quietly = TRUE)) { ## Fluctuation sctest(modelAllen, type = "fluctuation", rescale = FALSE, data = moneydemand)} } \keyword{datasets} lmtest/man/gqtest.Rd0000644000176200001440000000616013715303430014135 0ustar liggesusers\name{gqtest} \alias{gqtest} \encoding{latin1} \title{Goldfeld-Quandt Test} \description{Goldfeld-Quandt test against heteroskedasticity.} \usage{ gqtest(formula, point = 0.5, fraction = 0, alternative = c("greater", "two.sided", "less"), order.by = NULL, data = list()) } \arguments{ \item{formula}{a symbolic description for the model to be tested (or a fitted \code{"lm"} object).} \item{point}{numerical. If \code{point} is smaller than 1 it is interpreted as percentages of data, i.e. \code{n*point} is taken to be the (potential) breakpoint in the variances, if \code{n} is the number of observations in the model. If \code{point} is greater than 1 it is interpreted to be the index of the breakpoint.} \item{fraction}{numerical. The number of central observations to be omitted. If \code{fraction} is smaller than 1, it is chosen to be \code{fraction*n} if \code{n} is the number of observations in the model.} \item{alternative}{a character string specifying the alternative hypothesis. The default is to test for increasing variances.} \item{order.by}{Either a vector \code{z} or a formula with a single explanatory variable like \code{~ z}. The observations in the model are ordered by the size of \code{z}. If set to \code{NULL} (the default) the observations are assumed to be ordered (e.g., a time series).} \item{data}{an optional data frame containing the variables in the model. By default the variables are taken from the environment which \code{gqtest} is called from.} } \details{The Goldfeld-Quandt test compares the variances of two submodels divided by a specified breakpoint and rejects if the variances differ. Under \eqn{H_0} the test statistic of the Goldfeld-Quandt test follows an F distribution with the degrees of freedom as given in \code{parameter}. Examples can not only be found on this page, but also on the help pages of the data sets \code{\link{bondyield}}, \code{\link{currencysubstitution}}, \code{\link{growthofmoney}}, \code{\link{moneydemand}}, \code{\link{unemployment}}, \code{\link{wages}}. 
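As an illustration (not the exact internal implementation), the statistic can be
reproduced by hand. The following sketch assumes the defaults \code{point = 0.5},
\code{fraction = 0}, and \code{alternative = "greater"}, with observations already
in the desired order; all object names are purely illustrative:

\preformatted{
  ## split the ordered sample at the breakpoint, fit both submodels,
  ## and compare the residual variances via their F ratio
  n <- 100
  x <- rep(c(-1, 1), 50)
  y <- 1 + x + c(rnorm(50, sd = 1), rnorm(50, sd = 2))
  k <- floor(n * 0.5)                      ## breakpoint
  fm1 <- lm(y[1:k] ~ x[1:k])               ## first subsample
  fm2 <- lm(y[(k + 1):n] ~ x[(k + 1):n])   ## second subsample
  gq <- (sum(residuals(fm2)^2) / df.residual(fm2)) /
        (sum(residuals(fm1)^2) / df.residual(fm1))
  pf(gq, df.residual(fm2), df.residual(fm1), lower.tail = FALSE)
  ## should agree with gqtest(y ~ x) on the same data
}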
} \value{ A list with class \code{"htest"} containing the following components: \item{statistic}{the value of the test statistic.} \item{parameter}{degrees of freedom.} \item{method}{a character string indicating what type of test was performed.} \item{alternative}{a character string describing the alternative hypothesis.} \item{p.value}{the p-value of the test.} \item{data.name}{a character string giving the name(s) of the data.} } \references{ S.M. Goldfeld & R.E. Quandt (1965), Some Tests for Homoskedasticity. \emph{Journal of the American Statistical Association} \bold{60}, 539--547 W. Krmer & H. Sonnberger (1986), \emph{The Linear Regression Model under Test}. Heidelberg: Physica } \seealso{\code{\link{lm}}} \examples{ ## generate a regressor x <- rep(c(-1,1), 50) ## generate heteroskedastic and homoskedastic disturbances err1 <- c(rnorm(50, sd=1), rnorm(50, sd=2)) err2 <- rnorm(100) ## generate a linear relationship y1 <- 1 + x + err1 y2 <- 1 + x + err2 ## perform Goldfeld-Quandt test gqtest(y1 ~ x) gqtest(y2 ~ x) } \keyword{htest} lmtest/man/currencysubstitution.Rd0000644000176200001440000000607313715303430017160 0ustar liggesusers\name{currencysubstitution} \alias{currencysubstitution} \docType{data} \encoding{latin1} \title{ Currency Substitution } \usage{data(currencysubstitution)} \description{ Currency Substitution Data. } \format{ A multivariate quarterly time series from 1960(4) to 1975(4) with variables \describe{ \item{logCUS}{logarithm of the ratio of Canadian holdings of Canadian dollar balances and Canadian holdings of U.S. dollar balances,} \item{Iu}{yield on U.S. Treasury bills,} \item{Ic}{yield on Canadian Treasury bills,} \item{logY}{logarithm of Canadian real gross national product.} } } \source{ The data was originally studied by Miles (1978), the data set is given in Krmer and Sonnberger (1986). Below we replicate a few examples from their book. Some of these results differ more or less seriously and are sometimes parameterized differently. } \references{ M. Miles (1978), Currency Substitution, Flexible Exchange Rates, and Monetary Independence. \emph{American Economic Review} \bold{68}, 428--436 W. Krmer & H. Sonnberger (1986), \emph{The Linear Regression Model under Test}. 
Heidelberg: Physica } \examples{ data(currencysubstitution) ## page 130, fit Miles OLS model and Bordo-Choudri OLS model ## third and last line in Table 6.3 modelMiles <- logCUS ~ log((1+Iu)/(1+Ic)) lm(modelMiles, data=currencysubstitution) dwtest(modelMiles, data=currencysubstitution) modelBordoChoudri <- logCUS ~ I(Iu-Ic) + Ic + logY lm(modelBordoChoudri, data=currencysubstitution) dwtest(modelBordoChoudri, data=currencysubstitution) ## page 131, fit test statistics in Table 6.4 b) ################################################ if(require(strucchange, quietly = TRUE)) { ## Fluctuation test sctest(modelMiles, type="fluctuation", data=currencysubstitution, rescale=FALSE) } ## RESET reset(modelMiles, data=currencysubstitution) reset(modelMiles, power=2, type="regressor", data=currencysubstitution) reset(modelMiles, type="princomp", data=currencysubstitution) ## Harvey-Collier harvtest(modelMiles, order.by = ~log((1+Iu)/(1+Ic)), data=currencysubstitution) ## Rainbow raintest(modelMiles, order.by = "mahalanobis", data=currencysubstitution) ## page 132, fit test statistics in Table 6.4 d) ################################################ if(require(strucchange, quietly = TRUE)) { ## Chow 1970(2) sctest(modelBordoChoudri, point=c(1970,2), data=currencysubstitution, type="Chow") } ## Breusch-Pagan bptest(modelBordoChoudri, data=currencysubstitution, studentize=FALSE) bptest(modelBordoChoudri, data=currencysubstitution) ## RESET reset(modelBordoChoudri, data=currencysubstitution) reset(modelBordoChoudri, power=2, type="regressor", data=currencysubstitution) reset(modelBordoChoudri, type="princomp", data=currencysubstitution) ## Harvey-Collier harvtest(modelBordoChoudri, order.by = ~ I(Iu-Ic), data=currencysubstitution) harvtest(modelBordoChoudri, order.by = ~ Ic, data=currencysubstitution) harvtest(modelBordoChoudri, order.by = ~ logY, data=currencysubstitution) ## Rainbow raintest(modelBordoChoudri, order.by = "mahalanobis", data=currencysubstitution) } \keyword{datasets} lmtest/man/encomptest.Rd0000644000176200001440000000530612220001273014776 0ustar liggesusers\name{encomptest} \alias{encomptest} \title{Encompassing Test for Comparing Non-Nested Models} \description{ \code{encomptest} performs the encompassing test of Davidson & MacKinnon for comparing non-nested models. } \usage{ encomptest(formula1, formula2, data = list(), vcov. = NULL, \dots) } \arguments{ \item{formula1}{either a symbolic description for the first model to be tested, or a fitted object of class \code{"lm"}.} \item{formula2}{either a symbolic description for the second model to be tested, or a fitted object of class \code{"lm"}.} \item{data}{an optional data frame containing the variables in the model. By default the variables are taken from the environment which \code{encomptest} is called from.} \item{vcov.}{a function for estimating the covariance matrix of the regression coefficients, e.g., \code{\link[sandwich]{vcovHC}}.} \item{\dots}{further arguments passed to \code{\link{waldtest}}.} } \details{ To compare two non-nested models, the encompassing test fits an encompassing model which contains all regressors from both models such that the two models are nested within the encompassing model. A Wald test for comparing each of the models with the encompassing model is carried out by \code{\link{waldtest}}. For further details, see the references. 
} \value{ An object of class \code{"anova"} which contains the residual degrees of freedom in the encompassing model, the difference in degrees of freedom, Wald statistic (either \code{"F"} or \code{"Chisq"}) and corresponding p value. } \references{ R. Davidson & J. MacKinnon (1993). \emph{Estimation and Inference in Econometrics}. New York, Oxford University Press. W. H. Greene (1993), \emph{Econometric Analysis}, 2nd ed. Macmillan Publishing Company, New York. W. H. Greene (2003). \emph{Econometric Analysis}, 5th ed. New Jersey, Prentice Hall. } \seealso{\code{\link{coxtest}}, \code{\link{jtest}}} \examples{ ## Fit two competing, non-nested models for aggregate ## consumption, as in Greene (1993), Examples 7.11 and 7.12 ## load data and compute lags data(USDistLag) usdl <- na.contiguous(cbind(USDistLag, lag(USDistLag, k = -1))) colnames(usdl) <- c("con", "gnp", "con1", "gnp1") ## C(t) = a0 + a1*Y(t) + a2*C(t-1) + u fm1 <- lm(con ~ gnp + con1, data = usdl) ## C(t) = b0 + b1*Y(t) + b2*Y(t-1) + v fm2 <- lm(con ~ gnp + gnp1, data = usdl) ## Encompassing model fm3 <- lm(con ~ gnp + con1 + gnp1, data = usdl) ## Cox test in both directions: coxtest(fm1, fm2) ## ...and do the same for jtest() and encomptest(). ## Notice that in this particular case they are coincident. jtest(fm1, fm2) encomptest(fm1, fm2) ## the encompassing test is essentially waldtest(fm1, fm3, fm2) } \keyword{htest} lmtest/man/bgtest.Rd0000644000176200001440000000725213715303430014121 0ustar liggesusers\name{bgtest} \alias{bgtest} \alias{vcov.bgtest} \alias{df.residual.bgtest} \title{Breusch-Godfrey Test} \description{ \code{bgtest} performs the Breusch-Godfrey test for higher-order serial correlation. } \usage{ bgtest(formula, order = 1, order.by = NULL, type = c("Chisq", "F"), data = list(), fill = 0) } \arguments{ \item{formula}{a symbolic description for the model to be tested (or a fitted \code{"lm"} object).} \item{order}{integer. maximal order of serial correlation to be tested.} \item{order.by}{Either a vector \code{z} or a formula with a single explanatory variable like \code{~ z}. The observations in the model are ordered by the size of \code{z}. If set to \code{NULL} (the default) the observations are assumed to be ordered (e.g., a time series).} \item{type}{the type of test statistic to be returned. Either \code{"Chisq"} for the Chi-squared test statistic or \code{"F"} for the F test statistic.} \item{data}{an optional data frame containing the variables in the model. By default the variables are taken from the environment which \code{bgtest} is called from.} \item{fill}{starting values for the lagged residuals in the auxiliary regression. By default \code{0} but can also be set to \code{NA}.} } \details{ Under \eqn{H_0} the test statistic is asymptotically Chi-squared with degrees of freedom as given in \code{parameter}. If \code{type} is set to \code{"F"} the function returns a finite sample version of the test statistic, employing an \eqn{F} distribution with degrees of freedom as given in \code{parameter}. By default, the starting values for the lagged residuals in the auxiliary regression are chosen to be 0 (as in Godfrey 1978) but could also be set to \code{NA} to omit them. \code{bgtest} also returns the coefficients and estimated covariance matrix from the auxiliary regression that includes the lagged residuals. Hence, \code{\link{coeftest}} can be used to inspect the results. (Note, however, that standard theory does not always apply to the standard errors and t-statistics in this regression.) 
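For illustration (not the exact internal code), the \code{"Chisq"} statistic for
\code{order = 1} can be reproduced from the auxiliary regression by hand. The
sketch below assumes ordered observations and the default \code{fill = 0} for the
initial lagged residual; all object names are purely illustrative:

\preformatted{
  x <- rep(c(1, -1), 50)
  y <- 1 + x + rnorm(100)
  res <- residuals(lm(y ~ x))
  res_lag1 <- c(0, res[-length(res)])   ## lagged residuals, filled with 0
  aux <- lm(res ~ x + res_lag1)         ## auxiliary regression
  lm_stat <- length(res) * summary(aux)$r.squared
  pchisq(lm_stat, df = 1, lower.tail = FALSE)
  ## should agree (up to numerical precision) with
  ## bgtest(y ~ x, order = 1, type = "Chisq")
}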
} \value{ A list with class \code{"bgtest"} inheriting from \code{"htest"} containing the following components: \item{statistic}{the value of the test statistic.} \item{p.value}{the p-value of the test.} \item{parameter}{degrees of freedom.} \item{method}{a character string indicating what type of test was performed.} \item{data.name}{a character string giving the name(s) of the data.} \item{coefficients}{coefficient estimates from the auxiliary regression.} \item{vcov}{corresponding covariance matrix estimate.} } \references{ Breusch, T.S. (1978): Testing for Autocorrelation in Dynamic Linear Models, \emph{Australian Economic Papers}, 17, 334-355. Godfrey, L.G. (1978): Testing Against General Autoregressive and Moving Average Error Models when the Regressors Include Lagged Dependent Variables', \emph{Econometrica}, 46, 1293-1301. Wooldridge, J.M. (2013): \emph{Introductory Econometrics: A Modern Approach}, 5th edition, South-Western College. } \author{David Mitchell , Achim Zeileis} \seealso{\code{\link{dwtest}}} \examples{ ## Generate a stationary and an AR(1) series x <- rep(c(1, -1), 50) y1 <- 1 + x + rnorm(100) ## Perform Breusch-Godfrey test for first-order serial correlation: bgtest(y1 ~ x) ## or for fourth-order serial correlation bgtest(y1 ~ x, order = 4) ## Compare with Durbin-Watson test results: dwtest(y1 ~ x) y2 <- filter(y1, 0.5, method = "recursive") bgtest(y2 ~ x) bg4 <- bgtest(y2 ~ x, order = 4) bg4 coeftest(bg4) } \keyword{htest} lmtest/man/coeftest.Rd0000644000176200001440000001413513723001755014447 0ustar liggesusers\name{coeftest} \alias{coeftest} \alias{coefci} \alias{coeftest.default} \alias{coeftest.survreg} \alias{coeftest.glm} \alias{coeftest.mlm} \alias{coeftest.breakpointsfull} \alias{print.coeftest} \alias{confint.coeftest} \alias{coef.coeftest} \alias{df.residual.coeftest} \alias{nobs.coeftest} \alias{logLik.coeftest} \alias{coefci.default} \alias{coefci.survreg} \alias{coefci.glm} \alias{coefci.mlm} \title{Inference for Estimated Coefficients} \description{ \code{coeftest} is a generic function for performing z and (quasi-)t Wald tests of estimated coefficients. \code{coefci} computes the corresponding Wald confidence intervals. } \usage{ coeftest(x, vcov. = NULL, df = NULL, \dots) \method{coeftest}{default}(x, vcov. = NULL, df = NULL, \dots, save = FALSE) coefci(x, parm = NULL, level = 0.95, vcov. = NULL, df = NULL, \dots) } \arguments{ \item{x}{an object (for details see below).} \item{vcov.}{a specification of the covariance matrix of the estimated coefficients. This can be specified as a matrix or as a function yielding a matrix when applied to \code{x}.} \item{df}{the degrees of freedom to be used. If this is a finite positive number a t test with \code{df} degrees of freedom is performed. In all other cases, a z test (using a normal approximation) is performed. By default it tries to use \code{x$df.residual} and performs a z test if this is \code{NULL}.} \item{\dots}{further arguments passed to the methods and to \code{vcov.} in the default method.} \item{save}{logical. Should the object \code{x} itself be saved as an attribute? (This may be useful for further processing of \code{coeftest} objects, e.g., as part of model summaries.)} \item{parm}{a specification of which parameters are to be given confidence intervals, either a vector of numbers or a vector of names. 
If missing, all parameters are considered.} \item{level}{the confidence level required.} } \details{ The generic function \code{coeftest} currently has a default method (which works in particular for \code{"lm"} objects) and dedicated methods for objects of class \code{"glm"} (as computed by \code{\link[stats]{glm}}), \code{"mlm"} (as computed by \code{\link[stats]{lm}} with multivariate responses), \code{"survreg"} (as computed by \code{\link[survival]{survreg}}), and \code{"breakpointsfull"} (as computed by \code{\link[strucchange]{breakpoints.formula}}). The default method assumes that a \code{coef} methods exists, such that \code{coef(x)} yields the estimated coefficients. To specify the corresponding covariance matrix \code{vcov.} to be used, there are three possibilities: 1. It is pre-computed and supplied in argument \code{vcov.}. 2. A function for extracting the covariance matrix from \code{x} is supplied, e.g., \code{\link[sandwich]{sandwich}}, \code{\link[sandwich]{vcovHC}}, \code{\link[sandwich]{vcovCL}}, or \code{\link[sandwich]{vcovHAC}} from package \pkg{sandwich}. 3. \code{vcov.} is set to \code{NULL}, then it is assumed that a \code{vcov} method exists, such that \code{vcov(x)} yields a covariance matrix. Illustrations are provided in the examples below. The degrees of freedom \code{df} determine whether a normal approximation is used or a t distribution with \code{df} degrees of freedom. The default method computes \code{df.residual(x)} and if this is \code{NULL}, \code{0}, or \code{Inf} a z test is performed. The method for \code{"glm"} objects always uses \code{df = Inf} (i.e., a z test). The corresponding Wald confidence intervals can be computed either by applying \code{coefci} to the original model or \code{\link[stats]{confint}} to the output of \code{coeftest}. See below for examples. Finally, \code{\link[stats]{nobs}} and \code{\link[stats]{logLik}} methods are provided which work, provided that there are such methods for the original object \code{x}. In that case, \code{"nobs"} and \code{"logLik"} attributes are stored in the \code{coeftest} output so that they can be still queried subsequently. If both methods are available, \code{\link[stats]{AIC}} and \code{\link[stats]{BIC}} can also be applied. } \value{ \code{coeftest} returns an object of class \code{"coeftest"} which is essentially a coefficient matrix with columns containing the estimates, associated standard errors, test statistics and p values. Attributes for a \code{"method"} label, and the \code{"df"} are added along with \code{"nobs"} and \code{"logLik"} (provided that suitable extractor methods \code{\link[stats]{nobs}} and \code{\link[stats]{logLik}} are available). Optionally, the full object \code{x} can be \code{save}d in an attribute \code{"object"} to facilitate further model summaries based on the \code{coeftest} result. \code{coefci} returns a matrix (or vector) with columns giving lower and upper confidence limits for each parameter. These will be labeled as (1-level)/2 and 1 - (1-level)/2 in percent. 
} \seealso{\code{\link{lm}}, \code{\link{waldtest}}} \examples{ ## load data and fit model data("Mandible", package = "lmtest") fm <- lm(length ~ age, data = Mandible, subset=(age <= 28)) ## the following commands lead to the same tests: summary(fm) (ct <- coeftest(fm)) ## a z test (instead of a t test) can be performed by coeftest(fm, df = Inf) ## corresponding confidence intervals confint(ct) coefci(fm) ## which in this simple case is equivalent to confint(fm) ## extract further model information either from ## the original model or from the coeftest output nobs(fm) nobs(ct) logLik(fm) logLik(ct) AIC(fm, ct) BIC(fm, ct) if(require("sandwich")) { ## a different covariance matrix can be also used: (ct <- coeftest(fm, df = Inf, vcov = vcovHC)) ## the corresponding confidence interval can be computed either as confint(ct) ## or based on the original model coefci(fm, df = Inf, vcov = vcovHC) ## note that the degrees of freedom _actually used_ can be extracted df.residual(ct) ## which differ here from df.residual(fm) ## vcov can also be supplied as a function with additional arguments coeftest(fm, df = Inf, vcov = vcovHC, type = "HC0") ## or as a matrix coeftest(fm, df = Inf, vcov = vcovHC(fm, type = "HC0")) } } \keyword{htest} lmtest/man/dwtest.Rd0000644000176200001440000001034513715303430014140 0ustar liggesusers\name{dwtest} \alias{dwtest} \encoding{latin1} \title{Durbin-Watson Test} \description{ Performs the Durbin-Watson test for autocorrelation of disturbances. } \usage{ dwtest(formula, order.by = NULL, alternative = c("greater", "two.sided", "less"), iterations = 15, exact = NULL, tol = 1e-10, data = list()) } \arguments{ \item{formula}{a symbolic description for the model to be tested (or a fitted \code{"lm"} object).} \item{order.by}{Either a vector \code{z} or a formula with a single explanatory variable like \code{~ z}. The observations in the model are ordered by the size of \code{z}. If set to \code{NULL} (the default) the observations are assumed to be ordered (e.g., a time series).} \item{alternative}{a character string specifying the alternative hypothesis.} \item{iterations}{an integer specifying the number of iterations when calculating the p-value with the "pan" algorithm.} \item{exact}{logical. If set to \code{FALSE} a normal approximation will be used to compute the p value, if \code{TRUE} the "pan" algorithm is used. The default is to use "pan" if the sample size is < 100.} \item{tol}{tolerance. Eigenvalues computed have to be greater than \code{tol} to be treated as non-zero.} \item{data}{an optional data frame containing the variables in the model. By default the variables are taken from the environment which \code{dwtest} is called from.} } \details{The Durbin-Watson test has the null hypothesis that the autocorrelation of the disturbances is 0. It is possible to test against the alternative that it is greater than, not equal to, or less than 0, respectively. This can be specified by the \code{alternative} argument. Under the assumption of normally distributed disturbances, the null distribution of the Durbin-Watson statistic is the distribution of a linear combination of chi-squared variables. The p-value is computed using the Fortran version of Applied Statistics Algorithm AS 153 by Farebrother (1980, 1984). This algorithm is called "pan" or "gradsol". 
For large sample sizes the algorithm might fail to compute the p value; in that case a warning is printed and an approximate p value will be given; this p value is computed using a normal approximation with mean and variance of the Durbin-Watson test statistic. Examples can not only be found on this page, but also on the help pages of the data sets \code{\link{bondyield}}, \code{\link{currencysubstitution}}, \code{\link{growthofmoney}}, \code{\link{moneydemand}}, \code{\link{unemployment}}, \code{\link{wages}}. } \value{An object of class \code{"htest"} containing: \item{statistic}{the test statistic.} \item{method}{a character string with the method used.} \item{alternative}{a character string describing the alternative hypothesis.} \item{p.value}{the corresponding p-value.} \item{data.name}{a character string with the data name.} } \references{ J. Durbin & G.S. Watson (1950), Testing for Serial Correlation in Least Squares Regression I. \emph{Biometrika} \bold{37}, 409--428. J. Durbin & G.S. Watson (1951), Testing for Serial Correlation in Least Squares Regression II. \emph{Biometrika} \bold{38}, 159--177. J. Durbin & G.S. Watson (1971), Testing for Serial Correlation in Least Squares Regression III. \emph{Biometrika} \bold{58}, 1--19. R.W. Farebrother (1980), Pan's Procedure for the Tail Probabilities of the Durbin-Watson Statistic (Corr: 81V30 p189; AS R52: 84V33 p363- 366; AS R53: 84V33 p366- 369). \emph{Applied Statistics} \bold{29}, 224--227. R. W. Farebrother (1984), [AS R53] A Remark on Algorithms AS 106 (77V26 p92-98), AS 153 (80V29 p224-227) and AS 155: The Distribution of a Linear Combination of \eqn{\chi^2} Random Variables (80V29 p323-333) \emph{Applied Statistics} \bold{33}, 366--369. W. Krmer & H. Sonnberger (1986), \emph{The Linear Regression Model under Test}. Heidelberg: Physica. } \seealso{\code{\link{lm}}} \examples{ ## generate two AR(1) error terms with parameter ## rho = 0 (white noise) and rho = 0.9 respectively err1 <- rnorm(100) ## generate regressor and dependent variable x <- rep(c(-1,1), 50) y1 <- 1 + x + err1 ## perform Durbin-Watson test dwtest(y1 ~ x) err2 <- filter(err1, 0.9, method="recursive") y2 <- 1 + x + err2 dwtest(y2 ~ x) } \keyword{htest} lmtest/man/hmctest.Rd0000644000176200001440000000605613715303430014301 0ustar liggesusers\name{hmctest} \alias{hmctest} \encoding{latin1} \title{Harrison-McCabe test} \description{Harrison-McCabe test for heteroskedasticity.} \usage{ hmctest(formula, point = 0.5, order.by = NULL, simulate.p = TRUE, nsim = 1000, plot = FALSE, data = list()) } \arguments{ \item{formula}{a symbolic description for the model to be tested (or a fitted \code{"lm"} object).} \item{point}{numeric. If \code{point} is smaller than 1 it is interpreted as percentages of data, i.e. \code{n*point} is taken to be the (potential) breakpoint in the variances, if \code{n} is the number of observations in the model. If \code{point} is greater than 1 it is interpreted to be the index of the breakpoint.} \item{order.by}{Either a vector \code{z} or a formula with a single explanatory variable like \code{~ z}. The observations in the model are ordered by the size of \code{z}. If set to \code{NULL} (the default) the observations are assumed to be ordered (e.g., a time series).} \item{simulate.p}{logical. If \code{TRUE} a p value will be assessed by simulation, otherwise the p value is \code{NA}.} \item{nsim}{integer. Determines how many runs are used to simulate the p value.} \item{plot}{logical. 
If \code{TRUE} the test statistic
    for all possible breakpoints is plotted.}
 \item{data}{an optional data frame containing the variables in the model.
By default the variables are taken from the environment which \code{hmctest}
is called from.}
}
\details{The Harrison-McCabe test statistic is the fraction of the residual
sum of squares that is attributable to the observations before the breakpoint.
Under \eqn{H_0} the test statistic should be close to the size of this
fraction, e.g. in the default case close to 0.5. The null hypothesis is
rejected if the statistic is too small.

Examples can not only be found on this page, but also on the help pages of the
data sets \code{\link{bondyield}}, \code{\link{currencysubstitution}},
\code{\link{growthofmoney}}, \code{\link{moneydemand}},
\code{\link{unemployment}}, \code{\link{wages}}.}

\value{
 A list with class \code{"htest"} containing the following components:
  \item{statistic}{the value of the test statistic.}
  \item{p.value}{the simulated p-value of the test.}
  \item{method}{a character string indicating what type of test was performed.}
  \item{data.name}{a character string giving the name(s) of the data.}
}
\references{
M.J. Harrison & B.P.M. McCabe (1979),
A Test for Heteroscedasticity based on Ordinary Least Squares Residuals.
\emph{Journal of the American Statistical Association} \bold{74}, 494--499

W. Krämer & H. Sonnberger (1986),
\emph{The Linear Regression Model under Test}. Heidelberg: Physica
}
\seealso{\code{\link{lm}}}
\examples{
## generate a regressor
x <- rep(c(-1,1), 50)
## generate heteroskedastic and homoskedastic disturbances
err1 <- c(rnorm(50, sd=1), rnorm(50, sd=2))
err2 <- rnorm(100)
## generate a linear relationship
y1 <- 1 + x + err1
y2 <- 1 + x + err2
## perform Harrison-McCabe test
hmctest(y1 ~ x)
hmctest(y2 ~ x)
}
\keyword{htest}

lmtest/man/wages.Rd

\name{wages}
\alias{wages}
\docType{data}
\encoding{latin1}
\title{Wages}
\usage{data(wages)}
\description{
Wages Data.
}
\format{
 A multivariate yearly time series from 1960 to 1979 with variables
 \describe{
    \item{w}{wages,}
    \item{CPI}{consumer price index,}
    \item{u}{unemployment,}
    \item{mw}{minimum wage.}
 }
}
\source{The data was originally studied by Nichols (1983),
the data set is given in Krämer and Sonnberger (1986). Below we replicate
a few examples from their book. Some of these results differ more or
less seriously and are sometimes parameterized differently.
}
\references{
D.A. Nichols (1983),
Macroeconomic Determinants of Wage Adjustments in White
Collar Occupations. \emph{Review of Economics and Statistics}
\bold{65}, 203--213

W. Krämer & H. Sonnberger (1986),
\emph{The Linear Regression Model under Test}.
Heidelberg: Physica } \examples{ data(wages) ## data transformation to include lagged series mywages <- cbind(wages, lag(wages[,2], k = -1), lag(wages[,2], k = -2)) colnames(mywages) <- c(colnames(wages), "CPI2", "CPI3") mywages <- window(mywages, start=1962, end=1979) ## page 142, fit Nichols OLS model ## equation (6.10) modelNichols <- w ~ CPI + CPI2 + CPI3 + u + mw lm(modelNichols, data = mywages) ## page 143, fit test statistics in table 6.11 ############################################## if(require(strucchange, quietly = TRUE)) { ## Chow 1972 sctest(modelNichols, point=c(1971,1), data=mywages, type="Chow") } ## Breusch-Pagan bptest(modelNichols, data=mywages, studentize=FALSE) bptest(modelNichols, data=mywages) ## RESET (a)-(b) reset(modelNichols, data=mywages) reset(modelNichols, power=2, type="regressor", data=mywages) ## Harvey-Collier harvtest(modelNichols, order.by = ~ CPI, data=mywages) harvtest(modelNichols, order.by = ~ CPI2, data=mywages) harvtest(modelNichols, order.by = ~ CPI3, data=mywages) harvtest(modelNichols, order.by = ~ u, data=mywages) ## Rainbow raintest(modelNichols, order.by = "mahalanobis", data=mywages) } \keyword{datasets} lmtest/man/jtest.Rd0000644000176200001440000000542412220001273013747 0ustar liggesusers\name{jtest} \alias{jtest} \title{J Test for Comparing Non-Nested Models} \description{ \code{jtest} performs the Davidson-MacKinnon J test for comparing non-nested models. } \usage{ jtest(formula1, formula2, data = list(), vcov. = NULL, \dots) } \arguments{ \item{formula1}{either a symbolic description for the first model to be tested, or a fitted object of class \code{"lm"}.} \item{formula2}{either a symbolic description for the second model to be tested, or a fitted object of class \code{"lm"}.} \item{data}{an optional data frame containing the variables in the model. By default the variables are taken from the environment which \code{jtest} is called from.} \item{vcov.}{a function for estimating the covariance matrix of the regression coefficients, e.g., \code{\link[sandwich]{vcovHC}}.} \item{\dots}{further arguments passed to \code{\link{coeftest}}.} } \details{ The idea of the J test is the following: if the first model contains the correct set of regressors, then including the fitted values of the second model into the set of regressors should provide no significant improvement. But if it does, it can be concluded that model 1 does not contain the correct set of regressors. Hence, to compare both models the fitted values of model 1 are included into model 2 and vice versa. The J test statistic is simply the marginal test of the fitted values in the augmented model. This is performed by \code{\link{coeftest}}. For further details, see the references. } \value{ An object of class \code{"anova"} which contains the coefficient estimate of the fitted values in the augmented regression plus corresponding standard error, test statistic and p value. } \references{ R. Davidson & J. MacKinnon (1981). Several Tests for Model Specification in the Presence of Alternative Hypotheses. \emph{Econometrica}, \bold{49}, 781-793. W. H. Greene (1993), \emph{Econometric Analysis}, 2nd ed. Macmillan Publishing Company, New York. W. H. Greene (2003). \emph{Econometric Analysis}, 5th ed. New Jersey, Prentice Hall. 
} \seealso{\code{\link{coxtest}}, \code{\link{encomptest}}} \examples{ ## Fit two competing, non-nested models for aggregate ## consumption, as in Greene (1993), Examples 7.11 and 7.12 ## load data and compute lags data(USDistLag) usdl <- na.contiguous(cbind(USDistLag, lag(USDistLag, k = -1))) colnames(usdl) <- c("con", "gnp", "con1", "gnp1") ## C(t) = a0 + a1*Y(t) + a2*C(t-1) + u fm1 <- lm(con ~ gnp + con1, data = usdl) ## C(t) = b0 + b1*Y(t) + b2*Y(t-1) + v fm2 <- lm(con ~ gnp + gnp1, data = usdl) ## Cox test in both directions: coxtest(fm1, fm2) ## ...and do the same for jtest() and encomptest(). ## Notice that in this particular case they are coincident. jtest(fm1, fm2) encomptest(fm1, fm2) } \keyword{htest} lmtest/man/bptest.Rd0000644000176200001440000000542714205560747014147 0ustar liggesusers\name{bptest} \alias{bptest} \encoding{latin1} \title{Breusch-Pagan Test} \description{Performs the Breusch-Pagan test against heteroskedasticity.} \usage{ bptest(formula, varformula = NULL, studentize = TRUE, data = list(), weights = NULL) } \arguments{ \item{formula}{a symbolic description for the model to be tested (or a fitted \code{"lm"} object).} \item{varformula}{a formula describing only the potential explanatory variables for the variance (no dependent variable needed). By default the same explanatory variables are taken as in the main regression model.} \item{studentize}{logical. If set to \code{TRUE} Koenker's studentized version of the test statistic will be used.} \item{data}{an optional data frame containing the variables in the model. By default the variables are taken from the environment which \code{bptest} is called from.} \item{weights}{an optional vector of weights to be used in the model.} } \details{The Breusch-Pagan test fits a linear regression model to the residuals of a linear regression model (by default the same explanatory variables are taken as in the main regression model) and rejects if too much of the variance is explained by the additional explanatory variables. Under \eqn{H_0} the test statistic of the Breusch-Pagan test follows a chi-squared distribution with \code{parameter} (the number of regressors without the constant in the model) degrees of freedom. Examples can not only be found on this page, but also on the help pages of the data sets \code{\link{bondyield}}, \code{\link{currencysubstitution}}, \code{\link{growthofmoney}}, \code{\link{moneydemand}}, \code{\link{unemployment}}, \code{\link{wages}}. } \value{ A list with class \code{"htest"} containing the following components: \item{statistic}{the value of the test statistic.} \item{p.value}{the p-value of the test.} \item{parameter}{degrees of freedom.} \item{method}{a character string indicating what type of test was performed.} \item{data.name}{a character string giving the name(s) of the data.} } \references{ T.S. Breusch & A.R. Pagan (1979), A Simple Test for Heteroscedasticity and Random Coefficient Variation. \emph{Econometrica} \bold{47}, 1287--1294 R. Koenker (1981), A Note on Studentizing a Test for Heteroscedasticity. \emph{Journal of Econometrics} \bold{17}, 107--112. W. Krmer & H. Sonnberger (1986), \emph{The Linear Regression Model under Test}. 
Heidelberg: Physica } \seealso{\code{\link{lm}}, \code{\link[car]{ncvTest}}} \examples{ ## generate a regressor x <- rep(c(-1,1), 50) ## generate heteroskedastic and homoskedastic disturbances err1 <- rnorm(100, sd=rep(c(1,2), 50)) err2 <- rnorm(100) ## generate a linear relationship y1 <- 1 + x + err1 y2 <- 1 + x + err2 ## perform Breusch-Pagan test bptest(y1 ~ x) bptest(y2 ~ x) } \keyword{htest} lmtest/man/coxtest.Rd0000644000176200001440000000476412220001273014315 0ustar liggesusers\name{coxtest} \alias{coxtest} \title{Cox Test for Comparing Non-Nested Models} \description{ \code{coxtest} performs the Cox test for comparing two non-nested models. } \usage{ coxtest(formula1, formula2, data = list()) } \arguments{ \item{formula1}{either a symbolic description for the first model to be tested, or a fitted object of class \code{"lm"}.} \item{formula2}{either a symbolic description for the second model to be tested, or a fitted object of class \code{"lm"}.} \item{data}{an optional data frame containing the variables in the model. By default the variables are taken from the environment which \code{coxtest} is called from.} } \details{ The idea of the Cox test is the following: if the first model contains the correct set of regressors, then a fit of the regressors from the second model to the fitted values from first model should have no further explanatory value. But if it has, it can be concluded that model 1 does not contain the correct set of regressors. Hence, to compare both models the fitted values of model 1 are regressed on model 2 and vice versa. A Cox test statistic is computed for each auxiliary model which is asymptotically standard normally distributed. For further details, see the references. } \value{ An object of class \code{"anova"} which contains the estimate plus corresponding standard error, z test statistic and p value for each auxiliary test. } \references{ R. Davidson & J. MacKinnon (1981). Several Tests for Model Specification in the Presence of Alternative Hypotheses. \emph{Econometrica}, \bold{49}, 781-793. W. H. Greene (1993), \emph{Econometric Analysis}, 2nd ed. Macmillan Publishing Company, New York. W. H. Greene (2003). \emph{Econometric Analysis}, 5th ed. New Jersey, Prentice Hall. } \seealso{\code{\link{jtest}}, \code{\link{encomptest}}} \examples{ ## Fit two competing, non-nested models for aggregate ## consumption, as in Greene (1993), Examples 7.11 and 7.12 ## load data and compute lags data(USDistLag) usdl <- na.contiguous(cbind(USDistLag, lag(USDistLag, k = -1))) colnames(usdl) <- c("con", "gnp", "con1", "gnp1") ## C(t) = a0 + a1*Y(t) + a2*C(t-1) + u fm1 <- lm(con ~ gnp + con1, data = usdl) ## C(t) = b0 + b1*Y(t) + b2*Y(t-1) + v fm2 <- lm(con ~ gnp + gnp1, data = usdl) ## Cox test in both directions: coxtest(fm1, fm2) ## ...and do the same for jtest() and encomptest(). ## Notice that in this particular case they are coincident. jtest(fm1, fm2) encomptest(fm1, fm2) } \keyword{htest} lmtest/man/grangertest.Rd0000644000176200001440000000421212220001273015135 0ustar liggesusers\name{grangertest} \alias{grangertest} \alias{grangertest.default} \alias{grangertest.formula} \title{Test for Granger Causality} \description{ \code{grangertest} is a generic function for performing a test for Granger causality. 
}

\usage{
\method{grangertest}{default}(x, y, order = 1, na.action = na.omit, \dots)
\method{grangertest}{formula}(formula, data = list(), \dots)
}

\arguments{
  \item{x}{either a bivariate series (in which case \code{y} has to be
    missing) or a univariate series of observations.}
  \item{y}{a univariate series of observations (if \code{x} is univariate,
    too).}
  \item{order}{integer specifying the order of lags to include in the
    auxiliary regression.}
  \item{na.action}{a function for eliminating \code{NA}s after aligning the
    series \code{x} and \code{y}.}
  \item{\dots}{further arguments passed to \code{\link{waldtest}}.}
  \item{formula}{a formula specification of a bivariate series like
    \code{y ~ x}.}
  \item{data}{an optional data frame containing the variables in the
    model. By default the variables are taken from the environment
    which \code{grangertest} is called from.}
}

\details{
  Currently, the methods for the generic function \code{grangertest} only
  perform tests for Granger causality in bivariate series. The test is simply
  a Wald test comparing the unrestricted model---in which \code{y} is explained
  by the lags (up to order \code{order}) of \code{y} and \code{x}---and the
  restricted model---in which \code{y} is only explained by the lags of \code{y}.

  Both methods are simply convenience interfaces to \code{\link{waldtest}}.
}

\value{
  An object of class \code{"anova"} which contains the residual degrees of
  freedom, the difference in degrees of freedom, Wald statistic and
  corresponding p value.
}

\seealso{\code{\link{waldtest}}, \code{\link{ChickEgg}}}

\examples{
## Which came first: the chicken or the egg?
data(ChickEgg)
grangertest(egg ~ chicken, order = 3, data = ChickEgg)
grangertest(chicken ~ egg, order = 3, data = ChickEgg)

## alternative ways of specifying the same test
grangertest(ChickEgg, order = 3)
grangertest(ChickEgg[, 1], ChickEgg[, 2], order = 3)
}

\keyword{htest}

lmtest/man/raintest.Rd

\name{raintest}
\alias{raintest}
\encoding{latin1}
\title{Rainbow Test}
\description{Rainbow test for linearity.}
\usage{
raintest(formula, fraction = 0.5, order.by = NULL, center = NULL,
  data=list())
}
\arguments{
  \item{formula}{a symbolic description for the model to be tested
    (or a fitted \code{"lm"} object).}
  \item{fraction}{numeric. The number of observations in the subset is
    determined by \code{fraction*n} if \code{n} is the number of observations
    in the model.}
  \item{order.by}{Either a vector \code{z} or a formula with a single explanatory
    variable like \code{~ z}. The observations in the model
    are ordered by the size of \code{z}. If set to \code{NULL} (the default) the
    observations are assumed to be ordered (e.g., a time series). If set to
    \code{"mahalanobis"} then the observations are ordered by their Mahalanobis
    distances from the mean regressor.}
  \item{center}{numeric. If \code{center} is smaller than 1 it is interpreted as
    a percentage of the data, i.e. the subset is chosen such that \code{n*fraction}
    observations are around observation number \code{n*center}. If \code{center}
    is greater than 1 it is interpreted to be the index of the center of the
    subset. By default \code{center} is 0.5. If the Mahalanobis distance is chosen
    \code{center} is taken to be the mean regressor, but can be specified to be a
    \code{k}-dimensional vector if \code{k} is the number of regressors and should
    be in the range of the respective regressors.}
  \item{data}{an optional data frame containing the variables in the model.
By default the variables are taken from the environment which \code{raintest} is called from.} } \details{The basic idea of the Rainbow test is that even if the true relationship is non-linear, a good linear fit can be achieved on a subsample in the "middle" of the data. The null hypothesis is rejected whenever the overall fit is significantly worse than the fit for the subsample. The test statistic under \eqn{H_0} follows an F distribution with \code{parameter} degrees of freedom. Examples can not only be found on this page, but also on the help pages of the data sets \code{\link{bondyield}}, \code{\link{currencysubstitution}}, \code{\link{growthofmoney}}, \code{\link{moneydemand}}, \code{\link{unemployment}}, \code{\link{wages}}. } \value{ A list with class \code{"htest"} containing the following components: \item{statistic}{the value of the test statistic.} \item{p.value}{the p-value of the test.} \item{parameter}{degrees of freedom.} \item{method}{a character string indicating what type of test was performed.} \item{data.name}{a character string giving the name(s) of the data.} } \references{ J.M. Utts (1982), The Rainbow Test for Lack of Fit in Regression. \emph{Communications in Statistics -- Theory and Methods} \bold{11}, 2801--2815. W. Krmer & H. Sonnberger (1986), \emph{The Linear Regression Model under Test}. Heidelberg: Physica } \seealso{\code{\link{lm}}} \examples{ x <- c(1:30) y <- x^2 + rnorm(30,0,2) rain <- raintest(y ~ x) rain ## critical value qf(0.95, rain$parameter[1], rain$parameter[2]) } \keyword{htest} lmtest/man/growthofmoney.Rd0000644000176200001440000000420112220001273015515 0ustar liggesusers\name{growthofmoney} \alias{growthofmoney} \docType{data} \encoding{latin1} \title{Growth of Money Supply} \usage{data(growthofmoney)} \description{ Growth of Money Supply Data. } \format{ A multivariate quarterly time series from 1970(2) to 1974(4) with variables \describe{ \item{TG1.TG0}{difference of current and preceding target for the growth rate of the money supply,} \item{AG0.TG0}{difference of actual growth rate and target growth rate for the preceding period.} } } \source{ The data was originally studied by Hetzel (1981), the data set is given in Krmer and Sonnberger (1986). Below we replicate a few examples from their book. Some of these results differ more or less seriously and are sometimes parameterized differently. } \references{ R.L. Hetzel (1981), The Federal Reserve System and Control of the Money Supply in the 1970's. \emph{Journal of Money, Credit and Banking} \bold{13}, 31--43 W. Krmer & H. Sonnberger (1986), \emph{The Linear Regression Model under Test}. 
Heidelberg: Physica } \examples{ data(growthofmoney) ## page 137, fit Hetzel OLS model ## first/second line in Table 6.7 modelHetzel <- TG1.TG0 ~ AG0.TG0 lm(modelHetzel, data=growthofmoney) dwtest(modelHetzel, data=growthofmoney) ## page 135, fit test statistics in Table 6.8 ############################################# if(require(strucchange, quietly = TRUE)) { ## Chow 1974(1) sctest(modelHetzel, point=c(1973,4), data=growthofmoney, type="Chow") } ## RESET reset(modelHetzel, data=growthofmoney) reset(modelHetzel, power=2, type="regressor", data=growthofmoney) reset(modelHetzel, type="princomp", data=growthofmoney) ## Harvey-Collier harvtest(modelHetzel, order.by= ~ AG0.TG0, data=growthofmoney) ## Rainbow raintest(modelHetzel, order.by = "mahalanobis", data=growthofmoney) ## Identification of outliers ############################# ## Figure 6.1 plot(modelHetzel, data=growthofmoney) abline(v=0) abline(h=0) abline(coef(lm(modelHetzel, data=growthofmoney)), col=2) ## Table 6.7, last line growthofmoney2 <- as.data.frame(growthofmoney[-c(5:6),]) lm(modelHetzel, data=growthofmoney2) dwtest(modelHetzel, data=growthofmoney2) } \keyword{datasets} lmtest/man/waldtest.Rd0000644000176200001440000001433013715303430014453 0ustar liggesusers\name{waldtest} \alias{waldtest} \alias{waldtest.formula} \alias{waldtest.default} \alias{waldtest.lm} \title{Wald Test of Nested Models} \description{ \code{waldtest} is a generic function for carrying out Wald tests. The default method can be employed for comparing nested (generalized) linear models (see details below). } \usage{ waldtest(object, \dots) \method{waldtest}{default}(object, \dots, vcov = NULL, test = c("Chisq", "F"), name = NULL) \method{waldtest}{formula}(object, \dots, data = list()) \method{waldtest}{lm}(object, \dots, test = c("F", "Chisq")) } \arguments{ \item{object}{an object. See below for details.} \item{\dots}{further object specifications passed to methods. See below for details.} \item{vcov}{a function for estimating the covariance matrix of the regression coefficients, e.g., \code{\link[sandwich]{vcovHC}}. If only two models are compared it can also be the covariance matrix of the more general model.} \item{test}{character specifying whether to compute the large sample Chi-squared statistic (with asymptotic Chi-squared distribution) or the finite sample F statistic (with approximate F distribution).} \item{name}{a function for extracting a suitable name/description from a fitted model object. By default the name is queried by calling \code{\link{formula}}.} \item{data}{a data frame containing the variables in the model.} } \details{ \code{waldtest} is intended to be a generic function for comparisons of models via Wald tests. The default method consecutively compares the fitted model object \code{object} with the models passed in \code{\dots}. Instead of passing the fitted model objects in \code{\dots}, several other specifications are possible. For all objects in \code{list(object, \dots)} the function tries to consecutively compute fitted models using the following updating algorithm: \enumerate{ \item For each two consecutive objects, \code{object1} and \code{object2} say, try to turn \code{object2} into a fitted model that can be compared to (the already fitted model object) \code{object1}. \item If \code{object2} is numeric, the corresponding element of \code{attr(terms(object1), "term.labels")} is selected to be omitted. \item If \code{object2} is a character, the corresponding terms are included into an update formula like \code{. 
~ . - term2a - term2b}. \item If \code{object2} is a formula, then compute the fitted model via \code{update(object1, object2)}. } Consequently, the models in \code{\dots} can be specified as integers, characters (both for terms that should be eliminated from the previous model), update formulas or fitted model objects. Except for the last case, the existence of an \code{\link{update}} method is assumed. See also the examples for an illustration. Subsequently, a Wald test for each two consecutive models is carried out. This is similar to \code{\link{anova}} (which typically performs likelihood-ratio tests), but with a few differences. If only one fitted model object is specified, it is compared to the trivial model (with only an intercept). The test can be either the finite sample F statistic or the asymptotic Chi-squared statistic (\eqn{F = Chisq/k} if \eqn{k} is the difference in degrees of freedom). The covariance matrix is always estimated on the more general of two subsequent models (and not only in the most general model overall). If \code{vcov} is specified, HC and HAC estimators can also be plugged into \code{waldtest}. The default method is already very general and applicable to a broad range of fitted model objects, including \code{\link{lm}} and \code{\link{glm}} objects. It can be easily made applicable to other model classes as well by providing suitable methods to the standard generics \code{\link{terms}} (for determining the variables in the model along with their names), \code{\link{update}} (unless only fitted model objects are passed to \code{waldtest}, as mentioned above), \code{\link{nobs}} (or \code{\link{residuals}}, used for determining the number of observations), \code{\link{df.residual}} (needed only for the F statistic), \code{\link{coef}} (for extracting the coefficients; needs to be named matching the names in \code{terms}), \code{\link{vcov}} (can be user-supplied; needs to be named matching the names in \code{terms}). Furthermore, some means of determining a suitable \code{name} for a fitted model object can be specified (by default this is taken to be the result of a call to \code{\link{formula}}, if available). The \code{"formula"} method fits a \code{\link{lm}} first and then calls the \code{"lm"} method. The \code{"lm"} method just calls the default method, but sets the default test to be the F test. } \value{ An object of class \code{"anova"} which contains the residual degrees of freedom, the difference in degrees of freedom, Wald statistic (either \code{"Chisq"} or \code{"F"}) and corresponding p value. } \seealso{\code{\link{coeftest}}, \code{\link[stats]{anova}}, \code{\link[car]{linearHypothesis}}} \examples{ ## fit two competing, non-nested models and their encompassing ## model for aggregate consumption, as in Greene (1993), ## Examples 7.11 and 7.12 ## load data and compute lags data(USDistLag) usdl <- na.contiguous(cbind(USDistLag, lag(USDistLag, k = -1))) colnames(usdl) <- c("con", "gnp", "con1", "gnp1") ## C(t) = a0 + a1*Y(t) + a2*C(t-1) + u fm1 <- lm(con ~ gnp + con1, data = usdl) ## C(t) = b0 + b1*Y(t) + b2*Y(t-1) + v fm2 <- lm(con ~ gnp + gnp1, data = usdl) ## Encompassing model fm3 <- lm(con ~ gnp + con1 + gnp1, data = usdl) ## a simple ANOVA for fm3 vs. fm2 waldtest(fm3, fm2) anova(fm3, fm2) ## as df = 1, the test is equivalent to the corresponding t test in coeftest(fm3) ## various equivalent specifications of the two models waldtest(fm3, fm2) waldtest(fm3, 2) waldtest(fm3, "con1") waldtest(fm3, . ~ . 
- con1) ## comparing more than one model ## (equivalent to the encompassing test) waldtest(fm1, fm3, fm2) encomptest(fm1, fm2) ## using the asymptotic Chisq statistic waldtest(fm3, fm2, test = "Chisq") ## plugging in a HC estimator if(require(sandwich)) waldtest(fm3, fm2, vcov = vcovHC) } \keyword{htest} lmtest/man/jocci.Rd0000644000176200001440000000426313715303430013717 0ustar liggesusers\name{jocci} \alias{fyff} \alias{gmdc} \alias{ip} \alias{jocci} \alias{lhur} \alias{pw561} \title{U.S. Macroeconomic Time Series} \usage{ data(fyff) data(gmdc) data(ip) data(jocci) data(lhur) data(pw561) } \description{ Several macroeconomic time series from the U.S. } \format{ All data sets are multivariate monthly time series from 1959(8) to 1993(12) (except 1993(10) for \code{jocci}) with variables \describe{ \item{y}{original time series,} \item{dy}{transformed times series (first differences or log first differences),} \item{dy1}{transformed series at lag 1,} \item{dy2}{transformed series at lag 2,} \item{dy3}{transformed series at lag 3,} \item{dy4}{transformed series at lag 4,} \item{dy5}{transformed series at lag 5,} \item{dy6}{transformed series at lag 6.} } } \details{ The description from Stock & Watson (1996) for the time series (with the transformation used): \describe{ \item{fyff}{interest rate (first differences),} \item{gmdc}{pce, implicit price deflator: pce (1987 = 100) (log first differences),} \item{ip}{index of industrial production (log first differences),} \item{jocci}{department of commerce commodity price index (log first differences),} \item{lhur}{unemployment rate: all workers, 16 years & over (\%, sa) (first differences),} \item{pw561}{producer price index: crude petroleum (82 = 100, nsa) (log first differences).} } Stock & Watson (1996) fitted an AR(6) model to all transformed time series. } \source{ Stock & Watson (1996) study the stability of 76 macroeconomic time series, which can be obtained from Mark W. Watson's homepage at \url{http://www.princeton.edu/~mwatson/ddisk/bivtvp.zip}. } \references{ J.H. Stock & M.W. Watson (1996), Evidence on Structural Instability in Macroeconomic Time Series Relations. \emph{Journal of Business & Economic Statistics} \bold{14}, 11--30. } \examples{ data(jocci) dwtest(dy ~ 1, data = jocci) bgtest(dy ~ 1, data = jocci) ar6.model <- dy ~ dy1 + dy2 + dy3 + dy4 + dy5 +dy6 bgtest(ar6.model, data = jocci) var.model <- ~ I(dy1^2) + I(dy2^2) + I(dy3^2) + I(dy4^2) + I(dy5^2) + I(dy6^2) bptest(ar6.model, var.model, data = jocci) } \keyword{datasets} lmtest/man/valueofstocks.Rd0000644000176200001440000000175112220001273015505 0ustar liggesusers\name{valueofstocks} \alias{valueofstocks} \docType{data} \encoding{latin1} \title{ Value of Stocks } \usage{data(valueofstocks)} \description{ Value of Stocks Data } \format{ A multivariate quarterly time series from 1960(1) to 1977(3) with variables \describe{ \item{VST}{value of stocks,} \item{MB}{monetary base,} \item{RTPD}{dollar rent on producer durables,} \item{RTPS}{dollar rent on producer structures,} \item{XBC}{production capacity for business output.} } } \source{The data was originally studied by Woglom (1981), the data set is given in Krmer and Sonnberger (1986). } \references{ G. Woglom (1981), A Reexamination of the Role of Stocks in the Consumption Function and the Transmission Mechanism. \emph{Journal of Money, Credit and Banking} \bold{13}, 215--220 W. Krmer & H. Sonnberger (1986), \emph{The Linear Regression Model Under Test}. 
Heidelberg: Physica } \examples{ data(valueofstocks) lm(log(VST) ~., data=valueofstocks) } \keyword{datasets} lmtest/man/lrtest.Rd0000644000176200001440000000510513715303430014141 0ustar liggesusers\name{lrtest} \alias{lrtest} \alias{lrtest.formula} \alias{lrtest.default} \title{Likelihood Ratio Test of Nested Models} \description{ \code{lrtest} is a generic function for carrying out likelihood ratio tests. The default method can be employed for comparing nested (generalized) linear models (see details below). } \usage{ lrtest(object, \dots) \method{lrtest}{default}(object, \dots, name = NULL) \method{lrtest}{formula}(object, \dots, data = list()) } \arguments{ \item{object}{an object. See below for details.} \item{\dots}{further object specifications passed to methods. See below for details.} \item{name}{a function for extracting a suitable name/description from a fitted model object. By default the name is queried by calling \code{\link{formula}}.} \item{data}{a data frame containing the variables in the model.} } \details{ \code{lrtest} is intended to be a generic function for comparisons of models via asymptotic likelihood ratio tests. The default method consecutively compares the fitted model object \code{object} with the models passed in \code{\dots}. Instead of passing the fitted model objects in \code{\dots}, several other specifications are possible. The updating mechanism is the same as for \code{\link{waldtest}}: the models in \code{\dots} can be specified as integers, characters (both for terms that should be eliminated from the previous model), update formulas or fitted model objects. Except for the last case, the existence of an \code{\link{update}} method is assumed. See \code{\link{waldtest}} for details. Subsequently, an asymptotic likelihood ratio test for each two consecutive models is carried out: Twice the difference in log-likelihoods (as derived by the \code{\link{logLik}} methods) is compared with a Chi-squared distribution. The \code{"formula"} method fits a \code{\link{lm}} first and then calls the default method. } \value{ An object of class \code{"anova"} which contains the log-likelihood, degrees of freedom, the difference in degrees of freedom, likelihood ratio Chi-squared statistic and corresponding p value. } \seealso{\code{\link{waldtest}}} \examples{ ## with data from Greene (1993): ## load data and compute lags data("USDistLag") usdl <- na.contiguous(cbind(USDistLag, lag(USDistLag, k = -1))) colnames(usdl) <- c("con", "gnp", "con1", "gnp1") fm1 <- lm(con ~ gnp + gnp1, data = usdl) fm2 <- lm(con ~ gnp + con1 + gnp1, data = usdl) ## various equivalent specifications of the LR test lrtest(fm2, fm1) lrtest(fm2, 2) lrtest(fm2, "con1") lrtest(fm2, . ~ . - con1) } \keyword{htest} lmtest/man/harvtest.Rd0000644000176200001440000000430012220001273014446 0ustar liggesusers\name{harvtest} \alias{harvtest} \encoding{latin1} \title{Harvey-Collier Test} \description{Harvey-Collier test for linearity.} \usage{ harvtest(formula, order.by = NULL, data = list()) } \arguments{ \item{formula}{a symbolic description for the model to be tested (or a fitted \code{"lm"} object).} \item{order.by}{Either a vector \code{z} or a formula with a single explanatory variable like \code{~ z}. The observations in the model are ordered by the size of \code{z}. If set to \code{NULL} (the default) the observations are assumed to be ordered (e.g., a time series).} \item{data}{an optional data frame containing the variables in the model. 
By default the variables are taken from the environment which \code{harvtest} is called from.}
}
\details{
The Harvey-Collier test performs a t-test (with \code{parameter} degrees of freedom) on the recursive residuals. If the true relationship is not linear but convex or concave, the mean of the recursive residuals should differ from 0 significantly.
Examples can not only be found on this page, but also on the help pages of the data sets \code{\link{bondyield}}, \code{\link{currencysubstitution}}, \code{\link{growthofmoney}}, \code{\link{moneydemand}}, \code{\link{unemployment}}, \code{\link{wages}}.
}
\value{
A list with class \code{"htest"} containing the following components:
\item{statistic}{the value of the test statistic.}
\item{p.value}{the p-value of the test.}
\item{parameter}{degrees of freedom.}
\item{method}{a character string indicating what type of test was performed.}
\item{data.name}{a character string giving the name(s) of the data.}
}
\references{
A. Harvey & P. Collier (1977), Testing for Functional Misspecification in Regression Analysis. \emph{Journal of Econometrics} \bold{6}, 103--119
W. Krämer & H. Sonnberger (1986), \emph{The Linear Regression Model under Test}. Heidelberg: Physica
}
\seealso{\code{\link{lm}}}
\examples{
# generate a regressor and dependent variable
x <- 1:50
y1 <- 1 + x + rnorm(50)
y2 <- y1 + 0.3*x^2
## perform Harvey-Collier test
harv <- harvtest(y1 ~ x)
harv
## calculate critical value for the 0.05 level
qt(0.95, harv$parameter)
harvtest(y2 ~ x)
}
\keyword{htest}
lmtest/man/Mandible.Rd0000644000176200001440000000172013715303430014336 0ustar liggesusers\name{Mandible}
\alias{Mandible}
\title{Mandible Data}
\usage{data(Mandible)}
\description{
Mandible Data.
}
\format{
Data from 167 fetuses, especially:
\describe{
\item{age}{gestational age in weeks.}
\item{length}{mandible length in mm.}
}
}
\source{
The data was originally published by Chitty et al., 1993, and analyzed in Royston and Altman, 1994 (the data is given there). Only measurements with \code{age <= 28} were used in this analysis.
}
\references{
L. S. Chitty and S. Campbell and D. G. Altman (1993), Measurement of the fetal mandible -- feasibility and construction of a centile chart, \emph{Prenatal Diagnosis}, \bold{13}, 749--756.
P. Royston and D. G. Altman (1994), Regression Using Fractional Polynomials of Continuous Covariates: Parsimonious Parametric Modelling. \emph{Applied Statistics}, \bold{43}, 429--453.
}
\examples{
data(Mandible)
lm(length ~ age, data=Mandible, subset=(age <= 28))
}
\keyword{datasets}
lmtest/man/resettest.Rd0000644000176200001440000000551513715303430014653 0ustar liggesusers\name{resettest}
\alias{resettest}
\alias{reset}
\encoding{latin1}
\title{RESET Test}
\description{Ramsey's RESET test for functional form.}
\usage{
resettest(formula, power = 2:3, type = c("fitted", "regressor", "princomp"), data = list(), vcov = NULL, \dots)
}
\arguments{
\item{formula}{a symbolic description for the model to be tested (or a fitted \code{"lm"} object).}
\item{power}{integers. A vector of positive integers indicating the powers of the variables that should be included. By default, the test is for quadratic or cubic influence of the fitted response.}
\item{type}{a string indicating whether powers of the fitted response, the regressor variables (factors are left out), or the first principal component of the regressor matrix should be included in the extended model.}
\item{data}{an optional data frame containing the variables in the model.
By default the variables are taken from the environment which \code{resettest} is called from.}
\item{vcov, \dots}{optional arguments to be passed to \code{\link{waldtest}} for carrying out the F test.}
}
\details{
The RESET test is a popular diagnostic for correctness of functional form. The basic assumption is that under the alternative the model can be written in the form \eqn{ y = X\beta + Z\gamma + u}{y = X * beta + Z * gamma + u}. \code{Z} is generated by taking powers either of the fitted response, the regressor variables, or the first principal component of \code{X}. A standard F test is then applied to determine whether these additional variables have significant influence. The test statistic under \eqn{H_0} follows an F distribution with \code{parameter} degrees of freedom.
This function was called \code{reset} in previous versions of the package. Please use \code{resettest} instead.
Examples can not only be found on this page, but also on the help pages of the data sets \code{\link{bondyield}}, \code{\link{currencysubstitution}}, \code{\link{growthofmoney}}, \code{\link{moneydemand}}, \code{\link{unemployment}}, \code{\link{wages}}.
}
\value{An object of class \code{"htest"} containing:
\item{statistic}{the test statistic.}
\item{p.value}{the corresponding p-value.}
\item{parameter}{degrees of freedom.}
\item{method}{a character string with the method used.}
\item{data.name}{a character string with the data name.}
}
\references{
J.B. Ramsey (1969), Tests for Specification Errors in Classical Linear Least-Squares Regression Analysis. \emph{Journal of the Royal Statistical Society, Series B} \bold{31}, 350--371
W. Krämer & H. Sonnberger (1986), \emph{The Linear Regression Model under Test}. Heidelberg: Physica
}
\seealso{\code{\link{lm}}}
\examples{
x <- c(1:30)
y1 <- 1 + x + x^2 + rnorm(30)
y2 <- 1 + x + rnorm(30)
resettest(y1 ~ x, power=2, type="regressor")
resettest(y2 ~ x, power=2, type="regressor")
}
\keyword{htest}
lmtest/man/ftemp.Rd0000644000176200001440000000172013715303430013736 0ustar liggesusers\name{ftemp}
\alias{ftemp}
\title{Female Temperature Data}
\usage{data(ftemp)}
\description{
Daily morning temperature of an adult female (in degrees Celsius).
}
\format{
Univariate daily time series of 60 observations starting from 1990-07-11.
}
\details{
The data gives the daily morning temperature of an adult woman measured in degrees Celsius at about 6.30am each morning. At the start of the period the woman was sick, hence the high temperature. Then the usual monthly cycle can be seen. On the second cycle, the temperature doesn't complete the downward part of the pattern due to a conception.
}
\source{
The data set was taken from the Time Series Data Library, maintained by Rob Hyndman.
}
\examples{
data(ftemp)
plot(ftemp)
y <- window(ftemp, start = 8, end = 60)
if(require(strucchange)) {
  bp <- breakpoints(y ~ 1)
  plot(bp)
  fm.seg <- lm(y ~ 0 + breakfactor(bp))
  plot(y)
  lines(8:60, fitted(fm.seg), col = 4)
  lines(confint(bp))
}
}
\keyword{datasets}
lmtest/man/petest.Rd0000644000176200001440000000623313715303430014133 0ustar liggesusers\name{petest}
\alias{petest}
\title{PE Test for Linear vs. Log-Linear Specifications}
\description{
\code{petest} performs the MacKinnon-White-Davidson PE test for comparing linear vs. log-linear specifications in linear regressions.
}
\usage{
petest(formula1, formula2, data = list(), vcov.
= NULL, \dots) } \arguments{ \item{formula1}{either a symbolic description for the first model to be tested, or a fitted object of class \code{"lm"}.} \item{formula2}{either a symbolic description for the second model to be tested, or a fitted object of class \code{"lm"}.} \item{data}{an optional data frame containing the variables in the model. By default the variables are taken from the environment which \code{petest} is called from.} \item{vcov.}{a function for estimating the covariance matrix of the regression coefficients, e.g., \code{\link[sandwich]{vcovHC}}.} \item{\dots}{further arguments passed to \code{\link{coeftest}}.} } \details{ The PE test compares two non-nest models where one has a linear specification of type \code{y ~ x1 + x2} and the other has a log-linear specification of type \code{log(y) ~ z1 + z2}. Typically, the regressors in the latter model are logs of the regressors in the former, i.e., \code{z1} is \code{log(x1)} etc. The idea of the PE test is the following: If the linear specification is correct then adding an auxiliary regressor with the difference of the log-fitted values from both models should be non-significant. Conversely, if the log-linear specification is correct then adding an auxiliary regressor with the difference of fitted values in levels should be non-significant. The PE test statistic is simply the marginal test of the auxiliary variable(s) in the augmented model(s). In \code{petest} this is performed by \code{\link{coeftest}}. For further details, see the references. } \value{ An object of class \code{"anova"} which contains the coefficient estimate of the auxiliary variables in the augmented regression plus corresponding standard error, test statistic and p value. } \references{ W.H. Greene (2003). \emph{Econometric Analysis}, 5th edition. Upper Saddle River, NJ: Prentice Hall. J. MacKinnon, H. White, R. Davidson (1983). Tests for Model Specification in the Presence of Alternative Hypotheses: Some Further Results. \emph{Journal of Econometrics}, \bold{21}, 53-70. M. Verbeek (2004). \emph{A Guide to Modern Econometrics}, 2nd ed. Chichester, UK: John Wiley. } \seealso{\code{\link{jtest}}, \code{\link{coxtest}}, \code{\link{encomptest}}} \examples{ if(require("AER")) { ## Verbeek (2004), Section 3 data("HousePrices", package = "AER") ### Verbeek (2004), Table 3.3 hp_lin <- lm(price ~ . , data = HousePrices) summary(hp_lin) ### Verbeek (2004), Table 3.2 hp_log <- update(hp_lin, log(price) ~ . - lotsize + log(lotsize)) summary(hp_log) ## PE test petest(hp_lin, hp_log) ## Greene (2003), Example 9.8 data("USMacroG", package = "AER") ## Greene (2003), Table 9.2 usm_lin <- lm(m1 ~ tbill + gdp, data = USMacroG) usm_log <- lm(log(m1) ~ log(tbill) + log(gdp), data = USMacroG) petest(usm_lin, usm_log) ## matches results from Greene's errata } } \keyword{htest} lmtest/man/USDistLag.Rd0000644000176200001440000000203513715303430014422 0ustar liggesusers\name{USDistLag} \alias{USDistLag} \title{US Macroeconomic Data} \description{ US macroeconomic data for fitting a distributed lag model. } \usage{data(USDistLag)} \format{ An annual time series from 1963 to 1982 with 2 variables. \describe{ \item{consumption}{real consumption,} \item{gnp}{gross national product (deflated by CPI).} } } \source{Table 7.7 in Greene (1993)} \references{ Greene W.H. (1993), \emph{Econometric Analysis}, 2nd edition. Macmillan Publishing Company, New York. Executive Office of the President (1983), \emph{Economic Report of the President}. 
US Government Printing Office, Washington, DC.
}
\examples{
## William H. Greene, Econometric Analysis, 2nd Ed.
## Chapter 7
## load data set, p. 221, Table 7.7
data(USDistLag)
## fit distributed lag model, p.221, Example 7.8
usdl <- na.contiguous(cbind(USDistLag, lag(USDistLag, k = -1)))
colnames(usdl) <- c("con", "gnp", "con1", "gnp1")
## C(t) = b0 + b1*Y(t) + b2*C(t-1) + u
fm <- lm(con ~ gnp + con1, data = usdl)
summary(fm)
vcov(fm)
}
\keyword{datasets}
lmtest/man/bondyield.Rd0000644000176200001440000000654013715303430014601 0ustar liggesusers\name{bondyield}
\alias{bondyield}
\docType{data}
\encoding{latin1}
\title{ Bond Yield }
\usage{data(bondyield)}
\description{
Bond Yield Data.
}
\format{
A multivariate quarterly time series from 1961(1) to 1975(4) with variables
\describe{
\item{RAARUS}{difference of interest rate on government and corporate bonds,}
\item{MOOD}{measure of consumer sentiment,}
\item{EPI}{index of employment pressure,}
\item{EXP}{interest rate expectations,}
\item{Y}{artificial time series based on RAARUS,}
\item{K}{artificial time series based on RAARUS.}
}
}
\source{
The data was originally studied by Cook and Hendershott (1978) and Yawitz and Marshall (1981), the data set is given in Krämer and Sonnberger (1986). Below we replicate a few examples given in their book. Some of these results differ more or less seriously and are sometimes parameterized differently.
}
\references{
T.Q. Cook & P.H. Hendershott (1978), The Impact of Taxes, Risk and Relative Security Supplies of Interest Rate Differentials. \emph{The Journal of Finance} \bold{33}, 1173--1186
J.B. Yawitz & W. J. Marshall (1981), Measuring the Effect of Callability on Bond Yields. \emph{Journal of Money, Credit and Banking} \bold{13}, 60--71
W. Krämer & H. Sonnberger (1986), \emph{The Linear Regression Model under Test}.
Heidelberg: Physica } \examples{ data(bondyield) ## page 134, fit Cook-Hendershott OLS model and Yawitz-Marshall OLS model ## third and last line in Table 6.5 modelCH <- RAARUS ~ MOOD + EPI + EXP + RUS lm(modelCH, data=bondyield) dwtest(modelCH, data=bondyield) ## wrong sign of RUS coefficient modelYM <- RAARUS ~ MOOD + Y + K lm(modelYM, data=bondyield) dwtest(modelYM, data=bondyield) ## coefficient of Y and K differ by factor 100 ## page 135, fit test statistics in Table 6.6 b) ################################################ ## Chow 1971(1) if(require(strucchange, quietly = TRUE)) { sctest(modelCH, point=c(1971,1), data=bondyield, type="Chow") } ## Breusch-Pagan bptest(modelCH, data=bondyield, studentize=FALSE) bptest(modelCH, data=bondyield) ## Fluctuation test if(require(strucchange, quietly = TRUE)) { sctest(modelCH, type="fluctuation", data=bondyield, rescale=FALSE)} ## RESET reset(modelCH, data=bondyield) reset(modelCH, power=2, type="regressor", data=bondyield) reset(modelCH, type="princomp", data=bondyield) ## Harvey-Collier harvtest(modelCH, order.by= ~ MOOD, data=bondyield) harvtest(modelCH, order.by= ~ EPI, data=bondyield) harvtest(modelCH, order.by= ~ EXP, data=bondyield) harvtest(modelCH, order.by= ~ RUS, data=bondyield) ## Rainbow raintest(modelCH, order.by = "mahalanobis", data=bondyield) ## page 136, fit test statistics in Table 6.6 d) ################################################ ## Chow 1966(1) if(require(strucchange, quietly = TRUE)) { sctest(modelYM, point=c(1965,4), data=bondyield, type="Chow") } ## Fluctuation test if(require(strucchange, quietly = TRUE)) { sctest(modelYM, type="fluctuation", data=bondyield, rescale=FALSE) } ## RESET reset(modelYM, data=bondyield) reset(modelYM, power=2, type="regressor", data=bondyield) reset(modelYM, type="princomp", data=bondyield) ## Harvey-Collier harvtest(modelYM, order.by= ~ MOOD, data=bondyield) harvtest(modelYM, order.by= ~ Y, data=bondyield) harvtest(modelYM, order.by= ~ K, data=bondyield) ## Rainbow raintest(modelYM, order.by = "mahalanobis", data=bondyield) } \keyword{datasets} lmtest/man/ChickEgg.Rd0000644000176200001440000000304012220001273014252 0ustar liggesusers\name{ChickEgg} \alias{ChickEgg} \title{Chickens, Eggs, and Causality} \description{ US chicken population and egg production. } \usage{data(ChickEgg)} \format{ An annual time series from 1930 to 1983 with 2 variables. \describe{ \item{chicken}{number of chickens (December 1 population of all US chickens excluding commercial broilers),} \item{egg}{number of eggs (US egg production in millions of dozens).} } } \source{The data set was provided by Walter Thurman and made available for R by Roger Koenker. Unfortunately, the data is slightly different than the data analyzed in Thurman & Fisher (1988).} \references{ Thurman W.N. & Fisher M.E. (1988), Chickens, Eggs, and Causality, or Which Came First?, \emph{American Journal of Agricultural Economics}, 237-238. } \examples{ ## Which came first: the chicken or the egg? data(ChickEgg) ## chickens granger-cause eggs? grangertest(egg ~ chicken, order = 3, data = ChickEgg) ## eggs granger-cause chickens? grangertest(chicken ~ egg, order = 3, data = ChickEgg) ## To perform the same tests `by hand', you can use dynlm() and waldtest(): if(require(dynlm)) { ## chickens granger-cause eggs? em <- dynlm(egg ~ L(egg, 1) + L(egg, 2) + L(egg, 3), data = ChickEgg) em2 <- update(em, . ~ . + L(chicken, 1) + L(chicken, 2) + L(chicken, 3)) waldtest(em, em2) ## eggs granger-cause chickens? 
cm <- dynlm(chicken ~ L(chicken, 1) + L(chicken, 2) + L(chicken, 3), data = ChickEgg) cm2 <- update(cm, . ~ . + L(egg, 1) + L(egg, 2) + L(egg, 3)) waldtest(cm, cm2) } } \keyword{datasets} lmtest/DESCRIPTION0000644000176200001440000000306514216201562013273 0ustar liggesusersPackage: lmtest Title: Testing Linear Regression Models Version: 0.9-40 Date: 2022-03-21 Authors@R: c(person(given = "Torsten", family = "Hothorn", role = "aut", email = "Torsten.Hothorn@R-project.org", comment = c(ORCID = "0000-0001-8301-0471")), person(given = "Achim", family = "Zeileis", role = c("aut", "cre"), email = "Achim.Zeileis@R-project.org", comment = c(ORCID = "0000-0003-0918-3766")), person(given = c("Richard", "W."), family = "Farebrother", role = "aut", comment = "pan.f"), person(given = "Clint", family = "Cummins", role = "aut", comment = "pan.f"), person(given = "Giovanni", family = "Millo", role = "ctb"), person(given = "David", family = "Mitchell", role = "ctb")) Description: A collection of tests, data sets, and examples for diagnostic checking in linear regression models. Furthermore, some generic tools for inference in parametric models are provided. LazyData: yes Depends: R (>= 3.0.0), stats, zoo Suggests: car, strucchange, sandwich, dynlm, stats4, survival, AER Imports: graphics License: GPL-2 | GPL-3 NeedsCompilation: yes Packaged: 2022-03-21 20:25:17 UTC; zeileis Author: Torsten Hothorn [aut] (), Achim Zeileis [aut, cre] (), Richard W. Farebrother [aut] (pan.f), Clint Cummins [aut] (pan.f), Giovanni Millo [ctb], David Mitchell [ctb] Maintainer: Achim Zeileis Repository: CRAN Date/Publication: 2022-03-21 23:00:02 UTC lmtest/build/0000755000176200001440000000000014216157455012673 5ustar liggesuserslmtest/build/vignette.rds0000644000176200001440000000044514216157455015235 0ustar liggesusers}PN0t g~ ?@^W&VS;7z)v1l=}BbF1 wvG$%kG!Qc?U6Rxnl ! `Gӊ[绀YۼTWe߮ ꁱI_V pQA t|?iWxω*2{xBF G` M5VtM%yFu!yPn8NuԄpizelmtest/tests/0000755000176200001440000000000013715303430012723 5ustar liggesuserslmtest/tests/Examples/0000755000176200001440000000000013715303431014502 5ustar liggesuserslmtest/tests/Examples/lmtest-Ex.Rout.save0000644000176200001440000015021513723002251020173 0ustar liggesusers R version 4.0.2 (2020-06-22) -- "Taking Off Again" Copyright (C) 2020 The R Foundation for Statistical Computing Platform: x86_64-pc-linux-gnu (64-bit) R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. Type 'license()' or 'licence()' for distribution details. Natural language support but running in an English locale R is a collaborative project with many contributors. Type 'contributors()' for more information and 'citation()' on how to cite R or R packages in publications. Type 'demo()' for some demos, 'help()' for on-line help, or 'help.start()' for an HTML browser interface to help. Type 'q()' to quit R. > pkgname <- "lmtest" > source(file.path(R.home("share"), "R", "examples-header.R")) > options(warn = 1) > library('lmtest') Loading required package: zoo Attaching package: ‘zoo’ The following objects are masked from ‘package:base’: as.Date, as.Date.numeric > > base::assign(".oldSearch", base::search(), pos = 'CheckExEnv') > base::assign(".old_wd", base::getwd(), pos = 'CheckExEnv') > cleanEx() > nameEx("ChickEgg") > ### * ChickEgg > > flush(stderr()); flush(stdout()) > > ### Name: ChickEgg > ### Title: Chickens, Eggs, and Causality > ### Aliases: ChickEgg > ### Keywords: datasets > > ### ** Examples > > ## Which came first: the chicken or the egg? 
> data(ChickEgg) > ## chickens granger-cause eggs? > grangertest(egg ~ chicken, order = 3, data = ChickEgg) Granger causality test Model 1: egg ~ Lags(egg, 1:3) + Lags(chicken, 1:3) Model 2: egg ~ Lags(egg, 1:3) Res.Df Df F Pr(>F) 1 44 2 47 -3 0.5916 0.6238 > ## eggs granger-cause chickens? > grangertest(chicken ~ egg, order = 3, data = ChickEgg) Granger causality test Model 1: chicken ~ Lags(chicken, 1:3) + Lags(egg, 1:3) Model 2: chicken ~ Lags(chicken, 1:3) Res.Df Df F Pr(>F) 1 44 2 47 -3 5.405 0.002966 ** --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > > ## To perform the same tests `by hand', you can use dynlm() and waldtest(): > if(require(dynlm)) { + ## chickens granger-cause eggs? + em <- dynlm(egg ~ L(egg, 1) + L(egg, 2) + L(egg, 3), data = ChickEgg) + em2 <- update(em, . ~ . + L(chicken, 1) + L(chicken, 2) + L(chicken, 3)) + waldtest(em, em2) + + ## eggs granger-cause chickens? + cm <- dynlm(chicken ~ L(chicken, 1) + L(chicken, 2) + L(chicken, 3), data = ChickEgg) + cm2 <- update(cm, . ~ . + L(egg, 1) + L(egg, 2) + L(egg, 3)) + waldtest(cm, cm2) + } Loading required package: dynlm Wald test Model 1: chicken ~ L(chicken, 1) + L(chicken, 2) + L(chicken, 3) Model 2: chicken ~ L(chicken, 1) + L(chicken, 2) + L(chicken, 3) + L(egg, 1) + L(egg, 2) + L(egg, 3) Res.Df Df F Pr(>F) 1 47 2 44 3 5.405 0.002966 ** --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > > > > cleanEx() detaching ‘package:dynlm’ > nameEx("Mandible") > ### * Mandible > > flush(stderr()); flush(stdout()) > > ### Name: Mandible > ### Title: Mandible Data > ### Aliases: Mandible > ### Keywords: datasets > > ### ** Examples > > data(Mandible) > lm(length ~ age, data=Mandible, subset=(age <= 28)) Call: lm(formula = length ~ age, data = Mandible, subset = (age <= 28)) Coefficients: (Intercept) age -11.953 1.773 > > > > > cleanEx() > nameEx("USDistLag") > ### * USDistLag > > flush(stderr()); flush(stdout()) > > ### Name: USDistLag > ### Title: US Macroeconomic Data > ### Aliases: USDistLag > ### Keywords: datasets > > ### ** Examples > > ## Willam H. Greene, Econometric Analysis, 2nd Ed. > ## Chapter 7 > ## load data set, p. 221, Table 7.7 > data(USDistLag) > > ## fit distributed lag model, p.221, Example 7.8 > usdl <- na.contiguous(cbind(USDistLag, lag(USDistLag, k = -1))) > colnames(usdl) <- c("con", "gnp", "con1", "gnp1") > > ## C(t) = b0 + b1*Y(t) + b2*C(t-1) + u > fm <- lm(con ~ gnp + con1, data = usdl) > summary(fm) Call: lm(formula = con ~ gnp + con1, data = usdl) Residuals: Min 1Q Median 3Q Max -11.6127 -3.8405 -0.8536 4.3857 12.6428 Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) -7.69575 11.44429 -0.672 0.510891 gnp 0.40015 0.06272 6.380 9.12e-06 *** con1 0.38073 0.09479 4.017 0.000996 *** --- Signif. 
codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 Residual standard error: 6.837 on 16 degrees of freedom Multiple R-squared: 0.9976, Adjusted R-squared: 0.9973 F-statistic: 3302 on 2 and 16 DF, p-value: < 2.2e-16 > vcov(fm) (Intercept) gnp con1 (Intercept) 130.9718602 -0.438682677 0.549587054 gnp -0.4386827 0.003933498 -0.005896092 con1 0.5495871 -0.005896092 0.008984392 > > > > cleanEx() > nameEx("bgtest") > ### * bgtest > > flush(stderr()); flush(stdout()) > > ### Name: bgtest > ### Title: Breusch-Godfrey Test > ### Aliases: bgtest vcov.bgtest df.residual.bgtest > ### Keywords: htest > > ### ** Examples > > > ## Generate a stationary and an AR(1) series > x <- rep(c(1, -1), 50) > > y1 <- 1 + x + rnorm(100) > > ## Perform Breusch-Godfrey test for first-order serial correlation: > bgtest(y1 ~ x) Breusch-Godfrey test for serial correlation of order up to 1 data: y1 ~ x LM test = 0.0036887, df = 1, p-value = 0.9516 > ## or for fourth-order serial correlation > bgtest(y1 ~ x, order = 4) Breusch-Godfrey test for serial correlation of order up to 4 data: y1 ~ x LM test = 3.0822, df = 4, p-value = 0.5442 > ## Compare with Durbin-Watson test results: > dwtest(y1 ~ x) Durbin-Watson test data: y1 ~ x DW = 1.9762, p-value = 0.4924 alternative hypothesis: true autocorrelation is greater than 0 > > y2 <- filter(y1, 0.5, method = "recursive") > bgtest(y2 ~ x) Breusch-Godfrey test for serial correlation of order up to 1 data: y2 ~ x LM test = 19.907, df = 1, p-value = 8.128e-06 > bg4 <- bgtest(y2 ~ x, order = 4) > bg4 Breusch-Godfrey test for serial correlation of order up to 4 data: y2 ~ x LM test = 23.687, df = 4, p-value = 9.228e-05 > coeftest(bg4) z test of coefficients: Estimate Std. Error z value Pr(>|z|) (Intercept) 0.0049353 0.0905891 0.0545 0.9566 x -0.0057380 0.0904290 -0.0635 0.9494 lag(resid)_1 0.4619715 0.1027742 4.4950 6.957e-06 *** lag(resid)_2 -0.0401917 0.1136254 -0.3537 0.7235 lag(resid)_3 -0.1030000 0.1136071 -0.9066 0.3646 lag(resid)_4 -0.1184116 0.1049020 -1.1288 0.2590 --- Signif. 
codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > > > > cleanEx() > nameEx("bondyield") > ### * bondyield > > flush(stderr()); flush(stdout()) > > ### Name: bondyield > ### Title: Bond Yield > ### Aliases: bondyield > ### Keywords: datasets > > ### ** Examples > > data(bondyield) > > ## page 134, fit Cook-Hendershott OLS model and Yawitz-Marshall OLS model > ## third and last line in Table 6.5 > > modelCH <- RAARUS ~ MOOD + EPI + EXP + RUS > lm(modelCH, data=bondyield) Call: lm(formula = modelCH, data = bondyield) Coefficients: (Intercept) MOOD EPI EXP RUS 0.525913 -0.001402 -0.005623 -0.157801 0.137761 > dwtest(modelCH, data=bondyield) Durbin-Watson test data: modelCH DW = 0.84484, p-value = 2.888e-08 alternative hypothesis: true autocorrelation is greater than 0 > ## wrong sign of RUS coefficient > > modelYM <- RAARUS ~ MOOD + Y + K > lm(modelYM, data=bondyield) Call: lm(formula = modelYM, data = bondyield) Coefficients: (Intercept) MOOD Y K -5.06477 -0.01313 0.04899 1.70792 > dwtest(modelYM, data=bondyield) Durbin-Watson test data: modelYM DW = 1.4387, p-value = 0.004938 alternative hypothesis: true autocorrelation is greater than 0 > ## coefficient of Y and K differ by factor 100 > > > ## page 135, fit test statistics in Table 6.6 b) > ################################################ > > ## Chow 1971(1) > if(require(strucchange, quietly = TRUE)) { + sctest(modelCH, point=c(1971,1), data=bondyield, type="Chow") } Chow test data: modelCH F = 13.775, p-value = 1.838e-08 > > ## Breusch-Pagan > bptest(modelCH, data=bondyield, studentize=FALSE) Breusch-Pagan test data: modelCH BP = 2.0667, df = 4, p-value = 0.7235 > bptest(modelCH, data=bondyield) studentized Breusch-Pagan test data: modelCH BP = 2.9784, df = 4, p-value = 0.5614 > > ## Fluctuation test > if(require(strucchange, quietly = TRUE)) { + sctest(modelCH, type="fluctuation", data=bondyield, rescale=FALSE)} RE test (recursive estimates test) data: modelCH RE = 17.758, p-value < 2.2e-16 > > ## RESET > reset(modelCH, data=bondyield) RESET test data: modelCH RESET = 5.349, df1 = 2, df2 = 53, p-value = 0.007655 > reset(modelCH, power=2, type="regressor", data=bondyield) RESET test data: modelCH RESET = 4.7726, df1 = 4, df2 = 51, p-value = 0.002393 > reset(modelCH, type="princomp", data=bondyield) RESET test data: modelCH RESET = 9.3371, df1 = 2, df2 = 53, p-value = 0.0003359 > > ## Harvey-Collier > harvtest(modelCH, order.by= ~ MOOD, data=bondyield) Harvey-Collier test data: modelCH HC = 2.3951, df = 54, p-value = 0.02012 > harvtest(modelCH, order.by= ~ EPI, data=bondyield) Harvey-Collier test data: modelCH HC = 0.17075, df = 54, p-value = 0.8651 > harvtest(modelCH, order.by= ~ EXP, data=bondyield) Harvey-Collier test data: modelCH HC = 0.87128, df = 54, p-value = 0.3875 > harvtest(modelCH, order.by= ~ RUS, data=bondyield) Harvey-Collier test data: modelCH HC = 1.5877, df = 54, p-value = 0.1182 > > ## Rainbow > raintest(modelCH, order.by = "mahalanobis", data=bondyield) Rainbow test data: modelCH Rain = 1.702, df1 = 30, df2 = 25, p-value = 0.08919 > > > ## page 136, fit test statistics in Table 6.6 d) > ################################################ > > ## Chow 1966(1) > if(require(strucchange, quietly = TRUE)) { + sctest(modelYM, point=c(1965,4), data=bondyield, type="Chow") } Chow test data: modelYM F = 2.8266, p-value = 0.03388 > > ## Fluctuation test > if(require(strucchange, quietly = TRUE)) { + sctest(modelYM, type="fluctuation", data=bondyield, rescale=FALSE) } RE test (recursive estimates test) data: modelYM RE = 19.733, 
p-value < 2.2e-16 > > ## RESET > reset(modelYM, data=bondyield) RESET test data: modelYM RESET = 2.1436, df1 = 2, df2 = 54, p-value = 0.1271 > reset(modelYM, power=2, type="regressor", data=bondyield) RESET test data: modelYM RESET = 4.1716, df1 = 3, df2 = 53, p-value = 0.01003 > reset(modelYM, type="princomp", data=bondyield) RESET test data: modelYM RESET = 1.9931, df1 = 2, df2 = 54, p-value = 0.1462 > > ## Harvey-Collier > harvtest(modelYM, order.by= ~ MOOD, data=bondyield) Harvey-Collier test data: modelYM HC = 0.24869, df = 55, p-value = 0.8045 > harvtest(modelYM, order.by= ~ Y, data=bondyield) Harvey-Collier test data: modelYM HC = 2.1227, df = 55, p-value = 0.0383 > harvtest(modelYM, order.by= ~ K, data=bondyield) Harvey-Collier test data: modelYM HC = 0.026056, df = 55, p-value = 0.9793 > > ## Rainbow > raintest(modelYM, order.by = "mahalanobis", data=bondyield) Rainbow test data: modelYM Rain = 1.2978, df1 = 30, df2 = 26, p-value = 0.2515 > > > > cleanEx() detaching ‘package:strucchange’, ‘package:sandwich’ > nameEx("bptest") > ### * bptest > > flush(stderr()); flush(stdout()) > > ### Name: bptest > ### Title: Breusch-Pagan Test > ### Aliases: bptest > ### Keywords: htest > > ### ** Examples > > ## generate a regressor > x <- rep(c(-1,1), 50) > ## generate heteroskedastic and homoskedastic disturbances > err1 <- rnorm(100, sd=rep(c(1,2), 50)) > err2 <- rnorm(100) > ## generate a linear relationship > y1 <- 1 + x + err1 > y2 <- 1 + x + err2 > ## perform Breusch-Pagan test > bptest(y1 ~ x) studentized Breusch-Pagan test data: y1 ~ x BP = 11.099, df = 1, p-value = 0.0008635 > bptest(y2 ~ x) studentized Breusch-Pagan test data: y2 ~ x BP = 0.11809, df = 1, p-value = 0.7311 > > > > cleanEx() > nameEx("coeftest") > ### * coeftest > > flush(stderr()); flush(stdout()) > > ### Name: coeftest > ### Title: Inference for Estimated Coefficients > ### Aliases: coeftest coefci coeftest.default coeftest.survreg coeftest.glm > ### coeftest.mlm coeftest.breakpointsfull print.coeftest confint.coeftest > ### coef.coeftest df.residual.coeftest nobs.coeftest logLik.coeftest > ### coefci.default coefci.survreg coefci.glm coefci.mlm > ### Keywords: htest > > ### ** Examples > > ## load data and fit model > data("Mandible", package = "lmtest") > fm <- lm(length ~ age, data = Mandible, subset=(age <= 28)) > > ## the following commands lead to the same tests: > summary(fm) Call: lm(formula = length ~ age, data = Mandible, subset = (age <= 28)) Residuals: Min 1Q Median 3Q Max -9.2013 -1.6592 -0.1217 1.3420 6.4351 Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) -11.9534 0.9762 -12.24 <2e-16 *** age 1.7727 0.0477 37.16 <2e-16 *** --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 Residual standard error: 2.373 on 156 degrees of freedom Multiple R-squared: 0.8985, Adjusted R-squared: 0.8978 F-statistic: 1381 on 1 and 156 DF, p-value: < 2.2e-16 > (ct <- coeftest(fm)) t test of coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) -11.953366 0.976227 -12.245 < 2.2e-16 *** age 1.772730 0.047704 37.161 < 2.2e-16 *** --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > > ## a z test (instead of a t test) can be performed by > coeftest(fm, df = Inf) z test of coefficients: Estimate Std. Error z value Pr(>|z|) (Intercept) -11.953366 0.976227 -12.245 < 2.2e-16 *** age 1.772730 0.047704 37.161 < 2.2e-16 *** --- Signif. 
codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > > ## corresponding confidence intervals > confint(ct) 2.5 % 97.5 % (Intercept) -13.88169 -10.02504 age 1.67850 1.86696 > coefci(fm) 2.5 % 97.5 % (Intercept) -13.88169 -10.02504 age 1.67850 1.86696 > ## which in this simple case is equivalent to > confint(fm) 2.5 % 97.5 % (Intercept) -13.88169 -10.02504 age 1.67850 1.86696 > > ## extract further model information either from > ## the original model or from the coeftest output > nobs(fm) [1] 158 > nobs(ct) [1] 158 > logLik(fm) 'log Lik.' -359.7029 (df=3) > logLik(ct) 'log Lik.' -359.7029 (df=3) > AIC(fm, ct) df AIC fm 3 725.4059 ct 3 725.4059 > BIC(fm, ct) df BIC fm 3 734.5937 ct 3 734.5937 > > if(require("sandwich")) { + ## a different covariance matrix can be also used: + (ct <- coeftest(fm, df = Inf, vcov = vcovHC)) + + ## the corresponding confidence interval can be computed either as + confint(ct) + ## or based on the original model + coefci(fm, df = Inf, vcov = vcovHC) + + ## note that the degrees of freedom _actually used_ can be extracted + df.residual(ct) + ## which differ here from + df.residual(fm) + + ## vcov can also be supplied as a function with additional arguments + coeftest(fm, df = Inf, vcov = vcovHC, type = "HC0") + ## or as a matrix + coeftest(fm, df = Inf, vcov = vcovHC(fm, type = "HC0")) + } Loading required package: sandwich z test of coefficients: Estimate Std. Error z value Pr(>|z|) (Intercept) -11.953366 1.009817 -11.837 < 2.2e-16 *** age 1.772730 0.054343 32.621 < 2.2e-16 *** --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > > > > cleanEx() detaching ‘package:sandwich’ > nameEx("coxtest") > ### * coxtest > > flush(stderr()); flush(stdout()) > > ### Name: coxtest > ### Title: Cox Test for Comparing Non-Nested Models > ### Aliases: coxtest > ### Keywords: htest > > ### ** Examples > > ## Fit two competing, non-nested models for aggregate > ## consumption, as in Greene (1993), Examples 7.11 and 7.12 > > ## load data and compute lags > data(USDistLag) > usdl <- na.contiguous(cbind(USDistLag, lag(USDistLag, k = -1))) > colnames(usdl) <- c("con", "gnp", "con1", "gnp1") > > ## C(t) = a0 + a1*Y(t) + a2*C(t-1) + u > fm1 <- lm(con ~ gnp + con1, data = usdl) > > ## C(t) = b0 + b1*Y(t) + b2*Y(t-1) + v > fm2 <- lm(con ~ gnp + gnp1, data = usdl) > > ## Cox test in both directions: > coxtest(fm1, fm2) Cox test Model 1: con ~ gnp + con1 Model 2: con ~ gnp + gnp1 Estimate Std. Error z value Pr(>|z|) fitted(M1) ~ M2 2.8543 1.29978 2.1960 0.02809 * fitted(M2) ~ M1 -4.4003 0.78961 -5.5727 2.508e-08 *** --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > > ## ...and do the same for jtest() and encomptest(). > ## Notice that in this particular case they are coincident. > jtest(fm1, fm2) J test Model 1: con ~ gnp + con1 Model 2: con ~ gnp + gnp1 Estimate Std. Error t value Pr(>|t|) M1 + fitted(M2) -2.7041 0.76273 -3.5454 0.0029371 ** M2 + fitted(M1) 2.7436 0.52710 5.2051 0.0001067 *** --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > encomptest(fm1, fm2) Encompassing test Model 1: con ~ gnp + con1 Model 2: con ~ gnp + gnp1 Model E: con ~ gnp + con1 + gnp1 Res.Df Df F Pr(>F) M1 vs. ME 15 -1 12.569 0.0029371 ** M2 vs. ME 15 -1 27.093 0.0001067 *** --- Signif. 
codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > > > > cleanEx() > nameEx("currencysubstitution") > ### * currencysubstitution > > flush(stderr()); flush(stdout()) > > ### Name: currencysubstitution > ### Title: Currency Substitution > ### Aliases: currencysubstitution > ### Keywords: datasets > > ### ** Examples > > data(currencysubstitution) > > ## page 130, fit Miles OLS model and Bordo-Choudri OLS model > ## third and last line in Table 6.3 > > modelMiles <- logCUS ~ log((1+Iu)/(1+Ic)) > lm(modelMiles, data=currencysubstitution) Call: lm(formula = modelMiles, data = currencysubstitution) Coefficients: (Intercept) log((1 + Iu)/(1 + Ic)) 2.562 5.871 > dwtest(modelMiles, data=currencysubstitution) Durbin-Watson test data: modelMiles DW = 0.27024, p-value < 2.2e-16 alternative hypothesis: true autocorrelation is greater than 0 > > modelBordoChoudri <- logCUS ~ I(Iu-Ic) + Ic + logY > lm(modelBordoChoudri, data=currencysubstitution) Call: lm(formula = modelBordoChoudri, data = currencysubstitution) Coefficients: (Intercept) I(Iu - Ic) Ic logY -5.9782 -9.2158 -18.4563 0.8366 > dwtest(modelBordoChoudri, data=currencysubstitution) Durbin-Watson test data: modelBordoChoudri DW = 0.53514, p-value = 2.261e-13 alternative hypothesis: true autocorrelation is greater than 0 > > > ## page 131, fit test statistics in Table 6.4 b) > ################################################ > > if(require(strucchange, quietly = TRUE)) { + ## Fluctuation test + sctest(modelMiles, type="fluctuation", data=currencysubstitution, + rescale=FALSE) } RE test (recursive estimates test) data: modelMiles RE = 3.1591, p-value = 8.583e-09 > > ## RESET > reset(modelMiles, data=currencysubstitution) RESET test data: modelMiles RESET = 1.1308, df1 = 2, df2 = 57, p-value = 0.3299 > reset(modelMiles, power=2, type="regressor", data=currencysubstitution) RESET test data: modelMiles RESET = 2.1078, df1 = 1, df2 = 58, p-value = 0.1519 > reset(modelMiles, type="princomp", data=currencysubstitution) RESET test data: modelMiles RESET = 1.1308, df1 = 2, df2 = 57, p-value = 0.3299 > > ## Harvey-Collier > harvtest(modelMiles, order.by = ~log((1+Iu)/(1+Ic)), data=currencysubstitution) Harvey-Collier test data: modelMiles HC = 0.86911, df = 58, p-value = 0.3884 > > ## Rainbow > raintest(modelMiles, order.by = "mahalanobis", data=currencysubstitution) Rainbow test data: modelMiles Rain = 1.766, df1 = 31, df2 = 28, p-value = 0.06603 > > > ## page 132, fit test statistics in Table 6.4 d) > ################################################ > > if(require(strucchange, quietly = TRUE)) { + ## Chow 1970(2) + sctest(modelBordoChoudri, point=c(1970,2), data=currencysubstitution, + type="Chow") } Chow test data: modelBordoChoudri F = 14.572, p-value = 4.317e-08 > > ## Breusch-Pagan > bptest(modelBordoChoudri, data=currencysubstitution, studentize=FALSE) Breusch-Pagan test data: modelBordoChoudri BP = 14.514, df = 3, p-value = 0.002283 > bptest(modelBordoChoudri, data=currencysubstitution) studentized Breusch-Pagan test data: modelBordoChoudri BP = 10.736, df = 3, p-value = 0.01324 > > ## RESET > reset(modelBordoChoudri, data=currencysubstitution) RESET test data: modelBordoChoudri RESET = 0.69314, df1 = 2, df2 = 55, p-value = 0.5043 > reset(modelBordoChoudri, power=2, type="regressor", data=currencysubstitution) RESET test data: modelBordoChoudri RESET = 6.6775, df1 = 3, df2 = 54, p-value = 0.0006458 > reset(modelBordoChoudri, type="princomp", data=currencysubstitution) RESET test data: modelBordoChoudri RESET = 8.2945, df1 = 2, df2 = 55, 
p-value = 0.0007107 > > ## Harvey-Collier > harvtest(modelBordoChoudri, order.by = ~ I(Iu-Ic), data=currencysubstitution) Harvey-Collier test data: modelBordoChoudri HC = 0.75539, df = 56, p-value = 0.4532 > harvtest(modelBordoChoudri, order.by = ~ Ic, data=currencysubstitution) Harvey-Collier test data: modelBordoChoudri HC = 0.40259, df = 56, p-value = 0.6888 > harvtest(modelBordoChoudri, order.by = ~ logY, data=currencysubstitution) Harvey-Collier test data: modelBordoChoudri HC = 2.5016, df = 56, p-value = 0.01531 > > ## Rainbow > raintest(modelBordoChoudri, order.by = "mahalanobis", data=currencysubstitution) Rainbow test data: modelBordoChoudri Rain = 3.7711, df1 = 31, df2 = 26, p-value = 0.0004604 > > > > cleanEx() detaching ‘package:strucchange’, ‘package:sandwich’ > nameEx("dwtest") > ### * dwtest > > flush(stderr()); flush(stdout()) > > ### Name: dwtest > ### Title: Durbin-Watson Test > ### Aliases: dwtest > ### Keywords: htest > > ### ** Examples > > > ## generate two AR(1) error terms with parameter > ## rho = 0 (white noise) and rho = 0.9 respectively > err1 <- rnorm(100) > > ## generate regressor and dependent variable > x <- rep(c(-1,1), 50) > y1 <- 1 + x + err1 > > ## perform Durbin-Watson test > dwtest(y1 ~ x) Durbin-Watson test data: y1 ~ x DW = 1.9762, p-value = 0.4924 alternative hypothesis: true autocorrelation is greater than 0 > > err2 <- filter(err1, 0.9, method="recursive") > y2 <- 1 + x + err2 > dwtest(y2 ~ x) Durbin-Watson test data: y2 ~ x DW = 0.45961, p-value = 7.862e-15 alternative hypothesis: true autocorrelation is greater than 0 > > > > > cleanEx() > nameEx("encomptest") > ### * encomptest > > flush(stderr()); flush(stdout()) > > ### Name: encomptest > ### Title: Encompassing Test for Comparing Non-Nested Models > ### Aliases: encomptest > ### Keywords: htest > > ### ** Examples > > ## Fit two competing, non-nested models for aggregate > ## consumption, as in Greene (1993), Examples 7.11 and 7.12 > > ## load data and compute lags > data(USDistLag) > usdl <- na.contiguous(cbind(USDistLag, lag(USDistLag, k = -1))) > colnames(usdl) <- c("con", "gnp", "con1", "gnp1") > > ## C(t) = a0 + a1*Y(t) + a2*C(t-1) + u > fm1 <- lm(con ~ gnp + con1, data = usdl) > > ## C(t) = b0 + b1*Y(t) + b2*Y(t-1) + v > fm2 <- lm(con ~ gnp + gnp1, data = usdl) > > ## Encompassing model > fm3 <- lm(con ~ gnp + con1 + gnp1, data = usdl) > > ## Cox test in both directions: > coxtest(fm1, fm2) Cox test Model 1: con ~ gnp + con1 Model 2: con ~ gnp + gnp1 Estimate Std. Error z value Pr(>|z|) fitted(M1) ~ M2 2.8543 1.29978 2.1960 0.02809 * fitted(M2) ~ M1 -4.4003 0.78961 -5.5727 2.508e-08 *** --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > > ## ...and do the same for jtest() and encomptest(). > ## Notice that in this particular case they are coincident. > jtest(fm1, fm2) J test Model 1: con ~ gnp + con1 Model 2: con ~ gnp + gnp1 Estimate Std. Error t value Pr(>|t|) M1 + fitted(M2) -2.7041 0.76273 -3.5454 0.0029371 ** M2 + fitted(M1) 2.7436 0.52710 5.2051 0.0001067 *** --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > encomptest(fm1, fm2) Encompassing test Model 1: con ~ gnp + con1 Model 2: con ~ gnp + gnp1 Model E: con ~ gnp + con1 + gnp1 Res.Df Df F Pr(>F) M1 vs. ME 15 -1 12.569 0.0029371 ** M2 vs. ME 15 -1 27.093 0.0001067 *** --- Signif. 
codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
> 
> ## the encompassing test is essentially
> waldtest(fm1, fm3, fm2)
Wald test
Model 1: con ~ gnp + con1
Model 2: con ~ gnp + con1 + gnp1
Model 3: con ~ gnp + gnp1
  Res.Df Df      F    Pr(>F)    
1     16                        
2     15  1 12.569 0.0029371 ** 
3     16 -1 27.093 0.0001067 ***
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
> 
> 
> 
> cleanEx()
> nameEx("ftemp")
> ### * ftemp
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: ftemp
> ### Title: Female Temperature Data
> ### Aliases: ftemp
> ### Keywords: datasets
> 
> ### ** Examples
> 
> data(ftemp)
> plot(ftemp)
> y <- window(ftemp, start = 8, end = 60)
> if(require(strucchange)) {
+ bp <- breakpoints(y ~ 1)
+ plot(bp)
+ fm.seg <- lm(y ~ 0 + breakfactor(bp))
+ plot(y)
+ lines(8:60, fitted(fm.seg), col = 4)
+ lines(confint(bp))
+ }
Loading required package: strucchange
Loading required package: sandwich
> 
> 
> 
> 
> cleanEx()
detaching ‘package:strucchange’, ‘package:sandwich’
> nameEx("gqtest")
> ### * gqtest
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: gqtest
> ### Title: Goldfeld-Quandt Test
> ### Aliases: gqtest
> ### Keywords: htest
> 
> ### ** Examples
> 
> ## generate a regressor
> x <- rep(c(-1,1), 50)
> ## generate heteroskedastic and homoskedastic disturbances
> err1 <- c(rnorm(50, sd=1), rnorm(50, sd=2))
> err2 <- rnorm(100)
> ## generate a linear relationship
> y1 <- 1 + x + err1
> y2 <- 1 + x + err2
> ## perform Goldfeld-Quandt test
> gqtest(y1 ~ x)
Goldfeld-Quandt test
data: y1 ~ x
GQ = 5.4699, df1 = 48, df2 = 48, p-value = 1.404e-08
alternative hypothesis: variance increases from segment 1 to 2
> gqtest(y2 ~ x)
Goldfeld-Quandt test
data: y2 ~ x
GQ = 1.2638, df1 = 48, df2 = 48, p-value = 0.2102
alternative hypothesis: variance increases from segment 1 to 2
> 
> 
> 
> cleanEx()
> nameEx("grangertest")
> ### * grangertest
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: grangertest
> ### Title: Test for Granger Causality
> ### Aliases: grangertest grangertest.default grangertest.formula
> ### Keywords: htest
> 
> ### ** Examples
> 
> ## Which came first: the chicken or the egg?
> data(ChickEgg)
> grangertest(egg ~ chicken, order = 3, data = ChickEgg)
Granger causality test
Model 1: egg ~ Lags(egg, 1:3) + Lags(chicken, 1:3)
Model 2: egg ~ Lags(egg, 1:3)
  Res.Df Df      F Pr(>F)
1     44                 
2     47 -3 0.5916 0.6238
> grangertest(chicken ~ egg, order = 3, data = ChickEgg)
Granger causality test
Model 1: chicken ~ Lags(chicken, 1:3) + Lags(egg, 1:3)
Model 2: chicken ~ Lags(chicken, 1:3)
  Res.Df Df     F   Pr(>F)   
1     44                     
2     47 -3 5.405 0.002966 **
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
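> 
> ## Side note (illustrative sketch, not run in this transcript): grangertest()
> ## reports a Wald test on the lags of the other series, so the egg ~ chicken
> ## result above could in principle be reproduced by building the lagged
> ## regressors by hand (column order chicken, egg assumed for ChickEgg):
> ##   ce <- as.data.frame(embed(as.matrix(ChickEgg), 4))
> ##   colnames(ce) <- paste0(rep(c("chicken", "egg"), 4), rep(0:3, each = 2))
> ##   fm.u <- lm(egg0 ~ egg1 + egg2 + egg3 + chicken1 + chicken2 + chicken3, data = ce)
> ##   fm.r <- lm(egg0 ~ egg1 + egg2 + egg3, data = ce)
> ##   waldtest(fm.u, fm.r)   # F statistic should agree with the first test above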
> 
> ## alternative ways of specifying the same test
> grangertest(ChickEgg, order = 3)
Granger causality test
Model 1: egg ~ Lags(egg, 1:3) + Lags(chicken, 1:3)
Model 2: egg ~ Lags(egg, 1:3)
  Res.Df Df      F Pr(>F)
1     44                 
2     47 -3 0.5916 0.6238
> grangertest(ChickEgg[, 1], ChickEgg[, 2], order = 3)
Granger causality test
Model 1: ChickEgg[, 2] ~ Lags(ChickEgg[, 2], 1:3) + Lags(ChickEgg[, 1], 1:3)
Model 2: ChickEgg[, 2] ~ Lags(ChickEgg[, 2], 1:3)
  Res.Df Df      F Pr(>F)
1     44                 
2     47 -3 0.5916 0.6238
> 
> 
> 
> cleanEx()
> nameEx("growthofmoney")
> ### * growthofmoney
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: growthofmoney
> ### Title: Growth of Money Supply
> ### Aliases: growthofmoney
> ### Keywords: datasets
> 
> ### ** Examples
> 
> data(growthofmoney)
> 
> ## page 137, fit Hetzel OLS model
> ## first/second line in Table 6.7
> 
> modelHetzel <- TG1.TG0 ~ AG0.TG0
> lm(modelHetzel, data=growthofmoney)
Call:
lm(formula = modelHetzel, data = growthofmoney)
Coefficients:
(Intercept)      AG0.TG0  
   0.007322     0.324858  
> dwtest(modelHetzel, data=growthofmoney)
Durbin-Watson test
data: modelHetzel
DW = 2.9046, p-value = 0.9839
alternative hypothesis: true autocorrelation is greater than 0
> 
> 
> ## page 135, fit test statistics in Table 6.8
> #############################################
> 
> if(require(strucchange, quietly = TRUE)) {
+ ## Chow 1974(1)
+ sctest(modelHetzel, point=c(1973,4), data=growthofmoney, type="Chow") }
Chow test
data: modelHetzel
F = 0.37876, p-value = 0.6911
> 
> ## RESET
> reset(modelHetzel, data=growthofmoney)
RESET test
data: modelHetzel
RESET = 7.9337, df1 = 2, df2 = 15, p-value = 0.004461
> reset(modelHetzel, power=2, type="regressor", data=growthofmoney)
RESET test
data: modelHetzel
RESET = 1.5265, df1 = 1, df2 = 16, p-value = 0.2345
> reset(modelHetzel, type="princomp", data=growthofmoney)
RESET test
data: modelHetzel
RESET = 7.9337, df1 = 2, df2 = 15, p-value = 0.004461
> 
> ## Harvey-Collier
> harvtest(modelHetzel, order.by= ~ AG0.TG0, data=growthofmoney)
Harvey-Collier test
data: modelHetzel
HC = 3.7768, df = 16, p-value = 0.001651
> 
> ## Rainbow
> raintest(modelHetzel, order.by = "mahalanobis", data=growthofmoney)
Rainbow test
data: modelHetzel
Rain = 7.1731, df1 = 10, df2 = 7, p-value = 0.007924
> 
> 
> ## Identification of outliers
> #############################
> 
> ## Figure 6.1
> plot(modelHetzel, data=growthofmoney)
> abline(v=0)
> abline(h=0)
> abline(coef(lm(modelHetzel, data=growthofmoney)), col=2)
> 
> ## Table 6.7, last line
> growthofmoney2 <- as.data.frame(growthofmoney[-c(5:6),])
> lm(modelHetzel, data=growthofmoney2)
Call:
lm(formula = modelHetzel, data = growthofmoney2)
Coefficients:
(Intercept)      AG0.TG0  
    0.05673      0.17752  
> dwtest(modelHetzel, data=growthofmoney2)
Durbin-Watson test
data: modelHetzel
DW = 1.5019, p-value = 0.1351
alternative hypothesis: true autocorrelation is greater than 0
> 
> 
> 
> cleanEx()
detaching ‘package:strucchange’, ‘package:sandwich’
> nameEx("harvtest")
> ### * harvtest
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: harvtest
> ### Title: Harvey-Collier Test
> ### Aliases: harvtest
> ### Keywords: htest
> 
> ### ** Examples
> 
> # generate a regressor and dependent variable
> x <- 1:50
> y1 <- 1 + x + rnorm(50)
> y2 <- y1 + 0.3*x^2
> 
> ## perform Harvey-Collier test
> harv <- harvtest(y1 ~ x)
> harv
Harvey-Collier test
data: y1 ~ x
HC = 0.5401, df = 47, p-value = 0.5917
> ## calculate critical value for 0.05 level
> qt(0.95, harv$parameter)
[1] 1.677927
> harvtest(y2 ~ x)
Harvey-Collier test
data: y2 ~ x
HC = 7.9751, df = 47, p-value = 2.775e-10
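> ## Side note (illustrative sketch, not run in this transcript): the HC value
> ## is essentially a one-sample t statistic for a zero mean of the recursive
> ## residuals of the fitted model. Assuming the strucchange package (attached
> ## in other examples above) is available, the first result could be checked via
> ##   rr <- recresid(y1 ~ x)   # recursive residuals of lm(y1 ~ x)
> ##   t.test(rr)               # t statistic should match HC up to sign, df = length(rr) - 1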
> 
> 
> 
> cleanEx()
> nameEx("hmctest")
> ### * hmctest
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: hmctest
> ### Title: Harrison-McCabe test
> ### Aliases: hmctest
> ### Keywords: htest
> 
> ### ** Examples
> 
> ## generate a regressor
> x <- rep(c(-1,1), 50)
> ## generate heteroskedastic and homoskedastic disturbances
> err1 <- c(rnorm(50, sd=1), rnorm(50, sd=2))
> err2 <- rnorm(100)
> ## generate a linear relationship
> y1 <- 1 + x + err1
> y2 <- 1 + x + err2
> ## perform Harrison-McCabe test
> hmctest(y1 ~ x)
Harrison-McCabe test
data: y1 ~ x
HMC = 0.15542, p-value < 2.2e-16
> hmctest(y2 ~ x)
Harrison-McCabe test
data: y2 ~ x
HMC = 0.44287, p-value = 0.225
> 
> 
> 
> cleanEx()
> nameEx("jocci")
> ### * jocci
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: jocci
> ### Title: U.S. Macroeconomic Time Series
> ### Aliases: fyff gmdc ip jocci lhur pw561
> ### Keywords: datasets
> 
> ### ** Examples
> 
> data(jocci)
> 
> dwtest(dy ~ 1, data = jocci)
Durbin-Watson test
data: dy ~ 1
DW = 1.0581, p-value < 2.2e-16
alternative hypothesis: true autocorrelation is greater than 0
> bgtest(dy ~ 1, data = jocci)
Breusch-Godfrey test for serial correlation of order up to 1
data: dy ~ 1
LM test = 91.037, df = 1, p-value < 2.2e-16
> ar6.model <- dy ~ dy1 + dy2 + dy3 + dy4 + dy5 + dy6
> bgtest(ar6.model, data = jocci)
Breusch-Godfrey test for serial correlation of order up to 1
data: ar6.model
LM test = 0.19999, df = 1, p-value = 0.6547
> 
> var.model <- ~ I(dy1^2) + I(dy2^2) + I(dy3^2) + I(dy4^2) + I(dy5^2) + I(dy6^2)
> bptest(ar6.model, var.model, data = jocci)
studentized Breusch-Pagan test
data: ar6.model
BP = 22.377, df = 6, p-value = 0.001034
> 
> 
> 
> cleanEx()
> nameEx("jtest")
> ### * jtest
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: jtest
> ### Title: J Test for Comparing Non-Nested Models
> ### Aliases: jtest
> ### Keywords: htest
> 
> ### ** Examples
> 
> ## Fit two competing, non-nested models for aggregate
> ## consumption, as in Greene (1993), Examples 7.11 and 7.12
> 
> ## load data and compute lags
> data(USDistLag)
> usdl <- na.contiguous(cbind(USDistLag, lag(USDistLag, k = -1)))
> colnames(usdl) <- c("con", "gnp", "con1", "gnp1")
> 
> ## C(t) = a0 + a1*Y(t) + a2*C(t-1) + u
> fm1 <- lm(con ~ gnp + con1, data = usdl)
> 
> ## C(t) = b0 + b1*Y(t) + b2*Y(t-1) + v
> fm2 <- lm(con ~ gnp + gnp1, data = usdl)
> 
> ## Cox test in both directions:
> coxtest(fm1, fm2)
Cox test
Model 1: con ~ gnp + con1
Model 2: con ~ gnp + gnp1
                Estimate Std. Error z value  Pr(>|z|)    
fitted(M1) ~ M2   2.8543    1.29978  2.1960   0.02809 *  
fitted(M2) ~ M1  -4.4003    0.78961 -5.5727 2.508e-08 ***
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
> 
> ## ...and do the same for jtest() and encomptest().
> ## Notice that in this particular case they are coincident.
> jtest(fm1, fm2)
J test
Model 1: con ~ gnp + con1
Model 2: con ~ gnp + gnp1
                Estimate Std. Error t value  Pr(>|t|)    
M1 + fitted(M2)  -2.7041    0.76273 -3.5454 0.0029371 ** 
M2 + fitted(M1)   2.7436    0.52710  5.2051 0.0001067 ***
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
> encomptest(fm1, fm2)
Encompassing test
Model 1: con ~ gnp + con1
Model 2: con ~ gnp + gnp1
Model E: con ~ gnp + con1 + gnp1
          Res.Df Df      F    Pr(>F)    
M1 vs. ME     15 -1 12.569 0.0029371 ** 
M2 vs. ME     15 -1 27.093 0.0001067 ***
---
Signif.
codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > > > > cleanEx() > nameEx("lrtest") > ### * lrtest > > flush(stderr()); flush(stdout()) > > ### Name: lrtest > ### Title: Likelihood Ratio Test of Nested Models > ### Aliases: lrtest lrtest.formula lrtest.default > ### Keywords: htest > > ### ** Examples > > ## with data from Greene (1993): > ## load data and compute lags > data("USDistLag") > usdl <- na.contiguous(cbind(USDistLag, lag(USDistLag, k = -1))) > colnames(usdl) <- c("con", "gnp", "con1", "gnp1") > > fm1 <- lm(con ~ gnp + gnp1, data = usdl) > fm2 <- lm(con ~ gnp + con1 + gnp1, data = usdl) > > ## various equivalent specifications of the LR test > lrtest(fm2, fm1) Likelihood ratio test Model 1: con ~ gnp + con1 + gnp1 Model 2: con ~ gnp + gnp1 #Df LogLik Df Chisq Pr(>Chisq) 1 5 -56.069 2 4 -65.871 -1 19.605 9.524e-06 *** --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > lrtest(fm2, 2) Likelihood ratio test Model 1: con ~ gnp + con1 + gnp1 Model 2: con ~ gnp + gnp1 #Df LogLik Df Chisq Pr(>Chisq) 1 5 -56.069 2 4 -65.871 -1 19.605 9.524e-06 *** --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > lrtest(fm2, "con1") Likelihood ratio test Model 1: con ~ gnp + con1 + gnp1 Model 2: con ~ gnp + gnp1 #Df LogLik Df Chisq Pr(>Chisq) 1 5 -56.069 2 4 -65.871 -1 19.605 9.524e-06 *** --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > lrtest(fm2, . ~ . - con1) Likelihood ratio test Model 1: con ~ gnp + con1 + gnp1 Model 2: con ~ gnp + gnp1 #Df LogLik Df Chisq Pr(>Chisq) 1 5 -56.069 2 4 -65.871 -1 19.605 9.524e-06 *** --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > > > > cleanEx() > nameEx("moneydemand") > ### * moneydemand > > flush(stderr()); flush(stdout()) > > ### Name: moneydemand > ### Title: Money Demand > ### Aliases: moneydemand > ### Keywords: datasets > > ### ** Examples > > data(moneydemand) > moneydemand <- window(moneydemand, start=1880, end=1972) > > ## page 125, fit Allen OLS model (and Durbin-Watson test), > ## last line in Table 6.1 > > modelAllen <- logM ~ logYp + Rs + Rl + logSpp > lm(modelAllen, data = moneydemand) Call: lm(formula = modelAllen, data = moneydemand) Coefficients: (Intercept) logYp Rs Rl logSpp -15.48806 1.61700 -0.01885 -0.08354 0.08421 > dwtest(modelAllen, data = moneydemand) Durbin-Watson test data: modelAllen DW = 0.18142, p-value < 2.2e-16 alternative hypothesis: true autocorrelation is greater than 0 > > ## page 127, fit test statistics in Table 6.1 c) > ################################################ > > ## Breusch-Pagan > bptest(modelAllen, studentize = FALSE, data = moneydemand) Breusch-Pagan test data: modelAllen BP = 13.342, df = 4, p-value = 0.009719 > bptest(modelAllen, studentize = TRUE, data = moneydemand) studentized Breusch-Pagan test data: modelAllen BP = 17.069, df = 4, p-value = 0.001874 > > ## RESET > reset(modelAllen, data = moneydemand) RESET test data: modelAllen RESET = 92.749, df1 = 2, df2 = 86, p-value < 2.2e-16 > reset(modelAllen, power = 2, type = "regressor", data = moneydemand) RESET test data: modelAllen RESET = 68.197, df1 = 4, df2 = 84, p-value < 2.2e-16 > reset(modelAllen, type = "princomp", data = moneydemand) RESET test data: modelAllen RESET = 1.7024, df1 = 2, df2 = 86, p-value = 0.1883 > > ## Harvey-Collier tests (up to sign of the test statistic) > harvtest(modelAllen, order.by = ~logYp, data = moneydemand) Harvey-Collier test data: modelAllen HC = 5.5579, df = 87, p-value = 2.943e-07 > harvtest(modelAllen, order.by = ~Rs, data = moneydemand) 
Harvey-Collier test data: modelAllen HC = 1.4391, df = 87, p-value = 0.1537 > harvtest(modelAllen, order.by = ~Rl, data = moneydemand) Harvey-Collier test data: modelAllen HC = 0.62177, df = 87, p-value = 0.5357 > harvtest(modelAllen, order.by = ~logSpp, data = moneydemand) Harvey-Collier test data: modelAllen HC = 0.79431, df = 87, p-value = 0.4292 > > ## Rainbow test > raintest(modelAllen, order.by = "mahalanobis", data = moneydemand) Rainbow test data: modelAllen Rain = 1.1387, df1 = 47, df2 = 41, p-value = 0.3374 > > > if(require(strucchange, quietly = TRUE)) { + ## Chow (1913) + sctest(modelAllen, point=c(1913,1), data = moneydemand, type = "Chow") } Chow test data: modelAllen F = 58.314, p-value < 2.2e-16 > > if(require(strucchange, quietly = TRUE)) { + ## Fluctuation + sctest(modelAllen, type = "fluctuation", rescale = FALSE, data = moneydemand)} RE test (recursive estimates test) data: modelAllen RE = 9.5229, p-value < 2.2e-16 > > > > cleanEx() detaching ‘package:strucchange’, ‘package:sandwich’ > nameEx("petest") > ### * petest > > flush(stderr()); flush(stdout()) > > ### Name: petest > ### Title: PE Test for Linear vs. Log-Linear Specifications > ### Aliases: petest > ### Keywords: htest > > ### ** Examples > > if(require("AER")) { + ## Verbeek (2004), Section 3 + data("HousePrices", package = "AER") + + ### Verbeek (2004), Table 3.3 + hp_lin <- lm(price ~ . , data = HousePrices) + summary(hp_lin) + + ### Verbeek (2004), Table 3.2 + hp_log <- update(hp_lin, log(price) ~ . - lotsize + log(lotsize)) + summary(hp_log) + + ## PE test + petest(hp_lin, hp_log) + + + ## Greene (2003), Example 9.8 + data("USMacroG", package = "AER") + + ## Greene (2003), Table 9.2 + usm_lin <- lm(m1 ~ tbill + gdp, data = USMacroG) + usm_log <- lm(log(m1) ~ log(tbill) + log(gdp), data = USMacroG) + petest(usm_lin, usm_log) + ## matches results from Greene's errata + } Loading required package: AER Loading required package: car Loading required package: carData Loading required package: sandwich Loading required package: survival PE test Model 1: m1 ~ tbill + gdp Model 2: log(m1) ~ log(tbill) + log(gdp) Estimate Std. Error t value Pr(>|t|) M1 + log(fit(M1))-fit(M2) -209.35 26.7580 -7.8240 2.9e-13 *** M2 + fit(M1)-exp(fit(M2)) 0.00 0.0003 -0.1603 0.8728 --- Signif. 
codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > > > > cleanEx() detaching ‘package:AER’, ‘package:survival’, ‘package:sandwich’, ‘package:car’, ‘package:carData’ > nameEx("raintest") > ### * raintest > > flush(stderr()); flush(stdout()) > > ### Name: raintest > ### Title: Rainbow Test > ### Aliases: raintest > ### Keywords: htest > > ### ** Examples > > x <- c(1:30) > y <- x^2 + rnorm(30,0,2) > rain <- raintest(y ~ x) > rain Rainbow test data: y ~ x Rain = 25.76, df1 = 15, df2 = 13, p-value = 3.169e-07 > ## critical value > qf(0.95, rain$parameter[1], rain$parameter[2]) [1] 2.53311 > > > > cleanEx() > nameEx("resettest") > ### * resettest > > flush(stderr()); flush(stdout()) > > ### Name: resettest > ### Title: RESET Test > ### Aliases: resettest reset > ### Keywords: htest > > ### ** Examples > > x <- c(1:30) > y1 <- 1 + x + x^2 + rnorm(30) > y2 <- 1 + x + rnorm(30) > resettest(y1 ~ x, power=2, type="regressor") RESET test data: y1 ~ x RESET = 153881, df1 = 1, df2 = 27, p-value < 2.2e-16 > resettest(y2 ~ x, power=2, type="regressor") RESET test data: y2 ~ x RESET = 0.0077266, df1 = 1, df2 = 27, p-value = 0.9306 > > > > cleanEx() > nameEx("unemployment") > ### * unemployment > > flush(stderr()); flush(stdout()) > > ### Name: unemployment > ### Title: Unemployment Data > ### Aliases: unemployment > ### Keywords: datasets > > ### ** Examples > > data(unemployment) > > ## data transformation > myunemployment <- window(unemployment, start=1895, end=1956) > time <- 6:67 > > ## page 144, fit Rea OLS model > ## last line in Table 6.12 > > modelRea <- UN ~ log(m/p) + log(G) + log(x) + time > lm(modelRea, data = myunemployment) Call: lm(formula = modelRea, data = myunemployment) Coefficients: (Intercept) log(m/p) log(G) log(x) time 86.9466 -13.8332 -5.7374 -9.5063 0.9158 > ## coefficients of logged variables differ by factor 100 > > ## page 143, fit test statistics in table 6.11 > ############################################## > > if(require(strucchange, quietly = TRUE)) { + ## Chow 1941 + sctest(modelRea, point=c(1940,1), data=myunemployment, type="Chow") } Chow test data: modelRea F = 2.7896, p-value = 0.02634 > > ## Breusch-Pagan > bptest(modelRea, data=myunemployment, studentize=FALSE) Breusch-Pagan test data: modelRea BP = 6.191, df = 4, p-value = 0.1853 > bptest(modelRea, data=myunemployment) studentized Breusch-Pagan test data: modelRea BP = 5.6325, df = 4, p-value = 0.2283 > > ## RESET (a)-(b) > reset(modelRea, data=myunemployment) RESET test data: modelRea RESET = 13.594, df1 = 2, df2 = 55, p-value = 1.595e-05 > reset(modelRea, power=2, type="regressor", data=myunemployment) RESET test data: modelRea RESET = 5.2705, df1 = 4, df2 = 53, p-value = 0.001195 > > ## Harvey-Collier > harvtest(modelRea, order.by = ~ log(m/p), data=myunemployment) Harvey-Collier test data: modelRea HC = 0.23146, df = 56, p-value = 0.8178 > harvtest(modelRea, order.by = ~ log(G), data=myunemployment) Harvey-Collier test data: modelRea HC = 1.8008, df = 56, p-value = 0.07711 > harvtest(modelRea, order.by = ~ log(x), data=myunemployment) Harvey-Collier test data: modelRea HC = 0.70681, df = 56, p-value = 0.4826 > harvtest(modelRea, data=myunemployment) Harvey-Collier test data: modelRea HC = 1.6957, df = 56, p-value = 0.0955 > > ## Rainbow > raintest(modelRea, order.by = "mahalanobis", data=myunemployment) Rainbow test data: modelRea Rain = 1.9521, df1 = 31, df2 = 26, p-value = 0.04274 > > > > cleanEx() detaching ‘package:strucchange’, ‘package:sandwich’ > nameEx("valueofstocks") > ### * valueofstocks > > 
flush(stderr()); flush(stdout()) > > ### Name: valueofstocks > ### Title: Value of Stocks > ### Aliases: valueofstocks > ### Keywords: datasets > > ### ** Examples > > data(valueofstocks) > lm(log(VST) ~., data=valueofstocks) Call: lm(formula = log(VST) ~ ., data = valueofstocks) Coefficients: (Intercept) MB RTPD RTPS XBC 4.724163 0.017505 -0.010756 -0.046852 0.001713 > > > > cleanEx() > nameEx("wages") > ### * wages > > flush(stderr()); flush(stdout()) > > ### Name: wages > ### Title: Wages > ### Aliases: wages > ### Keywords: datasets > > ### ** Examples > > data(wages) > > ## data transformation to include lagged series > mywages <- cbind(wages, lag(wages[,2], k = -1), lag(wages[,2], k = -2)) > colnames(mywages) <- c(colnames(wages), "CPI2", "CPI3") > mywages <- window(mywages, start=1962, end=1979) > > ## page 142, fit Nichols OLS model > ## equation (6.10) > > modelNichols <- w ~ CPI + CPI2 + CPI3 + u + mw > lm(modelNichols, data = mywages) Call: lm(formula = modelNichols, data = mywages) Coefficients: (Intercept) CPI CPI2 CPI3 u mw 4.27536 0.51882 0.12133 0.21404 -0.48786 0.03164 > > ## page 143, fit test statistics in table 6.11 > ############################################## > > if(require(strucchange, quietly = TRUE)) { + ## Chow 1972 + sctest(modelNichols, point=c(1971,1), data=mywages, type="Chow") } Chow test data: modelNichols F = 1.5372, p-value = 0.3074 > > ## Breusch-Pagan > bptest(modelNichols, data=mywages, studentize=FALSE) Breusch-Pagan test data: modelNichols BP = 3.6043, df = 5, p-value = 0.6077 > bptest(modelNichols, data=mywages) studentized Breusch-Pagan test data: modelNichols BP = 2.5505, df = 5, p-value = 0.7689 > > ## RESET (a)-(b) > reset(modelNichols, data=mywages) RESET test data: modelNichols RESET = 0.86419, df1 = 2, df2 = 10, p-value = 0.4506 > reset(modelNichols, power=2, type="regressor", data=mywages) RESET test data: modelNichols RESET = 8.3265, df1 = 5, df2 = 7, p-value = 0.00735 > > ## Harvey-Collier > harvtest(modelNichols, order.by = ~ CPI, data=mywages) Harvey-Collier test data: modelNichols HC = 2.0179, df = 11, p-value = 0.06866 > harvtest(modelNichols, order.by = ~ CPI2, data=mywages) Harvey-Collier test data: modelNichols HC = 4.1448, df = 11, p-value = 0.001631 > harvtest(modelNichols, order.by = ~ CPI3, data=mywages) Harvey-Collier test data: modelNichols HC = 2.2039, df = 11, p-value = 0.04975 > harvtest(modelNichols, order.by = ~ u, data=mywages) Harvey-Collier test data: modelNichols HC = 0.20839, df = 11, p-value = 0.8387 > > ## Rainbow > raintest(modelNichols, order.by = "mahalanobis", data=mywages) Rainbow test data: modelNichols Rain = 0.61074, df1 = 9, df2 = 3, p-value = 0.7512 > > > > cleanEx() detaching ‘package:strucchange’, ‘package:sandwich’ > nameEx("waldtest") > ### * waldtest > > flush(stderr()); flush(stdout()) > > ### Name: waldtest > ### Title: Wald Test of Nested Models > ### Aliases: waldtest waldtest.formula waldtest.default waldtest.lm > ### Keywords: htest > > ### ** Examples > > ## fit two competing, non-nested models and their encompassing > ## model for aggregate consumption, as in Greene (1993), > ## Examples 7.11 and 7.12 > > ## load data and compute lags > data(USDistLag) > usdl <- na.contiguous(cbind(USDistLag, lag(USDistLag, k = -1))) > colnames(usdl) <- c("con", "gnp", "con1", "gnp1") > > ## C(t) = a0 + a1*Y(t) + a2*C(t-1) + u > fm1 <- lm(con ~ gnp + con1, data = usdl) > > ## C(t) = b0 + b1*Y(t) + b2*Y(t-1) + v > fm2 <- lm(con ~ gnp + gnp1, data = usdl) > > ## Encompassing model > fm3 <- lm(con ~ gnp + con1 
+ gnp1, data = usdl) > > ## a simple ANOVA for fm3 vs. fm2 > waldtest(fm3, fm2) Wald test Model 1: con ~ gnp + con1 + gnp1 Model 2: con ~ gnp + gnp1 Res.Df Df F Pr(>F) 1 15 2 16 -1 27.093 0.0001067 *** --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > anova(fm3, fm2) Analysis of Variance Table Model 1: con ~ gnp + con1 + gnp1 Model 2: con ~ gnp + gnp1 Res.Df RSS Df Sum of Sq F Pr(>F) 1 15 406.9 2 16 1141.8 -1 -734.93 27.093 0.0001067 *** --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > ## as df = 1, the test is equivalent to the corresponding t test in > coeftest(fm3) t test of coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) 6.334762 9.574496 0.6616 0.5182445 gnp 0.367170 0.048676 7.5432 1.763e-06 *** con1 1.044563 0.200682 5.2051 0.0001067 *** gnp1 -0.391718 0.110488 -3.5454 0.0029371 ** --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > > ## various equivalent specifications of the two models > waldtest(fm3, fm2) Wald test Model 1: con ~ gnp + con1 + gnp1 Model 2: con ~ gnp + gnp1 Res.Df Df F Pr(>F) 1 15 2 16 -1 27.093 0.0001067 *** --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > waldtest(fm3, 2) Wald test Model 1: con ~ gnp + con1 + gnp1 Model 2: con ~ gnp + gnp1 Res.Df Df F Pr(>F) 1 15 2 16 -1 27.093 0.0001067 *** --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > waldtest(fm3, "con1") Wald test Model 1: con ~ gnp + con1 + gnp1 Model 2: con ~ gnp + gnp1 Res.Df Df F Pr(>F) 1 15 2 16 -1 27.093 0.0001067 *** --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > waldtest(fm3, . ~ . - con1) Wald test Model 1: con ~ gnp + con1 + gnp1 Model 2: con ~ gnp + gnp1 Res.Df Df F Pr(>F) 1 15 2 16 -1 27.093 0.0001067 *** --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > > ## comparing more than one model > ## (equivalent to the encompassing test) > waldtest(fm1, fm3, fm2) Wald test Model 1: con ~ gnp + con1 Model 2: con ~ gnp + con1 + gnp1 Model 3: con ~ gnp + gnp1 Res.Df Df F Pr(>F) 1 16 2 15 1 12.569 0.0029371 ** 3 16 -1 27.093 0.0001067 *** --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > encomptest(fm1, fm2) Encompassing test Model 1: con ~ gnp + con1 Model 2: con ~ gnp + gnp1 Model E: con ~ gnp + con1 + gnp1 Res.Df Df F Pr(>F) M1 vs. ME 15 -1 12.569 0.0029371 ** M2 vs. ME 15 -1 27.093 0.0001067 *** --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > > ## using the asymptotic Chisq statistic > waldtest(fm3, fm2, test = "Chisq") Wald test Model 1: con ~ gnp + con1 + gnp1 Model 2: con ~ gnp + gnp1 Res.Df Df Chisq Pr(>Chisq) 1 15 2 16 -1 27.093 1.939e-07 *** --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > ## plugging in a HC estimator > if(require(sandwich)) waldtest(fm3, fm2, vcov = vcovHC) Loading required package: sandwich Wald test Model 1: con ~ gnp + con1 + gnp1 Model 2: con ~ gnp + gnp1 Res.Df Df F Pr(>F) 1 15 2 16 -1 9.7456 0.006998 ** --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > > > > ### *