strucchange/0000755000176200001440000000000014252300432012553 5ustar liggesusersstrucchange/NAMESPACE0000644000176200001440000000514313550207777014017 0ustar liggesusersimport("stats", "graphics", "zoo", "sandwich") importFrom("utils", "globalVariables") export( ## testing "efp", "gefp", "Fstats", ## monitoring "mefp", "monitor", ## dating "breakpoints", "breakdates", "breakfactor", ## efp functionals "efpFunctional", "maxBM", "maxBB", "maxBMI", "maxBBI", "maxL2BB", "meanL2BB", "rangeBM", "rangeBB", "rangeBMI", "rangeBBI", "supLM", "maxMOSUM", "catL2BB", "ordL2BB", "ordwmax", ## new generics "sctest", "boundary", "recresid", ## utilities "recresid.default", "simulateBMDist", "root.matrix", "solveCrossprod", ## internal objects ## (currently still exported, needs fixing) "pvalue.efp", "pvalue.Fstats", "pargmaxV", "sc.beta.sup", "sc.beta.ave", "sc.beta.exp", "sc.me", "sc.meanL2", "sc.maxL2", "monitorMECritval", "monitorMECritvalData", "monitorMECritvalTable", "monitorRECritval", "monitorRECritvalData", "monitorRECritvalTable") ## methods to new generics S3method("sctest", "default") S3method("sctest", "formula") S3method("sctest", "efp") S3method("sctest", "gefp") S3method("sctest", "Fstats") S3method("boundary", "efp") S3method("boundary", "Fstats") S3method("boundary", "mefp") S3method("breakpoints", "formula") S3method("breakpoints", "breakpointsfull") S3method("breakpoints", "Fstats") S3method("breakdates", "breakpoints") S3method("breakdates", "confint.breakpoints") S3method("mefp", "formula") S3method("mefp", "efp") S3method("recresid", "formula") S3method("recresid", "lm") S3method("recresid", "default") ## methods to standard generics S3method("plot", "efp") S3method("print", "efp") S3method("lines", "efp") S3method("print", "gefp") S3method("plot", "gefp") S3method("time", "gefp") S3method("plot", "Fstats") S3method("print", "Fstats") S3method("lines", "Fstats") S3method("plot", "mefp") S3method("print", "mefp") S3method("lines", "mefp") S3method("print", 
"breakpoints") S3method("lines", "breakpoints") S3method("logLik", "breakpoints") S3method("summary", "breakpoints") S3method("logLik", "breakpointsfull") S3method("AIC", "breakpointsfull") S3method("summary", "breakpointsfull") S3method("plot", "breakpointsfull") S3method("confint", "breakpointsfull") S3method("coef", "breakpointsfull") S3method("vcov", "breakpointsfull") S3method("fitted", "breakpointsfull") S3method("residuals", "breakpointsfull") S3method("df.residual", "breakpointsfull") S3method("plot", "summary.breakpointsfull") S3method("print", "summary.breakpointsfull") S3method("print", "confint.breakpoints") S3method("lines", "confint.breakpoints") useDynLib(strucchange, .registration = TRUE) strucchange/demo/0000755000176200001440000000000013062350355013506 5ustar liggesusersstrucchange/demo/tkmonitoring.R0000644000176200001440000000771113062350355016363 0ustar liggesusersif(require("tcltk")) { data(UKDriverDeaths) seatbelt <- log10(UKDriverDeaths) seatbelt <- cbind(seatbelt, lag(seatbelt, k = -1), lag(seatbelt, k = -12)) colnames(seatbelt) <- c("y", "ylag1", "ylag12") seatbelt <- window(seatbelt, start = c(1970, 1), end = c(1984,12)) data(GermanM1) data(durab) data <- tclVar("M1") type <- tclVar("OLS-CUSUM") border <- tclVar(1) h <- tclVar(0.5) h.sav <- 0.5 replot <- function(...) 
{ h.sav <- hh <- as.numeric(tclvalue(h)) tp <- tclvalue(type) bd <- tclvalue(border) dt <- tclvalue(data) switch(dt, "UK Seatbelt" = { seat.sub <<- window(seatbelt, start = c(1975,11), end = c(1983,1)) seat.efp <- efp(y ~ ylag1 + ylag12, data = seat.sub, type = tp, h = hh) if(bd > 0 & tp %in% c("OLS-CUSUM", "RE")) bd <- newborder <- function(k) 1.5778*k/seat.efp$nobs else bd <- NULL seat.mefp <- mefp(seat.efp, period = 2, border = bd) seat.sub <<- window(seatbelt, start = c(1975, 11)) seat.mon <- monitor(seat.mefp, verbose = FALSE) plot(seat.mon) }, "M1" = { M1 <<- historyM1 m1.efp <- efp(dm ~ dy2 + dR + dR1 + dp + ecm.res + season, type = tp, h = hh, data = M1) if(bd > 0 & tp %in% c("OLS-CUSUM", "RE")) bd <- newborder <- function(k) 1.5778*k/m1.efp$nobs else bd <- NULL m1.mefp <- mefp(m1.efp, period = 2, border = bd) M1 <<- GermanM1 m1.mon <- monitor(m1.mefp, verbose = FALSE) plot(m1.mon) }, "US Durables" = { Durab <<- window(durab, start=1964, end = c(1979, 12)) durab.efp <- efp(y ~ lag, type = tp, h = hh, data = Durab) if(bd > 0 & tp %in% c("OLS-CUSUM", "RE")) bd <- newborder <- function(k) 1.5778*k/durab.efp$nobs else bd <- NULL durab.mefp <- mefp(durab.efp, period=2, border = bd) Durab <<- window(durab, start=1964) durab.mon <- monitor(durab.mefp, verbose = FALSE) plot(durab.mon) }) } base <- tktoplevel() tkwm.title(base, "Monitoring") spec.frm <- tkframe(base, borderwidth = 2) left.frm <- tkframe(spec.frm) right.frm <- tkframe(spec.frm) ## Left frame: frame1 <- tkframe(left.frm, relief="groove", borderwidth=2) tkpack(tklabel(frame1, text="Process type")) for (i in c("OLS-CUSUM", "OLS-MOSUM", "RE", "ME") ) { tmp <- tkradiobutton(frame1, command = replot, text = i, value = i, variable = type) tkpack(tmp, anchor="w") } frame4 <- tkframe(left.frm, relief = "groove", borderwidth = 2) tkpack(tklabel (frame4, text = "border type")) tmp <- tkradiobutton(frame4, command = replot, text = "Chu et al.", value = 0, variable = border) tkpack(tmp, anchor="w") tmp <- 
tkradiobutton(frame4, command = replot, text = "Zeileis et al.", value = 1, variable = border) tkpack(tmp, anchor="w") ## Two right frames: frame2 <-tkframe(right.frm, relief = "groove", borderwidth = 2) tkpack(tklabel(frame2, text="Data set")) for (i in c("UK Seatbelt", "M1", "US Durables") ) { tmp <- tkradiobutton(frame2, command = replot, text = i, value = i, variable = data) tkpack(tmp, anchor="w") } frame3 <- tkframe(right.frm, relief = "groove", borderwidth = 2) tkpack(tklabel (frame3, text = "Bandwidth h")) for (i in c(0.25, 0.5, 1) ) { tmp <- tkradiobutton(frame3, command = replot, text = i, value = i, variable = h) tkpack(tmp, anchor="w") } tkpack(frame1, frame4, fill="x") tkpack(frame2, frame3, fill="x") tkpack(left.frm, right.frm, side = "left", anchor = "n") ## Bottom frame on base: q.but <- tkbutton(base, text = "Quit", command = function() tkdestroy(base)) tkpack(spec.frm, q.but) replot() } strucchange/demo/tktesting.R0000644000176200001440000000763613062350355015661 0ustar liggesusersif(require("tcltk")) { data(Nile) data(UKDriverDeaths) seatbelt <- log10(UKDriverDeaths) seatbelt <- cbind(seatbelt, lag(seatbelt, k = -1), lag(seatbelt, k = -12)) colnames(seatbelt) <- c("y", "ylag1", "ylag12") seatbelt <- window(seatbelt, start = c(1970, 1), end = c(1984,12)) data(GermanM1) data(Grossarl) data(durab) data <- tclVar("Nile") type <- tclVar("OLS-MOSUM") h <- tclVar(0.15) h.sav <- 0.15 replot <- function(...) 
{ h.sav <- hh <- as.numeric(tclvalue(h)) tp <- tclvalue(type) dt <- tclvalue(data) if(tp == "data") { switch(dt, "Nile" = plot(Nile, ylab = "annual flow", main = "Measurements of the annual flow of the Nile at Ashwan"), "UK Seatbelt" = plot(seatbelt[,"y"], ylab = expression(log[10](casualties)), main = "UK seatbelt data"), "M1" = plot(GermanM1[,"m"], ylab = "money demand", main = "German M1 money demand"), "Grossarl" = plot(Grossarl$fraction, ylab = "fraction of illegitimate births", main = "Illegitimate births in Grossarl"), "US Durables" = plot(durab[,"y"], ylab = "productivity in the manufacturing/durables sector", main = "US labor productivity") )} else if(tp == "F statistics") { switch(dt, "Nile" = plot(Fstats(Nile ~ 1), main = "F statistics"), "UK Seatbelt" = plot(Fstats(y ~ ylag1 + ylag12, data = seatbelt, from = 0.1), main = "F statistics"), "M1" = plot(Fstats(dm ~ dy2 + dR + dR1 + dp + ecm.res + season, data = GermanM1, from = 0.2, to = 0.9), main = "F statistics"), "Grossarl" = plot(Fstats(fraction ~ politics, data = Grossarl), main = "F statistics"), "US Durables" = plot(Fstats(y ~ lag, data = durab), main = "F statistics") )} else { switch(dt, "Nile" = plot(efp(Nile ~ 1, type = tp, h = hh)), "UK Seatbelt" = plot(efp(y ~ ylag1 + ylag12, type = tp, h = hh, data = seatbelt)), "M1" = plot(efp(dm ~ dy2 + dR + dR1 + dp + ecm.res + season, type = tp, h = hh, data = GermanM1)), "Grossarl" = plot(efp(fraction ~ politics, type = tp, h = hh, data = Grossarl)), "US Durables" = plot(efp(y ~ lag, type = tp, h = hh, data = durab)) )} } replot.maybe <- function(...) 
{ if((tclvalue(type) %in% c("Rec-MOSUM", "OLS-MOSUM", "ME")) & (as.numeric(tclvalue(h)) != h.sav)) replot() } base <- tktoplevel() tkwm.title(base, "Testing") spec.frm <- tkframe(base, borderwidth = 2) left.frm <- tkframe(spec.frm) right.frm <- tkframe(spec.frm) ## Left frame: frame1 <- tkframe(left.frm, relief="groove", borderwidth=2) tkpack(tklabel(frame1, text="Plot type")) for (i in c("data", "Rec-CUSUM", "Rec-MOSUM", "OLS-CUSUM", "OLS-MOSUM", "RE", "ME", "F statistics") ) { tmp <- tkradiobutton(frame1, command = replot, text = i, value = i, variable = type) tkpack(tmp, anchor="w") } ## Two right frames: frame2 <-tkframe(right.frm, relief = "groove", borderwidth = 2) tkpack(tklabel(frame2, text="Data set")) for (i in c("Nile", "UK Seatbelt", "M1", "Grossarl", "US Durables") ) { tmp <- tkradiobutton(frame2, command = replot, text = i, value = i, variable = data) tkpack(tmp, anchor="w") } frame3 <- tkframe(right.frm, relief = "groove", borderwidth = 2) tkpack(tklabel (frame3, text = "Bandwidth h")) tkpack(tkscale(frame3, command = replot.maybe, from = 0.05, to = 0.95, showvalue = TRUE, variable = h, resolution = 0.005, orient = "horiz")) tkpack(frame1, fill="x") tkpack(frame2, frame3, fill="x") tkpack(left.frm, right.frm, side = "left", anchor = "n") ## Bottom frame on base: q.but <- tkbutton(base, text = "Quit", command = function() tkdestroy(base)) tkpack(spec.frm, q.but) replot() } strucchange/demo/00Index0000644000176200001440000000017313062350355014641 0ustar liggesuserstkmonitoring Monitoring Structural Changes (Tcl/Tk based demo) tktesting Testing For Structural Change (Tcl/Tk based demo) strucchange/data/0000755000176200001440000000000013062350355013473 5ustar liggesusersstrucchange/data/PhillipsCurve.rda0000644000176200001440000000765413062350355016770 0ustar liggesusersZ{Mߙȹ1ܨ⡥}jK)JmPO*OB4b:H"Br$-RO1U -O {}}{X{ڟ>L>&B#ql@#ǁi)SCDԄwhL}^LLzՒߋ&3Sk pcڕv4ǟ6r/>_|6mΛ"#i| -´ȉd׮ʹmFmǴ? 
LtTzˢsK%w\۰=S?d.Q{Ջ=y{̥~@.sqO寘pn0ǃ%4b.7'k'þ\ NuXG*>2FvnNaՎi/I(}HjηqE㛝Gs GA҇O{}~MWF{=WC%hh? u к)+]۾B]zY')S: 7>OoyjqJzc}az`Sskfsz?)s233'\H{w31]bk BBɼG6sÙ|q Ha:As^uS42 HGzi(K=/=^e&z@mpDx<N" Fypq0 \n*\y6 7o(:@^5fB S9:kH(K`4s`ܛ'@8>Iۧ{wxE2@ym ^]HH B]mu}rwKA;I^ϔ u*wO@9 P~ ~'@zK$OYǭ"lB!0vZ{ʼV`Y68 ơDcR%ɝ+(~ummB'ՙkAf3<Ȕy `HY lP=! pJ|bA46B-Cr}@P!!!lB!`vjxl翃ב`pK+P(iP8 +Pɭp+Tm23%pk N~g_;0s?v*\mmXqa(D+TI2 FoFDZX]|P\0#}`$⸶nW\4e&p R pQmv+7R6:@ Gd 5ٲ0f!3 e2OGX^ &(fn iNQ?G_wHAU<0ۂb!4MD,BtA  9"^;Hj4#RO MrN,J\5_`:Q{H(A%밾ЩB?F|%0G˱r'p2t-ruB#XG+0L xx}3b@N|O;_^d)#"f-BFx࿯?,N%P:|ZuC0POB%l3-źV!=XY<? $4 Byt}x>cx+@(.*_萆@&@o@ņGđQöPU8x 䣾dk!{lx-dޜcMD!ٝ=/I~D[ /d!*Ntxo4+l"JDVP,Ѽcx!)j}MOR'l<\"gT)Pf-ǭq=d¤XO]Yb:Eؽis#H>,%qERt)g-~"RG\'-%v4F+|Jh %!bvhW}|4iBu/S%fb`nJVQ8(7TIQ/k"'*_pUܮGJ`(c b0(rƗA('p a.T+UKRŋ8)BD۵]X }.(}&)rnncc+mJ`(@] x:lļ`3yzgJWc6cIE~ZP6sKZ+wKז'MC\|UpNEeo[ !G.9H P9hE4 Fa qa0gف@lRVwx"̽M{:/yC?CQR3C\aT^MNxꂋ(N<OUfa>TS˪x6̘_QoRۄSU%hM;M]?JɞT9HԼrJ(:S6_\{j:T{*H?z._|p- E`yt:zs%2MEI`S:Gz ^ܼgxy=y={ =sJo Y\ʻL ϯ~OY>L!\=8۞fڞlwYyB&Ѳ˒H-#g@Pe1OPX> Nw"strucchange/data/DJIA.rda0000644000176200001440000000222113062350355014667 0ustar liggesusers]mHe_ 5\X-A i}Ⱥ#"lekVhy9rs ևj1{YwznzgUk8N%NiY4( +A#p]=qx[:˸5~׸M=3Ve&cݚC.4Lw47^lkt]pV=10JϱZ>}&>5v: wwk}0L痸IQ^ɝmц&9~goBuKzc!V٨&1/] (qT#{t֝W1vXתx-0i?p _!8G.X h o?G6Voe1O?$O?~~~~~~.]=GO?SO?ͼ].Ⱥ19HH=ulN:&S; u ƿiaO ~e.Y|C >f?g fx/~߹Y﹟  郉 0Abb.ÿ̝;̙3s0 !G 9;U4|Ҹ{#T U!?HPATVi7ABos WS$^>a*5t'LgXd"Lۅjͪc2 :&6ZKi ݮ`̑ 4J5I9'a͖$ $>^M! Cf0#PC´96G{fSPx]ɧ曵J焉4 uwHaMJ3-7'̔ظ ]Bu9fWnAC,o>*=ϵNRCC k̚6&V˛J 1DBZj$l\׭a. o_3?ZHè!s'a@G.ѐLkB?b7R7c:uDŽNWa¼MUun"5$/+a+u%g[W~j#a m|1aD7v6"l f%vf\m0amRx,S+)mji֫>ێK֣%~? 
r~(86CؐR!¶tRhk =SaH 7F-Fvs@9R/Mk'ltҐ" a(:o8-I.L؄Pי5# lTsraP>F؍%םzoݶ[n.yY=6H  {uC}MfªZ(DŽ=JfT'B:){aOHy-07M#cjs^Wd@Ga_BՉ m:AHmiHǹXw8aXu='6#q ޗnka߭P@;j&׿IdN#/rrzEmXЛjwP;9NDBe% 뿏^q:vCز'fъUIP<㏐*o*̩9A_Tu"UJ&PRCz0kR*CNzrh܏/Hx%QAiWD7DxZ^8jA$U;w:"ik~!니;uat"K]| _|D$}#-:H& kɉd^_eH%wIA$IMstf"R>>7c,"9GhɓDrR!DRPUuHޜHK!8lԞ#޾Ju]O򂅣ws']2UGK^;8Mxx!*~Wx?LTC/'+Jo%>TiGI!>AxD+`m3)#鶜m&>?$>cJuG#φYRcMxT(.xK|vQPI|ryB1sw/~F|.Zҕe՞;I|ySݢG ŹWXS{>y  }Ig}߃L/C `&y22:30swv\.a&9%uД7V2=J+MQ An.&lRm_HpM+ G aܓnx* ='e wWRXNQX\iOJ)| ,t* C<9fAB ;<_v4 [¼7/ ;l-UT_9ts(5vuZ|Ž Ňm=au.;<c\$I_`06.l8^ -Ʀu`쭥^ crkrEr}qe^uY "OPjt=e/.71UOx\aNhcu65|EK|A[+S (I$aԼ<0I1UIu}U-%5L_KdUBGa_7=` I͚?2WkGާ k0 y YMFS[ah9& DVi.՝Cd|(ȿ8Ji;Odh}a(%YXeau+_9PPDdiU^[g"ꇵYR*mck\G&y#^&/GIu̿Z+"뗈Fz1~ˉTQ"&bd"N,dZt.*5^^Od;uqD "xq#z"_Bd(LY~D]jL;Kd}oF=79/mZJdQ90AdM8=v~V?-,>ՎMdAJ`'"kk;JHTz FߑFduՔBd-s(gR]B#|+!N7fu3L%o/* 9k9Z+|D[QUL3ZT\[L}3쇺pٸKG$߳Ogr6~q(ňrZ8(jR1w%CzGF=jĖGp {ˋueFJp첅/-.')n&l/գ&:>]L "kk,ln˷_VS ,FI(c*.?> Ĵ_7TOu=μTN+kS|.?A>B;|d%i 79xBZ?ނ\1p9Y8 r'"'ʸZe8L<]NdoX$iSUù?5\(7Y^('zo 9-UV i*A+6 @B'RWEZѣ!rSy*|8>_FE |038zlF8=cgn: ѕc_Bmykf'"g E4хm*ux|\7ϟѨ_ ~*̙@lh{2CЗ@ϫM+w |Ahak/Na>{&9 A}ze'8pCW@ϊTëd*睫_6KG4 qqʇ0ȯ9GwVE8|j8}fG"]?Zm>[vo2\!W#7&ʑ$qtr e*W DwQ {^XDod/8"%*)G9]11-C1C'@?`sQPS2n_{!- 7|_af \՛qrj5ɧOd+54qHiy!.qdpCt j?/ļg?0 ^~kT.վ<")6 oP D_}w֕#5!dD%piZW4e,r Wu=#«7W*}F~b=GnJÛz` BϝɎG+? 
yu2* e}vȳ~*N4@yN_E3Vat;_Gx+)_Jޜռn?qR{})J~C Z!<2jK!̜0=sx6]M!'Z`HM־ 0ljj C-s-AW>C`83WPVOnMw:2[ k {Vi_O96‹(t1,| MGΏAotU:p4 Dk66z+Wis3^n<zGUq3ތpl7F&s =Ɔ- Xc"л8w6kSUFORhW2.ӇhIh/7\gՑC頟CT3z QG0zSj')>U6* *3[ k+cQ(7 9N+tAaBzs24-Xkhկ8q;uX?Qޫ:,| o\g5kYn6u/]UujQU1 G>Oj繎 B*Ylӎ+tXjucwo}o'Q'5_r$ef`>e.D sUh- `]*@(0GT^:Wj=ݕzb-3[+U6ۏw!BB,BH?'㨏XC ~%TŖ9jMӘ^k}÷qU_6xsk="nEbzS敿/-?c r#/ b{[Dx+1-G?k_Ҋ t-)ìt(Gc>K[|_(qyޘUA^3t 9>~#>wDhy?ϦGk}/[Ez_ keZveZvg8ޟ X>}u}Jƥ=`n Q!뵊ΎE7L~[ VTaי[9} X3rm6u RBT(Q_R)*03Аug/0ExjeWqxp8 'G s>^uWPWQULTDWMڄc@O\ǣ3]^J%YJC|?^|`nY*Hw@ I{]=,ydC{* Bέ<M8i]Հsn ͳ|)k-ȱpn2qS=*#mT@.l+G#,8RϨJg5EY8Gt?xԹ/ڕ9H۩Kh 4Z[ ~;k/Ӆ| B,' ,W+ n[oB|~kjGMCi^9N[s]].ُ܌etW眖~5[^_8M@7w6tw'(짰ﻲﻲﻲ>,V֜;i{Otuc]֧X cAD7]G ;a'C'4xr:%~ UY||>? ۭqRXS)a rߔoXor2z 8ݱ `s`pR'ٿW?B}|."_&'e$]ݱbU^-U7 "'1 {3v;Xſaklzeb#ǪNc[ҸJQc..Y#\Z3W%9n1}<{-+o\ƪA?UpeL1`ULfv a{rWױ+7'g`΁zdLոR]hWq8]W6aj퓅]?9?UϹ.&g׭v/9ض~qkgNvk5xʕXW5 gxމUsЯz&wɰ*ZKʶGM go[V4&e5g^~b/|ŧU^2>uatb+>}eۛoSh!#,q =~ħS.8gi_g>}A^q^?~8+/)6>a+= ti>0q1zujn=v׭?gviFGy'HI"&r ڳaH jq'>Kcb9O0ޚr>tߤ2>9&M4/qqlC "!B$T p0VbL$__Ab,XP1&D#@1D#@1D#@1E@#P1E@#P1E #H1D #H1D #H1E`#X1E`#X1E#D1BD#D1BD#D1BEP#T1BEP#T1BE0#L1D0#L1D0#L1Ep#\1Ep#\1 'VlQ[4 El`[4 ElQ ߆oCmh64 ߆`C lh6Z -`C mh6@Z -ІhC mhet*;o$0|y=܅/ξi_״kVuMc#[ʼ}e޾2_aV~k_Qר+uI -[2*-|FmK?ܳaNo:Qan$$$s9~kkZ333L>W88*.[ӶW+j8OXW+jn~ԞϤXyֵ[2$g@OOOV>_I{ $ tϖ"llIqGo'-kT)mgn rL-tW۵ljh KGV= ` "}~Qtaܩh7" ݍ*vKb7Ҳ~3~Q1oǪH(j ,󱿃,A*strucchange/data/SP2001.rda0000644000176200001440000021552113062350355015016 0ustar liggesusersBZh91AY&SY-,^E x}} 6px􀨪* Q)BTTR T(RE(d)$DA@`_>>4 >=*F֐>Q>LUAp4Pڑ||>'9*A@P(HPH( @ ((( (PH@HPDR5-!-(u*P)H P)@|.> mEP>=> g@lbC0P`:^E} (P <zh <Բm0} J<px (;h$cx}_j`}}5 5} h2TjzOQ'{Tژd='7#ͪ5<~zMLD 4Lѧ2A4'􌌆1F=S JP@P J*z!F FMGM 0h42iFz@4 ?J O%JR$Q@yh26TT=ޢbߪzP@ =A)JQ5OD z&I=Sڧ *jY-g$ۉֻi1)00 <$St۳ -f;cr3ak1mSn [bݸ3,̉-+OWMB=R05%ۚҶs4fA6e2ĬC#в JD藳KR< JD,*rJL@#R32 44"U LC(]"y&⛑h`&Y2*e:BLqTA2n)tqHfnw > 5h*$&x\eUʶT%jvm z`Sdjr*YiBJ/ ~6$ӊAl1yHu0Sp 1$!R.7p1t@*+6XcIp.QZ,CQ1*pɛ E|f)?: )0*biE͒7#$  ۖ{6f՚l~" 
vdQ]J)#Z(`cIㅏEAXͫT51)6`޴4QD?dfF}Bi!,G!,2CGf79}7[4!L*dD~UF) y`X6@CA*fEH`N7f&,,$TLEQ 3 BQ'TlpE!J8B@ lQ+GZ )Uf^Pjδ-X[$SL&v6Ay}, Ir^ ?=Ϻo[ 瞏\gy͏̿b7?yB/~u =$.y{"./-:{X?7t)Oz>9GLs7 m.:̊/N~_ Gc G79blb1{^/{~ kk0ְh;}}][9Al"1{;o1ln>^=|ϯ~kR nϘY6N=_NkcXx]=l/xU!;aNamx9sF|=O'f[.iJA!:PmXj`u{_1NN؝o٣-˦)c_VJ>80P5Pޤ/5WV_u0͝nR^!1#Y ֗F>VVPS[ݴܩsخ |w{o %@nP@ ɟRH C ف *HOCnC[_?$!$9_?FcL-^J^[0m5,rNklڷ Vg0g:Q$.$R`Y:ynbhNkHRTe(薑RUX$RaiT"蒥녞KJijZD['ehm%-Ix`nRRșFKbݭ-37 ;lcpZ$0nɓZvjjRxIU[e-lNᦛjIY7iI.سc 瑢Rjzfd;SZnrfM,mRK W11RK1!M 4 P[hkmnp ti0B)D@O7]EhM3rb"Z)dFTJ^"ZdQh*kR⑚iB꧖Ei*gIVDnN)ٌLkhhv6sU"ʱ T E-N·m&E] B$5(TC 6[iVn9:0e%(o7}~o|ߛo"o;ג.$ 1 ŋTe q@bPČU Sq ,Xf (H]>az?m~ C5)dދ]y=2wM~ڵ+ЁWzr}~ih)=>Ԕ6>6]blPiz'&W.^˾dzͭ^vwxk{7{u{4;32kR Df0yR/h y<ϮO=7}>'"trPt۔Nej9xh- e'/6sF-,Y6+ZUչkHuqնyό>HFmmkf\2Ɍevu9VN%gb:hM6Ɖ)cli'lmך4O%IgRa!%b [PR8ZM e"ǢYYN BۗwmR+sק>=bXݮ0:bƵI].&J ؉P#78ȺmA^=Q}F'v>cDž,{UJ=-e0_64!}w|hcuL}=u{H1DigK]1\V-Ȝz"\MnԮɴ5#B%\v9^GXi ֎wDb"5 I{wR>Skk ScڟwA\f+ ^QF\!+ַtPރ؞p%YGҍ)Q  ɒ?s$6Qv*mjdvh6Y;jm L6'~\n_1~KpkU/ Ӳޞ:ڂ{δk7Uwx]wxY3)ayyxk45]^mxY^S;Fgvmaݝ{vU{Hw{HuL<׸:x^5#/pgLK2 Q C3e6fFf}˛6l~D&ϿT밉IM!2*N "םrj^N&,JzЕfOzOHB{mޛ&}/({enw19<39< \յW< Yf)9s TNfN̵RSEkP\Nx'yND);>',z;ӿ83z8$22y5`T/Ij XjMZ U2̦X?et&`~(o0S{zm"CmPPBɾ,zMvν5ڒt]a5ᮡkknHM;;:|a1!Jb0 Y.ɑE )d̈"&7%@\&G1RV"̘1o-,RMc;.v$| $ ̄Xߕ}'t{=w {7R/fGkJ>kRsdo2TdC T營̔󨊂BR=-SL,yj1;UFI)w&7jMڨmڇq܆،m,X{I38o??>在DAbNIȯ%PzBe}U'eJe3lؤ6ͶA6ج{bٶ!6M͛g%$p,`c/8)`8!0.`J.X=%KxQ< O?_?yygʅWeB*>_k!+54/ou'(AQzYHFWYn=xWݞ̉TbDx Vsъ#g^8el* oaK+%(זw^+W{wO-Yk^xW'^iy˼.g7Cg{CTY5aNj.*jUI,5C1jbҁ.-EL!'Xo CUCI (p"pŒ^ ;Sm,`UI֦iIͷ00ۓncvśff346pBK٥9&BdY1 d yc8c f$f#,=C$?=wowQzg='^z~W<ϥyB=2|,s fƪ\g"'Oa-_j,#Ƽ1q#/we=YT'#i$zVJYŠ>tO*=k$^_W] QhwID@GpB8$$C{ x񕖮s 줸ҮKj)v{kri-6SC[Rlͷ*Zk1) .fէ$ e1v{v"ñӶU8trl4ݮY2;Fra4re&͍ͩK9ѳbHLÓ).g&Zi%Ȍ\QYŶͶ2"cm=_Y"I\Z-v֖-uZVF3 D~$diM#K{~:&ȱ6MKYWJK2Yt]nt\smY״6XT4"E`Q6\(C7ڳF]~ݧӀt$& '7$CS}NJ^rWWey:oP*CzP6ԛT77UfoJٛ=i!%-t'E4`iCI@&!@2ut [J](uKJof{3=8y釈^s弼QnUqN!q8UL8OXzM=;Ӥ9O^o8l9ƯjgEW-^5=7>?+ޟ}ք'qr LaY (&`0faa8 @,bq^HlnM^U :dIU ECfCdٓcefN3 HhL!`e2j%DX̑c d\hUTVx 
%뜞+2@?H`[?~o?5~o-^B>QJeOUJTC}oPzQS#qDSdzrƪˑj0ayO+o1bG|Ԭy'yRTc!9~3D A$I:_Q/>ӤnpW7rj,6n֠1È\7bg.jPKp[޴.x.=T$r;(!"2ԲvOP]$'Wr|ݚnZJJC1\+:A(d֗ z˂]yePS(hnM9tevGC8Pf/,ޝd%L2AlQ,R}*%ct7 -^wJ #JP.#(׺}+'l$ {qO=!=ϣmm 3IC6aP۪6Ͱ˶G! dɐݼ@q(^)HN;Pqq˙*{[U ;5+jCl*kZ%+Yhl5af_`W.,l u0Q l@1%cc12-4?/2ACgB}oӧS? sLC_OsS"篆W|4|loǢohD"UJ䛾-ie3P~__iXl]JV]3t2I)O)Y<-ݭ_J{O/#xDZˌ$/].]kXR,2)dzAdVEx ތv{w?9aϴxnGU悯Oh$C܌_3JI!'C~Bonm $zP Hv6j 6Blڪ#& &r&5u醶bu}X$jji53שiԕI@Q%_:76T7~+5דYHk&f:rknfv]ν{ CnmOϻI!{uۺZZWHb؂aI l1&T̀ ;g뿣H`@ /͍}b!kkpkАD Hxxx[TiͳY'PO յK\}tM nDmq>FтI6P?qkh?:t:~ ?Ԟq@TO?Uǃ-^sYQzBM tɶY5`r0|p Kt8_~7ngt^j!-i]q6e Ju])mR@)T+nl( Vh&  )K"2"[J^Nb;VT^NO*Hpr{|lzBM `=)\+L遥"OJ?<}GoY^W8ȮJsqJY ܈Yjgƚ3FMF:۷o8Q}dasxå"zKSD(GX]jk$oy"$%/|PDHRŷ^ X.cdDdx.i22`ZJh؉jXvWS[F;ayC#Ő,3Ϣ =.Mʕ%WJLL0{(oWˎvQl![) 9B];}oc# u]|ւᗐKAh;?,cΎ#`ћG ~ОY>gbU?GFOS.}UhힳsKP)SL;Հ̫L#\3[e~"x\'+D2 Pbxۿt)wP`tkV1i o3Ԍ P?C΄]~xa/IւLw9 n:eŪa{GrNJ haԎ\@h{gA{ccHe5ZD3@;0@^|zo-&} yǞZ;G9}U^QgK$^6%= eNZtGV>8ʌ erS\7~Wg-OW9Z,x[}Sɐ L@_R|^'d%БkyTYq2:ތ&]7COR/fiU/:zYtŪ8Duh\{W+[KfkZzQJO*q[d v_6,kA;%=<HXp9ZdLIrO;J=d_7Fs?Nk:҇qHo:ǖ 799?$EZ}%8, )>b]{Dy_=ӄJhCP復QBN-Ц 42 x//;R8"mLH7EHn4e6D4s 8`e GyMU\KR,*$(5W`&&:k/%0j+h=茐Upy! MB\=p7?m)i2hO.g~~ҕ50>v/% O`Z_JWd{*Uq˾:]4;4i@1] ǒ:iY.0|T^ g:HCXTc,{45!2h#;$K5]{P%IKX¬lu zS]HcNdtU~A XiU&)l3axԬ|lnݔ&՜_]HCf2axB{0J2/*Jz{2m29Iï0[=adSw{NV&*P ?%/W:hHxGӯ;yrm-!^r9wBr9#"Ddy')Rޖ2y7R[ lr;4|yP'H-5XGFw;F~(G m-]Vwyzm4"q m5>+Lx@k0#fd=db lǞP!M3*Q'nSJ/'H[$6yA0'Bݿ`f/5)]ZՏ?ebyf1HŶ&4>#=#n35.i@#7H&A0)rto_P}7DL|[VF7bRPֲՆrTxqf73+E|sPr(v'\<0R] /ipٰ8)RDT1*HXKDw<#0;\dKoWR:jާ0Jo|wگ{lq/JbC _ ќ/ׯ"~uX{gvܢ^̾;+ZIPNsPd+I9Z[][$`+RTrNu'=g+?.ә~?DcZM› 8A;v荪7)/M NK2).+oX'nF Npi{᫦wRHE+6FOpQ3`zBޢ3ձoLG8b=a\#;kxO݃5+XrޕT_c ]|. S~υ'gtC(^\#iV7Ky,II^ imxoCF ?֚QQ>vE,+⤱g'?*ÿT PшЁ>IdWb} Mށŭm.0Xgt)wld khDǥG*]E7LI C.o(s$E|Wlej"!R:dѼ8?J/N̋zy-v4G<$|Ւu~½.@q5}Zg! 
uVqvCi:.~8m-W5"L塗56<0>B]a Y=|Zdpa(67Lǩϊoŵ&X*2|(O bt)rDYF%\a'X0V'=nV%1F?fo=j.1B_|{-*tBQbJ#QSB͓>w/n?`ZRqڇE;<ׂ4w״ *[%o({--G7'Y)ie?!0(و4ħVi [W 1A9&IQڔșNO-Nфy8rI<]ObCO0 {1oWeMXܤ|^my} ӠGu䂍ߟ{A p򺣘V5Q}uYTW_cdUY ٺ7]ryÈ\ho#"g󝖛P NURh`雷> T(0K[K.bg>te֗T-n1*#qڛd>ÆHcQy;)=hEJꟇ P!Hӽ% -8$h$%ORK8qw_.Oկn,oW0c~SnzFO]I|Oڥ._&V%0ᦙo[|>yE6n$SX'" g0w%7V#>P"p-/R!c] noĥCPfnlgc^ }I5~vdQW#*Ĝҡ%uDD>Ւgy%ao]Wsi;P;FB"8-DzT 8[)褱 v_"}%BEgTc!gP:FUpCźwyPŵؿP뮄n~u1^oVf[u>pӂ걃N7ã@ӵPtk4tZ>yY3g﵄ȂF4 !*7W+.Z)xM ٣O>Η(ϋ6߲zZ_)mS# L㞸|8LXcmpzy#Lvx++y;jвSJV#Nx Ӷ$O_ o!7Q 75fQs7+[#@WndnޙjqxىR9vfׯm2[,` AeGtM"3hD%< 4m a;˨LmBK04>;IǓ女ls/ ꒂ0Hlqd?\YD`#nO=|SH.5wHM)1>iڥ+~N!z;vpCA_l}?)N^Bכ\mȆƃCEHoVkjm)IcYz?(.)2=]X_!67Z9_o{o%ӥc`td,mBVU;($Or8`ZYI0o/Gpq~XPK&*# "Q0ή{LZ:7'W̞ {q%m}*Vh5k^GՎWbIRCC-"6TN0q zf4ܔ"!|M)+FG]&Vp!*GwȥU`đ LnX *gĽ<[m,☞ѧI]OΫl}ԠLs^Z᝱#^pWupp?E\b;j_0֌Q3OBhKZǓWf#KV:͡1jjYa-8<9\Iڍ;st!'A yîoV؟Q+垗 Xk)Iy8{1:M~cFk'7-jz4g~=#:[%ԼzI2ܙOU2 eQ%`h93!3ܡZIy@ed թ]v+Qeb"Vi9xe>6'$. qч2xXD=7&Gspw*zC_ 2\0nBk~%*@yH+[H%/n}`X?ŋr6gHgBtj)Tn R|)I d/?yNw {SC;٧cu63ΘzιZ']OGEBeG=rŀ|A\R mk~"R.C3hMET[{?kZ]GK$N1f,Fuo_Jj%5ܥng /? FM[%x 8p }t@ ,_a}݂֐yCmwh/O-f28vSk9DyraŶ)OJ6bZ&^0S@bo@ݥqNv %>tXzHC&sY^cJ9Z Tc0[=!1C~Lf]oظF5~i~Li5X}mreQ>-ܿ%;,{_U-,!;m կNLdsbӤQٸBS\SC5~ԳX8D_3אWJםKiDL[?OV8݉N|fFQ-jD?G3rؖ}:Ilj|O U- _*V |6HHD+\z_UjOX- v]p彽ŵN6,Y)Щ琬^{Ǿ, 8F;,DK_N2h^/#16|Dwg`wHꗡb7es1ʨ 8>qSԤ)\i5}]`5lU2;G.F I6O r4 pe? 
۠^nckl--OƘf_j<{ϖ)F'?X@)u.p!KX?6O:nܴnD?Q0cD%zQI7GLj^kCP5 zX`' ثz~S~vTE܃B){Cx}{n^N6sp|M\ 11E}' |_Z4g0 = [nl ^eJ%8@1a8#bosH>R"gW0SIeĘz=8[Y&z)hɶ)d[Bݝh6sL!h|p<bZX͑]rqjo JCF0]@ byX`p4#R ~ЍFǞf=F=@BVkd?(E 3t![1Y o.|x;=~ QJB D)m^deT||8-xftX\ dgaICAFiPؕ/,ܞ& FiD1 FPD@AerZ!a Y Υ*a-ҧMy^TKxfFs"W9ֳYmjF҉o$ !akxvg> n}- %fV`Ov˰mVA\k2ɂR"Şv[1^^%0K&=Xqoi1paZ&@~L>Zv}txn^I⊗{5Jw\Ƶh^CK`BG~9/jkzc:Nů955j͹nu%Q#=H1eZȨ܋;U7:3ƃ0(Y %ߥhwfRHR)@P[rȀcmvܷR\8~$ m/wm*FT ;HKSYj|e"zbܴw?+@rMl'l%e]>̧"Lu0!Hb,]].U% ;fܳ3t6Ҍ~<O ^m@KҚu3yv71W G|)('k' y"g#ij6A.sCc?%ͦ!+K T7Za8Д5H|{>oGA߬yc|_}׹)BGB>E}MYlHjs217:`p9b,TG$YK&4[i~.s(yi+B=,(rq;YH"4K_3g?%"Ab ԖqS_>'5`7sg,8ŷukhԽՓ뙢+zJޏkIKf'-81_s:lu ~V`8'u3b깢Uj0MT4NOݚ)ĥWSqdT0[gpz Da&zNH>[;Smوhc/zl1I)}QuAJ6Gͫ䨿.ٜODiғN4؄3Ȩq|k'闧.U) -{#0z_`.OEtMA;u~ypsO v ǎYlx`*qL瑻XqrҰB\t(1qȵ(ܼm֯?M>UD/K:Fgn9_H\#j_ekj8kRUCP=#s ?Mͯo(A}E[3~2r?x^37!\(8@91D3,M#k=cd :@+oe֭"ϑ䭂4M~ VMn(`= w^3e4>d„9,dvp1JU! *kKA2b`VS"ڂr( KaH 4RiO /kmg'L4+@4̏yCH O8_.(H KR[11йhQEP(B60n}h0\Yy6MLp""VkKFф 9)'т^rrTQ'a8ۣ@1\O' w*59V5 .)ޫ6T՛: A1)# g.?E=Y ;܂I3-/s_؜H߆w5/Hwf(ɹ '{4H\a^|a8oؚ h~'V_-UGMF?TZ D_ `D|KvVP@O aa3 =sLM#k+$whltc5ȜA f5VX,E>*sih^@YmOex3)?-K۰ .9U\.F E*3̪?Rn.xZh G0#P(Y̲3KemknUY*[<6Û ;ol|V_F aNռ,2ʷyzֲ]`;C(d%wB)y~ƴLF/ݝ;?؊V0ȦGEB7O1 Ԡ28CF)u/A#=!=\*Rs!gWFb,-· G tD-o S 솾kC6S^Ng+5\@vR:Ǵ/4Ln9wl *Vڌbsu!Mq7g͒ER5u(tMTSIs)^j~ %ڟ Vo&6NP>泌ԉ>lT#Ch7WR-+"QzW\otU0?a)0}] 5ɳRju!t Aŗi{I|K#>Of:1pXwG i+5Z+'g<xqSw:ԕM›xsP)M{ >=5V-${vyR~ߛ8+s0L< d?ZQ~^`v+ByoG+?,9R$R+K4d=Φc~0ٙ;8NLG/ º/HFUx^p VB#^;ˈ1[ں y6! k-Z\alƱc+ZrV(ZuArd'O-f_ܥ Z'{:mr +;Gpɦy=L`!f(~xd$YK~c8|R9\ x^>"pWx C1k+:㇃yǕSZe@tq́^ؠS$@Qvov=fܼ:7ܺ Q1RJT<WD~*INCh/(y,c1ZBC"lVz+F#p+<4 W ~՛J @z 8ޒ·6=d ;>b{ k_Fj ak@('ٸ@ pt`џՕr[k\<=Hʐ_4 %zI,;rSF x&~C41DcwĀ2 2TtUxC" {Q~ZAr$iݥ۔ƖC6*@}Ԓ# ?6G){ XK1ƨՓԯ1zt9|0@K)W! ~78]@d#Sd;) bwnv_TuESm |!هy28B:nOHJೈ VeЙc 3c b՛I-rwt~Ofx³%6!ޣ~FD?=,Uhd\A OXlnhϟ-S﫼=_wv`JGߪ.ץg&xY먈_fBa_@s*;C֧c 8Q7h}zg9(p}=u W_zJ8HzfgG)*= 4˘#2"*v)riCgWX/tExl֜rQKvs|v\IeO{f5F` 8:wҒXZ8ºP{v GpH*0woEYJ F,,*AS ]JZ"}jZ9Ì/V6KAt- N[{H b}~R>F |K9Ϯu)ROR #ȥcHJND! 
+9xV i6dM]-+ jXKE;JwI|:XΟC>ҹkVx|a(5gcO4p!8 Xb}WWs W{;F[BԑrMD.&h(A1Zg̚6^y%}#!R(S8XRadqD-(畛CNhe'PoILhw7dUnR+Ԉy2կ$[{ '*A(wWJ?0}9C ]/ 0eB2q di|3ЌPŸRLxVp=OūE QpӇ)B3s1yvA, FuNV/3NB]Ix!>>b˂ee+)wmm)Kf=z?6% 0c P2CY{؝J&pmKЪu@F/ɠ mr]LLIL XΝ&g5L4??-xl21BLjqNM*g,'De:LDl# 6GCźҽj>Q`6cQ#˚iݔ/^//kJ!rc|q9D@؃3*thh)汥BM5qC~d{c*q_V0d0 >܋׆}@yu!q #jyέ_w Z1.\>{Y?Ah*PJ;ϕ[ T3mKhX*NQ-ږ=^bqQh/9~ǃL@*ʭ y?DX5DCtɹxZ距Bs85)ןAusV;D3OFNnW|R;QKڣ,TnZ3q_XcOFhSͱ) k/ Wt/8ٯ=hEựKR%=58gyqu1:t@K:g% j٘Rxz'1LazUjѯqI?m/A3ϻ,7k{XS6فwx5ʙ?0ϷfˉxU_m(Pom;|0k:RgtʃǸi27z?ϐIskahdPk>`_n4'M>ʪg.ɞ[ԥIS|p Y9J"ZfAyBHL"Aa#1@bbs|+if R[{glVi8PnOݙ˿r6Ҵޟhad5-O?*J ɓckö[NIcc[UĢ_FCWy,<$&{DVy.1-gK]%r#3nHcͤKs&ċӚ){\"B2e;*F-hj[A{*K1ǭZ޴Jooq*Jg7q ;u#`9z=wlf +7NMc J{uCDZg_Wqjbd|Ad)ڣRU7ٰ|vSG)߂szH;k5njK_d.4ȯu{tj u3 M?ClNaFՋ V"ڵJY,H#zL79b? ǚh0y#n7z%(bVzC qU8TY<^$QӳmwSak$B !$kd;a5RZ97yw cpi"ڬw6^k)Mk_JBC3Uf-/V{K%[PXUyr;дP3G!mK+Hm9Ųa&k1t2՜(H0Mp/8er19}\ha_пPJ]ˮ.VRtGQ.u0k)Cm䍛,{liZ ;eϸmV7|/83 6PڸpqX0s!nrd9ٕ*,G=.H, ߛo%7z ^@yB_0P=I]^|i SgJyxϵmzÎn(wg ^e“|&)ݤHFkVF`1 ?$_;f4rdX/NTcL%%O XH^O8wt3:Z+cO9j qix8-V`ŦM`/ ZY+emU;hAI@Mm(xab[Ax(v/|(r>`nxb73ɺ)E6del'Ō#TjdVF$qXb;p̾hӅϑXZ {qLfaLCexx8r$v;cPQNa=|a/ݿ]ΊS[(oݎo>O Jv׹ ۧA5߅,qӁD*:0q7߲ |u(q3$)5xA/fzX/[^62}G-HIn)mY*W+XV\xdf9)U VRǫ_[PBysz>xwRעu;/}O Ȅ sqCO#!]80%_qǞEIjgeMbٹV*Q\ y6%Smc6^^=utbj+[#RAۋ=   >84 ,]&`Q P>!#5bv82،ڞ1>,lQ6$?s_Z0S!^?ҥZ%C@8TiHO?]|WǵwS=b:؂9*OSЭ>IY>%~fk r}zZNF 5,!`@`GA ? iuX4/hOAR1 rF;~!vogxrHz2;mj~~n)-0LXdq٘Q`(-,FhTD8LTb_00>IQschGrR#>۽_}FR (h2iOcKgs>JkCHTvĶ{OwҏP~֤[xoU:\zIixcw H8B-WQ/TD%>N["V1D#kcJOD7@1 =OǓ^1cKwJ3WX筯O)r5/V|#ߥhӾz=,ayA !R3&g]>}{VU| ]j$= )?C*7_Fo7kLTs](6/nֶg:҅`ʬzà 9b{`)Q 9qZN}xFߎHVJLxqϜKT[NƳ P}ekL](IM\fRV-j1kZංPb-oa7D:[q={3ȑ=U2 \[5W@0#^#]."9舨`6ЇܘudQIY2%.4|or5%ZCïDEsWG~Վf8LqH7+Nz"ȧሮ(klSR?'I@AƯl||)?8D>oKWLo|uq PW|={aSXd*_.s񷞠4nvxc<恱{:9v9!' j~92i W&3"V. 44~)eC iq! 
8ηD5r|!HW ע3P2JxrXJ*isl!]i3As+rS;JYׁ16;#&g9&ȕ8 &50q]zOľ>&,q?Tp5bZ(3RR#h=P؉/ %筏4Bq硽> \k,fE痪S#s4i!ٹ]4Ea |TVC,=L-vq+Yqv7Ik ?H&?(|ac.Kbܲ,GSYSSއqn=(1@4dɍpqPenXRqAV;jЖ#HC8>~k7I(U Me덽ӓ@a+jYR X4E0/{OzV{@ [83.'_:7ZZ<]4 &FB2B4Z|@ʯ=JAb;Vٱۡ3 أdazDR(~FR܀u/f7=fX|Q;f4h}j?֦^l$2 ŏUW݂޳ '40n <0\o+R8zͿQ3-H>uJS+uΧRX7)3Qx0T̂ .9HVHBW6hJ*'g*a̪ )LwXfg|i>rDsiڣݚvJN)l[te߳FT,fT[}\!u8(vS: s7(IJ%EBY-瑵q\#۞OqQ"Q8}z3=0*p.$+!M41dΐ'7񗑱ep NsHsx5^@A-%uI$s;7W9R aON!PV0@d,.[RR,|Ll^28'6B 7I ? ;y UMBSFt SZu %J JN|qȮ"A %O|)[wi_𚷽ys@e//x20ym{Y&`GS*bv hkHD&o,o1J FBz܁;zjNHAT<<ܻ(+ )l@e'5羔%6Ҕk>wkLdQ1/}uD|u"ҒF4aVʩ6mR),V)`{}Wk{z J{c534AX.<xDu!W{I"䆊HPgl)qgo4m@`ՓQ!gQB :2ROznp6lfڿNgVS9CO2/rrgmZTOϬ'YRD]gbd70\Xx!Qq;֏S4Vo,wgb} v72 7.ԀQu'훷0$KL h\eNC 3N\0݃%_i2sdp&ꠀ!&~XT/m"} AKoj4"wZ h"+;1?!9o<`eU pl "[DڬpnD%}Jk, ʦrc[СZ^m( 8x7;b9O3Ċ5Dm7_B"ha*@z~B(*j.]ʉ]vdeUn|ٷ[a&O{ iӿe3j;0/VWs ғ}cj i ,bՇ/;p2/y^BϨЏ6 B+|[ܷb`!sfi-ATE~Փv@VD{DE<ғQ}rVdv$m~D C yu5T uPL 'WDf>hbIt+d /m{4c0AG6/&bf.tZ='[?݅A;5k&IYzԺ`<2^`PJ'6ٮ|kfRxnRQ $R%)|5>(dA=C.V3ǭLˮej|MM9fdk-ָ$xUv H QŋÛ uiVYr{2uv G7\j=1Cm'cY-\HjP}ԱO طôZCS)pFE'U>iX)%͠=Y*{yRo^],{IwF()MKmϮVv&;m75 r .J, d(ⷉ'ىe)е2,Yሪ0DiJ,qZ>CT$Y?yej%)>% 7Ʒ`]FZB)μRzSM+ݼZ}z|TXBsXοSڋBbȱv߃rIv@DLht1= :s&:c IJJ9L7xnuߣ`Țй?o`|jR|\bB[imYԈQa."<3\Ah4{?Y<6,wG4EnCed"83xDZUR՝R*jabK]D&'UC-,Uma!ewO :3<.Bs+h DZi7X]Gs0y×*`' _ pm I P0KHE|ܟ*Y)T_1)dձ+J3v?R/ DTHF7@wo>#cU] [KI3\^5ȩ㸏hpZqrFٴRSrE{.yO'Z7.u4ϥ{ߐ1 ݬ<*06ﭴb$&^qm >;ZS9]b+sUQS?t?!nIC2ȷ"] =98]> >k/ib4ID'{nQӎߢb p>e\f4pn61žwa0h|P4H&~Ϋ)LKwA!2)v.1@I})';Q:༔>[LjUs˰Yw;W~:RoF|S:9'qK(G! 0&$!Tˣ1†2UMW/k *ۉXYHx% kEˣj_%neG $9f/*6!N:gpW"ڕl$W98 O}rMoaZhi оǫŢ~+i+`4N}yy21EC1YF 8+œM̧szHL8ԖjbJ?2s?(6{X2&T!ƒ*:@m`ܾ*v? bpcTdͤolR}]$%_׀5[Y3LS >s^B3"8jy;~ůό0PA glk74LksB5a S_@Up6U/,]ZPc4>(9^jh }D*1 wJyF7 $!Q3d L#De0PBv־P3zW׾.8a:4a {jur&VY}= 1 b:vZ*ؘ2ZG7 FIj1n Yd?^Jタ 90ΰ%&Zbwg$>0^Fga VjLWz~OάfU!itI YLٓb:`}_NƢ: xd@$r9XE`md,`Y}N6؅s ?]V%ے?` c@kA*5;M;-2җ-1p,? 
%jgiejgU@ʲ&V֡Ȁ}f)![Ǝd@m, | c^Q/.JRx籪Ia{%}m/+V2)^_$ұ$GC# =C;n] ql`!7aq rSI" he mT:kgOL_-'!d+|8XdQ͜ED1P\B{ƫĨ REwWH'-R몙ܟ6z?-ӄr 2>z(,x.lH7{՜5v[Gw4E8䀃7ib+BZX#:_謐Dثc Љ1tT D-? +>8ɒW-7Aw9OQ#$m-x”FP*Nv~N X_JčCzڔ۬,6&p=xhv  5zrݭUhE::L@, ɇR@yB,Mc3 UUUUUTUP ؐ IpB:hAjtm6*)tx":U;N b83 usτ&*BLkބ4賎m;Ť8Ea'.g`h 7jR$jq%! u$ L'$&NFOu0f{&!lF7%$ ;!vH1%v`p0"0~ n^+i-DHHv~ߋ}ߖ!Y ?EUbFObziA*=5).ʪKjDTOc=EYPR*l,>PT>0Oû$O÷ޯ`B `%ڭlM{53e^[CiH@z!GAԈɶ'Wc%`hDP2!Oߠ&cͤBFB/^qy$Oi ,TzZ=; z5QVJ2HB;WɞLVd:14fI$4s:p{WHId4M+jMT%Z]z,_I&^tc-Xk4ȏN)q"!h)‘ @ g1]zuOn$$>/ mkb)J}} kBwqO|oɬ\֥OzfRB^Fxؘ ¦c:HH5 еK!1|\@wtrZ2ſ~OX!/'W>BՒB@CD01^ Llput${AKzut !.PNU$: 4 _ )M,Az,H(=J,$2PeMZƒ22P5 )@&O-9:9pv/_+N'] @Tj`!rТ/`z BZt:q#BIDHU+JQsߕ" \dE%}Ev68g&7RdytjɆبhl{UfX+ӓ%pgJΌ!kQ'k\C-nʙ][:6y"Y9Ob[4I(N!4.Lqe1$iq@@JU=QtdXSnY2!4B[XEVadČ{H ۲EFtMh" =רhu i`unHisU`}G_7$%mu!juv<]t/'T8=ס yBo;ȂIJ}'u}yyTA| 7Ya!< 4QX9tm{vswǃݮu<`w3'oDplE [Ө)1eÈw;hڋKRfT% uTF֨$6帢ץ A .Cr[Xur[R|8`z~+!:1|Mdɝ(BH8K#!Kz ,vRJ+ZLĔ ) 4NFG^NHT)@YzJ{_d^??@^ , SN^ޡ'xOýe׫fZ.׾2l =OyW_Xk6hcybX}gG,H}C!2?!a!%u34 ,HIC'KBǮz }7t. HCU"}NL3Hi;>BD!$ Q)L24g̺uT`ZӇP2V9`{" &T[[jf ]ˢNrc2):G;J{$")[HX(4Y!jL4G8!-iBwaD;wT׹ݳ{QmtUN嫺Zа>bYR0mHw(;(Fl1߿jj6Nֶ mo$$TMTlIDbG:vw4W@uPIHi$:̘ %D@h: 'BDɪ+Tja4LvwRWGZ`A  HGT!H_FIе$3B GVIv+_˂MjU6j^섐:|3jaT]%|ַj1kS汑H% ȧ4}$]7mU=?Eɨ҃q ^vM*0յOvx\܂-5;T,cM%5m)SvCjp_Eڿ4nSr ޛ/"0`œi>YI-Nj8z_LcXN%Pv!f3B" kD7i0 j @'up>'&Mv3.Qؖ;ycř7 fbg0HhN:iب&DKS뉤i NuaX4&P32&36&Y7&:LbOS[NO@kԅLd`<̝~&q=Gn]{;tzNݤjtRPdU wIUSBUԱVIQ-EP5VLhG+f{v͛cd@q<d ^<&N{9Rb A8Ux3Ζ+R"gΦF*!wP"\A 3.HaYyW*`SPR֝;1:@@αIƁv|̐I CnU"A$Ң89@qs?xWnRV2UyZd^f*kJγm٘Ɣͧ[SyK9씣M3.e؜يQoI WvXzͶZ9͖Բ^n_Mg Ѩ߳kli6ۚA~T7ɓ")FՅzH>7pU4Olbˇ`Pw@7< d>^`-)o),Xh7oёPvT]nc\7gl FD^e^k6wຈ,N7`yZœv:JV'H*^Up^Jwd. BUWWZc TY8D4^ױ*f:NmJJrQG3 MΓ9Ga.Rmib'ESډӢ\&^2CrIY?8_EC7~G]uQZB71BMmj1YUD^"줞5'q~v_h|vx/~V|-G{~Gt'TTI{D7 R{a}к}T<P[:E'#C־}kӪ(?qzW$ ,c{Ҭ6Փ̀"!  
DDD ?en[=KjkqUEXIMubףаBTe4h$ڒLv1uVzBlCNW{H`@f s=&zRJUՑ`cJYz0S* Ӳh'[H(ІF z}7 x!Nk:5z6 A`Bws ٴ*Bx(yWAYu8ӽgqD)]ђgc>w@9X9ĝw0M5PMZQX860of8O2ig:]K(Y$!œ1p*/xF'އ "3=JtQAMwZQO7J͝s7z=H^mbHho_ tZ4P2SBhf@ ]+C%Au@m -j6e LTƒGbY; \_UwRZX[@~lVwY(.TF\4U%T\nCB08(*)22 tX(R"'F; ,U^R!5b)Hh+m s4WznO@ß- u>KBwP@0#; vQb,=WgufvdWfa;0Ǐ&B^{ЇEp\H\)r?ՁK7~jv;.\[$Ж8e-.MXu#5aU֞ͪMdGF$ڕkmr8BmmVE]vl8da؆R҃"tmYmEtyYhK5S׃B#DkPUk{kڽx׺KLX)3yS)IX8)آ`LOA4r8/f7Hg)OnX HuˁNSg&l .$| K70%eܬ7#E5 jx4M2pM6jE>gSujU ɛqpru]*NHν[\ 9+.%8)vYمWdytǏ,OMR|#W@ۧ*IsD́A9qSOc5Z)J%uԟQ"lz11O͈({ZRLl׶,iǬas>enQGBSD,QhDe'e6[Ѭ1X@j9P$WJ'"pu^5'џ>mk@cf=hVx}W0X4{|_ "wCPEr|LCO`~~y>+'"g}qO4gS?gG@RI R:Lpgɣ$,",; PYDԚ3-rr %m!PC,Yq8T8 XQBIdSTખK2ԓhѲh %M6jmMAvJmcXښ^f~|1eTA`EYˣ8}_:uQb̈UJBeDe ʕU(&Rc5Ɏn)ӦHhg|agxʒV|JmTM-ZxZ.N8xTr)0% UxOI@~!v@}mH`&Т"l~>==D/J8]?T<{D>nP/լz>y't*uwOZ}W\&{7zKƶjXnaNOqyƋyny1g2#Fep6l˴X)ҹm.9u,JD0EA|ޢ'ƴ]KZj >U1D`OJ%zTb}0^R.X7&h٫iXp¦*gk聘2َ Av% Bwn]8DHEI7Q)ʢKoCrؑ^7S>n>&?1IMekWؤJF3cv@ͯSq IVvYd^kB21i)䪿;2~?h%W}<{QO*kFݯb~W?=pX(eWꨧab6_ńa&OTz@IQc'#qBeQ_ѐ BI ZuİĨk.&.SLjBa d7EP^gWO{`=Nx^'$:z#o7rk~tIgjF!wH]C̗( `l&R k M/Ee0ni .e̤6lR]j EЪ"kbRmɷ߽x=/) Q  ,ȂDdD2dEr12@d1N&?y<).N>gy9.껿D8 :\HG:8q!(Wx45x5bZ1 :, H5qE'Sf*ڻFm9[bK.mٵ 5&a m9rL8T$m3sefg:yME*˚8–[2cy-L8,;3I=yd QU VjƄt _'7J7ܻy2EMM1g7A۶Hm3eRJV[Փq U4#&pi -1B#Xmę ձ>fk&d4\i ""H[ݹEVV!<ǬbW)Ԙ^m 8NRG iUs9B|rڧL )q{\_z=<8 Q{'n;fQZݙ'JD*ؚA޲q4$-ӁU^ qopG~T"R?%^j)W'PdG)ztܞ`a_67d>*'d= YS!{B{B **==g =2y?a<!ŏ?.)bb0JL '(1,ÈI1C~Uvd{z ;7B^ξU?#/"nEII)oRSmŔVAݬXm)Y d \"ؤ7N׿sr͏" l$iKPT n*`&: qaL8IÏ<8Η~[< {$$D@@"ѿgU{OGso{=w>{_}IO?G[ko|_PTG>QZʏŬ#n '  ]Od'*?tRʋʋbN;H^>DISNa䟓Ⳬk {Zr[OȞ"3灢6dX|Iο2'(|Ϟ:9>_yX4t;tJ(܄q) A99B%Q>uT=$~N \M̬erMMvLm&+ѹgvۭcn(e7-&q.c8V{-JvбljڱmR1q;vѸJJ4ceC˩l*]mYčy GM遲:3c[=& yMdsHbi"QMU=wJT\#w M]α6"+Dmw^/ןso^O!8^gnd*Akm,1mNVPmCl8Stcɔ v!aoxЌw)4)D"=z'˱tJⲪyۨfa=8-=p)sj.v-sQPf_nrF-w*1b|]٠C8]vH%נ<*,_׿ h'eYwR%I֓Nլi蓒k?aK]j  % FTY&l4(EIȤPvdA E(jR̈́@S&`]:l6כݹ`'`);a'aŠ{uӰCaQ;CKw`u~@'?){ӱNpyuyyמVμqy<L2I0fLMLG6ExaSRȐWuMeT{kPG@}6#WPDrv4&$!bJp,UܡwmĦg9 bÑ.˪V hm<ȡj&$vXwjU&4.oMAtM>pAagMĝP&jUr3w3('ť+lkDGNZBs{ 
Q}坏+J܂գrvrLKdjBD^5>4Xvu\N J /:)p[&#=Gd 87zv.*W8Ϻ|~g]~F$U*9kF@[ kw^V:-ҁ$`}3mX$i33,+13%f\ƻG2Sk9S,%7'"3XR㣿\H.2K6Y*KU6"J!X,3 f"!m0s!Ff3) &hf*5&`ٳM iIabXL"S9QL*a0)0Šb.(6 zn{;:&uIT@=6zERZޅOEQIס[Bh8eZ-qPGAY>KQINJ Øi̴F]S9'$I@>F6Q99JYy:bL2L"I[,.Huz=W %vv 7QXnURuPݰUMԖiwDY ɺYPڪAݿ& 6ei&Si6hTڛkS6l0&PM$ K& 0H`b`0\{{sBB'$ EDBfbBZTbYd9n Ѻ206>x7 hƝF"dHFZjR.E`J"eF(([.rY[ [396djTyf"Zy"(XjDV_Ranb뛑Ii*! z^fIFdyWp%R-*mPsQ7LT'P&Atؔ6QqCݖmoFpH`2t -՜ߕIX $m"1eDULh_q0p_Hͩ+IrW[)ؠiH/o5#+eX2E.(dl(zca]"4%Yܨ.ܓdo ]H4 j;L:yf(X#zD jD.I.,3eQ  uѭؼntSPj% aZQq;D\ިdeLSkq2g*e(YD(epSYBYAi,4Y^N`r*rUdcDCLp2j#J> tRg8M5AN;I)۶ [J 3`-SQ2͔eY0a^2[bj4,DrR ~R,4J \jgLJp,#5(:\]'Tԋ+5l2L0,5ʣiGHDp]HUKDi)MiDlN10jI#̬H&L`4ơ襴t*V~O= f%Wp?F- Da) M^T](k4B3L URAԁ\khV nfeFRR ;IP62 JjN`J"HEHXltFI܏Z@ꏉ356AV党>ߺu!UZTi5ԲθsV"}Bݱ Fy|3c~d֛6|x"{'{ү/w<+cm O<DhOQ߳grr/p{I!=Q'}oUb2ﲉq_e!]ovsN1G=XEgXEEd>=g}y?H)N9I':R"".)H#9P9$)AJtI;H.DN)8" $'.:8#N("䒁u8.s*"$8G:#'DH#J.q $N;N9.莈$#Nr.#(:@N(9$.*9*'9q;".".莎:⣃(;듺(:(C.8;芢 ;(( "(;K苻#:;N.8(㜺;J8㸺N(+(8'(++N+;*:J(:(B (9#;;:P")"d4Z%jNv(^.)^'\aݐuEwa$U;ugu;HNJ7NHRK鯰&%5EI6b*.M;k̆s~_=~Kd42iMӡUXpӆM:f-0 0ӆPa7u^/]UO>gOSh4 xR۔*UzD[ɹ^8]F<3j;fw‘P;Rw^w=Q{%xN\s^MD% CRETR(j$TӪ2:r|f?~,qN,4HΆ\d ҥ-ӻ' *0mKE˛̴,Rdؼd B-=v 0)ΑRt8!Lx:9ʤא}X좩JefQ(qUۢٽQ*Ovz57su\/ta,=u.#^.{l7eQ|F*6x>tNw>;nrJpۖ+晻dRt$tYa׃p%qoa`{kx?ϖ?OE>҃d`ou&؛HUc'+e Y1SCk,#hID8swSFMmGnQxUyv"" D>@ȈNB mb3,2ʠLhfscJ~,gwv %#;UHP=7V:. ={icc]ף׿uz$fxExW9DaddXK30)%I3G5@PGj!I) &a"Xf iDI&i? -GGY2ʰ EY)HSf\TS,T22n4̾t}7;)磯G^G=v^ ЇGΉļ5PxR, ;'x0B| !~N^{;w{?< TUajQV˥vRT{Qp P%I~j! U)8 1XtbRU9%34f9͚mok6L 4% # Y0 TLpUE)pYX-`E4$0,0UpXثxљ"BO oW=G߳??굪zE65|ͰA'L^Ob/g>qW=iwm=Gܻb0لmqIeQ99ޗXx<':̡4<:M A(:8DR%QEwvDi*6ÈF5;Pbq,&^rb)v]j[KɁܖ]u#6nlX\r흊u$UcgavKIZ5jF36]sfs*[rE4HI $dSkM6 vd>[kw<`%Ȅ5ua-oB;.Xȃzn>LnNJ#bNp| ܵ\ThӺu| ]I}RaYzhsɦEAXp6"r r(!pAG]09Ԃ9c Y]lnöEhE]V vY7Id3nme 6xec7L*2:su\ucaۓXM[ɒ"WouOAFo1gTK_2 T8tvѤ*PS܃.m{AnYXlj2or,췊A٫xg2vqOwZ8Cs}J{*xv$O' Oh^vhg= OAFk 94E]1"G:;b ;X(N'Ds++d1 ֟oUu#YLF%u:&X! 
:ؤIÁvLXq-ꆄR=bZWR<%B I^dS]cshnsn`6Q5).\{mGݎYeЊMbv3BLUpK`+artOLM ׵YIBBJ07ʾӖ$ϧULrgFdCzIieHV_UNs#jݏ~Ӏs.ڛLX ,Y6A6{k:ܧ{n:mA`mCdɰ= Y`k/F>ل>ߴ}{OXI9;)92Ũy+@#L䭋\rZYᴦG"ӻUeT )ݼ7M7d n f~Kq0q9c s0G=]:Weg}C=^~m?6^h6ؽa{;NZ"Ni^K[q33Zs1:o1n[FUByfĠdQS'T08UN&2w'`RSL'v|}٧"M(#jީM;fv[]Ap'Om?gOg'^PvxDC&{S\ŐJN(ULK$Qu"\Ew#99^T CtTbv)-&࢓rn1Y&ɽn|tN1Igl'hHh;DN3;+;,VvvT0v{]I ~~/v>ϰ=aO`*'jW$)Ouj$F^?=m%rz؋j+񂞿ĞV*u} 7{qt]t.~ѼxYݎ*ϧ_ _ @y7dCyi/!?hA(#9(CNߑx<&{ &֡У[cF*;OmuRۖgf4sv4heDd{lakmF+hղdgy݈lk&Բ:5r'Q0-`G3] im'x]זTb,l/ClrB`黧j{U1/pUg&0"T5il"Wa!b{2 \iG(ñ$5@JL2I)X/:;ArMG,*j{A8;2I 8JAUW9,*%@}Bޣ ͣyaW38sǹԉǵ[?r(WYPܑJ-q<oev[,Vm)16Lw1-Fv~; y醡"bbur k&!5vNcؤƟDR6wvh+ JVRv$m;hQBEYo ׳~ Ѯ}hI K f}q sm" q7pplZͶ20Lf-+[Sݷ8y D9 I<a(<yQVRZ.\"U2fb2(faQDfYYW4Ҫ8 ,AV $FX e$,C`e%= )%*X"LS& =9y~Ky9_:˼|'Q+l/ZSPzJRÏWZ)Yq^E⦈jPƥQ[/Ɣ)w8Xgiƌ82qq.28qqqsx53;(L`J " d2.ALI:L|_[טޘ9~lo(o*RTooT*Rof7k;ݭ?87:)7VmɹRT7"d- fܛn6Mm_7s^Ŏ0\qC4T .*c1ڨljf9qL;=M+rb'MAM$dzFFv) jaF4Qd0M\:&ezʔ9()uH!W2wc!UQF2LXCb[D\l+pE?5ZivuMg@͈PaQdV7 d,HCBy\xХɰa4hvf~>՘J?]53GgO׳OzA&E$<J%~]%|O7kִvE^Wz'b!zOkURL)Ҝt :'+_wY )qwĜWGD>j ~'ޫ }g~CߟW议`\N]Iq HrPNq$9)9 N(!(wp $;E'))$#HN" N㸹 ;$N;)΃P'88NNJ p( $N;B.#:N8)#蠨HSK":û.8.ツD:( );⊒+..:苣9;J"*s: (;.㸩;:*((..;(䣨*K;Μ+뒤8:".::889)8ꎓ(:9.(+H;㮈ุ::N;:(".:.#AP^OץX;lRkd5ւ֚Yu&;goX>8O>8(,,uE1>$F|aȿhډf0k9q{C7{n8w{}OnνhJRuvʛRlѳ6_{hp\ 2 2(d\#UW"&G&@a3'МtGBRk񓾘ϧMwķQ-T.8xÊ֜lK'1N.:Wr%6/UD>B, |+ +">A6S_ ޼rɔAX`2+IIeRL @&Re2U-TE<0&C 9y_/aB,7Y(fvwrMSxbo!LlM֩CxU.[b @Ayچc6ڛJ;T;Avbkl4i/[JEdҚK4@4#ip&M(B_ww^}MyJ(BIs[頤_Ǒ|y@?B!TE|3Y%%>Jy'<qssI>ETEGNN9))! 
ҢK mokhfӘҨ%2 ?ţ˘3<ۦةN R\(P݆]l+8gik6)uk6l' &T9ԙ3&TTbƝ]bpm3ay4qc$k.tY d z!bH\R~lbQjIf6g(Jek-%s/Zs`a>WC:᭸1QܛW &Ev)Uk|)FZcDsq'dA&*13 ъtbf"*='L E,bM_Ybe9#,:R3'8Sdd#3de9 kxh0S A& 3~o3۾^F,Pt#'k) 8v/rlNÅTۥ9BP7cكinԽǛ)y0eQOpYBkA؎(Y Oϓ5/| E;?d)KwQaVk%C6XumC)VAӒJ6Y$ky<(Kї_Y!J4i^My5᭯t `uC=]c ŚT N>~绾O^2|''>S9&*S>d(sE tR Rq22r<6jSGl4/BׇͼEum(:y}6!}אdQw.=p޿7 ,9d* )n{^$[%UUr(QhY iw}wۨ%zWwŒHq*:JQ4HrDirIȈP ͛4x9CKOCA6fY.JOQj154;:zaK-䛐Mdw)UPn n**rISL(`ʹMXk*YRa5ښƱi֛3k0!QaXR*\2`p0aL8LKٶ 3Z !3w{U} eTk|WP_¢0JJg?OiH#U#Qj(Ku1O"j)"O)E2ȢTxT>{S&/>)'}L6S|!qx $$Dpt"QS+k]=$nntmmݢ;8L,k 4dLƕGttf YfYl$IX*1 !j40JNbvcBC(D]lb+&BEC"@AKlRaͭ2}Np1'}Q&67J9,Ή!ZvFXe|CuPĉXσbP/NunClyUd\*ҡN ]6-;w,⁷^uT*h* 5yH鵄:&.Ү{cD[rY+7e8?6<ؽZMRvTC9t\xYxQM N b ; c>8άr^8Nɹ>u2bNI8F1+"/+x#gL."m5H`or0>$:/o07"X7F?uHтMkQK -XK)3␫'`ң)$m1Ab!ӽ~z@?z!l4ؚ"AKj&PP4ke*"1Y]}u)R:0 =xeq"%WSE W_B4c%O,Ŗ5 # aPB\M3U)S1aTsQ6ֹon"`)LLLC"ʼnb1iF#1ngI 4?)C,|^d_7ZO?KKṚw6Ps&Au)B6(8ڍ3V <؍>h{[ɀJ.᪄TmZΐ՗7Ry P$[jxElR=@O(7Ca8@Qp%]םq2 9Ig%OK=W+Re1"mv|UzR~uw@*)Ê:~UU罱t{l2UdYo:@JHč'8$RRo5W%ӷOJI}ҽC`A]́@v6flS[b: F? 
ot^ <0p ^P2x8l35hǝ5J<%ᔔUQ%40"M[gdMȡD6M6Y~kkl#E@fDdJB$db"I)LJHPTaEUC"䡐z !wꨙQ & '6\:}m}t*g3={=%(z{C#xwcKַ/w" oWԽ|V/_wz[W7^_%U^VE@ZiP%c%M4\L8!q,G\Q9{Ӓp$Ɠ 4LrcY,S1CH Gp7TFTܠwj$wJ7BTݻWnmPt b(wZUA1m1H{7UiBL L&ľ\(fF0fbfoo۷nݜ$a8h&!Tb\L1,F(ӊ1*3]EbGi`p.p [Xstrucchange/data/BostonHomicide.rda0000644000176200001440000000316413062350355017075 0ustar liggesusersX}lSUmݲ̰cn}_L0J$`bcm$@bF##AG fJBt!B !*${4OOrw߹ݾwn^,)Qŭ25Q:/LŻWwu)gYҭL9reoAHr^'$W$\GW:̣2|_OG~ES-+uOO/2.BrkKғJv[oA>B7>…͙4%3LZFYNq jms'4yGy Mc_~,## ?X  p~;'o=ŀ x?~qb>6V[_<ps::՟W:+BUϨf+/C|pG4|ǀ| 2^8==-`oݜ!Anb]>p?_Azw݌ >z&ֹu1 !?}@~/YOX/>~X)ź'Rq->uwCXo+ބuc _1~̿z>aK[_cObOa^EXo`G9be?Ev>< WkzdzxܤO#3Ix.^Z櫔+OSΓZg g 1*Igy:$>?!g$@RtkH8u#Nsw8$cYvM6-哢SǓTWq::eLV׸ `id+qdڶQK~fN[^Ņi\aڬv.L+ȅͬp/S S ~Y5K9o OQ[_ uXk>4_¸Pa\BFXdx0>qR'ì7A\-]ێ8b?~ۏq~cj0jߜ`g0{V-l޷W}cYumx߫1zvk?(X#(Tvെx?ՂؚXՇ%%{c˅M;11b{V sQ[0Fͱ6{_mK\;q8s9‰nXB\ݼpcakaaG2k?tEW&۸ D;Mu.!Z<՘i+銭[ET9omoq]'⽡N+, .~.x.kjF#p=YaT4t L"d5E*qġJ*qzUPC%84ЈC#84ֈC#84ЉC'8tЉC'8tЉC'8 0 8 0 8 0$8L0$8L0$qD#BG8"8KVDSP{(strucchange/data/durab.rda0000644000176200001440000001130313062350355015256 0ustar liggesusersZ;Uk~PptIґHDe!21c-lv6!I4 AJ:*$ :)4h@7{?/bֳ>?\,$%!DTJ$QO%?>*P;3P1;4%#5>u:6K۲PZm4 $"B4#T+N=>P韮%Tv2B/TX*\-qBms |FS^!EN7V.#X7SBqEBӇ]zz ݔܱ -[{5}Ž nI*!TVԥXNk{ػK B.P}F(g!73܋ʹyd4r;^vg+ GS^{U \_! 
ΗkfٻHSEB%Y- ?M  _,# }?n :ȑܲe5 L 23ϭ[m hcSb&H%~*g`8|qZ`kzU;V6Yʌ h2Fwdw/Q\S\sx4 HͲE"뱄oQ(N_{N/O6')\v[.`ɓ8!҄(ĭ#N\ ?h Z[CS'Ծmu3[ߨx<=:dedOyvҀ-p?݅?P;gYkCV i_?{ LμCM>E]=|ךV?n?;}7K@| n| 5,F>"7#CtD׃7ORQ"B{vSi~N NN&*D :dѣ>0u26nψs|)uG5mzZ&ojؓ=|7:~Ϻ]E+䄗ɻ R"3қyvCKdS\az%x?ES }-LurB6p6cnu6R-sS+Dg8[9ڇwATۻM* pOK?>m4v >F}n]|'@מ{Z}wĒY(e?诿89H8܂Qڑ JއO|<20ƺb/*s#cptir8+b^p(SZїY h K/ž-VGo b˧CΖjyp~XB`mYI~f$ԟ [3WzSIoF:H};jN$;:h1iϾqz}ЛAڑGe~f.7`x*-yݒRȏ XeMn: {CZm<7n;fEjwmt賋N5u;*h`8ꚻI^s\ƺwc S/#_{QU:vI..uV'SC##}qQ;(#Gj$%"Zm^3䘕kG>:W#'s_,5gNo+#Ϸ\sw&qrڸdʉp.?ap͸q'QE%`\xbN{y^^kgB)7ǑJ YWѵv'4zOCi=&Fl`λ/ν<@J;.9 %Q݅`+92xO!\`̝Iҡ |'>Yȡm_KNms GŽCB{aGê[ڽ%Y'M CdQ?sw##΂lGܓ76['pEP7-$=O*sA xkQuv("mKsJ/ CySxEƶv^hXxWפm8D -3vSu~$ Z58ֳeMaӟfWg9p#1ءI.\v;sK s;5+ǕX7ŲT&Ȕ: [oV 95Чԃ ׹y ~v6;cUM' t?|ϼ}*eaā?a+'AYOur#v| T\9~lE 77<3oaAqB΋cn BU-Uɧwbǣn4.~GE–\)z7KR}iR3hgo5;=1_h~2fbZX޺})xbVj 2?L*-ܤGdiG_ 9ˁ{r{R&M??q__G:_/YriW;:EKo5lFOot_~mhH/_\?lޡsnJ4vX/=S9 ?Em|xEF]NtywTެO*]3[ :l {5?f dW tv畩Z3?/ WJ߬ڹ/ig8x-hQiBHQ [sys<_J.s@P cb>jqŚ.uMzXwQɪ_&;K=68Vj<< *~Ac >,i}xŁoْI"d^7ѻ(:/218pρa՗|)nU)7T|b45Ptw`+;2AW^:Hˤ j?4 u0;(]nu'wnUp߁[WHKŇ%+׼]%r%A p:FV)@ "$/17F)PdvsAh(@šX 2WIfKstrucchange/data/scPublications.rda0000644000176200001440000007233113062350355017153 0ustar liggesusersBZh91AY&SYJS$UTUUETUEDU/(/@*$wi(({B>s|;ai7 DHA@[gN=X-\B@$Zk$0Q4M`@Bzi"(%J TdA* @"@Z Ȥu,([R[jTiZC<h4@Mh&j`d  FQ)fSɦC(ih2=M44ڞiA S4ROHhh hSABh"?TTSOIM h2h@  4 cE=OiMm=CО4@PhHDTm yz2i=LѤ2b`IQFM616iɠ4&4d D2h yiASM =@4C@424=.-! 8zƅx}*wM?a(/^KݍJ(23s{CR_Մ/Cxm=RgΟ׭'*9aeT-5f,/?Nu?}Ap>0_fg/ Wygo=oL+ }{~~9MPz@`f_GŋWp*R2KY_Hk$Ix2I=CD/%g |X%@y%W"f_Q=KYȁxi?iU &}rfW ;"PԸZBb̍d:WWӯ5^ G˓e?KuS?7^>9kO'R"+[^Ux!Np)bI7ۉ)1ZMj6*_=D?ڙ rrfZZR>1ï^g [p@3b_S aO,P0tO*SrG5U@N}j?X~<9Mi< ܩ5ЧnM*mqJu K5/*pGrYx:d'M%Pbwbwz|$Oȃ&nՖ8T B&>b2 Nf0%급h~_1 _`:V\RcSb)*u"G ȖnMIc:F/{~^F}G|7C?}8݊qYf;͒v5ET/O66oQVjzYwf\Wx\MA1Uf8c{2)Su*IJ4Vz1_^Q؊XStφ+ASWry&د@NJ(J53MsM剮LLc +{O&݉Irڶ4IXc/ '+c {'u/EKv+UEu,x֥\Իvŗn?[p緫j\[kG19ˇfEw\g/3&ma>Urn}s_nG.^B$䗺e]#`6MEPTI?-3waz{#_&UK_F}@fVOg XއWx?Û?oƷikvw&ㅗV6 ! 
TY UB"J, hU\?ʦDbXD/0.h)( D Qd$EDd" AHX((EQbȉ)F 'fmxv $:UbRVIWź@OEP5J!QkQHpٚջ8?n**9 @&E1%TmU7!~?A?9n8:okci5]pY~cᴴR[L2޲_^Zo5AƖ5[6iӣTW.-b-WؙЮU奼=5`mG8˒G eEmṭXl+F ÎF6hGՙc[.9^yEZl0[kTM#ˈ 2 Lb.hۡЩce%YE.t:4S,nJZ.q۬pDeLvDw0d?:/fIo^Ire'FP-wfAnK߿4J8 v߲vwn8o#@қZ MJG]#KRuMohEnհқ:/ m[^:]9+N)7h#5s2U;W^/A}͝z}c׶k;Lq=q{^$qǚP;Qvޛa6:3nz?:d(x:Gn u]z 9~:xoϖ!&#Ȅ'z˟-#S>F^9mxB ke1fi'Qghqp5>TF+y닧v9MS8a&̣Yg1{cdU⎦L-qʁܹ7L#Z=9Bw#SMu[?v;~he,{1}x.Y iT!MCrGkG4ub$8,([{1&,/1$[9{7M`4Gː,j}f,M0ELllntZ| ci> e>O]0҇1i`jGf.q&Jsersі|Ђ0(c񴃞K(#DDЂIbMY0sHgߦ5.""A:8z8mO|@q(ANKDh!2yV Q”]h61b0UT9mb!n61IXjbvLc6_f라E=﹝xODAۡ?VBO>iX~j >YX*1 TXii?\脈(?V*"HK?"1X sR_őHUD@6!~|Q`=.yHwEȱed^EGGOݒPF %9E1PD/i.ѥZ=/ ĠoO+XH~]}:)GNd#@?L1q*%M=;&I(?އ""E$MFC : 3&Ҟr@ ӄUmH& ` jה=*]*??y "̀g~=[ Ģi䊧]yUSkVuv=UŁN$ڣM;#)1Q T{G*Q X 2m{Ǒ{fO3//K` S6 O7;/yCy}?X%};kp"N;JQ>lCG7yXHV)@7%lRY:ԁ'ᅨBauf߀P l"h%zetU_:n___nzb/** -ccS)Cuw^ 﩮<r Tधg 5Q5Hhn-I!H>QB/lY>]?i5|dORj[ -@]lm&g~Jڰeb&VeH)G+yXY p[0%Rd@DE( -()j!"x&mhPy_Ó]ghkVJU" 9>[ҕ-ǰ95"7Wm_+-< Ųdמ˭N>vK醑9Y.a7fb9jƝI5xdre'^z/vBV[znuE˽!nm%03LNZC)C'ߌx*YFct.5tq݅Wӈlq7]]"B;v.3j ˬrs x]{c[`ٻ]k1ò$:;C{=jX1iv]lh߮Xu0 x+[;=w 9.rvpqIZBS[= ;n!V 3ߕyLYاZu؈Ky.s20b*MmKX2J,cHC;*Q^I,)DxR:롁̷!@W簋}C)FK>..>3/ %J)(/[ŨB@Ӫk;2ׂm+n/inw^> <׈r]^ ىD%a :BnmdH(SB4X m!-iK$*^&Ђy!M٘ORg~쨞v_JOPױx4@.`[VJ9T p63~4N&΋{BhͽݬRf(X1,ۃfv[qЉ'b/9.4v."h 8\d #Chl=/&ev.%3\^ݼ}+mU?/YTGZm)~74XHe; ӄ騧%)b BV\hc^8 ofբvt4/N575 e̐6ve,H)CXKB21/h#XL&.Gday$&k]Ҟ\ rBXQ̑):Q꩏I KvIet>  *u$Y|ߌ.G⇴4K:ം]މ#ߍuǃ$pvb`p[$3)Lê̯?ǩXr6pOEu*AX";nxZH; C~DO&ʌlxunL.Jp[${YrrYHiYL՟}9Ok[? D޿VWh8hA#DVnG Ü9L\%X+R449&q6# ] "8ZlI9vڵvmjeig8hSCmBJٰê.8"rفWV@?a *=& gPoνis&?7o?Z#0oBpcwߌxFy9qσ5D^ez G^|z٨2s`&zd|<}S9#{ ˋ!8ׇŇydUysh}H$g2Ǔ6՘A}u  z,א2WkU`Fgjprfy%u gCB-lp -oqpқ$?no,տ"=4ޣCݬ(!\0(zL ${/_[JR[{ہ1#|8#glBzgX6!5|wr"G88G 酑K@J2L@ 㩦G/9=dv$P!ai+6m%V*-24 "5@HYD<2+k5-IWĬ53_ێ8O;8ꑪFg0β7 6n:r9yx|un[Ӕ zi[cMw H[9u$2  o zD. 
}n&5:vΗWzIe>!@Fj;챀eJ0bAE.l;\/53r ٔBQ(!wd[^I03em2H#>{IA~ad6?q q D`}ʹ>½ڶ:^<@}B:gߝ4qQ L~ـxl PU`s1`6wx$;hj`LzgGZ֐tA',Q9鷈bG@)]Co=rھy 蝜ٸ\DTJ𚨓8Ū1t~:MFYÏ xwi epyLD,>2^s̅N1x;:7\<项"YuNӬ"ޡJ%A$nMDl6 vb]8%XS34!ܙhwXFQL)"$S4(.TՑJ0 v`oyf ;URf9@C9# C'C\iY[!()F9\b r%NL`wDG2J뜆:B6BChp#OS>ڞ/nuhx@IpIA8tdzZΪ70ODmǙ|A0$ u |i@uЉ;y2rم ,{T_K䁌F7=d{x̓k64o3FBuKj/x7?oO1noxϖ9=yDmZ|eAv~,ǽ,]:W9SE`"z:ͼ!<ă5t~|[bK3/z3w"Q>$CEƎYHSG؆7H|^ K10kB=0mXLqHtQaFɦ =C`\(} d;#8 1%2$"_:N'PIr)GǃgHA{ڔvW,+a/+#W|8o_kfq I7$E2#Fihy 1_}e!. B,C AQ8lEvXn$37.eilS]qgL_5iy|;)"KQk@,<,;8F).Xr'$$I!t]Y8tD>gOtzc㱣VnLiuˊٕ/!$Gz!^XxFP0Q[>d=wf+>xD3vFb>%ˈ!j arn>= 1ȝF3 l#fӗaj#sԫ?1'݈X6<޹g\l&0/Fo>sp>UB6Ÿpm\YqOB<&kBOL %Ifˆ-]gthszuB68 H!V8v7K,lT!Aa,2 ^¾CtOf"nm$ Add+M ei-KAinD"Ar_-yݸ}~qJW]|t+uTV ®xy ù.8؝V˟GCci] G$9;o&q 4MF-+q+TKZ[sY!2V/:#s-9㞋 #_5~zFsM>FܿyEG/7; {q­. I:*ob[lu }5OߖnӢsw8֦zߎuó#~C_o{y~ 'o<\3 P#+FO=qߤ(JM dۈ .D<]-Ij?op 4};KrZ:seр?iP%jmhxQ_?1tזҨ#7 Lqx;.Wl⮦OZc-.fcC%xAGmdgI*g O]8Yj AR,YEJLkAurjI-%B8T 7<*cHU~wĂ!DI<7-RMT^n5.n[(N EI`\Uu^>lB۷n7f1104DRX $`UB 0 c}ߙ 4͙w^K]U~/LL]VUfl]n],W@]tUW@UVi[36f.M4M+4ാ'o~M$bP%2 !Y*u]uI$I{޵kZֵ9̬4ʬ 蹺6mӕiuZefVffUffeffe]jWJUW@]65"뮺뮺nHbi& ' qqyֵkZֵk33333vfff쬬̬ ׯ%7$-\J*WA .P%]I$JIRJu$RhΩ!ojwukZֵ{zֵkZs2fffffԝ ߳WRI%U$$$IUu$I]ܒk$w$I.I$v*I$U*J33333 83~=ׅWRI$%]I&IWRI$TUԒI&®pudJIRMY$;㺫iWu33fffff`dzfffqԒI$$RIWRI*I« Y$$IWRI$I%]I$J㫪㪫ݥfk[v=u͹+ U$$TP9WY*u$I*I$::뮺뮼!!}O+O=C6]v.]U|_/(s3voUԒI$$NyW@I$՛UNJNk@5qEݵUUUW՛4USF 7wwr(UUN;C?cT-fdCЂ91iq*^*D @h"E >oH"O `ld5QGt83j9 V)#%BR7t?8H|'tr7;@SѵC٥B˳ץSlMNfaNwɫw"ӺS8/YN 9;Avu}L`PQH,QUV1`TUH 2A,XP"QIXł AV ȏ `+V"AUE*XE(EAX $EōBEQQj*kYXVEdFH тEc(AEPQ"(((R1EE`* ,2 "c"1EQUcAbdX(F"Ad#" H*$X)"0DFA`,` R(1`(,aH(QȤQ`E$Pȱ,]EE"RDbR(aE QH( " $ H ( EV ! 
EP A` !1**2 (I @N_M>aCSORH [R͏To]x Z e7}?")h!gCM@_Aa"a}CΔMPAK|S:s 4脇k s?Iz> ~jo}C7lww};옛gS۽rOEclu]z-oo>eпTUgX |<7Oh竘BCіђ8B/`BS{A׾Ed-2#B[4 vt'| E<}.7ٿu6sP!L_G4L^s^9^ NcJƿW~bz/!g]#4W bmŜq(R[zu>Tp`IN3}47 0 QQ*JZ18u}ْڎKR/UNW{ ojѼDa"9p&-C)2W( }duR'nz΄5 @PT G0za '9'չ÷ 6Cs~3|a66r0ZomsekʃsSwS|b[:#Bc>_øxv|('o>{].CI<_8k>QKOMg|.w:tVU o}H&PB#KP[ r"A!&*f) :ug,P2!09d LrO>6zZBr@.f,"x{ӈĎ' wB],N`@fe+EPZ$K c4P\OLx qI,lGRXD@6-6qK0[ *$?⋇U2o$94ۗtðУRP,`r0s@S0v(۾ǯ#߅<?}*v~M~Y&m}]Esf%XVݭAY̝^,R/zw`EKM}` )6FڝsBƱ=8M(m1eEe UQPo,k=;Q#@#"x_ \O/+(HU~vO3:ő*3H1E`^j W:ȣʋyX̗<7R& a MW  ,p"5tR6e8z{ e쯥Jf!uҕenSeiw) sZ׫"uN꺝킯 Pm2i$^b>1+AyY1㺌svU3 = (ES4+8Bx O%d𧻓@j!TM5(< m^nx8XsЧSI62j5ĺP_ >Ebk5a/>WWl91rFt˛^Cp=',%0&r(l!:s50uPnkU;J. lUYxGZZ@xE݅J @4zqs Z4 wBn)p'"ВphBf].s;jyQ##Ģ,FGn܅P>ޛ%lmMZC:7(ľ'k]C`u=e]a-(crz.'m`>dD0b;^-A}M'~'A) . /a!,S =Om(rA Zg-jIPіh]`zo~i矫?cZ 6S77\*.`|I~OdSe"]պ|2lt+ׇW~'tod ?`?}w;b^M}H#>}=0@Z5oAu A_]kVڒ 1]uo!h:n`I״K7!; (?Gf fb/~wkqPm#ͻ0A?y SN=X0 `o=3;d֏d+kb8;F_O΀r۲4Oq];N9Gk$ d{upb\7-\0ƖqE>`aF^+[ SKHZP̀;.kwޯa\(Lљ1<uP V% f7>A ʹL>LWÙ%f_q=ˊ4H] ERs*( *27 NbD`t\tZ/[9|7 9 #(D/@!i}n;jǫԱEǃSSo{~!>G⫍zU|Uc7/ͩOCvz]A_7̆P{S*jeӿޒ9ߎ>R.'M,(e[4L2A$mHog&X-!RMW 5kvǶ'{q}'@+FDEV ϝ¢>efGʃ1ƇeΞزJ )%61! 1Ɍ0JB482$$lK{pS}o60\@07 ~~Ūl* 0r֤z]RUQSҺJ7rbbdȑBs1ziŦ<ݯ >4XԳ$ 8d=4VdPщe \Syo(C?mB}*Ϭad!p:Hh{)iE#(L<+g9|4&z[>i@k 2QD&}'+l˚J(i$O/=V08N.$iB[LnA<sfԧ :ꡳ|KLJ)f'1P%_Ü[?:) Y 3Az\ f3` -}c8J;6yDW"mV{ݻ\幢3V91o:&]%\]7m%Ek- -rl&+ܹ,ae,p'IRAR!DƬ6[ 6i={>6\}0sdNDK<7u򻌐=BH{!X\#vgS=Dpu5ՠk}/BbQpjcbdHD],%;.َm6)*X9kuU,*+1揖V`aT]L>}{rjMlb![C_n+>˂6wXcoٛtX93ǚGf%+/HvD:N̡"f穆nP,AN vihLX^\4A "l& *LR. 
ftӈN%~t G^;z`7T7,L(HylsYm qYnx̻6Vl1.:]Csa/4P)wgK>͌/^S]R<׀bi[)TR3mh;@E ]M`>}϶%ؕGyNŠ0p"j#XXaR8v3*VRX%a9a!AN28,d67`]B]Rp`{- P5j$X Fs֤B>p)/3^H1;NsjJ;6A\"0A1MH1 9qTdX RBX #-Hc E'STf,cSCʤ9J3$\O"nʠ abAb"PX b!)l$QRB- O[&B㌥TTX,MמN aR A )P Zu6lsSSaLu@tB@\pܞ:2 Dی)0DTTCQQ"ŀirqTn89Ӟa==+ FD,YH,Y[+mQ`6ԕ&FSS2"嫨tҢ(+ZEELT*خdjK~SSoyG \ns" "$Tq0t]ZuF&pDLU\˫0r)RѤc4Mj EL@4#UŌd fc(3T D`L$ JJ;S ոX%"%KN0ܘF waHߎ),Rހm[s9'wlӞ, DdzЅ@V4b}tOv dFw94C58aD=%)at#21"$bF/bIE ^)Yl^ ZJ&M3hm7a7Tf,HB0]ޖ-CU'7XBTj3dq|ć.r" ȑK#j*ʲ +ƒu% =k =De>Ǻ_QAC`D$NpQzC 5 dPp׊dge)^`ל1Vxo!ˏf{{aem`f8!)-RX.9ieroVgzg'*,&ЭIxaM+Fe+SNj{rfqNxR1e}\,HC է75a3AXF0 FEGL/c'`4͉4Ų #``\("rU7gιyD Y'rN"Aw`8Í6Ks[0+[V=ސo".홷c" КD:;&"N@RuDrYI ŷ%p[ɝN_ u8 {,2A19.g0ia,O䛕`8k,-{16ƧHdz,&҆7Y1jUi-';3$H=c1 "ycݧbmFT9cH)a!}>d |y5tj8vq.r /܋}ϯ5ev0ILM2bX7K JRakF \yeC< ;2bX6$yS>vӏɥ#Anp G<#6ދ{{]i ThEaZII@nX؄XX\~hQ`4oݜ)rΝ=;T맕W6I@Hq98OlvIےwE,qlNYU-_:AfNEDC`{}ϭ@lqKvsXQM&npI&6_k|~8Z9"m. HmUTPںa[1TF ]SQ)'ғs$Sd^k`3{v`6"(|0߶] 8m?%#iYwv0 '["{Og]J?gףc`mQS9weC&&-'jzdᩒ?-RU+:"/cbH 66N,i|53Q?oJ:֮փrÈGV 4p@@ 'Ͳպ!):jXINJX$l3~'a5avy!Ąc$NE"Z~vNh %۳õ)%ڄ"1ğɴ͐:DNvyX]K( دZIJpw; j 8,6Sw{B&S65ɭ!IsKYhV )Q4T E9$鷶M7ٹ9~xLf#3/ (V{{aVo.l) {c<gY49Cjڣ ISI 64OQXD7+W C1w*+q>S"f$~71w1==tۺkHSh~pmTZQޑDC$О$dUTGѹd7 P!ďdH41sLH( 3H,0%~,5l@c"$I~:s; 0ܗ6m6cX* HH%O }_BzV\ %W\o^n)=v}7vN݆̚})aP$ɷ8/ dZ&mުL 1|ڷCXDTKp{N|~{3'?Cc:vnd$D?9,p$G؅#+(  2)d* FEʢ +ɔEBC{ &ċD IUX.I@Z"~ZI#ej6H d앉'W31X2?kHT"dmR& (DbqL"V2*T) EuG|(zAUF$@A@BlE.'4x͵Y#5V"_}{m -7Zm&@6 ",!d(bTB() @QE EPX)""6hm#uL\QlJZ hu@UY'tIj~>9֚7<R ~eě3 bPBGτh_[%T1RȄ>tH|O1% K8IYr3sK.- ?  
۠9fpc<# NDQ;겙ϗ^yy<\لH¬wkޠKkLN@2C맻oa03@G baL':+aGPŤ՝?fxWU#y?Բ߅u ~ XZ(Y.2ږ,o%U* m6,KhR֜8Zsq23shi!ѱUX}|籥HI\6sk鳚#.j4w$3["} ^i!{xl*`آ0'jn<`!R}l6}[$z0ϳ"QAb(UZD@PV "#H DIX0+2 PDR` H  1"F#CFqփ}DsY3t,|XEDRώb͒?\,>3o 1Œ}ණBhFt%LD[m==zh5qQ5h q>Su,*OTb0dl fW+D 6h0RRP4ىW65)uvJ /g'..]bUD  ZV]x4"  -F pEB=Ћ+ QD+ݰFk}wp8N YPQ"21YcH{]-lk JҘ+ +k0ְX)zY6^|'[}f}U' `AeElS\fw~`ux7dT""X; t vl  ${.`Ɨ"޷;{8ƨHŘ4wGl,+E0 $?DDRohzYs mbimf:k.,*Je6qiӪ5V,,"q@f1 /'h M|.U6K 3V6"2g3@1"AWl?N"68dKXTH*Nudkr"*gp}D7 8+ dHN㾁~Nz燒`2$ R)X8BR{pQ7m[2jԼpdlJt HYJ[\FPܤD|XE R()IP vԡ:0>,K YzC&^ؓQ5E` ) @!6M$֜ɦ' i_m8(4HeQݾi[47Jmj庬¦e6h\K-˦f mXҏ1jd1S0xB j')x25&+Y$a1ݹ6,xwkS"eV*[V4S50m9cScMCnm{)VcBp]E NU9a*83)5 @-dsɣF,U 5iR@47F ZљsfXf=u I~Cp0{7>EU+J/٦Z:h) HB` H ED$ u3ZiBKBi4Rz,6 3z,|"p/bN{hp Nl"Ch.^!1,7獛۶ZfWZљwӰU| Afw@޼եΘt=i1T!X94ʩ00,]H #pu9dX2 J*0XT PKlF md] `"Hjj5i]R` nBVQ(5"c^0,e2AVE̱KƐg=- i\%2q+ C݄%~d6ē5`>{v:hx{l:ݖ Rm $/Kht msSmIOG1/]Bי|XօOt~$b@S,ߵ/n/)VD -bI#d9r+3_ ߮+K}g4l*$}`PEB=VLc {AM ZB9КD H!8]zn!0ɱL1=LFhzJ*0 LTmi16F vnc|oV0Z*67`*"ž'KOgtƸvE:*$U&TEXXȰ*(Z9J#" 2a&O/WSJt'`]KSHD"mV8cRc)Ѫs={4aEo1D<ϥd6;b Шؼn -4 >:Tt1EQR"0]}p|'WXC z()!H6yxl|w$He$v@rSPa 2km6iԷRAkB r+ !VCZR֒`IY*Lq({ Ϸ>ɴ &[DTX$JL*Quh x %~kH}Sދz_jCM%<]Vs;Fm@Q"rnQphttw2/| d765Ou +9l$!) b߶v^Ƒzqʹy*JD Ll'߷\+izb@5BjqaQ#\KuH-Q])ăEFVZ")Xa'+ٝ3E*h!Li'cfƠ'nk"̵DTF.ZDJX+P-Ȫ(Y*mEX#Q+iiOk{f3օYaӹI = D."؍a x;Rn,S Ed6 ds"yUHKҌ Ѽ 2'22x '_L0Cǫ&aK@d4JYQ *C>X",0*TjaVbT h`1YjV,d-b (fBDR.BbZ (,kcH1R]YubPY+a9n}:rlɍyWq `14L@*ExCy=9d*s4 jBqaad[*uB;ln@,a,0.t ZӸ4sL.ެ މ߶|JgaNaJZY-"Os^$Zl-b\\ٷ;^OG$! 6B`\ <xmJ*Ó f1> QgV2-$C4'x֥P1bY-Yh# (eho*U{Oq!̓ݪV>d"$RCq[07B$¡K$YA=ߕ&L$RCвQ
h.@M&'TAF8dD?RA AͳW^B1\ ubDEHRL Q8j!{(A0GƓ촜}nɈ~IQK9 dgqUJ21dC]mF&MixTў4SLĭZ2զ ,³4ar`UA$*H gusR+sJŝ=34@`/~fVID/kPI݊"ELbbi"ꣵrO8 s ..֏J6RF\oپ؞2d'+MQβTIA!z Knwjcc *jSH1$|*$hj՟0[i7fSf&,*Io 猘Ht2#OuCnTK5L,c~ij0qȊV5nez bYEE$1Hfz ,T`TV"("lY )WNP6][S,"@UEr4d|KXX{'H LAB6tc<˵={\I{ܦ"޳i0WoR5H0ʍ]l\bDvc X,$i/k)m/a]XNs2!@gH}F|N8/4>9xTuyVd$& O 6 }ՊnœYww7kCS@fμجA L~VzId!Ԗty8 {`Fc*&g"w<Ĭ4Y cd DwCP;Y6F3:$d.k+7N ts_ q=scAfލh"tCP< k*̒+gļC6Cm XcQ+M(iAj2+ /15'{ ,{;y饴Ϻytɤ 2m&hu hE$,j66^ byshzv|d؛LR)DF"R,Qe:4OZS)X=;H!")RBYb6c-`\ m+. HIۗ5<b1S*s=$5$AiAX-U@A1*aEDdmQ[j eZ9ZKTUڪtALf +^Xv)- ADrC&"01$QQt0V!Z %KP_p~zni|ƙ7n*]aFo|AI%}$pW@ChChXeT[Z A ƱDXYVi,mzKnWf[ܔi.;X! FsPMM (ffRꗦnib^斤 kdF>2Dp%;UZpDsH=D9h:o)#ᳶyXo5 u!T#;5d>,t0wăn6ùpI² a;{>pQ I!Ƿ1x m(5h+Hד=E4RPJ !/~1nBGF ad NƟ}2ȍP=gA %}p}rDS(ۿ3A-F_x vmGh0P?.;!h95:V>\/Pj&zn͂͠n۷RwMx4TU"/c3CA6؁Z!6W q'Q̽([  D9HfY UE}Hi"EXY$&I)ф* 2aĹ7Csnڌ=sg= 'A;Ct{YUV,:qXĤ_6JmEgQb 9#ae2S{3}RzJo"1},,Ib'񗇃y! 7# '8x͔@ 69W0[PVl?b0*8(s 5pq@Y"2F8~wK7.`V=)~ Ozbi޻qr?}臤(w˝pȈX_f P@l5>"jMң6qu]i9NP%CE"C"sXf_ȰZFm92aٞ^w` $ Y+F\#t7j%*W >[wßIP$?G+#\ @ .Ao)6;P0_o9;m܃vK}4.,:H}3 EM?t!FD$nqocy֙ r 빙Qx< ^kL Fk 9T*g(oB`V0=(h@n)c*ۣ:"Y[2z΋"+O=#@+2^ R6b1{ңxZNJ7F[mmrQ.o/TCbڧ#he4eһ{1OKW Yu7SSNM)|zـLPi$U$Db~uz?-q-/"Pj<+t@يR5:ݎ/F/ b|IB%%"䞍@PdK"iRhƔȪݦzMkώDDgfO7譜 7aikbfQʆCk݊%g%ӳ^*4 C ơk"D^\/-F"\rݭF8 ,1eF6nZ}[A[ggץ~ߊVGgt;ΦS Xbzܽ>Wr` DPPA;[w}Oᙣɣm6[-A[\<:zPaqq0g7:ɪT[)j]KiZ쪥O΢ue;QoSH=Ÿb\hB/u=:Vc!!S:`WG![ ;N̗H1 )緊1vx$.OhT!_G]C41OuؓMT/"c~{юqce_ec<ͅtqb:~wy?rPOd\J;9DH/E/ϭ{9NSmgE$%>} ESN S!Ƹ-R~C4Bk2!X},NIf4яyS85ۊ~&y2kI,-\aA.<^ݏƩ<&Q)K;<0q]')_J٢%9vH6QY_E쪱jm/v{ĺ#!n._[| )[/i?mzl.g1N iO[.~9O&2Oȷm?V^@wxkwF3grfeoG׳muA>D`]73z [|̃}?4H|5@BB_؉:_Wu{-Zbr ]1A `ncab>B\g`C< =euK>wsq+-kıЃsOgy2^>#utײq Z}%oo7SXG8BOs#~C'+#nnhJh6zY-#Kl6Tajo;LG;N5x0v]]!v ѥ>NɰhNJL쇙2}f4?ynǮ# }>QBQQ=;X/~/2Α>ut%VnmozЋKבͰCKE?/<~8vqȎ3ׅ>xհNS]߰>Aq.Yߊ~^"f_!3]-޴oG?>?v~[v_FjOGjf}گ.ZSm|Y'}#sߎVQ}TՂǎ,x*x;hvڱQv>?>j_EI?ҌZrk⺌Q#f\v4 hvo0*yߐoPz4^գ*봮ѦaՁ)Q;d=նǿv}Aekx&AUεy/Ǯc>"#}(c*a֫J}nWaYmaF-dDa_پw{F?V_V㞡޴V[j`-W\l7tLbxu`KmJɷ-:y0T抃ko/h쾡z~nhVT¹g :.|Qj}xǤj׮{;,{~ 
=^՛YOZ@Z%n/"WղW_9DW_+hC/D=2 ᧁ%L/tk`hœ Bt}`b]jDQQN(cJq8m)‚)AIahbN(g" NQ&CbdY9᪒l,y'Z*!K\X*Wjbnmxc_Ȑw]](8Qtw#]9˕~3X?6dEޔ:tR?3iJ^0[XBSqj o9=_O0;[WHuT YC*̃!DkqȖum|حewuTΌ)zX/WzHԄ%r% ԋszAKJG5X)Ey !|J8M73/g_"|M·QcN%D-v{":I.{2>*핝Nj!"ބДp 9ZB BKB+uք6Tn 'HH#t t$t"t&t!t%t#t'P)Zv)%~Bpf-EH'dzz2 }} Y~l@ `P0p­##  c I0p7!Ho}#ٳYCDL%pXS љ=an^^$:pnKђ81'9I(E^pRW8I C<Ԓ sy,azNwPᴮ/JwAQ ^|(QB;\rr%7<\r%?Np29d's8Np19\bs.p19f7snp39fsx0919g?s~39gs#0G9`s4E)Mc^S ð9 ð9 ð9 ð9 ð9 Ӱ9 Ӱ9 Ӱ9 Ӱ9 Ӱ9 ˰ ˰ ˰ ˰ ˰ ۰ ۰ ۰ ۰ ۰ ǰy ǰy ǰy ǰy ǰy װy װy װy װy װy ϰ ϰ ϰ ϰ ϰ ߰ ߰ ߰ ߰ ߰ [ [ [ [ [\r//yb0J0M9wCstrucchange/data/USIncExp.rda0000644000176200001440000000623313062350355015625 0ustar liggesusers]tU# vQ]=kQ]Q M6̤L&'NEWlEEaM9{{Wo]즐e!aaQAC=ߨg ^+3d0Mwydh 6 M )#rսC?oM{;u䨧AD )_^mčS%G/D~ 2#^Dō~ r32ybo_W1q'ď[xqy;)9ߢ8/߀< 4oݼd}cV 'LWdOX']=K gx?Zc?ra Ryhyq٭bUceYNb$Afe?xgӎw3s)^ssIΝ8K}^(d ~&I{yyc<ؿ~cg_̿3@K+9Qy hWP+\ҏBC]#~WZ%/@f^RbKLK)n 몄*K7#!22·2A`9|3+.WH_Q'Kd!~TUQ;/Vǐu`п/}XpհԐY~5uq^$(^m[t+ٱP|Gxpb?"~K_u+nђNJ_?(`X@qww†o`^4 r4ŞK /N?t$20 =rAZ2FkمZuxuHx7# ~[Xq۞obobNog}?1}U{/_lS~vg;"y`8o8[q\aCv 93GoF6܀>to 98y PЯ?aK;ߩAakw'чq K<ᬻpYgx¸s:'A aKۋs-ELS)qA#A= eayD܈܋#ywFrdMJohA|3ĽĽLzB'{q|M_þoe{ZZh ccJ6d_P0YVoCY'IׁI`gIoVdɫ'y܆|R.?v#>LaJ4aRyw?@uu'4ާiGi#{hoNc;@z3țA"yɺݙ ?w{ierdvd1_Y/fܳgfbK3I,9e`sȓsJr.fBs_RZS\c䭑=/٧8yo+n>?ϼ罛q_@J/_8Kr!B̫ƒ+b(QdCߊ:d1ymlj_~^|D<%jQqrdGv%Mo^z}^Gݩ<)nMS>Z=}20Dzszx+~#7%}#mc؆%7W{LD}Js'zrےx-·ֹk])^d[ n3ȯm*2uﶭ`Zxw۩G7۱NtPo؂~mYbCޫxv2:gy32;@<htNv؝3sE~N瑹782{/NBbw-~"ԟdx ;d[,5ל#vYU.s.Ꜯ8 j}(}]vsqDw[/ 웢fܪ>Sy}Կc%O?lwuAf sX?dd7x@M{=r_]=Կz.W{zf)"{b=GV^ƻit}Ъu N'!V~C&0gܺ+lZtjK7Zm~JU 2cǮܸ"4 6߆[teoǰ strucchange/man/0000755000176200001440000000000014235073217013337 5ustar liggesusersstrucchange/man/sctest.Rd0000644000176200001440000000353514127174721015143 0ustar liggesusers\name{sctest} \alias{sctest} \title{Structural Change Tests} \description{ Generic function for performing structural change tests. 
} \usage{ sctest(x, \dots) } \arguments{ \item{x}{an object.} \item{\dots}{arguments passed to methods.} } \details{ \code{sctest} is a generic function for performing/extracting structural change tests based on various types of objects. The \code{strucchange} package provides various types of methods. First, structural change tests based on F statistics in linear regression models (\code{\link{Fstats}}), empirical fluctuation processes in linear regression models (\code{\link{efp}}), and generalized empirical fluctuation processes in parametric models (\code{\link{gefp}}) are available in the corresponding \code{sctest} methods. Second, convenience interfaces for carrying out structural change tests in linear regression models and general parametric models are provided in \code{\link{sctest.formula}} and \code{\link{sctest.default}}, respectively. } \value{ An object of class \code{"htest"} containing: \item{statistic}{the test statistic,} \item{p.value}{the corresponding p value,} \item{method}{a character string with the method used,} \item{data.name}{a character string with the data name.} } \references{ Zeileis A., Leisch F., Hornik K., Kleiber C. (2002), \code{strucchange}: An R Package for Testing for Structural Change in Linear Regression Models, \emph{Journal of Statistical Software}, \bold{7}(2), 1-38. \doi{10.18637/jss.v007.i02}. Zeileis A. (2006), Implementing a Class of Structural Change Tests: An Econometric Computing Approach. \emph{Computational Statistics & Data Analysis}, \bold{50}, 2987--3008. \doi{10.1016/j.csda.2005.07.001}. 
} \seealso{\code{\link{sctest.formula}}, \code{\link{sctest.default}}, \code{\link{sctest.Fstats}}, \code{\link{sctest.efp}}, \code{\link{sctest.gefp}}} strucchange/man/gefp.Rd0000644000176200001440000000743413062350355014555 0ustar liggesusers\name{gefp} \alias{gefp} \alias{print.gefp} \alias{sctest.gefp} \alias{plot.gefp} \alias{time.gefp} \alias{print.gefp} \title{Generalized Empirical M-Fluctuation Processes} \description{Computes an empirical M-fluctuation process from the scores of a fitted model.} \usage{ gefp(\dots, fit = glm, scores = estfun, vcov = NULL, decorrelate = TRUE, sandwich = TRUE, order.by = NULL, fitArgs = NULL, parm = NULL, data = list()) } \arguments{ \item{\dots}{specification of some model which is passed together with \code{data} to the \code{fit} function: \code{fm <- fit(\dots, data = data)}. If \code{fit} is set to \code{NULL} the first argument \code{\dots} is assumed to be already the fitted model \code{fm} (all other arguments in \code{\dots} are ignored and a warning is issued in this case).} \item{fit}{a model fitting function, typically \code{\link{lm}}, \code{\link{glm}} or \code{\link[MASS]{rlm}}.} \item{scores}{a function which extracts the scores or estimating function from the fitted object: \code{scores(fm)}.} \item{vcov}{a function to extract the covariance matrix for the coefficients of the fitted model: \code{vcov(fm, order.by = order.by, data = data)}.} \item{decorrelate}{logical. Should the process be decorrelated?} \item{sandwich}{logical. Is the function \code{vcov} the full sandwich estimator or only the meat?} \item{order.by}{Either a vector \code{z} or a formula with a single explanatory variable like \code{~ z}. The observations in the model are ordered by the size of \code{z}. If set to \code{NULL} (the default) the observations are assumed to be ordered (e.g., a time series).} \item{fitArgs}{List of additional arguments which could be passed to the \code{fit} function. 
Usually, this is not needed and \code{\dots} will be sufficient to pass arguments to \code{fit}.} \item{parm}{integer or character specifying the component of the estimating functions which should be used (by default all components are used).} \item{data}{an optional data frame containing the variables in the \code{\dots} specification and the \code{order.by} model. By default the variables are taken from the environment which \code{gefp} is called from.} } \value{ \code{gefp} returns a list of class \code{"gefp"} with components including: \item{process}{the fitted empirical fluctuation process of class \code{"zoo"},} \item{nreg}{the number of regressors,} \item{nobs}{the number of observations,} \item{fit}{the fit function used,} \item{scores}{the scores function used,} \item{fitted.model}{the fitted model.} } \references{ Zeileis A. (2005), A Unified Approach to Structural Change Tests Based on ML Scores, F Statistics, and OLS Residuals. \emph{Econometric Reviews}, \bold{24}, 445--466. doi:10.1080/07474930500406053. Zeileis A. (2006), Implementing a Class of Structural Change Tests: An Econometric Computing Approach. \emph{Computational Statistics & Data Analysis}, \bold{50}, 2987--3008. doi:10.1016/j.csda.2005.07.001. Zeileis A., Hornik K. (2007), Generalized M-Fluctuation Tests for Parameter Instability, \emph{Statistica Neerlandica}, \bold{61}, 488--508. doi:10.1111/j.1467-9574.2007.00371.x. Zeileis A., Shah A., Patnaik I. (2010), Testing, Monitoring, and Dating Structural Changes in Exchange Rate Regimes, \emph{Computational Statistics and Data Analysis}, \bold{54}(6), 1696--1706. doi:10.1016/j.csda.2009.12.005. 
} \seealso{\code{\link{efp}}, \code{\link{efpFunctional}}} \examples{ data("BostonHomicide") gcus <- gefp(homicides ~ 1, family = poisson, vcov = kernHAC, data = BostonHomicide) plot(gcus, aggregate = FALSE) gcus sctest(gcus) } \concept{M-fluctuation} \concept{fluctuation test} \concept{maximum likelihood scores} \concept{structural change} \keyword{regression} strucchange/man/GermanM1.Rd0000644000176200001440000000710313062350355015234 0ustar liggesusers\name{GermanM1} \alias{GermanM1} \alias{historyM1} \alias{monitorM1} \docType{data} \encoding{latin1} \title{German M1 Money Demand} \usage{data("GermanM1")} \description{ German M1 money demand. } \format{ \code{GermanM1} is a data frame containing 12 quarterly time series from 1961(1) to 1995(4) and two further variables. \code{historyM1} is the subset of \code{GermanM1} up to 1990(2), i.e., the data before the German monetary unification on 1990-06-01. \code{monitorM1} is the complement of \code{historyM1}, i.e., the data after the unification. All three data frames contain the variables \describe{ \item{m}{time series. Logarithm of real M1 per capita,} \item{p}{time series. Logarithm of a price index,} \item{y}{time series. Logarithm of real per capita gross national product,} \item{R}{time series. Long-run interest rate,} \item{dm}{time series. First differences of \code{m},} \item{dy2}{time series. First differences of lag 2 of \code{y},} \item{dR}{time series. First differences of \code{R},} \item{dR1}{time series. First differences of lag 1 of \code{R},} \item{dp}{time series. First differences of \code{p},} \item{m1}{time series. Lag 1 of \code{m},} \item{y1}{time series. Lag 1 of \code{y},} \item{R1}{time series. Lag 1 of \code{R},} \item{season}{factor coding the seasonality,} \item{ecm.res}{vector containing the OLS residuals of the Ltkepohl et al. (1999) model fitted in the history period.} } } \details{ Ltkepohl et al. 
(1999) investigate the linearity and stability of German M1 money demand: they find a stable regression relation for the time before the monetary union on 1990-06-01 but a clear structural instability afterwards. Zeileis et al. (2005) use a model with \code{ecm.res} instead of \code{m1}, \code{y1} and \code{R1}, which leads to equivalent results in the history period but slightly different results in the monitoring period. The reason for the replacement is that stationary regressors are needed for the structural change tests. See references and the examples below for more details. } \source{The data is provided by the German central bank and is available online in the data archive of the Journal of Applied Econometrics \url{http://qed.econ.queensu.ca/jae/1999-v14.5/lutkepohl-terasvirta-wolters/}.} \references{ Ltkepohl H., Tersvirta T., Wolters J. (1999), Investigating Stability and Linearity of a German M1 Money Demand Function, \emph{Journal of Applied Econometrics}, \bold{14}, 511-525. Zeileis A., Leisch F., Kleiber C., Hornik K. (2005), Monitoring Structural Change in Dynamic Econometric Models, \emph{Journal of Applied Econometrics}, \bold{20}, 99--121. } \examples{ data("GermanM1") ## Ltkepohl et al. (1999) use the following model LTW.model <- dm ~ dy2 + dR + dR1 + dp + m1 + y1 + R1 + season ## Zeileis et al. 
(2005) use M1.model <- dm ~ dy2 + dR + dR1 + dp + ecm.res + season ## historical tests ols <- efp(LTW.model, data = GermanM1, type = "OLS-CUSUM") plot(ols) re <- efp(LTW.model, data = GermanM1, type = "fluctuation") plot(re) fs <- Fstats(LTW.model, data = GermanM1, from = 0.1) plot(fs) ## monitoring M1 <- historyM1 ols.efp <- efp(M1.model, type = "OLS-CUSUM", data = M1) newborder <- function(k) 1.5778*k/118 ols.mefp <- mefp(ols.efp, period = 2) ols.mefp2 <- mefp(ols.efp, border = newborder) M1 <- GermanM1 ols.mon <- monitor(ols.mefp) ols.mon2 <- monitor(ols.mefp2) plot(ols.mon) lines(boundary(ols.mon2), col = 2) ## dating bp <- breakpoints(LTW.model, data = GermanM1) summary(bp) plot(bp) plot(fs) lines(confint(bp)) } \keyword{datasets} strucchange/man/supLM.Rd0000644000176200001440000000614413062350355014671 0ustar liggesusers\name{supLM} \alias{supLM} \alias{maxMOSUM} \title{Generators for efpFunctionals along Continuous Variables} \description{ Generators for \code{efpFunctional} objects suitable for aggregating empirical fluctuation processes to test statistics along continuous variables (i.e., along time in time series applications). } \usage{ supLM(from = 0.15, to = NULL) maxMOSUM(width = 0.15) } \arguments{ \item{from, to}{numeric from interval (0, 1) specifying start and end of trimmed sample period. By default, \code{to} is \code{1 - from}, i.e., with the default \code{from = 0.15} the first and last 15 percent of observations are trimmed.} \item{width}{a numeric from interval (0,1) specifying the bandwidth. Determines the size of the moving data window relative to sample size.} } \details{ \code{supLM} and \code{maxMOSUM} generate \code{\link{efpFunctional}} objects for Andrews' supLM test and a (maximum) MOSUM test, respectively, with the specified optional parameters (\code{from} and \code{to}, and \code{width}, respectively). 
The resulting objects can be used in combination with empirical fluctuation processes of class \code{\link{gefp}} for significance testing and visualization. The corresponding statistics are useful for carrying out structural change tests along a continuous variable (i.e., along time in time series applications). Further typical \code{\link{efpFunctional}}s for this setting are the double-maximum functional \code{\link{maxBB}} and the Cramer-von Mises functional \code{\link{meanL2BB}}. } \value{ An object of class \code{efpFunctional}. } \references{ Merkle E.C., Zeileis A. (2013), Tests of Measurement Invariance without Subgroups: A Generalization of Classical Methods. \emph{Psychometrika}, \bold{78}(1), 59--82. doi:10.1007/S11336-012-9302-4 Zeileis A. (2005), A Unified Approach to Structural Change Tests Based on ML Scores, F Statistics, and OLS Residuals. \emph{Econometric Reviews}, \bold{24}, 445--466. doi:10.1080/07474930500406053. Zeileis A. (2006), Implementing a Class of Structural Change Tests: An Econometric Computing Approach. \emph{Computational Statistics & Data Analysis}, \bold{50}, 2987--3008. doi:10.1016/j.csda.2005.07.001. Zeileis A., Hornik K. (2007), Generalized M-Fluctuation Tests for Parameter Instability, \emph{Statistica Neerlandica}, \bold{61}, 488--508. doi:10.1111/j.1467-9574.2007.00371.x. 
} \seealso{\code{\link{efpFunctional}}, \code{\link{gefp}}} \examples{ ## seatbelt data data("UKDriverDeaths") seatbelt <- log10(UKDriverDeaths) seatbelt <- cbind(seatbelt, lag(seatbelt, k = -1), lag(seatbelt, k = -12)) colnames(seatbelt) <- c("y", "ylag1", "ylag12") seatbelt <- window(seatbelt, start = c(1970, 1), end = c(1984,12)) ## empirical fluctuation process scus.seat <- gefp(y ~ ylag1 + ylag12, data = seatbelt) ## supLM test plot(scus.seat, functional = supLM(0.1)) ## MOSUM test plot(scus.seat, functional = maxMOSUM(0.25)) ## double maximum test plot(scus.seat) ## range test plot(scus.seat, functional = rangeBB) ## Cramer-von Mises statistic (Nyblom-Hansen test) plot(scus.seat, functional = meanL2BB) } \keyword{regression} strucchange/man/efp.Rd0000644000176200001440000001773514127174652014422 0ustar liggesusers\name{efp} \alias{efp} \alias{print.efp} \encoding{latin1} \title{Empirical Fluctuation Processes} \description{Computes an empirical fluctuation process according to a specified method from the generalized fluctuation test framework, which includes CUSUM and MOSUM tests based on recursive or OLS residuals, parameter estimates or ML scores (OLS first order conditions).} \usage{ efp(formula, data, type = , h = 0.15, dynamic = FALSE, rescale = TRUE, lrvar = FALSE, vcov = NULL)} \arguments{ \item{formula}{a symbolic description for the model to be tested.} \item{data}{an optional data frame containing the variables in the model. By default the variables are taken from the environment which \code{efp} is called from.} \item{type}{specifies which type of fluctuation process will be computed, the default is \code{"Rec-CUSUM"}. For details see below.} \item{h}{a numeric from interval (0,1) specifying the bandwidth. determines the size of the data window relative to sample size (for MOSUM and ME processes only).} \item{dynamic}{logical. If \code{TRUE} the lagged observations are included as a regressor.} \item{rescale}{logical. 
If \code{TRUE} the estimates will be standardized by the regressor matrix of the corresponding subsample according to Kuan & Chen (1994); if \code{FALSE} the whole regressor matrix will be used. (only if \code{type} is either \code{"RE"} or \code{"ME"})} \item{lrvar}{logical or character. Should a long-run variance estimator be used for the residuals? By default, the standard OLS variance is employed. Alternatively, \code{\link[sandwich]{lrvar}} can be used. If \code{lrvar} is character (\code{"Andrews"} or \code{"Newey-West"}), then the corresponding \code{type} of long-run variance is used. (The argument is ignored for the score-based tests where \code{\link{gefp}} should be used instead.)} \item{vcov}{a function to extract the covariance matrix for the coefficients of the fitted model (only for \code{"RE"} and \code{"ME"}).} } \details{If \code{type} is one of \code{"Rec-CUSUM"}, \code{"OLS-CUSUM"}, \code{"Rec-MOSUM"} or \code{"OLS-MOSUM"} the function \code{efp} will return a one-dimensional empirical process of sums of residuals. Either it will be based on recursive residuals or on OLS residuals and the process will contain CUmulative SUMs or MOving SUMs of residuals in a certain data window. For the MOSUM and ME processes all estimations are done for the observations in a moving data window, whose size is determined by \code{h} and which is shifted over the whole sample. If \code{type} is either \code{"RE"} or \code{"ME"} a \emph{k}-dimensional process will be returned, if \emph{k} is the number of regressors in the model, as it is based on recursive OLS estimates of the regression coefficients or moving OLS estimates respectively. The recursive estimates test is also called fluctuation test, therefore setting \code{type} to \code{"fluctuation"} was used to specify it in earlier versions of strucchange. It still can be used now, but will be forced to \code{"RE"}. 
If \code{type} is \code{"Score-CUSUM"} or \code{"Score-MOSUM"} a \emph{k+1}-dimensional process will be returned, one for each score of the regression coefficients and one for the scores of the variance. The process gives the decorrelated cumulative sums of the ML scores (in a Gaussian model) or first order conditions respectively (in an OLS framework). If there is a single structural change point \eqn{t^*}, the recursive CUSUM path starts to depart from its mean 0 at \eqn{t^*}. The Brownian bridge type paths will have their respective peaks around \eqn{t^*}. The Brownian bridge increments type paths should have a strong change at \eqn{t^*}. The function \code{\link{plot}} has a method to plot the empirical fluctuation process; with \code{sctest} the corresponding test on structural change can be performed. } \value{ \code{efp} returns a list of class \code{"efp"} with components including: \item{process}{the fitted empirical fluctuation process of class \code{"ts"} or \code{"mts"} respectively,} \item{type}{a string with the \code{type} of the process fitted,} \item{nreg}{the number of regressors,} \item{nobs}{the number of observations,} \item{par}{the bandwidth \code{h} used.} } \references{ Brown R.L., Durbin J., Evans J.M. (1975), Techniques for testing constancy of regression relationships over time, \emph{Journal of the Royal Statistical Society}, B, \bold{37}, 149-163. Chu C.-S., Hornik K., Kuan C.-M. (1995), MOSUM tests for parameter constancy, \emph{Biometrika}, \bold{82}, 603-617. Chu C.-S., Hornik K., Kuan C.-M. (1995), The moving-estimates test for parameter stability, \emph{Econometric Theory}, \bold{11}, 669-720. Hansen B. (1992), Testing for Parameter Instability in Linear Models, \emph{Journal of Policy Modeling}, \bold{14}, 517-533. Hjort N.L., Koning A. (2002), Tests for Constancy of Model Parameters Over Time, \emph{Nonparametric Statistics}, \bold{14}, 113-132. Krmer W., Ploberger W., Alt R. 
(1988), Testing for structural change in dynamic models, \emph{Econometrica}, \bold{56}, 1355-1369. Kuan C.-M., Hornik K. (1995), The generalized fluctuation test: A unifying view, \emph{Econometric Reviews}, \bold{14}, 135 - 161. Kuan C.-M., Chen (1994), Implementing the fluctuation and moving estimates tests in dynamic econometric models, \emph{Economics Letters}, \bold{44}, 235-239. Ploberger W., Krämer W. (1992), The CUSUM test with OLS residuals, \emph{Econometrica}, \bold{60}, 271-285. Zeileis A., Leisch F., Hornik K., Kleiber C. (2002), \code{strucchange}: An R Package for Testing for Structural Change in Linear Regression Models, \emph{Journal of Statistical Software}, \bold{7}(2), 1-38. \doi{10.18637/jss.v007.i02}. Zeileis A. (2005), A Unified Approach to Structural Change Tests Based on ML Scores, F Statistics, and OLS Residuals. \emph{Econometric Reviews}, \bold{24}, 445--466. \doi{10.1080/07474930500406053}. Zeileis A. (2006), Implementing a Class of Structural Change Tests: An Econometric Computing Approach. \emph{Computational Statistics & Data Analysis}, \bold{50}, 2987--3008. \doi{10.1016/j.csda.2005.07.001}. Zeileis A., Hornik K. (2007), Generalized M-Fluctuation Tests for Parameter Instability, \emph{Statistica Neerlandica}, \bold{61}, 488--508. \doi{10.1111/j.1467-9574.2007.00371.x}. 
} \seealso{\code{\link{gefp}}, \code{\link{plot.efp}}, \code{\link{print.efp}}, \code{\link{sctest.efp}}, \code{\link{boundary.efp}}} \examples{ ## Nile data with one breakpoint: the annual flows drop in 1898 ## because the first Ashwan dam was built data("Nile") plot(Nile) ## test the null hypothesis that the annual flow remains constant ## over the years ## compute OLS-based CUSUM process and plot ## with standard and alternative boundaries ocus.nile <- efp(Nile ~ 1, type = "OLS-CUSUM") plot(ocus.nile) plot(ocus.nile, alpha = 0.01, alt.boundary = TRUE) ## calculate corresponding test statistic sctest(ocus.nile) ## UK Seatbelt data: a SARIMA(1,0,0)(1,0,0)_12 model ## (fitted by OLS) is used and reveals (at least) two ## breakpoints - one in 1973 associated with the oil crisis and ## one in 1983 due to the introduction of compulsory ## wearing of seatbelts in the UK. data("UKDriverDeaths") seatbelt <- log10(UKDriverDeaths) seatbelt <- cbind(seatbelt, lag(seatbelt, k = -1), lag(seatbelt, k = -12)) colnames(seatbelt) <- c("y", "ylag1", "ylag12") seatbelt <- window(seatbelt, start = c(1970, 1), end = c(1984,12)) plot(seatbelt[,"y"], ylab = expression(log[10](casualties))) ## use RE process re.seat <- efp(y ~ ylag1 + ylag12, data = seatbelt, type = "RE") plot(re.seat) plot(re.seat, functional = NULL) sctest(re.seat) } \concept{CUSUM} \concept{MOSUM} \concept{recursive estimates} \concept{moving estimates} \concept{fluctuation test} \concept{maximum likelihood scores} \concept{structural change} \concept{CUSUM} \concept{MOSUM} \concept{recursive estimates} \concept{moving estimates} \concept{fluctuation test} \concept{maximum likelihood scores} \concept{structural change} \keyword{regression} strucchange/man/BostonHomicide.Rd0000644000176200001440000000506013550277371016543 0ustar liggesusers\name{BostonHomicide} \alias{BostonHomicide} \title{Youth Homicides in Boston} \usage{data("BostonHomicide")} \description{ Data about the number of youth homicides in Boston during 
the `Boston Gun Project'---a policing initiative aiming at lowering homicide victimization among young people in Boston. } \format{ A data frame containing 6 monthly time series and two factors coding seasonality and year, respectively. \describe{ \item{homicides}{time series. Number of youth homicides.} \item{population}{time series. Boston population (aged 25-44), linearly interpolated from annual data.} \item{populationBM}{time series. Population of black males (aged 15-24), linearly interpolated from annual data.} \item{ahomicides25}{time series. Number of adult homicides (aged 25 and older).} \item{ahomicides35}{time series. Number of adult homicides (aged 35-44).} \item{unemploy}{time series. Teen unemployment rate (in percent).} \item{season}{factor coding the month.} \item{year}{factor coding the year.} } } \details{The `Boston Gun Project' is a policing initiative aiming at lowering youth homicides in Boston. The project began in early 1995 and implemented the so-called `Operation Ceasefire' intervention which began in the late spring of 1996. } \source{Piehl et al. (2003), Figure 1, Figure 3, and Table 1. From the table it is not clear how the data should be linearly interpolated. Here, it was chosen to use the given observations for July of the corresponding year and then use \code{\link{approx}} with \code{rule = 2}. } \references{ Piehl A.M., Cooper S.J., Braga A.A., Kennedy D.M. (2003), Testing for Structural Breaks in the Evaluation of Programs, \emph{The Review of Economics and Statistics}, \bold{85}(3), 550-558. Kennedy D.M., Piehl A.M., Braga A.A. (1996), Youth Violence in Boston: Gun Markets, Serious Youth Offenders, and a Use-Reduction Strategy, \emph{Law and Contemporary Problems}, \bold{59}, 147-183. 
} \examples{ data("BostonHomicide") attach(BostonHomicide) ## data from Table 1 tapply(homicides, year, mean) populationBM[0:6*12 + 7] tapply(ahomicides25, year, mean) tapply(ahomicides35, year, mean) population[0:6*12 + 7] unemploy[0:6*12 + 7] ## model A ## via OLS fmA <- lm(homicides ~ populationBM + season) anova(fmA) ## as GLM fmA1 <- glm(homicides ~ populationBM + season, family = poisson) anova(fmA1, test = "Chisq") ## model B & C fmB <- lm(homicides ~ populationBM + season + ahomicides25) fmC <- lm(homicides ~ populationBM + season + ahomicides25 + unemploy) detach(BostonHomicide) } \keyword{datasets} strucchange/man/efpFunctional.Rd0000644000176200001440000001426713062350355016433 0ustar liggesusers\name{efpFunctional} \alias{efpFunctional} \alias{simulateBMDist} \alias{maxBM} \alias{maxBB} \alias{maxBMI} \alias{maxBBI} \alias{maxL2BB} \alias{meanL2BB} \alias{rangeBM} \alias{rangeBB} \alias{rangeBMI} \alias{rangeBBI} \title{Functionals for Fluctuation Processes} \description{Computes an object for aggregating, plotting and testing empirical fluctuation processes.} \usage{ efpFunctional(functional = list(comp = function(x) max(abs(x)), time = max), boundary = function(x) rep(1, length(x)), computePval = NULL, computeCritval = NULL, plotProcess = NULL, lim.process = "Brownian bridge", nobs = 10000, nrep = 50000, nproc = 1:20, h = 0.5, probs = c(0:84/100, 850:1000/1000)) } \arguments{ \item{functional}{either a function for aggregating fluctuation processes or a list with two functions names \code{"comp"} and \code{"time"}.} \item{boundary}{a boundary function.} \item{computePval}{a function for computing p values. If neither \code{computePval} nor \code{computeCritval} are specified critical values are simulated with settings as specified below.} \item{computeCritval}{a function for computing critical values. 
If neither \code{computePval} nor \code{computeCritval} are specified critical values are simulated with settings as specified below.} \item{plotProcess}{a function for plotting the empirical process, if set to \code{NULL} a suitable function is set up.} \item{lim.process}{a string specifying the limiting process.} \item{nobs}{integer specifying the number of observations of each Brownian motion simulated.} \item{nrep}{integer specifying the number of replications.} \item{nproc}{integer specifying for which number of processes Brownian motions should be simulated. If set to \code{NULL} only \code{nproc = 1} is used and all other values are derived from a Bonferroni correction.} \item{h}{bandwidth parameter for increment processes.} \item{probs}{numeric vector specifying for which probabilities critical values should be tabulated.} } \details{ \code{efpFunctional} computes an object of class \code{"efpFunctional"} which then knows how to do inference based on empirical fluctuation processes (currently only for \code{\link{gefp}} objects and not yet for \code{\link{efp}} objects) and how to visualize the corresponding processes. \code{efpFunctional}s for many frequently used test statistics are provided: \code{\link{maxBB}} for the double maximum statistic, \code{\link{meanL2BB}} for the Cramer-von Mises statistic, or \code{rangeBB} for the range statistic. Furthermore, \code{\link{supLM}} generates an object of class \code{"efpFunctional"} for a certain trimming parameter, see the examples. More details can be found in Zeileis (2006). Based on Merkle, Fan, and Zeileis (2014), further \code{efpFunctional} generators for aggregating along (ordered) categorical variables have been added: \code{\link{catL2BB}}, \code{\link{ordL2BB}}, \code{\link{ordwmax}}. For setting up an \code{efpFunctional}, the functions \code{computeStatistic}, \code{computePval}, and \code{plotProcess} need to be supplied. 
These should have the following interfaces: \code{computeStatistic} should take a single argument which is the process itself, i.e., essentially a n x k matrix where n is the number of observations and k the number of processes (regressors). \code{computePval} should take two arguments: a scalar test statistic and the number of processes k. \code{plotProcess} should take two arguments: an object of class \code{"gefp"} and \code{alpha} the level of significance for any boundaries or critical values to be visualized. } \value{ \code{efpFunctional} returns a list of class \code{"efpFunctional"} with components including: \item{plotProcess}{a function for plotting empirical fluctuation processes,} \item{computeStatistic}{a function for computing a test statistic from an empirical fluctuation process,} \item{computePval}{a function for computing the corresponding p value,} \item{computeCritval}{a function for computing critical values.} } \references{ Merkle E.C., Zeileis A. (2013), Tests of Measurement Invariance without Subgroups: A Generalization of Classical Methods. \emph{Psychometrika}, \bold{78}(1), 59--82. doi:10.1007/S11336-012-9302-4 Merkle E.C., Fan J., Zeileis A. (2014), Testing for Measurement Invariance with Respect to an Ordinal Variable. \emph{Psychometrika}, \bold{79}(4), 569--584. doi:10.1007/S11336-013-9376-7. Zeileis A. (2005), A Unified Approach to Structural Change Tests Based on ML Scores, F Statistics, and OLS Residuals. \emph{Econometric Reviews}, \bold{24}, 445--466. doi:10.1080/07474930500406053. Zeileis A. (2006), Implementing a Class of Structural Change Tests: An Econometric Computing Approach. \emph{Computational Statistics & Data Analysis}, \bold{50}, 2987--3008. doi:10.1016/j.csda.2005.07.001. Zeileis A., Hornik K. (2007), Generalized M-Fluctuation Tests for Parameter Instability, \emph{Statistica Neerlandica}, \bold{61}, 488--508. doi:10.1111/j.1467-9574.2007.00371.x. 
} \seealso{\code{\link{gefp}}, \code{\link{supLM}}, \code{\link{catL2BB}}, \code{\link{sctest.default}}} \examples{ data("BostonHomicide") gcus <- gefp(homicides ~ 1, family = poisson, vcov = kernHAC, data = BostonHomicide) plot(gcus, functional = meanL2BB) gcus sctest(gcus, functional = meanL2BB) y <- rnorm(1000) x1 <- runif(1000) x2 <- runif(1000) ## supWald statistic computed by Fstats() fs <- Fstats(y ~ x1 + x2, from = 0.1) plot(fs) sctest(fs) ## compare with supLM statistic scus <- gefp(y ~ x1 + x2, fit = lm) plot(scus, functional = supLM(0.1)) sctest(scus, functional = supLM(0.1)) ## seatbelt data data("UKDriverDeaths") seatbelt <- log10(UKDriverDeaths) seatbelt <- cbind(seatbelt, lag(seatbelt, k = -1), lag(seatbelt, k = -12)) colnames(seatbelt) <- c("y", "ylag1", "ylag12") seatbelt <- window(seatbelt, start = c(1970, 1), end = c(1984,12)) scus.seat <- gefp(y ~ ylag1 + ylag12, data = seatbelt) ## double maximum test plot(scus.seat) ## range test plot(scus.seat, functional = rangeBB) ## Cramer-von Mises statistic (Nyblom-Hansen test) plot(scus.seat, functional = meanL2BB) ## supLM test plot(scus.seat, functional = supLM(0.1)) } \keyword{regression} strucchange/man/sctest.formula.Rd0000644000176200001440000001000413062350355016570 0ustar liggesusers\name{sctest.formula} \alias{sctest.formula} \title{Structural Change Tests in Linear Regression Models} \description{Performs tests for structural change in linear regression models.} \usage{ \method{sctest}{formula}(formula, type = , h = 0.15, alt.boundary = FALSE, functional = c("max", "range", "maxL2", "meanL2"), from = 0.15, to = NULL, point = 0.5, asymptotic = FALSE, data, ...) } \arguments{ \item{formula}{a formula describing the model to be tested.} \item{type}{a character string specifying the structural change test that is to be performed, the default is \code{"Rec-CUSUM"}. 
Besides the test types described in \code{\link{efp}} and \code{\link{sctest.Fstats}} the Chow test and the Nyblom-Hansen test can be performed by setting type to \code{"Chow"} or \code{"Nyblom-Hansen"}, respectively.} \item{h}{numeric from interval (0,1) specifying the bandwidth. Determines the size of the data window relative to the sample size (for MOSUM and ME tests only).} \item{alt.boundary}{logical. If set to \code{TRUE} alternative boundaries (instead of the standard linear boundaries) will be used (for CUSUM processes only).} \item{functional}{indicates which functional should be used to aggregate the empirical fluctuation processes to a test statistic.} \item{from, to}{numeric. If \code{from} is smaller than 1 they are interpreted as percentages of data and by default \code{to} is taken to be the 1 - \code{from}. F statistics will be calculated for the observations \code{(n*from):(n*to)}, when \code{n} is the number of observations in the model. If \code{from} is greater than 1 it is interpreted to be the index and \code{to} defaults to \code{n - from}. (for F tests only)} \item{point}{parameter of the Chow test for the potential change point. Interpreted analogous to the \code{from} parameter. By default taken to be \code{floor(n*0.5)} if \code{n} is the number of observations in the model.} \item{asymptotic}{logical. If \code{TRUE} the asymptotic (chi-square) distribution instead of the exact (F) distribution will be used to compute the p value (for Chow test only).} \item{data}{an optional data frame containing the variables in the model. By default the variables are taken from the environment which \code{sctest} is called from.} \item{...}{further arguments passed to \code{\link{efp}} or \code{\link{Fstats}}.} } \details{ \code{sctest.formula} is a convenience interface for performing structural change tests in linear regression models based on \code{\link{efp}} and \code{\link{Fstats}}. 
It is mainly a wrapper for \code{\link{sctest.efp}} and \code{\link{sctest.Fstats}} as it fits an empirical fluctuation process first or computes the F statistics respectively and subsequently performs the corresponding test. The Chow test and the Nyblom-Hansen test are available explicitly here. An alternative convenience interface for performing structural change tests in general parametric models (based on \code{\link{gefp}}) is available in \code{\link{sctest.default}}. } \value{ An object of class \code{"htest"} containing: \item{statistic}{the test statistic,} \item{p.value}{the corresponding p value,} \item{method}{a character string with the method used,} \item{data.name}{a character string with the data name.} } \seealso{\code{\link{sctest.efp}}, \code{\link{sctest.Fstats}}, \code{\link{sctest.default}}} \examples{ ## Example 7.4 from Greene (1993), "Econometric Analysis" ## Chow test on Longley data data("longley") sctest(Employed ~ Year + GNP.deflator + GNP + Armed.Forces, data = longley, type = "Chow", point = 7) ## which is equivalent to segmenting the regression via fac <- factor(c(rep(1, 7), rep(2, 9))) fm0 <- lm(Employed ~ Year + GNP.deflator + GNP + Armed.Forces, data = longley) fm1 <- lm(Employed ~ fac/(Year + GNP.deflator + GNP + Armed.Forces), data = longley) anova(fm0, fm1) ## estimates from Table 7.5 in Greene (1993) summary(fm0) summary(fm1) } \keyword{htest} strucchange/man/breakdates.Rd0000644000176200001440000000316313062350355015734 0ustar liggesusers\name{breakdates} \alias{breakdates} \alias{breakdates.breakpoints} \alias{breakdates.confint.breakpoints} \title{Breakdates Corresponding to Breakpoints} \description{ A generic function for computing the breakdates corresponding to breakpoints (and their confidence intervals). } \usage{ breakdates(obj, format.times = FALSE, ...) 
} \arguments{ \item{obj}{An object of class \code{"breakpoints"}, \code{"breakpointsfull"} or their confidence intervals as returned by \code{\link{confint}}.} \item{format.times}{logical. If set to \code{TRUE} a vector of strings with the formatted breakdates. See details for more information.} \item{\dots}{currently not used.} } \details{ Breakpoints are the number of observations that are the last in one segment and breakdates are the corresponding points on the underlying time scale. The breakdates can be formatted which enhances readability in particular for quarterly or monthly time series. For example the breakdate \code{2002.75} of a monthly time series will be formatted to \code{"2002(10)"}. } \value{ A vector or matrix containing the breakdates. } \seealso{\code{\link{breakpoints}}, \code{\link{confint}}} \examples{ ## Nile data with one breakpoint: the annual flows drop in 1898 ## because the first Ashwan dam was built data("Nile") plot(Nile) bp.nile <- breakpoints(Nile ~ 1) summary(bp.nile) plot(bp.nile) ## compute breakdates corresponding to the ## breakpoints of minimum BIC segmentation breakdates(bp.nile) ## confidence intervals ci.nile <- confint(bp.nile) breakdates(ci.nile) ci.nile plot(Nile) lines(ci.nile) } \keyword{regression} strucchange/man/plot.mefp.Rd0000644000176200001440000000242413062350355015532 0ustar liggesusers\name{plot.mefp} \alias{plot.mefp} \alias{lines.mefp} \title{Plot Methods for mefp Objects} \description{This is a method of the generic \code{\link{plot}} function for \code{"mefp"} objects as returned by \code{\link{mefp}} or \code{\link{monitor}}. It plots the empirical fluctuation process (or a functional thereof) as a time series plot, and includes boundaries corresponding to the significance level of the monitoring procedure. } \usage{ \method{plot}{mefp}(x, boundary = TRUE, functional = "max", main = NULL, ylab = "Empirical fluctuation process", ylim = NULL, ...) 
} \arguments{ \item{x}{an object of class \code{"mefp"}.} \item{boundary}{if \code{FALSE}, plotting of boundaries is suppressed.} \item{functional}{indicates which functional should be applied to a multivariate empirical process. If set to \code{NULL} all dimensions of the process (one process per coefficient in the linear model) are plotted. } \item{main, ylab, ylim, ...}{high-level \code{\link{plot}} function parameters.} } \seealso{\code{\link{mefp}}} \examples{ df1 <- data.frame(y=rnorm(300)) df1[150:300,"y"] <- df1[150:300,"y"]+1 me1 <- mefp(y~1, data=df1[1:50,,drop=FALSE], type="ME", h=1, alpha=0.05) me2 <- monitor(me1, data=df1) plot(me2) } \keyword{hplot} strucchange/man/USIncExp.Rd0000644000176200001440000001405314251757642015300 0ustar liggesusers\name{USIncExp} \alias{USIncExp} \title{Income and Expenditures in the US} \description{Personal income and personal consumption expenditures in the US between January 1959 and February 2001 (seasonally adjusted at annual rates).} \usage{data("USIncExp")} \format{ A multivariate monthly time series from 1959(1) to 2001(2) with variables \describe{ \item{income}{monthly personal income (in billion US dollars),} \item{expenditure}{monthly personal consumption expenditures (in billion US Dollars).} } } \source{\url{https://www.economagic.com/} } \references{ A. Zeileis, F. Leisch, K. Hornik, C. Kleiber (2002), strucchange: An R Package for Testing for Structural Change in Linear Regression Models. \emph{Journal of Statistical Software} \bold{7}(2), 1--38. 
} \examples{ ## These examples are presented in the vignette distributed with this ## package, the code was generated by Stangle("strucchange-intro.Rnw") ################################################### ### chunk number 1: data ################################################### library("strucchange") data("USIncExp") plot(USIncExp, plot.type = "single", col = 1:2, ylab = "billion US$") legend(1960, max(USIncExp), c("income", "expenditures"), lty = c(1,1), col = 1:2, bty = "n") ################################################### ### chunk number 2: subset ################################################### library("strucchange") data("USIncExp") USIncExp2 <- window(USIncExp, start = c(1985,12)) ################################################### ### chunk number 3: ecm-setup ################################################### coint.res <- residuals(lm(expenditure ~ income, data = USIncExp2)) coint.res <- lag(ts(coint.res, start = c(1985,12), freq = 12), k = -1) USIncExp2 <- cbind(USIncExp2, diff(USIncExp2), coint.res) USIncExp2 <- window(USIncExp2, start = c(1986,1), end = c(2001,2)) colnames(USIncExp2) <- c("income", "expenditure", "diff.income", "diff.expenditure", "coint.res") ecm.model <- diff.expenditure ~ coint.res + diff.income ################################################### ### chunk number 4: ts-used ################################################### plot(USIncExp2[,3:5], main = "") ################################################### ### chunk number 5: efp ################################################### ocus <- efp(ecm.model, type="OLS-CUSUM", data=USIncExp2) me <- efp(ecm.model, type="ME", data=USIncExp2, h=0.2) ################################################### ### chunk number 6: efp-boundary ################################################### bound.ocus <- boundary(ocus, alpha=0.05) ################################################### ### chunk number 7: OLS-CUSUM ################################################### plot(ocus) 
################################################### ### chunk number 8: efp-boundary2 ################################################### plot(ocus, boundary = FALSE) lines(bound.ocus, col = 4) lines(-bound.ocus, col = 4) ################################################### ### chunk number 9: ME-null ################################################### plot(me, functional = NULL) ################################################### ### chunk number 10: efp-sctest ################################################### sctest(ocus) ################################################### ### chunk number 11: efp-sctest2 ################################################### sctest(ecm.model, type="OLS-CUSUM", data=USIncExp2) ################################################### ### chunk number 12: Fstats ################################################### fs <- Fstats(ecm.model, from = c(1990, 1), to = c(1999,6), data = USIncExp2) ################################################### ### chunk number 13: Fstats-plot ################################################### plot(fs) ################################################### ### chunk number 14: pval-plot ################################################### plot(fs, pval=TRUE) ################################################### ### chunk number 15: aveF-plot ################################################### plot(fs, aveF=TRUE) ################################################### ### chunk number 16: Fstats-sctest ################################################### sctest(fs, type="expF") ################################################### ### chunk number 17: Fstats-sctest2 ################################################### sctest(ecm.model, type = "expF", from = 49, to = 162, data = USIncExp2) ################################################### ### chunk number 18: mefp ################################################### USIncExp3 <- window(USIncExp2, start = c(1986, 1), end = c(1989,12)) me.mefp <- mefp(ecm.model, type = "ME", 
data = USIncExp3, alpha = 0.05) ################################################### ### chunk number 19: monitor1 ################################################### USIncExp3 <- window(USIncExp2, start = c(1986, 1), end = c(1990,12)) me.mefp <- monitor(me.mefp) ################################################### ### chunk number 20: monitor2 ################################################### USIncExp3 <- window(USIncExp2, start = c(1986, 1)) me.mefp <- monitor(me.mefp) me.mefp ################################################### ### chunk number 21: monitor-plot ################################################### plot(me.mefp) ################################################### ### chunk number 22: mefp2 ################################################### USIncExp3 <- window(USIncExp2, start = c(1986, 1), end = c(1989,12)) me.efp <- efp(ecm.model, type = "ME", data = USIncExp3, h = 0.5) me.mefp <- mefp(me.efp, alpha=0.05) ################################################### ### chunk number 23: monitor3 ################################################### USIncExp3 <- window(USIncExp2, start = c(1986, 1)) me.mefp <- monitor(me.mefp) ################################################### ### chunk number 24: monitor-plot2 ################################################### plot(me.mefp) } \keyword{datasets} strucchange/man/strucchange.internal.Rd0000644000176200001440000000073513062350355017752 0ustar liggesusers\name{strucchange.internal} \alias{sc.beta.sup} \alias{sc.beta.ave} \alias{sc.beta.exp} \alias{sc.me} \alias{sc.meanL2} \alias{sc.maxL2} \alias{pvalue.efp} \alias{pvalue.Fstats} \alias{monitorMECritval} \alias{monitorMECritvalData} \alias{monitorMECritvalTable} \alias{monitorRECritval} \alias{monitorRECritvalData} \alias{monitorRECritvalTable} \alias{pargmaxV} \title{Internal strucchange objects} \description{ These are not to be called by the user. 
} \keyword{internal} strucchange/man/durab.Rd0000644000176200001440000000454013062350355014724 0ustar liggesusers\name{durab} \alias{durab} \title{US Labor Productivity} \usage{data("durab")} \description{ US labor productivity in the manufacturing/durables sector. } \format{ \code{durab} is a multivariate monthly time series from 1947(3) to 2001(4) with variables \describe{ \item{y}{growth rate of the Industrial Production Index to average weekly labor hours in the manufacturing/durables sector,} \item{lag}{lag 1 of the series \code{y},} } } \source{The data set is available from Bruce Hansen's homepage \url{http://www.ssc.wisc.edu/~bhansen/}. For more information see Hansen (2001).} \references{ Hansen B. (2001), The New Econometrics of Structural Change: Dating Breaks in U.S. Labor Productivity, \emph{Journal of Economic Perspectives}, \bold{15}, 117--128. Zeileis A., Leisch F., Kleiber C., Hornik K. (2005), Monitoring Structural Change in Dynamic Econometric Models, \emph{Journal of Applied Econometrics}, \bold{20}, 99--121. } \examples{ data("durab") ## use AR(1) model as in Hansen (2001) and Zeileis et al. (2005) durab.model <- y ~ lag ## historical tests ## OLS-based CUSUM process ols <- efp(durab.model, data = durab, type = "OLS-CUSUM") plot(ols) ## F statistics fs <- Fstats(durab.model, data = durab, from = 0.1) plot(fs) ## F statistics based on heteroskedasticity-consistent covariance matrix fsHC <- Fstats(durab.model, data = durab, from = 0.1, vcov = function(x, ...) 
vcovHC(x, type = "HC", ...)) plot(fsHC) ## monitoring Durab <- window(durab, start=1964, end = c(1979, 12)) ols.efp <- efp(durab.model, type = "OLS-CUSUM", data = Durab) newborder <- function(k) 1.723 * k/192 ols.mefp <- mefp(ols.efp, period=2) ols.mefp2 <- mefp(ols.efp, border=newborder) Durab <- window(durab, start=1964) ols.mon <- monitor(ols.mefp) ols.mon2 <- monitor(ols.mefp2) plot(ols.mon) lines(boundary(ols.mon2), col = 2) ## Note: critical value for linear boundary taken from Table III ## in Zeileis et al. 2005: (1.568 + 1.896)/2 = 1.732 is a linear ## interpolation between the values for T = 2 and T = 3 at ## alpha = 0.05. A typo switched 1.732 to 1.723. ## dating bp <- breakpoints(durab.model, data = durab) summary(bp) plot(summary(bp)) plot(ols) lines(breakpoints(bp, breaks = 1), col = 3) lines(breakpoints(bp, breaks = 2), col = 4) plot(fs) lines(breakpoints(bp, breaks = 1), col = 3) lines(breakpoints(bp, breaks = 2), col = 4) } \keyword{datasets} strucchange/man/recresid.Rd0000644000176200001440000000673313550050707015435 0ustar liggesusers\name{recresid} \alias{recresid} \alias{recresid.default} \alias{recresid.formula} \alias{recresid.lm} \title{Recursive Residuals} \description{ A generic function for computing the recursive residuals (standardized one step prediction errors) of a linear regression model. } \usage{ \method{recresid}{default}(x, y, start = ncol(x) + 1, end = nrow(x), tol = sqrt(.Machine$double.eps)/ncol(x), qr.tol = 1e-7, engine = c("R", "C"), \dots) \method{recresid}{formula}(formula, data = list(), \dots) \method{recresid}{lm}(x, data = list(), \dots) } \arguments{ \item{x, y, formula}{specification of the linear regression model: either by a regressor matrix \code{x} and a response variable \code{y}, or by a \code{formula} or by a fitted object \code{x} of class \code{"lm"}.} \item{start, end}{integer. Index of the first and last observation, respectively, for which recursive residuals should be computed. 
By default, the maximal range is selected.} \item{tol}{numeric. A relative tolerance for precision of recursive coefficient estimates, see details.} \item{qr.tol}{numeric. The \code{tol}erance passed to \code{\link[stats]{lm.fit}} for detecting linear dependencies.} \item{engine}{character. In addition to the R implementation of the default method, there is also a faster C implementation (see below for further details).} \item{data}{an optional data frame containing the variables in the model. By default the variables are taken from the environment which \code{recresid} is called from. Specifying \code{data} might also be necessary when applying \code{recresid} to a fitted model of class \code{"lm"} if this does not contain the regressor matrix and the response.} \item{\dots}{\emph{currently not used.}} } \details{ Recursive residuals are standardized one-step-ahead prediction errors. Under the usual assumptions for the linear regression model they are (asymptotically) normal and i.i.d. (see Brown, Durbin, Evans, 1975, for details). The default method computes the initial coefficient estimates via QR decomposition, using \code{\link{lm.fit}}. In subsequent steps, the updating formula provided by Brown, Durbin, Evans (1975) is employed. To avoid numerical instabilities in the first steps (with typically small sample sizes), the QR solution is computed for comparison. When the relative difference (assessed by \code{\link{all.equal}}) between the two solutions falls below \code{tol}, only the updating formula is used in subsequent steps. In large data sets, the R implementation can become rather slow. Hence, a C implementation is also available. This is not the default, yet, because it should receive more testing in numerically challenging cases. In addition to the R and C implementation, there is also an Armadillo-based C++ implementation available on R-Forge in package strucchangeArmadillo. 
For models with about 10 parameters, the C and C++ version perform similarly. For larger models, the C++ implementation seems to scale better. } \value{ A vector containing the recursive residuals. } \references{ Brown R.L., Durbin J., Evans J.M. (1975), Techniques for testing constancy of regression relationships over time, \emph{Journal of the Royal Statistical Society}, B, \bold{37}, 149-163. } \seealso{\code{\link{efp}}} \examples{ x <- rnorm(100) + rep(c(0, 2), each = 50) rr <- recresid(x ~ 1) plot(cumsum(rr), type = "l") plot(efp(x ~ 1, type = "Rec-CUSUM")) } \keyword{regression} strucchange/man/logLik.breakpoints.Rd0000644000176200001440000000407613062350355017374 0ustar liggesusers\name{logLik.breakpoints} \alias{logLik.breakpoints} \alias{logLik.breakpointsfull} \alias{AIC.breakpointsfull} \title{Log Likelihood and Information Criteria for Breakpoints} \description{ Computation of log likelihood and AIC type information criteria for partitions given by breakpoints. } \usage{ \method{logLik}{breakpointsfull}(object, breaks = NULL, ...) \method{AIC}{breakpointsfull}(object, breaks = NULL, ..., k = 2) } \arguments{ \item{object}{an object of class \code{"breakpoints"} or \code{"breakpointsfull"}.} \item{breaks}{if \code{object} is of class \code{"breakpointsfull"} the number of breaks can be specified.} \item{\dots}{\emph{currently not used}.} \item{k}{the penalty parameter to be used, the default \code{k = 2} is the classical AIC, \code{k = log(n)} gives the BIC, if \code{n} is the number of observations.} } \details{ As for linear models the log likelihood is computed on a normal model and the degrees of freedom are the number of regression coefficients multiplied by the number of segments plus the number of estimated breakpoints plus 1 for the error variance. If \code{AIC} is applied to an object of class \code{"breakpointsfull"} \code{breaks} can be a vector of integers and the AIC for each corresponding partition will be returned. 
By default the maximal number of breaks stored in the \code{object} is used. See below for an example. } \value{ An object of class \code{"logLik"} or a simple vector containing the AIC respectively. } \seealso{\code{\link{breakpoints}}} \examples{ ## Nile data with one breakpoint: the annual flows drop in 1898 ## because the first Aswan dam was built data("Nile") plot(Nile) bp.nile <- breakpoints(Nile ~ 1) summary(bp.nile) plot(bp.nile) ## BIC of partitions with 0 to 5 breakpoints plot(0:5, AIC(bp.nile, k = log(bp.nile$nobs)), type = "b") ## AIC plot(0:5, AIC(bp.nile), type = "b") ## BIC, AIC, log likelihood of a single partition bp.nile1 <- breakpoints(bp.nile, breaks = 1) AIC(bp.nile1, k = log(bp.nile1$nobs)) AIC(bp.nile1) logLik(bp.nile1) } \keyword{regression} strucchange/man/breakpoints.Rd0000644000176200001440000003244114235073217016153 0ustar liggesusers\name{breakpoints} \alias{breakpoints} \alias{breakpoints.formula} \alias{breakpoints.breakpointsfull} \alias{breakpoints.Fstats} \alias{summary.breakpoints} \alias{summary.breakpointsfull} \alias{plot.breakpointsfull} \alias{plot.summary.breakpointsfull} \alias{print.breakpoints} \alias{print.summary.breakpointsfull} \alias{lines.breakpoints} \alias{coef.breakpointsfull} \alias{vcov.breakpointsfull} \alias{fitted.breakpointsfull} \alias{residuals.breakpointsfull} \alias{df.residual.breakpointsfull} \encoding{latin1} \title{Dating Breaks} \description{ Computation of breakpoints in regression relationships. Given a number of breaks the function computes the optimal breakpoints. 
} \usage{ \method{breakpoints}{formula}(formula, h = 0.15, breaks = NULL, data = list(), hpc = c("none", "foreach"), \dots) \method{breakpoints}{breakpointsfull}(obj, breaks = NULL, \dots) \method{summary}{breakpointsfull}(object, breaks = NULL, sort = NULL, format.times = NULL, \dots) \method{lines}{breakpoints}(x, breaks = NULL, lty = 2, \dots) \method{coef}{breakpointsfull}(object, breaks = NULL, names = NULL, \dots) \method{fitted}{breakpointsfull}(object, breaks = NULL, \dots) \method{residuals}{breakpointsfull}(object, breaks = NULL, \dots) \method{vcov}{breakpointsfull}(object, breaks = NULL, names = NULL, het.reg = TRUE, het.err = TRUE, vcov. = NULL, sandwich = TRUE, \dots) } \arguments{ \item{formula}{a symbolic description for the model in which breakpoints will be estimated.} \item{h}{minimal segment size either given as fraction relative to the sample size or as an integer giving the minimal number of observations in each segment.} \item{breaks}{positive integer specifying the maximal number of breaks to be calculated. By default the maximal number allowed by \code{h} is used.} \item{data}{an optional data frame containing the variables in the model. By default the variables are taken from the environment which \code{breakpoints} is called from.} \item{hpc}{a character specifying the high performance computing support. Default is \code{"none"}, can be set to \code{"foreach"}.} \item{\dots}{arguments passed to \code{\link{recresid}}.} \item{obj, object}{an object of class \code{"breakpointsfull"}.} \item{sort}{logical. If set to \code{TRUE} \code{summary} tries to match the breakpoints from partitions with different numbers of breaks. The default tries to sort if a suitable matching can be found.} \item{format.times}{logical. If set to \code{TRUE} a vector of strings with the formatted breakdates is printed. 
See \code{\link{breakdates}} for more information.} \item{x}{an object of class \code{"breakpoints"}.} \item{lty}{line type.} \item{names}{a character vector giving the names of the segments. If of length 1 it is taken to be a generic prefix, e.g. \code{"segment"}.} \item{het.reg}{logical. Should heterogeneous regressors be assumed? If set to \code{FALSE} the distribution of the regressors is assumed to be homogeneous over the segments.} \item{het.err}{logical. Should heterogeneous errors be assumed? If set to \code{FALSE} the distribution of the errors is assumed to be homogeneous over the segments.} \item{vcov.}{a function to extract the covariance matrix for the coefficients of a fitted model of class \code{"lm"}.} \item{sandwich}{logical. Is the function \code{vcov.} the sandwich estimator or only the middle part?} } \details{ All procedures in this package are concerned with testing or assessing deviations from stability in the classical linear regression model \deqn{y_i = x_i^\top \beta + u_i}{y_i = x_i' b + u_i} In many applications it is reasonable to assume that there are \eqn{m} breakpoints, where the coefficients shift from one stable regression relationship to a different one. Thus, there are \eqn{m+1} segments in which the regression coefficients are constant, and the model can be rewritten as \deqn{y_i = x_i^\top \beta_j + u_i \qquad (i = i_{j-1} + 1, \dots, i_j, \quad j = 1, \dots, m+1)}{y_i = x_i' b_j + u_i (i = i_{j-1} + 1, \dots, i_j, j = 1, \dots, m+1)} where \eqn{j} denotes the segment index. In practice the breakpoints \eqn{i_j} are rarely given exogenously, but have to be estimated. \code{breakpoints} estimates these breakpoints by minimizing the residual sum of squares (RSS) of the equation above. The foundation for estimating breaks in time series regression models was given by Bai (1994) and was extended to multiple breaks by Bai (1997ab) and Bai & Perron (1998). 
\code{breakpoints} implements the algorithm described in Bai & Perron (2003) for simultaneous estimation of multiple breakpoints. The distribution function used for the confidence intervals for the breakpoints is given in Bai (1997b). The ideas behind this implementation are described in Zeileis et al. (2003). The algorithm for computing the optimal breakpoints given the number of breaks is based on a dynamic programming approach. The underlying idea is that of the Bellman principle. The main computational effort is to compute a triangular RSS matrix, which gives the residual sum of squares for a segment starting at observation \eqn{i} and ending at \eqn{i'} with \eqn{i} < \eqn{i'}. Given a \code{formula} as the first argument, \code{breakpoints} computes an object of class \code{"breakpointsfull"} which inherits from \code{"breakpoints"}. This contains in particular the triangular RSS matrix and functions to extract an optimal segmentation. A \code{summary} of this object will give the breakpoints (and associated breakdates) for all segmentations up to the maximal number of breaks together with the associated RSS and BIC. These will be plotted if \code{plot} is applied and thus visualize the minimum BIC estimator of the number of breakpoints. From an object of class \code{"breakpointsfull"} an arbitrary number of \code{breaks} (admissible by the minimum segment size \code{h}) can be extracted by another application of \code{breakpoints}, returning an object of class \code{"breakpoints"}. This contains only the breakpoints for the specified number of breaks and some model properties (number of observations, regressors, time series properties and the associated RSS) but not the triangular RSS matrix and related extractor functions. The set of breakpoints which is associated by default with a \code{"breakpointsfull"} object is the minimum BIC partition. 
Breakpoints are the number of observations that are the last in one segment, it is also possible to compute the corresponding \code{breakdates} which are the breakpoints on the underlying time scale. The breakdates can be formatted which enhances readability in particular for quarterly or monthly time series. For example the breakdate \code{2002.75} of a monthly time series will be formatted to \code{"2002(10)"}. See \code{\link{breakdates}} for more details. From a \code{"breakpointsfull"} object confidence intervals for the breakpoints can be computed using the method of \code{\link{confint}}. The breakdates corresponding to the breakpoints can again be computed by \code{\link{breakdates}}. The breakpoints and their confidence intervals can be visualized by \code{lines}. Convenience functions are provided for extracting the coefficients and covariance matrix, fitted values and residuals of segmented models. The log likelihood as well as some information criteria can be computed using the methods for the \code{\link{logLik}} and \code{\link{AIC}}. As for linear models the log likelihood is computed on a normal model and the degrees of freedom are the number of regression coefficients multiplied by the number of segments plus the number of estimated breakpoints plus 1 for the error variance. More details can be found on the help page of the method \code{\link{logLik.breakpoints}}. As the maximum of a sequence of F statistics is equivalent to the minimum OLS estimator of the breakpoint in a 2-segment partition it can be extracted by \code{breakpoints} from an object of class \code{"Fstats"} as computed by \code{\link{Fstats}}. However, this cannot be used to extract a larger number of breakpoints. For illustration see the commented examples below and Zeileis et al. (2003). Optional support for high performance computing is available, currently using \code{\link[foreach]{foreach}} for the dynamic programming algorithm. 
If \code{hpc = "foreach"} is to be used, a parallel backend should be registered before. See \code{\link[foreach]{foreach}} for more information. } \section{value}{ An object of class \code{"breakpoints"} is a list with the following elements: \describe{ \item{breakpoints}{the breakpoints of the optimal partition with the number of breaks specified (set to \code{NA} if the optimal 1-segment solution is reported),} \item{RSS}{the associated RSS,} \item{nobs}{the number of observations,} \item{nreg}{the number of regressors,} \item{call}{the function call,} \item{datatsp}{the time series properties \code{tsp} of the data, if any, \code{c(1/nobs, 1, nobs)} otherwise.} } If applied to a \code{formula} as first argument, \code{breakpoints} returns an object of class \code{"breakpointsfull"} (which inherits from \code{"breakpoints"}), that contains some additional (or slightly different) elements such as: \describe{ \item{breakpoints}{the breakpoints of the minimum BIC partition,} \item{RSS}{a function which takes two arguments \code{i,j} and computes the residual sum of squares for a segment starting at observation \code{i} and ending at \code{j} by looking up the corresponding element in the triangular RSS matrix \code{RSS.triang},} \item{RSS.triang}{a list encoding the triangular RSS matrix.} } } \references{ Bai J. (1994), Least Squares Estimation of a Shift in Linear Processes, \emph{Journal of Time Series Analysis}, \bold{15}, 453-472. Bai J. (1997a), Estimating Multiple Breaks One at a Time, \emph{Econometric Theory}, \bold{13}, 315-352. Bai J. (1997b), Estimation of a Change Point in Multiple Regression Models, \emph{Review of Economics and Statistics}, \bold{79}, 551-563. Bai J., Perron P. (1998), Estimating and Testing Linear Models With Multiple Structural Changes, \emph{Econometrica}, \bold{66}, 47-78. Bai J., Perron P. (2003), Computation and Analysis of Multiple Structural Change Models, \emph{Journal of Applied Econometrics}, \bold{18}, 1-22. 
Zeileis A., Kleiber C., Krämer W., Hornik K. (2003), Testing and Dating of Structural Changes in Practice, \emph{Computational Statistics and Data Analysis}, \bold{44}, 109-123. doi:10.1016/S0167-9473(03)00030-6. Zeileis A., Shah A., Patnaik I. (2010), Testing, Monitoring, and Dating Structural Changes in Exchange Rate Regimes, \emph{Computational Statistics and Data Analysis}, \bold{54}(6), 1696--1706. doi:10.1016/j.csda.2009.12.005. } \examples{ ## Nile data with one breakpoint: the annual flows drop in 1898 ## because the first Aswan dam was built data("Nile") plot(Nile) ## F statistics indicate one breakpoint fs.nile <- Fstats(Nile ~ 1) plot(fs.nile) breakpoints(fs.nile) lines(breakpoints(fs.nile)) ## or bp.nile <- breakpoints(Nile ~ 1) summary(bp.nile) ## the BIC also chooses one breakpoint plot(bp.nile) breakpoints(bp.nile) ## fit null hypothesis model and model with 1 breakpoint fm0 <- lm(Nile ~ 1) fm1 <- lm(Nile ~ breakfactor(bp.nile, breaks = 1)) plot(Nile) lines(ts(fitted(fm0), start = 1871), col = 3) lines(ts(fitted(fm1), start = 1871), col = 4) lines(bp.nile) ## confidence interval ci.nile <- confint(bp.nile) ci.nile lines(ci.nile) ## UK Seatbelt data: a SARIMA(1,0,0)(1,0,0)_12 model ## (fitted by OLS) is used and reveals (at least) two ## breakpoints - one in 1973 associated with the oil crisis and ## one in 1983 due to the introduction of compulsory ## wearing of seatbelts in the UK. 
data("UKDriverDeaths") seatbelt <- log10(UKDriverDeaths) seatbelt <- cbind(seatbelt, lag(seatbelt, k = -1), lag(seatbelt, k = -12)) colnames(seatbelt) <- c("y", "ylag1", "ylag12") seatbelt <- window(seatbelt, start = c(1970, 1), end = c(1984,12)) plot(seatbelt[,"y"], ylab = expression(log[10](casualties))) ## testing re.seat <- efp(y ~ ylag1 + ylag12, data = seatbelt, type = "RE") plot(re.seat) ## dating bp.seat <- breakpoints(y ~ ylag1 + ylag12, data = seatbelt, h = 0.1) summary(bp.seat) lines(bp.seat, breaks = 2) ## minimum BIC partition plot(bp.seat) breakpoints(bp.seat) ## the BIC would choose 0 breakpoints although the RE and supF test ## clearly reject the hypothesis of structural stability. Bai & ## Perron (2003) report that the BIC has problems in dynamic regressions. ## due to the shape of the RE process of the F statistics choose two ## breakpoints and fit corresponding models bp.seat2 <- breakpoints(bp.seat, breaks = 2) fm0 <- lm(y ~ ylag1 + ylag12, data = seatbelt) fm1 <- lm(y ~ breakfactor(bp.seat2)/(ylag1 + ylag12) - 1, data = seatbelt) ## plot plot(seatbelt[,"y"], ylab = expression(log[10](casualties))) time.seat <- as.vector(time(seatbelt)) lines(time.seat, fitted(fm0), col = 3) lines(time.seat, fitted(fm1), col = 4) lines(bp.seat2) ## confidence intervals ci.seat2 <- confint(bp.seat, breaks = 2) ci.seat2 lines(ci.seat2) } \concept{breakpoint estimation} \concept{changepoint estimation} \concept{segmented regression} \keyword{regression} strucchange/man/boundary.mefp.Rd0000644000176200001440000000136013062350355016375 0ustar liggesusers\name{boundary.mefp} \alias{boundary.mefp} \title{Boundary Function for Monitoring of Structural Changes} \description{Computes boundary for an object of class \code{"mefp"}} \usage{ \method{boundary}{mefp}(x, ...)} \arguments{ \item{x}{an object of class \code{"mefp"}.} \item{...}{currently not used.} } \value{an object of class \code{"ts"} with the same time properties as the monitored process} 
\seealso{\code{\link{mefp}}, \code{\link{plot.mefp}}} \examples{ df1 <- data.frame(y=rnorm(300)) df1[150:300,"y"] <- df1[150:300,"y"]+1 me1 <- mefp(y~1, data=df1[1:50,,drop=FALSE], type="ME", h=1, alpha=0.05) me2 <- monitor(me1, data=df1) plot(me2, boundary=FALSE) lines(boundary(me2), col="green", lty="44") } \keyword{regression} strucchange/man/boundary.efp.Rd0000644000176200001440000000300113062350355016212 0ustar liggesusers\name{boundary.efp} \alias{boundary.efp} \title{Boundary for Empirical Fluctuation Processes} \description{Computes boundary for an object of class \code{"efp"}} \usage{ \method{boundary}{efp}(x, alpha = 0.05, alt.boundary = FALSE, functional = "max", ...) } \arguments{ \item{x}{an object of class \code{"efp"}.} \item{alpha}{numeric from interval (0,1) indicating the confidence level for which the boundary of the corresponding test will be computed.} \item{alt.boundary}{logical. If set to \code{TRUE} alternative boundaries (instead of the standard linear boundaries) will be computed (for Brownian bridge type processes only).} \item{functional}{indicates which functional should be applied to the empirical fluctuation process. 
See also \code{\link{plot.efp}}.} \item{\dots}{currently not used.} } \value{an object of class \code{"ts"} with the same time properties as the process in \code{x}} \seealso{\code{\link{efp}}, \code{\link{plot.efp}}} \examples{ ## Load dataset "nhtemp" with average yearly temperatures in New Haven data("nhtemp") ## plot the data plot(nhtemp) ## test the model null hypothesis that the average temperature remains constant ## over the years ## compute OLS-CUSUM fluctuation process temp.cus <- efp(nhtemp ~ 1, type = "OLS-CUSUM") ## plot the process without boundaries plot(temp.cus, alpha = 0.01, boundary = FALSE) ## add the boundaries in another colour bound <- boundary(temp.cus, alpha = 0.01) lines(bound, col=4) lines(-bound, col=4) } \keyword{regression} strucchange/man/root.matrix.Rd0000644000176200001440000000065013062350355016113 0ustar liggesusers\name{root.matrix} \alias{root.matrix} \title{Root of a Matrix} \description{Computes the root of a symmetric and positive semidefinite matrix.} \usage{ root.matrix(X) } \arguments{ \item{X}{a symmetric and positive semidefinite matrix} } \value{a symmetric matrix of same dimensions as \code{X}} \examples{ X <- matrix(c(1,2,2,8), ncol=2) test <- root.matrix(X) ## control results X test \%*\% test } \keyword{algebra} strucchange/man/confint.breakpointsfull.Rd0000644000176200001440000000644113062350355020474 0ustar liggesusers\name{confint.breakpointsfull} \alias{confint.breakpointsfull} \alias{lines.confint.breakpoints} \alias{print.confint.breakpoints} \title{Confidence Intervals for Breakpoints} \description{ Computes confidence intervals for breakpoints. } \usage{ \method{confint}{breakpointsfull}(object, parm = NULL, level = 0.95, breaks = NULL, het.reg = TRUE, het.err = TRUE, vcov. = NULL, sandwich = TRUE, ...) \method{lines}{confint.breakpoints}(x, col = 2, angle = 90, length = 0.05, code = 3, at = NULL, breakpoints = TRUE, ...) 
} \arguments{ \item{object}{an object of class \code{"breakpointsfull"} as computed by \code{\link{breakpoints}} from a \code{formula}.} \item{parm}{the same as \code{breaks}, only one of the two should be specified.} \item{level}{the confidence level required.} \item{breaks}{an integer specifying the number of breaks to be used. By default the breaks of the minimum BIC partition are used.} \item{het.reg}{logical. Should heterogeneous regressors be assumed? If set to \code{FALSE} the distribution of the regressors is assumed to be homogeneous over the segments.} \item{het.err}{logical. Should heterogeneous errors be assumed? If set to \code{FALSE} the distribution of the errors is assumed to be homogeneous over the segments.} \item{vcov.}{a function to extract the covariance matrix for the coefficients of a fitted model of class \code{"lm"}.} \item{sandwich}{logical. Is the function \code{vcov.} the sandwich estimator or only the middle part?} \item{x}{an object of class \code{"confint.breakpoints"} as returned by \code{confint}.} \item{col, angle, length, code}{arguments passed to \code{\link{arrows}}.} \item{at}{position on the y axis, where the confidence arrows should be drawn. By default they are drawn at the bottom of the plot.} \item{breakpoints}{logical. If \code{TRUE} vertical lines for the breakpoints are drawn.} \item{\dots}{\emph{currently not used}.} } \details{ As the breakpoints are integers (observation numbers) the corresponding confidence intervals are also rounded to integers. The distribution function used for the computation of confidence intervals of breakpoints is given in Bai (1997). The procedure, in particular the usage of heterogeneous regressors and/or errors, is described in more detail in Bai & Perron (2003). The breakpoints should be computed from a formula with \code{breakpoints}, then the confidence intervals for the breakpoints can be derived by \code{confint} and these can be visualized by \code{lines}. For an example see below. 
} \value{ A matrix containing the breakpoints and their lower and upper confidence boundary for the given level. } \references{ Bai J. (1997), Estimation of a Change Point in Multiple Regression Models, \emph{Review of Economics and Statistics}, \bold{79}, 551-563. Bai J., Perron P. (2003), Computation and Analysis of Multiple Structural Change Models, \emph{Journal of Applied Econometrics}, \bold{18}, 1-22. } \seealso{\code{\link{breakpoints}}} \examples{ ## Nile data with one breakpoint: the annual flows drop in 1898 ## because the first Aswan dam was built data("Nile") plot(Nile) ## dating breaks bp.nile <- breakpoints(Nile ~ 1) ci.nile <- confint(bp.nile, breaks = 1) lines(ci.nile) } \keyword{regression} strucchange/man/catL2BB.Rd0000644000176200001440000001050313366337662015011 0ustar liggesusers\name{catL2BB} \alias{catL2BB} \alias{ordL2BB} \alias{ordwmax} \title{Generators for efpFunctionals along Categorical Variables} \description{ Generators for \code{efpFunctional} objects suitable for aggregating empirical fluctuation processes to test statistics along (ordinal) categorical variables. } \usage{ catL2BB(freq) ordL2BB(freq, nproc = NULL, nrep = 1e5, probs = c(0:84/100, 850:1000/1000), \dots) ordwmax(freq, algorithm = mvtnorm::GenzBretz(), \dots) } \arguments{ \item{freq}{object specifying the category frequencies for the categorical variable to be used for aggregation: either a \code{\link{gefp}} object, a \code{\link{factor}}, or a numeric vector with either absolute or relative category frequencies.} \item{nproc}{numeric. Number of processes used for simulating from the asymptotic distribution (passed to \code{\link{efpFunctional}}). If \code{freq} is a \code{\link{gefp}} object, then its number of processes is used by default.} \item{nrep}{numeric. 
Number of replications used for simulating from the asymptotic distribution (passed to \code{\link{efpFunctional}}).} \item{probs}{numeric vector specifying for which probabilities critical values should be tabulated.} \item{\dots}{further arguments passed to \code{\link{efpFunctional}}.} \item{algorithm}{algorithm specification passed to \code{\link[mvtnorm]{pmvnorm}} for computing the asymptotic distribution.} } \details{ Merkle, Fan, and Zeileis (2014) discuss three functionals that are suitable for aggregating empirical fluctuation processes along categorical variables, especially ordinal variables. The functions \code{catL2BB}, \code{ordL2BB}, and \code{ordwmax} all require a specification of the relative frequencies within each category (which can be computed from various specifications, see arguments). All of them employ \code{\link{efpFunctional}} (Zeileis 2006) internally to set up an object that can be employed with \code{\link{gefp}} fluctuation processes. \code{catL2BB} results in a chi-squared test. This is essentially the LM test counterpart to the likelihood ratio test that assesses a split into unordered categories. \code{ordL2BB} is the ordinal counterpart to \code{\link{supLM}} where aggregation is done along the ordered categories (rather than continuously). The asymptotic distribution is non-standard and needs to be simulated for every combination of frequencies and number of processes. Hence, this is somewhat more time-consuming compared to the closed-form solution employed in \code{catL2BB}. It is also possible to store the result of \code{ordL2BB} in case it needs to be applied several \code{\link{gefp}} fluctuation processes. \code{ordwmax} is a weighted double maximum test based on ideas previously suggested by Hothorn and Zeileis (2008) in the context of maximally selected statistics. The asymptotic distribution is (multivariate) normal and computed by means of \code{\link[mvtnorm]{pmvnorm}}. 
} \value{ An object of class \code{efpFunctional}. } \references{ Hothorn T., Zeileis A. (2008), Generalized Maximally Selected Statistics. \emph{Biometrics}, \bold{64}, 1263--1269. Merkle E.C., Fan J., Zeileis A. (2014), Testing for Measurement Invariance with Respect to an Ordinal Variable. \emph{Psychometrika}, \bold{79}(4), 569--584. doi:10.1007/S11336-013-9376-7. Zeileis A. (2006), Implementing a Class of Structural Change Tests: An Econometric Computing Approach. \emph{Computational Statistics & Data Analysis}, \bold{50}, 2987--3008. doi:10.1016/j.csda.2005.07.001. } \seealso{\code{\link{efpFunctional}}, \code{\link{gefp}}} \examples{ ## artificial data set.seed(1) d <- data.frame( x = runif(200, -1, 1), z = factor(rep(1:4, each = 50)), err = rnorm(200) ) d$y <- rep(c(0.5, -0.5), c(150, 50)) * d$x + d$err ## empirical fluctuation process scus <- gefp(y ~ x, data = d, fit = lm, order.by = ~ z) ## chi-squared-type test (unordered LM-type test) LMuo <- catL2BB(scus) plot(scus, functional = LMuo) sctest(scus, functional = LMuo) ## ordinal maxLM test (with few replications only to save time) maxLMo <- ordL2BB(scus, nrep = 10000) plot(scus, functional = maxLMo) sctest(scus, functional = maxLMo) ## ordinal weighted double maximum test WDM <- ordwmax(scus) plot(scus, functional = WDM) sctest(scus, functional = WDM) } \keyword{regression} strucchange/man/sctest.efp.Rd0000644000176200001440000000643114127174734015716 0ustar liggesusers\name{sctest.efp} \alias{sctest.efp} \encoding{latin1} \title{Generalized Fluctuation Tests} \description{Performs a generalized fluctuation test.} \usage{ \method{sctest}{efp}(x, alt.boundary = FALSE, functional = c("max", "range", "maxL2", "meanL2"), ...) } \arguments{ \item{x}{an object of class \code{"efp"}.} \item{alt.boundary}{logical. 
If set to \code{TRUE} alternative boundaries (instead of the standard linear boundaries) will be used (for CUSUM processes only).} \item{functional}{indicates which functional should be applied to the empirical fluctuation process.} \item{...}{currently not used.} } \details{The critical values for the MOSUM tests and the ME test are just tabulated for confidence levels between 0.1 and 0.01, thus the p value approximations will be poor for other p values. Similarly the critical values for the maximum and mean squared Euclidean norm (\code{"maxL2"} and \code{"meanL2"}) are tabulated for confidence levels between 0.2 and 0.005.} \value{ An object of class \code{"htest"} containing: \item{statistic}{the test statistic,} \item{p.value}{the corresponding p value,} \item{method}{a character string with the method used,} \item{data.name}{a character string with the data name.} } \references{Brown R.L., Durbin J., Evans J.M. (1975), Techniques for testing constancy of regression relationships over time, \emph{Journal of the Royal Statistical Society}, B, \bold{37}, 149-163. Chu C.-S., Hornik K., Kuan C.-M. (1995), MOSUM tests for parameter constancy, \emph{Biometrika}, \bold{82}, 603-617. Chu C.-S., Hornik K., Kuan C.-M. (1995), The moving-estimates test for parameter stability, \emph{Econometric Theory}, \bold{11}, 669-720. Krämer W., Ploberger W., Alt R. (1988), Testing for structural change in dynamic models, \emph{Econometrica}, \bold{56}, 1355-1369. Kuan C.-M., Hornik K. (1995), The generalized fluctuation test: A unifying view, \emph{Econometric Reviews}, \bold{14}, 135 - 161. Kuan C.-M., Chen (1994), Implementing the fluctuation and moving estimates tests in dynamic econometric models, \emph{Economics Letters}, \bold{44}, 235-239. Ploberger W., Krämer W. (1992), The CUSUM Test with OLS Residuals, \emph{Econometrica}, \bold{60}, 271-285. Zeileis A., Leisch F., Hornik K., Kleiber C. 
(2002), \code{strucchange}: An R Package for Testing for Structural Change in Linear Regression Models, \emph{Journal of Statistical Software}, \bold{7}(2), 1-38. \doi{10.18637/jss.v007.i02}. Zeileis A. (2004), Alternative Boundaries for CUSUM Tests, \emph{Statistical Papers}, \bold{45}, 123--131. } \seealso{\code{\link{efp}}, \code{\link{plot.efp}}} \examples{ ## Load dataset "nhtemp" with average yearly temperatures in New Haven data("nhtemp") ## plot the data plot(nhtemp) ## test the model null hypothesis that the average temperature remains ## constant over the years compute OLS-CUSUM fluctuation process temp.cus <- efp(nhtemp ~ 1, type = "OLS-CUSUM") ## plot the process with alternative boundaries plot(temp.cus, alpha = 0.01, alt.boundary = TRUE) ## and calculate the test statistic sctest(temp.cus) ## compute moving estimates fluctuation process temp.me <- efp(nhtemp ~ 1, type = "ME", h = 0.2) ## plot the process with functional = "max" plot(temp.me) ## and perform the corresponding test sctest(temp.me) } \keyword{htest} strucchange/man/RealInt.Rd0000644000176200001440000000311213062350355015157 0ustar liggesusers\name{RealInt} \alias{RealInt} \title{US Ex-post Real Interest Rate} \description{ US ex-post real interest rate: the three-month treasury bill deflated by the CPI inflation rate. } \usage{data("RealInt")} \format{ A quarterly time series from 1961(1) to 1986(3). } \source{The data is available online in the data archive of the Journal of Applied Econometrics \url{http://qed.econ.queensu.ca/jae/2003-v18.1/bai-perron/}.} \references{ Bai J., Perron P. (2003), Computation and Analysis of Multiple Structural Change Models, \emph{Journal of Applied Econometrics}, \bold{18}, 1-22. Zeileis A., Kleiber C. (2005), Validating Multiple Structural Change Models - A Case Study. Journal of Applied Econometrics, \bold{20}, 685-690. 
} \examples{ ## load and plot data data("RealInt") plot(RealInt) ## estimate breakpoints bp.ri <- breakpoints(RealInt ~ 1, h = 15) plot(bp.ri) summary(bp.ri) ## fit segmented model with three breaks fac.ri <- breakfactor(bp.ri, breaks = 3, label = "seg") fm.ri <- lm(RealInt ~ 0 + fac.ri) summary(fm.ri) ## setup kernel HAC estimator vcov.ri <- function(x, ...) kernHAC(x, kernel = "Quadratic Spectral", prewhite = 1, approx = "AR(1)", ...) ## Results from Table 1 in Bai & Perron (2003): ## coefficient estimates coef(bp.ri, breaks = 3) ## corresponding standard errors sapply(vcov(bp.ri, breaks = 3, vcov = vcov.ri), sqrt) ## breakpoints and confidence intervals confint(bp.ri, breaks = 3, vcov = vcov.ri) ## Visualization plot(RealInt) lines(as.vector(time(RealInt)), fitted(fm.ri), col = 4) lines(confint(bp.ri, breaks = 3, vcov = vcov.ri)) } \keyword{datasets} strucchange/man/sctest.default.Rd0000644000176200001440000001670213062350355016562 0ustar liggesusers\name{sctest.default} \alias{sctest.default} \title{Structural Change Tests in Parametric Models} \description{ Performs model-based tests for structural change (or parameter instability) in parametric models. } \usage{ \method{sctest}{default}(x, order.by = NULL, functional = maxBB, vcov = NULL, scores = estfun, decorrelate = TRUE, sandwich = TRUE, parm = NULL, plot = FALSE, from = 0.1, to = NULL, nobs = NULL, nrep = 50000, width = 0.15, xlab = NULL, \dots) } \arguments{ \item{x}{a model object. The model class can in principle be arbitrary but needs to provide suitable methods for extracting the \code{scores} and associated variance-covariance matrix \code{vcov}.} \item{order.by}{either a vector \code{z} or a formula with a single explanatory variable like \code{~ z}. The observations in the model are ordered by the size of \code{z}. 
If set to \code{NULL} (the default) the observations are assumed to be ordered (e.g., a time series).} \item{functional}{either a character specification of the functional to be used or an \code{\link{efpFunctional}} object. For a list of functionals see the details.} \item{vcov}{a function to extract the covariance matrix for the coefficients of the fitted model: \code{vcov(x, order.by = order.by, data = data)}. Alternatively, the character string \code{"info"}, for details see below.} \item{scores}{a function which extracts the scores or estimating function from the fitted object: \code{scores(x)}, by default this is \code{\link[sandwich]{estfun}}.} \item{decorrelate}{logical. Should the process be decorrelated?} \item{sandwich}{logical. Is the function \code{vcov} the full sandwich estimator or only the meat?} \item{parm}{integer or character specifying the component of the estimating functions which should be used (by default all components are used).} \item{plot}{logical. Should the result of the test also be visualized?} \item{from, to}{numeric. In case the \code{functional} is \code{"supLM"} (or equivalently \code{"maxLM"}), \code{from} and \code{to} can be passed to the \code{\link{supLM}} functional.} \item{nobs, nrep}{numeric. In case the \code{functional} is \code{"maxLMo"}, \code{nobs} and \code{nrep} are passed to the \code{\link{catL2BB}} functional.} \item{width}{numeric. In case the \code{functional} is \code{"MOSUM"}, the bandwidth \code{width} is passed to the \code{\link{maxMOSUM}} functional.} \item{xlab, \dots}{graphical parameters passed to the plot method (in case \code{plot = TRUE}).} } \details{ \code{sctest.default} is a convenience interface to \code{\link{gefp}} for structural change tests (or parameter instability tests) in general parametric models. 
It proceeds in the following steps: \enumerate{ \item The generalized empirical fluctuation process (or score-based CUSUM process) is computed via \code{scus <- gefp(x, fit = NULL, \dots)} where \code{\dots} comprises the arguments \code{order.by}, \code{vcov}, \code{scores}, \code{decorrelate}, \code{sandwich}, \code{parm} that are simply passed on to \code{\link{gefp}}. \item The empirical fluctuation process is visualized (if \code{plot = TRUE}) via \code{plot(scus, functional = functional, \dots)}. \item The empirical fluctuation is assessed by the corresponding significance test via \code{sctest(scus, functional = functional)}. } The main motivation for providing the convenience interface is that these three steps can be easily carried out in one go along with two convenience options: \enumerate{ \item By default, the covariance is computed by an outer-product of gradients estimator just as in \code{gefp}. This is always available based on the \code{scores}. Additionally, by setting \code{vcov = "info"}, the corresponding information matrix can be used. Then the average information is assumed to be provided by the \code{vcov} method for the model class. (Note that this is only sensible for models estimated by maximum likelihood.) \item Instead of providing the \code{functional} by an \code{\link{efpFunctional}} object, the test labels employed by Merkle and Zeileis (2013) and Merkle, Fan, and Zeileis (2014) can be used for convenience. Namely, for continuous numeric orderings, the following functionals are available: \code{functional = "DM"} or \code{"dmax"} provides the double-maximum test (\code{\link{maxBB}}). \code{"CvM"} is the Cramer-von Mises functional \code{\link{meanL2BB}}. \code{"supLM"} or equivalently \code{"maxLM"} is Andrews' supLM test (\code{\link{supLM}}). \code{"MOSUM"} or \code{"maxMOSUM"} is the MOSUM functional (\code{\link{maxMOSUM}}), and \code{"range"} is the range functional \code{\link{rangeBB}}. 
Furthermore, several functionals suitable for (ordered) categorical \code{order.by} variables are provided: \code{"LMuo"} is the unordered LM test (\code{\link{catL2BB}}), \code{"WDMo"} is the weighted double-maximum test for ordered variables (\code{\link{ordwmax}}), and \code{"maxLMo"} is the maxLM test for ordered variables (\code{\link{ordL2BB}}). } The theoretical model class is introduced in Zeileis and Hornik (2007) with a unifying view in Zeileis (2005), especially from an econometric perspective. Zeileis (2006) introduces the underlying computational tools \code{gefp} and \code{efpFunctional}. Merkle and Zeileis (2013) discuss the methods in the context of measurement invariance which is particularly relevant to psychometric models for cross section data. Merkle, Fan, and Zeileis (2014) extend the results to ordered categorical variables. Zeileis, Shah, and Patnaik (2010) provide a unifying discussion in the context of time series methods, specifically in financial econometrics. } \value{ An object of class \code{"htest"} containing: \item{statistic}{the test statistic,} \item{p.value}{the corresponding p value,} \item{method}{a character string with the method used,} \item{data.name}{a character string with the data name.} } \references{ Merkle E.C., Zeileis A. (2013), Tests of Measurement Invariance without Subgroups: A Generalization of Classical Methods. \emph{Psychometrika}, \bold{78}(1), 59--82. doi:10.1007/S11336-012-9302-4 Merkle E.C., Fan J., Zeileis A. (2014), Testing for Measurement Invariance with Respect to an Ordinal Variable. \emph{Psychometrika}, \bold{79}(4), 569--584. doi:10.1007/S11336-013-9376-7. Zeileis A. (2005), A Unified Approach to Structural Change Tests Based on ML Scores, F Statistics, and OLS Residuals. \emph{Econometric Reviews}, \bold{24}, 445--466. doi:10.1080/07474930500406053. Zeileis A. (2006), Implementing a Class of Structural Change Tests: An Econometric Computing Approach. 
\emph{Computational Statistics & Data Analysis}, \bold{50}, 2987--3008. doi:10.1016/j.csda.2005.07.001. Zeileis A., Hornik K. (2007), Generalized M-Fluctuation Tests for Parameter Instability, \emph{Statistica Neerlandica}, \bold{61}, 488--508. doi:10.1111/j.1467-9574.2007.00371.x. Zeileis A., Shah A., Patnaik I. (2010), Testing, Monitoring, and Dating Structural Changes in Exchange Rate Regimes, \emph{Computational Statistics and Data Analysis}, \bold{54}(6), 1696--1706. doi:10.1016/j.csda.2009.12.005. } \seealso{\code{\link{gefp}}, \code{\link{efpFunctional}}} \examples{ ## Zeileis and Hornik (2007), Section 5.3, Figure 6 data("Grossarl") m <- glm(cbind(illegitimate, legitimate) ~ 1, family = binomial, data = Grossarl, subset = time(fraction) <= 1800) sctest(m, order.by = 1700:1800, functional = "CvM") } \keyword{htest} strucchange/man/plot.efp.Rd0000644000176200001440000001015314127174674015366 0ustar liggesusers\name{plot.efp} \alias{plot.efp} \alias{lines.efp} \encoding{latin1} \title{Plot Empirical Fluctuation Process} \description{Plot and lines method for objects of class \code{"efp"}} \usage{ \method{plot}{efp}(x, alpha = 0.05, alt.boundary = FALSE, boundary = TRUE, functional = "max", main = NULL, ylim = NULL, ylab = "Empirical fluctuation process", ...) \method{lines}{efp}(x, functional = "max", ...) } \arguments{ \item{x}{an object of class \code{"efp"}.} \item{alpha}{numeric from interval (0,1) indicating the confidence level for which the boundary of the corresponding test will be computed.} \item{alt.boundary}{logical. If set to \code{TRUE} alternative boundaries (instead of the standard linear boundaries) will be plotted (for CUSUM processes only).} \item{boundary}{logical. If set to \code{FALSE} the boundary will be computed but not plotted.} \item{functional}{indicates which functional should be applied to the process before plotting and which boundaries should be used. 
If set to \code{NULL} a multiple process with boundaries for the \code{"max"} functional is plotted. For more details see below.} \item{main, ylim, ylab, ...}{high-level \code{\link{plot}} function parameters.} } \details{Plots are available for the \code{"max"} functional for all process types. For Brownian bridge type processes the maximum or mean squared Euclidean norm (\code{"maxL2"} and \code{"meanL2"}) can be used for aggregating before plotting. No plots are available for the \code{"range"} functional. Alternative boundaries that are proportional to the standard deviation of the corresponding limiting process are available for processes with Brownian motion or Brownian bridge limiting processes. } \value{\code{\link{efp}} returns an object of class \code{"efp"} which inherits from the class \code{"ts"} or \code{"mts"} respectively. The function \code{\link{plot}} has a method to plot the empirical fluctuation process; with \code{sctest} the corresponding test for structural change can be performed.} \references{Brown R.L., Durbin J., Evans J.M. (1975), Techniques for testing constancy of regression relationships over time, \emph{Journal of the Royal Statistical Society}, B, \bold{37}, 149-163. Chu C.-S., Hornik K., Kuan C.-M. (1995), MOSUM tests for parameter constancy, \emph{Biometrika}, \bold{82}, 603-617. Chu C.-S., Hornik K., Kuan C.-M. (1995), The moving-estimates test for parameter stability, \emph{Econometric Theory}, \bold{11}, 669-720. Krämer W., Ploberger W., Alt R. (1988), Testing for structural change in dynamic models, \emph{Econometrica}, \bold{56}, 1355-1369. Kuan C.-M., Hornik K. (1995), The generalized fluctuation test: A unifying view, \emph{Econometric Reviews}, \bold{14}, 135 - 161. Kuan C.-M., Chen (1994), Implementing the fluctuation and moving estimates tests in dynamic econometric models, \emph{Economics Letters}, \bold{44}, 235-239. Ploberger W., Krämer W. 
(1992), The CUSUM test with OLS residuals, \emph{Econometrica}, \bold{60}, 271-285. Zeileis A., Leisch F., Hornik K., Kleiber C. (2002), \code{strucchange}: An R Package for Testing for Structural Change in Linear Regression Models, \emph{Journal of Statistical Software}, \bold{7}(2), 1-38. \doi{10.18637/jss.v007.i02}. Zeileis A. (2004), Alternative Boundaries for CUSUM Tests, \emph{Statistical Papers}, \bold{45}, 123--131. } \seealso{\code{\link{efp}}, \code{\link{boundary.efp}}, \code{\link{sctest.efp}}} \examples{ ## Load dataset "nhtemp" with average yearly temperatures in New Haven data("nhtemp") ## plot the data plot(nhtemp) ## test the model null hypothesis that the average temperature remains ## constant over the years ## compute Rec-CUSUM fluctuation process temp.cus <- efp(nhtemp ~ 1) ## plot the process plot(temp.cus, alpha = 0.01) ## and calculate the test statistic sctest(temp.cus) ## compute (recursive estimates) fluctuation process ## with an additional linear trend regressor lin.trend <- 1:60 temp.me <- efp(nhtemp ~ lin.trend, type = "fluctuation") ## plot the bivariate process plot(temp.me, functional = NULL) ## and perform the corresponding test sctest(temp.me) } \keyword{hplot} strucchange/man/solveCrossprod.Rd0000644000176200001440000000142613062350355016656 0ustar liggesusers\name{solveCrossprod} \alias{solveCrossprod} \title{Inversion of X'X} \description{Computes the inverse of the cross-product of a matrix X.} \usage{ solveCrossprod(X, method = c("qr", "chol", "solve")) } \arguments{ \item{X}{a matrix, typically a regressor matrix.} \item{method}{a string indicating whether the QR decomposition, the Cholesky decomposition or \code{solve} should be used.} } \details{Using the Cholesky decomposition of X'X (as computed by \code{crossprod(X)}) is computationally faster and preferred to \code{solve(crossprod(X))}. 
Using the QR decomposition of X is slower but should be more accurate.} \value{a matrix containing the inverse of \code{crossprod(X)}.} \examples{ X <- cbind(1, rnorm(100)) solveCrossprod(X) solve(crossprod(X)) } \keyword{algebra} strucchange/man/Fstats.Rd0000644000176200001440000001067413062350355015100 0ustar liggesusers\name{Fstats} \alias{Fstats} \alias{print.Fstats} \title{F Statistics} \description{Computes a series of F statistics for a specified data window.} \usage{ Fstats(formula, from = 0.15, to = NULL, data = list(), vcov. = NULL)} \arguments{ \item{formula}{a symbolic description for the model to be tested} \item{from, to}{numeric. If \code{from} is smaller than 1 they are interpreted as percentages of data and by default \code{to} is taken to be 1 - \code{from}. F statistics will be calculated for the observations \code{(n*from):(n*to)}, when \code{n} is the number of observations in the model. If \code{from} is greater than 1 it is interpreted to be the index and \code{to} defaults to \code{n - from}. If \code{from} is a vector with two elements, then \code{from} and \code{to} are interpreted as time specifications like in \code{\link{ts}}, see also the examples.} \item{data}{an optional data frame containing the variables in the model. By default the variables are taken from the environment which \code{Fstats} is called from.} \item{vcov.}{a function to extract the covariance matrix for the coefficients of a fitted model of class \code{"lm"}.} } \details{For every potential change point in \code{from:to} a F statistic (Chow test statistic) is computed. For this an OLS model is fitted for the observations before and after the potential change point, i.e. \code{2k} parameters have to be estimated, and the error sum of squares is computed (ESS). Another OLS model for all observations with a restricted sum of squares (RSS) is computed, hence \code{k} parameters have to be estimated here. 
If \code{n} is the number of observations and \code{k} the number of regressors in the model, the formula is: \deqn{F = \frac{(RSS - ESS)}{ESS/(n - 2 k)}}{F = (RSS-ESS)/ESS * (n-2*k)} Note that this statistic has an asymptotic chi-squared distribution with k degrees of freedom and (under the assumption of normality) F/k has an exact F distribution with k and n - 2k degrees of freedom. } \value{\code{Fstats} returns an object of class \code{"Fstats"}, which contains mainly a time series of F statistics. The function \code{\link{plot}} has a method to plot the F statistics or the corresponding p values; with \code{sctest} a supF-, aveF- or expF-test on structural change can be performed.} \references{ Andrews D.W.K. (1993), Tests for parameter instability and structural change with unknown change point, \emph{Econometrica}, \bold{61}, 821-856. Hansen B. (1992), Tests for parameter instability in regressions with I(1) processes, \emph{Journal of Business & Economic Statistics}, \bold{10}, 321-335. Hansen B. (1997), Approximate asymptotic p values for structural-change tests, \emph{Journal of Business & Economic Statistics}, \bold{15}, 60-67. } \seealso{\code{\link{plot.Fstats}}, \code{\link{sctest.Fstats}}, \code{\link{boundary.Fstats}}} \examples{ ## Nile data with one breakpoint: the annual flows drop in 1898 ## because the first Ashwan dam was built data("Nile") plot(Nile) ## test the null hypothesis that the annual flow remains constant ## over the years fs.nile <- Fstats(Nile ~ 1) plot(fs.nile) sctest(fs.nile) ## visualize the breakpoint implied by the argmax of the F statistics plot(Nile) lines(breakpoints(fs.nile)) ## UK Seatbelt data: a SARIMA(1,0,0)(1,0,0)_12 model ## (fitted by OLS) is used and reveals (at least) two ## breakpoints - one in 1973 associated with the oil crisis and ## one in 1983 due to the introduction of compulsory ## wearing of seatbelts in the UK. 
data("UKDriverDeaths") seatbelt <- log10(UKDriverDeaths) seatbelt <- cbind(seatbelt, lag(seatbelt, k = -1), lag(seatbelt, k = -12)) colnames(seatbelt) <- c("y", "ylag1", "ylag12") seatbelt <- window(seatbelt, start = c(1970, 1), end = c(1984,12)) plot(seatbelt[,"y"], ylab = expression(log[10](casualties))) ## compute F statistics for potential breakpoints between ## 1971(6) (corresponds to from = 0.1) and 1983(6) (corresponds to ## to = 0.9 = 1 - from, the default) ## compute F statistics fs <- Fstats(y ~ ylag1 + ylag12, data = seatbelt, from = 0.1) ## this gives the same result fs <- Fstats(y ~ ylag1 + ylag12, data = seatbelt, from = c(1971, 6), to = c(1983, 6)) ## plot the F statistics plot(fs, alpha = 0.01) ## plot F statistics with aveF boundary plot(fs, aveF = TRUE) ## perform the expF test sctest(fs, type = "expF") } \concept{F statistics} \concept{Andrews test} \concept{Chow test} \concept{Quandt test} \keyword{regression} strucchange/man/scPublications.Rd0000644000176200001440000000526614251757670016633 0ustar liggesusers\name{scPublications} \alias{scPublications} \title{Structural Change Publications} \usage{data("scPublications")} \description{ Bibliographic information about papers related to structural change and changepoints published in 27 different econometrics and statistics journals. } \format{ A data frame containing information on 835 structural change papers in 9 variables. \describe{ \item{author}{character. Author(s) of the paper.} \item{title}{character. Title of the paper.} \item{journal}{factor. In which journal was the paper published?} \item{year}{numeric. Year of publication.} \item{volume}{numeric. Journal volume.} \item{issue}{character. Issue within the journal volume.} \item{bpage}{numeric. Page on which the paper begins.} \item{epage}{numeric. Page on which the paper ends.} \item{type}{factor. 
Is the journal an econometrics or statistics journal?} } } \details{ The data set \code{scPublications} includes bibliographic information about publications related to structural change and obtained from the \sQuote{ISI Web of Science}. The query was based on the \sQuote{Science Citation Index Expanded} and \sQuote{Social Sciences Citation Index} (for the full range of years available: 1900-2006 and 1956-2006, respectively). The \sQuote{Source Title} was restricted to the 27 journals in the data frame and the \sQuote{Topic} to be one of the following: structural change, structural break, structural stability, structural instability, parameter instability, parameter stability, parameter constancy, change point, changepoint, change-point, breakpoint, break-point, break point, CUSUM, MOSUM. Additionally, the famous CUSUM paper of Brown, Durbin and Evans (1975) was added manually to \code{scPublications} (because it did not match the query above). } \source{ISI Web of Science at \url{https://www.webofknowledge.com/}. Queried by James Bullard. 
} \examples{ ## construct time series: ## number of sc publications in econometrics/statistics data("scPublications") ## select years from 1987 and ## `most important' journals pub <- scPublications pub <- subset(pub, year > 1986) tab1 <- table(pub$journal) nam1 <- names(tab1)[as.vector(tab1) > 9] ## at least 10 papers tab2 <- sapply(levels(pub$journal), function(x) min(subset(pub, journal == x)$year)) nam2 <- names(tab2)[as.vector(tab2) < 1991] ## started at least in 1990 nam <- nam1[nam1 \%in\% nam2] pub <- subset(pub, as.character(journal) \%in\% nam) pub$journal <- factor(pub$journal) pub_data <- pub ## generate time series pub <- with(pub, tapply(type, year, table)) pub <- zoo(t(sapply(pub, cbind)), 1987:2006) colnames(pub) <- levels(pub_data$type) ## visualize plot(pub, ylim = c(0, 35)) } \keyword{datasets} strucchange/man/DJIA.Rd0000644000176200001440000000306113062350355014333 0ustar liggesusers\name{DJIA} \alias{DJIA} \title{Dow Jones Industrial Average} \description{ Weekly closing values of the Dow Jones Industrial Average. } \usage{data("DJIA")} \format{ A weekly univariate time series of class \code{"zoo"} from 1971-07-01 to 1974-08-02. } \source{ Appendix A in Hsu (1979). } \references{ Hsu D. A. (1979), Detecting Shifts of Parameter in Gamma Sequences with Applications to Stock Price and Air Traffic Flow Analysis, \emph{Journal of the American Statistical Association}, \bold{74}, 31--40. 
} \examples{ data("DJIA") ## look at log-difference returns djia <- diff(log(DJIA)) plot(djia) ## convenience functions ## set up a normal regression model which ## explicitely also models the variance normlm <- function(formula, data = list()) { rval <- lm(formula, data = data) class(rval) <- c("normlm", "lm") return(rval) } estfun.normlm <- function(obj) { res <- residuals(obj) ef <- NextMethod(obj) sigma2 <- mean(res^2) rval <- cbind(ef, res^2 - sigma2) colnames(rval) <- c(colnames(ef), "(Variance)") return(rval) } ## normal model (with constant mean and variance) for log returns m1 <- gefp(djia ~ 1, fit = normlm, vcov = meatHAC, sandwich = FALSE) plot(m1, aggregate = FALSE) ## suggests a clear break in the variance (but not the mean) ## dating bp <- breakpoints(I(djia^2) ~ 1) plot(bp) ## -> clearly one break bp time(djia)[bp$breakpoints] ## visualization plot(djia) abline(v = time(djia)[bp$breakpoints], lty = 2) lines(time(djia)[confint(bp)$confint[c(1,3)]], rep(min(djia), 2), col = 2, type = "b", pch = 3) } \keyword{datasets} strucchange/man/breakfactor.Rd0000644000176200001440000000234013062350355016106 0ustar liggesusers\name{breakfactor} \alias{breakfactor} \title{Factor Coding of Segmentations} \description{ Generates a factor encoding the segmentation given by a set of breakpoints. } \usage{ breakfactor(obj, breaks = NULL, labels = NULL, ...) } \arguments{ \item{obj}{An object of class \code{"breakpoints"} or \code{"breakpointsfull"} respectively.} \item{breaks}{an integer specifying the number of breaks to extract (only if \code{obj} is of class \code{"breakpointsfull"}), by default the minimum BIC partition is used.} \item{labels}{a vector of labels for the returned factor, by default the segments are numbered starting from \code{"segment1"}.} \item{\dots}{further arguments passed to \code{factor}.} } \value{ A factor encoding the segmentation. 
} \seealso{\code{\link{breakpoints}}} \examples{ ## Nile data with one breakpoint: the annual flows drop in 1898 ## because the first Ashwan dam was built data("Nile") plot(Nile) ## compute breakpoints bp.nile <- breakpoints(Nile ~ 1) ## fit and visualize segmented and unsegmented model fm0 <- lm(Nile ~ 1) fm1 <- lm(Nile ~ breakfactor(bp.nile, breaks = 1)) lines(fitted(fm0), col = 3) lines(fitted(fm1), col = 4) lines(bp.nile, breaks = 1) } \keyword{regression} strucchange/man/sctest.Fstats.Rd0000644000176200001440000000500613062350355016375 0ustar liggesusers\name{sctest.Fstats} \alias{sctest.Fstats} \title{supF-, aveF- and expF-Test} \description{Performs the supF-, aveF- or expF-test} \usage{ \method{sctest}{Fstats}(x, type = c("supF", "aveF", "expF"), asymptotic = FALSE, ...) } \arguments{ \item{x}{an object of class \code{"Fstats"}.} \item{type}{a character string specifying which test will be performed.} \item{asymptotic}{logical. Only necessary if \code{x} contains just a single F statistic and type is \code{"supF"} or \code{"aveF"}. If then set to \code{TRUE} the asymptotic (chi-square) distribution instead of the exact (F) distribution will be used to compute the p value.} \item{...}{currently not used.} } \details{If \code{x} contains just a single F statistic and type is \code{"supF"} or \code{"aveF"} the Chow test will be performed. The original GAUSS code for computing the p values of the supF-, aveF- and expF-test was written by Bruce Hansen and is available from \url{http://www.ssc.wisc.edu/~bhansen/}. R port by Achim Zeileis. } \value{ An object of class \code{"htest"} containing: \item{statistic}{the test statistic,} \item{p.value}{the corresponding p value,} \item{method}{a character string with the method used,} \item{data.name}{a character string with the data name.} } \references{ Andrews D.W.K. (1993), Tests for parameter instability and structural change with unknown change point, \emph{Econometrica}, \bold{61}, 821-856. 
Andrews D.W.K., Ploberger W. (1994), Optimal tests when a nuisance parameter is present only under the alternative, \emph{Econometrica}, \bold{62}, 1383-1414. Hansen B. (1992), Tests for parameter instability in regressions with I(1) processes, \emph{Journal of Business & Economic Statistics}, \bold{10}, 321-335. Hansen B. (1997), Approximate asymptotic p values for structural-change tests, \emph{Journal of Business & Economic Statistics}, \bold{15}, 60-67. } \seealso{\code{\link{Fstats}}, \code{\link{plot.Fstats}}} \examples{ ## Load dataset "nhtemp" with average yearly temperatures in New Haven data(nhtemp) ## plot the data plot(nhtemp) ## test the model null hypothesis that the average temperature remains ## constant over the years for potential break points between 1941 ## (corresponds to from = 0.5) and 1962 (corresponds to to = 0.85) ## compute F statistics fs <- Fstats(nhtemp ~ 1, from = 0.5, to = 0.85) ## plot the F statistics plot(fs, alpha = 0.01) ## and the corresponding p values plot(fs, pval = TRUE, alpha = 0.01) ## perform the aveF test sctest(fs, type = "aveF") } \keyword{htest} strucchange/man/boundary.Fstats.Rd0000644000176200001440000000343613062350355016720 0ustar liggesusers\name{boundary.Fstats} \alias{boundary.Fstats} \title{Boundary for F Statistics} \description{Computes boundary for an object of class \code{"Fstats"}} \usage{ \method{boundary}{Fstats}(x, alpha = 0.05, pval = FALSE, aveF = FALSE, asymptotic = FALSE, ...)} \arguments{ \item{x}{an object of class \code{"Fstats"}.} \item{alpha}{numeric from interval (0,1) indicating the confidence level for which the boundary of the supF test will be computed.} \item{pval}{logical. If set to \code{TRUE} a boundary for the corresponding p values will be computed.} \item{aveF}{logical. If set to \code{TRUE} the boundary of the aveF (instead of the supF) test will be computed. 
The resulting boundary then is a boundary for the mean of the F statistics rather than for the F statistics themselves.} \item{asymptotic}{logical. If set to \code{TRUE} the asymptotic (chi-square) distribution instead of the exact (F) distribution will be used to compute the p values (only if \code{pval} is \code{TRUE}).} \item{...}{currently not used.} } \value{an object of class \code{"ts"} with the same time properties as the time series in \code{x}} \seealso{\code{\link{Fstats}}, \code{\link{plot.Fstats}}} \examples{ ## Load dataset "nhtemp" with average yearly temperatures in New Haven data("nhtemp") ## plot the data plot(nhtemp) ## test the model null hypothesis that the average temperature remains ## constant over the years for potential break points between 1941 ## (corresponds to from = 0.5) and 1962 (corresponds to to = 0.85) ## compute F statistics fs <- Fstats(nhtemp ~ 1, from = 0.5, to = 0.85) ## plot the p values without boundary plot(fs, pval = TRUE, alpha = 0.01) ## add the boundary in another colour lines(boundary(fs, pval = TRUE, alpha = 0.01), col = 2) } \keyword{regression} strucchange/man/plot.Fstats.Rd0000644000176200001440000000465413062350355016056 0ustar liggesusers\name{plot.Fstats} \alias{plot.Fstats} \alias{lines.Fstats} \title{Plot F Statistics} \description{Plotting method for objects of class \code{"Fstats"}} \usage{ \method{plot}{Fstats}(x, pval = FALSE, asymptotic = FALSE, alpha = 0.05, boundary = TRUE, aveF = FALSE, xlab = "Time", ylab = NULL, ylim = NULL, ...) } \arguments{ \item{x}{an object of class \code{"Fstats"}.} \item{pval}{logical. If set to \code{TRUE} the corresponding p values instead of the original F statistics will be plotted.} \item{asymptotic}{logical. 
If set to \code{TRUE} the asymptotic (chi-square) distribution instead of the exact (F) distribution will be used to compute the p values (only if \code{pval} is \code{TRUE}).} \item{alpha}{numeric from interval (0,1) indicating the confidence level for which the boundary of the supF test will be computed.} \item{boundary}{logical. If set to \code{FALSE} the boundary will be computed but not plotted.} \item{aveF}{logical. If set to \code{TRUE} the boundary of the aveF test will be plotted. As this is a boundary for the mean of the F statistics rather than for the F statistics themselves a dashed line for the mean of the F statistics will also be plotted.} \item{xlab, ylab, ylim, ...}{high-level \code{\link{plot}} function parameters.}} \references{ Andrews D.W.K. (1993), Tests for parameter instability and structural change with unknown change point, \emph{Econometrica}, \bold{61}, 821-856. Hansen B. (1992), Tests for parameter instability in regressions with I(1) processes, \emph{Journal of Business & Economic Statistics}, \bold{10}, 321-335. Hansen B. (1997), Approximate asymptotic p values for structural-change tests, \emph{Journal of Business & Economic Statistics}, \bold{15}, 60-67. 
} \seealso{\code{\link{Fstats}}, \code{\link{boundary.Fstats}}, \code{\link{sctest.Fstats}}} \examples{ ## Load dataset "nhtemp" with average yearly temperatures in New Haven data("nhtemp") ## plot the data plot(nhtemp) ## test the model null hypothesis that the average temperature remains ## constant over the years for potential break points between 1941 ## (corresponds to from = 0.5) and 1962 (corresponds to to = 0.85) ## compute F statistics fs <- Fstats(nhtemp ~ 1, from = 0.5, to = 0.85) ## plot the F statistics plot(fs, alpha = 0.01) ## and the corresponding p values plot(fs, pval = TRUE, alpha = 0.01) ## perform the aveF test sctest(fs, type = "aveF") } \keyword{hplot} strucchange/man/boundary.Rd0000644000176200001440000000113513062350355015447 0ustar liggesusers\name{boundary} \alias{boundary} \title{Boundary Function for Structural Change Tests} \description{A generic function computing boundaries for structural change tests} \usage{ boundary(x, ...)} \arguments{ \item{x}{an object. Use \code{\link{methods}} to see which \code{\link{class}} has a method for boundary.} \item{...}{additional arguments affecting the boundary.} } \value{an object of class \code{"ts"} with the same time properties as the time series in \code{x}} \seealso{\code{\link{boundary.efp}}, \code{\link{boundary.mefp}}, \code{\link{boundary.Fstats}}} \keyword{regression} strucchange/man/PhillipsCurve.Rd0000644000176200001440000000460513062350355016422 0ustar liggesusers\name{PhillipsCurve} \alias{PhillipsCurve} \title{UK Phillips Curve Equation Data} \usage{data("PhillipsCurve")} \description{ Macroeconomic time series from the United Kingdom with variables for estimating the Phillips curve equation. 
} \format{ A multivariate annual time series from 1857 to 1987 with the columns \describe{ \item{p}{Logarithm of the consumer price index,} \item{w}{Logarithm of nominal wages,} \item{u}{Unemployment rate,} \item{dp}{First differences of \code{p},} \item{dw}{First differences of \code{w},} \item{du}{First differences of \code{u}} \item{u1}{Lag 1 of \code{u},} \item{dp1}{Lag 1 of \code{dp}.} } } \source{The data is available online in the data archive of the Journal of Applied Econometrics \url{http://qed.econ.queensu.ca/jae/2003-v18.1/bai-perron/}.} \references{ Alogoskoufis G.S., Smith R. (1991), The Phillips Curve, the Persistence of Inflation, and the Lucas Critique: Evidence from Exchange Rate Regimes, \emph{American Economic Review}, \bold{81}, 1254-1275. Bai J., Perron P. (2003), Computation and Analysis of Multiple Structural Change Models, \emph{Journal of Applied Econometrics}, \bold{18}, 1-22. } \examples{ ## load and plot data data("PhillipsCurve") uk <- window(PhillipsCurve, start = 1948) plot(uk[, "dp"]) ## AR(1) inflation model ## estimate breakpoints bp.inf <- breakpoints(dp ~ dp1, data = uk, h = 8) plot(bp.inf) summary(bp.inf) ## fit segmented model with three breaks fac.inf <- breakfactor(bp.inf, breaks = 2, label = "seg") fm.inf <- lm(dp ~ 0 + fac.inf/dp1, data = uk) summary(fm.inf) ## Results from Table 2 in Bai & Perron (2003): ## coefficient estimates coef(bp.inf, breaks = 2) ## corresponding standard errors sqrt(sapply(vcov(bp.inf, breaks = 2), diag)) ## breakpoints and confidence intervals confint(bp.inf, breaks = 2) ## Phillips curve equation ## estimate breakpoints bp.pc <- breakpoints(dw ~ dp1 + du + u1, data = uk, h = 5, breaks = 5) ## look at RSS and BIC plot(bp.pc) summary(bp.pc) ## fit segmented model with three breaks fac.pc <- breakfactor(bp.pc, breaks = 2, label = "seg") fm.pc <- lm(dw ~ 0 + fac.pc/dp1 + du + u1, data = uk) summary(fm.pc) ## Results from Table 3 in Bai & Perron (2003): ## coefficient estimates coef(fm.pc) ## 
corresponding standard errors sqrt(diag(vcov(fm.pc))) ## breakpoints and confidence intervals confint(bp.pc, breaks = 2, het.err = FALSE) } \keyword{datasets} strucchange/man/Grossarl.Rd0000644000176200001440000001210013550277070015416 0ustar liggesusers\name{Grossarl} \alias{Grossarl} \docType{data} \encoding{latin1} \title{Marriages, Births and Deaths in Grossarl} \usage{data("Grossarl")} \description{ Data about the number of marriages, illegitimate and legitimate births, and deaths in the Austrian Alpine village Grossarl during the 18th and 19th century. } \format{ \code{Grossarl} is a data frame containing 6 annual time series (1700 - 1899), 3 factors coding policy interventions and 1 vector with the year (plain numeric). \describe{ \item{marriages}{time series. Number of marriages,} \item{illegitimate}{time series. Number of illegitimate births,} \item{legitimate}{time series. Number of legitimate births,} \item{deaths}{time series. Number of deaths,} \item{fraction}{time series. Fraction of illegitimate births,} \item{lag.marriages}{time series. Number of marriages in the previous year,} \item{politics}{ordered factor coding 4 different political regimes,} \item{morals}{ordered factor coding 5 different moral regulations,} \item{nuptiality}{ordered factor coding 5 different marriage restrictions,} \item{year}{numeric. Year of observation.} } } \details{The data frame contains historical demographic data from Grossarl, a village in the Alpine region of Salzburg, Austria, during the 18th and 19th century. During this period, the total population of Grossarl did not vary much on the whole, with the very exception of the period of the protestant emigrations in 1731/32. Especially during the archbishopric, moral interventions aimed at lowering the proportion of illegitimate baptisms. 
For details see the references.} \source{Parish registers provide the basic demographic series of baptisms and burials (which is almost equivalent to births and deaths in the study area) and marriages. For more information see Veichtlbauer et al. (2006).} \references{ Veichtlbauer O., Zeileis A., Leisch F. (2006), The Impact Of Policy Interventions on a Pre-Industrial Population System in the Austrian Alps, forthcoming. Zeileis A., Veichtlbauer O. (2002), Policy Interventions Affecting Illegitimacy in Preindustrial Austria: A Structural Change Analysis, In R. Dutter (ed.), \emph{Festschrift 50 Jahre Österreichische Statistische Gesellschaft}, 133-146, Österreichische Statistische Gesellschaft. } \examples{ data("Grossarl") ## time series of births, deaths, marriages ########################################### with(Grossarl, plot(cbind(deaths, illegitimate + legitimate, marriages), plot.type = "single", col = grey(c(0.7, 0, 0)), lty = c(1, 1, 3), lwd = 1.5, ylab = "annual Grossarl series")) legend("topright", c("deaths", "births", "marriages"), col = grey(c(0.7, 0, 0)), lty = c(1, 1, 3), bty = "n") ## illegitimate births ###################### ## lm + MOSUM plot(Grossarl$fraction) fm.min <- lm(fraction ~ politics, data = Grossarl) fm.ext <- lm(fraction ~ politics + morals + nuptiality + marriages, data = Grossarl) lines(ts(fitted(fm.min), start = 1700), col = 2) lines(ts(fitted(fm.ext), start = 1700), col = 4) mos.min <- efp(fraction ~ politics, data = Grossarl, type = "OLS-MOSUM") mos.ext <- efp(fraction ~ politics + morals + nuptiality + marriages, data = Grossarl, type = "OLS-MOSUM") plot(mos.min) lines(mos.ext, lty = 2) ## dating bp <- breakpoints(fraction ~ 1, data = Grossarl, h = 0.1) summary(bp) ## RSS, BIC, AIC plot(bp) plot(0:8, AIC(bp), type = "b") ## probably use 5 or 6 breakpoints and compare with ## coding of the factors as used by us ## ## politics 1803 1816 1850 ## morals 1736 1753 1771 1803 ## nuptiality 1803 1810 1816 1883 ## ## m = 5 1753 1785 1821 
1856 1878 ## m = 6 1734 1754 1785 1821 1856 1878 ## 6 2 5 1 4 3 ## fitted models coef(bp, breaks = 6) plot(Grossarl$fraction) lines(fitted(bp, breaks = 6), col = 2) lines(ts(fitted(fm.ext), start = 1700), col = 4) ## marriages ############ ## lm + MOSUM plot(Grossarl$marriages) fm.min <- lm(marriages ~ politics, data = Grossarl) fm.ext <- lm(marriages ~ politics + morals + nuptiality, data = Grossarl) lines(ts(fitted(fm.min), start = 1700), col = 2) lines(ts(fitted(fm.ext), start = 1700), col = 4) mos.min <- efp(marriages ~ politics, data = Grossarl, type = "OLS-MOSUM") mos.ext <- efp(marriages ~ politics + morals + nuptiality, data = Grossarl, type = "OLS-MOSUM") plot(mos.min) lines(mos.ext, lty = 2) ## dating bp <- breakpoints(marriages ~ 1, data = Grossarl, h = 0.1) summary(bp) ## RSS, BIC, AIC plot(bp) plot(0:8, AIC(bp), type = "b") ## probably use 3 or 4 breakpoints and compare with ## coding of the factors as used by us ## ## politics 1803 1816 1850 ## morals 1736 1753 1771 1803 ## nuptiality 1803 1810 1816 1883 ## ## m = 3 1738 1813 1875 ## m = 4 1738 1794 1814 1875 ## 2 4 1 3 ## fitted models coef(bp, breaks = 4) plot(Grossarl$marriages) lines(fitted(bp, breaks = 4), col = 2) lines(ts(fitted(fm.ext), start = 1700), col = 4) } \keyword{datasets} strucchange/man/SP2001.Rd0000644000176200001440000000635714251757630014474 0ustar liggesusers\name{SP2001} \alias{SP2001} \title{S&P 500 Stock Prices} \description{ A multivariate series of all S&P 500 stock prices in the second half of the year 2001, i.e., before and after the terrorist attacks of 2001-09-11. } \usage{data("SP2001")} \format{ A multivariate daily \code{"zoo"} series with \code{"Date"} index from 2001-07-31 to 2001-12-31 (103 observations) of all 500 S&P stock prices. } \source{Yahoo! Finance: \url{https://finance.yahoo.com/}.} \references{ Zeileis A., Leisch F., Kleiber C., Hornik K. 
(2005), Monitoring Structural Change in Dynamic Econometric Models, \emph{Journal of Applied Econometrics}, \bold{20}, 99--121. } \seealso{\code{\link[tseries]{get.hist.quote}}} \examples{ ## load and transform data ## (DAL: Delta Air Lines, LU: Lucent Technologies) data("SP2001") stock.prices <- SP2001[, c("DAL", "LU")] stock.returns <- diff(log(stock.prices)) ## price and return series plot(stock.prices, ylab = c("Delta Air Lines", "Lucent Technologies"), main = "") plot(stock.returns, ylab = c("Delta Air Lines", "Lucent Technologies"), main = "") ## monitoring of DAL series myborder <- function(k) 1.939*k/28 x <- as.vector(stock.returns[, "DAL"][1:28]) dal.cusum <- mefp(x ~ 1, type = "OLS-CUSUM", border = myborder) dal.mosum <- mefp(x ~ 1, type = "OLS-MOSUM", h = 0.5, period = 4) x <- as.vector(stock.returns[, "DAL"]) dal.cusum <- monitor(dal.cusum) dal.mosum <- monitor(dal.mosum) ## monitoring of LU series x <- as.vector(stock.returns[, "LU"][1:28]) lu.cusum <- mefp(x ~ 1, type = "OLS-CUSUM", border = myborder) lu.mosum <- mefp(x ~ 1, type = "OLS-MOSUM", h = 0.5, period = 4) x <- as.vector(stock.returns[, "LU"]) lu.cusum <- monitor(lu.cusum) lu.mosum <- monitor(lu.mosum) ## pretty plotting ## (needs some work because lm() does not keep "zoo" attributes) cus.bound <- zoo(c(rep(NA, 27), myborder(28:102)), index(stock.returns)) mos.bound <- as.vector(boundary(dal.mosum)) mos.bound <- zoo(c(rep(NA, 27), mos.bound[1], mos.bound), index(stock.returns)) ## Lucent Technologies: CUSUM test plot(zoo(c(lu.cusum$efpprocess, lu.cusum$process), index(stock.prices)), ylim = c(-1, 1) * coredata(cus.bound)[102], xlab = "Time", ylab = "empirical fluctuation process") abline(0, 0) abline(v = as.Date("2001-09-10"), lty = 2) lines(cus.bound, col = 2) lines(-cus.bound, col = 2) ## Lucent Technologies: MOSUM test plot(zoo(c(lu.mosum$efpprocess, lu.mosum$process), index(stock.prices)[-(1:14)]), ylim = c(-1, 1) * coredata(mos.bound)[102], xlab = "Time", ylab = "empirical fluctuation 
process") abline(0, 0) abline(v = as.Date("2001-09-10"), lty = 2) lines(mos.bound, col = 2) lines(-mos.bound, col = 2) ## Delta Air Lines: CUSUM test plot(zoo(c(dal.cusum$efpprocess, dal.cusum$process), index(stock.prices)), ylim = c(-1, 1) * coredata(cus.bound)[102], xlab = "Time", ylab = "empirical fluctuation process") abline(0, 0) abline(v = as.Date("2001-09-10"), lty = 2) lines(cus.bound, col = 2) lines(-cus.bound, col = 2) ## Delta Air Lines: MOSUM test plot(zoo(c(dal.mosum$efpprocess, dal.mosum$process), index(stock.prices)[-(1:14)]), ylim = range(dal.mosum$process), xlab = "Time", ylab = "empirical fluctuation process") abline(0, 0) abline(v = as.Date("2001-09-10"), lty = 2) lines(mos.bound, col = 2) lines(-mos.bound, col = 2) } \keyword{datasets} strucchange/man/mefp.Rd0000644000176200001440000001454113062350355014560 0ustar liggesusers\name{mefp} \alias{mefp} \alias{mefp.formula} \alias{mefp.efp} \alias{print.mefp} \alias{monitor} \title{Monitoring of Empirical Fluctuation Processes} \description{ Online monitoring of structural breaks in a linear regression model. A sequential fluctuation test based on parameter estimates or OLS residuals signals structural breaks. } \usage{ mefp(obj, ...) \method{mefp}{formula}(formula, type = c("OLS-CUSUM", "OLS-MOSUM", "RE", "ME", "fluctuation"), data, h = 1, alpha = 0.05, functional = c("max", "range"), period = 10, tolerance = .Machine$double.eps^0.5, CritvalTable = NULL, rescale = NULL, border = NULL, ...) \method{mefp}{efp}(obj, alpha=0.05, functional = c("max", "range"), period = 10, tolerance = .Machine$double.eps^0.5, CritvalTable = NULL, rescale = NULL, border = NULL, ...) monitor(obj, data = NULL, verbose = TRUE) } \arguments{ \item{formula}{a symbolic description for the model to be tested.} \item{data}{an optional data frame containing the variables in the model. 
By default the variables are taken from the environment which \code{efp} is called from.} \item{type}{specifies which type of fluctuation process will be computed.} \item{h}{(only used for MOSUM/ME processes). A numeric scalar from interval (0,1) specifying the size of the data window relative to the sample size.} \item{obj}{Object of class \code{"efp"} (for \code{mefp}) or \code{"mefp"} (for \code{monitor}).} \item{alpha}{Significance level of the test, i.e., probability of type I error.} \item{functional}{Determines if maximum or range of parameter differences is used as statistic.} \item{period}{(only used for MOSUM/ME processes). Maximum time (relative to the history period) that will be monitored. Default is 10 times the history period.} \item{tolerance}{Tolerance for numeric \code{==} comparisons.} \item{CritvalTable}{Table of critical values, this table is interpolated to get critical values for arbitrary \code{alpha}s. The default depends on the \code{type} of fluctuation process (pre-computed tables are available for all types). \emph{This argument is under development.}} \item{rescale}{If \code{TRUE} the estimates will be standardized by the regressor matrix of the corresponding subsample similar to Kuan & Chen (1994); if \code{FALSE} the historic regressor matrix will be used. The default is to rescale the monitoring processes of type \code{"ME"} but not of \code{"RE"}.} \item{border}{An optional user-specified border function for the empirical process. \emph{This argument is under development.}} \item{verbose}{If \code{TRUE}, signal breaks by text output.} \item{...}{Currently not used.} } \details{ \code{\link{mefp}} creates an object of class \code{"mefp"} either from a model formula or from an object of class \code{"efp"}. In addition to the arguments of \code{\link{efp}}, the type of statistic and a significance level for the monitoring must be specified. 
The monitoring itself is performed by \code{monitor}, which can be called arbitrarily often on objects of class \code{"mefp"}. If new data have arrived, then the empirical fluctuation process is computed for the new data. If the process crosses the boundaries corresponding to the significance level \code{alpha}, a structural break is detected (and signaled). The typical usage is to initialize the monitoring by creation of an object of class \code{"mefp"} either using a formula or an \code{"efp"} object. Data available at this stage are considered the \emph{history sample}, which is kept fixed during the complete monitoring process, and may not contain any structural changes. Subsequent calls to \code{monitor} perform a sequential test of the null hypothesis of no structural change in new data against the general alternative of changes in one or more of the coefficients of the regression model. The recursive estimates test is also called fluctuation test, therefore setting \code{type} to \code{"fluctuation"} was used to specify it in earlier versions of strucchange. It still can be used now, but will be forced to \code{"RE"} } \seealso{\code{\link{plot.mefp}}, \code{\link{boundary.mefp}}} \references{ Leisch F., Hornik K., Kuan C.-M. (2000), Monitoring Structural Changes with the Generalized Fluctuation Test, \emph{Econometric Theory}, \bold{16}, 835--854. Zeileis A., Leisch F., Kleiber C., Hornik K. (2005), Monitoring Structural Change in Dynamic Econometric Models, \emph{Journal of Applied Econometrics}, \bold{20}, 99--121. doi:10.1002/jae.776. Zeileis A. (2005), A Unified Approach to Structural Change Tests Based on ML Scores, F Statistics, and OLS Residuals. \emph{Econometric Reviews}, \bold{24}, 445--466. doi:10.1080/07474930500406053. Zeileis A., Shah A., Patnaik I. (2010), Testing, Monitoring, and Dating Structural Changes in Exchange Rate Regimes, \emph{Computational Statistics and Data Analysis}, \bold{54}(6), 1696--1706. doi:10.1016/j.csda.2009.12.005. 
} \examples{ df1 <- data.frame(y=rnorm(300)) df1[150:300,"y"] <- df1[150:300,"y"]+1 ## use the first 50 observations as history period e1 <- efp(y~1, data=df1[1:50,,drop=FALSE], type="ME", h=1) me1 <- mefp(e1, alpha=0.05) ## the same in one function call me1 <- mefp(y~1, data=df1[1:50,,drop=FALSE], type="ME", h=1, alpha=0.05) ## monitor the 50 next observations me2 <- monitor(me1, data=df1[1:100,,drop=FALSE]) plot(me2) # and now monitor on all data me3 <- monitor(me2, data=df1) plot(me3) ## Load dataset "USIncExp" with income and expenditure in the US ## and choose a suitable subset for the history period data("USIncExp") USIncExp3 <- window(USIncExp, start=c(1969,1), end=c(1971,12)) ## initialize the monitoring with the formula interface me.mefp <- mefp(expenditure~income, type="ME", rescale=TRUE, data=USIncExp3, alpha=0.05) ## monitor the new observations for the year 1972 USIncExp3 <- window(USIncExp, start=c(1969,1), end=c(1972,12)) me.mefp <- monitor(me.mefp) ## monitor the new data for the years 1973-1976 USIncExp3 <- window(USIncExp, start=c(1969,1), end=c(1976,12)) me.mefp <- monitor(me.mefp) plot(me.mefp, functional = NULL) } \concept{CUSUM} \concept{MOSUM} \concept{recursive estimates} \concept{moving estimates} \concept{fluctuation test} \concept{monitoring} \concept{structural change} \keyword{regression} strucchange/DESCRIPTION0000644000176200001440000000414414252300432014264 0ustar liggesusersPackage: strucchange Version: 1.5-3 Date: 2022-06-14 Title: Testing, Monitoring, and Dating Structural Changes Authors@R: c(person(given = "Achim", family = "Zeileis", role = c("aut", "cre"), email = "Achim.Zeileis@R-project.org", comment = c(ORCID = "0000-0003-0918-3766")), person(given = "Friedrich", family = "Leisch", role = "aut", email = "Friedrich.Leisch@R-project.org"), person(given = "Kurt", family = "Hornik", role = "aut", email = "Kurt.Hornik@R-project.org"), person(given = "Christian", family = "Kleiber", role = "aut", email = 
"Christian.Kleiber@unibas.ch"), person(given = "Bruce", family = "Hansen", role = "ctb"), person(given = c("Edgar", "C."), family = "Merkle", role = "ctb"), person(given = "Nikolaus", family = "Umlauf", role = "ctb")) Description: Testing, monitoring and dating structural changes in (linear) regression models. strucchange features tests/methods from the generalized fluctuation test framework as well as from the F test (Chow test) framework. This includes methods to fit, plot and test fluctuation processes (e.g., CUSUM, MOSUM, recursive/moving estimates) and F statistics, respectively. It is possible to monitor incoming data online using fluctuation processes. Finally, the breakpoints in regression models with structural changes can be estimated together with confidence intervals. Emphasis is always given to methods for visualizing the data. LazyData: yes Depends: R (>= 2.10.0), zoo, sandwich Suggests: stats4, car, dynlm, e1071, foreach, lmtest, mvtnorm, tseries Imports: graphics, stats, utils License: GPL-2 | GPL-3 NeedsCompilation: yes Packaged: 2022-06-15 00:00:55 UTC; zeileis Author: Achim Zeileis [aut, cre] (), Friedrich Leisch [aut], Kurt Hornik [aut], Christian Kleiber [aut], Bruce Hansen [ctb], Edgar C. 
Merkle [ctb], Nikolaus Umlauf [ctb] Maintainer: Achim Zeileis Repository: CRAN Date/Publication: 2022-06-15 06:50:02 UTC strucchange/build/0000755000176200001440000000000014252220466013662 5ustar liggesusersstrucchange/build/vignette.rds0000644000176200001440000000050014252220466016214 0ustar liggesusersR]O0-1!lً..e6pFh/w ب&6~{ o>Bhv@3}䡩K%$y',ʔaj ='#&g\ HEYAJ< PDrRRP  dFtH 쁥&m;yޯm6o V TB`SI|*J~0y*.t8QħME~ 5fb%-z]/^_JNN[}isǖ<)%SH strucchange/build/partial.rdb0000644000176200001440000000007314252220456016006 0ustar liggesusersb```b`a 0X84k^bnj1!d7strucchange/tests/0000755000176200001440000000000013062350355013724 5ustar liggesusersstrucchange/tests/Examples/0000755000176200001440000000000013062350355015502 5ustar liggesusersstrucchange/tests/Examples/strucchange-Ex.Rout.save0000644000176200001440000021062113550207541022174 0ustar liggesusers R version 3.2.0 (2015-04-16) -- "Full of Ingredients" Copyright (C) 2015 The R Foundation for Statistical Computing Platform: x86_64-pc-linux-gnu (64-bit) R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. Type 'license()' or 'licence()' for distribution details. R is a collaborative project with many contributors. Type 'contributors()' for more information and 'citation()' on how to cite R or R packages in publications. Type 'demo()' for some demos, 'help()' for on-line help, or 'help.start()' for an HTML browser interface to help. Type 'q()' to quit R. 
> pkgname <- "strucchange" > source(file.path(R.home("share"), "R", "examples-header.R")) > options(warn = 1) > library('strucchange') Loading required package: zoo Attaching package: 'zoo' The following objects are masked from 'package:base': as.Date, as.Date.numeric Loading required package: sandwich > > base::assign(".oldSearch", base::search(), pos = 'CheckExEnv') > cleanEx() > nameEx("BostonHomicide") > ### * BostonHomicide > > flush(stderr()); flush(stdout()) > > ### Name: BostonHomicide > ### Title: Youth Homicides in Boston > ### Aliases: BostonHomicide > ### Keywords: datasets > > ### ** Examples > > data("BostonHomicide") > attach(BostonHomicide) > > ## data from Table 1 > tapply(homicides, year, mean) 1992 1993 1994 1995 1996 1997 1998 3.083333 4.000000 3.166667 3.833333 2.083333 1.250000 0.800000 > populationBM[0:6*12 + 7] [1] 12977 12455 12272 12222 11895 12038 NA > tapply(ahomicides25, year, mean) 1992 1993 1994 1995 1996 1997 1998 3.250000 4.166667 3.916667 4.166667 2.666667 2.333333 1.400000 > tapply(ahomicides35, year, mean) 1992 1993 1994 1995 1996 1997 1998 0.8333333 1.0833333 1.3333333 1.1666667 1.0833333 0.7500000 0.4000000 > population[0:6*12 + 7] [1] 228465 227218 226611 231367 230744 228696 NA > unemploy[0:6*12 + 7] [1] 20.2 18.8 15.9 14.7 13.8 12.6 NA > > ## model A > ## via OLS > fmA <- lm(homicides ~ populationBM + season) > anova(fmA) Analysis of Variance Table Response: homicides Df Sum Sq Mean Sq F value Pr(>F) populationBM 1 14.364 14.3642 3.7961 0.05576 . season 11 47.254 4.2959 1.1353 0.34985 Residuals 64 242.174 3.7840 --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 > ## as GLM > fmA1 <- glm(homicides ~ populationBM + season, family = poisson) > anova(fmA1, test = "Chisq") Analysis of Deviance Table Model: poisson, link: log Response: homicides Terms added sequentially (first to last) Df Deviance Resid. Df Resid. 
Dev Pr(>Chi) NULL 76 115.649 populationBM 1 4.9916 75 110.657 0.02547 * season 11 18.2135 64 92.444 0.07676 . --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 > > ## model B & C > fmB <- lm(homicides ~ populationBM + season + ahomicides25) > fmC <- lm(homicides ~ populationBM + season + ahomicides25 + unemploy) > > detach(BostonHomicide) > > > > cleanEx() > nameEx("DJIA") > ### * DJIA > > flush(stderr()); flush(stdout()) > > ### Name: DJIA > ### Title: Dow Jones Industrial Average > ### Aliases: DJIA > ### Keywords: datasets > > ### ** Examples > > data("DJIA") > ## look at log-difference returns > djia <- diff(log(DJIA)) > plot(djia) > > ## convenience functions > ## set up a normal regression model which > ## explicitely also models the variance > normlm <- function(formula, data = list()) { + rval <- lm(formula, data = data) + class(rval) <- c("normlm", "lm") + return(rval) + } > estfun.normlm <- function(obj) { + res <- residuals(obj) + ef <- NextMethod(obj) + sigma2 <- mean(res^2) + rval <- cbind(ef, res^2 - sigma2) + colnames(rval) <- c(colnames(ef), "(Variance)") + return(rval) + } > > ## normal model (with constant mean and variance) for log returns > m1 <- gefp(djia ~ 1, fit = normlm, vcov = meatHAC, sandwich = FALSE) > plot(m1, aggregate = FALSE) > ## suggests a clear break in the variance (but not the mean) > > ## dating > bp <- breakpoints(I(djia^2) ~ 1) > plot(bp) > ## -> clearly one break > bp Optimal 2-segment partition: Call: breakpoints.formula(formula = I(djia^2) ~ 1) Breakpoints at observation number: 89 Corresponding to breakdates: 0.552795 > time(djia)[bp$breakpoints] [1] "1973-03-16" > > ## visualization > plot(djia) > abline(v = time(djia)[bp$breakpoints], lty = 2) > lines(time(djia)[confint(bp)$confint[c(1,3)]], rep(min(djia), 2), col = 2, type = "b", pch = 3) > > > > cleanEx() > nameEx("Fstats") > ### * Fstats > > flush(stderr()); flush(stdout()) > > ### Name: Fstats > ### Title: F Statistics > ### Aliases: Fstats 
print.Fstats > ### Keywords: regression > > ### ** Examples > > ## Nile data with one breakpoint: the annual flows drop in 1898 > ## because the first Ashwan dam was built > data("Nile") > plot(Nile) > > ## test the null hypothesis that the annual flow remains constant > ## over the years > fs.nile <- Fstats(Nile ~ 1) > plot(fs.nile) > sctest(fs.nile) supF test data: fs.nile sup.F = 75.93, p-value = 2.22e-16 > ## visualize the breakpoint implied by the argmax of the F statistics > plot(Nile) > lines(breakpoints(fs.nile)) > > ## UK Seatbelt data: a SARIMA(1,0,0)(1,0,0)_12 model > ## (fitted by OLS) is used and reveals (at least) two > ## breakpoints - one in 1973 associated with the oil crisis and > ## one in 1983 due to the introduction of compulsory > ## wearing of seatbelts in the UK. > data("UKDriverDeaths") > seatbelt <- log10(UKDriverDeaths) > seatbelt <- cbind(seatbelt, lag(seatbelt, k = -1), lag(seatbelt, k = -12)) > colnames(seatbelt) <- c("y", "ylag1", "ylag12") > seatbelt <- window(seatbelt, start = c(1970, 1), end = c(1984,12)) > plot(seatbelt[,"y"], ylab = expression(log[10](casualties))) > > ## compute F statistics for potential breakpoints between > ## 1971(6) (corresponds to from = 0.1) and 1983(6) (corresponds to > ## to = 0.9 = 1 - from, the default) > ## compute F statistics > fs <- Fstats(y ~ ylag1 + ylag12, data = seatbelt, from = 0.1) > ## this gives the same result > fs <- Fstats(y ~ ylag1 + ylag12, data = seatbelt, from = c(1971, 6), + to = c(1983, 6)) > ## plot the F statistics > plot(fs, alpha = 0.01) > ## plot F statistics with aveF boundary > plot(fs, aveF = TRUE) > ## perform the expF test > sctest(fs, type = "expF") expF test data: fs exp.F = 6.4247, p-value = 0.008093 > > > > cleanEx() > nameEx("GermanM1") > ### * GermanM1 > > flush(stderr()); flush(stdout()) > > ### Encoding: UTF-8 > > ### Name: GermanM1 > ### Title: German M1 Money Demand > ### Aliases: GermanM1 historyM1 monitorM1 > ### Keywords: datasets > > ### ** Examples > > 
data("GermanM1") > ## Lütkepohl et al. (1999) use the following model > LTW.model <- dm ~ dy2 + dR + dR1 + dp + m1 + y1 + R1 + season > ## Zeileis et al. (2005) use > M1.model <- dm ~ dy2 + dR + dR1 + dp + ecm.res + season > > > ## historical tests > ols <- efp(LTW.model, data = GermanM1, type = "OLS-CUSUM") > plot(ols) > re <- efp(LTW.model, data = GermanM1, type = "fluctuation") > plot(re) > fs <- Fstats(LTW.model, data = GermanM1, from = 0.1) > plot(fs) > > ## monitoring > M1 <- historyM1 > ols.efp <- efp(M1.model, type = "OLS-CUSUM", data = M1) > newborder <- function(k) 1.5778*k/118 > ols.mefp <- mefp(ols.efp, period = 2) > ols.mefp2 <- mefp(ols.efp, border = newborder) > M1 <- GermanM1 > ols.mon <- monitor(ols.mefp) Break detected at observation # 128 > ols.mon2 <- monitor(ols.mefp2) Break detected at observation # 135 > plot(ols.mon) > lines(boundary(ols.mon2), col = 2) > > ## dating > bp <- breakpoints(LTW.model, data = GermanM1) > summary(bp) Optimal (m+1)-segment partition: Call: breakpoints.formula(formula = LTW.model, data = GermanM1) Breakpoints at observation number: m = 1 119 m = 2 42 119 m = 3 48 71 119 m = 4 27 48 71 119 m = 5 27 48 71 98 119 Corresponding to breakdates: m = 1 1990(3) m = 2 1971(2) 1990(3) m = 3 1972(4) 1978(3) 1990(3) m = 4 1967(3) 1972(4) 1978(3) 1990(3) m = 5 1967(3) 1972(4) 1978(3) 1985(2) 1990(3) Fit: m 0 1 2 3 4 5 RSS 3.683e-02 1.916e-02 1.522e-02 1.301e-02 1.053e-02 9.198e-03 BIC -6.974e+02 -7.296e+02 -7.025e+02 -6.653e+02 -6.356e+02 -5.952e+02 > plot(bp) > > plot(fs) > lines(confint(bp)) > > > > cleanEx() > nameEx("Grossarl") > ### * Grossarl > > flush(stderr()); flush(stdout()) > > ### Name: Grossarl > ### Title: Marriages, Births and Deaths in Grossarl > ### Aliases: Grossarl > ### Keywords: datasets > > ### ** Examples > > data("Grossarl") > > ## time series of births, deaths, marriages > ########################################### > > with(Grossarl, plot(cbind(deaths, illegitimate + legitimate, marriages), + plot.type = 
"single", col = grey(c(0.7, 0, 0)), lty = c(1, 1, 3), + lwd = 1.5, ylab = "annual Grossarl series")) > legend("topright", c("deaths", "births", "marriages"), col = grey(c(0.7, 0, 0)), + lty = c(1, 1, 3), bty = "n") > > ## illegitimate births > ###################### > ## lm + MOSUM > plot(Grossarl$fraction) > fm.min <- lm(fraction ~ politics, data = Grossarl) > fm.ext <- lm(fraction ~ politics + morals + nuptiality + marriages, + data = Grossarl) > lines(ts(fitted(fm.min), start = 1700), col = 2) > lines(ts(fitted(fm.ext), start = 1700), col = 4) > mos.min <- efp(fraction ~ politics, data = Grossarl, type = "OLS-MOSUM") > mos.ext <- efp(fraction ~ politics + morals + nuptiality + marriages, + data = Grossarl, type = "OLS-MOSUM") > plot(mos.min) > lines(mos.ext, lty = 2) > > ## dating > bp <- breakpoints(fraction ~ 1, data = Grossarl, h = 0.1) > summary(bp) Optimal (m+1)-segment partition: Call: breakpoints.formula(formula = fraction ~ 1, h = 0.1, data = Grossarl) Breakpoints at observation number: m = 1 127 m = 2 55 122 m = 3 55 124 180 m = 4 55 122 157 179 m = 5 54 86 122 157 179 m = 6 35 55 86 122 157 179 m = 7 35 55 80 101 122 157 179 m = 8 35 55 79 99 119 139 159 179 Corresponding to breakdates: m = 1 1826 m = 2 1754 1821 m = 3 1754 1823 1879 m = 4 1754 1821 1856 1878 m = 5 1753 1785 1821 1856 1878 m = 6 1734 1754 1785 1821 1856 1878 m = 7 1734 1754 1779 1800 1821 1856 1878 m = 8 1734 1754 1778 1798 1818 1838 1858 1878 Fit: m 0 1 2 3 4 5 6 RSS 1.1088 0.8756 0.6854 0.6587 0.6279 0.6019 0.5917 BIC -460.8402 -497.4625 -535.8459 -533.1857 -532.1789 -530.0501 -522.8510 m 7 8 RSS 0.5934 0.6084 BIC -511.7017 -496.0924 > ## RSS, BIC, AIC > plot(bp) > plot(0:8, AIC(bp), type = "b") > > ## probably use 5 or 6 breakpoints and compare with > ## coding of the factors as used by us > ## > ## politics 1803 1816 1850 > ## morals 1736 1753 1771 1803 > ## nuptiality 1803 1810 1816 1883 > ## > ## m = 5 1753 1785 1821 1856 1878 > ## m = 6 1734 1754 1785 1821 1856 1878 > ## 6 2 5 1 
4 3 > > ## fitted models > coef(bp, breaks = 6) (Intercept) 1700 - 1734 0.16933985 1735 - 1754 0.14078070 1755 - 1785 0.09890276 1786 - 1821 0.05955620 1822 - 1856 0.17441529 1857 - 1878 0.22425604 1879 - 1899 0.15414723 > plot(Grossarl$fraction) > lines(fitted(bp, breaks = 6), col = 2) > lines(ts(fitted(fm.ext), start = 1700), col = 4) > > > ## marriages > ############ > ## lm + MOSUM > plot(Grossarl$marriages) > fm.min <- lm(marriages ~ politics, data = Grossarl) > fm.ext <- lm(marriages ~ politics + morals + nuptiality, data = Grossarl) > lines(ts(fitted(fm.min), start = 1700), col = 2) > lines(ts(fitted(fm.ext), start = 1700), col = 4) > mos.min <- efp(marriages ~ politics, data = Grossarl, type = "OLS-MOSUM") > mos.ext <- efp(marriages ~ politics + morals + nuptiality, data = Grossarl, + type = "OLS-MOSUM") > plot(mos.min) > lines(mos.ext, lty = 2) > > ## dating > bp <- breakpoints(marriages ~ 1, data = Grossarl, h = 0.1) > summary(bp) Optimal (m+1)-segment partition: Call: breakpoints.formula(formula = marriages ~ 1, h = 0.1, data = Grossarl) Breakpoints at observation number: m = 1 114 m = 2 39 114 m = 3 39 114 176 m = 4 39 95 115 176 m = 5 39 62 95 115 176 m = 6 39 62 95 115 136 176 m = 7 39 62 95 115 136 156 176 m = 8 21 41 62 95 115 136 156 176 Corresponding to breakdates: m = 1 1813 m = 2 1738 1813 m = 3 1738 1813 1875 m = 4 1738 1794 1814 1875 m = 5 1738 1761 1794 1814 1875 m = 6 1738 1761 1794 1814 1835 1875 m = 7 1738 1761 1794 1814 1835 1855 1875 m = 8 1720 1740 1761 1794 1814 1835 1855 1875 Fit: m 0 1 2 3 4 5 6 7 8 RSS 3832 3059 2863 2723 2671 2634 2626 2626 2645 BIC 1169 1134 1132 1132 1139 1147 1157 1167 1179 > ## RSS, BIC, AIC > plot(bp) > plot(0:8, AIC(bp), type = "b") > > ## probably use 3 or 4 breakpoints and compare with > ## coding of the factors as used by us > ## > ## politics 1803 1816 1850 > ## morals 1736 1753 1771 1803 > ## nuptiality 1803 1810 1816 1883 > ## > ## m = 3 1738 1813 1875 > ## m = 4 1738 1794 1814 1875 > ## 2 4 1 3 > > ## 
fitted models > coef(bp, breaks = 4) (Intercept) 1700 - 1738 13.487179 1739 - 1794 10.160714 1795 - 1814 12.150000 1815 - 1875 6.885246 1876 - 1899 9.750000 > plot(Grossarl$marriages) > lines(fitted(bp, breaks = 4), col = 2) > lines(ts(fitted(fm.ext), start = 1700), col = 4) > > > > cleanEx() > nameEx("PhillipsCurve") > ### * PhillipsCurve > > flush(stderr()); flush(stdout()) > > ### Name: PhillipsCurve > ### Title: UK Phillips Curve Equation Data > ### Aliases: PhillipsCurve > ### Keywords: datasets > > ### ** Examples > > ## load and plot data > data("PhillipsCurve") > uk <- window(PhillipsCurve, start = 1948) > plot(uk[, "dp"]) > > ## AR(1) inflation model > ## estimate breakpoints > bp.inf <- breakpoints(dp ~ dp1, data = uk, h = 8) > plot(bp.inf) > summary(bp.inf) Optimal (m+1)-segment partition: Call: breakpoints.formula(formula = dp ~ dp1, h = 8, data = uk) Breakpoints at observation number: m = 1 20 m = 2 20 28 m = 3 9 20 28 Corresponding to breakdates: m = 1 1967 m = 2 1967 1975 m = 3 1956 1967 1975 Fit: m 0 1 2 3 RSS 0.03068 0.02672 0.01838 0.01786 BIC -162.34174 -156.80265 -160.70385 -150.78479 > > ## fit segmented model with three breaks > fac.inf <- breakfactor(bp.inf, breaks = 2, label = "seg") > fm.inf <- lm(dp ~ 0 + fac.inf/dp1, data = uk) > summary(fm.inf) Call: lm(formula = dp ~ 0 + fac.inf/dp1, data = uk) Residuals: Min 1Q Median 3Q Max -0.046987 -0.014861 -0.003593 0.006286 0.058081 Coefficients: Estimate Std. Error t value Pr(>|t|) fac.infseg1 0.024501 0.011176 2.192 0.0353 * fac.infseg2 -0.000775 0.017853 -0.043 0.9656 fac.infseg3 0.017603 0.015007 1.173 0.2489 fac.infseg1:dp1 0.274012 0.269892 1.015 0.3171 fac.infseg2:dp1 1.343369 0.224521 5.983 9.05e-07 *** fac.infseg3:dp1 0.683410 0.130106 5.253 8.07e-06 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1 Residual standard error: 0.02325 on 34 degrees of freedom Multiple R-squared: 0.9237, Adjusted R-squared: 0.9103 F-statistic: 68.64 on 6 and 34 DF, p-value: < 2.2e-16 > > ## Results from Table 2 in Bai & Perron (2003): > ## coefficient estimates > coef(bp.inf, breaks = 2) (Intercept) dp1 1948 - 1967 0.0245010729 0.2740125 1968 - 1975 -0.0007750299 1.3433686 1976 - 1987 0.0176032179 0.6834098 > ## corresponding standard errors > sqrt(sapply(vcov(bp.inf, breaks = 2), diag)) 1948 - 1967 1968 - 1975 1976 - 1987 (Intercept) 0.008268814 0.01985539 0.01571339 dp1 0.199691273 0.24969992 0.13622996 > ## breakpoints and confidence intervals > confint(bp.inf, breaks = 2) Confidence intervals for breakpoints of optimal 3-segment partition: Call: confint.breakpointsfull(object = bp.inf, breaks = 2) Breakpoints at observation number: 2.5 % breakpoints 97.5 % 1 18 20 25 2 26 28 34 Corresponding to breakdates: 2.5 % breakpoints 97.5 % 1 1965 1967 1972 2 1973 1975 1981 > > ## Phillips curve equation > ## estimate breakpoints > bp.pc <- breakpoints(dw ~ dp1 + du + u1, data = uk, h = 5, breaks = 5) > ## look at RSS and BIC > plot(bp.pc) > summary(bp.pc) Optimal (m+1)-segment partition: Call: breakpoints.formula(formula = dw ~ dp1 + du + u1, h = 5, breaks = 5, data = uk) Breakpoints at observation number: m = 1 26 m = 2 20 28 m = 3 9 25 30 m = 4 11 16 25 30 m = 5 11 16 22 27 32 Corresponding to breakdates: m = 1 1973 m = 2 1967 1975 m = 3 1956 1972 1977 m = 4 1958 1963 1972 1977 m = 5 1958 1963 1969 1974 1979 Fit: m 0 1 2 3 4 5 RSS 3.409e-02 1.690e-02 1.062e-02 7.835e-03 5.183e-03 3.388e-03 BIC -1.508e+02 -1.604e+02 -1.605e+02 -1.542e+02 -1.523e+02 -1.509e+02 > > ## fit segmented model with three breaks > fac.pc <- breakfactor(bp.pc, breaks = 2, label = "seg") > fm.pc <- lm(dw ~ 0 + fac.pc/dp1 + du + u1, data = uk) > summary(fm.pc) Call: lm(formula = dw ~ 0 + fac.pc/dp1 + du + u1, data = uk) Residuals: Min 1Q Median 3Q Max -0.041392 -0.011516 0.000089 0.010036 0.044539 
Coefficients: Estimate Std. Error t value Pr(>|t|) fac.pcseg1 0.06574 0.01169 5.623 3.24e-06 *** fac.pcseg2 0.06231 0.01883 3.310 0.00232 ** fac.pcseg3 0.18093 0.05388 3.358 0.00204 ** du -0.14408 0.58218 -0.247 0.80611 u1 -0.87516 0.37274 -2.348 0.02523 * fac.pcseg1:dp1 0.09373 0.24053 0.390 0.69936 fac.pcseg2:dp1 1.23143 0.20498 6.008 1.06e-06 *** fac.pcseg3:dp1 0.01618 0.25667 0.063 0.95013 --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Residual standard error: 0.02021 on 32 degrees of freedom Multiple R-squared: 0.9655, Adjusted R-squared: 0.9569 F-statistic: 112 on 8 and 32 DF, p-value: < 2.2e-16 > > ## Results from Table 3 in Bai & Perron (2003): > ## coefficient estimates > coef(fm.pc) fac.pcseg1 fac.pcseg2 fac.pcseg3 du u1 0.06574278 0.06231337 0.18092502 -0.14408073 -0.87515585 fac.pcseg1:dp1 fac.pcseg2:dp1 fac.pcseg3:dp1 0.09372759 1.23143008 0.01617826 > ## corresponding standard errors > sqrt(diag(vcov(fm.pc))) fac.pcseg1 fac.pcseg2 fac.pcseg3 du u1 0.01169149 0.01882668 0.05388166 0.58217571 0.37273955 fac.pcseg1:dp1 fac.pcseg2:dp1 fac.pcseg3:dp1 0.24052539 0.20497973 0.25666903 > ## breakpoints and confidence intervals > confint(bp.pc, breaks = 2, het.err = FALSE) Confidence intervals for breakpoints of optimal 3-segment partition: Call: confint.breakpointsfull(object = bp.pc, breaks = 2, het.err = FALSE) Breakpoints at observation number: 2.5 % breakpoints 97.5 % 1 19 20 21 2 27 28 29 Corresponding to breakdates: 2.5 % breakpoints 97.5 % 1 1966 1967 1968 2 1974 1975 1976 > > > > cleanEx() > nameEx("RealInt") > ### * RealInt > > flush(stderr()); flush(stdout()) > > ### Name: RealInt > ### Title: US Ex-post Real Interest Rate > ### Aliases: RealInt > ### Keywords: datasets > > ### ** Examples > > ## load and plot data > data("RealInt") > plot(RealInt) > > ## estimate breakpoints > bp.ri <- breakpoints(RealInt ~ 1, h = 15) > plot(bp.ri) > summary(bp.ri) Optimal (m+1)-segment partition: Call: breakpoints.formula(formula = RealInt ~ 1, 
h = 15) Breakpoints at observation number: m = 1 79 m = 2 47 79 m = 3 24 47 79 m = 4 24 47 64 79 m = 5 16 31 47 64 79 Corresponding to breakdates: m = 1 1980(3) m = 2 1972(3) 1980(3) m = 3 1966(4) 1972(3) 1980(3) m = 4 1966(4) 1972(3) 1976(4) 1980(3) m = 5 1964(4) 1968(3) 1972(3) 1976(4) 1980(3) Fit: m 0 1 2 3 4 5 RSS 1214.9 645.0 456.0 445.2 444.9 449.6 BIC 555.7 499.8 473.3 480.1 489.3 499.7 > > ## fit segmented model with three breaks > fac.ri <- breakfactor(bp.ri, breaks = 3, label = "seg") > fm.ri <- lm(RealInt ~ 0 + fac.ri) > summary(fm.ri) Call: lm(formula = RealInt ~ 0 + fac.ri) Residuals: Min 1Q Median 3Q Max -4.5157 -1.3674 -0.0578 1.3248 6.0990 Coefficients: Estimate Std. Error t value Pr(>|t|) fac.riseg1 1.8236 0.4329 4.213 5.57e-05 *** fac.riseg2 0.8661 0.4422 1.959 0.053 . fac.riseg3 -1.7961 0.3749 -4.791 5.83e-06 *** fac.riseg4 5.6429 0.4329 13.036 < 2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Residual standard error: 2.121 on 99 degrees of freedom Multiple R-squared: 0.6842, Adjusted R-squared: 0.6714 F-statistic: 53.62 on 4 and 99 DF, p-value: < 2.2e-16 > > ## setup kernel HAC estimator > vcov.ri <- function(x, ...) kernHAC(x, kernel = "Quadratic Spectral", + prewhite = 1, approx = "AR(1)", ...) > > ## Results from Table 1 in Bai & Perron (2003): > ## coefficient estimates > coef(bp.ri, breaks = 3) (Intercept) 1961(1) - 1966(4) 1.8236167 1967(1) - 1972(3) 0.8660848 1972(4) - 1980(3) -1.7961384 1980(4) - 1986(3) 5.6428896 > ## corresponding standard errors > sapply(vcov(bp.ri, breaks = 3, vcov = vcov.ri), sqrt) 1961(1) - 1966(4) 1967(1) - 1972(3) 1972(4) - 1980(3) 1980(4) - 1986(3) 0.1857577 0.1499849 0.5026749 0.5887460 > ## breakpoints and confidence intervals > confint(bp.ri, breaks = 3, vcov = vcov.ri) Confidence intervals for breakpoints of optimal 4-segment partition: Call: confint.breakpointsfull(object = bp.ri, breaks = 3, vcov. 
= vcov.ri) Breakpoints at observation number: 2.5 % breakpoints 97.5 % 1 18 24 35 2 33 47 48 3 77 79 81 Corresponding to breakdates: Warning: Overlapping confidence intervals 2.5 % breakpoints 97.5 % 1 1965(2) 1966(4) 1969(3) 2 1969(1) 1972(3) 1972(4) 3 1980(1) 1980(3) 1981(1) > > ## Visualization > plot(RealInt) > lines(as.vector(time(RealInt)), fitted(fm.ri), col = 4) > lines(confint(bp.ri, breaks = 3, vcov = vcov.ri)) Warning: Overlapping confidence intervals > > > > cleanEx() > nameEx("SP2001") > ### * SP2001 > > flush(stderr()); flush(stdout()) > > ### Name: SP2001 > ### Title: S&P 500 Stock Prices > ### Aliases: SP2001 > ### Keywords: datasets > > ### ** Examples > > ## load and transform data > ## (DAL: Delta Air Lines, LU: Lucent Technologies) > data("SP2001") > stock.prices <- SP2001[, c("DAL", "LU")] > stock.returns <- diff(log(stock.prices)) > > ## price and return series > plot(stock.prices, ylab = c("Delta Air Lines", "Lucent Technologies"), main = "") > plot(stock.returns, ylab = c("Delta Air Lines", "Lucent Technologies"), main = "") > > ## monitoring of DAL series > myborder <- function(k) 1.939*k/28 > x <- as.vector(stock.returns[, "DAL"][1:28]) > dal.cusum <- mefp(x ~ 1, type = "OLS-CUSUM", border = myborder) > dal.mosum <- mefp(x ~ 1, type = "OLS-MOSUM", h = 0.5, period = 4) > x <- as.vector(stock.returns[, "DAL"]) > dal.cusum <- monitor(dal.cusum) Break detected at observation # 29 > dal.mosum <- monitor(dal.mosum) Break detected at observation # 29 > > ## monitoring of LU series > x <- as.vector(stock.returns[, "LU"][1:28]) > lu.cusum <- mefp(x ~ 1, type = "OLS-CUSUM", border = myborder) > lu.mosum <- mefp(x ~ 1, type = "OLS-MOSUM", h = 0.5, period = 4) > x <- as.vector(stock.returns[, "LU"]) > lu.cusum <- monitor(lu.cusum) > lu.mosum <- monitor(lu.mosum) > > ## pretty plotting > ## (needs some work because lm() does not keep "zoo" attributes) > cus.bound <- zoo(c(rep(NA, 27), myborder(28:102)), index(stock.returns)) > mos.bound <- 
as.vector(boundary(dal.mosum)) > mos.bound <- zoo(c(rep(NA, 27), mos.bound[1], mos.bound), index(stock.returns)) > > ## Lucent Technologies: CUSUM test > plot(zoo(c(lu.cusum$efpprocess, lu.cusum$process), index(stock.prices)), + ylim = c(-1, 1) * coredata(cus.bound)[102], xlab = "Time", ylab = "empirical fluctuation process") > abline(0, 0) > abline(v = as.Date("2001-09-10"), lty = 2) > lines(cus.bound, col = 2) > lines(-cus.bound, col = 2) > > ## Lucent Technologies: MOSUM test > plot(zoo(c(lu.mosum$efpprocess, lu.mosum$process), index(stock.prices)[-(1:14)]), + ylim = c(-1, 1) * coredata(mos.bound)[102], xlab = "Time", ylab = "empirical fluctuation process") > abline(0, 0) > abline(v = as.Date("2001-09-10"), lty = 2) > lines(mos.bound, col = 2) > lines(-mos.bound, col = 2) > > ## Delta Air Lines: CUSUM test > plot(zoo(c(dal.cusum$efpprocess, dal.cusum$process), index(stock.prices)), + ylim = c(-1, 1) * coredata(cus.bound)[102], xlab = "Time", ylab = "empirical fluctuation process") > abline(0, 0) > abline(v = as.Date("2001-09-10"), lty = 2) > lines(cus.bound, col = 2) > lines(-cus.bound, col = 2) > > ## Delta Air Lines: MOSUM test > plot(zoo(c(dal.mosum$efpprocess, dal.mosum$process), index(stock.prices)[-(1:14)]), + ylim = range(dal.mosum$process), xlab = "Time", ylab = "empirical fluctuation process") > abline(0, 0) > abline(v = as.Date("2001-09-10"), lty = 2) > lines(mos.bound, col = 2) > lines(-mos.bound, col = 2) > > > > cleanEx() > nameEx("USIncExp") > ### * USIncExp > > flush(stderr()); flush(stdout()) > > ### Name: USIncExp > ### Title: Income and Expenditures in the US > ### Aliases: USIncExp > ### Keywords: datasets > > ### ** Examples > > ## These example are presented in the vignette distributed with this > ## package, the code was generated by Stangle("strucchange-intro.Rnw") > > ################################################### > ### chunk number 1: data > ################################################### > library("strucchange") > 
data("USIncExp") > plot(USIncExp, plot.type = "single", col = 1:2, ylab = "billion US$") > legend(1960, max(USIncExp), c("income", "expenditures"), + lty = c(1,1), col = 1:2, bty = "n") > > > ################################################### > ### chunk number 2: subset > ################################################### > library("strucchange") > data("USIncExp") > USIncExp2 <- window(USIncExp, start = c(1985,12)) > > > ################################################### > ### chunk number 3: ecm-setup > ################################################### > coint.res <- residuals(lm(expenditure ~ income, data = USIncExp2)) > coint.res <- lag(ts(coint.res, start = c(1985,12), freq = 12), k = -1) > USIncExp2 <- cbind(USIncExp2, diff(USIncExp2), coint.res) > USIncExp2 <- window(USIncExp2, start = c(1986,1), end = c(2001,2)) > colnames(USIncExp2) <- c("income", "expenditure", "diff.income", + "diff.expenditure", "coint.res") > ecm.model <- diff.expenditure ~ coint.res + diff.income > > > ################################################### > ### chunk number 4: ts-used > ################################################### > plot(USIncExp2[,3:5], main = "") > > > ################################################### > ### chunk number 5: efp > ################################################### > ocus <- efp(ecm.model, type="OLS-CUSUM", data=USIncExp2) > me <- efp(ecm.model, type="ME", data=USIncExp2, h=0.2) > > > ################################################### > ### chunk number 6: efp-boundary > ################################################### > bound.ocus <- boundary(ocus, alpha=0.05) > > > ################################################### > ### chunk number 7: OLS-CUSUM > ################################################### > plot(ocus) > > > ################################################### > ### chunk number 8: efp-boundary2 > ################################################### > plot(ocus, boundary = FALSE) > lines(bound.ocus, col = 4) > 
lines(-bound.ocus, col = 4) > > > ################################################### > ### chunk number 9: ME-null > ################################################### > plot(me, functional = NULL) > > > ################################################### > ### chunk number 10: efp-sctest > ################################################### > sctest(ocus) OLS-based CUSUM test data: ocus S0 = 1.5511, p-value = 0.01626 > > > ################################################### > ### chunk number 11: efp-sctest2 > ################################################### > sctest(ecm.model, type="OLS-CUSUM", data=USIncExp2) OLS-based CUSUM test data: ecm.model S0 = 1.5511, p-value = 0.01626 > > > ################################################### > ### chunk number 12: Fstats > ################################################### > fs <- Fstats(ecm.model, from = c(1990, 1), to = c(1999,6), data = USIncExp2) > > > ################################################### > ### chunk number 13: Fstats-plot > ################################################### > plot(fs) > > > ################################################### > ### chunk number 14: pval-plot > ################################################### > plot(fs, pval=TRUE) > > > ################################################### > ### chunk number 15: aveF-plot > ################################################### > plot(fs, aveF=TRUE) > > > ################################################### > ### chunk number 16: Fstats-sctest > ################################################### > sctest(fs, type="expF") expF test data: fs exp.F = 8.9955, p-value = 0.001311 > > > ################################################### > ### chunk number 17: Fstats-sctest2 > ################################################### > sctest(ecm.model, type = "expF", from = 49, to = 162, data = USIncExp2) expF test data: ecm.model exp.F = 8.9955, p-value = 0.001311 > > > ################################################### > ### chunk number 
18: mefp > ################################################### > USIncExp3 <- window(USIncExp2, start = c(1986, 1), end = c(1989,12)) > me.mefp <- mefp(ecm.model, type = "ME", data = USIncExp3, alpha = 0.05) > > > ################################################### > ### chunk number 19: monitor1 > ################################################### > USIncExp3 <- window(USIncExp2, start = c(1986, 1), end = c(1990,12)) > me.mefp <- monitor(me.mefp) > > > ################################################### > ### chunk number 20: monitor2 > ################################################### > USIncExp3 <- window(USIncExp2, start = c(1986, 1)) > me.mefp <- monitor(me.mefp) Break detected at observation # 72 > me.mefp Monitoring with ME test (moving estimates test) Initial call: mefp.formula(formula = ecm.model, type = "ME", data = USIncExp3, alpha = 0.05) Last call: monitor(obj = me.mefp) Significance level : 0.05 Critical value : 3.109524 History size : 48 Last point evaluated : 182 Structural break at : 72 Parameter estimate on history : (Intercept) coint.res diff.income 18.9299679 -0.3893141 0.3156597 Last parameter estimate : (Intercept) coint.res diff.income 27.94869106 0.00983451 0.13314662 > > > ################################################### > ### chunk number 21: monitor-plot > ################################################### > plot(me.mefp) > > > ################################################### > ### chunk number 22: mefp2 > ################################################### > USIncExp3 <- window(USIncExp2, start = c(1986, 1), end = c(1989,12)) > me.efp <- efp(ecm.model, type = "ME", data = USIncExp3, h = 0.5) > me.mefp <- mefp(me.efp, alpha=0.05) > > > ################################################### > ### chunk number 23: monitor3 > ################################################### > USIncExp3 <- window(USIncExp2, start = c(1986, 1)) > me.mefp <- monitor(me.mefp) Break detected at observation # 70 > > > 
################################################### > ### chunk number 24: monitor-plot2 > ################################################### > plot(me.mefp) > > > > > cleanEx() > nameEx("boundary.Fstats") > ### * boundary.Fstats > > flush(stderr()); flush(stdout()) > > ### Name: boundary.Fstats > ### Title: Boundary for F Statistics > ### Aliases: boundary.Fstats > ### Keywords: regression > > ### ** Examples > > ## Load dataset "nhtemp" with average yearly temperatures in New Haven > data("nhtemp") > ## plot the data > plot(nhtemp) > > ## test the model null hypothesis that the average temperature remains > ## constant over the years for potential break points between 1941 > ## (corresponds to from = 0.5) and 1962 (corresponds to to = 0.85) > ## compute F statistics > fs <- Fstats(nhtemp ~ 1, from = 0.5, to = 0.85) > ## plot the p values without boundary > plot(fs, pval = TRUE, alpha = 0.01) > ## add the boundary in another colour > lines(boundary(fs, pval = TRUE, alpha = 0.01), col = 2) > > > > cleanEx() > nameEx("boundary.efp") > ### * boundary.efp > > flush(stderr()); flush(stdout()) > > ### Name: boundary.efp > ### Title: Boundary for Empirical Fluctuation Processes > ### Aliases: boundary.efp > ### Keywords: regression > > ### ** Examples > > ## Load dataset "nhtemp" with average yearly temperatures in New Haven > data("nhtemp") > ## plot the data > plot(nhtemp) > > ## test the model null hypothesis that the average temperature remains constant > ## over the years > ## compute OLS-CUSUM fluctuation process > temp.cus <- efp(nhtemp ~ 1, type = "OLS-CUSUM") > ## plot the process without boundaries > plot(temp.cus, alpha = 0.01, boundary = FALSE) > ## add the boundaries in another colour > bound <- boundary(temp.cus, alpha = 0.01) > lines(bound, col=4) > lines(-bound, col=4) > > > > cleanEx() > nameEx("boundary.mefp") > ### * boundary.mefp > > flush(stderr()); flush(stdout()) > > ### Name: boundary.mefp > ### Title: Boundary Function for Monitoring of 
Structural Changes > ### Aliases: boundary.mefp > ### Keywords: regression > > ### ** Examples > > df1 <- data.frame(y=rnorm(300)) > df1[150:300,"y"] <- df1[150:300,"y"]+1 > me1 <- mefp(y~1, data=df1[1:50,,drop=FALSE], type="ME", h=1, + alpha=0.05) > me2 <- monitor(me1, data=df1) Break detected at observation # 183 > > plot(me2, boundary=FALSE) > lines(boundary(me2), col="green", lty="44") > > > > cleanEx() > nameEx("breakdates") > ### * breakdates > > flush(stderr()); flush(stdout()) > > ### Name: breakdates > ### Title: Breakdates Corresponding to Breakpoints > ### Aliases: breakdates breakdates.breakpoints > ### breakdates.confint.breakpoints > ### Keywords: regression > > ### ** Examples > > ## Nile data with one breakpoint: the annual flows drop in 1898 > ## because the first Ashwan dam was built > data("Nile") > plot(Nile) > > bp.nile <- breakpoints(Nile ~ 1) > summary(bp.nile) Optimal (m+1)-segment partition: Call: breakpoints.formula(formula = Nile ~ 1) Breakpoints at observation number: m = 1 28 m = 2 28 83 m = 3 28 68 83 m = 4 28 45 68 83 m = 5 15 30 45 68 83 Corresponding to breakdates: m = 1 1898 m = 2 1898 1953 m = 3 1898 1938 1953 m = 4 1898 1915 1938 1953 m = 5 1885 1900 1915 1938 1953 Fit: m 0 1 2 3 4 5 RSS 2835157 1597457 1552924 1538097 1507888 1659994 BIC 1318 1270 1276 1285 1292 1311 > plot(bp.nile) > > ## compute breakdates corresponding to the > ## breakpoints of minimum BIC segmentation > breakdates(bp.nile) [1] 1898 > > ## confidence intervals > ci.nile <- confint(bp.nile) > breakdates(ci.nile) 2.5 % breakpoints 97.5 % 1 1895 1898 1902 > ci.nile Confidence intervals for breakpoints of optimal 2-segment partition: Call: confint.breakpointsfull(object = bp.nile) Breakpoints at observation number: 2.5 % breakpoints 97.5 % 1 25 28 32 Corresponding to breakdates: 2.5 % breakpoints 97.5 % 1 1895 1898 1902 > > plot(Nile) > lines(ci.nile) > > > > cleanEx() > nameEx("breakfactor") > ### * breakfactor > > flush(stderr()); flush(stdout()) > > ### Name: 
breakfactor > ### Title: Factor Coding of Segmentations > ### Aliases: breakfactor > ### Keywords: regression > > ### ** Examples > > ## Nile data with one breakpoint: the annual flows drop in 1898 > ## because the first Ashwan dam was built > data("Nile") > plot(Nile) > > ## compute breakpoints > bp.nile <- breakpoints(Nile ~ 1) > > ## fit and visualize segmented and unsegmented model > fm0 <- lm(Nile ~ 1) > fm1 <- lm(Nile ~ breakfactor(bp.nile, breaks = 1)) > > lines(fitted(fm0), col = 3) > lines(fitted(fm1), col = 4) > lines(bp.nile, breaks = 1) > > > > cleanEx() > nameEx("breakpoints") > ### * breakpoints > > flush(stderr()); flush(stdout()) > > ### Name: breakpoints > ### Title: Dating Breaks > ### Aliases: breakpoints breakpoints.formula breakpoints.breakpointsfull > ### breakpoints.Fstats summary.breakpoints summary.breakpointsfull > ### plot.breakpointsfull plot.summary.breakpointsfull print.breakpoints > ### print.summary.breakpointsfull lines.breakpoints coef.breakpointsfull > ### vcov.breakpointsfull fitted.breakpointsfull residuals.breakpointsfull > ### df.residual.breakpointsfull > ### Keywords: regression > > ### ** Examples > > ## Nile data with one breakpoint: the annual flows drop in 1898 > ## because the first Ashwan dam was built > data("Nile") > plot(Nile) > > ## F statistics indicate one breakpoint > fs.nile <- Fstats(Nile ~ 1) > plot(fs.nile) > breakpoints(fs.nile) Optimal 2-segment partition: Call: breakpoints.Fstats(obj = fs.nile) Breakpoints at observation number: 28 Corresponding to breakdates: 1898 > lines(breakpoints(fs.nile)) > > ## or > bp.nile <- breakpoints(Nile ~ 1) > summary(bp.nile) Optimal (m+1)-segment partition: Call: breakpoints.formula(formula = Nile ~ 1) Breakpoints at observation number: m = 1 28 m = 2 28 83 m = 3 28 68 83 m = 4 28 45 68 83 m = 5 15 30 45 68 83 Corresponding to breakdates: m = 1 1898 m = 2 1898 1953 m = 3 1898 1938 1953 m = 4 1898 1915 1938 1953 m = 5 1885 1900 1915 1938 1953 Fit: m 0 1 2 3 4 5 RSS 2835157 
1597457 1552924 1538097 1507888 1659994 BIC 1318 1270 1276 1285 1292 1311 > > ## the BIC also chooses one breakpoint > plot(bp.nile) > breakpoints(bp.nile) Optimal 2-segment partition: Call: breakpoints.breakpointsfull(obj = bp.nile) Breakpoints at observation number: 28 Corresponding to breakdates: 1898 > > ## fit null hypothesis model and model with 1 breakpoint > fm0 <- lm(Nile ~ 1) > fm1 <- lm(Nile ~ breakfactor(bp.nile, breaks = 1)) > plot(Nile) > lines(ts(fitted(fm0), start = 1871), col = 3) > lines(ts(fitted(fm1), start = 1871), col = 4) > lines(bp.nile) > > ## confidence interval > ci.nile <- confint(bp.nile) > ci.nile Confidence intervals for breakpoints of optimal 2-segment partition: Call: confint.breakpointsfull(object = bp.nile) Breakpoints at observation number: 2.5 % breakpoints 97.5 % 1 25 28 32 Corresponding to breakdates: 2.5 % breakpoints 97.5 % 1 1895 1898 1902 > lines(ci.nile) > > > ## UK Seatbelt data: a SARIMA(1,0,0)(1,0,0)_12 model > ## (fitted by OLS) is used and reveals (at least) two > ## breakpoints - one in 1973 associated with the oil crisis and > ## one in 1983 due to the introduction of compulsory > ## wearing of seatbelts in the UK. 
> data("UKDriverDeaths") > seatbelt <- log10(UKDriverDeaths) > seatbelt <- cbind(seatbelt, lag(seatbelt, k = -1), lag(seatbelt, k = -12)) > colnames(seatbelt) <- c("y", "ylag1", "ylag12") > seatbelt <- window(seatbelt, start = c(1970, 1), end = c(1984,12)) > plot(seatbelt[,"y"], ylab = expression(log[10](casualties))) > > ## testing > re.seat <- efp(y ~ ylag1 + ylag12, data = seatbelt, type = "RE") > plot(re.seat) > > ## dating > bp.seat <- breakpoints(y ~ ylag1 + ylag12, data = seatbelt, h = 0.1) > summary(bp.seat) Optimal (m+1)-segment partition: Call: breakpoints.formula(formula = y ~ ylag1 + ylag12, h = 0.1, data = seatbelt) Breakpoints at observation number: m = 1 46 m = 2 46 157 m = 3 46 70 157 m = 4 46 70 108 157 m = 5 46 70 120 141 160 m = 6 46 70 89 108 141 160 m = 7 46 70 89 107 125 144 162 m = 8 18 46 70 89 107 125 144 162 Corresponding to breakdates: m = 1 1973(10) m = 2 1973(10) 1983(1) m = 3 1973(10) 1975(10) 1983(1) m = 4 1973(10) 1975(10) 1978(12) 1983(1) m = 5 1973(10) 1975(10) 1979(12) 1981(9) 1983(4) m = 6 1973(10) 1975(10) 1977(5) 1978(12) 1981(9) 1983(4) m = 7 1973(10) 1975(10) 1977(5) 1978(11) 1980(5) 1981(12) 1983(6) m = 8 1971(6) 1973(10) 1975(10) 1977(5) 1978(11) 1980(5) 1981(12) 1983(6) Fit: m 0 1 2 3 4 5 6 RSS 0.3297 0.2967 0.2676 0.2438 0.2395 0.2317 0.2258 BIC -602.8611 -601.0539 -598.9042 -594.8774 -577.2905 -562.4880 -546.3632 m 7 8 RSS 0.2244 0.2231 BIC -526.7295 -506.9886 > lines(bp.seat, breaks = 2) > > ## minimum BIC partition > plot(bp.seat) > breakpoints(bp.seat) Optimal 1-segment partition: Call: breakpoints.breakpointsfull(obj = bp.seat) Breakpoints at observation number: NA Corresponding to breakdates: NA > ## the BIC would choose 0 breakpoints although the RE and supF test > ## clearly reject the hypothesis of structural stability. Bai & > ## Perron (2003) report that the BIC has problems in dynamic regressions. 
> ## due to the shape of the RE process of the F statistics choose two > ## breakpoints and fit corresponding models > bp.seat2 <- breakpoints(bp.seat, breaks = 2) > fm0 <- lm(y ~ ylag1 + ylag12, data = seatbelt) > fm1 <- lm(y ~ breakfactor(bp.seat2)/(ylag1 + ylag12) - 1, data = seatbelt) > > ## plot > plot(seatbelt[,"y"], ylab = expression(log[10](casualties))) > time.seat <- as.vector(time(seatbelt)) > lines(time.seat, fitted(fm0), col = 3) > lines(time.seat, fitted(fm1), col = 4) > lines(bp.seat2) > > ## confidence intervals > ci.seat2 <- confint(bp.seat, breaks = 2) > ci.seat2 Confidence intervals for breakpoints of optimal 3-segment partition: Call: confint.breakpointsfull(object = bp.seat, breaks = 2) Breakpoints at observation number: 2.5 % breakpoints 97.5 % 1 33 46 56 2 144 157 171 Corresponding to breakdates: 2.5 % breakpoints 97.5 % 1 1972(9) 1973(10) 1974(8) 2 1981(12) 1983(1) 1984(3) > lines(ci.seat2) > > > > cleanEx() > nameEx("catL2BB") > ### * catL2BB > > flush(stderr()); flush(stdout()) > > ### Name: catL2BB > ### Title: Generators for efpFunctionals along Categorical Variables > ### Aliases: catL2BB ordL2BB ordwmax > ### Keywords: regression > > ### ** Examples > > ## artificial data > set.seed(1) > d <- data.frame( + x = runif(200, -1, 1), + z = factor(rep(1:4, each = 50)), + err = rnorm(200) + ) > d$y <- rep(c(0.5, -0.5), c(150, 50)) * d$x + d$err > > ## empirical fluctuation process > scus <- gefp(y ~ x, data = d, fit = lm, order.by = ~ z) > > ## chi-squared-type test (unordered LM-type test) > LMuo <- catL2BB(scus) > plot(scus, functional = LMuo) > sctest(scus, functional = LMuo) M-fluctuation test data: scus f(efp) = 12.375, p-value = 0.05411 > > ## ordinal maxLM test (with few replications only to save time) > maxLMo <- ordL2BB(scus, nrep = 10000) > plot(scus, functional = maxLMo) > sctest(scus, functional = maxLMo) M-fluctuation test data: scus f(efp) = 9.0937, p-value = 0.03173 > > ## ordinal weighted double maximum test > WDM <- 
ordwmax(scus) > plot(scus, functional = WDM) > sctest(scus, functional = WDM) M-fluctuation test data: scus f(efp) = 3.001, p-value = 0.01498 > > > > cleanEx() > nameEx("confint.breakpointsfull") > ### * confint.breakpointsfull > > flush(stderr()); flush(stdout()) > > ### Name: confint.breakpointsfull > ### Title: Confidence Intervals for Breakpoints > ### Aliases: confint.breakpointsfull lines.confint.breakpoints > ### print.confint.breakpoints > ### Keywords: regression > > ### ** Examples > > ## Nile data with one breakpoint: the annual flows drop in 1898 > ## because the first Ashwan dam was built > data("Nile") > plot(Nile) > > ## dating breaks > bp.nile <- breakpoints(Nile ~ 1) > ci.nile <- confint(bp.nile, breaks = 1) > lines(ci.nile) > > > > cleanEx() > nameEx("durab") > ### * durab > > flush(stderr()); flush(stdout()) > > ### Name: durab > ### Title: US Labor Productivity > ### Aliases: durab > ### Keywords: datasets > > ### ** Examples > > data("durab") > ## use AR(1) model as in Hansen (2001) and Zeileis et al. (2005) > durab.model <- y ~ lag > > ## historical tests > ## OLS-based CUSUM process > ols <- efp(durab.model, data = durab, type = "OLS-CUSUM") > plot(ols) > ## F statistics > fs <- Fstats(durab.model, data = durab, from = 0.1) > plot(fs) > > ## F statistics based on heteroskadisticy-consistent covariance matrix > fsHC <- Fstats(durab.model, data = durab, from = 0.1, + vcov = function(x, ...) 
vcovHC(x, type = "HC", ...)) > plot(fsHC) > > ## monitoring > Durab <- window(durab, start=1964, end = c(1979, 12)) > ols.efp <- efp(durab.model, type = "OLS-CUSUM", data = Durab) > newborder <- function(k) 1.723 * k/192 > ols.mefp <- mefp(ols.efp, period=2) > ols.mefp2 <- mefp(ols.efp, border=newborder) > Durab <- window(durab, start=1964) > ols.mon <- monitor(ols.mefp) Break detected at observation # 437 > ols.mon2 <- monitor(ols.mefp2) Break detected at observation # 416 > plot(ols.mon) > lines(boundary(ols.mon2), col = 2) > ## Note: critical value for linear boundary taken from Table III > ## in Zeileis et al. 2005: (1.568 + 1.896)/2 = 1.732 is a linear > ## interpolation between the values for T = 2 and T = 3 at > ## alpha = 0.05. A typo switched 1.732 to 1.723. > > ## dating > bp <- breakpoints(durab.model, data = durab) > summary(bp) Optimal (m+1)-segment partition: Call: breakpoints.formula(formula = durab.model, data = durab) Breakpoints at observation number: m = 1 418 m = 2 221 530 m = 3 114 225 530 m = 4 114 221 418 531 m = 5 114 221 319 418 531 Corresponding to breakdates: m = 1 1981(12) m = 2 1965(7) 1991(4) m = 3 1956(8) 1965(11) 1991(4) m = 4 1956(8) 1965(7) 1981(12) 1991(5) m = 5 1956(8) 1965(7) 1973(9) 1981(12) 1991(5) Fit: m 0 1 2 3 4 5 RSS 5.586e-02 5.431e-02 5.325e-02 5.220e-02 5.171e-02 5.157e-02 BIC -4.221e+03 -4.220e+03 -4.213e+03 -4.207e+03 -4.194e+03 -4.176e+03 > plot(summary(bp)) > > plot(ols) > lines(breakpoints(bp, breaks = 1), col = 3) > lines(breakpoints(bp, breaks = 2), col = 4) > plot(fs) > lines(breakpoints(bp, breaks = 1), col = 3) > lines(breakpoints(bp, breaks = 2), col = 4) > > > > cleanEx() > nameEx("efp") > ### * efp > > flush(stderr()); flush(stdout()) > > ### Name: efp > ### Title: Empirical Fluctuation Processes > ### Aliases: efp print.efp > ### Keywords: regression > > ### ** Examples > > ## Nile data with one breakpoint: the annual flows drop in 1898 > ## because the first Ashwan dam was built > data("Nile") > 
plot(Nile) > > ## test the null hypothesis that the annual flow remains constant > ## over the years > ## compute OLS-based CUSUM process and plot > ## with standard and alternative boundaries > ocus.nile <- efp(Nile ~ 1, type = "OLS-CUSUM") > plot(ocus.nile) > plot(ocus.nile, alpha = 0.01, alt.boundary = TRUE) > ## calculate corresponding test statistic > sctest(ocus.nile) OLS-based CUSUM test data: ocus.nile S0 = 2.9518, p-value = 5.409e-08 > > ## UK Seatbelt data: a SARIMA(1,0,0)(1,0,0)_12 model > ## (fitted by OLS) is used and reveals (at least) two > ## breakpoints - one in 1973 associated with the oil crisis and > ## one in 1983 due to the introduction of compulsory > ## wearing of seatbelts in the UK. > data("UKDriverDeaths") > seatbelt <- log10(UKDriverDeaths) > seatbelt <- cbind(seatbelt, lag(seatbelt, k = -1), lag(seatbelt, k = -12)) > colnames(seatbelt) <- c("y", "ylag1", "ylag12") > seatbelt <- window(seatbelt, start = c(1970, 1), end = c(1984,12)) > plot(seatbelt[,"y"], ylab = expression(log[10](casualties))) > > ## use RE process > re.seat <- efp(y ~ ylag1 + ylag12, data = seatbelt, type = "RE") > plot(re.seat) > plot(re.seat, functional = NULL) > sctest(re.seat) RE test (recursive estimates test) data: re.seat RE = 1.6311, p-value = 0.02904 > > > > cleanEx() > nameEx("efpFunctional") > ### * efpFunctional > > flush(stderr()); flush(stdout()) > > ### Name: efpFunctional > ### Title: Functionals for Fluctuation Processes > ### Aliases: efpFunctional simulateBMDist maxBM maxBB maxBMI maxBBI maxL2BB > ### meanL2BB rangeBM rangeBB rangeBMI rangeBBI > ### Keywords: regression > > ### ** Examples > > > data("BostonHomicide") > gcus <- gefp(homicides ~ 1, family = poisson, vcov = kernHAC, + data = BostonHomicide) > plot(gcus, functional = meanL2BB) > gcus Generalized Empirical M-Fluctuation Process Call: gefp(homicides ~ 1, family = poisson, vcov = kernHAC, data = BostonHomicide) Fitted model: Call: fit(formula = ..1, family = ..2, data = data) Coefficients: 
(Intercept) 1.017 Degrees of Freedom: 76 Total (i.e. Null); 76 Residual Null Deviance: 115.6 Residual Deviance: 115.6 AIC: 316.5 > sctest(gcus, functional = meanL2BB) M-fluctuation test data: gcus f(efp) = 0.93375, p-value = 0.005 > > y <- rnorm(1000) > x1 <- runif(1000) > x2 <- runif(1000) > > ## supWald statistic computed by Fstats() > fs <- Fstats(y ~ x1 + x2, from = 0.1) > plot(fs) > sctest(fs) supF test data: fs sup.F = 12.252, p-value = 0.1161 > > ## compare with supLM statistic > scus <- gefp(y ~ x1 + x2, fit = lm) > plot(scus, functional = supLM(0.1)) > sctest(scus, functional = supLM(0.1)) M-fluctuation test data: scus f(efp) = 12.258, p-value = 0.1158 > > ## seatbelt data > data("UKDriverDeaths") > seatbelt <- log10(UKDriverDeaths) > seatbelt <- cbind(seatbelt, lag(seatbelt, k = -1), lag(seatbelt, k = -12)) > colnames(seatbelt) <- c("y", "ylag1", "ylag12") > seatbelt <- window(seatbelt, start = c(1970, 1), end = c(1984,12)) > > scus.seat <- gefp(y ~ ylag1 + ylag12, data = seatbelt) > > ## double maximum test > plot(scus.seat) > ## range test > plot(scus.seat, functional = rangeBB) > ## Cramer-von Mises statistic (Nyblom-Hansen test) > plot(scus.seat, functional = meanL2BB) > ## supLM test > plot(scus.seat, functional = supLM(0.1)) > > > > cleanEx() > nameEx("gefp") > ### * gefp > > flush(stderr()); flush(stdout()) > > ### Name: gefp > ### Title: Generalized Empirical M-Fluctuation Processes > ### Aliases: gefp print.gefp sctest.gefp plot.gefp time.gefp print.gefp > ### Keywords: regression > > ### ** Examples > > data("BostonHomicide") > gcus <- gefp(homicides ~ 1, family = poisson, vcov = kernHAC, + data = BostonHomicide) > plot(gcus, aggregate = FALSE) > gcus Generalized Empirical M-Fluctuation Process Call: gefp(homicides ~ 1, family = poisson, vcov = kernHAC, data = BostonHomicide) Fitted model: Call: fit(formula = ..1, family = ..2, data = data) Coefficients: (Intercept) 1.017 Degrees of Freedom: 76 Total (i.e. 
Null); 76 Residual Null Deviance: 115.6 Residual Deviance: 115.6 AIC: 316.5 > sctest(gcus) M-fluctuation test data: gcus f(efp) = 1.669, p-value = 0.007613 > > > > cleanEx() > nameEx("logLik.breakpoints") > ### * logLik.breakpoints > > flush(stderr()); flush(stdout()) > > ### Name: logLik.breakpoints > ### Title: Log Likelihood and Information Criteria for Breakpoints > ### Aliases: logLik.breakpoints logLik.breakpointsfull AIC.breakpointsfull > ### Keywords: regression > > ### ** Examples > > ## Nile data with one breakpoint: the annual flows drop in 1898 > ## because the first Ashwan dam was built > data("Nile") > plot(Nile) > > bp.nile <- breakpoints(Nile ~ 1) > summary(bp.nile) Optimal (m+1)-segment partition: Call: breakpoints.formula(formula = Nile ~ 1) Breakpoints at observation number: m = 1 28 m = 2 28 83 m = 3 28 68 83 m = 4 28 45 68 83 m = 5 15 30 45 68 83 Corresponding to breakdates: m = 1 1898 m = 2 1898 1953 m = 3 1898 1938 1953 m = 4 1898 1915 1938 1953 m = 5 1885 1900 1915 1938 1953 Fit: m 0 1 2 3 4 5 RSS 2835157 1597457 1552924 1538097 1507888 1659994 BIC 1318 1270 1276 1285 1292 1311 > plot(bp.nile) > > ## BIC of partitions with0 to 5 breakpoints > plot(0:5, AIC(bp.nile, k = log(bp.nile$nobs)), type = "b") > ## AIC > plot(0:5, AIC(bp.nile), type = "b") > > ## BIC, AIC, log likelihood of a single partition > bp.nile1 <- breakpoints(bp.nile, breaks = 1) > AIC(bp.nile1, k = log(bp.nile1$nobs)) [1] 1270.084 > AIC(bp.nile1) [1] 1259.663 > logLik(bp.nile1) 'log Lik.' 
-625.8315 (df=4) > > > > cleanEx() > nameEx("mefp") > ### * mefp > > flush(stderr()); flush(stdout()) > > ### Name: mefp > ### Title: Monitoring of Empirical Fluctuation Processes > ### Aliases: mefp mefp.formula mefp.efp print.mefp monitor > ### Keywords: regression > > ### ** Examples > > df1 <- data.frame(y=rnorm(300)) > df1[150:300,"y"] <- df1[150:300,"y"]+1 > > ## use the first 50 observations as history period > e1 <- efp(y~1, data=df1[1:50,,drop=FALSE], type="ME", h=1) > me1 <- mefp(e1, alpha=0.05) > > ## the same in one function call > me1 <- mefp(y~1, data=df1[1:50,,drop=FALSE], type="ME", h=1, + alpha=0.05) > > ## monitor the 50 next observations > me2 <- monitor(me1, data=df1[1:100,,drop=FALSE]) > plot(me2) > > # and now monitor on all data > me3 <- monitor(me2, data=df1) Break detected at observation # 183 > plot(me3) > > > ## Load dataset "USIncExp" with income and expenditure in the US > ## and choose a suitable subset for the history period > data("USIncExp") > USIncExp3 <- window(USIncExp, start=c(1969,1), end=c(1971,12)) > ## initialize the monitoring with the formula interface > me.mefp <- mefp(expenditure~income, type="ME", rescale=TRUE, + data=USIncExp3, alpha=0.05) > > ## monitor the new observations for the year 1972 > USIncExp3 <- window(USIncExp, start=c(1969,1), end=c(1972,12)) > me.mefp <- monitor(me.mefp) > > ## monitor the new data for the years 1973-1976 > USIncExp3 <- window(USIncExp, start=c(1969,1), end=c(1976,12)) > me.mefp <- monitor(me.mefp) Break detected at observation # 58 > plot(me.mefp, functional = NULL) > > > > cleanEx() > nameEx("plot.Fstats") > ### * plot.Fstats > > flush(stderr()); flush(stdout()) > > ### Name: plot.Fstats > ### Title: Plot F Statistics > ### Aliases: plot.Fstats lines.Fstats > ### Keywords: hplot > > ### ** Examples > > ## Load dataset "nhtemp" with average yearly temperatures in New Haven > data("nhtemp") > ## plot the data > plot(nhtemp) > > ## test the model null hypothesis that the average 
temperature remains > ## constant over the years for potential break points between 1941 > ## (corresponds to from = 0.5) and 1962 (corresponds to to = 0.85) > ## compute F statistics > fs <- Fstats(nhtemp ~ 1, from = 0.5, to = 0.85) > ## plot the F statistics > plot(fs, alpha = 0.01) > ## and the corresponding p values > plot(fs, pval = TRUE, alpha = 0.01) > ## perform the aveF test > sctest(fs, type = "aveF") aveF test data: fs ave.F = 10.81, p-value = 2.059e-06 > > > > cleanEx() > nameEx("plot.efp") > ### * plot.efp > > flush(stderr()); flush(stdout()) > > ### Name: plot.efp > ### Title: Plot Empirical Fluctuation Process > ### Aliases: plot.efp lines.efp > ### Keywords: hplot > > ### ** Examples > > ## Load dataset "nhtemp" with average yearly temperatures in New Haven > data("nhtemp") > ## plot the data > plot(nhtemp) > > ## test the model null hypothesis that the average temperature remains > ## constant over the years > ## compute Rec-CUSUM fluctuation process > temp.cus <- efp(nhtemp ~ 1) > ## plot the process > plot(temp.cus, alpha = 0.01) > ## and calculate the test statistic > sctest(temp.cus) Recursive CUSUM test data: temp.cus S = 1.2724, p-value = 0.002902 > > ## compute (recursive estimates) fluctuation process > ## with an additional linear trend regressor > lin.trend <- 1:60 > temp.me <- efp(nhtemp ~ lin.trend, type = "fluctuation") > ## plot the bivariate process > plot(temp.me, functional = NULL) > ## and perform the corresponding test > sctest(temp.me) RE test (recursive estimates test) data: temp.me RE = 1.4938, p-value = 0.04558 > > > > cleanEx() > nameEx("plot.mefp") > ### * plot.mefp > > flush(stderr()); flush(stdout()) > > ### Name: plot.mefp > ### Title: Plot Methods for mefp Objects > ### Aliases: plot.mefp lines.mefp > ### Keywords: hplot > > ### ** Examples > > df1 <- data.frame(y=rnorm(300)) > df1[150:300,"y"] <- df1[150:300,"y"]+1 > me1 <- mefp(y~1, data=df1[1:50,,drop=FALSE], type="ME", h=1, + alpha=0.05) > me2 <- monitor(me1, 
data=df1) Break detected at observation # 183 > > plot(me2) > > > > cleanEx() > nameEx("recresid") > ### * recresid > > flush(stderr()); flush(stdout()) > > ### Name: recresid > ### Title: Recursive Residuals > ### Aliases: recresid recresid.default recresid.formula recresid.lm > ### Keywords: regression > > ### ** Examples > > x <- rnorm(100) + rep(c(0, 2), each = 50) > rr <- recresid(x ~ 1) > plot(cumsum(rr), type = "l") > > plot(efp(x ~ 1, type = "Rec-CUSUM")) > > > > cleanEx() > nameEx("root.matrix") > ### * root.matrix > > flush(stderr()); flush(stdout()) > > ### Name: root.matrix > ### Title: Root of a Matrix > ### Aliases: root.matrix > ### Keywords: algebra > > ### ** Examples > > X <- matrix(c(1,2,2,8), ncol=2) > test <- root.matrix(X) > ## control results > X [,1] [,2] [1,] 1 2 [2,] 2 8 > test %*% test [,1] [,2] [1,] 1 2 [2,] 2 8 > > > > cleanEx() > nameEx("scPublications") > ### * scPublications > > flush(stderr()); flush(stdout()) > > ### Name: scPublications > ### Title: Structural Change Publications > ### Aliases: scPublications > ### Keywords: datasets > > ### ** Examples > > ## construct time series: > ## number of sc publications in econometrics/statistics > data("scPublications") > > ## select years from 1987 and > ## `most important' journals > pub <- scPublications > pub <- subset(pub, year > 1986) > tab1 <- table(pub$journal) > nam1 <- names(tab1)[as.vector(tab1) > 9] ## at least 10 papers > tab2 <- sapply(levels(pub$journal), function(x) min(subset(pub, journal == x)$year)) > nam2 <- names(tab2)[as.vector(tab2) < 1991] ## started at least in 1990 > nam <- nam1[nam1 %in% nam2] > pub <- subset(pub, as.character(journal) %in% nam) > pub$journal <- factor(pub$journal) > pub_data <- pub > > ## generate time series > pub <- with(pub, tapply(type, year, table)) > pub <- zoo(t(sapply(pub, cbind)), 1987:2006) > colnames(pub) <- levels(pub_data$type) > > ## visualize > plot(pub, ylim = c(0, 35)) > > > > cleanEx() > nameEx("sctest.Fstats") > ### * 
sctest.Fstats > > flush(stderr()); flush(stdout()) > > ### Name: sctest.Fstats > ### Title: supF-, aveF- and expF-Test > ### Aliases: sctest.Fstats > ### Keywords: htest > > ### ** Examples > > ## Load dataset "nhtemp" with average yearly temperatures in New Haven > data(nhtemp) > ## plot the data > plot(nhtemp) > > ## test the model null hypothesis that the average temperature remains > ## constant over the years for potential break points between 1941 > ## (corresponds to from = 0.5) and 1962 (corresponds to to = 0.85) > ## compute F statistics > fs <- Fstats(nhtemp ~ 1, from = 0.5, to = 0.85) > ## plot the F statistics > plot(fs, alpha = 0.01) > ## and the corresponding p values > plot(fs, pval = TRUE, alpha = 0.01) > ## perform the aveF test > sctest(fs, type = "aveF") aveF test data: fs ave.F = 10.81, p-value = 2.059e-06 > > > > cleanEx() > nameEx("sctest.default") > ### * sctest.default > > flush(stderr()); flush(stdout()) > > ### Name: sctest.default > ### Title: Structural Change Tests in Parametric Models > ### Aliases: sctest.default > ### Keywords: htest > > ### ** Examples > > ## Zeileis and Hornik (2007), Section 5.3, Figure 6 > data("Grossarl") > m <- glm(cbind(illegitimate, legitimate) ~ 1, family = binomial, data = Grossarl, + subset = time(fraction) <= 1800) > sctest(m, order.by = 1700:1800, functional = "CvM") M-fluctuation test data: m f(efp) = 3.5363, p-value = 0.005 > > > > cleanEx() > nameEx("sctest.efp") > ### * sctest.efp > > flush(stderr()); flush(stdout()) > > ### Name: sctest.efp > ### Title: Generalized Fluctuation Tests > ### Aliases: sctest.efp > ### Keywords: htest > > ### ** Examples > > ## Load dataset "nhtemp" with average yearly temperatures in New Haven > data("nhtemp") > ## plot the data > plot(nhtemp) > > ## test the model null hypothesis that the average temperature remains > ## constant over the years compute OLS-CUSUM fluctuation process > temp.cus <- efp(nhtemp ~ 1, type = "OLS-CUSUM") > ## plot the process with alternative 
boundaries > plot(temp.cus, alpha = 0.01, alt.boundary = TRUE) > ## and calculate the test statistic > sctest(temp.cus) OLS-based CUSUM test data: temp.cus S0 = 2.0728, p-value = 0.0003709 > > ## compute moving estimates fluctuation process > temp.me <- efp(nhtemp ~ 1, type = "ME", h = 0.2) > ## plot the process with functional = "max" > plot(temp.me) > ## and perform the corresponding test > sctest(temp.me) ME test (moving estimates test) data: temp.me ME = 1.5627, p-value = 0.01 > > > > cleanEx() > nameEx("sctest.formula") > ### * sctest.formula > > flush(stderr()); flush(stdout()) > > ### Name: sctest.formula > ### Title: Structural Change Tests in Linear Regression Models > ### Aliases: sctest.formula > ### Keywords: htest > > ### ** Examples > > ## Example 7.4 from Greene (1993), "Econometric Analysis" > ## Chow test on Longley data > data("longley") > sctest(Employed ~ Year + GNP.deflator + GNP + Armed.Forces, data = longley, + type = "Chow", point = 7) Chow test data: Employed ~ Year + GNP.deflator + GNP + Armed.Forces F = 3.9268, p-value = 0.06307 > > ## which is equivalent to segmenting the regression via > fac <- factor(c(rep(1, 7), rep(2, 9))) > fm0 <- lm(Employed ~ Year + GNP.deflator + GNP + Armed.Forces, data = longley) > fm1 <- lm(Employed ~ fac/(Year + GNP.deflator + GNP + Armed.Forces), data = longley) > anova(fm0, fm1) Analysis of Variance Table Model 1: Employed ~ Year + GNP.deflator + GNP + Armed.Forces Model 2: Employed ~ fac/(Year + GNP.deflator + GNP + Armed.Forces) Res.Df RSS Df Sum of Sq F Pr(>F) 1 11 4.8987 2 6 1.1466 5 3.7521 3.9268 0.06307 . --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 > > ## estimates from Table 7.5 in Greene (1993) > summary(fm0) Call: lm(formula = Employed ~ Year + GNP.deflator + GNP + Armed.Forces, data = longley) Residuals: Min 1Q Median 3Q Max -0.9058 -0.3427 -0.1076 0.2168 1.4377 Coefficients: Estimate Std. 
Error t value Pr(>|t|) (Intercept) 1.169e+03 8.359e+02 1.399 0.18949 Year -5.765e-01 4.335e-01 -1.330 0.21049 GNP.deflator -1.977e-02 1.389e-01 -0.142 0.88940 GNP 6.439e-02 1.995e-02 3.227 0.00805 ** Armed.Forces -1.015e-04 3.086e-03 -0.033 0.97436 --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Residual standard error: 0.6673 on 11 degrees of freedom Multiple R-squared: 0.9735, Adjusted R-squared: 0.9639 F-statistic: 101.1 on 4 and 11 DF, p-value: 1.346e-08 > summary(fm1) Call: lm(formula = Employed ~ fac/(Year + GNP.deflator + GNP + Armed.Forces), data = longley) Residuals: Min 1Q Median 3Q Max -0.47717 -0.18950 0.02089 0.14836 0.56493 Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) 1.678e+03 9.390e+02 1.787 0.12413 fac2 2.098e+03 1.786e+03 1.174 0.28473 fac1:Year -8.352e-01 4.847e-01 -1.723 0.13563 fac2:Year -1.914e+00 7.913e-01 -2.419 0.05194 . fac1:GNP.deflator -1.633e-01 1.762e-01 -0.927 0.38974 fac2:GNP.deflator -4.247e-02 2.238e-01 -0.190 0.85576 fac1:GNP 9.481e-02 3.815e-02 2.485 0.04747 * fac2:GNP 1.123e-01 2.269e-02 4.951 0.00258 ** fac1:Armed.Forces -2.467e-03 6.965e-03 -0.354 0.73532 fac2:Armed.Forces -2.579e-02 1.259e-02 -2.049 0.08635 . --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1 Residual standard error: 0.4372 on 6 degrees of freedom Multiple R-squared: 0.9938, Adjusted R-squared: 0.9845 F-statistic: 106.9 on 9 and 6 DF, p-value: 6.28e-06 > > > > cleanEx() > nameEx("solveCrossprod") > ### * solveCrossprod > > flush(stderr()); flush(stdout()) > > ### Name: solveCrossprod > ### Title: Inversion of X'X > ### Aliases: solveCrossprod > ### Keywords: algebra > > ### ** Examples > > X <- cbind(1, rnorm(100)) > solveCrossprod(X) [,1] [,2] [1,] 0.010148448 -0.001363317 [2,] -0.001363317 0.012520432 > solve(crossprod(X)) [,1] [,2] [1,] 0.010148448 -0.001363317 [2,] -0.001363317 0.012520432 > > > > cleanEx() > nameEx("supLM") > ### * supLM > > flush(stderr()); flush(stdout()) > > ### Name: supLM > ### Title: Generators for efpFunctionals along Continuous Variables > ### Aliases: supLM maxMOSUM > ### Keywords: regression > > ### ** Examples > > ## seatbelt data > data("UKDriverDeaths") > seatbelt <- log10(UKDriverDeaths) > seatbelt <- cbind(seatbelt, lag(seatbelt, k = -1), lag(seatbelt, k = -12)) > colnames(seatbelt) <- c("y", "ylag1", "ylag12") > seatbelt <- window(seatbelt, start = c(1970, 1), end = c(1984,12)) > > ## empirical fluctuation process > scus.seat <- gefp(y ~ ylag1 + ylag12, data = seatbelt) > > ## supLM test > plot(scus.seat, functional = supLM(0.1)) > ## MOSUM test > plot(scus.seat, functional = maxMOSUM(0.25)) > ## double maximum test > plot(scus.seat) > ## range test > plot(scus.seat, functional = rangeBB) > ## Cramer-von Mises statistic (Nyblom-Hansen test) > plot(scus.seat, functional = meanL2BB) > > > > ### *