strucchange/0000755000175400001440000000000013550416405012756 5ustar zeileisusersstrucchange/NAMESPACE0000644000175400001440000000514313550207777014212 0ustar zeileisusersimport("stats", "graphics", "zoo", "sandwich") importFrom("utils", "globalVariables") export( ## testing "efp", "gefp", "Fstats", ## monitoring "mefp", "monitor", ## dating "breakpoints", "breakdates", "breakfactor", ## efp functionals "efpFunctional", "maxBM", "maxBB", "maxBMI", "maxBBI", "maxL2BB", "meanL2BB", "rangeBM", "rangeBB", "rangeBMI", "rangeBBI", "supLM", "maxMOSUM", "catL2BB", "ordL2BB", "ordwmax", ## new generics "sctest", "boundary", "recresid", ## utilities "recresid.default", "simulateBMDist", "root.matrix", "solveCrossprod", ## internal objects ## (currently still exported, needs fixing) "pvalue.efp", "pvalue.Fstats", "pargmaxV", "sc.beta.sup", "sc.beta.ave", "sc.beta.exp", "sc.me", "sc.meanL2", "sc.maxL2", "monitorMECritval", "monitorMECritvalData", "monitorMECritvalTable", "monitorRECritval", "monitorRECritvalData", "monitorRECritvalTable") ## methods to new generics S3method("sctest", "default") S3method("sctest", "formula") S3method("sctest", "efp") S3method("sctest", "gefp") S3method("sctest", "Fstats") S3method("boundary", "efp") S3method("boundary", "Fstats") S3method("boundary", "mefp") S3method("breakpoints", "formula") S3method("breakpoints", "breakpointsfull") S3method("breakpoints", "Fstats") S3method("breakdates", "breakpoints") S3method("breakdates", "confint.breakpoints") S3method("mefp", "formula") S3method("mefp", "efp") S3method("recresid", "formula") S3method("recresid", "lm") S3method("recresid", "default") ## methods to standard generics S3method("plot", "efp") S3method("print", "efp") S3method("lines", "efp") S3method("print", "gefp") S3method("plot", "gefp") S3method("time", "gefp") S3method("plot", "Fstats") S3method("print", "Fstats") S3method("lines", "Fstats") S3method("plot", "mefp") S3method("print", "mefp") S3method("lines", "mefp") S3method("print", "breakpoints") S3method("lines", "breakpoints") S3method("logLik", "breakpoints") S3method("summary", "breakpoints") S3method("logLik", "breakpointsfull") S3method("AIC", "breakpointsfull") S3method("summary", "breakpointsfull") S3method("plot", "breakpointsfull") S3method("confint", "breakpointsfull") S3method("coef", "breakpointsfull") S3method("vcov", "breakpointsfull") S3method("fitted", "breakpointsfull") S3method("residuals", "breakpointsfull") S3method("df.residual", "breakpointsfull") S3method("plot", "summary.breakpointsfull") S3method("print", "summary.breakpointsfull") S3method("print", "confint.breakpoints") S3method("lines", "confint.breakpoints") useDynLib(strucchange, .registration = TRUE) strucchange/demo/0000755000175400001440000000000013062350355013701 5ustar zeileisusersstrucchange/demo/tkmonitoring.R0000644000175400001440000000771113062350355016556 0ustar zeileisusersif(require("tcltk")) { data(UKDriverDeaths) seatbelt <- log10(UKDriverDeaths) seatbelt <- cbind(seatbelt, lag(seatbelt, k = -1), lag(seatbelt, k = -12)) colnames(seatbelt) <- c("y", "ylag1", "ylag12") seatbelt <- window(seatbelt, start = c(1970, 1), end = c(1984,12)) data(GermanM1) data(durab) data <- tclVar("M1") type <- tclVar("OLS-CUSUM") border <- tclVar(1) h <- tclVar(0.5) h.sav <- 0.5 replot <- function(...) 
{ h.sav <- hh <- as.numeric(tclvalue(h)) tp <- tclvalue(type) bd <- tclvalue(border) dt <- tclvalue(data) switch(dt, "UK Seatbelt" = { seat.sub <<- window(seatbelt, start = c(1975,11), end = c(1983,1)) seat.efp <- efp(y ~ ylag1 + ylag12, data = seat.sub, type = tp, h = hh) if(bd > 0 & tp %in% c("OLS-CUSUM", "RE")) bd <- newborder <- function(k) 1.5778*k/seat.efp$nobs else bd <- NULL seat.mefp <- mefp(seat.efp, period = 2, border = bd) seat.sub <<- window(seatbelt, start = c(1975, 11)) seat.mon <- monitor(seat.mefp, verbose = FALSE) plot(seat.mon) }, "M1" = { M1 <<- historyM1 m1.efp <- efp(dm ~ dy2 + dR + dR1 + dp + ecm.res + season, type = tp, h = hh, data = M1) if(bd > 0 & tp %in% c("OLS-CUSUM", "RE")) bd <- newborder <- function(k) 1.5778*k/m1.efp$nobs else bd <- NULL m1.mefp <- mefp(m1.efp, period = 2, border = bd) M1 <<- GermanM1 m1.mon <- monitor(m1.mefp, verbose = FALSE) plot(m1.mon) }, "US Durables" = { Durab <<- window(durab, start=1964, end = c(1979, 12)) durab.efp <- efp(y ~ lag, type = tp, h = hh, data = Durab) if(bd > 0 & tp %in% c("OLS-CUSUM", "RE")) bd <- newborder <- function(k) 1.5778*k/durab.efp$nobs else bd <- NULL durab.mefp <- mefp(durab.efp, period=2, border = bd) Durab <<- window(durab, start=1964) durab.mon <- monitor(durab.mefp, verbose = FALSE) plot(durab.mon) }) } base <- tktoplevel() tkwm.title(base, "Monitoring") spec.frm <- tkframe(base, borderwidth = 2) left.frm <- tkframe(spec.frm) right.frm <- tkframe(spec.frm) ## Left frame: frame1 <- tkframe(left.frm, relief="groove", borderwidth=2) tkpack(tklabel(frame1, text="Process type")) for (i in c("OLS-CUSUM", "OLS-MOSUM", "RE", "ME") ) { tmp <- tkradiobutton(frame1, command = replot, text = i, value = i, variable = type) tkpack(tmp, anchor="w") } frame4 <- tkframe(left.frm, relief = "groove", borderwidth = 2) tkpack(tklabel (frame4, text = "border type")) tmp <- tkradiobutton(frame4, command = replot, text = "Chu et al.", value = 0, variable = border) tkpack(tmp, anchor="w") tmp <- tkradiobutton(frame4, command = replot, text = "Zeileis et al.", value = 1, variable = border) tkpack(tmp, anchor="w") ## Two right frames: frame2 <-tkframe(right.frm, relief = "groove", borderwidth = 2) tkpack(tklabel(frame2, text="Data set")) for (i in c("UK Seatbelt", "M1", "US Durables") ) { tmp <- tkradiobutton(frame2, command = replot, text = i, value = i, variable = data) tkpack(tmp, anchor="w") } frame3 <- tkframe(right.frm, relief = "groove", borderwidth = 2) tkpack(tklabel (frame3, text = "Bandwidth h")) for (i in c(0.25, 0.5, 1) ) { tmp <- tkradiobutton(frame3, command = replot, text = i, value = i, variable = h) tkpack(tmp, anchor="w") } tkpack(frame1, frame4, fill="x") tkpack(frame2, frame3, fill="x") tkpack(left.frm, right.frm, side = "left", anchor = "n") ## Bottom frame on base: q.but <- tkbutton(base, text = "Quit", command = function() tkdestroy(base)) tkpack(spec.frm, q.but) replot() } strucchange/demo/tktesting.R0000644000175400001440000000763613062350355016054 0ustar zeileisusersif(require("tcltk")) { data(Nile) data(UKDriverDeaths) seatbelt <- log10(UKDriverDeaths) seatbelt <- cbind(seatbelt, lag(seatbelt, k = -1), lag(seatbelt, k = -12)) colnames(seatbelt) <- c("y", "ylag1", "ylag12") seatbelt <- window(seatbelt, start = c(1970, 1), end = c(1984,12)) data(GermanM1) data(Grossarl) data(durab) data <- tclVar("Nile") type <- tclVar("OLS-MOSUM") h <- tclVar(0.15) h.sav <- 0.15 replot <- function(...) 
{ h.sav <- hh <- as.numeric(tclvalue(h)) tp <- tclvalue(type) dt <- tclvalue(data) if(tp == "data") { switch(dt, "Nile" = plot(Nile, ylab = "annual flow", main = "Measurements of the annual flow of the Nile at Ashwan"), "UK Seatbelt" = plot(seatbelt[,"y"], ylab = expression(log[10](casualties)), main = "UK seatbelt data"), "M1" = plot(GermanM1[,"m"], ylab = "money demand", main = "German M1 money demand"), "Grossarl" = plot(Grossarl$fraction, ylab = "fraction of illegitimate births", main = "Illegitimate births in Grossarl"), "US Durables" = plot(durab[,"y"], ylab = "productivity in the manufacturing/durables sector", main = "US labor productivity") )} else if(tp == "F statistics") { switch(dt, "Nile" = plot(Fstats(Nile ~ 1), main = "F statistics"), "UK Seatbelt" = plot(Fstats(y ~ ylag1 + ylag12, data = seatbelt, from = 0.1), main = "F statistics"), "M1" = plot(Fstats(dm ~ dy2 + dR + dR1 + dp + ecm.res + season, data = GermanM1, from = 0.2, to = 0.9), main = "F statistics"), "Grossarl" = plot(Fstats(fraction ~ politics, data = Grossarl), main = "F statistics"), "US Durables" = plot(Fstats(y ~ lag, data = durab), main = "F statistics") )} else { switch(dt, "Nile" = plot(efp(Nile ~ 1, type = tp, h = hh)), "UK Seatbelt" = plot(efp(y ~ ylag1 + ylag12, type = tp, h = hh, data = seatbelt)), "M1" = plot(efp(dm ~ dy2 + dR + dR1 + dp + ecm.res + season, type = tp, h = hh, data = GermanM1)), "Grossarl" = plot(efp(fraction ~ politics, type = tp, h = hh, data = Grossarl)), "US Durables" = plot(efp(y ~ lag, type = tp, h = hh, data = durab)) )} } replot.maybe <- function(...) { if((tclvalue(type) %in% c("Rec-MOSUM", "OLS-MOSUM", "ME")) & (as.numeric(tclvalue(h)) != h.sav)) replot() } base <- tktoplevel() tkwm.title(base, "Testing") spec.frm <- tkframe(base, borderwidth = 2) left.frm <- tkframe(spec.frm) right.frm <- tkframe(spec.frm) ## Left frame: frame1 <- tkframe(left.frm, relief="groove", borderwidth=2) tkpack(tklabel(frame1, text="Plot type")) for (i in c("data", "Rec-CUSUM", "Rec-MOSUM", "OLS-CUSUM", "OLS-MOSUM", "RE", "ME", "F statistics") ) { tmp <- tkradiobutton(frame1, command = replot, text = i, value = i, variable = type) tkpack(tmp, anchor="w") } ## Two right frames: frame2 <-tkframe(right.frm, relief = "groove", borderwidth = 2) tkpack(tklabel(frame2, text="Data set")) for (i in c("Nile", "UK Seatbelt", "M1", "Grossarl", "US Durables") ) { tmp <- tkradiobutton(frame2, command = replot, text = i, value = i, variable = data) tkpack(tmp, anchor="w") } frame3 <- tkframe(right.frm, relief = "groove", borderwidth = 2) tkpack(tklabel (frame3, text = "Bandwidth h")) tkpack(tkscale(frame3, command = replot.maybe, from = 0.05, to = 0.95, showvalue = TRUE, variable = h, resolution = 0.005, orient = "horiz")) tkpack(frame1, fill="x") tkpack(frame2, frame3, fill="x") tkpack(left.frm, right.frm, side = "left", anchor = "n") ## Bottom frame on base: q.but <- tkbutton(base, text = "Quit", command = function() tkdestroy(base)) tkpack(spec.frm, q.but) replot() } strucchange/demo/00Index0000644000175400001440000000017313062350355015034 0ustar zeileisuserstkmonitoring Monitoring Structural Changes (Tcl/Tk based demo) tktesting Testing For Structural Change (Tcl/Tk based demo) strucchange/data/0000755000175400001440000000000013062350355013666 5ustar zeileisusersstrucchange/data/PhillipsCurve.rda0000644000175400001440000000765613550277443017175 0ustar zeileisusersZ{$'r-m(}O+B,T$ۇ$ d-!'lMbɌRWձ^Dw9<||3{poF?#!ę9g~ip8xwɡ}#âqi;dG.NfƉcF[wjY%s{0JۥLˉf{/LڦoL$1u7ӦImfZ_U3m{bֹeڪ|cwu 
9pGse9L).2Ϳ0Ӿ<ЦdZd1^eF">ɴK1찿i0iz۩,9|趼buRf zl=faqoq\q@?ifAF㪋9Im(pt1,mje ]#3]> q/ ˬ?? F [>\X>N\sSL;.m)2ט%\#랼=e? zfX#f8#&\ f|p‚YT$$sڻ|ϜĮm̜>HէЎѴ]HQIY]@39N'ډ|KW51}vn}4遐t(}8ӥ!чxe$}̛>hveq^v3^Zw|ŏm;~k ޕw~yɝrz;0pV;ЫcoMMjc:Bh7zn|MvV/h5 \pX8ѼUNJV?bYu뾝;1ϔ3Oؿfg0π2i͢/ <%}"'}Ldߦ{βl8c{ y8=S~zm%Ђ #.ս;T}D@qjLǸ}3#{aSݜh/n!=L':3zk7F_ҙ^>G|E xȀ1WFpM{kf- j1p X04}9Qz[kJmzokajeo`bװ`wjW[\X ?OQx^!kf V$H3 $W` 5kW{C i=qqAP- l: /!?CP[!$z0\{`Q`P2h,۠+4 Q#j}t|I`-Tg'rvL;f(I\.ٞb$`]A Ok'BfuB)IBN_T KD?# 7:]qO 8&t-:q@h"(fa#(TN3sN&yBNuQr =3b`Nϻ|XH;f_^h &[K`lD<DosoAw T;ǡ[p D'P2D˱}Htkf:O&Fh:qB'J;Bi'\l%bXhފ?&%ʺK:d,&/#ylǣWȉ(aQ*O~}|B Q[!j!uޮf\՛2} )qҹe2Oy3a]ʼny`l-BVDYLd^P< ͬ!MVtZ: 4tT=a2!?cʡ "7 mnc H&V`=uua;cG 1|XN`%2>ݷSגHq@8إ8T)q/B ڻKh(Ct}c\u/9U4iBu/~K7 bA[+{+?XD9\۾H Oyq_?Q±G4w_vTDOA3 jDYMGҋf(Y}Ȝ~+] ̐#RR22'C1 dPàx8xhCSt,4|<1x1C!&?hE? &2)K Jΰ( Uxv}q9 (Ú>&GݑQ">۩ G]u\u *vxOҗ5o@^k[tM%x n[:[T 39ԶR|Ca6sP/̈́'n1GBi`D~6S)DϕJOwT|ٝf9q|?Ƣ!(]P!0#hZ*u&IGyuE3McNR'J*^s CD(^\eQ<a&6=B/ޒژUU)hM;]?JɑTI}:?DquQ%G!Hՙ qeN}SW7rA 8vq'5ߦ7:ЬLmH,Rt!/B򃲃N%S^o@[)" ~*^ȬŃ /^3qsn/"\ic"strucchange/data/DJIA.rda0000644000175400001440000000221113550277442015070 0ustar zeileisusers]mHe_j\X-A i}Ⱥ#"le;5s+xެty߆9V] *#ޠZ畏O(\ ?إߪx~R/aF=?itn^σwEɷQy;'^⟑~j1i_%Q Tm/^9m2Ŋ35G1K/i̋zg~槛ϻ7x[^A bzTI y)?Jp+u/- y "?[wP߅[}|Uݓm VϮ͸w>= ?֥NIGuO+`$XT'2 맕DzQ^Ram[I&`<~[*7k>5F7yJGɣTCa_xa9P 4$9߬)xA|np.p7sb/Ͱ fg9:{pIL?$O?SOߢߢߢߢߢE~.={_~>}?@O?40/sd~ .s >sg uS$ÿ#HG~ć8Ze!>~@!>,b>y|?Y?NAbb.,g??Y.@sus?_?yC$OeK['pfl y!"lK'v3~c["ld'8g(tQ[=nfiF* 9 |-vBsf)~ l2^Bب τMJ u ;>[]3H5'vch]rywFmR癵jޣ=a3`WGQ?;Wd&(}Lأ)ao&Ku.ݯoB x4=6E@xnH&atThkv_] )YߩsyoƜ}uX7ЃxbË<~} { n frDd?B1"a,ǚM w]ԆvOSANd.TG--Y>2tɓ!"eO̢x!UFTS!sEL+4w`QעU&߇l!,Q_06xt4F<'-O(51wAJ͈=. &'|F>x(]xR]`❯|dU`C+-~4އ_NVK|ܩTx.X:Xyħe">ͻ狽J/f}ƫYGژE|: C|.щ'7WħcugSasGm9-M|~ځ?4I| F g e铩'[Q\|&r݋c_\X-=ԥ+,s=w |.TH5b-&ɑ.'_X+V%\=M%Leetf^u%a.ZTxZvE]V9M sJ )o&lej{W^楃]Ljھ ֹ̑7ZW@T6'}T _) ;Z{.O6V`TןSXXd;,U<%lyr"l̓ C=kvxR;a'i30y5o^vZ)?[`YXra#"(Eu1Ian\ 5 #p[zM;"7[KTHm\ҀjYR]ٷZsZֵ oy>tO-(*-w-ӾCJ8lܥlәͻCz*J1ã&-V"(J1T dHhYG ay/7py ;[uS Nyr]ETšzB{fQvqK_| _=j2>:-ӡŴ/ҩ|kqe\1<oŽ];̎;r1@Lu@dYKbVz?#çZNV rK'-"nl劁ɒY;);Q,=>^+~erJl_'+_O" Oӟƃ &έBϪB9W{k(8h)-MH ,VzpL_5-Mƭ@H0>>y|^kEM-4~]3uΜ+7_sUI zrJc a1Gȑ ($3D6p̑A~ +ù_;1"|CӝRm{[*7&?~fDS$;Ex#y3ԯ9YA*p/-Q}[!w{ Dgw}=yd"5' mP>: ѕc_Bmykf'"g E4хm*ux|\7ϟѨ_ P?LCo6zxZ!{PK?UՈ_z|ذ5ixÁf|0u_PSDלנQ̲w8ʡ+gEU2U/[ڥ#چ~Cל;F+ "\4Etc Ouu|. 6-At|^\7JvZQWH8NDx2_;(=/t,B^_BMޒ\xã.E{Ҙɖi (HOB(p )Wݖ\/0GW.8x9SR~п[2 8~]}t~2o8}!:5w?b޳GtPfz/bѵI*j_ }f7(L _п;Z2‡ 84?+ z29`r]O, cQ䆯DXt~)sg# OB{^]L_oe]9a>򬟊8fW5<U>JW衱{7#:5ڏb@_-BVR3gF9 n>ꨥ,^4/SWO踄M8pCɥX3R_3)f"x\DsFKЕ,>`e'NLU2ǠӁr]*O߁EO |M+dZBx~;^S:g4,wYZ^fm烖 aQ`aRyJ"Try89 +X_A< z&V=ÙF9o]`< ?C%C}ӌ|w~תAtJ>9h[6yroW-A?\ލpb?u12XG8[ƑqxGcVe2፝Ur@NG:/=oSK:"czrZe-Xe><^USiή6 ] h 4âxkSj9E0]h!M;UQUi =C7#ۍfɜi'n/1ᘭȽ&寅흍Te}A>!{2 D4'AwuP:gL;jG!=gFT^I|͊( Z hXJBJ%]Pǃ0{} g+aNOj?> B-F`-7@ͦe/"ʹN}_8j z#_rgC ]rc<XAC7-t1aNkC\wͼ $_@wtK}= чL%` 0W~؂5q r sDxiME%s1]9j~'b>Qe+*_qbk(aO)Ba)/?!G}\Oۋ\K-_sB;Ԛ1џi"~oz+./1b ОP>c=t<kn A~g0.RT|~>'#o}<˂Hk~Eqt~;䙎p\.1A53ɏrrtmJ: ^3' x27! 
фsU 8<˛Fj{ &#z0Fc|0]!;]\ƪ`?{Aۺ+Emaܴ iƍFKe?RSŌOP~}>tsk3L7/֊0^gx\QQQ}zs[9+++|3{y +++|Y+2kUSvU>.ױ㆏nw9 :_@1Otuࡱ&}>tI')c{B?}|mX8J'Lٜ#U?sNӍ }m*}vX7ZUs6Oij58p~Ǫv[|O 8ۿ9fbZapLULŪCA1k^,yqe/p[6`UϮdtD0P{džp8[gw^pqZHh3rgrJpmۨjt[ڂJ>jO g8qPH>#b_lㇳ}:֠|zMgT*ҥoޠXת=Ojcd7V}~vNkk4!10޽NFowj< cz0$LoIcO=;^3.3}Saq/w*΁AOLJ\\g^ |MΞ8ыpv׎.ڏv֟UyTq(D4͠ `U6ZUu3 z益U^JR;oK*}gFqg9׏pj\Hu>~Yܯq1VeY:1!ǀUq3s{`c#1@a_]Ǫ3ܜU;ꁓ1WJu_ǥcvv_%ڄU?}Ovi~l>pvT#>~]ڕ8׋|c'QS; 1)WzcU_Ͼ|0}#>ݩ<=z'Լx\ק U wNQ": LG8=* J@ >q@" :h(N{}0{ X|+ Ȝ{h[  P>  _> 0}>;ɀ>}|ڇF|_DO@ B_OjS$>y̘} MOJFFiMm5 4OOAM<0Q504<a3Hz&M4bbi'ͪO*T&L 0LL&F ai00F!i4*S#M4`&&LL#L`F!&C&&LF O%JR$ PzzSyA~S'4z5Tޚe ߪAhC@A)JJIiy骞SJ1IOdQz=PF@dMIB!4`BaM4 dSMOQ)QOQz4m@d C4dMOꮿ]B\clF,HZcbmMac 6K]ME "E"yM٭wzîbR&`aZy#*HfZ4v(2fbۖ4AۦŻqfYEZWzi`j 6Ke4'imil̂"leYbGe /fxX*U"稕**^F!fejzi%hEQEL37#j)MBL#4$ eTtlE!H) qe 0 Re 6d"Eh"r;V*RdIٴ5N' qȩdqa (#T$TUN+B`!DeNgO tĐHXCm=`ڌM`a&g KpEj$ DĪ&l$@U6HL,'d2nXgeaViF&PdL6uoÝk"bUYM*EO,z( m\:AIg@5hѢ&%s23O b9 e vb;6Y x"٤L aT'$X|u{1Tb6SdD0odFdT TɌ`mvam؍^HbEDX5rj;=< *uF\r$ 9x`5u XIkɕeZBՌE@,!H4q"gnkdT*SĖ=Y+RkuY2ІiG[c6R"U-2 jzWU&*D.+5i9&a2 Bՙ=ҩOı%A@%ehpYL,ʛFt5 ꕣ%N|ʳMBSSPUD53t@W#@ƪ9FT&Ar$ф*A"?*RAHiHcG`@"JqŊR|C^B23S!D k^pME?7lm ēk-w{D~zGą/|A巴UOkn?IG0>RfBxQ~UYّ_o_mQUǎȡ=ro g1AZ,ROT P)EBjDsr$ *?eY?`OOj,G#c%$EE(zSuJG7q=GCBc\!$P(*_ye:9o$$w @>p7KRdCdI dR].֭O|Cn.s`:R%'P!JP(AҔ Jt;m&L0fG&6o#l]WCHs-ڴ)DusFN3Ӣӥ픞F0(AҔ m,txtҐ94j=3`tw 5v7|ˣK1XX9uGI%Z*mM%R&ծzX׃-ڇ~>-o.'pJS!sZDzLkԕo\nAoqt[UhK.K'K%3v?'M>497U 猄GD$ʓ^=wN9;kyheBE`v,麽+=%qq1N6389*qϓ7jڷZd21X\ERdȪL2QjIȋ1XYEbfD%00kas ts$ 8$8p ޯx:yޫO{oy"UR+XP7CxAPE& U:ooVd,mnnof9`kkɑj4E7(2dp1LHň0g葩B ${_j:/~:>Ֆ=J{ǽ=i7z޷zǍ!}։޿P{ًwwkWG|;<{5=F:UγyPR$΅|UvjK'.ˋ}^:񶠷u^7}Uv"Sޗ^Fdr^yƼo"a]{.6=mzmkzw%z,>bwjsӚ+Ss3Q*)Yjb{qb7 ߈'}gŝÿyw|;x9N#$ CRsY}|v&ʐق͐&TT6b;)a6h6T2=pCToCa8CnZ "C;rPP\dbuVM&gnmIF ۛpPc 6]5h_; lWScr &51^1 c jĘuqRIXK2abx07FƦMMsI $?oO??_j}g_ ceKн1a5 mlT4WYCj=6-g[,$(,vK|t %JG(t}#YJ'CҰa2i~|;8$=S8$HP G F˜l%6V6^Y2jXZq"TH adˑmPD%M=.W9B2r#9(fZv4֑r5*ʦ03m8@{6M7ojاlZ`&(VPL"h%"&-row!ӑn5|hv nJ\&!ύ Ė 5|9R.GA`%<L yn>ᩖs]⢾hB\4y\ .dܦhCZ¡˦ !k)l/u>.:]M7?Q|;U9i#6XYz޶!&yNs8T$aG5!wQjԠV#axp.WU.]m(t"/utY r[?cw{׏92"SkӈWqpdwc}.NXyة^*Z3ۮ3֗$XPm*rX j:6 "Ō|f:Q9 dl/F4:p V5r !"~,a0?}UvRIMU*hU i-jzEFb3W8p5Po#WVCDӥ4*颕6ҫy4/vH#i(3e38) l;>qC9C< g˝2ͻ|ۄ:/[/ŗˤ/a~_ 묗_֋g?; ƛxQ= OG_Gyzg̅+!Mڔz *i4x%fs^kb(Pq]hAIH{|@+!n/rdNв:*?OX:ylg' gnTA3(х ܰAFN&AÊ 'I#'AIF ZCapxCqN#qGˋbB|t]-OP6Ef(ʂ,Bɱ 6T"͓d0hl_6Ow1.1a7iTK<5'(‰NJxuM %U'^SCZLL3ɞbs3/av$\3K>/5a!LdƲb@ P)ME&'V8;d($?$???QgzOzu!}G\v=z<|V+7`n1~Ū5mG>\Hv=U ﴃ$t>S'CꞿpJiN!%JqqG%✐"'p+tm5s 줸ҮKj)v{kri-6SC[Rlͷ*Zk1) .fէ$ e1v{v"ñӶU8trl4ݮY2;Fra4re&͍ͩK9ѳbHLÓ).g&Zi%Ȍ\QYŶͶ2"cm=_ٯQDȓJ_F@~}P$|EJwHs6G[k V;$'.enn[f;Y!Tfˋd\TXށnk\NǗhw:<2gBAŗj$wcgssEsP.pL/eK7w dR=EIymǂbsa8ֽk}n#F~w'|~,ty-qq0i1,ӡ Uǝ88swA;;,brfz08BF&_L ]q|&,_ 8ՂюUuk*-XNZXDc{AIhF@H5ǻwCnQC-I 3-IfS "60.96h, msB, IDXmܨ)fpU 3}>eͣ? v C'H N}0]*E44˜#'*&Z+n@km ߁t0P7T74* (z7zBKZ[E O5`kCY@&!@2u _Z]hu[ZoxBB^Ԛ{Hwy:X^~oWx~&#}4N['',*yg-WjhEcN[\X<*)^ GIծR[+BR×b*%^Yj A<_`E0#0 00_ _ oo%$T*Aa2\l)PV(E̋J,έTZA_m_x.]<mc2jDEѵ bdOi"ʼnAXL*݊fl Q(+PݪP77dfs)h Ht&02@ ud,f8914:)15U1ef{HIz $P}vv k< ~b8 +H^{羞{羖ru{Ͻ{j֌.=OyXUCֈ~LǫrcT|E"0Xyd6-TyJ?Ry|)>,ڬUcTr"CE\p $?sep-2ѺGvʸq[Faj iLڡUvӶig5jK+:1-jvP';`(4ذMTQBDEi)Ěa!6r]ζÜ kΘmNY&lg77QBy(aII%`q">^%KȜNd)t*lf;7ؒZW^J*m ;rD mLrbȗiTf7s+݇E9^aYj7kPa@DSӱ3CVbӵ([s-Z<9{ÑjY ;nl.Z̾n7 z- T%xYh%g˘V֝m2^Kφ}ed.2)AӔP7W_{c2#!̓BLh|Ky?fAD{dQ! {Xժ)=DgAduUр,ڋ? 
NIAa{3߳gB&v,&rF Lp!:*~@ ǏZy'+D9ԇ?50@WM"EX~ZڳUiX, 3Ha˥dm3?o'&9H9NK&42%UL!Bd`Ԇ@ǐqU[&мKŰ͝ѽ{SI]/XEHB튽н)AСDQx3ڨUwXpTIxdE&A hPϠtTeEIh]AL:&obޭBÓPIKHZ%2U0Ԝ<<<<$Y6mŒT6mKfPmCl c ͤKhÇ}kokk{I;,KZ* QMBA2w7@t ]unrAt$@IvQV$ "=]-B"7%Ld qj ͵d"inltp6h*e.[h[%MaOmC TI 03m:[c&$ݢc{EU)MQJU*5= 10"LORJ-)-%DA% R=<B](H#2@#SCy*:{m()y(u 4yކTH@!gX2+8I$")j2SUc Sg 9 Y%gGAdRA"'(Z3+]N3k#M$؊AL[UK@IWBkGK9p@tb]P# ;EmJBle뜉oxfʬqx,k%R(% ^-Z*d3mlFI֪؛F㊔FPۡ̂f(JxT)-ж/ Kid~(UAu<С//T1ŜTZPipȤeM!p6L2U$TuT`J3Fue&ĄsZ ^lɵ,cV*d0S4DP*]贶Чă.Vn2qƆhZ{ASRI֭aTW"f%!1qfTTpT\$IƜga<em)F2qdֵB+$YȔ9yT"ۿ(Ɇ_OB%*#bdb)g ]*@ XksQ?T-lj8*"8;4)ʽpbs@ȶ@AQoɓxM"T5r'B"= ?qB@ 50)}@!"$i%I",&G,1RV$j.A@^!JD!) <(2U 0%AHL ~Ηp*:'TN8__ׯb:cSSOwS|Vj}+Z2(zVQ2|YDЉD'+7|;ldg6G==#=DYw$xRO>U{o//7/#x쌢-C||Uj.ߒ<.;T/wOtw⾙wwҝߕe}%}!IwbV>f/m ?H{}['s^3JI!'Gސ3{;4oo]td $zP Hvwj 7Bnڪ#& &&6v؏fv;h$RԕI@Q%^DU:f2p| 8*oWRmɷ&Ґ!Mͺ v;d w1{XI{[wvj@n0RD :s4t $ o=>_FnCpRn)7M$jssmCsqw'p_ `1 ,"%񹷸^$6= !@PĀOXJx|_5x)UKxmkW<"S1\VJ͛F-QR<"Q'9lF-& ",`i8+4IzUi֞8xo! ^yNYp{yj^9# A`C`"T$R0=ĠR("N;D 'o $;j)$qó;GsB]E$g٢,iAxe& <+7 ]xtp՘W g PUXgYC83 g .s6zVSgXg67/ v] Y¨0L00p ` _b\ M;. $dq~+?_z~Ux^e/a=z5fe"{8o ]=w}s/SޤԾ#z^_z+OQu.޵oswY'E)=誥z?`|슡ENG"t~Z 8@!$ +lwl!QlL6ݓ&l/awU*j睝dž|/>FMm"ZU6fvڶaȦb۫͊h[tmIf咘#"5*8s-YΦ9uXR2:7ʀ0YZ"!2X<DDXP?69Edyb;$!hȰT.,op^qMh)uO $'N~RnP3-seMDH ɶPɸb'$RL+H3u.p,r+u;#B\%usg8!AL׵|E1W&@|dʜpuo $ M;oJ<X@4y\i>dPs"ޜy{ |oL <;"Ǽ/>Ґz0؟n+Z1~!Wo0>SyNonf4 x,WwFɢZ>1x 0{ʩZĝ$|::Ҧ^/:&=Av#AiN`s.WOH럾uF'۩!;kQ0?D9ж8:|WMdOiՒ\@@u '`w~_gqhq :jIh"+;ߥu$߾ ԳD?K~HZuzۧMpԊJ?+C_{9}GS;ߌd#+5ck`$sY;:p=Γ5?r֜7޺̨.R<+D/9=3[&0vEUq,,4#aAU5!bwFXQG%RXW4Э1\=%~z~k͹:=P~oNO >}!#+8uz:=B-d-nBzUn7#KߋC͒ig8 x{BN8yYr<Ki3I\} 5 Ύah R 9~{MMO5#LFAPbk b嚭& L%Q eY =]t̢Xjd3s޲8 2L .# d\Gz8I-,Bsso8t\SdƦp ܻ5~fQvyQKe#$T  `Um0jn=#dLN/c\;E_ځ G)pC{iaHnB8^ BP沱K%X(؂ıeʿqD S ZIGħku:{x8Y8;e '\@؇P[Qkվzt d%ihЯH09L$ 53fMΔ.IE.uI ph7nFN\hRͭ:}rl%qTWXN* tP)=J{ta*>Piqzx$rj1ts9gyRu]1\<"qEcS]y5'33o!z<>ɧ{N*_ގa..Rqs(=ɔ'Iøa4]j#*l]q *h9-KxV<1N%`?l1 rYC"4hș"~7#FeWo;1t_\%n0/Nr:eYFX R@3-: ql$r'=E0\xuxٲcA!r-$" RXY)C !wPP:ؑ9r-ռsQ < ]$ Mtt=; -@JAQaGdMGvq-R߯RSvUlseܑX2'3/<;$7: = ٸGһL~?2 a4x/EVbd뜯jQo͋:{,H/MSãCq{|4VwSN4:P(J>8;)m֥ "!;VN_Ols [0 N)~]F\|\yrڌvPLEzM$t'>i>-y$]P9Ӆ{j* >ߪ's~fXhcWBRS A?>*Q`MQTΊG+ĕY1 2YգdAM4'aIml'O#^B^+#(h쾶 "ЫݶzKn%|~;ϸ.땔0hiF#NjGڝ2K*lG<)%\|a/.)0вbGlܨZfɭkZtxFSZA=,Fi֫icu=3^][S϶wTnFA}+FBJnpAPO`[>(㝗Ҭsd)$8%&)Si0Hcxd! 
JiokieZ>(rkvא|,CA_>WG!;+$oFb$OpE W6-e~i(J0s:0f'MډM' =BD:2cj뼪;Ԁ"l40:Ζm[K&)9cu<_\r kFfe}ds+'@UCı|o8c~>- xWi#:2~)$G:ڹJgП6P$T1'mi9Ni򘹳඙zH3]߯viD2:$: iN6BV?=z8t޵swG5@K& 6 :+8{Rݮ^Z@ 4{{&u% ~:Zᦁ%Jv+B萳 P!7/.EF bq3X-jɜvW [VJ^ՙM+ JrWTŽ\R:984b\Ѥ8yY$`i*2I՗^J$O}?\u%d 4/--I _ڳ-Rŗ=:vejvNUwVbޞՔjNP IZ^aw!ۃ/ 壹:9&>]Fr/4֢DOISĐA:JӶ޶Oh2ԕ^z@nuS+ 8b*zYaC7rv-\:XnԄ$yL&r\e0b.DAil2R/0]pzz ]Le vPn|nP*jCޯ#^)X̅nj参">sB:(3rk51\1i<_TÒ׵yA/.We }yA1} 8b;5U 0턯 Zav@8rdd<` }P!IuzdF*IWFָjcxg "INFEq=M4+gK!]d\ZQDzpԚ8$iNIC03ͧ| r.sIvBpi% w1$T =rTYmO]ޙt,Ly1[^GEG\EBUQIΣ5vShaoWy1}飤[ 9;/><{gٓ9s}tj DmnЂý"w+}sq4c̣g\(`KMFR:h &9kNr>^=, T#k'ۍMTiB]WEznSY?8JJC>BprU"kU RH[nn,t&=m]9ڂ˫R/@CIF E &oehi";4j1^5˰N  )x㯍\;r?+)k\w'6}xd8!C)ޯ7-nո-g(zY=e ;j+ Ҥg\gb*vpb) lQNR+ R;-= A5)MlLjhdGu7|%I/F|_glڂ0p*҉+ܱ:6Iayh,}U=@WGs+ ?&"{ص]&;nR͐\WL7pxRrY/HLeGVզl}E+H?ebigAw̫O9C4Y3r6BZN[hC|UĮPڙ*9 ΜW&HӕIg}8\bxKO-1|ב L\cCN&]C dΛ[hB'/f[̜xy Çpyf/O4b+ܞq̸͑l[A6 Ȑj;n>[u{ LEuSeԁHfCؓvmS 2OySݶ֋GcƝ.SFkTĬ뾿~5z0 S6?~'!g6;=䡼!7WY,y>k#^5c "uadJXʐџv -R֜I(R1s@tsf](i])GNDOk\^.U㋓EvH՚_7Vsl8W k(Vg*fT`%v:l[Gϴj~|77co&0f߻x vthQt }G2*=cҩVIx'G >rJ_ZGTy7 ?[@wлjW{r x'ؚ';WN/bDsWnVow,qBbrd)oC l aDl7NCl VRJ6Ɛ!%8P=u/|>sc35Ѝ {/nֱqBmA LD$]!"OQW\xtzN3?-hN PA<\.G5`!p:*\=&Ah{?ҕ %$YDwš|DXݲ\c[Բ&3H?JˤZί/x{{c}ꋉ4;$^Q 0iN8OP} 9O7 ﳣd$G,7 poïT0-(zGA5a|qbP̨ IA5D9L̝i>J~F\pKDPk?EP@288aȾ:+5\mHRkkd氡ꖖR;r gg0 >{q+=!(&5Aa<& P1(Rń#x` &V%[hXO1g'7nP#lEs5ϣ:RWt}Sovroq>֝UνaN3m,; 'jN?5.\Ԟ] 9iW^oz:h"nV6(c[VWL/<0i}F=ѠR[KV')eMDO^4]RR9XRȝ] |+;w3X;=q&)G n[f]uC/ηQCQO"nՙX-Їnw9>VvqЕ9/S"~1dBvD3hKb3GTuDP#UۍPesaA63\bIVn94 ^tܭf/D)е{j: (FzqSz,: 4{xeyS$8 5yz4}hLKǼls>L<;]@@k&vua% ֘8Dncͭ;ŁdUW|f~SɵI : jZ~b]74R%j";SIR1,^{ ~ 8pA Tܝ/x;cgЉ6dsWG&rXCJ0]rA)@'|yE v))F*Hlې`)ze9a@)ءmTO`-ی=q'իlnL%D+{~:zF|$+m."=rU^Of8 $/dgߧSv k:D~GNIq_2yWnDHD)K^9HQ)c].yHZlCz|VN,M*{DV -rcu X!NxіX^aA4PmgNp4{"rMTb:3;LOgaDJwml3F1l OF vl" >GEjC$z3[8UcK-'/GZq֥zza^uq|Ny$!"S0q>P#g}xjowשj[nt̥KݱD8T!'nJy5w:{N0SO7F@̘bAά; |lX:0Mw7zykoQ6F!*\I]apNw4O ZLc[ost|>)Hq+̽hs~ZNZuDmPW<?uEl6(ʚw&[`g<ћyFp_wD%j=PEFHN:߷1&S P{:һ$3{S0|l LY2)r;eg×Σ\PkǤ\m c .ط#[7˫Sryz.%7X2w@QH4i)X;j\[5o`V>&FZ,i7ʴ3MAiq>=FdS= AwDdC)HB" m]kױ Jy'zPRMST;mS8t2{_--y@VP܎T,p3`ӧ ރCb%/ǫ]uʼnZqOߡMjWNV%FHVGa qsj]mwط {6ևCnnĤ5mRQբ3Kz»;D 6/ V4JaDZXWa 1 +sص޺Es]ζz5g9I /8%q`ŁmyǪWDuN[}j$|0g[w@ZXԊ]'n>c%syٖܸM0OP ~9 rde>k%\PտBSQ(jP={{"瀗Xk,H5lzF;\|'xNm)lyq1qs)9Tڹkm8scpHÉ-Sw>v@](vћ\I{rN bo4=;jQ!_ஔHj ̀~~gf/Z{o;uڞ>o𼡨4O7Z1$)|)-8G.}rwteE l 㹻V.b>Pk4ʹ[) ./c46NyF'(Mİh"Ɩ -K̴R Eر>50VĄ4}=iBF[;ꍄ S\! 
(>J"QýPv*̈2[%+r(ՅIv=z>IoNH1k7j)J"ߌyeԎcZ[ (*Ic&NJ9A}Je9ӁJ2VzFJrա+L`'S ,Z^dCS&hZv&S=#0+OF$Zq ӯU"odX{ҵQ}n(-;ڬw-$ t%^qbL1._/>%e;^vZzOvbj{Zjk!-鍫GiWOcJ 7s_ҥb=1lꐕtY V)g>p:uܼ 3JZE4DF(Yށlhbdt]V%av?{>{CP< >hg*qu̽^׃!G)`kcij|w;8Tyt|OPu&iӈ @|u dakX:"/a)(u@ A1 eS8̇ :o&A!I<ȋ;CbUK+8)*_Ts%ewOȓrC: pͭ+\DQ-}||8w"u^2WN7VgCxcz\־E0a3{)qp8FPI0;n_Zқ}ņG1ʖǞTrj׃qm.`Tpr!^& \BX7?1 C~>ؗ5RpRSVdb^RX6zngKZ'dIaVzi6ޔjS4 *Qemyo5{k\f8#g=4pYTg *i@9%:.o~ aJ2;s(&> +W`JG 2/vg|\'H\(Lw%T.,~ H62ʩ):#߅ y޴D/Q p@G!ABZmilx0;fXw/B"mTYS%eiYsʏQFHF e Pd3q9R8k3.Dn2oI::̠N~W{/-OS8A+UH3B7\-yӂY9lI;|nIXʔ%c*X/ӞUߠˤ1/#-x5_=LWtݺ >>7,r %r ,˶V ٭[㋼|-8\LNvVյtB+ʾjSjAA$-\*QLh %ԹMˇpf5VӕO=)%r\*[IJwV<3: X9z5݈d HR)|M⑏Xtuب0|uRKO:Ktyv g`ʂ#TtAeSr/ Ѭ$hjG0P1Bt!ʻ3UnSS{?/uss4/Y寯iDu)Ts"5^@ǂn M-1; w%8éW?JIƁ(\4ԧ;ZJqNc|7nf; 01ɽk1k ?[GЗLA~Lm+rȃ"8x6h,q,Z)~gdu) )fjcqeyqNPgkPғI<B [==: P(19Y7}}29z^^]O;RSqܣKpxGFUީڋ'* 5u$D/aq+&Yw$$BF aLy|.61_(pa~X"훜ސUo*p/YRPeA߷mr>A1ʄ%(޻(L-zqFcJh E^NvŬ^U^ }ʔz  0hRJ< |;$ (f\ϴyR UMI7Зj\|RׂWXD_SEޟ ^P)gQ Di8Cvc؆/g?7e-vB=t<|1ims Fl1(벁*-@[Fx~M:)މVhj^gOĺQ@nSdSE?_]ֽG0Y)yנCh$Х&6N|2(خ2ׯxK5*X/(rmٺ |ksܜΛy pC,ԬD.|% ޫJ|K6WwjX5a$ a@  {ƽ>:q)7[KF[-3,o)Ih6=avo\E{Da[Mun]^R&N+"_>Nt{Xw6 /48RkRλ:1c2IEⲵRsq0n^$фi=ZO 3@V"ّU!N?0MΦ^Bw&.i;8wBq``k(*l݀wlNM>u],: S!xtMsHALJdD}l%Y]v4]^@HAfV:@s6om$ej﬊a d+\}TR$Qºt]g#qus qTKР?Os{@}3=_?& EJd4VCgdu1 ~e0``:S/eLM<-d<%Qt#߻n/ѿ%2]K=q#xDf[tlF8TtpM@SJʔq5/tu\lOIY: Dq@pΘpzw9Ÿ:tχ 0)[Qt\}En=Z_YЅ@=oqfT"M)V9_'Q4+ D!yXi{)D/L흷Tb'1M ]Jt|[*fo30);)9`aUCcwsа!kQvRwx( -C!:H )LQH:[]Cd1(^^pׂB(‚q$'YiC:C'D=ĨIY9$z@EZKrރ"dLuf SBIUX5)\@5Is盈{f=Љu*: fÙA!Aד(pS[Xm =xL$)i-͇(ǨJԶC! gAb!H07r Hէ'`<^J\_H[0Nj (_x5@C?d_C%HHٻho͇PjFx~Vgm.RwuZճ9AsD~CN[׎#2XRy̪c >nY$2_TS6zV;E`5vmFGF{1T5Y3#"}rEP0aŝUz#f~K\.oK](%y)5{f 8 + eIwǽޡ BP ٪>;m1>pp:0;o76`/r+G:|Ն.fQ3dH:#*)eܚ1X3Vaa7z8DG%z O͞[T }ElȣSxDJ,D!z+h `jc בIĦ6K:>9]~NeMN>i*g?'xq:c-Bx̯ y 7zj`uR|% ByJ@҂7JXIatV@Ke؇6Lwb}fņ6 g)ӫ'bd CKCμ`ō؈d3U楦 Tr08/CQ4!m.x-sbGb=^.O0(ޞd9D':?xX^+Ɩ!2j(*Ois5zRG {j:r00@w_0#⫉vgfJt!nZ ua ڽ5}UD@ 8LjA$9 ƒim6.m/0{TAb"刮mмK3W/)O!0؄+AZDM ]塕̨ ߪЈ?"RyOMf3Ζ +? êoVWGb{|U\[ GhHKWckU1?vZ\G\D*nCC V(L\ԡ^׳c46]Zc{4|hO[˜Nh>T^$ ܹ)@6ҵV~'\f' f*OIݳߕXtwCSiQ=;1mR©sJ2قiJ((C,_n='D\ϩaG58U8vӣ5qwxNÜyيtVoVϜT#$ڷABIҰ({pPZ%>'L[噆CM(Ķ[M#ӏs4SF fc?DȤ9W,b"96<(yVÇ֜ʹwnY}̒Wyl6^-"WRaɬBSҊQyfv~'V]Qͳ2-Upb0 ,.L":[y@t&]^c ȃXUd[CU ER8P Fw(t͸63:71ѦpDk@5>A1,Dk+9;wRS6*JNqXk%mMB]hC^mU^0:\3Vty`+[,w\N-O м)}$p˖sqk1)*}٭]P~޽+൝w%91v50kzPa8~+j ȥ}e$ḵ300(4lCF 8L;۠e ".CULpܑ\֭.ܜ`ry5NZExPH#"9?9 w\LxDឋ20wEY 1 8(/T_joJґRqC74g{t1U҃/5a9!ihwUо/ dN.MzìWz3%PU6^'ҡњNtgt|@dP'n72㲀y8{͢ 4V/(P$Бso>sTI C!0UMrGVy[ƠΖq,+-8~B(ɜnffDt9RveWv>?Ѝ{| +^tuÉV\ekLڞTu,pff:g%1H۔Dg9f+=y}ܮ'Ljh..bj;L\i|2 v ƥRNPv O%HXRz2"15(>H60ԉZe٩2s8S:iP ^wU4D9se{P Lvj?Rzc5:rO,Э ӺoGls~ =zӫSC lgGhkly:h[e7Ϗ'z+#ܒ cM":>y_\x!p f´W譠4g9{tdNUZo#̋ʱz kv5/Ń RT)9o$PL;*zP4eZn l\!gh_+'7(= {5]ހ`0N=exgZ&8^<)Wr&.gMK XZ7EoF.J$0=]ڙղ8Ń'5Gj\$^\J"9NrOXP"b2Cxpz4$五nAtN^G4,vg5)'?O[S+(]#X*n.WV`kL  K=5S6F*Cx[4Jt&9B vsZ΋ k7 Hm$h "O$"Hh0=JWvdy O{Wڪz & 8$3)1=,z&Y ZsX#sKYiP;Z˂2͝-=٥Vv[.3 'VnzaWԂɝAw$"śS ȓ'*R=w9@cw<6ȓsץ8`M)Ǫ`!bJ<|w)S c-O$ S߳iF}5V =E&)Y$Hb̈ٓ. on%v&rF*r.<~ 'WVBi?@SH[Sb͈(5p^oZ-=mzȝt -8)U+Qm'4ұf;! ЃPyw' lb'g.\]`{#Ekj).w ],[X>EWluA=R;  G΁4\ķm+}gfY`?xAslРY~庙FSPĊ|C[=itЇ,Ż@ʾ3'pط=-Iif^J<{ ^0kW*ms=ͷz(y#4 ^GI^1hMhIЎR.M RÇIf4iAYˑZTbI`]!XG(`M6&%2 I l/莢oDhJ45'CCֱveBHkڗx/p^&E}`qYTL;}2 x~xzZ46Zz6 4,6f=}#cωg3mP(26$ʄY9ly 6AYXd-+a,WrV JҲ@# c(ҢlQƎVmTzSco$z[''ʴOYwMάDcy n6QPpAaӊonpb+jiX!n7l)tvc*U(V\Lq+i?o>t\ d_ajޣ|@/#ƪ.FGc׸bY&7mR CY8.pJA ET0ۤkF~u?%oVܣF߶oT)'cx<؂SJg BE>o3ZA<}RO|'?EK(Dz? 
4hO[G_ǺVo&ȇehߤR^;w B)߶Oc 3bQz @q=tY.ߌo֊y7GGǎ3Rٙ͢e?y)|l~ Cr%*q/,@; A*tv"K'$ڹ̶@3bxB `u 7]V%X3k/rV\P"ךн3Fjr/.2W&1qA?t Y(t1 V1^gB#sڈn"KD[oF.g~;Z6GxL&܃Yȭ,5ꗉ(ILD.0OeI/?H&Qpa]T$Ŵxy&ِ{kӒge؄UéxkF 5/nH Kq^ՙP) &H0Yp3f&Z~#${^W:ufK(@WY'0~`GaWձ$ϩPS5?:~Pj8$unRb >V=3!m43?xbGQ{#+"! p+?Mō9ϾBxzzOVm%}qV1[>N#Ia{ǩ-)V ^dK:!%wv|#jvmlC\V)>2̧BIɛc١Yצc $B"UM 6U}4׭xҌCwz\i9Mb${Xʏs uwf3 B"֌-eV֕;w1`aL3Bu "Dk/ZyӅT7!:MO)ψZ|P7042PS3N?ˈ,{ iEҭy$Tz<9oo^œ7} pYT4,0:e8cL2 1$EE YI.ձVZK}8B(dv}%Mńr4>lw޵-w (N~۵}G.@AFAHqitBѸ2{;\DWVS1 6FƀM`N DJ'9.…zmaQޯWPn_!VCMVʔf"CG/'%9"H L(>y(4aM̸v{ǃ֣o U(zoBB☌&[oC" 䕨Q075Q%ﭱ"ҍ .ڦ۪6% G]7^>렅0WI1̚gu؄H+>ȷ* }JeKq\-\U[G;9= m-wT)Ӯ7MnNq7sZF! f|˧@H=ap!뫋ckE6%^.QE|wu>:%R^ƨ2$tkIe}NÀ Cs8-+؞#<N]uGbKߐe:SKP*AN&ER ΫM3g 5Wk+#(FzӫVLFZz{ۙ`og K|d`dMA,CK(Go^(Z=uF"{6-Z}69!?fN",o-r%+̨#ҐFeִq(G9e,n-*dA+]"[ix8^6v`^S;ĦBv!gUs+xS?#'Dž9o>9a:nձH9p-^X v4evp\]T49ɴjCs7O.j8iWfP{vn|:VCWšAU/9G?\Ε[ wf=UȹOT)30d==] dn:ds`4ybEKD݈a CAr~jIݼɉs@Kux.הz !l/[x瞩|y{,CZI([%qo*Uoƪ- Ԧ 'Ԥx1/!{1 ,e{bmxVA("u2q︥\M,p}Q lxRi)!f[Pvgq8[$̹˞u==uq5p{a5KK.|]N/R\&`4)G4ۦݼ挸]wls&fˍ e9 IѺ.fIB}Fx8|^R\iW! SӉ22LfOa1i$Pq4z9BG,B8RhK.k?`{#]quq޺@͠#k8?+Y\ vEZLgo}} %wT̃RfZVFɄն?7ԎBǸ7m`8$(C]8 $õ9⻯S喐T>΂}x2IyF8B#|˩Hɮ)HO:%T{ə$OGDLT(XQe; =0L5DDōԣ ц) ppCʐVFlz|f[fG1B=0y֭}Sb)eiB!]Nbl?Jҹ.HH"FE)xAbZfo@7+eht 0țɼۧ :1I@"Oϭ[,/ϼk貥KQ˺!GCbc3$QX32הlI= {x,\EFӝV")}J1FYzO.l[=qq|3E6ʷJ)@sTn7E)B\QHR b p %kzg*hP;IZm1ڨs[6#?Q":L@SW"DjSz*w'S\*H.+ýGMcip0CcA3*q曤wg?7 }1iȂSrԎe'\wBToHl kBIINL"~2d^B15Gҫ\w,sP(Y t]p1)6tPwHׇyw zD <>}$}ᑕ=> A 4 37 "a{.V2F9FnrUb2xLk.x[ B}boG`Wʳe Vwo'3:־m&0%%뚲VґT'a%@һ`jJ^'8tqXqiGbn;Dv7xo^/͙Nl,WyN( +L՜R2TF: TΒ؛@W[jPqS8'vWT<͔ߢ۸Ra-O'!) dy0M+jT%twtWyM0Gcx~Mv)4뗣g˜ӓݢ!viy -BEՀ Sۗ (T8OabAoxmNN(KE686$D&eBvIPN Bъm u] {_͗U;TRUTuk@B{"5Wٖ&L0u_9ѥXr D:a⦆aF׌DƞC;лeژ>3Ϟds7vѾ.\/qj V]JtMp=mpnkP=f-`Q AQ@*A7)F/-Ăbs a*cl3{QLeveN;tE.&w3Icd|G+1HE 6OB`3'eoTrl,薃 нV#i22;<]7S L_xq)-$"O=Wp7@M>RqYYFjVO~{tƮᰒq8W|0r[JvҩG N1 N5W(zu?;R$?HD=;Mۤ,[zԗ`tgrA-5OaNP^YC r ȆX! o :.NtT$!q]esĞ#D"\7P"5O,%>۰pb?BDA3>2s**tGnV6$!{HTA*T}ź~ń^ B3@)z nbq諸cc([2GMB=+v\ :>qc;x$ It!2 A?v\+_ĈB)uOtJ8z*@g~zPhurh8+"v-e!pn.Ӑ2NEHT‘ r}M MY7( jY37qQ"wZ60D*d;ST NqA)n$s22 =Or39`\.܌8XH}Z%mDLOb{▱kQ/c(7@m&M}urvD*㸃LMA4b?ZfOv/)Ǫ ?UCpFI!Ao?0xZezJF#S~` C9 h7TI!bWcM3 ^fm2. h:[̺$ŰT$@O>rOCFtO~@U݂G}љkx^u%y+^0garɄ'T{n_^cʔ"SBvIףsN#d3; %(7_Hbs8#j_VR[[F60 /}l="6JvXa,wkF/F3sm"z>$lo5<ȁ#AbB@>^>NwݮO Iab+X FPt:CTvBgM(íUS / uX_@½5"^}A121gpa#PάiS 3QFR*@a+Z) bh%P@A 0bhrߩ-_jg$@/KHZ v" RTkf Z BͲKu%hXI ߵOcqE~4}[cHO](1k%T?&}> 4~9h#۪ EVMŒh J$I;*DXw;{S7z{&ּ-LV3-d~cHgʔtkd T clmI )$j0kh^ u4E$ mclĉ9/k#BMD켖s:je蝜kyw1'$D91w9V;w4Yp6ԙ#BU7I Ǎyn(CPG%;G\Ö8tC<Ը_.??t%>' R,sr>$vBuNgN&+IĔxDV9 urV;Nu,$I}W'8r8|V5ߠ:I =?}K{%}?-sKk[?Aއ{M{u_eb}9`[R9u:=dl9Q45 -, G\ InUB\0>-K@vYȤIѨ@٩zHVsH/ B_nG.W \ŅJjK] 7]rA;{w,N FOu~އ>>Jސ̕[nj)6dČ{* bgiN䙳pn0B% er{B -[vLyEUYap$ ஹV"԰ @7`9A$@pC!EXXVN^vV]Uꨊ4T:tug'ò'fvV/Xx,Bc@.kj2C컪.\ A$Ң89Tyy66WnRV2UyZd^f*kJγm٘Ɣͧ[S-yK9씣M3.e؜ي7$ц+,=f-VɜjYJ/7m/^3hoׂӶm=5 O?UMIrdȁJ`r0DQaik @k;?7ߍ\Ҋ?7n'ٻg/x7eOt?7NP{?@1 -bk%6_jX"M@$NF[F7{#Fs_jNƴNN() YU)T%*H1%WM,UVSh)ouY: - ;4!Fc/o1o\ֹUظ`%,F3T Hbi ` Z7,2 1I}\:^I. ]euѐb5ٷNslx ˎѕ{(iEKMT ѓ!c#z\\F*8Í8Da8a^%b~k'q4`hVw ؆롪6qL̤7Yl*q s xkbƀOO(X3hdA!@BtPb3b*l#*2c(0e(߶xS$VM9rYnhe~9MH@ &#. 
2I:y:nrwwViy7<>gU}5oԨW5}RO lžWUX1<3 'vJO mk0|r2s-C5q^I95~pN mk#ZH0;O*@j\fi+rgk.@EhK9:FID ,$I׆k'dY:!(æ]QF@FH;`սLUz]YX[TYM 5l@ R 4arFd|UpD%55* $."[rS6n+;-J.RQMEγq  JB)C`8PJ`)T7NW: ٶ)NE*|6}]o~WZ@?Ot;uv vѝ\X"6j"yڝ;IUڰ0qb~nz!Cb,Ub.A$Hr.p%l2VŚu5\ mf{9df@8݈KHEWjO(6=[H'߹rY‡ Jҕe`$p".A 'BB0 ЄJԈ>2*P&ɡ4##'-Q 3uFY\XJk(&Ƿ^#~W{8yj2SUP xÄ8|$N *3~_sϷaqh*}'Df!Tt =>!Ky?p~GR?B>,Y>S7P~_}Fy¾=s~H"I!$ BjI CSWz2IH"òl u 4Mll#2\ɐɐI/diC[gp-pTp@<§ 8AC)8xhU-nK2Փ)0_LTʑ&PL2̨7eS(vvo޿%ۡ^j&ED UYf*dbDU 2 cBUT &F.FZ%3 d&DE121,LSA2?x rC\|gʒV|JTM-ZxZ.',xʛkyju(N)9'"$&( |u)X`X7`/()QMkNkqq~6(M PЋ} oyw袉}]hIq\qRQAJL̊Lș5E˚h 6`߆re:S.\CPpTDd5݂QXl)`&ě0;OWnѐHߩxN8PTt-ikm³l[L;6cZق6vldR QOwcCBHBW00"EE VB56:+jͳi[mk"Y2`vv9iRMUJ2OEQ ]C-CD -,B5T0$ĀDRQKE ("LSUvm*d >IW&ɐ%(H$#X d 0J"QhEy(}i+8&͆rRF\XD2DFbJl RI(Rlެqu؈W+!3L<̂ "YD4TuQ< VqH)$ CD<(?I1]$@r>P)BjeY BW#xE4Ii QHWYJ:3K HRl$H2>t&{0,o4JTY]72ƮaS9ZQt2jL /3%Uƀ-*Btec+Y!+6 W vu]bqpu9>}_?W_.=>yB~l!{fI3ӧ"%)ϥv/*~c<~Ңxޏd!=\&{7{Ş2BIG3I ̓ I!==#JdQ)$ꅮ{ 9QXΤ5 PT"*wzWѲ:GA^ .ѿE"_|gň/O?(~KSOstNGdd6@ a AFlGbءb6'TÃc\UU; -8I,*W G* +$ (.Y2zJd\Y2PnMͽswP5>ã[VH)uM&؆Ƽ8),x(sҦnH܍U ROC))nhQ)9YO=5Ug- s@s5IĜv==ޑ]zoO=ƏJqAHGKɍ@cqSHPUJXcī1cLN8:l6X.4T0  ÓNFtx*w!pP8b6h~u{Ve4 I)4M Q.44)4MBnk[M~2c1 Ʋʱ@I12cX%LF0{ ;#Pb0)Γ8p9S%XC,Ψ֢k= 5ȹVTuE,}lp%a"ՕTMuj2I#96öntd[e)mf0iͺٶΗ CVȮ8v5lVƞ؇"\n-r/nKe(B/dR8)+c:YĪy6L֏mFƳ*-l3IyJ}P L=cP3eܦ;WÒ0&P'l$ā*Y Ę(L|N)YӻCkl"aǍ:Q=hqR/'n,Ş gV *E`ڦճ.5`SJcaRX ל(sfkxlMz+vA,oi4B\|Tpi<^(HQ/,]$yHr0E`NTސfbz 3D wc3f8mؔt+9oeݹt#9&ӜDCcE އ"oF|n /TolQۺFG,첿dU] deK_G֡,GjYN @o@Rr9ۿ[ϮX$\&DXl$F`p-ApL,bL2_T  '?jh*h4o4—EBWBhټNIu~g[|kk~b$J5"OO|DV,*^U(Ȱ7q 6ݴ4c'Fص =qA"t*lܔt޻t/k! A ucAqc1ؘc11 s|)Fq;]<5&H4l"v lICJsbZw^#DQ3ھd1EF(Dt+8!sZ3zSbxdٌ!dD@ {(ќ;ǖC18Pj8ZQ͡{ H^m4qt(O.[T⩑VT8:;a%./rT2'LJ!j#r܇lXb+C!|! eS'//*'чA} ={=ROkhYp\`r(ҭbhڈf|({Ok) )?(~S>c,XaF*`` { AC :|W0%зux v`o龅,zEol)q7jQMDT7%5R[ Rѿdř b+=Av3:9b-9+38g\\%wYeo[x2,@t&&bJXbpԦLI1Tb*BqLd Pdcl@@@=ϻ_;xȟA]! CJw7{ϳ}~~}_1|]N=/?_Wx'>O_[oG?DD^>Dx-a6s$,/ T'v?%< 'ϥMĜ?wƐ"|isĒƜ" ?pO|cC4/OӔx&~4OnWˡj{zQH Ϸ0lj=zD'iՐ7 0bL)QgV}NY})TwnAsnw|_m~ֺ)uڭ B_gM rL?bq\b/NC]qZwE~r% Gh hOm;ws_x޶hWi̒kXDbIJu!M*0JChXh4{f#ڻvwICep~"֕j1mTVU7Z.ݨXPǽGz1f[hUUl7nK b(7)&QUa:lLU˗-e eb/úSn17;[k ~_'wxtsV36dX|Iꌞ1C,Ƨ?Q|~/'?D`;tJ(܄q) A99B%'/wU"I+?VN \M̬erMMvLm&+ѹgvۭcn(e7-&q.c8V{-JvбljڱmR1q;vѸJJ4ceC˩l*]mY+)bTX6YȻ6?g#ҷD"ii^pT_7^/%FA7qT'n+!P]p\-7=ww:tbku9x+^};y<\y1k, z:29ZBq/ Mюk&R<)۔B2 ޼ЦK=۴.+ʩnxۻP3dd|!ͨڛ!EBm|IkUܨ=wf!vc5 ^~ݑI*уQ{l=S*X|Y0RbldIMZRJhLK`X=]*DP>=h@$0i"G5H,P{Eܽͨ$mZ$=~Tl3P$*/_a=v^o`&CTAYYZUlDݪ9L₂gX T gfqsM3|qR%}˅y " ,!2$e0 k1lE1) 캦~;Nߣ*OT_sԢ/P|jSP}EjR!sa+Qrs9ֺ/8IX9f-YH5p|[e3<2הV*r 8Ӑ:9DJNAg%SSrx.881gQ1I0PT0Č2&zG&aisZɝ{IuzmlQ{oDGY{k07Rn,T6Qd!@&"AE,7*ܣqJ7Lf໌zs9tϟy=L;!; /eID;);-HmٔܩNʖD;&#N7HӰwI﻽q叵T<1秙o_Wǎ?9霢',$G}W zX= zl'Oa釡=8h¾TKb:+'ҿIOR{!5ErD' NND >8n9\M**#P#XLun*IPl45<'2^PkCÕgi/ivڬ\"TKmvROV9NEI]ڹ2a؍Yf÷F,d[]]3m\ƱU;7nLp>_l3&Mb&#B"0ԡ2$]SYU7MT.Q-\ ītX sw(][q)cC~pA˲7 [O2(p-Zom,I,Pa`"\tAYtq'T U\LfjJ$is;xQӺ_F9gc R hܧnݤ4R/wХ%be!G*M)oV,D,!pCӠfH.7 ή@J\ɈOi,B77N7zl{ީݫKsʧU3Fb)AlE}Ze9V鶱\*)$Δ'l Zj 3$ '~?6xĝ03LĺL Lj1̔c mfŇ%xw3J3hAKSr"S99K*KvY*KUv"\h!BC0XfDC4̆Z35YHQ3C4͙V93 u*IHń(LU5*`B C "R:`0o?/ow:}gQS|ї_7lx/k/6}?!E!BIT+.:I+lmf(EF%fEy#kFlr D?MnVz!edh(蔋ayFFDJ* &kĔfs4lɸ yf"Zy"(XjDV_Ranb뛑Ii*! z^fIFdyWp%R-*mPs|;&)] ]6% TyF\~7e@[ќ)) 7Kug7RV"(I=G'%lHYisE6Uf;ԭ9L3jA>EJfjnv(R H)V"vLmde+= '"^gH+I{!FVc-*&Gˀw$eR Z2YfZ RK, $#,em evvy&(ꬂ셂tkhv/-CXuVu4N2 7hYFŜLCE(1Q \qcZc! )VWʳe, Zҏhd9kPCSNJa4tV҃j-TTl̡seiFL:WvLؚ ԃ_ .RBjpA%R\-K)a ;j'EI!,=u" [ jn ! 
r}09G&FR0D:Q+D `)D*SdZQ47DLL2ڒpiH71+# S0yM1cAz)m)16SH7'n{ɗ/Q6I'1НU G Z+%I $JI (ҕS} #f󘱝LJүlfpUbQ OB')" :-FS'thN Ŝp #W"R'!#0P8 xС$eLE&{)(HvO ^omqQ/!+n\/} ?{Љ͌O5s=,okM?͟z"|:fhsfx{;$x@=Q#ߓҜU"2ﰉq_a*I+Tأ&|싾hb#ГdoW?ETEc SNRIΔ($ rr'"HANT!I8J@PC8pRN(ˑ$@듀B.(8H:I: ˎ88"$$sN"+@ Έ;ã.CRB:I#䓨NK# $;."s;"I$ D@DN;㢃#*:#㎨;J*.#* (:"(N89;;;+軠;"䣺;"軣㓺.. ;8.㻩:8*( 8.;J.胊::* S㣢 ":(: *;为::H뢮룻$ X=bPj(CN̕; ١{,R3NGc'!^0C@IޝwwJF:7hHRKɷ&%5EI7r*.᷸M;mf^w^/w{M^~5rk]סUX׆=zf%0 p׆T`7r/IUS>zOmT)N^O=SUsCPYbiNjxsE(c̪CU@IVgL),'Ijc)Mi 6Ii{'[d K(J$&PHTR,)#ML* ȁIK uEE:aaFtb&)9^/t{=;>%̟Ī?]NH|H[Y-qPwgw+݁P`_QemENgu݇ 'tYw^evoop?BCL BZbRSآ6تPmfl-U; 5ű0N $~ϹOTayy=}?̾_=96_HT[3`O0y7-RUTOJS <<2 '}f Bo.xC H$Pq 8Dp'E*䆻ֶS~x4ߧbRƷlTlbSbլZ+gK\vuԈCZgf eʩ#t˲2ìӲh]]֕)b˶tl6i.y0bh2_Z(*5# ~S R&6١2[fQ 5=|-GX/nVo13z4B";,:}K[SvNT/>9"a۸7۽5is.XɩyUY2[z+!a:'s4aS#8pC:)tsI r2]1ER!̣5P⫷Ez$&UHk\o^9CٸY{H\GNQ>\" n(˸Tm}.|0wGr-KW3vȥ IMӶ5ҳa&IZ#J.2#Xbmcܚ[5P,)BqP"x唖  tfi}!VH8FZPi9%T5n+ }SlTUk6 $H鐐@gL3:f!C23.l̪ fg1kf5~7%.Rez=~2$Y)oE"02, e(#!I) &a"Xf QTӚrL,Lãkg:=$"$5T21&#w{95uMNodن$-+ e"6ٔ0Mu F.mk ɎAQᡎ:88tU' # ԇ4J)Vp 'aITS̡(Fc5f)l.ތ[ d/f_Ao UQJ[,VUX/kQdM EDfn)c]ͳF` =bPϿz._]޿=mzl/Y'WGWU3O[.m?9 n׋^*Kܨ8!D=/O,<<߭W]䤤DHIG9qߴl9%mk~A(ll8cQ'l"a%&"eٱ]N֡ 东lE{+^yޞDc#mԘ͋`˝n]WNLll=;Ni+];mHWDZ*4xi:.)_06; 'NQ׸t)+;HH.p!&(3!@D Y Hh"!tbӰnBE"EolA\b8p>V'dJΔiMצGvY(J0Sϣfj;E䰽9'#erW%;P]ݲpuW5L'&yp7Bpw_~}wkNdU辶%}jΰF9ZPaD >5FSj"TP  <1fdPH.`3N0NrYPA2/;G醝:@f6fJ*؈iKAqM()R,M( LP NQPoMUis&`  &`0E d`-X|oEKKXT>} [e7j#E\GU_?dBzr\ݖn5|s|yoHR]zf;#8 xҒN$$Y88iܱB-Kvu(YR(ԳfKURxn.ν>X+s֔vImYGہCRXqN%N>cYv9!\ h2 -TdE$, d218f?_ϩ;pd{JYBt{d5;,S#XI5Zfoɾo,w E|ͣgr.ɳ6T6bm &`T ءkyRLCB]dA$zoMv:. ?D ݍj1}o{zZzzWz3޽Z=e-YooK|}x8z%+Iڥޮ9v Bd^:>p=>\)>/zrY<9ˣ1=Uƴb9t(tfԉl;hp/pb$8;pg)̮`s0OI?s]bDD.3A7Hw8 zrNq7y)>'RGr];d: p$xiԏiؖԏ P,W/XE(([(6MlMd#JK2lcYt"XГ ,g{$xrX+\9*)0CVmP361rzI%gF+O]cG-ud5qRj#@fKJpxRԔ!++#ZWmS}keev:|#AfV,eY7A2U*S 3+]FEVXaCtMޕ,а6`?W廫v]i 1X2i,k,ZKTiiWKL[.\J4Y㴦FN*n%:.D &4߷(b\$ī!Ak ŋZ.'x?W>>~>{x:{Z/\sÛM9[ׂe-9؝G5]^-Yω_W$9PU9ݜH" 9(r!C94-iqkHd@Cc S i1Tij")ka1f#O_7 T8E8,RJn'nv|}ڧ"Mj":wE,;fv\-ԤZ$~֟HwO@}x왡%{û";qSݺ,"T"CB(;ݸ^Հ{[wYaEhA@4 RCJlxQI7ޓAIО矫O/bNb"NvDgm{wvvHU CT:ZÖ$.px|5Gw~t诺~{-KŸ{z>5-'AavdUai('Sؕj%E}[xzcxcJY:O<ybp2䁢ogAU2hI$$>lI3T Wt(,7$ oM3 l&#Hr)JM,5MD4Z`MYJg!kXCPb Rfe 1IVf30QDfYs32iUrper2tX> K k2R|6!|/~_MkEզu?>I:~Gz}Y;޴ÊΞ.bBR媕kr7 0NTKmRJZT+u.rÑ<9UNTaI)99J圊rǿŵP影2 ƎF j)131cscuuوט߀M SC;t ڥTfK~)T7)7VmRT7d- fޛĪo7MhԽw(qE JP08j a !j!`0ha?F7 Bt 3\]uQ'[ k& !I"h1dhS`"ifՓM&mm##*Ij"Xey%F6iI(IZɴmdӈӲ4aۍnkFYb!+]%@-44f 3fB:Mm=/رfȐe-sm-2nrbfV(i,RfӶ&\Ԛlw6;1pWaj9mD[IN=B-"uk +wZ˩!z0̔,QM{I-SKŊ$"-&dD#-:jBϮP81%FZLl)mF#Zԣbʈ(G *"NHXI7$ahBҜZtQE6"LrFqb#%RA4 \xAu:,(=hutLy+1 !L Hf>f5kB!QCj&,ƀINuS|LQHDѣUV\ W `,琾 q"n{ZBVV'NqJ 8:B2JŪz'FGzy"oجCTT!_~oѣ˳{;;4%~U"H{ "S߳{"~c=Wu^ yc\؊2z'b!zOKUI2/b?7ftNgbW=+= {*:w 9}A! B \|t W.;qqzй9_}VK8⾊::'zUODUa_>G' T\N]Iq HrPNq$9)9 N(!(wp $;E'))$#HN" N㸹 ;$N;)΃P'88NNJ p( $N;B.#:N8)#蠨HSK":û.8.ツD:( );⊒+..:苣9;J"*s: (;.㸩;:*((..;(䣨*K;Μ+뒤8:".::889)8ꎓ(:9.(+H;㮈ุ::N;:(".:.#⨤ ߻v^6&FCh .m) 6i*XmRm=ͣw:s3Ob/TS>CATg$*OȊc~:ĊnMLTY2r* e&U u2ɺMٔ6h\ Ò&FA&@2Rd$2cjDS׬y<ȜOD|ykR3ȬOEG^UGmV%h)or@QJZr,99j]ڨ_|YDV3V!EO|OO~S3tɏ!"d2@1&FʤLɒcZ XaF<U0 ss^~Xo{~P.Q߆GBo˜} fbhj7QB -` (]Ee,,ʦYG-Pw7bmn!5k/_ZEd֚5@5c* 7/I |ogzs.>|6ov|-x*>_&Ǖ|yVPESʨhgʲyjU˕)lЖ{ . \[Q@(<yXRO-%>ZygOUA#;䤧1EMssU\#CGxysgt{;[ir Uq˒j۰˷mcgL6b]:mf%"nbMf${*=&vDʊlPU8ӫs X &l1]zo<ƝN0de΋6# $.HCBH |lbQjIf6g(Jek-%s/Zs`a>WC:᭸1QܛW &Ev)Uk|)FZcDsq'dA&*13 ъtbf"*='L E,bM_Ybe9#,:R3'8Sdd#3de9 kxh0S A& 3ohY% NVcze"!!Y[-#pt'"J{0z-ڗb=O5blj7tI (Yh; !Iض"ybıZD>0PUFBn? 
>si RXR&B*PA>`$}&Ys&/ARZ 8kdLrmɷ Cm:wkmRk H\m qچiW6ʸqM%ʠJ^.1h,YEV*+n]#j#$}jO/(~+~~)I%2 m(,R T] $EBjHN."7)~>7}ݿ>`# XlTll0Mdٛ!ٛ;W zP[M/RWi_ i iy\a_ R 4H'|<(<5 t,|*KCIĊq*n*lJGU8)8aī8RW]kJIRd jcePc F51  “z7zR̅9Ef :M\m.TV()pw!Y;FV,S;3 Vɴk z}$0QD&>L,%**"dLP*Ęђ&>U $2cb΅R& 0 mi1 |Q[;"k~:y랓>)="ZXOSH\}}|yy;I}L}7* OP'jdBUgn 6:9]޿5PgcîM3yds2{IݟmqX aDown* H$R`D6ނDF1Qv38T3ljUɮ(ll#B=`.RNvIl = %t"Atْ)-N_E!LF~Yh20sC13IWj 22d:}^Q\N6'/D'']k%U0'1Rq%T Td2u2Y`e]ܡP6ʳ(n, Mɹ ׁJPuìruu޵W\-:[3{=O:EZ臭=_n'zۇk\H=m+*ӻ|7O G¶Q4"s*+MyU.0mv5j*(:y}6!{{^B"N_tTL$q}^/P{>Z7:uqnTZBÕ)œQ,UaQTҒ*,, CQQT $0iDbm2iDv(Nnif͚J(qi496g@lRlD+b;E M}p^ξ`A&;PY6iooT ›ʨ˸ ުRffi xD5Gq )pԭm) mmtZSh6;[[Sve. 0 ,+ET0 K |/us{5i?g~^wyJ>rQl>o3ޭ>mg[oN%OI_Yn$1W|񬸾xv"|~km|e_xL|y!uP|J|y%2>O#j x61b ЊDqNBDֻmxXݨ-;85EmvvqX+iF5J* 鑇ͶCF+C66-I̓bU^cJiCk8i`!. ƄQ؈U"JűVM8h b$ -~! Vmi3zG`cxtμBda{.-EIY6WϔUJҦb#XǚXOCBaR*ʫ Ƕ1DE%svXCp(#l͋դU*XmnUD8HzǁՙP$(, ӻ;CmNw.Eӄ원[Q/V(D3hr"6t&^p9Jd.v ˶#Kۓp;=v r{ċO!YT7RX+Nd7Lc\, XΧb|{)ZJfH-sދBugp o1!6Dd60;TMh6ۈUDbmQ&*R!ur` {u5NhE(NE,(x-]JV98K'%Y-'\֔44&e9Rn2iJ t[2_6lfHaAfa00 JJ!.ìkp{RMHjjLnsyie ";k%>o8/-JO:;aly/5TyG4XEGWmnG(ǙaX悓9r>k syӚ^LH! &%Ċ&N+&\80.N\Xnݸ\ϼNCUVk[0YIYWEoTU % ^>Bg ~κ@7jNQow&大}> ~s zUW{‡|rdW2zYWz(6Wo]Gyן^>q}Y<:yNx9yc<0uXyST PQ|)*D3QO+'=+G0X9I9r *dvs˙(Bp3Iv)D\4;% ][v3X29t#u[UmdcPMaٱ@IWM33n-eck3mVj&ڢq Yy4Z&&kJAD2fdFRx|xO4ltjmyRuB;%89 W;G84IAZ}M*.RulQqcvfx9|єl3Bh]Uڵ!/n2 ]8[A Iζ92!ͬ;Gh{tQ*n˦q#{\אJ3:eALs+ J{V7bDު MU۴x5|uwp2M3^zR$q)}o'W}cOKPeV/wcK[;˭|4WN|P Q"`Q!|gݷ}Mӓܝs;ߣ~7-QK*V4Mè {Zw wiY [3[c%৞W;<w>{ w xãnN\_ցk_<|2Ȼ i߂xK/D\w {pyO#~m??|W?;0>{_yhg ?t3 ף п&gom`|qӱm@߄e_ ٫}ɲ}²A>|3P/*OO<+= coAz[&Ka _c|a_[:eX?a$ƾ)Yߌz'cG ,_:(t_U8Oc#3Ax?{opyD^E2Yh\q|b}(#= uEIT6HDZ]E_Ww%O3X%[ϖ~[36@wJ[d\hiލ04![bg-9ԇnb,͢Km u[x됷 xk~m[ }jPg?qE0. 0ƅ1.? a| 9bO7찕fjV1 bA b\WqW}Wf>,}n13gaYU%z _]{kM*"*VNg]Q'$~x7M,fs<Һ:FvZENn>ؒn6%Z$k_o$S>;˓UUUU˒T;)tE'y]|\|\hϹhYQT4t LbdՓE*qġJ*q|UPC%84ЈC#84qkġFqġN:qġN:qġAqaAqaAqaI&qaI&qaG8b#qĈ#F1$L<Қ~#+WKstrucchange/data/durab.rda0000644000175400001440000001131213550277443015461 0ustar zeileisusersZ?QjH2bR4$$Qy,YʚlG98c_EPT*5THJ$-&iтD||/׫y~z_}!DQ1|$D$>'n'Dl:ϤO]Pk:ݥeJ6yxq1NjO*z|B'ȜLvJ׎D*Q+aɭ?J\P;\^5j׷pddv1ӌ}+ՔP]7^t`UHڰ XOVa>o7E=spw%uT/g٫բsBa)?y#b>SWy&t^Z ٗf6t*>^v~õ-V"y^ɽ o&jbm ǦLsK쿖π[p6sqkq%[ݯx\kwV  $IN8cGO٩&nTw~V5UΑ:E(^`=8 W sCϙuz73snKM=sK.D:o ?]Αj;F[^:}Gfn<{ >R"tJf&hl-EގLmw@;kf/UQ xD;([=)M]Z@F ]<܋VW~x߿?gN= =L`~]sm7{_cu4D͙A}s[+oۊ.᷄9x_߬u[.7/!-L;x㑄*c)u~\!xcEkf(OmƷPf%Ѿ-ZNyR8* GH>۫[K`ɛ? 
hЩS3A^>SB/~ qsT淌ZX~E|c/%#f2w vcfI%dF =?/D~sn|F}i?~;a%r)烨F)xՠ%Pe" x/;ěRq?s!ܴ͆Emۨ mea>L>_u^(Fk2&Q1έJ}x\~ڢ?Sz:uhKY@%RlTpa79~j[.iO*G'zD &/%[N-x/6>s ;=~Hu"k6~8)?i1lG.F znA;(ͺcicK5_d1Tp: vJL+UzKnӱS /t'ž{M鸿ծmБ ZX~[BNa 9 \wBmx2ye;NYK^3}8f8(擃]|Ou^VmGB?`ugv9[Ҋ#ZT]tppm:IKןBF\A;)~=w%[noAxoxƠ_Ĝق:}zgi!iͮNz6UHES|Ff>n1BGi%rc{Pl4_GuTC^]OL)S ^ՂnwY@GNv{爯elr sc^;XPW5:MASxNNevXA<^zOV2>.A'W1xtێ,1}޷Kިp_|K-:'Bub^:KxSVrK VZ@OI{c]utwenYS~&ՙ822s%̛!]7ֶB'@_c&Vձqĵ݅K+ݽ/;_X?0cpm)so1/X=O^(nS $)uyAϥӑ_O&j*`M@"Jpo╰1ɑ~/ο&ͭE>|N1!c7a-[=]\Ű[s}J5p۠"u ߵuqF' Tt><JaaA<+e1O0F>1RrDzQ7ܜ EQri׃FCucQ}wAwZX`~Gy–])z~7SR u鏩R3(įgy`5;-_jSi~mfbJX޺Yr.o̔f)ia[Yuy^KCryQw{R_ 5?z"97J2A_Gt`|)UXwtn^ z?A^[֗Coe$Tc.(; ĹĹĹĹĹĹĹĹĹĹĹĹĹĹĹĹĹĹĹĹs/_o$ϐΊwٿ7?{a~=':ә{êWm?4'a~C,WQN?N|/b)strucchange/data/RealInt.rda0000644000175400001440000000172613550277443015732 0ustar zeileisusers r0b```b`fcd`b2Y# 'fJM+a``r)}Wa_xI&c4_|9?'v_|*#oeykl %?l|biKo S8pZ~Ӂi~@,~&>ȝXb7+a3gx9pOu2\j#٤i_AG8nz+h]_%"K.m:sJv^w]}{CMS_>h=ΓT˯ ?_u;t\<'z*gͻkÉ.JۛIX?xaCTA߽P-``tolcpW0ݱ yQә_2UBk%!8CiU;0m13*MyI?jak0oUXiwS…xg Zx\?ʋ+k_=z͢-ҁ9ˮH{b|)?'3y@8AxNhʛaa̲SG8X#weI.:*Y|Oؿ{g'JD/h,|p@@u?P^;m3>8-9[2R&zw]g7E1qP9#AR]6Cցí7冃/0\l&=juuue'XF8|:?KiVbX 6F[D&@}K?r5Aέ B;pI}$}U3D hNhي9%3Hs0[Pp%Pu ը ʝ0|.-(ZXsA 2lstrucchange/data/scPublications.rda0000644000175400001440000007232513550277443017361 0ustar zeileisusersBZh91AY&SY~$UTUUETUEDUm{k;`(QNF@πFOc T8 )E+TR^#w/|$U i[0ѭF$ >*0ñ(AK zQ`e*V)M{tJ5PevH ]`;h@u (`$IAT)*BF0$dPKgC@\! `L #ML!L d=*~l5OOD4zhzLҘ#Gꔩ=SzI4M42 h  h2hbhhjy!@=6zjy&@h2MhHOQ4= i@z4J 4&L0*~&SA3oDbDՇEϘ"oNR_8N1n5KXAA>[G īqo26Ax=rs?ב2pˆ`+69xf*K?19e߻`pΠ~YȽ7|ýA<{}e|$.a]̩H.f?6Hx`$aDKI {>JPEf,̿RZ}5M a! fEYY%ơ'Qp.< D:WWӫ4^ H?s~kej>_}V3L={>o^{Aaw^Db}38?s~X4C%)jR\|_E)`= r\9=Oz ~Ӟ q.lKd @%H(zM$=rJrRRukN{MSMx剮Or&1\zŴJf\d=f’f}NYVgĘv2~UK;n)ӿw '[JȺUWXkoj۽VoZݾΏWrgݹfUu$\<˶9A NLGfx3Cd"zAC;!| TD N۶їq_P 2u=aOTt΀ʉᇜޙ;_uH|VTM+}Ij1dN2MG׶J_Y`Z_ i p ^SQ6kԲp2eΨO&$Y&L [j"ck?hSRUIMOi6ZrUw%^9wM`c4S ETUߐ$$lCB :9 KZ%m-UQvKFリ̚j5jW sK3Ym0ˣ/;S0Ke҆ڛfXKo[6iӣTk֮]aJWؙЮU奼=jWvq%Kۮ0mvoW淥b-+o9\wXufeձۇ:+֮^n&kIk7DnN٭Q4\g."0ɔZcyuFK)*)sӣE2̶ᔥ [nrVMLvDw0d>d){#2HZJQ} NZ/8|c W]kmZoӯ+G>:~@@,uC][:l))Q\g*(9e6sm60_?1^]z;Vܾ#ϝ24cxf]"{Co`(zMg?a;=ķ㏳ǃ(*vmsWda#3ZD|GosM,i3!}@noԴ3紂BT-D`(OY~(Xoac$Fg3}EVLqvB gE1f4燓Q>'Hn5CM1ZƵwz|GC<_1`)АJ5 b4v9}LjwG3v ;@\[xg{;pNcovrа]Tֺ;?ZOr`CB9LwxT;,rMCpJZ3\A >GYnj姷χ%en?ua+f.͹k&׮.@\esn{f,Mk"N&\Ge1rna=iܻUba }5L$CU>ü98;6%F\2b8CiwhAAQPs ZQFVSҴ6]9$1AsWb6 i= O[QJ4"{C&ı upN)@sCi"vrǏ5hՊ(k+m!-US]^{,ȁe'ÙE>@3DD0"]cwȽO*VJ\-ܡ $=kNc)GpH[9}F;:;x|B* ~iI۸@*fZ$!h>ogu/Ozli.:`z,!DZګ }|N&,6},+l,.6S)r:pꐹ wtY9r:8m-I!8>T^ٗkW1>]?-)rk} ~%>XkG-U\dr#BF.${iJNZIa![#~/o28Y'ӡoaKAiS n{6c8cr6ӷs˸AhE# qZ>?=WGИG|dKi_'' B#{nQpһlLb'  :My욋R*Wpo?"+ؼ9Y.a,rڒѩ;~8mxdre'^{wmjv;8apU{s+-=\7zڍQrH[[,$Ib /41)^(`6?db<_VvQ:K."ޑ4>.={wS!tbH8vR]/^[)dh M0wPaC39D>q7]]"B;v.4b 2绮Z5.dž۳)k1ò$:;C{=jX1kv]lh߷,:yێAbPiph\s4X&bV#, ItkF< pOߢ8#$Sd 5LyHkļ߻GԜbZhTցxp[(V"̨ HPXr 5M94K:bKu|(GW ݏQ)g& `t)uO왕OwcD96ϒM ,Gc5;ZՖ`vC] |v*,Z1z7$p{BK5 ` tG9óZ:y!bCi3V=2f-=,AN4| Tm37JҳwG 8,H$5`ΘS7 -EsH$ Zئ]7 鉎X܆.rȍ;i]@}(j@W.K>o" 5 +f{zqN+4b \/d" &=TyÛ?/C͹0ݭ%yS7PM^>^7G3a1묈zy n}[[p1/+}暢 ϐ#1Xz^ 8{y\}S28WGҠX9۽=zX:%cD {6sN|C k]oq`@fVr&M\ #&v0[F]f6靆sPH9}5bvMVm9_b ~@j G#sok!||ܖ֌K5 -o27ކ,қ$>,UxNIZ~71ue+ȇւKT3$q!")W (-,7KcXv!xG9]_O;`p٧9aq ٨]6 I۶9Z|p.rw`6iYuJmJ坼Z+kIR\m4I+6m%V*-N;ݷ;:NP8(fEw ~y[50! 
8fpKM7Ƹ$:j`bo2q j2 6ˌn~g{swdz-ㄈ%"(wxf"p8u$yƚ2#|afr(ZeQ(宝CوD^fN`3YD( dYظ g`kfo,ḀsE[/.Vݪo%1>h2魝 afâhexH؟SGW=fPjqף6\x qw DX|TxE7Wcq9qo !qV;|!VL~ـxl8 PUXq1j`65{ ]+us-Џ?O6#$O>)؞!>&cl=բbG@)h!;O⹇ז9~^x S/<5Q'%-ۓTbJuR DwΦzNgT^-Xxs:ϝq/nk߸?wm~|툸Ǜ]:e ܻL)#,bf˼;!`sᶩX'"i_ fBWc|G$1 ˚vرN "Q* :tJh$prѼq֮h]e2Q-Z^3n1zy2 @1,̱@8vÐJoSW" a3EDw0ɷzͣoJ p @FO"=<Z3 5hlAJ1ݮ&h-P.‡(U6Kf2ŃZmf_:JkzJ뜃 "NAH; ǚeHzG>t){PY&h ܛ#|C*[1p.%%{X&-g8@m3z98/j7HjXXɀhYre+<{J8lBćbd* R/K*,0+ VZ~6`с &hs/#V$bMENSzF[<^O>{|V}^voEYXgb>y77) :"AkdY=Oh1 %ve1㜳:F9CˢcG ){CyWkv0ucjM2gT͇=e{:\XG#^su Ş/[Y>{އeu0+Bdݙ)SMQ3 8Drb/;ܵkARԘuij:e./O:an*y4, V%D9p]]N:T K%y8["a6}3DߺQlz?X6[. 1HxPZ6XID3/N9dSpFZ۰b#DLS^}HAǑ1Kd5rQ8&T2^]qzR^0=t4p» 3$i6o/;:q InQxx4Q -aeHʉ`vB,C  s5Yfym2¶)%Z{ɬ vl,.C{宁ffJ;ZNHra9n.SB:]ZQ֢s'X=I[̓pq9acNW:`Kn;xs04uɛҜN4™¼ Lk "> nbڔ {[4'%cYad+iJe1p:ht'x7łwvjtz^DƴiwU᝻8]ZiE%x^njrN!.r9hD|qT?^|=CkBA ~{7͏b&ްHjYa]鷝Ҧ5 Z>5 džsvo%7!FG puk&0.KQ5/ZZ,$WQ/)f/59`p]K6sCGAW/[q9.΄7ڰ0n5:ӏ/#GF_wĩEG7 ֣ZAV@.I:\*nbLs ] 3'}vn_O86m;j`xw6w ~(l=9x'UxKBΫ 0mB-0}<0xqpH'm1;,V ;wZnZޞ "#,τC}ӷn B P J[ԵQ (Ukp.a@m]YCM6<,a-t^7lWS'4͖Z4aC%xAGmdiI*>H;2qB% XA2<'-m Sl=( cP)oђZKp!(@n"xS Ɛ b Mp(/y=˞eZCyyԹn'lId+t}=]W]u]u@4kNmI%+SoHT)YmjjW_wfffffffff`JڳY3333 MUZZl;v흳vVV39fb]kZVj +TV ͊}^m +ޒ/zI1_V*XQ Ic!R H, k߰m>883333303+9emeVffff'ַm۷n3mUm̬̭Vffeffej(o@R+TVn +U֫UTOV T@Xd`{:hztC+U4T;Q7 "B$!(("bDUX,UEcb$F) AH(D ),?PDQV)"PA"c`*Y,~X(*`eK(Q"aZ 1QD XU**1YmETbk"*XZ0XȢ e( $Ee F V1DADV bDF(*b(,X,DR(,!ddAIEX$FU,E,AE #PA, I*2@Y,H#((PHUEU ,"Ȥ  )dDSd"IAATXEH*d"BAbVHP,$$Q(RDFI$~Gz^L<_v=Tzᚁ PNݦ^-,2>ꟑF4!{'H>?\CCRTd# %!!ڈ?R{G?6ʻwP;mhw'7-N:譗MNþ7FJrʹR2M:|$Da}j½Ćaw"X^>,'1zKH{|]{7${3ʨ쵺YZϥbWsM//1z((]30z\K^{-8xV11}|gݥȾ`~T =AT%pO ka҅%SH^ .a^ؐV˔{ C܊ 3+}[H(FãvOnd_*:^wͨqzJt9V%)4^L_qxq^AHcݞEsۤgZ+XDk $jp@_WH B,,#*ˬ"q31b6߰)+:g w nE+O?8Zn) =*ؚDܮcuἪyѬdM @ͨ k 2"4 D BmQŔ%kAt]W#[+>pYAI&Nz":=ފ>v2=Uc; ʬ,32
2mp<8V/ʅJ @4.mOsX4ԹH \D噑NF@S8@ɝܡ[{mg5BbަIpz]a#cBŀX '`vN3K k{;lMa -GΠU\&2q@YVQg)U]8gI,tjtwY/8K;{{x!x?aM&C-b(0de@7q矣o3*uutd1Wlmzlg"&cy+0CSDZOiğ.lzj櫝y^OKgtVZ!bw/I]/k7s3'/L|+HxW^4O/} pf^_| t wX. Hj:~RtA#nɒ/tds!ʀCeOCN%; Hw]C3y{x6"p6Rv aOD܋CohKج'˃#+R#/M)aOAC&Aֺq<<*e;@ F֭$bM5xInttr԰$o,Cq^mQ|~eA \mnよGߤSϪ(+LDC;e9GxuyKڐp=zQm؍ڋ#l¸L)-IQO βGƧh2=:V%~K;gʤ}0cG}6:, HN*`yzY鬋_z7Z~5yl'Z<+1T^vv%FNhBp"hkPP2"+@0XhN*[GifH{@HMWK TU-}S|BH2.spy!]`_ha) #(D.@!n;cpskxiky{C<~|T3O>v'TyɰRnWy B~pzP×:W9xW!->~y'jE/yu"e+VuU~ƦSHg\4 N!@kn~ffyz:On#_&Qٱ*hCv;짩CivthQ![w%D@J`6c0,RE_χ#mDK7hD NN&peש4O(K=/@ O>ܝ[T(EXns '==cBc#Xi7ڗmC? @f%zҚ(rQθ18qzօUn@~׻ "H 'ZzZ:Wzzg? #?́^??̖uMJ>\/3MM*~RM`#""F AZ }hp:Ӂ+'S@h{]cd~F|xҰ7LHQ$!PYvLaT&}@*mLz/~ K{pS}o3a¸+1aO/=PD"14,ˎ{ZvJ'` Oq > mhQ{޺l&a{| A %l3s|OR &hZ0_!wNsm1ibH9;r% ! -F?WkYG-dy6xӗRj}nn Yɖiʈ!399Xq?(՚CI"yyw8D*0~_W.$!Y6M7պ6ǝT9͛R$Zex1bbsgR+Ʀ(NnV8\ e47VN֎ARRDQ\K&NZw%u DW"حv%`g5u4>l & 0Bgc(f XˈuǿR"LKFnu Kæ(۵ }ɤ)XbEy*DĐ hB0l#~XM ǯgf9(D{WL /i$'DA$HclD4\#viSy۫@"o[=1ȳCt٣ٯ8揋KA9*Zn1r`266Hܐ]ڑydq<ѭaLW NGSnփ _$Fñ;1"itv[]3 m;dRT{bsu[ZcɋӟUSCWc-N&¨Ę};< Y5@1lL|Gl~(\ܸ+9nՄ5;`l&4 KV^|nR2I ovkb7]0&S]Z t#6e4/.EI˦¥ʸ%cdm^f<ڛN!; q߮VRHTWIxnm<²fC' bu%ѻd}h`+7p`Sse鳧:?c>;6"4Ev{_D,͂{}%,lC=}=!Kuҙ*^~&c 2>ulpN:]lv5FiaEjb(r@rYT)Y&d;K& VaPF)"*"`B`@% T"uB(KRl;kZȜV6hI3b{3Nj3 yaJhl%0Ӷd}MMLVm EnS6%Wfl O=2 Dی)0DTTCQQ"ŀirqTn8y=I`BdhX) f e"BXdW-nsCLZE+RTeMLȊ,EJ:FPZE^N&CpuI(p~?O5:=y=4yް+vث^Qb* "EL 7JEժ(YTbi TAteX̺,B-F3I+֮ 2 TX$ˤ CAQH:KXXM@&f2#5IXDF 1tJԪu;j-V?2bdwq0W# @qIb'x/mR۝;`V$@WLT 0@$ C{АUX"3ǩɢX !/ʖgTbeN%{I;So/r.쁩\:ۧq8쯫O\CO~wA/k=(C%p3vpr%[NH`)x=C@ i60lӍ4ь9gos/Gk բx0ɰ{M7#0m{$ΩӤțoP8`z=(g. 480^cR`W@gf׎Ocr zaI ,rm݆arѦİsaUOu+%M8lkN5 x,҉V.8ir8gU!E/^;úmvwgi6jM; hX"3-e\jsSۖ35SBxʤfve0 ]9vȡ  +:bAc::߮x$XVGcdCȵQ.0, ]TPJ ڙ592Qll.IԻӆȐ] q n;KszְoJw] B=36];.`<,+b(dMnE&nuDrYI ؄Mb5#1usפ. }I)4}ӘZ;'Xwxt 4F9&iL.,XLB*ȡ.ӡr1X8 F$l)BX<⍽fc9i"<57F t"sهglFSLaK`=>Ҕx|ՃDgqOFH 8Xa]ĵ,n P@SgVg-WۯJGN G<#6ދ{{,?8@ЋvB3k %%-bS4"wjFBxYI?cP_-B.Q| ӺYuZw“ۜ@L q98OlvIےwE,qlNYU, $G6o6cPь3qKvXQM&nI&6FLd|{ۼH ̠ipbCmJb֚ a:0b{ā..N\ԃЂ^m0\b =M mY0߲] 8l?-#aYwv0B ,SO)os݁ߏE ޅ[iTTpPɉb )=O^\Z 6#qڐBHY}?FidۋBl\ol|D>fN>j)yZu׺a>OfUgRirJDS,MC%Ȓ,W>Xg)A@0h-ӭ=߈P++V4 *D E !ER( ֤t; ӂNG}g`1 LO,Y` ,R1XŊAPH DE X,PYE)A`,C^H‰\@UY“ bXQ'6e#ԮihH2MD1D(!#f`_[%TcRȄ>tH|OA% K8IYr4s[.-r ?JV`6;nL"`ͯSƻml 2DǍB[} Vd4w筜(BDb(halL0#랤gŲ44@ZHY'UqP8!؋t+ 7HK"cW2jRTQg`jZ0lT.ر-J£'jpqiyY漇MPscF˶Co*)e"˵ȖQBQ*=ZjwGnn7mg*zͅus$tj`pkkX\Lw@3{c^y;%vj;{r^W\6:C|%} dLύ%CѼ[ٮ]'܅$^i`\΀I}0l8#\Od6Pv9WC :dx"Iy.³8 Fd6pĝe]enVƔJ"u'apͮk@};%ɛl$4){@c$'}(,Qd70lIiM.wurh\ EE($b*Ŋ",P ')R$DF%d, 0` V$,d(#Z=H a1"F#CFqNaH0% c<36g(˕X$xm/6e_;>A<(11z{wDt.}ہUblm_w:T:$;{ui9+MOS$5!`qj6$\o^֠Hڇ$8>4[][Pһm !f--Qw ׳4o|l损+24abq Uiq0PrՐXJiYw}eUT!b|?3|ǩo$DR,@FDmi;ZZF'%A;~u#hA-{/̗-, k !>W- &doF4 gFd^pkT[)&<ᯠ]ķ aEKQ!%!k+_^b+Y"xa7б>xIlym )J@= CsI""E tݩ~Ie=i"ѕ1 ȑI T(HVP% +@PCC' m ЩRLD^d\B"ɏkIݜ97!!6r|0Ș|09 cSUJL: m~󴾀yy%ZB07h?Cס(b %mBm-mңHUYH' @ L ;,*Ohaq[2EA^'\718Eu=BVǷä V<{(&xqˬJ5ȁ3p"!Z+PBI -Io:0U ^;7x};V@"?._ *4k^s<>p %dE@V F"2,d@a!0"Z׫0Q*gT4QXh֮\(+\BĹO;ɴSIHr5c(JAeEgm,Av&%l)"YdUg!3\.́Q@6%D`0cKo[٥L2`Fѱ=a{( Vh$~C1|Cga5W H)fj70+=TŤ^']0瓑~^^= ?'t*;>Hfs,J0le@yzqƴ}ThHċE]@>*Ɛo&=BNdh_ӧUjzR߼[6j6Qk m&&9Np8#/ يI'p*|o4, y} aq+ T$(,PY!U$b14E,"EQQBYQ 4B}佟|yM)?Z1E{4$c)ժsۦ"il gnhTbl^[ ez4/ߞEDHuDB@> o +D}^ǚI C{߷ރ͡hcp^96\lI&$=QTF)vlER;5Qd@#GT AXk"ŰT+ PDrb! 
`XiIoeR,Il+~I-x=|ȅC!?5B){$ڀfϤ"MlHd$Hp#LxѲwt`1\)HBݬL))bY] :D&uB%i hwvQ&OɊ4H`ŘMl2O97&M:U 4͇4|Q4]@M&&AFeK2`" Aي+hBsDZT@`1҈HRL Qb8j!i9чfWoZvYN[L>jY VRd?)A$Kv 3ݨjR2BD1M}j",rA1߾ 2)]3ZN"mP7;L&ay <0E8yQ,ff9-un 0(5.j!@hB\ P'8cr/yzL4sCLX^skх`C3#^}qq˾DR,JhFD!U|lBygu(fÞR+-6l0WayZlqmc;ͶƑB1-+L2!AuťZ6a'RL9\ HcF&pU *X2@G'U 3@1ԇDV>MiyT܇ DDk(R@E*U)$BEA@**NUU2H(]ߝhWLXlRښh<zsq4ILC8!ņg\Cz9Lo`EJ؄Qȇb2i3i2gYh;*HiI5rB3 Vty8 {`Fͱ4B&SkFCSD&5$X(ơS#25 R;+ip&l!҇ "2"l@XAƲKd+X-I[>%ٱrhJ#_mCL+Q^&IyP 9 UPI3G>Lj82 " AB~Wڍn%ddMGIEd*AЊHBBC\R@#6l/ R(*F1)fjY2f0̺SX!&vih"=`QG /M`{[H@ۭdV>2Dp%;UZpDsH=D:#`:︉)#l:4m u!Tc;6^2_x:~(IsA÷iMh`vJQ*m6~pg$@$۠Lz~mE^uIy8YrڵSJ e! ~*$tbJVHpi؏,oes`o0BH dDQaQĒdP6");+Q4/1B\#MfPO.9*rj6t!L|_Wp-M [Aݳg&>icm| Qh&Л+ t#3AğGk2o 14 Yҫ?a4pGC $V1~Hckjq" 5é JtXvd0\ ܛY}ڌ='CwP::GEUbö'5{}1]m ,[B2ŎH8D(%di*\ȩ2/ʢ˒J}@֘<^ hb 틖YIs⢞'}N{'sM,&c[maNI CC&VUsnoǔ\54킿w*[♍i=v1X'uS9IPīHPH&W,ֱ6ƙO VliwzkXqQ:tTFy~'ƈM#xj"R`GՖ$E}Fsa g · e8XkPi V  )6;P_o9;m܃K}5.,x$kauUZRP>6+ly̭ISy7i̮x"AXqxQ:};vM0|ߍXdK moyM("oƪm6]8D/{0xnH6* \_Odt:g/mq0JZrؙkr!7bYidx(W,ӮDC)-dCȀӗ#PkZƣ7zEHȳlZj}{[ ml81_˩rsF&mX}Z:_W|4s?[gF 6m>C Xbѱ{yz<z=DPPA;#K $&*0Bj6'sVأ (b]dKBB;*(ZTEJ?y} ʑ~w}0Ι6.f%S@4%ٚL.)+ mċNcF.;}NZfi2SKiC}ݴ,]t Mw2CXW腽ntaWM[';vv:D;dX&d{a'։rqA_'!q.b/*;ǑП!$=2D??P=qRq]D;Ƶ3>}㌗2]Oq:~wy?rPd\J;9RH/KVƽ'ϩ_3RՒ= GS'ʨ~FcBjo)d? y!xy! !X},YIfLяy8385żN }rZ2KK?;}Q OWip0뱉vaGlN=!q\I헒}XzNRMEq-v5Gy[]v{Ļ#!n._[|e)[/[k?mzl.1N iO'.~9O&2Oȷm?^1^q_9 mjJY3Fi9\ Ҳn빶:]tZ]R;n-v>>ϫz0v D!*D/W+Wku`['::@ ę}/}1o}}oW qB?|?}C/"-q!zlBџ+x ?CC|W!1_>BǁOk) ~`1臽DquycRx|wcr?y^9q#vKEB]Q뜠=;?Ǯv`?u]fW9Fۗ%ut%^nmob=^ͺl}Uk/0vh{b֑_;N]`cNy0&JQGv}jX'ǩŮ~uK+bizWfcg~9Aȼ9HH~ [i)j#0~=}y vc-:9ԞVO:MU_zrq6e->zH}۷Z1ݳ:E9WU4*mwֿ@Qy~Z=V)=}w9Uc53:^vf~l߿zm7`Yo"ӝG;jaרZ}n6oWcY1 yaNw[,dEa_Wپ](|JFњ7^T㞣^V[viy Ms_K Sh{9: γG2K$U/5;S%sOS{t\lvc՚Ȗu]J{]ڏ+Y+'͠Z@Zw_Ʒ_9_9_9}|bOW o<6d]+$4àЌP!~[/ 'OTP64\0Y7'O O KKCepAPGJKJӚ%%)ph o NQM'G2d'X9ႊlч腬x+V*!+]*WjJA[}ZK?!庋܅Q0ӁEwO=kϹ?Wy>ɍa$dt>"YY84uza0RPRhlBSYh25 Tg:8e;*Ttq<8RiMllknϹcr7dL]^وv |¹Jg}i=ƃ,:)8jXKK "W*Pcl8ωiI3)ӄg=/^ ,#,' $"&!HXKxPA$T6 5-ZVe6vN.n^˄W^#' $"Nxp&ᯄ"M;»#E1$'|H#DŽ>!|Jp9 —!|]5v$PB[BINcӞJ. ^t/"ОۛZ(- Z.%%#'\F@H \ND93 B•LBWB7BwBBOB/BoBApb!x > \ECp /A 9~A!\0pµQ:ф1q7n"LH$LNg~+a*a mc29Z/ (3{ÂT:EH/N ZRizk!!_Su:@=$,D-xzi Xh͒f Kf:su G&M(xPCB@Y\rp%\p%\b's8Np29d's8.p19\bs.p39f7snp3919|cs>39g?s~39`s#0G9G#+4E)Mc^S ð9 ð9 ð9 ð9 ð9 Ӱ9 Ӱ9 Ӱ9 Ӱ9 Ӱ9 ˰ ˰ ˰ ˰ ˰ ۰ ۰ ۰ ۰ ۰ ǰy ǰy ǰy ǰy ǰy װy װy װy װy װy ϰ ϰ ϰ ϰ ϰ ߰ ߰ ߰ ߰ ߰ [ [ [ [ [\\rK{+L$ W cwCstrucchange/data/USIncExp.rda0000644000175400001440000000623013550277443016025 0ustar zeileisusers]te" vQ, q- :* "I e{fR&Bz3=(`]TX]+79swytʛV q ~3zQ1CbCkn0Mwzho1-7c?ưW a ۏ܃? '<]oa?sGAވ K1]㣰?<^0 ͩ7cgRhV~NPzKX-?a' GB2^A'˞wJ ?~KTKeO2CO{& R@{2IIxI;XV~ykB<$] pȸIiQ~=6Ƌ8_[voC򧮕=>+yV%]J%]Zti3Y+xl CF&yˠv-ߕ+]&))xeI=2θκ\|Y"oˊ'=_yWHpڝ~ɊyU<П99ű>.^ =/{W9y+~G_Ҏ|C\+}* ;G› ׅ{Un!hE+yUzS|Ł_z]P<(a,eP9ղU*oi< +hG[1գ2W\{W+_NU5*FqUKūUG|OCo1kMHykՎ+}$p.vk7i`>K! .qQw>S |_NF?oFmxθk8doLξdP&4͆W؏+y3k^kۅ-~RmyU| |^${+ui} 6aXmKYp֫w; v^wLY@ ;E _)od;ߙP{*c}WWK1O~wlE*xX~(c ˹T:)ɸvP`$dtȻ|Gt ~CŻoSj+\r`z[y]_Z>ssp޽E:M^p?vYݯLܟxxsO| <'R9^Q^waV=\Zy}cTo|3-P>Ϸ |7(\؏6)W:;G Ay/P\ UP{'ܭ<=ދ{}EP9cT7WǔeM׬<}OW|?M_^U7+0wUn#q}q`H(ܩ<&'}%ĵ,F~2{uX8x]m[ ?5>T4M"qμLޙ>{{Yťdzd0^2̃]׮dϜ̻Ϥ3y0kYKZ2γJ,gQNlŲgoWj/0r˟(O[ʛy$2ra-=s.~ϸ?և? 
؛Yz^xw"JW}xp]ż{X¼/'](Kҗ6]e#ʖ^!}ҲO.T߀SWpX V/aC+Ǖ>*̣J1*P<8-USTٕ}joz+v+|D5=@:Up%NnOfx MS ޝj/vެrjw`?Vz_7 P ^ ]H_^'}}*;] 57,J}; F֫FF+ؽO#CuMԷ)^MM׼FX7¹2λw0{5:V筼o6Ai㽽mwbg>݇?~@;%lgϕ2<:&L@c爿ctU\:V2;ѝ3r~qyO Q{:ѕ.A쓝o*c쇎g*D:89xtiP='Us8Q;QpjYss5ȵApw6W(\8;RV:*Bz^=ǽ_s3Oߩq43#(ToXqt'Vy{_'o}%~MrTyJ鞣򻗀9uGwVy{~k7K 7:%$3 ~C&.͸km];]uk7\Lۇ`rV u3cǮٲdC~[L::D} strucchange/man/0000755000175400001440000000000013550050707013530 5ustar zeileisusersstrucchange/man/sctest.Rd0000644000175400001440000000355313062350355015332 0ustar zeileisusers\name{sctest} \alias{sctest} \title{Structural Change Tests} \description{ Generic function for performing structural change tests. } \usage{ sctest(x, \dots) } \arguments{ \item{x}{an object.} \item{\dots}{arguments passed to methods.} } \details{ \code{sctest} is a generic function for performing/extracting structural change tests based on various types of objects. The \code{strucchange} package provides various types of methods. First, structural change tests based on F statistics in linear regression models (\code{\link{Fstats}}), empirical fluctuation processes in linear regression models (\code{\link{efp}}), and generalized empirical fluctuation processes in parametric models (\code{\link{gefp}}) are available in the corresponding \code{sctest} methods. Second, convenience interfaces for carrying out structural change tests in linear regression models and general parametric models are provided in \code{\link{sctest.formula}} and \code{\link{sctest.default}}, respectively. } \value{ An object of class \code{"htest"} containing: \item{statistic}{the test statistic,} \item{p.value}{the corresponding p value,} \item{method}{a character string with the method used,} \item{data.name}{a character string with the data name.} } \references{ Zeileis A., Leisch F., Hornik K., Kleiber C. (2002), \code{strucchange}: An R Package for Testing for Structural Change in Linear Regression Models, \emph{Journal of Statistical Software}, \bold{7}(2), 1-38. URL \url{http://www.jstatsoft.org/v07/i02/}. Zeileis A. (2006), Implementing a Class of Structural Change Tests: An Econometric Computing Approach. \emph{Computational Statistics & Data Analysis}, \bold{50}, 2987--3008. doi:10.1016/j.csda.2005.07.001. } \seealso{\code{\link{sctest.formula}}, \code{\link{sctest.default}}, \code{\link{sctest.Fstats}}, \code{\link{sctest.efp}}, \code{\link{sctest.gefp}}} strucchange/man/gefp.Rd0000644000175400001440000000743413062350355014750 0ustar zeileisusers\name{gefp} \alias{gefp} \alias{print.gefp} \alias{sctest.gefp} \alias{plot.gefp} \alias{time.gefp} \alias{print.gefp} \title{Generalized Empirical M-Fluctuation Processes} \description{Computes an empirical M-fluctuation process from the scores of a fitted model.} \usage{ gefp(\dots, fit = glm, scores = estfun, vcov = NULL, decorrelate = TRUE, sandwich = TRUE, order.by = NULL, fitArgs = NULL, parm = NULL, data = list()) } \arguments{ \item{\dots}{specification of some model which is passed together with \code{data} to the \code{fit} function: \code{fm <- fit(\dots, data = data)}. 
If \code{fit} is set to \code{NULL} the first argument \code{\dots} is assumed to be already the fitted model \code{fm} (all other arguments in \code{\dots} are ignored and a warning is issued in this case).} \item{fit}{a model fitting function, typically \code{\link{lm}}, \code{\link{glm}} or \code{\link[MASS]{rlm}}.} \item{scores}{a function which extracts the scores or estimating function from the fitted object: \code{scores(fm)}.} \item{vcov}{a function to extract the covariance matrix for the coefficients of the fitted model: \code{vcov(fm, order.by = order.by, data = data)}.} \item{decorrelate}{logical. Should the process be decorrelated?} \item{sandwich}{logical. Is the function \code{vcov} the full sandwich estimator or only the meat?} \item{order.by}{Either a vector \code{z} or a formula with a single explanatory variable like \code{~ z}. The observations in the model are ordered by the size of \code{z}. If set to \code{NULL} (the default) the observations are assumed to be ordered (e.g., a time series).} \item{fitArgs}{List of additional arguments which could be passed to the \code{fit} function. Usually, this is not needed and \code{\dots} will be sufficient to pass arguments to \code{fit}.} \item{parm}{integer or character specifying the component of the estimating functions which should be used (by default all components are used).} \item{data}{an optional data frame containing the variables in the \code{\dots} specification and the \code{order.by} model. By default the variables are taken from the environment which \code{gefp} is called from.} } \value{ \code{gefp} returns a list of class \code{"gefp"} with components including: \item{process}{the fitted empirical fluctuation process of class \code{"zoo"},} \item{nreg}{the number of regressors,} \item{nobs}{the number of observations,} \item{fit}{the fit function used,} \item{scores}{the scores function used,} \item{fitted.model}{the fitted model.} } \references{ Zeileis A. (2005), A Unified Approach to Structural Change Tests Based on ML Scores, F Statistics, and OLS Residuals. \emph{Econometric Reviews}, \bold{24}, 445--466. doi:10.1080/07474930500406053. Zeileis A. (2006), Implementing a Class of Structural Change Tests: An Econometric Computing Approach. \emph{Computational Statistics & Data Analysis}, \bold{50}, 2987--3008. doi:10.1016/j.csda.2005.07.001. Zeileis A., Hornik K. (2007), Generalized M-Fluctuation Tests for Parameter Instability, \emph{Statistica Neerlandica}, \bold{61}, 488--508. doi:10.1111/j.1467-9574.2007.00371.x. Zeileis A., Shah A., Patnaik I. (2010), Testing, Monitoring, and Dating Structural Changes in Exchange Rate Regimes, \emph{Computational Statistics and Data Analysis}, \bold{54}(6), 1696--1706. doi:10.1016/j.csda.2009.12.005. } \seealso{\code{\link{efp}}, \code{\link{efpFunctional}}} \examples{ data("BostonHomicide") gcus <- gefp(homicides ~ 1, family = poisson, vcov = kernHAC, data = BostonHomicide) plot(gcus, aggregate = FALSE) gcus sctest(gcus) } \concept{M-fluctuation} \concept{fluctuation test} \concept{maximum likelihood scores} \concept{structural change} \keyword{regression} strucchange/man/GermanM1.Rd0000644000175400001440000000710313062350355015427 0ustar zeileisusers\name{GermanM1} \alias{GermanM1} \alias{historyM1} \alias{monitorM1} \docType{data} \encoding{latin1} \title{German M1 Money Demand} \usage{data("GermanM1")} \description{ German M1 money demand. 
}
\format{
\code{GermanM1} is a data frame containing 12 quarterly time series from 1961(1) to 1995(4) and two further variables. \code{historyM1} is the subset of \code{GermanM1} up to 1990(2), i.e., the data before the German monetary unification on 1990-06-01. \code{monitorM1} is the complement of \code{historyM1}, i.e., the data after the unification. All three data frames contain the variables
\describe{
\item{m}{time series. Logarithm of real M1 per capita,}
\item{p}{time series. Logarithm of a price index,}
\item{y}{time series. Logarithm of real per capita gross national product,}
\item{R}{time series. Long-run interest rate,}
\item{dm}{time series. First differences of \code{m},}
\item{dy2}{time series. First differences of lag 2 of \code{y},}
\item{dR}{time series. First differences of \code{R},}
\item{dR1}{time series. First differences of lag 1 of \code{R},}
\item{dp}{time series. First differences of \code{p},}
\item{m1}{time series. Lag 1 of \code{m},}
\item{y1}{time series. Lag 1 of \code{y},}
\item{R1}{time series. Lag 1 of \code{R},}
\item{season}{factor coding the seasonality,}
\item{ecm.res}{vector containing the OLS residuals of the Lütkepohl et al. (1999) model fitted in the history period.}
}
}
\details{
Lütkepohl et al. (1999) investigate the linearity and stability of German M1 money demand: they find a stable regression relation for the time before the monetary union on 1990-06-01 but a clear structural instability afterwards.

Zeileis et al. (2005) use a model with \code{ecm.res} instead of \code{m1}, \code{y1} and \code{R1}, which leads to equivalent results in the history period but slightly different results in the monitoring period. The reason for the replacement is that stationary regressors are needed for the structural change tests. See references and the examples below for more details.
}
\source{The data is provided by the German central bank and is available online in the data archive of the Journal of Applied Econometrics \url{http://qed.econ.queensu.ca/jae/1999-v14.5/lutkepohl-terasvirta-wolters/}.}
\references{
Lütkepohl H., Teräsvirta T., Wolters J. (1999), Investigating Stability and Linearity of a German M1 Money Demand Function, \emph{Journal of Applied Econometrics}, \bold{14}, 511-525.

Zeileis A., Leisch F., Kleiber C., Hornik K. (2005), Monitoring Structural Change in Dynamic Econometric Models, \emph{Journal of Applied Econometrics}, \bold{20}, 99--121.
}
\examples{
data("GermanM1")

## Lütkepohl et al. (1999) use the following model
LTW.model <- dm ~ dy2 + dR + dR1 + dp + m1 + y1 + R1 + season
## Zeileis et al.
(2005) use M1.model <- dm ~ dy2 + dR + dR1 + dp + ecm.res + season ## historical tests ols <- efp(LTW.model, data = GermanM1, type = "OLS-CUSUM") plot(ols) re <- efp(LTW.model, data = GermanM1, type = "fluctuation") plot(re) fs <- Fstats(LTW.model, data = GermanM1, from = 0.1) plot(fs) ## monitoring M1 <- historyM1 ols.efp <- efp(M1.model, type = "OLS-CUSUM", data = M1) newborder <- function(k) 1.5778*k/118 ols.mefp <- mefp(ols.efp, period = 2) ols.mefp2 <- mefp(ols.efp, border = newborder) M1 <- GermanM1 ols.mon <- monitor(ols.mefp) ols.mon2 <- monitor(ols.mefp2) plot(ols.mon) lines(boundary(ols.mon2), col = 2) ## dating bp <- breakpoints(LTW.model, data = GermanM1) summary(bp) plot(bp) plot(fs) lines(confint(bp)) } \keyword{datasets} strucchange/man/supLM.Rd0000644000175400001440000000614413062350355015064 0ustar zeileisusers\name{supLM} \alias{supLM} \alias{maxMOSUM} \title{Generators for efpFunctionals along Continuous Variables} \description{ Generators for \code{efpFunctional} objects suitable for aggregating empirical fluctuation processes to test statistics along continuous variables (i.e., along time in time series applications). } \usage{ supLM(from = 0.15, to = NULL) maxMOSUM(width = 0.15) } \arguments{ \item{from, to}{numeric from interval (0, 1) specifying start and end of trimmed sample period. By default, \code{to} is \code{1 - from}, i.e., with the default \code{from = 0.15} the first and last 15 percent of observations are trimmed.} \item{width}{a numeric from interval (0,1) specifying the bandwidth. Determines the size of the moving data window relative to sample size.} } \details{ \code{supLM} and \code{maxMOSUM} generate \code{\link{efpFunctional}} objects for Andrews' supLM test and a (maximum) MOSUM test, respectively, with the specified optional parameters (\code{from} and \code{to}, and \code{width}, respectively). The resulting objects can be used in combination with empirical fluctuation processes of class \code{\link{gefp}} for significance testing and visualization. The corresponding statistics are useful for carrying out structural change tests along a continuous variable (i.e., along time in time series applications). Further typical \code{\link{efpFunctional}}s for this setting are the double-maximum functional \code{\link{maxBB}} and the Cramer-von Mises functional \code{\link{meanL2BB}}. } \value{ An object of class \code{efpFunctional}. } \references{ Merkle E.C., Zeileis A. (2013), Tests of Measurement Invariance without Subgroups: A Generalization of Classical Methods. \emph{Psychometrika}, \bold{78}(1), 59--82. doi:10.1007/S11336-012-9302-4 Zeileis A. (2005), A Unified Approach to Structural Change Tests Based on ML Scores, F Statistics, and OLS Residuals. \emph{Econometric Reviews}, \bold{24}, 445--466. doi:10.1080/07474930500406053. Zeileis A. (2006), Implementing a Class of Structural Change Tests: An Econometric Computing Approach. \emph{Computational Statistics & Data Analysis}, \bold{50}, 2987--3008. doi:10.1016/j.csda.2005.07.001. Zeileis A., Hornik K. (2007), Generalized M-Fluctuation Tests for Parameter Instability, \emph{Statistica Neerlandica}, \bold{61}, 488--508. doi:10.1111/j.1467-9574.2007.00371.x. 
} \seealso{\code{\link{efpFunctional}}, \code{\link{gefp}}} \examples{ ## seatbelt data data("UKDriverDeaths") seatbelt <- log10(UKDriverDeaths) seatbelt <- cbind(seatbelt, lag(seatbelt, k = -1), lag(seatbelt, k = -12)) colnames(seatbelt) <- c("y", "ylag1", "ylag12") seatbelt <- window(seatbelt, start = c(1970, 1), end = c(1984,12)) ## empirical fluctuation process scus.seat <- gefp(y ~ ylag1 + ylag12, data = seatbelt) ## supLM test plot(scus.seat, functional = supLM(0.1)) ## MOSUM test plot(scus.seat, functional = maxMOSUM(0.25)) ## double maximum test plot(scus.seat) ## range test plot(scus.seat, functional = rangeBB) ## Cramer-von Mises statistic (Nyblom-Hansen test) plot(scus.seat, functional = meanL2BB) } \keyword{regression} strucchange/man/efp.Rd0000644000175400001440000001774713062350355014605 0ustar zeileisusers\name{efp} \alias{efp} \alias{print.efp} \encoding{latin1} \title{Empirical Fluctuation Processes} \description{Computes an empirical fluctuation process according to a specified method from the generalized fluctuation test framework, which includes CUSUM and MOSUM tests based on recursive or OLS residuals, parameter estimates or ML scores (OLS first order conditions).} \usage{ efp(formula, data, type = "Rec-CUSUM", h = 0.15, dynamic = FALSE, rescale = TRUE, lrvar = FALSE, vcov = NULL)} \arguments{ \item{formula}{a symbolic description for the model to be tested.} \item{data}{an optional data frame containing the variables in the model. By default the variables are taken from the environment which \code{efp} is called from.} \item{type}{specifies which type of fluctuation process will be computed, the default is \code{"Rec-CUSUM"}. For details see below.} \item{h}{a numeric from interval (0,1) specifying the bandwidth. Determines the size of the data window relative to sample size (for MOSUM and ME processes only).} \item{dynamic}{logical. If \code{TRUE} the lagged observations are included as a regressor.} \item{rescale}{logical. If \code{TRUE} the estimates will be standardized by the regressor matrix of the corresponding subsample according to Kuan & Chen (1994); if \code{FALSE} the whole regressor matrix will be used. (only if \code{type} is either \code{"RE"} or \code{"ME"})} \item{lrvar}{logical or character. Should a long-run variance estimator be used for the residuals? By default, the standard OLS variance is employed. Alternatively, \code{\link[sandwich]{lrvar}} can be used. If \code{lrvar} is character (\code{"Andrews"} or \code{"Newey-West"}), then the corresponding \code{type} of long-run variance is used. (The argument is ignored for the score-based tests where \code{\link{gefp}} should be used instead.)} \item{vcov}{a function to extract the covariance matrix for the coefficients of the fitted model (only for \code{"RE"} and \code{"ME"}).} } \details{If \code{type} is one of \code{"Rec-CUSUM"}, \code{"OLS-CUSUM"}, \code{"Rec-MOSUM"} or \code{"OLS-MOSUM"} the function \code{efp} will return a one-dimensional empirical process of sums of residuals. Either it will be based on recursive residuals or on OLS residuals and the process will contain CUmulative SUMs or MOving SUMs of residuals in a certain data window. For the MOSUM and ME processes all estimations are done for the observations in a moving data window, whose size is determined by \code{h} and which is shifted over the whole sample.
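For illustration, a moving-sum process of OLS residuals with a window covering 30 percent of the sample could be computed as follows (a minimal sketch using the Nile series that also appears in the examples below): \preformatted{
data("Nile")
## OLS-based MOSUM process with bandwidth h = 0.3
mos.nile <- efp(Nile ~ 1, type = "OLS-MOSUM", h = 0.3)
plot(mos.nile)      ## process with boundaries
sctest(mos.nile)    ## corresponding significance test
}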
If \code{type} is either \code{"RE"} or \code{"ME"} a \emph{k}-dimensional process will be returned, where \emph{k} is the number of regressors in the model, as it is based on recursive OLS estimates of the regression coefficients or moving OLS estimates respectively. The recursive estimates test is also called fluctuation test, therefore setting \code{type} to \code{"fluctuation"} was used to specify it in earlier versions of strucchange. It still can be used now, but will be forced to \code{"RE"}. If \code{type} is \code{"Score-CUSUM"} or \code{"Score-MOSUM"} a \emph{k+1}-dimensional process will be returned, one for each score of the regression coefficients and one for the scores of the variance. The process gives the decorrelated cumulative sums of the ML scores (in a Gaussian model) or first order conditions respectively (in an OLS framework). If there is a single structural change point \eqn{t^*}, the recursive CUSUM path starts to depart from its mean 0 at \eqn{t^*}. The Brownian bridge type paths will have their respective peaks around \eqn{t^*}. The Brownian bridge increments type paths should have a strong change at \eqn{t^*}. The function \code{\link{plot}} has a method to plot the empirical fluctuation process; with \code{sctest} the corresponding test on structural change can be performed. } \value{ \code{efp} returns a list of class \code{"efp"} with components including: \item{process}{the fitted empirical fluctuation process of class \code{"ts"} or \code{"mts"} respectively,} \item{type}{a string with the \code{type} of the process fitted,} \item{nreg}{the number of regressors,} \item{nobs}{the number of observations,} \item{par}{the bandwidth \code{h} used.} } \references{ Brown R.L., Durbin J., Evans J.M. (1975), Techniques for testing constancy of regression relationships over time, \emph{Journal of the Royal Statistical Society}, B, \bold{37}, 149-163. Chu C.-S., Hornik K., Kuan C.-M. (1995), MOSUM tests for parameter constancy, \emph{Biometrika}, \bold{82}, 603-617. Chu C.-S., Hornik K., Kuan C.-M. (1995), The moving-estimates test for parameter stability, \emph{Econometric Theory}, \bold{11}, 669-720. Hansen B. (1992), Testing for Parameter Instability in Linear Models, \emph{Journal of Policy Modeling}, \bold{14}, 517-533. Hjort N.L., Koning A. (2002), Tests for Constancy of Model Parameters Over Time, \emph{Nonparametric Statistics}, \bold{14}, 113-132. Krämer W., Ploberger W., Alt R. (1988), Testing for structural change in dynamic models, \emph{Econometrica}, \bold{56}, 1355-1369. Kuan C.-M., Hornik K. (1995), The generalized fluctuation test: A unifying view, \emph{Econometric Reviews}, \bold{14}, 135-161. Kuan C.-M., Chen (1994), Implementing the fluctuation and moving estimates tests in dynamic econometric models, \emph{Economics Letters}, \bold{44}, 235-239. Ploberger W., Krämer W. (1992), The CUSUM test with OLS residuals, \emph{Econometrica}, \bold{60}, 271-285. Zeileis A., Leisch F., Hornik K., Kleiber C. (2002), \code{strucchange}: An R Package for Testing for Structural Change in Linear Regression Models, \emph{Journal of Statistical Software}, \bold{7}(2), 1-38. URL \url{http://www.jstatsoft.org/v07/i02/}. Zeileis A. (2005), A Unified Approach to Structural Change Tests Based on ML Scores, F Statistics, and OLS Residuals. \emph{Econometric Reviews}, \bold{24}, 445--466. doi:10.1080/07474930500406053. Zeileis A. (2006), Implementing a Class of Structural Change Tests: An Econometric Computing Approach.
\emph{Computational Statistics & Data Analysis}, \bold{50}, 2987--3008. doi:10.1016/j.csda.2005.07.001. Zeileis A., Hornik K. (2007), Generalized M-Fluctuation Tests for Parameter Instability, \emph{Statistica Neerlandica}, \bold{61}, 488--508. doi:10.1111/j.1467-9574.2007.00371.x. } \seealso{\code{\link{gefp}}, \code{\link{plot.efp}}, \code{\link{print.efp}}, \code{\link{sctest.efp}}, \code{\link{boundary.efp}}} \examples{ ## Nile data with one breakpoint: the annual flows drop in 1898 ## because the first Aswan dam was built data("Nile") plot(Nile) ## test the null hypothesis that the annual flow remains constant ## over the years ## compute OLS-based CUSUM process and plot ## with standard and alternative boundaries ocus.nile <- efp(Nile ~ 1, type = "OLS-CUSUM") plot(ocus.nile) plot(ocus.nile, alpha = 0.01, alt.boundary = TRUE) ## calculate corresponding test statistic sctest(ocus.nile) ## UK Seatbelt data: a SARIMA(1,0,0)(1,0,0)_12 model ## (fitted by OLS) is used and reveals (at least) two ## breakpoints - one in 1973 associated with the oil crisis and ## one in 1983 due to the introduction of compulsory ## wearing of seatbelts in the UK. data("UKDriverDeaths") seatbelt <- log10(UKDriverDeaths) seatbelt <- cbind(seatbelt, lag(seatbelt, k = -1), lag(seatbelt, k = -12)) colnames(seatbelt) <- c("y", "ylag1", "ylag12") seatbelt <- window(seatbelt, start = c(1970, 1), end = c(1984,12)) plot(seatbelt[,"y"], ylab = expression(log[10](casualties))) ## use RE process re.seat <- efp(y ~ ylag1 + ylag12, data = seatbelt, type = "RE") plot(re.seat) plot(re.seat, functional = NULL) sctest(re.seat) } \concept{CUSUM} \concept{MOSUM} \concept{recursive estimates} \concept{moving estimates} \concept{fluctuation test} \concept{maximum likelihood scores} \concept{structural change} \keyword{regression} strucchange/man/BostonHomicide.Rd0000644000175400001440000000506013550277371016736 0ustar zeileisusers\name{BostonHomicide} \alias{BostonHomicide} \title{Youth Homicides in Boston} \usage{data("BostonHomicide")} \description{ Data about the number of youth homicides in Boston during the `Boston Gun Project'---a policing initiative aiming at lowering homicide victimization among young people in Boston. } \format{ A data frame containing 6 monthly time series and two factors coding seasonality and year, respectively. \describe{ \item{homicides}{time series. Number of youth homicides.} \item{population}{time series. Boston population (aged 25-44), linearly interpolated from annual data.} \item{populationBM}{time series. Population of black males (aged 15-24), linearly interpolated from annual data.} \item{ahomicides25}{time series. Number of adult homicides (aged 25 and older).} \item{ahomicides35}{time series. Number of adult homicides (aged 35-44).} \item{unemploy}{time series. Teen unemployment rate (in percent).} \item{season}{factor coding the month.} \item{year}{factor coding the year.} } } \details{The `Boston Gun Project' is a policing initiative aiming at lowering youth homicides in Boston. The project began in early 1995 and implemented the so-called `Operation Ceasefire' intervention which began in the late spring of 1996. } \source{Piehl et al. (2003), Figure 1, Figure 3, and Table 1. From the table it is not clear how the data should be linearly interpolated.
Here, it was chosen to use the given observations for July of the corresponding year and then use \code{\link{approx}} with \code{rule = 2}. } \references{ Piehl A.M., Cooper S.J., Braga A.A., Kennedy D.M. (2003), Testing for Structural Breaks in the Evaluation of Programs, \emph{The Review of Economics and Statistics}, \bold{85}(3), 550-558. Kennedy D.M., Piehl A.M., Braga A.A. (1996), Youth Violence in Boston: Gun Markets, Serious Youth Offenders, and a Use-Reduction Strategy, \emph{Law and Contemporary Problems}, \bold{59}, 147-183. } \examples{ data("BostonHomicide") attach(BostonHomicide) ## data from Table 1 tapply(homicides, year, mean) populationBM[0:6*12 + 7] tapply(ahomicides25, year, mean) tapply(ahomicides35, year, mean) population[0:6*12 + 7] unemploy[0:6*12 + 7] ## model A ## via OLS fmA <- lm(homicides ~ populationBM + season) anova(fmA) ## as GLM fmA1 <- glm(homicides ~ populationBM + season, family = poisson) anova(fmA1, test = "Chisq") ## model B & C fmB <- lm(homicides ~ populationBM + season + ahomicides25) fmC <- lm(homicides ~ populationBM + season + ahomicides25 + unemploy) detach(BostonHomicide) } \keyword{datasets} strucchange/man/efpFunctional.Rd0000644000175400001440000001426713062350355016626 0ustar zeileisusers\name{efpFunctional} \alias{efpFunctional} \alias{simulateBMDist} \alias{maxBM} \alias{maxBB} \alias{maxBMI} \alias{maxBBI} \alias{maxL2BB} \alias{meanL2BB} \alias{rangeBM} \alias{rangeBB} \alias{rangeBMI} \alias{rangeBBI} \title{Functionals for Fluctuation Processes} \description{Computes an object for aggregating, plotting and testing empirical fluctuation processes.} \usage{ efpFunctional(functional = list(comp = function(x) max(abs(x)), time = max), boundary = function(x) rep(1, length(x)), computePval = NULL, computeCritval = NULL, plotProcess = NULL, lim.process = "Brownian bridge", nobs = 10000, nrep = 50000, nproc = 1:20, h = 0.5, probs = c(0:84/100, 850:1000/1000)) } \arguments{ \item{functional}{either a function for aggregating fluctuation processes or a list with two functions named \code{"comp"} and \code{"time"}.} \item{boundary}{a boundary function.} \item{computePval}{a function for computing p values. If neither \code{computePval} nor \code{computeCritval} are specified critical values are simulated with settings as specified below.} \item{computeCritval}{a function for computing critical values. If neither \code{computePval} nor \code{computeCritval} are specified critical values are simulated with settings as specified below.} \item{plotProcess}{a function for plotting the empirical process, if set to \code{NULL} a suitable function is set up.} \item{lim.process}{a string specifying the limiting process.} \item{nobs}{integer specifying the number of observations of each Brownian motion simulated.} \item{nrep}{integer specifying the number of replications.} \item{nproc}{integer specifying for which number of processes Brownian motions should be simulated. If set to \code{NULL} only \code{nproc = 1} is used and all other values are derived from a Bonferroni correction.} \item{h}{bandwidth parameter for increment processes.} \item{probs}{numeric vector specifying for which probabilities critical values should be tabulated.} } \details{ \code{efpFunctional} computes an object of class \code{"efpFunctional"} which then knows how to do inference based on empirical fluctuation processes (currently only for \code{\link{gefp}} objects and not yet for \code{\link{efp}} objects) and how to visualize the corresponding processes.
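As a brief sketch of how such an object can be set up and used (assuming a fitted \code{\link{gefp}} process such as \code{scus.seat} from the examples below), a custom functional aggregating the process by its range can be supplied via the \code{functional} argument; its critical values are then simulated with the default settings, which may take a moment: \preformatted{
## custom efpFunctional: aggregate the fluctuation process by its range
myRange <- efpFunctional(functional = function(x) diff(range(x)))
## apply it to a fitted "gefp" process for testing and plotting
sctest(scus.seat, functional = myRange)
plot(scus.seat, functional = myRange)
}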
\code{efpFunctional}s for many frequently used test statistics are provided: \code{\link{maxBB}} for the double maximum statistic, \code{\link{meanL2BB}} for the Cramer-von Mises statistic, or \code{\link{rangeBB}} for the range statistic. Furthermore, \code{\link{supLM}} generates an object of class \code{"efpFunctional"} for a certain trimming parameter, see the examples. More details can be found in Zeileis (2006). Based on Merkle, Fan, and Zeileis (2014), further \code{efpFunctional} generators for aggregating along (ordered) categorical variables have been added: \code{\link{catL2BB}}, \code{\link{ordL2BB}}, \code{\link{ordwmax}}. For setting up an \code{efpFunctional}, the functions \code{computeStatistic}, \code{computePval}, and \code{plotProcess} need to be supplied. These should have the following interfaces: \code{computeStatistic} should take a single argument which is the process itself, i.e., essentially an n x k matrix where n is the number of observations and k the number of processes (regressors). \code{computePval} should take two arguments: a scalar test statistic and the number of processes k. \code{plotProcess} should take two arguments: an object of class \code{"gefp"} and \code{alpha}, the level of significance for any boundaries or critical values to be visualized. } \value{ \code{efpFunctional} returns a list of class \code{"efpFunctional"} with components including: \item{plotProcess}{a function for plotting empirical fluctuation processes,} \item{computeStatistic}{a function for computing a test statistic from an empirical fluctuation process,} \item{computePval}{a function for computing the corresponding p value,} \item{computeCritval}{a function for computing critical values.} } \references{ Merkle E.C., Zeileis A. (2013), Tests of Measurement Invariance without Subgroups: A Generalization of Classical Methods. \emph{Psychometrika}, \bold{78}(1), 59--82. doi:10.1007/S11336-012-9302-4. Merkle E.C., Fan J., Zeileis A. (2014), Testing for Measurement Invariance with Respect to an Ordinal Variable. \emph{Psychometrika}, \bold{79}(4), 569--584. doi:10.1007/S11336-013-9376-7. Zeileis A. (2005), A Unified Approach to Structural Change Tests Based on ML Scores, F Statistics, and OLS Residuals. \emph{Econometric Reviews}, \bold{24}, 445--466. doi:10.1080/07474930500406053. Zeileis A. (2006), Implementing a Class of Structural Change Tests: An Econometric Computing Approach. \emph{Computational Statistics & Data Analysis}, \bold{50}, 2987--3008. doi:10.1016/j.csda.2005.07.001. Zeileis A., Hornik K. (2007), Generalized M-Fluctuation Tests for Parameter Instability, \emph{Statistica Neerlandica}, \bold{61}, 488--508. doi:10.1111/j.1467-9574.2007.00371.x.
} \seealso{\code{\link{gefp}}, \code{\link{supLM}}, \code{\link{catL2BB}}, \code{\link{sctest.default}}} \examples{ data("BostonHomicide") gcus <- gefp(homicides ~ 1, family = poisson, vcov = kernHAC, data = BostonHomicide) plot(gcus, functional = meanL2BB) gcus sctest(gcus, functional = meanL2BB) y <- rnorm(1000) x1 <- runif(1000) x2 <- runif(1000) ## supWald statistic computed by Fstats() fs <- Fstats(y ~ x1 + x2, from = 0.1) plot(fs) sctest(fs) ## compare with supLM statistic scus <- gefp(y ~ x1 + x2, fit = lm) plot(scus, functional = supLM(0.1)) sctest(scus, functional = supLM(0.1)) ## seatbelt data data("UKDriverDeaths") seatbelt <- log10(UKDriverDeaths) seatbelt <- cbind(seatbelt, lag(seatbelt, k = -1), lag(seatbelt, k = -12)) colnames(seatbelt) <- c("y", "ylag1", "ylag12") seatbelt <- window(seatbelt, start = c(1970, 1), end = c(1984,12)) scus.seat <- gefp(y ~ ylag1 + ylag12, data = seatbelt) ## double maximum test plot(scus.seat) ## range test plot(scus.seat, functional = rangeBB) ## Cramer-von Mises statistic (Nyblom-Hansen test) plot(scus.seat, functional = meanL2BB) ## supLM test plot(scus.seat, functional = supLM(0.1)) } \keyword{regression} strucchange/man/sctest.formula.Rd0000644000175400001440000001000413062350355016763 0ustar zeileisusers\name{sctest.formula} \alias{sctest.formula} \title{Structural Change Tests in Linear Regression Models} \description{Performs tests for structural change in linear regression models.} \usage{ \method{sctest}{formula}(formula, type = "Rec-CUSUM", h = 0.15, alt.boundary = FALSE, functional = c("max", "range", "maxL2", "meanL2"), from = 0.15, to = NULL, point = 0.5, asymptotic = FALSE, data, ...) } \arguments{ \item{formula}{a formula describing the model to be tested.} \item{type}{a character string specifying the structural change test that is to be performed, the default is \code{"Rec-CUSUM"}. Besides the test types described in \code{\link{efp}} and \code{\link{sctest.Fstats}} the Chow test and the Nyblom-Hansen test can be performed by setting type to \code{"Chow"} or \code{"Nyblom-Hansen"}, respectively.} \item{h}{numeric from interval (0,1) specifying the bandwidth. Determines the size of the data window relative to the sample size (for MOSUM and ME tests only).} \item{alt.boundary}{logical. If set to \code{TRUE} alternative boundaries (instead of the standard linear boundaries) will be used (for CUSUM processes only).} \item{functional}{indicates which functional should be used to aggregate the empirical fluctuation processes to a test statistic.} \item{from, to}{numeric. If \code{from} is smaller than 1 they are interpreted as percentages of data and by default \code{to} is taken to be 1 - \code{from}. F statistics will be calculated for the observations \code{(n*from):(n*to)}, when \code{n} is the number of observations in the model. If \code{from} is greater than 1 it is interpreted to be the index and \code{to} defaults to \code{n - from}. (for F tests only)} \item{point}{parameter of the Chow test for the potential change point. Interpreted analogously to the \code{from} parameter. By default taken to be \code{floor(n*0.5)} if \code{n} is the number of observations in the model.} \item{asymptotic}{logical. If \code{TRUE} the asymptotic (chi-square) distribution instead of the exact (F) distribution will be used to compute the p value (for Chow test only).} \item{data}{an optional data frame containing the variables in the model.
By default the variables are taken from the environment which \code{sctest} is called from.} \item{...}{further arguments passed to \code{\link{efp}} or \code{\link{Fstats}}.} } \details{ \code{sctest.formula} is a convenience interface for performing structural change tests in linear regression models based on \code{\link{efp}} and \code{\link{Fstats}}. It is mainly a wrapper for \code{\link{sctest.efp}} and \code{\link{sctest.Fstats}} as it fits an empirical fluctuation process first or computes the F statistics respectively and subsequently performs the corresponding test. The Chow test and the Nyblom-Hansen test are available explicitly here. An alternative convenience interface for performing structural change tests in general parametric models (based on \code{\link{gefp}}) is available in \code{\link{sctest.default}}. } \value{ An object of class \code{"htest"} containing: \item{statistic}{the test statistic,} \item{p.value}{the corresponding p value,} \item{method}{a character string with the method used,} \item{data.name}{a character string with the data name.} } \seealso{\code{\link{sctest.efp}}, \code{\link{sctest.Fstats}}, \code{\link{sctest.default}}} \examples{ ## Example 7.4 from Greene (1993), "Econometric Analysis" ## Chow test on Longley data data("longley") sctest(Employed ~ Year + GNP.deflator + GNP + Armed.Forces, data = longley, type = "Chow", point = 7) ## which is equivalent to segmenting the regression via fac <- factor(c(rep(1, 7), rep(2, 9))) fm0 <- lm(Employed ~ Year + GNP.deflator + GNP + Armed.Forces, data = longley) fm1 <- lm(Employed ~ fac/(Year + GNP.deflator + GNP + Armed.Forces), data = longley) anova(fm0, fm1) ## estimates from Table 7.5 in Greene (1993) summary(fm0) summary(fm1) } \keyword{htest} strucchange/man/breakdates.Rd0000644000175400001440000000316313062350355016127 0ustar zeileisusers\name{breakdates} \alias{breakdates} \alias{breakdates.breakpoints} \alias{breakdates.confint.breakpoints} \title{Breakdates Corresponding to Breakpoints} \description{ A generic function for computing the breakdates corresponding to breakpoints (and their confidence intervals). } \usage{ breakdates(obj, format.times = FALSE, ...) } \arguments{ \item{obj}{An object of class \code{"breakpoints"}, \code{"breakpointsfull"} or their confidence intervals as returned by \code{\link{confint}}.} \item{format.times}{logical. If set to \code{TRUE} a vector of strings with the formatted breakdates is returned. See details for more information.} \item{\dots}{currently not used.} } \details{ Breakpoints are the number of observations that are the last in one segment and breakdates are the corresponding points on the underlying time scale. The breakdates can be formatted which enhances readability in particular for quarterly or monthly time series. For example the breakdate \code{2002.75} of a monthly time series will be formatted to \code{"2002(10)"}. } \value{ A vector or matrix containing the breakdates.
} \seealso{\code{\link{breakpoints}}, \code{\link{confint}}} \examples{ ## Nile data with one breakpoint: the annual flows drop in 1898 ## because the first Aswan dam was built data("Nile") plot(Nile) bp.nile <- breakpoints(Nile ~ 1) summary(bp.nile) plot(bp.nile) ## compute breakdates corresponding to the ## breakpoints of minimum BIC segmentation breakdates(bp.nile) ## confidence intervals ci.nile <- confint(bp.nile) breakdates(ci.nile) ci.nile plot(Nile) lines(ci.nile) } \keyword{regression} strucchange/man/plot.mefp.Rd0000644000175400001440000000242413062350355015725 0ustar zeileisusers\name{plot.mefp} \alias{plot.mefp} \alias{lines.mefp} \title{Plot Methods for mefp Objects} \description{This is a method of the generic \code{\link{plot}} function for \code{"mefp"} objects as returned by \code{\link{mefp}} or \code{\link{monitor}}. It plots the empirical fluctuation process (or a functional thereof) as a time series plot, and includes boundaries corresponding to the significance level of the monitoring procedure. } \usage{ \method{plot}{mefp}(x, boundary = TRUE, functional = "max", main = NULL, ylab = "Empirical fluctuation process", ylim = NULL, ...) } \arguments{ \item{x}{an object of class \code{"mefp"}.} \item{boundary}{if \code{FALSE}, plotting of boundaries is suppressed.} \item{functional}{indicates which functional should be applied to a multivariate empirical process. If set to \code{NULL} all dimensions of the process (one process per coefficient in the linear model) are plotted. } \item{main, ylab, ylim, ...}{high-level \code{\link{plot}} function parameters.} } \seealso{\code{\link{mefp}}} \examples{ df1 <- data.frame(y=rnorm(300)) df1[150:300,"y"] <- df1[150:300,"y"]+1 me1 <- mefp(y~1, data=df1[1:50,,drop=FALSE], type="ME", h=1, alpha=0.05) me2 <- monitor(me1, data=df1) plot(me2) } \keyword{hplot} strucchange/man/USIncExp.Rd0000644000175400001440000001405213062350355015457 0ustar zeileisusers\name{USIncExp} \alias{USIncExp} \title{Income and Expenditures in the US} \description{Personal income and personal consumption expenditures in the US between January 1959 and February 2001 (seasonally adjusted at annual rates).} \usage{data("USIncExp")} \format{ A multivariate monthly time series from 1959(1) to 2001(2) with variables \describe{ \item{income}{monthly personal income (in billion US dollars),} \item{expenditure}{monthly personal consumption expenditures (in billion US dollars).} } } \source{\url{http://www.economagic.com/} } \references{ A. Zeileis, F. Leisch, K. Hornik, C. Kleiber (2002), strucchange: An R Package for Testing for Structural Change in Linear Regression Models. \emph{Journal of Statistical Software} \bold{7}(2), 1--38.
} \examples{ ## These example are presented in the vignette distributed with this ## package, the code was generated by Stangle("strucchange-intro.Rnw") ################################################### ### chunk number 1: data ################################################### library("strucchange") data("USIncExp") plot(USIncExp, plot.type = "single", col = 1:2, ylab = "billion US$") legend(1960, max(USIncExp), c("income", "expenditures"), lty = c(1,1), col = 1:2, bty = "n") ################################################### ### chunk number 2: subset ################################################### library("strucchange") data("USIncExp") USIncExp2 <- window(USIncExp, start = c(1985,12)) ################################################### ### chunk number 3: ecm-setup ################################################### coint.res <- residuals(lm(expenditure ~ income, data = USIncExp2)) coint.res <- lag(ts(coint.res, start = c(1985,12), freq = 12), k = -1) USIncExp2 <- cbind(USIncExp2, diff(USIncExp2), coint.res) USIncExp2 <- window(USIncExp2, start = c(1986,1), end = c(2001,2)) colnames(USIncExp2) <- c("income", "expenditure", "diff.income", "diff.expenditure", "coint.res") ecm.model <- diff.expenditure ~ coint.res + diff.income ################################################### ### chunk number 4: ts-used ################################################### plot(USIncExp2[,3:5], main = "") ################################################### ### chunk number 5: efp ################################################### ocus <- efp(ecm.model, type="OLS-CUSUM", data=USIncExp2) me <- efp(ecm.model, type="ME", data=USIncExp2, h=0.2) ################################################### ### chunk number 6: efp-boundary ################################################### bound.ocus <- boundary(ocus, alpha=0.05) ################################################### ### chunk number 7: OLS-CUSUM ################################################### plot(ocus) ################################################### ### chunk number 8: efp-boundary2 ################################################### plot(ocus, boundary = FALSE) lines(bound.ocus, col = 4) lines(-bound.ocus, col = 4) ################################################### ### chunk number 9: ME-null ################################################### plot(me, functional = NULL) ################################################### ### chunk number 10: efp-sctest ################################################### sctest(ocus) ################################################### ### chunk number 11: efp-sctest2 ################################################### sctest(ecm.model, type="OLS-CUSUM", data=USIncExp2) ################################################### ### chunk number 12: Fstats ################################################### fs <- Fstats(ecm.model, from = c(1990, 1), to = c(1999,6), data = USIncExp2) ################################################### ### chunk number 13: Fstats-plot ################################################### plot(fs) ################################################### ### chunk number 14: pval-plot ################################################### plot(fs, pval=TRUE) ################################################### ### chunk number 15: aveF-plot ################################################### plot(fs, aveF=TRUE) ################################################### ### chunk number 16: Fstats-sctest ################################################### sctest(fs, type="expF") 
################################################### ### chunk number 17: Fstats-sctest2 ################################################### sctest(ecm.model, type = "expF", from = 49, to = 162, data = USIncExp2) ################################################### ### chunk number 18: mefp ################################################### USIncExp3 <- window(USIncExp2, start = c(1986, 1), end = c(1989,12)) me.mefp <- mefp(ecm.model, type = "ME", data = USIncExp3, alpha = 0.05) ################################################### ### chunk number 19: monitor1 ################################################### USIncExp3 <- window(USIncExp2, start = c(1986, 1), end = c(1990,12)) me.mefp <- monitor(me.mefp) ################################################### ### chunk number 20: monitor2 ################################################### USIncExp3 <- window(USIncExp2, start = c(1986, 1)) me.mefp <- monitor(me.mefp) me.mefp ################################################### ### chunk number 21: monitor-plot ################################################### plot(me.mefp) ################################################### ### chunk number 22: mefp2 ################################################### USIncExp3 <- window(USIncExp2, start = c(1986, 1), end = c(1989,12)) me.efp <- efp(ecm.model, type = "ME", data = USIncExp3, h = 0.5) me.mefp <- mefp(me.efp, alpha=0.05) ################################################### ### chunk number 23: monitor3 ################################################### USIncExp3 <- window(USIncExp2, start = c(1986, 1)) me.mefp <- monitor(me.mefp) ################################################### ### chunk number 24: monitor-plot2 ################################################### plot(me.mefp) } \keyword{datasets} strucchange/man/strucchange.internal.Rd0000644000175400001440000000073513062350355020145 0ustar zeileisusers\name{strucchange.internal} \alias{sc.beta.sup} \alias{sc.beta.ave} \alias{sc.beta.exp} \alias{sc.me} \alias{sc.meanL2} \alias{sc.maxL2} \alias{pvalue.efp} \alias{pvalue.Fstats} \alias{monitorMECritval} \alias{monitorMECritvalData} \alias{monitorMECritvalTable} \alias{monitorRECritval} \alias{monitorRECritvalData} \alias{monitorRECritvalTable} \alias{pargmaxV} \title{Internal strucchange objects} \description{ These are not to be called by the user. } \keyword{internal} strucchange/man/durab.Rd0000644000175400001440000000454013062350355015117 0ustar zeileisusers\name{durab} \alias{durab} \title{US Labor Productivity} \usage{data("durab")} \description{ US labor productivity in the manufacturing/durables sector. } \format{ \code{durab} is a multivariate monthly time series from 1947(3) to 2001(4) with variables \describe{ \item{y}{growth rate of the Industrial Production Index to average weekly labor hours in the manufacturing/durables sector,} \item{lag}{lag 1 of the series \code{y},} } } \source{The data set is available from Bruce Hansen's homepage \url{http://www.ssc.wisc.edu/~bhansen/}. For more information see Hansen (2001).} \references{ Hansen B. (2001), The New Econometrics of Structural Change: Dating Breaks in U.S. Labor Productivity, \emph{Journal of Economic Perspectives}, \bold{15}, 117--128. Zeileis A., Leisch F., Kleiber C., Hornik K. (2005), Monitoring Structural Change in Dynamic Econometric Models, \emph{Journal of Applied Econometrics}, \bold{20}, 99--121. } \examples{ data("durab") ## use AR(1) model as in Hansen (2001) and Zeileis et al. 
(2005) durab.model <- y ~ lag ## historical tests ## OLS-based CUSUM process ols <- efp(durab.model, data = durab, type = "OLS-CUSUM") plot(ols) ## F statistics fs <- Fstats(durab.model, data = durab, from = 0.1) plot(fs) ## F statistics based on heteroskedasticity-consistent covariance matrix fsHC <- Fstats(durab.model, data = durab, from = 0.1, vcov = function(x, ...) vcovHC(x, type = "HC", ...)) plot(fsHC) ## monitoring Durab <- window(durab, start=1964, end = c(1979, 12)) ols.efp <- efp(durab.model, type = "OLS-CUSUM", data = Durab) newborder <- function(k) 1.723 * k/192 ols.mefp <- mefp(ols.efp, period=2) ols.mefp2 <- mefp(ols.efp, border=newborder) Durab <- window(durab, start=1964) ols.mon <- monitor(ols.mefp) ols.mon2 <- monitor(ols.mefp2) plot(ols.mon) lines(boundary(ols.mon2), col = 2) ## Note: critical value for linear boundary taken from Table III ## in Zeileis et al. 2005: (1.568 + 1.896)/2 = 1.732 is a linear ## interpolation between the values for T = 2 and T = 3 at ## alpha = 0.05. A typo switched 1.732 to 1.723. ## dating bp <- breakpoints(durab.model, data = durab) summary(bp) plot(summary(bp)) plot(ols) lines(breakpoints(bp, breaks = 1), col = 3) lines(breakpoints(bp, breaks = 2), col = 4) plot(fs) lines(breakpoints(bp, breaks = 1), col = 3) lines(breakpoints(bp, breaks = 2), col = 4) } \keyword{datasets} strucchange/man/recresid.Rd0000644000175400001440000000673313550050707015620 0ustar zeileisusers\name{recresid} \alias{recresid} \alias{recresid.default} \alias{recresid.formula} \alias{recresid.lm} \title{Recursive Residuals} \description{ A generic function for computing the recursive residuals (standardized one step prediction errors) of a linear regression model. } \usage{ \method{recresid}{default}(x, y, start = ncol(x) + 1, end = nrow(x), tol = sqrt(.Machine$double.eps)/ncol(x), qr.tol = 1e-7, engine = c("R", "C"), \dots) \method{recresid}{formula}(formula, data = list(), \dots) \method{recresid}{lm}(x, data = list(), \dots) } \arguments{ \item{x, y, formula}{specification of the linear regression model: either by a regressor matrix \code{x} and a response variable \code{y}, or by a \code{formula} or by a fitted object \code{x} of class \code{"lm"}.} \item{start, end}{integer. Index of the first and last observation, respectively, for which recursive residuals should be computed. By default, the maximal range is selected.} \item{tol}{numeric. A relative tolerance for precision of recursive coefficient estimates, see details.} \item{qr.tol}{numeric. The \code{tol}erance passed to \code{\link[stats]{lm.fit}} for detecting linear dependencies.} \item{engine}{character. In addition to the R implementation of the default method, there is also a faster C implementation (see below for further details).} \item{data}{an optional data frame containing the variables in the model. By default the variables are taken from the environment which \code{recresid} is called from. Specifying \code{data} might also be necessary when applying \code{recresid} to a fitted model of class \code{"lm"} if this does not contain the regressor matrix and the response.} \item{\dots}{\emph{currently not used.}} } \details{ Recursive residuals are standardized one-step-ahead prediction errors. Under the usual assumptions for the linear regression model they are (asymptotically) normal and i.i.d. (see Brown, Durbin, Evans, 1975, for details). The default method computes the initial coefficient estimates via QR decomposition, using \code{\link{lm.fit}}.
In subsequent steps, the updating formula provided by Brown, Durbin, Evans (1975) is employed. To avoid numerical instabilities in the first steps (with typically small sample sizes), the QR solution is computed for comparison. When the relative difference (assessed by \code{\link{all.equal}}) between the two solutions falls below \code{tol}, only the updating formula is used in subsequent steps. In large data sets, the R implementation can become rather slow. Hence, a C implementation is also available. This is not the default yet, because it should receive more testing in numerically challenging cases. In addition to the R and C implementation, there is also an Armadillo-based C++ implementation available on R-Forge in package strucchangeArmadillo. For models with about 10 parameters, the C and C++ versions perform similarly. For larger models, the C++ implementation seems to scale better. } \value{ A vector containing the recursive residuals. } \references{ Brown R.L., Durbin J., Evans J.M. (1975), Techniques for testing constancy of regression relationships over time, \emph{Journal of the Royal Statistical Society}, B, \bold{37}, 149-163. } \seealso{\code{\link{efp}}} \examples{ x <- rnorm(100) + rep(c(0, 2), each = 50) rr <- recresid(x ~ 1) plot(cumsum(rr), type = "l") plot(efp(x ~ 1, type = "Rec-CUSUM")) } \keyword{regression} strucchange/man/logLik.breakpoints.Rd0000644000175400001440000000407613062350355017567 0ustar zeileisusers\name{logLik.breakpoints} \alias{logLik.breakpoints} \alias{logLik.breakpointsfull} \alias{AIC.breakpointsfull} \title{Log Likelihood and Information Criteria for Breakpoints} \description{ Computation of log likelihood and AIC type information criteria for partitions given by breakpoints. } \usage{ \method{logLik}{breakpointsfull}(object, breaks = NULL, ...) \method{AIC}{breakpointsfull}(object, breaks = NULL, ..., k = 2) } \arguments{ \item{object}{an object of class \code{"breakpoints"} or \code{"breakpointsfull"}.} \item{breaks}{if \code{object} is of class \code{"breakpointsfull"} the number of breaks can be specified.} \item{\dots}{\emph{currently not used}.} \item{k}{the penalty parameter to be used, the default \code{k = 2} is the classical AIC, \code{k = log(n)} gives the BIC, if \code{n} is the number of observations.} } \details{ As for linear models the log likelihood is computed on a normal model and the degrees of freedom are the number of regression coefficients multiplied by the number of segments plus the number of estimated breakpoints plus 1 for the error variance. If \code{AIC} is applied to an object of class \code{"breakpointsfull"} \code{breaks} can be a vector of integers and the AIC for each corresponding partition will be returned. By default the maximal number of breaks stored in the \code{object} is used. See below for an example. } \value{ An object of class \code{"logLik"} or a simple vector containing the AIC respectively.
} \seealso{\code{\link{breakpoints}}} \examples{ ## Nile data with one breakpoint: the annual flows drop in 1898 ## because the first Aswan dam was built data("Nile") plot(Nile) bp.nile <- breakpoints(Nile ~ 1) summary(bp.nile) plot(bp.nile) ## BIC of partitions with 0 to 5 breakpoints plot(0:5, AIC(bp.nile, k = log(bp.nile$nobs)), type = "b") ## AIC plot(0:5, AIC(bp.nile), type = "b") ## BIC, AIC, log likelihood of a single partition bp.nile1 <- breakpoints(bp.nile, breaks = 1) AIC(bp.nile1, k = log(bp.nile1$nobs)) AIC(bp.nile1) logLik(bp.nile1) } \keyword{regression} strucchange/man/breakpoints.Rd0000644000175400001440000003233613062350355016347 0ustar zeileisusers\name{breakpoints} \alias{breakpoints} \alias{breakpoints.formula} \alias{breakpoints.breakpointsfull} \alias{breakpoints.Fstats} \alias{summary.breakpoints} \alias{summary.breakpointsfull} \alias{plot.breakpointsfull} \alias{plot.summary.breakpointsfull} \alias{print.breakpoints} \alias{print.summary.breakpointsfull} \alias{lines.breakpoints} \alias{coef.breakpointsfull} \alias{vcov.breakpointsfull} \alias{fitted.breakpointsfull} \alias{residuals.breakpointsfull} \alias{df.residual.breakpointsfull} \encoding{latin1} \title{Dating Breaks} \description{ Computation of breakpoints in regression relationships. Given a number of breaks the function computes the optimal breakpoints. } \usage{ \method{breakpoints}{formula}(formula, h = 0.15, breaks = NULL, data = list(), hpc = c("none", "foreach"), \dots) \method{breakpoints}{breakpointsfull}(obj, breaks = NULL, \dots) \method{summary}{breakpointsfull}(object, breaks = NULL, sort = TRUE, format.times = NULL, \dots) \method{lines}{breakpoints}(x, breaks = NULL, lty = 2, \dots) \method{coef}{breakpointsfull}(object, breaks = NULL, names = NULL, \dots) \method{fitted}{breakpointsfull}(object, breaks = NULL, \dots) \method{residuals}{breakpointsfull}(object, breaks = NULL, \dots) \method{vcov}{breakpointsfull}(object, breaks = NULL, names = NULL, het.reg = TRUE, het.err = TRUE, vcov. = NULL, sandwich = TRUE, \dots) } \arguments{ \item{formula}{a symbolic description for the model in which breakpoints will be estimated.} \item{h}{minimal segment size either given as fraction relative to the sample size or as an integer giving the minimal number of observations in each segment.} \item{breaks}{positive integer specifying the maximal number of breaks to be calculated. By default the maximal number allowed by \code{h} is used.} \item{data}{an optional data frame containing the variables in the model. By default the variables are taken from the environment which \code{breakpoints} is called from.} \item{hpc}{a character specifying the high performance computing support. Default is \code{"none"}, can be set to \code{"foreach"}.} \item{\dots}{arguments passed to \code{\link{recresid}}.} \item{obj, object}{an object of class \code{"breakpointsfull"}.} \item{sort}{logical. If set to \code{TRUE} \code{summary} tries to match the breakpoints from partitions with different numbers of breaks.} \item{format.times}{logical. If set to \code{TRUE} a vector of strings with the formatted breakdates is printed. See \code{\link{breakdates}} for more information.} \item{x}{an object of class \code{"breakpoints"}.} \item{lty}{line type.} \item{names}{a character vector giving the names of the segments. If of length 1 it is taken to be a generic prefix, e.g. \code{"segment"}.} \item{het.reg}{logical. Should heterogeneous regressors be assumed?
If set to \code{FALSE} the distribution of the regressors is assumed to be homogeneous over the segments.} \item{het.err}{logical. Should heterogeneous errors be assumed? If set to \code{FALSE} the distribution of the errors is assumed to be homogeneous over the segments.} \item{vcov.}{a function to extract the covariance matrix for the coefficients of a fitted model of class \code{"lm"}.} \item{sandwich}{logical. Is the function \code{vcov.} the sandwich estimator or only the middle part?} } \details{ All procedures in this package are concerned with testing or assessing deviations from stability in the classical linear regression model \deqn{y_i = x_i^\top \beta + u_i}{y_i = x_i' b + u_i} In many applications it is reasonable to assume that there are \eqn{m} breakpoints, where the coefficients shift from one stable regression relationship to a different one. Thus, there are \eqn{m+1} segments in which the regression coefficients are constant, and the model can be rewritten as \deqn{y_i = x_i^\top \beta_j + u_i \qquad (i = i_{j-1} + 1, \dots, i_j, \quad j = 1, \dots, m+1)}{y_i = x_i' b_j + u_i (i = i_{j-1} + 1, \dots, i_j, j = 1, \dots, m+1)} where \eqn{j} denotes the segment index. In practice the breakpoints \eqn{i_j} are rarely given exogenously, but have to be estimated. \code{breakpoints} estimates these breakpoints by minimizing the residual sum of squares (RSS) of the equation above. The foundation for estimating breaks in time series regression models was given by Bai (1994) and was extended to multiple breaks by Bai (1997ab) and Bai & Perron (1998). \code{breakpoints} implements the algorithm described in Bai & Perron (2003) for simultaneous estimation of multiple breakpoints. The distribution function used for the confidence intervals for the breakpoints is given in Bai (1997b). The ideas behind this implementation are described in Zeileis et al. (2003). The algorithm for computing the optimal breakpoints given the number of breaks is based on a dynamic programming approach. The underlying idea is that of the Bellman principle. The main computational effort is to compute a triangular RSS matrix, which gives the residual sum of squares for a segment starting at observation \eqn{i} and ending at \eqn{i'} with \eqn{i} < \eqn{i'}. Given a \code{formula} as the first argument, \code{breakpoints} computes an object of class \code{"breakpointsfull"} which inherits from \code{"breakpoints"}. This contains in particular the triangular RSS matrix and functions to extract an optimal segmentation. A \code{summary} of this object will give the breakpoints (and associated) breakdates for all segmentations up to the maximal number of breaks together with the associated RSS and BIC. These will be plotted if \code{plot} is applied and thus visualize the minimum BIC estimator of the number of breakpoints. From an object of class \code{"breakpointsfull"} an arbitrary number of \code{breaks} (admissible by the minimum segment size \code{h}) can be extracted by another application of \code{breakpoints}, returning an object of class \code{"breakpoints"}. This contains only the breakpoints for the specified number of breaks and some model properties (number of observations, regressors, time series properties and the associated RSS) but not the triangular RSS matrix and related extractor functions. The set of breakpoints which is associated by default with a \code{"breakpointsfull"} object is the minimum BIC partition. 
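Schematically, this workflow can be sketched as follows (using the Nile series from the examples below): \preformatted{
data("Nile")
bp <- breakpoints(Nile ~ 1)     ## "breakpointsfull" object
summary(bp)                     ## RSS and BIC for all segmentations
plot(bp)                        ## visualize the information criteria
breakpoints(bp, breaks = 1)     ## extract the partition with one break
}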
Breakpoints are the number of observations that are the last in one segment, it is also possible to compute the corresponding \code{breakdates} which are the breakpoints on the underlying time scale. The breakdates can be formatted which enhances readability in particular for quarterly or monthly time series. For example the breakdate \code{2002.75} of a monthly time series will be formatted to \code{"2002(10)"}. See \code{\link{breakdates}} for more details. From a \code{"breakpointsfull"} object confidence intervals for the breakpoints can be computed using the method of \code{\link{confint}}. The breakdates corresponding to the breakpoints can again be computed by \code{\link{breakdates}}. The breakpoints and their confidence intervals can be visualized by \code{lines}. Convenience functions are provided for extracting the coefficients and covariance matrix, fitted values and residuals of segmented models. The log likelihood as well as some information criteria can be computed using the methods for the \code{\link{logLik}} and \code{\link{AIC}}. As for linear models the log likelihood is computed on a normal model and the degrees of freedom are the number of regression coefficients multiplied by the number of segments plus the number of estimated breakpoints plus 1 for the error variance. More details can be found on the help page of the method \code{\link{logLik.breakpoints}}. As the maximum of a sequence of F statistics is equivalent to the minimum OLS estimator of the breakpoint in a 2-segment partition it can be extracted by \code{breakpoints} from an object of class \code{"Fstats"} as computed by \code{\link{Fstats}}. However, this cannot be used to extract a larger number of breakpoints. For illustration see the commented examples below and Zeileis et al. (2003). Optional support for high performance computing is available, currently using \code{\link[foreach]{foreach}} for the dynamic programming algorithm. If \code{hpc = "foreach"} is to be used, a parallel backend should be registered before. See \code{\link[foreach]{foreach}} for more information. } \section{value}{ An object of class \code{"breakpoints"} is a list with the following elements: \describe{ \item{breakpoints}{the breakpoints of the optimal partition with the number of breaks specified (set to \code{NA} if the optimal 1-segment solution is reported),} \item{RSS}{the associated RSS,} \item{nobs}{the number of observations,} \item{nreg}{the number of regressors,} \item{call}{the function call,} \item{datatsp}{the time series properties \code{tsp} of the data, if any, \code{c(1/nobs, 1, nobs)} otherwise.} } If applied to a \code{formula} as first argument, \code{breakpoints} returns an object of class \code{"breakpointsfull"} (which inherits from \code{"breakpoints"}), that contains some additional (or slightly different) elements such as: \describe{ \item{breakpoints}{the breakpoints of the minimum BIC partition,} \item{RSS}{a function which takes two arguments \code{i,j} and computes the residual sum of squares for a segment starting at observation \code{i} and ending at \code{j} by looking up the corresponding element in the triangular RSS matrix \code{RSS.triang},} \item{RSS.triang}{a list encoding the triangular RSS matrix.} } } \references{ Bai J. (1994), Least Squares Estimation of a Shift in Linear Processes, \emph{Journal of Time Series Analysis}, \bold{15}, 453-472. Bai J. (1997a), Estimating Multiple Breaks One at a Time, \emph{Econometric Theory}, \bold{13}, 315-352. Bai J. 
(1997b), Estimation of a Change Point in Multiple Regression Models, \emph{Review of Economics and Statistics}, \bold{79}, 551-563. Bai J., Perron P. (1998), Estimating and Testing Linear Models With Multiple Structural Changes, \emph{Econometrica}, \bold{66}, 47-78. Bai J., Perron P. (2003), Computation and Analysis of Multiple Structural Change Models, \emph{Journal of Applied Econometrics}, \bold{18}, 1-22. Zeileis A., Kleiber C., Krämer W., Hornik K. (2003), Testing and Dating of Structural Changes in Practice, \emph{Computational Statistics and Data Analysis}, \bold{44}, 109-123. doi:10.1016/S0167-9473(03)00030-6. Zeileis A., Shah A., Patnaik I. (2010), Testing, Monitoring, and Dating Structural Changes in Exchange Rate Regimes, \emph{Computational Statistics and Data Analysis}, \bold{54}(6), 1696--1706. doi:10.1016/j.csda.2009.12.005. } \examples{ ## Nile data with one breakpoint: the annual flows drop in 1898 ## because the first Aswan dam was built data("Nile") plot(Nile) ## F statistics indicate one breakpoint fs.nile <- Fstats(Nile ~ 1) plot(fs.nile) breakpoints(fs.nile) lines(breakpoints(fs.nile)) ## or bp.nile <- breakpoints(Nile ~ 1) summary(bp.nile) ## the BIC also chooses one breakpoint plot(bp.nile) breakpoints(bp.nile) ## fit null hypothesis model and model with 1 breakpoint fm0 <- lm(Nile ~ 1) fm1 <- lm(Nile ~ breakfactor(bp.nile, breaks = 1)) plot(Nile) lines(ts(fitted(fm0), start = 1871), col = 3) lines(ts(fitted(fm1), start = 1871), col = 4) lines(bp.nile) ## confidence interval ci.nile <- confint(bp.nile) ci.nile lines(ci.nile) ## UK Seatbelt data: a SARIMA(1,0,0)(1,0,0)_12 model ## (fitted by OLS) is used and reveals (at least) two ## breakpoints - one in 1973 associated with the oil crisis and ## one in 1983 due to the introduction of compulsory ## wearing of seatbelts in the UK. data("UKDriverDeaths") seatbelt <- log10(UKDriverDeaths) seatbelt <- cbind(seatbelt, lag(seatbelt, k = -1), lag(seatbelt, k = -12)) colnames(seatbelt) <- c("y", "ylag1", "ylag12") seatbelt <- window(seatbelt, start = c(1970, 1), end = c(1984,12)) plot(seatbelt[,"y"], ylab = expression(log[10](casualties))) ## testing re.seat <- efp(y ~ ylag1 + ylag12, data = seatbelt, type = "RE") plot(re.seat) ## dating bp.seat <- breakpoints(y ~ ylag1 + ylag12, data = seatbelt, h = 0.1) summary(bp.seat) lines(bp.seat, breaks = 2) ## minimum BIC partition plot(bp.seat) breakpoints(bp.seat) ## the BIC would choose 0 breakpoints although the RE and supF test ## clearly reject the hypothesis of structural stability. Bai & ## Perron (2003) report that the BIC has problems in dynamic regressions.
## due to the shape of the RE process of the F statistics choose two ## breakpoints and fit corresponding models bp.seat2 <- breakpoints(bp.seat, breaks = 2) fm0 <- lm(y ~ ylag1 + ylag12, data = seatbelt) fm1 <- lm(y ~ breakfactor(bp.seat2)/(ylag1 + ylag12) - 1, data = seatbelt) ## plot plot(seatbelt[,"y"], ylab = expression(log[10](casualties))) time.seat <- as.vector(time(seatbelt)) lines(time.seat, fitted(fm0), col = 3) lines(time.seat, fitted(fm1), col = 4) lines(bp.seat2) ## confidence intervals ci.seat2 <- confint(bp.seat, breaks = 2) ci.seat2 lines(ci.seat2) } \concept{breakpoint estimation} \concept{changepoint estimation} \concept{segmented regression} \keyword{regression} strucchange/man/boundary.mefp.Rd0000644000175400001440000000136013062350355016570 0ustar zeileisusers\name{boundary.mefp} \alias{boundary.mefp} \title{Boundary Function for Monitoring of Structural Changes} \description{Computes boundary for an object of class \code{"mefp"}} \usage{ \method{boundary}{mefp}(x, ...)} \arguments{ \item{x}{an object of class \code{"mefp"}.} \item{...}{currently not used.} } \value{an object of class \code{"ts"} with the same time properties as the monitored process} \seealso{\code{\link{mefp}}, \code{\link{plot.mefp}}} \examples{ df1 <- data.frame(y=rnorm(300)) df1[150:300,"y"] <- df1[150:300,"y"]+1 me1 <- mefp(y~1, data=df1[1:50,,drop=FALSE], type="ME", h=1, alpha=0.05) me2 <- monitor(me1, data=df1) plot(me2, boundary=FALSE) lines(boundary(me2), col="green", lty="44") } \keyword{regression} strucchange/man/boundary.efp.Rd0000644000175400001440000000300113062350355016405 0ustar zeileisusers\name{boundary.efp} \alias{boundary.efp} \title{Boundary for Empirical Fluctuation Processes} \description{Computes boundary for an object of class \code{"efp"}} \usage{ \method{boundary}{efp}(x, alpha = 0.05, alt.boundary = FALSE, functional = "max", ...) } \arguments{ \item{x}{an object of class \code{"efp"}.} \item{alpha}{numeric from interval (0,1) indicating the confidence level for which the boundary of the corresponding test will be computed.} \item{alt.boundary}{logical. If set to \code{TRUE} alternative boundaries (instead of the standard linear boundaries) will be computed (for Brownian bridge type processes only).} \item{functional}{indicates which functional should be applied to the empirical fluctuation process. 
See also \code{\link{plot.efp}}.} \item{\dots}{currently not used.} } \value{an object of class \code{"ts"} with the same time properties as the process in \code{x}} \seealso{\code{\link{efp}}, \code{\link{plot.efp}}} \examples{ ## Load dataset "nhtemp" with average yearly temperatures in New Haven data("nhtemp") ## plot the data plot(nhtemp) ## test the model null hypothesis that the average temperature remains constant ## over the years ## compute OLS-CUSUM fluctuation process temp.cus <- efp(nhtemp ~ 1, type = "OLS-CUSUM") ## plot the process without boundaries plot(temp.cus, alpha = 0.01, boundary = FALSE) ## add the boundaries in another colour bound <- boundary(temp.cus, alpha = 0.01) lines(bound, col=4) lines(-bound, col=4) } \keyword{regression} strucchange/man/root.matrix.Rd0000644000175400001440000000065013062350355016306 0ustar zeileisusers\name{root.matrix} \alias{root.matrix} \title{Root of a Matrix} \description{Computes the root of a symmetric and positive semidefinite matrix.} \usage{ root.matrix(X) } \arguments{ \item{X}{a symmetric and positive semidefinite matrix} } \value{a symmetric matrix of same dimensions as \code{X}} \examples{ X <- matrix(c(1,2,2,8), ncol=2) test <- root.matrix(X) ## control results X test \%*\% test } \keyword{algebra} strucchange/man/confint.breakpointsfull.Rd0000644000175400001440000000644113062350355020667 0ustar zeileisusers\name{confint.breakpointsfull} \alias{confint.breakpointsfull} \alias{lines.confint.breakpoints} \alias{print.confint.breakpoints} \title{Confidence Intervals for Breakpoints} \description{ Computes confidence intervals for breakpoints. } \usage{ \method{confint}{breakpointsfull}(object, parm = NULL, level = 0.95, breaks = NULL, het.reg = TRUE, het.err = TRUE, vcov. = NULL, sandwich = TRUE, ...) \method{lines}{confint.breakpoints}(x, col = 2, angle = 90, length = 0.05, code = 3, at = NULL, breakpoints = TRUE, ...) } \arguments{ \item{object}{an object of class \code{"breakpointsfull"} as computed by \code{\link{breakpoints}} from a \code{formula}.} \item{parm}{the same as \code{breaks}, only one of the two should be specified.} \item{level}{the confidence level required.} \item{breaks}{an integer specifying the number of breaks to be used. By default the breaks of the minimum BIC partition are used.} \item{het.reg}{logical. Should heterogeneous regressors be assumed? If set to \code{FALSE} the distribution of the regressors is assumed to be homogeneous over the segments.} \item{het.err}{logical. Should heterogeneous errors be assumed? If set to \code{FALSE} the distribution of the errors is assumed to be homogeneous over the segments.} \item{vcov.}{a function to extract the covariance matrix for the coefficients of a fitted model of class \code{"lm"}.} \item{sandwich}{logical. Is the function \code{vcov.} the sandwich estimator or only the middle part?} \item{x}{an object of class \code{"confint.breakpoints"} as returned by \code{confint}.} \item{col, angle, length, code}{arguments passed to \code{\link{arrows}}.} \item{at}{position on the y axis, where the confidence arrows should be drawn. By default they are drawn at the bottom of the plot.} \item{breakpoints}{logical. If \code{TRUE} vertical lines for the breakpoints are drawn.} \item{\dots}{\emph{currently not used}.} } \details{ As the breakpoints are integers (observation numbers) the corresponding confidence intervals are also rounded to integers. The distribution function used for the computation of confidence intervals of breakpoints is given in Bai (1997). 
The procedure, in particular the usage of heterogeneous regressors and/or errors, is described in more detail in Bai & Perron (2003). The breakpoints should be computed from a formula with \code{breakpoints}, then the confidence intervals for the breakpoints can be derived by \code{confint} and these can be visualized by \code{lines}. For an example see below. } \value{ A matrix containing the breakpoints and their lower and upper confidence boundary for the given level. } \references{ Bai J. (1997), Estimation of a Change Point in Multiple Regression Models, \emph{Review of Economics and Statistics}, \bold{79}, 551-563. Bai J., Perron P. (2003), Computation and Analysis of Multiple Structural Change Models, \emph{Journal of Applied Econometrics}, \bold{18}, 1-22. } \seealso{\code{\link{breakpoints}}} \examples{ ## Nile data with one breakpoint: the annual flows drop in 1898 ## because the first Ashwan dam was built data("Nile") plot(Nile) ## dating breaks bp.nile <- breakpoints(Nile ~ 1) ci.nile <- confint(bp.nile, breaks = 1) lines(ci.nile) } \keyword{regression} strucchange/man/catL2BB.Rd0000644000175400001440000001050313366337662015204 0ustar zeileisusers\name{catL2BB} \alias{catL2BB} \alias{ordL2BB} \alias{ordwmax} \title{Generators for efpFunctionals along Categorical Variables} \description{ Generators for \code{efpFunctional} objects suitable for aggregating empirical fluctuation processes to test statistics along (ordinal) categorical variables. } \usage{ catL2BB(freq) ordL2BB(freq, nproc = NULL, nrep = 1e5, probs = c(0:84/100, 850:1000/1000), \dots) ordwmax(freq, algorithm = mvtnorm::GenzBretz(), \dots) } \arguments{ \item{freq}{object specifying the category frequencies for the categorical variable to be used for aggregation: either a \code{\link{gefp}} object, a \code{\link{factor}}, or a numeric vector with either absolute or relative category frequencies.} \item{nproc}{numeric. Number of processes used for simulating from the asymptotic distribution (passed to \code{\link{efpFunctional}}). If \code{freq} is a \code{\link{gefp}} object, then its number of processes is used by default.} \item{nrep}{numeric. Number of replications used for simulating from the asymptotic distribution (passed to \code{\link{efpFunctional}}).} \item{probs}{numeric vector specifying for which probabilities critical values should be tabulated.} \item{\dots}{further arguments passed to \code{\link{efpFunctional}}.} \item{algorithm}{algorithm specification passed to \code{\link[mvtnorm]{pmvnorm}} for computing the asymptotic distribution.} } \details{ Merkle, Fan, and Zeileis (2014) discuss three functionals that are suitable for aggregating empirical fluctuation processes along categorical variables, especially ordinal variables. The functions \code{catL2BB}, \code{ordL2BB}, and \code{ordwmax} all require a specification of the relative frequencies within each category (which can be computed from various specifications, see arguments). All of them employ \code{\link{efpFunctional}} (Zeileis 2006) internally to set up an object that can be employed with \code{\link{gefp}} fluctuation processes. \code{catL2BB} results in a chi-squared test. This is essentially the LM test counterpart to the likelihood ratio test that assesses a split into unordered categories. \code{ordL2BB} is the ordinal counterpart to \code{\link{supLM}} where aggregation is done along the ordered categories (rather than continuously).
The asymptotic distribution is non-standard and needs to be simulated for every combination of frequencies and number of processes. Hence, this is somewhat more time-consuming compared to the closed-form solution employed in \code{catL2BB}. It is also possible to store the result of \code{ordL2BB} in case it needs to be applied to several \code{\link{gefp}} fluctuation processes. \code{ordwmax} is a weighted double maximum test based on ideas previously suggested by Hothorn and Zeileis (2008) in the context of maximally selected statistics. The asymptotic distribution is (multivariate) normal and computed by means of \code{\link[mvtnorm]{pmvnorm}}. } \value{ An object of class \code{efpFunctional}. } \references{ Hothorn T., Zeileis A. (2008), Generalized Maximally Selected Statistics. \emph{Biometrics}, \bold{64}, 1263--1269. Merkle E.C., Fan J., Zeileis A. (2014), Testing for Measurement Invariance with Respect to an Ordinal Variable. \emph{Psychometrika}, \bold{79}(4), 569--584. doi:10.1007/S11336-013-9376-7. Zeileis A. (2006), Implementing a Class of Structural Change Tests: An Econometric Computing Approach. \emph{Computational Statistics & Data Analysis}, \bold{50}, 2987--3008. doi:10.1016/j.csda.2005.07.001. } \seealso{\code{\link{efpFunctional}}, \code{\link{gefp}}} \examples{ ## artificial data set.seed(1) d <- data.frame( x = runif(200, -1, 1), z = factor(rep(1:4, each = 50)), err = rnorm(200) ) d$y <- rep(c(0.5, -0.5), c(150, 50)) * d$x + d$err ## empirical fluctuation process scus <- gefp(y ~ x, data = d, fit = lm, order.by = ~ z) ## chi-squared-type test (unordered LM-type test) LMuo <- catL2BB(scus) plot(scus, functional = LMuo) sctest(scus, functional = LMuo) ## ordinal maxLM test (with few replications only to save time) maxLMo <- ordL2BB(scus, nrep = 10000) plot(scus, functional = maxLMo) sctest(scus, functional = maxLMo) ## ordinal weighted double maximum test WDM <- ordwmax(scus) plot(scus, functional = WDM) sctest(scus, functional = WDM) } \keyword{regression} strucchange/man/sctest.efp.Rd0000644000175400001440000000645113062350355016103 0ustar zeileisusers\name{sctest.efp} \alias{sctest.efp} \encoding{latin1} \title{Generalized Fluctuation Tests} \description{Performs a generalized fluctuation test.} \usage{ \method{sctest}{efp}(x, alt.boundary = FALSE, functional = c("max", "range", "maxL2", "meanL2"), ...) } \arguments{ \item{x}{an object of class \code{"efp"}.} \item{alt.boundary}{logical. If set to \code{TRUE} alternative boundaries (instead of the standard linear boundaries) will be used (for CUSUM processes only).} \item{functional}{indicates which functional should be applied to the empirical fluctuation process.} \item{...}{currently not used.} } \details{The critical values for the MOSUM tests and the ME test are just tabulated for confidence levels between 0.1 and 0.01, thus the p value approximations will be poor for other p values. Similarly the critical values for the maximum and mean squared Euclidean norm (\code{"maxL2"} and \code{"meanL2"}) are tabulated for confidence levels between 0.2 and 0.005.} \value{ An object of class \code{"htest"} containing: \item{statistic}{the test statistic,} \item{p.value}{the corresponding p value,} \item{method}{a character string with the method used,} \item{data.name}{a character string with the data name.} } \references{Brown R.L., Durbin J., Evans J.M. (1975), Techniques for testing constancy of regression relationships over time, \emph{Journal of the Royal Statistical Society}, B, \bold{37}, 149-163.
Chu C.-S., Hornik K., Kuan C.-M. (1995), MOSUM tests for parameter constancy, \emph{Biometrika}, \bold{82}, 603-617. Chu C.-S., Hornik K., Kuan C.-M. (1995), The moving-estimates test for parameter stability, \emph{Econometric Theory}, \bold{11}, 669-720. Krämer W., Ploberger W., Alt R. (1988), Testing for structural change in dynamic models, \emph{Econometrica}, \bold{56}, 1355-1369. Kuan C.-M., Hornik K. (1995), The generalized fluctuation test: A unifying view, \emph{Econometric Reviews}, \bold{14}, 135 - 161. Kuan C.-M., Chen (1994), Implementing the fluctuation and moving estimates tests in dynamic econometric models, \emph{Economics Letters}, \bold{44}, 235-239. Ploberger W., Krämer W. (1992), The CUSUM Test with OLS Residuals, \emph{Econometrica}, \bold{60}, 271-285. Zeileis A., Leisch F., Hornik K., Kleiber C. (2002), \code{strucchange}: An R Package for Testing for Structural Change in Linear Regression Models, \emph{Journal of Statistical Software}, \bold{7}(2), 1-38. URL \url{http://www.jstatsoft.org/v07/i02/}. Zeileis A. (2004), Alternative Boundaries for CUSUM Tests, \emph{Statistical Papers}, \bold{45}, 123--131. } \seealso{\code{\link{efp}}, \code{\link{plot.efp}}} \examples{ ## Load dataset "nhtemp" with average yearly temperatures in New Haven data("nhtemp") ## plot the data plot(nhtemp) ## test the model null hypothesis that the average temperature remains ## constant over the years ## compute OLS-CUSUM fluctuation process temp.cus <- efp(nhtemp ~ 1, type = "OLS-CUSUM") ## plot the process with alternative boundaries plot(temp.cus, alpha = 0.01, alt.boundary = TRUE) ## and calculate the test statistic sctest(temp.cus) ## compute moving estimates fluctuation process temp.me <- efp(nhtemp ~ 1, type = "ME", h = 0.2) ## plot the process with functional = "max" plot(temp.me) ## and perform the corresponding test sctest(temp.me) } \keyword{htest} strucchange/man/RealInt.Rd0000644000175400001440000000311213062350355015352 0ustar zeileisusers\name{RealInt} \alias{RealInt} \title{US Ex-post Real Interest Rate} \description{ US ex-post real interest rate: the three-month treasury bill deflated by the CPI inflation rate. } \usage{data("RealInt")} \format{ A quarterly time series from 1961(1) to 1986(3). } \source{The data is available online in the data archive of the Journal of Applied Econometrics \url{http://qed.econ.queensu.ca/jae/2003-v18.1/bai-perron/}.} \references{ Bai J., Perron P. (2003), Computation and Analysis of Multiple Structural Change Models, \emph{Journal of Applied Econometrics}, \bold{18}, 1-22. Zeileis A., Kleiber C. (2005), Validating Multiple Structural Change Models - A Case Study. Journal of Applied Econometrics, \bold{20}, 685-690. } \examples{ ## load and plot data data("RealInt") plot(RealInt) ## estimate breakpoints bp.ri <- breakpoints(RealInt ~ 1, h = 15) plot(bp.ri) summary(bp.ri) ## fit segmented model with three breaks fac.ri <- breakfactor(bp.ri, breaks = 3, label = "seg") fm.ri <- lm(RealInt ~ 0 + fac.ri) summary(fm.ri) ## setup kernel HAC estimator vcov.ri <- function(x, ...) kernHAC(x, kernel = "Quadratic Spectral", prewhite = 1, approx = "AR(1)", ...)
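## as a quick check of the HAC estimator defined above, it can also be
## applied directly to the segmented OLS fit fm.ri (an added illustration,
## not part of the Bai & Perron replication below)
sqrt(diag(vcov.ri(fm.ri)))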
## Results from Table 1 in Bai & Perron (2003): ## coefficient estimates coef(bp.ri, breaks = 3) ## corresponding standard errors sapply(vcov(bp.ri, breaks = 3, vcov = vcov.ri), sqrt) ## breakpoints and confidence intervals confint(bp.ri, breaks = 3, vcov = vcov.ri) ## Visualization plot(RealInt) lines(as.vector(time(RealInt)), fitted(fm.ri), col = 4) lines(confint(bp.ri, breaks = 3, vcov = vcov.ri)) } \keyword{datasets} strucchange/man/sctest.default.Rd0000644000175400001440000001670213062350355016755 0ustar zeileisusers\name{sctest.default} \alias{sctest.default} \title{Structural Change Tests in Parametric Models} \description{ Performs model-based tests for structural change (or parameter instability) in parametric models. } \usage{ \method{sctest}{default}(x, order.by = NULL, functional = maxBB, vcov = NULL, scores = estfun, decorrelate = TRUE, sandwich = TRUE, parm = NULL, plot = FALSE, from = 0.1, to = NULL, nobs = NULL, nrep = 50000, width = 0.15, xlab = NULL, \dots) } \arguments{ \item{x}{a model object. The model class can in principle be arbitrary but needs to provide suitable methods for extracting the \code{scores} and associated variance-covariance matrix \code{vcov}.} \item{order.by}{either a vector \code{z} or a formula with a single explanatory variable like \code{~ z}. The observations in the model are ordered by the size of \code{z}. If set to \code{NULL} (the default) the observations are assumed to be ordered (e.g., a time series).} \item{functional}{either a character specification of the functional to be used or an \code{\link{efpFunctional}} object. For a list of functionals see the details.} \item{vcov}{a function to extract the covariance matrix for the coefficients of the fitted model: \code{vcov(x, order.by = order.by, data = data)}. Alternatively, the character string \code{"info"}, for details see below.} \item{scores}{a function which extracts the scores or estimating function from the fitted object: \code{scores(x)}, by default this is \code{\link[sandwich]{estfun}}.} \item{decorrelate}{logical. Should the process be decorrelated?} \item{sandwich}{logical. Is the function \code{vcov} the full sandwich estimator or only the meat?} \item{parm}{integer or character specifying the component of the estimating functions which should be used (by default all components are used).} \item{plot}{logical. Should the result of the test also be visualized?} \item{from, to}{numeric. In case the \code{functional} is \code{"supLM"} (or equivalently \code{"maxLM"}), \code{from} and \code{to} can be passed to the \code{\link{supLM}} functional.} \item{nobs, nrep}{numeric. In case the \code{functional} is \code{"maxLMo"}, \code{nobs} and \code{nrep} are passed to the \code{\link{catL2BB}} functional.} \item{width}{numeric. In case the \code{functional} is \code{"MOSUM"}, the bandwidth \code{width} is passed to the \code{\link{maxMOSUM}} functional.} \item{xlab, \dots}{graphical parameters passed to the plot method (in case \code{plot = TRUE}).} } \details{ \code{sctest.default} is a convenience interface to \code{\link{gefp}} for structural change tests (or parameter instability tests) in general parametric models. 
It proceeds in the following steps: \enumerate{ \item The generalized empirical fluctuation process (or score-based CUSUM process) is computed via \code{scus <- gefp(x, fit = NULL, \dots)} where \code{\dots} comprises the arguments \code{order.by}, \code{vcov}, \code{scores}, \code{decorrelate}, \code{sandwich}, \code{parm} that are simply passed on to \code{\link{gefp}}. \item The empirical fluctuation process is visualized (if \code{plot = TRUE}) via \code{plot(scus, functional = functional, \dots)}. \item The empirical fluctuation process is assessed by the corresponding significance test via \code{sctest(scus, functional = functional)}. } The main motivation for providing the convenience interface is that these three steps can be easily carried out in one go along with two convenience options: \enumerate{ \item By default, the covariance is computed by an outer-product of gradients estimator just as in \code{gefp}. This is always available based on the \code{scores}. Additionally, by setting \code{vcov = "info"}, the corresponding information matrix can be used. Then the average information is assumed to be provided by the \code{vcov} method for the model class. (Note that this is only sensible for models estimated by maximum likelihood.) \item Instead of providing the \code{functional} by an \code{\link{efpFunctional}} object, the test labels employed by Merkle and Zeileis (2013) and Merkle, Fan, and Zeileis (2014) can be used for convenience. Namely, for continuous numeric orderings, the following functionals are available: \code{functional = "DM"} or \code{"dmax"} provides the double-maximum test (\code{\link{maxBB}}). \code{"CvM"} is the Cramer-von Mises functional \code{\link{meanL2BB}}. \code{"supLM"} or equivalently \code{"maxLM"} is Andrews' supLM test (\code{\link{supLM}}). \code{"MOSUM"} or \code{"maxMOSUM"} is the MOSUM functional (\code{\link{maxMOSUM}}), and \code{"range"} is the range functional \code{\link{rangeBB}}. Furthermore, several functionals suitable for (ordered) categorical \code{order.by} variables are provided: \code{"LMuo"} is the unordered LM test (\code{\link{catL2BB}}), \code{"WDMo"} is the weighted double-maximum test for ordered variables (\code{\link{ordwmax}}), and \code{"maxLMo"} is the maxLM test for ordered variables (\code{\link{ordL2BB}}). } The theoretical model class is introduced in Zeileis and Hornik (2007) with a unifying view in Zeileis (2005), especially from an econometric perspective. Zeileis (2006) introduces the underlying computational tools \code{gefp} and \code{efpFunctional}. Merkle and Zeileis (2013) discuss the methods in the context of measurement invariance which is particularly relevant to psychometric models for cross section data. Merkle, Fan, and Zeileis (2014) extend the results to ordered categorical variables. Zeileis, Shah, and Patnaik (2010) provide a unifying discussion in the context of time series methods, specifically in financial econometrics. } \value{ An object of class \code{"htest"} containing: \item{statistic}{the test statistic,} \item{p.value}{the corresponding p value,} \item{method}{a character string with the method used,} \item{data.name}{a character string with the data name.} } \references{ Merkle E.C., Zeileis A. (2013), Tests of Measurement Invariance without Subgroups: A Generalization of Classical Methods. \emph{Psychometrika}, \bold{78}(1), 59--82. doi:10.1007/S11336-012-9302-4 Merkle E.C., Fan J., Zeileis A. (2014), Testing for Measurement Invariance with Respect to an Ordinal Variable.
\emph{Psychometrika}, \bold{79}(4), 569--584. doi:10.1007/S11336-013-9376-7. Zeileis A. (2005), A Unified Approach to Structural Change Tests Based on ML Scores, F Statistics, and OLS Residuals. \emph{Econometric Reviews}, \bold{24}, 445--466. doi:10.1080/07474930500406053. Zeileis A. (2006), Implementing a Class of Structural Change Tests: An Econometric Computing Approach. \emph{Computational Statistics & Data Analysis}, \bold{50}, 2987--3008. doi:10.1016/j.csda.2005.07.001. Zeileis A., Hornik K. (2007), Generalized M-Fluctuation Tests for Parameter Instability, \emph{Statistica Neerlandica}, \bold{61}, 488--508. doi:10.1111/j.1467-9574.2007.00371.x. Zeileis A., Shah A., Patnaik I. (2010), Testing, Monitoring, and Dating Structural Changes in Exchange Rate Regimes, \emph{Computational Statistics and Data Analysis}, \bold{54}(6), 1696--1706. doi:10.1016/j.csda.2009.12.005. } \seealso{\code{\link{gefp}}, \code{\link{efpFunctional}}} \examples{ ## Zeileis and Hornik (2007), Section 5.3, Figure 6 data("Grossarl") m <- glm(cbind(illegitimate, legitimate) ~ 1, family = binomial, data = Grossarl, subset = time(fraction) <= 1800) sctest(m, order.by = 1700:1800, functional = "CvM") } \keyword{htest} strucchange/man/plot.efp.Rd0000644000175400001440000001017313062350355015550 0ustar zeileisusers\name{plot.efp} \alias{plot.efp} \alias{lines.efp} \encoding{latin1} \title{Plot Empirical Fluctuation Process} \description{Plot and lines method for objects of class \code{"efp"}} \usage{ \method{plot}{efp}(x, alpha = 0.05, alt.boundary = FALSE, boundary = TRUE, functional = "max", main = NULL, ylim = NULL, ylab = "Empirical fluctuation process", ...) \method{lines}{efp}(x, functional = "max", ...) } \arguments{ \item{x}{an object of class \code{"efp"}.} \item{alpha}{numeric from interval (0,1) indicating the confidence level for which the boundary of the corresponding test will be computed.} \item{alt.boundary}{logical. If set to \code{TRUE} alternative boundaries (instead of the standard linear boundaries) will be plotted (for CUSUM processes only).} \item{boundary}{logical. If set to \code{FALSE} the boundary will be computed but not plotted.} \item{functional}{indicates which functional should be applied to the process before plotting and which boundaries should be used. If set to \code{NULL} a multiple process with boundaries for the \code{"max"} functional is plotted. For more details see below.} \item{main, ylim, ylab, ...}{high-level \code{\link{plot}} function parameters.} } \details{Plots are available for the \code{"max"} functional for all process types. For Brownian bridge type processes the maximum or mean squared Euclidean norm (\code{"maxL2"} and \code{"meanL2"}) can be used for aggregating before plotting. No plots are available for the \code{"range"} functional. Alternative boundaries that are proportional to the standard deviation of the corresponding limiting process are available for processes with Brownian motion or Brownian bridge limiting processes. } \value{\code{\link{efp}} returns an object of class \code{"efp"} which inherits from the class \code{"ts"} or \code{"mts"} respectively. The function \code{\link{plot}} has a method to plot the empirical fluctuation process; with \code{sctest} the corresponding test for structural change can be performed.} \references{Brown R.L., Durbin J., Evans J.M. (1975), Techniques for testing constancy of regression relationships over time, \emph{Journal of the Royal Statistical Society}, B, \bold{37}, 149-163. Chu C.-S., Hornik K., Kuan C.-M. 
(1995), MOSUM tests for parameter constancy, \emph{Biometrika}, \bold{82}, 603-617. Chu C.-S., Hornik K., Kuan C.-M. (1995), The moving-estimates test for parameter stability, \emph{Econometric Theory}, \bold{11}, 669-720. Krämer W., Ploberger W., Alt R. (1988), Testing for structural change in dynamic models, \emph{Econometrica}, \bold{56}, 1355-1369. Kuan C.-M., Hornik K. (1995), The generalized fluctuation test: A unifying view, \emph{Econometric Reviews}, \bold{14}, 135 - 161. Kuan C.-M., Chen (1994), Implementing the fluctuation and moving estimates tests in dynamic econometric models, \emph{Economics Letters}, \bold{44}, 235-239. Ploberger W., Krämer W. (1992), The CUSUM test with OLS residuals, \emph{Econometrica}, \bold{60}, 271-285. Zeileis A., Leisch F., Hornik K., Kleiber C. (2002), \code{strucchange}: An R Package for Testing for Structural Change in Linear Regression Models, \emph{Journal of Statistical Software}, \bold{7}(2), 1-38. URL \url{http://www.jstatsoft.org/v07/i02/}. Zeileis A. (2004), Alternative Boundaries for CUSUM Tests, \emph{Statistical Papers}, \bold{45}, 123--131. } \seealso{\code{\link{efp}}, \code{\link{boundary.efp}}, \code{\link{sctest.efp}}} \examples{ ## Load dataset "nhtemp" with average yearly temperatures in New Haven data("nhtemp") ## plot the data plot(nhtemp) ## test the model null hypothesis that the average temperature remains ## constant over the years ## compute Rec-CUSUM fluctuation process temp.cus <- efp(nhtemp ~ 1) ## plot the process plot(temp.cus, alpha = 0.01) ## and calculate the test statistic sctest(temp.cus) ## compute (recursive estimates) fluctuation process ## with an additional linear trend regressor lin.trend <- 1:60 temp.me <- efp(nhtemp ~ lin.trend, type = "fluctuation") ## plot the bivariate process plot(temp.me, functional = NULL) ## and perform the corresponding test sctest(temp.me) } \keyword{hplot} strucchange/man/solveCrossprod.Rd0000644000175400001440000000142613062350355017051 0ustar zeileisusers\name{solveCrossprod} \alias{solveCrossprod} \title{Inversion of X'X} \description{Computes the inverse of the cross-product of a matrix X.} \usage{ solveCrossprod(X, method = c("qr", "chol", "solve")) } \arguments{ \item{X}{a matrix, typically a regressor matrix.} \item{method}{a string indicating whether the QR decomposition, the Cholesky decomposition or \code{solve} should be used.} } \details{Using the Cholesky decomposition of X'X (as computed by \code{crossprod(X)}) is computationally faster and preferred to \code{solve(crossprod(X))}. Using the QR decomposition of X is slower but should be more accurate.} \value{a matrix containing the inverse of \code{crossprod(X)}.} \examples{ X <- cbind(1, rnorm(100)) solveCrossprod(X) solve(crossprod(X)) } \keyword{algebra} strucchange/man/Fstats.Rd0000644000175400001440000001067413062350355015273 0ustar zeileisusers\name{Fstats} \alias{Fstats} \alias{print.Fstats} \title{F Statistics} \description{Computes a series of F statistics for a specified data window.} \usage{ Fstats(formula, from = 0.15, to = NULL, data = list(), vcov. = NULL)} \arguments{ \item{formula}{a symbolic description for the model to be tested} \item{from, to}{numeric. If \code{from} is smaller than 1 they are interpreted as percentages of data and by default \code{to} is taken to be 1 - \code{from}. F statistics will be calculated for the observations \code{(n*from):(n*to)}, where \code{n} is the number of observations in the model.
If \code{from} is greater than 1 it is interpreted to be the index and \code{to} defaults to \code{n - from}. If \code{from} is a vector with two elements, then \code{from} and \code{to} are interpreted as time specifications like in \code{\link{ts}}, see also the examples.} \item{data}{an optional data frame containing the variables in the model. By default the variables are taken from the environment which \code{Fstats} is called from.} \item{vcov.}{a function to extract the covariance matrix for the coefficients of a fitted model of class \code{"lm"}.} } \details{For every potential change point in \code{from:to} a F statistic (Chow test statistic) is computed. For this an OLS model is fitted for the observations before and after the potential change point, i.e. \code{2k} parameters have to be estimated, and the error sum of squares is computed (ESS). Another OLS model for all observations with a restricted sum of squares (RSS) is computed, hence \code{k} parameters have to be estimated here. If \code{n} is the number of observations and \code{k} the number of regressors in the model, the formula is: \deqn{F = \frac{(RSS - ESS)}{ESS/(n - 2 k)}}{F = (RSS-ESS)/ESS * (n-2*k)} Note that this statistic has an asymptotic chi-squared distribution with k degrees of freedom and (under the assumption of normality) F/k has an exact F distribution with k and n - 2k degrees of freedom. } \value{\code{Fstats} returns an object of class \code{"Fstats"}, which contains mainly a time series of F statistics. The function \code{\link{plot}} has a method to plot the F statistics or the corresponding p values; with \code{sctest} a supF-, aveF- or expF-test on structural change can be performed.} \references{ Andrews D.W.K. (1993), Tests for parameter instability and structural change with unknown change point, \emph{Econometrica}, \bold{61}, 821-856. Hansen B. (1992), Tests for parameter instability in regressions with I(1) processes, \emph{Journal of Business & Economic Statistics}, \bold{10}, 321-335. Hansen B. (1997), Approximate asymptotic p values for structural-change tests, \emph{Journal of Business & Economic Statistics}, \bold{15}, 60-67. } \seealso{\code{\link{plot.Fstats}}, \code{\link{sctest.Fstats}}, \code{\link{boundary.Fstats}}} \examples{ ## Nile data with one breakpoint: the annual flows drop in 1898 ## because the first Ashwan dam was built data("Nile") plot(Nile) ## test the null hypothesis that the annual flow remains constant ## over the years fs.nile <- Fstats(Nile ~ 1) plot(fs.nile) sctest(fs.nile) ## visualize the breakpoint implied by the argmax of the F statistics plot(Nile) lines(breakpoints(fs.nile)) ## UK Seatbelt data: a SARIMA(1,0,0)(1,0,0)_12 model ## (fitted by OLS) is used and reveals (at least) two ## breakpoints - one in 1973 associated with the oil crisis and ## one in 1983 due to the introduction of compulsory ## wearing of seatbelts in the UK. 
data("UKDriverDeaths") seatbelt <- log10(UKDriverDeaths) seatbelt <- cbind(seatbelt, lag(seatbelt, k = -1), lag(seatbelt, k = -12)) colnames(seatbelt) <- c("y", "ylag1", "ylag12") seatbelt <- window(seatbelt, start = c(1970, 1), end = c(1984,12)) plot(seatbelt[,"y"], ylab = expression(log[10](casualties))) ## compute F statistics for potential breakpoints between ## 1971(6) (corresponds to from = 0.1) and 1983(6) (corresponds to ## to = 0.9 = 1 - from, the default) ## compute F statistics fs <- Fstats(y ~ ylag1 + ylag12, data = seatbelt, from = 0.1) ## this gives the same result fs <- Fstats(y ~ ylag1 + ylag12, data = seatbelt, from = c(1971, 6), to = c(1983, 6)) ## plot the F statistics plot(fs, alpha = 0.01) ## plot F statistics with aveF boundary plot(fs, aveF = TRUE) ## perform the expF test sctest(fs, type = "expF") } \concept{F statistics} \concept{Andrews test} \concept{Chow test} \concept{Quandt test} \keyword{regression} strucchange/man/scPublications.Rd0000644000175400001440000000526313062350355017007 0ustar zeileisusers\name{scPublications} \alias{scPublications} \title{Structural Change Publications} \usage{data("scPublications")} \description{ Bibliographic information about papers related to structural change and changepoints published in 27 different econometrics and statistics journals. } \format{ A data frame containing information on 835 structural change papers in 9 variables. \describe{ \item{author}{character. Author(s) of the paper.} \item{title}{character. Title of the paper.} \item{journal}{factor. In which journal was the paper published?} \item{year}{numeric. Year of publication.} \item{volume}{numeric. Journal volume.} \item{issue}{character. Issue within the journal volume.} \item{bpage}{numeric. Page on which the paper begins.} \item{epage}{numeric. Page on which the paper ends.} \item{type}{factor. Is the journal an econometrics or statistics journal?} } } \details{ The data set \code{scPublications} includes bibliographic information about publications related to structural change and obtained from the \sQuote{ISI Web of Science}. The query was based on the \sQuote{Science Citation Index Expanded} and \sQuote{Social Sciences Citation Index} (for the full range of years available: 1900-2006 and 1956-2006, respectively). The \sQuote{Source Title} was restricted to the 27 journals in the data frame and the \sQuote{Topic} to be one of the following: structural change, structural break, structural stability, structural instability, parameter instability, parameter stability, parameter constancy, change point, changepoint, change-point, breakpoint, break-point, break point, CUSUM, MOSUM. Additionally, the famous CUSUM paper of Brown, Durbin and Evans (1975) was added manually to \code{scPublications} (because it did not match the query above). } \source{ISI Web of Science at \url{http://www.isiknowledge.com/}. Queried by James Bullard. 
} \examples{ ## construct time series: ## number of sc publications in econometrics/statistics data("scPublications") ## select years from 1987 and ## `most important' journals pub <- scPublications pub <- subset(pub, year > 1986) tab1 <- table(pub$journal) nam1 <- names(tab1)[as.vector(tab1) > 9] ## at least 10 papers tab2 <- sapply(levels(pub$journal), function(x) min(subset(pub, journal == x)$year)) nam2 <- names(tab2)[as.vector(tab2) < 1991] ## started at least in 1990 nam <- nam1[nam1 \%in\% nam2] pub <- subset(pub, as.character(journal) \%in\% nam) pub$journal <- factor(pub$journal) pub_data <- pub ## generate time series pub <- with(pub, tapply(type, year, table)) pub <- zoo(t(sapply(pub, cbind)), 1987:2006) colnames(pub) <- levels(pub_data$type) ## visualize plot(pub, ylim = c(0, 35)) } \keyword{datasets} strucchange/man/DJIA.Rd0000644000175400001440000000306113062350355014526 0ustar zeileisusers\name{DJIA} \alias{DJIA} \title{Dow Jones Industrial Average} \description{ Weekly closing values of the Dow Jones Industrial Average. } \usage{data("DJIA")} \format{ A weekly univariate time series of class \code{"zoo"} from 1971-07-01 to 1974-08-02. } \source{ Appendix A in Hsu (1979). } \references{ Hsu D. A. (1979), Detecting Shifts of Parameter in Gamma Sequences with Applications to Stock Price and Air Traffic Flow Analysis, \emph{Journal of the American Statistical Association}, \bold{74}, 31--40. } \examples{ data("DJIA") ## look at log-difference returns djia <- diff(log(DJIA)) plot(djia) ## convenience functions ## set up a normal regression model which ## explicitely also models the variance normlm <- function(formula, data = list()) { rval <- lm(formula, data = data) class(rval) <- c("normlm", "lm") return(rval) } estfun.normlm <- function(obj) { res <- residuals(obj) ef <- NextMethod(obj) sigma2 <- mean(res^2) rval <- cbind(ef, res^2 - sigma2) colnames(rval) <- c(colnames(ef), "(Variance)") return(rval) } ## normal model (with constant mean and variance) for log returns m1 <- gefp(djia ~ 1, fit = normlm, vcov = meatHAC, sandwich = FALSE) plot(m1, aggregate = FALSE) ## suggests a clear break in the variance (but not the mean) ## dating bp <- breakpoints(I(djia^2) ~ 1) plot(bp) ## -> clearly one break bp time(djia)[bp$breakpoints] ## visualization plot(djia) abline(v = time(djia)[bp$breakpoints], lty = 2) lines(time(djia)[confint(bp)$confint[c(1,3)]], rep(min(djia), 2), col = 2, type = "b", pch = 3) } \keyword{datasets} strucchange/man/breakfactor.Rd0000644000175400001440000000234013062350355016301 0ustar zeileisusers\name{breakfactor} \alias{breakfactor} \title{Factor Coding of Segmentations} \description{ Generates a factor encoding the segmentation given by a set of breakpoints. } \usage{ breakfactor(obj, breaks = NULL, labels = NULL, ...) } \arguments{ \item{obj}{An object of class \code{"breakpoints"} or \code{"breakpointsfull"} respectively.} \item{breaks}{an integer specifying the number of breaks to extract (only if \code{obj} is of class \code{"breakpointsfull"}), by default the minimum BIC partition is used.} \item{labels}{a vector of labels for the returned factor, by default the segments are numbered starting from \code{"segment1"}.} \item{\dots}{further arguments passed to \code{factor}.} } \value{ A factor encoding the segmentation. 
} \seealso{\code{\link{breakpoints}}} \examples{ ## Nile data with one breakpoint: the annual flows drop in 1898 ## because the first Ashwan dam was built data("Nile") plot(Nile) ## compute breakpoints bp.nile <- breakpoints(Nile ~ 1) ## fit and visualize segmented and unsegmented model fm0 <- lm(Nile ~ 1) fm1 <- lm(Nile ~ breakfactor(bp.nile, breaks = 1)) lines(fitted(fm0), col = 3) lines(fitted(fm1), col = 4) lines(bp.nile, breaks = 1) } \keyword{regression} strucchange/man/sctest.Fstats.Rd0000644000175400001440000000500613062350355016570 0ustar zeileisusers\name{sctest.Fstats} \alias{sctest.Fstats} \title{supF-, aveF- and expF-Test} \description{Performs the supF-, aveF- or expF-test} \usage{ \method{sctest}{Fstats}(x, type = c("supF", "aveF", "expF"), asymptotic = FALSE, ...) } \arguments{ \item{x}{an object of class \code{"Fstats"}.} \item{type}{a character string specifying which test will be performed.} \item{asymptotic}{logical. Only necessary if \code{x} contains just a single F statistic and type is \code{"supF"} or \code{"aveF"}. If then set to \code{TRUE} the asymptotic (chi-square) distribution instead of the exact (F) distribution will be used to compute the p value.} \item{...}{currently not used.} } \details{If \code{x} contains just a single F statistic and type is \code{"supF"} or \code{"aveF"} the Chow test will be performed. The original GAUSS code for computing the p values of the supF-, aveF- and expF-test was written by Bruce Hansen and is available from \url{http://www.ssc.wisc.edu/~bhansen/}. R port by Achim Zeileis. } \value{ An object of class \code{"htest"} containing: \item{statistic}{the test statistic,} \item{p.value}{the corresponding p value,} \item{method}{a character string with the method used,} \item{data.name}{a character string with the data name.} } \references{ Andrews D.W.K. (1993), Tests for parameter instability and structural change with unknown change point, \emph{Econometrica}, \bold{61}, 821-856. Andrews D.W.K., Ploberger W. (1994), Optimal tests when a nuisance parameter is present only under the alternative, \emph{Econometrica}, \bold{62}, 1383-1414. Hansen B. (1992), Tests for parameter instability in regressions with I(1) processes, \emph{Journal of Business & Economic Statistics}, \bold{10}, 321-335. Hansen B. (1997), Approximate asymptotic p values for structural-change tests, \emph{Journal of Business & Economic Statistics}, \bold{15}, 60-67. 
} \seealso{\code{\link{Fstats}}, \code{\link{plot.Fstats}}} \examples{ ## Load dataset "nhtemp" with average yearly temperatures in New Haven data(nhtemp) ## plot the data plot(nhtemp) ## test the model null hypothesis that the average temperature remains ## constant over the years for potential break points between 1941 ## (corresponds to from = 0.5) and 1962 (corresponds to to = 0.85) ## compute F statistics fs <- Fstats(nhtemp ~ 1, from = 0.5, to = 0.85) ## plot the F statistics plot(fs, alpha = 0.01) ## and the corresponding p values plot(fs, pval = TRUE, alpha = 0.01) ## perform the aveF test sctest(fs, type = "aveF") } \keyword{htest} strucchange/man/boundary.Fstats.Rd0000644000175400001440000000343613062350355017113 0ustar zeileisusers\name{boundary.Fstats} \alias{boundary.Fstats} \title{Boundary for F Statistics} \description{Computes boundary for an object of class \code{"Fstats"}} \usage{ \method{boundary}{Fstats}(x, alpha = 0.05, pval = FALSE, aveF = FALSE, asymptotic = FALSE, ...)} \arguments{ \item{x}{an object of class \code{"Fstats"}.} \item{alpha}{numeric from interval (0,1) indicating the confidence level for which the boundary of the supF test will be computed.} \item{pval}{logical. If set to \code{TRUE} a boundary for the corresponding p values will be computed.} \item{aveF}{logical. If set to \code{TRUE} the boundary of the aveF (instead of the supF) test will be computed. The resulting boundary then is a boundary for the mean of the F statistics rather than for the F statistics themselves.} \item{asymptotic}{logical. If set to \code{TRUE} the asymptotic (chi-square) distribution instead of the exact (F) distribution will be used to compute the p values (only if \code{pval} is \code{TRUE}).} \item{...}{currently not used.} } \value{an object of class \code{"ts"} with the same time properties as the time series in \code{x}} \seealso{\code{\link{Fstats}}, \code{\link{plot.Fstats}}} \examples{ ## Load dataset "nhtemp" with average yearly temperatures in New Haven data("nhtemp") ## plot the data plot(nhtemp) ## test the model null hypothesis that the average temperature remains ## constant over the years for potential break points between 1941 ## (corresponds to from = 0.5) and 1962 (corresponds to to = 0.85) ## compute F statistics fs <- Fstats(nhtemp ~ 1, from = 0.5, to = 0.85) ## plot the p values without boundary plot(fs, pval = TRUE, alpha = 0.01) ## add the boundary in another colour lines(boundary(fs, pval = TRUE, alpha = 0.01), col = 2) } \keyword{regression} strucchange/man/plot.Fstats.Rd0000644000175400001440000000465413062350355016251 0ustar zeileisusers\name{plot.Fstats} \alias{plot.Fstats} \alias{lines.Fstats} \title{Plot F Statistics} \description{Plotting method for objects of class \code{"Fstats"}} \usage{ \method{plot}{Fstats}(x, pval = FALSE, asymptotic = FALSE, alpha = 0.05, boundary = TRUE, aveF = FALSE, xlab = "Time", ylab = NULL, ylim = NULL, ...) } \arguments{ \item{x}{an object of class \code{"Fstats"}.} \item{pval}{logical. If set to \code{TRUE} the corresponding p values instead of the original F statistics will be plotted.} \item{asymptotic}{logical. If set to \code{TRUE} the asymptotic (chi-square) distribution instead of the exact (F) distribution will be used to compute the p values (only if \code{pval} is \code{TRUE}).} \item{alpha}{numeric from interval (0,1) indicating the confidence level for which the boundary of the supF test will be computed.} \item{boundary}{logical. 
If set to \code{FALSE} the boundary will be computed but not plotted.} \item{aveF}{logical. If set to \code{TRUE} the boundary of the aveF test will be plotted. As this is a boundary for the mean of the F statistics rather than for the F statistics themselves a dashed line for the mean of the F statistics will also be plotted.} \item{xlab, ylab, ylim, ...}{high-level \code{\link{plot}} function parameters.}} \references{ Andrews D.W.K. (1993), Tests for parameter instability and structural change with unknown change point, \emph{Econometrica}, \bold{61}, 821-856. Hansen B. (1992), Tests for parameter instability in regressions with I(1) processes, \emph{Journal of Business & Economic Statistics}, \bold{10}, 321-335. Hansen B. (1997), Approximate asymptotic p values for structural-change tests, \emph{Journal of Business & Economic Statistics}, \bold{15}, 60-67. } \seealso{\code{\link{Fstats}}, \code{\link{boundary.Fstats}}, \code{\link{sctest.Fstats}}} \examples{ ## Load dataset "nhtemp" with average yearly temperatures in New Haven data("nhtemp") ## plot the data plot(nhtemp) ## test the model null hypothesis that the average temperature remains ## constant over the years for potential break points between 1941 ## (corresponds to from = 0.5) and 1962 (corresponds to to = 0.85) ## compute F statistics fs <- Fstats(nhtemp ~ 1, from = 0.5, to = 0.85) ## plot the F statistics plot(fs, alpha = 0.01) ## and the corresponding p values plot(fs, pval = TRUE, alpha = 0.01) ## perform the aveF test sctest(fs, type = "aveF") } \keyword{hplot} strucchange/man/boundary.Rd0000644000175400001440000000113513062350355015642 0ustar zeileisusers\name{boundary} \alias{boundary} \title{Boundary Function for Structural Change Tests} \description{A generic function computing boundaries for structural change tests} \usage{ boundary(x, ...)} \arguments{ \item{x}{an object. Use \code{\link{methods}} to see which \code{\link{class}} has a method for boundary.} \item{...}{additional arguments affecting the boundary.} } \value{an object of class \code{"ts"} with the same time properties as the time series in \code{x}} \seealso{\code{\link{boundary.efp}}, \code{\link{boundary.mefp}}, \code{\link{boundary.Fstats}}} \keyword{regression} strucchange/man/PhillipsCurve.Rd0000644000175400001440000000460513062350355016615 0ustar zeileisusers\name{PhillipsCurve} \alias{PhillipsCurve} \title{UK Phillips Curve Equation Data} \usage{data("PhillipsCurve")} \description{ Macroeconomic time series from the United Kingdom with variables for estimating the Phillips curve equation. } \format{ A multivariate annual time series from 1857 to 1987 with the columns \describe{ \item{p}{Logarithm of the consumer price index,} \item{w}{Logarithm of nominal wages,} \item{u}{Unemployment rate,} \item{dp}{First differences of \code{p},} \item{dw}{First differences of \code{w},} \item{du}{First differences of \code{u}} \item{u1}{Lag 1 of \code{u},} \item{dp1}{Lag 1 of \code{dp}.} } } \source{The data is available online in the data archive of the Journal of Applied Econometrics \url{http://qed.econ.queensu.ca/jae/2003-v18.1/bai-perron/}.} \references{ Alogoskoufis G.S., Smith R. (1991), The Phillips Curve, the Persistence of Inflation, and the Lucas Critique: Evidence from Exchange Rate Regimes, \emph{American Economic Review}, \bold{81}, 1254-1275. Bai J., Perron P. (2003), Computation and Analysis of Multiple Structural Change Models, \emph{Journal of Applied Econometrics}, \bold{18}, 1-22. 
} \examples{ ## load and plot data data("PhillipsCurve") uk <- window(PhillipsCurve, start = 1948) plot(uk[, "dp"]) ## AR(1) inflation model ## estimate breakpoints bp.inf <- breakpoints(dp ~ dp1, data = uk, h = 8) plot(bp.inf) summary(bp.inf) ## fit segmented model with two breaks fac.inf <- breakfactor(bp.inf, breaks = 2, label = "seg") fm.inf <- lm(dp ~ 0 + fac.inf/dp1, data = uk) summary(fm.inf) ## Results from Table 2 in Bai & Perron (2003): ## coefficient estimates coef(bp.inf, breaks = 2) ## corresponding standard errors sqrt(sapply(vcov(bp.inf, breaks = 2), diag)) ## breakpoints and confidence intervals confint(bp.inf, breaks = 2) ## Phillips curve equation ## estimate breakpoints bp.pc <- breakpoints(dw ~ dp1 + du + u1, data = uk, h = 5, breaks = 5) ## look at RSS and BIC plot(bp.pc) summary(bp.pc) ## fit segmented model with two breaks fac.pc <- breakfactor(bp.pc, breaks = 2, label = "seg") fm.pc <- lm(dw ~ 0 + fac.pc/dp1 + du + u1, data = uk) summary(fm.pc) ## Results from Table 3 in Bai & Perron (2003): ## coefficient estimates coef(fm.pc) ## corresponding standard errors sqrt(diag(vcov(fm.pc))) ## breakpoints and confidence intervals confint(bp.pc, breaks = 2, het.err = FALSE) } \keyword{datasets} strucchange/man/Grossarl.Rd0000644000175400001440000001210013550277070015611 0ustar zeileisusers\name{Grossarl} \alias{Grossarl} \docType{data} \encoding{latin1} \title{Marriages, Births and Deaths in Grossarl} \usage{data("Grossarl")} \description{ Data about the number of marriages, illegitimate and legitimate births, and deaths in the Austrian Alpine village Grossarl during the 18th and 19th century. } \format{ \code{Grossarl} is a data frame containing 6 annual time series (1700 - 1899), 3 factors coding policy interventions and 1 vector with the year (plain numeric). \describe{ \item{marriages}{time series. Number of marriages,} \item{illegitimate}{time series. Number of illegitimate births,} \item{legitimate}{time series. Number of legitimate births,} \item{deaths}{time series. Number of deaths,} \item{fraction}{time series. Fraction of illegitimate births,} \item{lag.marriages}{time series. Number of marriages in the previous year,} \item{politics}{ordered factor coding 4 different political regimes,} \item{morals}{ordered factor coding 5 different moral regulations,} \item{nuptiality}{ordered factor coding 5 different marriage restrictions,} \item{year}{numeric. Year of observation.} } } \details{The data frame contains historical demographic data from Grossarl, a village in the Alpine region of Salzburg, Austria, during the 18th and 19th century. During this period, the total population of Grossarl did not vary much on the whole, with the very exception of the period of the protestant emigrations in 1731/32. Especially during the archbishopric, moral interventions aimed at lowering the proportion of illegitimate baptisms. For details see the references.} \source{Parish registers provide the basic demographic series of baptisms and burials (which is almost equivalent to births and deaths in the study area) and marriages. For more information see Veichtlbauer et al. (2006).} \references{ Veichtlbauer O., Zeileis A., Leisch F. (2006), The Impact Of Policy Interventions on a Pre-Industrial Population System in the Austrian Alps, forthcoming. Zeileis A., Veichtlbauer O. (2002), Policy Interventions Affecting Illegitimacy in Preindustrial Austria: A Structural Change Analysis, In R.
Dutter (ed.), \emph{Festschrift 50 Jahre Österreichische Statistische Gesellschaft}, 133-146, Österreichische Statistische Gesellschaft. } \examples{ data("Grossarl") ## time series of births, deaths, marriages ########################################### with(Grossarl, plot(cbind(deaths, illegitimate + legitimate, marriages), plot.type = "single", col = grey(c(0.7, 0, 0)), lty = c(1, 1, 3), lwd = 1.5, ylab = "annual Grossarl series")) legend("topright", c("deaths", "births", "marriages"), col = grey(c(0.7, 0, 0)), lty = c(1, 1, 3), bty = "n") ## illegitimate births ###################### ## lm + MOSUM plot(Grossarl$fraction) fm.min <- lm(fraction ~ politics, data = Grossarl) fm.ext <- lm(fraction ~ politics + morals + nuptiality + marriages, data = Grossarl) lines(ts(fitted(fm.min), start = 1700), col = 2) lines(ts(fitted(fm.ext), start = 1700), col = 4) mos.min <- efp(fraction ~ politics, data = Grossarl, type = "OLS-MOSUM") mos.ext <- efp(fraction ~ politics + morals + nuptiality + marriages, data = Grossarl, type = "OLS-MOSUM") plot(mos.min) lines(mos.ext, lty = 2) ## dating bp <- breakpoints(fraction ~ 1, data = Grossarl, h = 0.1) summary(bp) ## RSS, BIC, AIC plot(bp) plot(0:8, AIC(bp), type = "b") ## probably use 5 or 6 breakpoints and compare with ## coding of the factors as used by us ## ## politics 1803 1816 1850 ## morals 1736 1753 1771 1803 ## nuptiality 1803 1810 1816 1883 ## ## m = 5 1753 1785 1821 1856 1878 ## m = 6 1734 1754 1785 1821 1856 1878 ## 6 2 5 1 4 3 ## fitted models coef(bp, breaks = 6) plot(Grossarl$fraction) lines(fitted(bp, breaks = 6), col = 2) lines(ts(fitted(fm.ext), start = 1700), col = 4) ## marriages ############ ## lm + MOSUM plot(Grossarl$marriages) fm.min <- lm(marriages ~ politics, data = Grossarl) fm.ext <- lm(marriages ~ politics + morals + nuptiality, data = Grossarl) lines(ts(fitted(fm.min), start = 1700), col = 2) lines(ts(fitted(fm.ext), start = 1700), col = 4) mos.min <- efp(marriages ~ politics, data = Grossarl, type = "OLS-MOSUM") mos.ext <- efp(marriages ~ politics + morals + nuptiality, data = Grossarl, type = "OLS-MOSUM") plot(mos.min) lines(mos.ext, lty = 2) ## dating bp <- breakpoints(marriages ~ 1, data = Grossarl, h = 0.1) summary(bp) ## RSS, BIC, AIC plot(bp) plot(0:8, AIC(bp), type = "b") ## probably use 3 or 4 breakpoints and compare with ## coding of the factors as used by us ## ## politics 1803 1816 1850 ## morals 1736 1753 1771 1803 ## nuptiality 1803 1810 1816 1883 ## ## m = 3 1738 1813 1875 ## m = 4 1738 1794 1814 1875 ## 2 4 1 3 ## fitted models coef(bp, breaks = 4) plot(Grossarl$marriages) lines(fitted(bp, breaks = 4), col = 2) lines(ts(fitted(fm.ext), start = 1700), col = 4) } \keyword{datasets} strucchange/man/SP2001.Rd0000644000175400001440000000636113062350355014652 0ustar zeileisusers\name{SP2001} \alias{SP2001} \title{S\&P 500 Stock Prices} \description{ A multivariate series of all S\&P 500 stock prices in the second half of the year 2001, i.e., before and after the terrorist attacks of 2001-09-11. } \usage{data("SP2001")} \format{ A multivariate daily \code{"zoo"} series with \code{"Date"} index from 2001-07-31 to 2001-12-31 (103 observations) of all 500 S\&P stock prices. } \source{Yahoo! Finance: \url{http://finance.yahoo.com/}.} \references{ Zeileis A., Leisch F., Kleiber C., Hornik K. (2005), Monitoring Structural Change in Dynamic Econometric Models, \emph{Journal of Applied Econometrics}, \bold{20}, 99--121.
} \seealso{\code{\link[tseries]{get.hist.quote}}} \examples{ ## load and transform data ## (DAL: Delta Air Lines, LU: Lucent Technologies) data("SP2001") stock.prices <- SP2001[, c("DAL", "LU")] stock.returns <- diff(log(stock.prices)) ## price and return series plot(stock.prices, ylab = c("Delta Air Lines", "Lucent Technologies"), main = "") plot(stock.returns, ylab = c("Delta Air Lines", "Lucent Technologies"), main = "") ## monitoring of DAL series myborder <- function(k) 1.939*k/28 x <- as.vector(stock.returns[, "DAL"][1:28]) dal.cusum <- mefp(x ~ 1, type = "OLS-CUSUM", border = myborder) dal.mosum <- mefp(x ~ 1, type = "OLS-MOSUM", h = 0.5, period = 4) x <- as.vector(stock.returns[, "DAL"]) dal.cusum <- monitor(dal.cusum) dal.mosum <- monitor(dal.mosum) ## monitoring of LU series x <- as.vector(stock.returns[, "LU"][1:28]) lu.cusum <- mefp(x ~ 1, type = "OLS-CUSUM", border = myborder) lu.mosum <- mefp(x ~ 1, type = "OLS-MOSUM", h = 0.5, period = 4) x <- as.vector(stock.returns[, "LU"]) lu.cusum <- monitor(lu.cusum) lu.mosum <- monitor(lu.mosum) ## pretty plotting ## (needs some work because lm() does not keep "zoo" attributes) cus.bound <- zoo(c(rep(NA, 27), myborder(28:102)), index(stock.returns)) mos.bound <- as.vector(boundary(dal.mosum)) mos.bound <- zoo(c(rep(NA, 27), mos.bound[1], mos.bound), index(stock.returns)) ## Lucent Technologies: CUSUM test plot(zoo(c(lu.cusum$efpprocess, lu.cusum$process), index(stock.prices)), ylim = c(-1, 1) * coredata(cus.bound)[102], xlab = "Time", ylab = "empirical fluctuation process") abline(0, 0) abline(v = as.Date("2001-09-10"), lty = 2) lines(cus.bound, col = 2) lines(-cus.bound, col = 2) ## Lucent Technologies: MOSUM test plot(zoo(c(lu.mosum$efpprocess, lu.mosum$process), index(stock.prices)[-(1:14)]), ylim = c(-1, 1) * coredata(mos.bound)[102], xlab = "Time", ylab = "empirical fluctuation process") abline(0, 0) abline(v = as.Date("2001-09-10"), lty = 2) lines(mos.bound, col = 2) lines(-mos.bound, col = 2) ## Delta Air Lines: CUSUM test plot(zoo(c(dal.cusum$efpprocess, dal.cusum$process), index(stock.prices)), ylim = c(-1, 1) * coredata(cus.bound)[102], xlab = "Time", ylab = "empirical fluctuation process") abline(0, 0) abline(v = as.Date("2001-09-10"), lty = 2) lines(cus.bound, col = 2) lines(-cus.bound, col = 2) ## Delta Air Lines: MOSUM test plot(zoo(c(dal.mosum$efpprocess, dal.mosum$process), index(stock.prices)[-(1:14)]), ylim = range(dal.mosum$process), xlab = "Time", ylab = "empirical fluctuation process") abline(0, 0) abline(v = as.Date("2001-09-10"), lty = 2) lines(mos.bound, col = 2) lines(-mos.bound, col = 2) } \keyword{datasets} strucchange/man/mefp.Rd0000644000175400001440000001454113062350355014753 0ustar zeileisusers\name{mefp} \alias{mefp} \alias{mefp.formula} \alias{mefp.efp} \alias{print.mefp} \alias{monitor} \title{Monitoring of Empirical Fluctuation Processes} \description{ Online monitoring of structural breaks in a linear regression model. A sequential fluctuation test based on parameter estimates or OLS residuals signals structural breaks. } \usage{ mefp(obj, ...) \method{mefp}{formula}(formula, type = c("OLS-CUSUM", "OLS-MOSUM", "RE", "ME", "fluctuation"), data, h = 1, alpha = 0.05, functional = c("max", "range"), period = 10, tolerance = .Machine$double.eps^0.5, CritvalTable = NULL, rescale = NULL, border = NULL, ...) \method{mefp}{efp}(obj, alpha=0.05, functional = c("max", "range"), period = 10, tolerance = .Machine$double.eps^0.5, CritvalTable = NULL, rescale = NULL, border = NULL, ...) 
monitor(obj, data = NULL, verbose = TRUE) } \arguments{ \item{formula}{a symbolic description for the model to be tested.} \item{data}{an optional data frame containing the variables in the model. By default the variables are taken from the environment which \code{efp} is called from.} \item{type}{specifies which type of fluctuation process will be computed.} \item{h}{(only used for MOSUM/ME processes). A numeric scalar from interval (0,1) specifying the size of the data window relative to the sample size.} \item{obj}{Object of class \code{"efp"} (for \code{mefp}) or \code{"mefp"} (for \code{monitor}).} \item{alpha}{Significance level of the test, i.e., probability of type I error.} \item{functional}{Determines if maximum or range of parameter differences is used as statistic.} \item{period}{(only used for MOSUM/ME processes). Maximum time (relative to the history period) that will be monitored. Default is 10 times the history period.} \item{tolerance}{Tolerance for numeric \code{==} comparisons.} \item{CritvalTable}{Table of critical values, this table is interpolated to get critical values for arbitrary \code{alpha}s. The default depends on the \code{type} of fluctuation process (pre-computed tables are available for all types). \emph{This argument is under development.}} \item{rescale}{If \code{TRUE} the estimates will be standardized by the regressor matrix of the corresponding subsample similar to Kuan & Chen (1994); if \code{FALSE} the historic regressor matrix will be used. The default is to rescale the monitoring processes of type \code{"ME"} but not of \code{"RE"}.} \item{border}{An optional user-specified border function for the empirical process. \emph{This argument is under development.}} \item{verbose}{If \code{TRUE}, signal breaks by text output.} \item{...}{Currently not used.} } \details{ \code{\link{mefp}} creates an object of class \code{"mefp"} either from a model formula or from an object of class \code{"efp"}. In addition to the arguments of \code{\link{efp}}, the type of statistic and a significance level for the monitoring must be specified. The monitoring itself is performed by \code{monitor}, which can be called arbitrarily often on objects of class \code{"mefp"}. If new data have arrived, then the empirical fluctuation process is computed for the new data. If the process crosses the boundaries corresponding to the significance level \code{alpha}, a structural break is detected (and signaled). The typical usage is to initialize the monitoring by creation of an object of class \code{"mefp"} either using a formula or an \code{"efp"} object. Data available at this stage are considered the \emph{history sample}, which is kept fixed during the complete monitoring process, and may not contain any structural changes. Subsequent calls to \code{monitor} perform a sequential test of the null hypothesis of no structural change in new data against the general alternative of changes in one or more of the coefficients of the regression model. The recursive estimates test is also called fluctuation test, therefore setting \code{type} to \code{"fluctuation"} was used to specify it in earlier versions of strucchange. It still can be used now, but will be forced to \code{"RE"} } \seealso{\code{\link{plot.mefp}}, \code{\link{boundary.mefp}}} \references{ Leisch F., Hornik K., Kuan C.-M. (2000), Monitoring Structural Changes with the Generalized Fluctuation Test, \emph{Econometric Theory}, \bold{16}, 835--854. Zeileis A., Leisch F., Kleiber C., Hornik K. 
(2005), Monitoring Structural Change in Dynamic Econometric Models, \emph{Journal of Applied Econometrics}, \bold{20}, 99--121. doi:10.1002/jae.776. Zeileis A. (2005), A Unified Approach to Structural Change Tests Based on ML Scores, F Statistics, and OLS Residuals. \emph{Econometric Reviews}, \bold{24}, 445--466. doi:10.1080/07474930500406053. Zeileis A., Shah A., Patnaik I. (2010), Testing, Monitoring, and Dating Structural Changes in Exchange Rate Regimes, \emph{Computational Statistics and Data Analysis}, \bold{54}(6), 1696--1706. doi:10.1016/j.csda.2009.12.005. } \examples{ df1 <- data.frame(y=rnorm(300)) df1[150:300,"y"] <- df1[150:300,"y"]+1 ## use the first 50 observations as history period e1 <- efp(y~1, data=df1[1:50,,drop=FALSE], type="ME", h=1) me1 <- mefp(e1, alpha=0.05) ## the same in one function call me1 <- mefp(y~1, data=df1[1:50,,drop=FALSE], type="ME", h=1, alpha=0.05) ## monitor the 50 next observations me2 <- monitor(me1, data=df1[1:100,,drop=FALSE]) plot(me2) # and now monitor on all data me3 <- monitor(me2, data=df1) plot(me3) ## Load dataset "USIncExp" with income and expenditure in the US ## and choose a suitable subset for the history period data("USIncExp") USIncExp3 <- window(USIncExp, start=c(1969,1), end=c(1971,12)) ## initialize the monitoring with the formula interface me.mefp <- mefp(expenditure~income, type="ME", rescale=TRUE, data=USIncExp3, alpha=0.05) ## monitor the new observations for the year 1972 USIncExp3 <- window(USIncExp, start=c(1969,1), end=c(1972,12)) me.mefp <- monitor(me.mefp) ## monitor the new data for the years 1973-1976 USIncExp3 <- window(USIncExp, start=c(1969,1), end=c(1976,12)) me.mefp <- monitor(me.mefp) plot(me.mefp, functional = NULL) } \concept{CUSUM} \concept{MOSUM} \concept{recursive estimates} \concept{moving estimates} \concept{fluctuation test} \concept{monitoring} \concept{structural change} \keyword{regression} strucchange/DESCRIPTION0000644000175400001440000000401013550416405014457 0ustar zeileisusersPackage: strucchange Version: 1.5-2 Date: 2019-10-12 Title: Testing, Monitoring, and Dating Structural Changes Authors@R: c(person(given = "Achim", family = "Zeileis", role = c("aut", "cre"), email = "Achim.Zeileis@R-project.org", comment = c(ORCID = "0000-0003-0918-3766")), person(given = "Friedrich", family = "Leisch", role = "aut", email = "Friedrich.Leisch@R-project.org"), person(given = "Kurt", family = "Hornik", role = "aut", email = "Kurt.Hornik@R-project.org"), person(given = "Christian", family = "Kleiber", role = "aut", email = "Christian.Kleiber@unibas.ch"), person(given = "Bruce", family = "Hansen", role = "ctb"), person(given = c("Edgar", "C."), family = "Merkle", role = "ctb")) Description: Testing, monitoring and dating structural changes in (linear) regression models. strucchange features tests/methods from the generalized fluctuation test framework as well as from the F test (Chow test) framework. This includes methods to fit, plot and test fluctuation processes (e.g., CUSUM, MOSUM, recursive/moving estimates) and F statistics, respectively. It is possible to monitor incoming data online using fluctuation processes. Finally, the breakpoints in regression models with structural changes can be estimated together with confidence intervals. Emphasis is always given to methods for visualizing the data. 
LazyData: yes Depends: R (>= 2.10.0), zoo, sandwich Suggests: stats4, car, dynlm, e1071, foreach, lmtest, mvtnorm, tseries Imports: graphics, stats, utils License: GPL-2 | GPL-3 NeedsCompilation: yes Packaged: 2019-10-12 07:22:10 UTC; zeileis Author: Achim Zeileis [aut, cre] (<https://orcid.org/0000-0003-0918-3766>), Friedrich Leisch [aut], Kurt Hornik [aut], Christian Kleiber [aut], Bruce Hansen [ctb], Edgar C. Merkle [ctb] Maintainer: Achim Zeileis <Achim.Zeileis@R-project.org> Repository: CRAN Date/Publication: 2019-10-12 18:35:49 UTC
strucchange/build/0000755000175400001440000000000013550277442014063 5ustar zeileisusersstrucchange/build/vignette.rds0000644000175400001440000000050113550277442016416 0ustar zeileisusersstrucchange/tests/0000755000175400001440000000000013062350355014117 5ustar zeileisusersstrucchange/tests/Examples/0000755000175400001440000000000013062350355015675 5ustar zeileisusersstrucchange/tests/Examples/strucchange-Ex.Rout.save0000644000175400001440000021062113550207541022367 0ustar zeileisusers
R version 3.2.0 (2015-04-16) -- "Full of Ingredients" Copyright (C) 2015 The R Foundation for Statistical Computing Platform: x86_64-pc-linux-gnu (64-bit) R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. Type 'license()' or 'licence()' for distribution details. R is a collaborative project with many contributors. Type 'contributors()' for more information and 'citation()' on how to cite R or R packages in publications. Type 'demo()' for some demos, 'help()' for on-line help, or 'help.start()' for an HTML browser interface to help. Type 'q()' to quit R.
> pkgname <- "strucchange" > source(file.path(R.home("share"), "R", "examples-header.R")) > options(warn = 1) > library('strucchange') Loading required package: zoo Attaching package: 'zoo' The following objects are masked from 'package:base': as.Date, as.Date.numeric Loading required package: sandwich > > base::assign(".oldSearch", base::search(), pos = 'CheckExEnv') > cleanEx() > nameEx("BostonHomicide") > ### * BostonHomicide > > flush(stderr()); flush(stdout()) > > ### Name: BostonHomicide > ### Title: Youth Homicides in Boston > ### Aliases: BostonHomicide > ### Keywords: datasets > > ### ** Examples > > data("BostonHomicide") > attach(BostonHomicide) > > ## data from Table 1 > tapply(homicides, year, mean) 1992 1993 1994 1995 1996 1997 1998 3.083333 4.000000 3.166667 3.833333 2.083333 1.250000 0.800000 > populationBM[0:6*12 + 7] [1] 12977 12455 12272 12222 11895 12038 NA > tapply(ahomicides25, year, mean) 1992 1993 1994 1995 1996 1997 1998 3.250000 4.166667 3.916667 4.166667 2.666667 2.333333 1.400000 > tapply(ahomicides35, year, mean) 1992 1993 1994 1995 1996 1997 1998 0.8333333 1.0833333 1.3333333 1.1666667 1.0833333 0.7500000 0.4000000 > population[0:6*12 + 7] [1] 228465 227218 226611 231367 230744 228696 NA > unemploy[0:6*12 + 7] [1] 20.2 18.8 15.9 14.7 13.8 12.6 NA > > ## model A > ## via OLS > fmA <- lm(homicides ~ populationBM + season) > anova(fmA) Analysis of Variance Table Response: homicides Df Sum Sq Mean Sq F value Pr(>F) populationBM 1 14.364 14.3642 3.7961 0.05576 . season 11 47.254 4.2959 1.1353 0.34985 Residuals 64 242.174 3.7840 --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.'
0.1 ' ' 1 > ## as GLM > fmA1 <- glm(homicides ~ populationBM + season, family = poisson) > anova(fmA1, test = "Chisq") Analysis of Deviance Table Model: poisson, link: log Response: homicides Terms added sequentially (first to last) Df Deviance Resid. Df Resid. Dev Pr(>Chi) NULL 76 115.649 populationBM 1 4.9916 75 110.657 0.02547 * season 11 18.2135 64 92.444 0.07676 . --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 > > ## model B & C > fmB <- lm(homicides ~ populationBM + season + ahomicides25) > fmC <- lm(homicides ~ populationBM + season + ahomicides25 + unemploy) > > detach(BostonHomicide) > > > > cleanEx() > nameEx("DJIA") > ### * DJIA > > flush(stderr()); flush(stdout()) > > ### Name: DJIA > ### Title: Dow Jones Industrial Average > ### Aliases: DJIA > ### Keywords: datasets > > ### ** Examples > > data("DJIA") > ## look at log-difference returns > djia <- diff(log(DJIA)) > plot(djia) > > ## convenience functions > ## set up a normal regression model which > ## explicitely also models the variance > normlm <- function(formula, data = list()) { + rval <- lm(formula, data = data) + class(rval) <- c("normlm", "lm") + return(rval) + } > estfun.normlm <- function(obj) { + res <- residuals(obj) + ef <- NextMethod(obj) + sigma2 <- mean(res^2) + rval <- cbind(ef, res^2 - sigma2) + colnames(rval) <- c(colnames(ef), "(Variance)") + return(rval) + } > > ## normal model (with constant mean and variance) for log returns > m1 <- gefp(djia ~ 1, fit = normlm, vcov = meatHAC, sandwich = FALSE) > plot(m1, aggregate = FALSE) > ## suggests a clear break in the variance (but not the mean) > > ## dating > bp <- breakpoints(I(djia^2) ~ 1) > plot(bp) > ## -> clearly one break > bp Optimal 2-segment partition: Call: breakpoints.formula(formula = I(djia^2) ~ 1) Breakpoints at observation number: 89 Corresponding to breakdates: 0.552795 > time(djia)[bp$breakpoints] [1] "1973-03-16" > > ## visualization > plot(djia) > abline(v = time(djia)[bp$breakpoints], lty = 2) > lines(time(djia)[confint(bp)$confint[c(1,3)]], rep(min(djia), 2), col = 2, type = "b", pch = 3) > > > > cleanEx() > nameEx("Fstats") > ### * Fstats > > flush(stderr()); flush(stdout()) > > ### Name: Fstats > ### Title: F Statistics > ### Aliases: Fstats print.Fstats > ### Keywords: regression > > ### ** Examples > > ## Nile data with one breakpoint: the annual flows drop in 1898 > ## because the first Ashwan dam was built > data("Nile") > plot(Nile) > > ## test the null hypothesis that the annual flow remains constant > ## over the years > fs.nile <- Fstats(Nile ~ 1) > plot(fs.nile) > sctest(fs.nile) supF test data: fs.nile sup.F = 75.93, p-value = 2.22e-16 > ## visualize the breakpoint implied by the argmax of the F statistics > plot(Nile) > lines(breakpoints(fs.nile)) > > ## UK Seatbelt data: a SARIMA(1,0,0)(1,0,0)_12 model > ## (fitted by OLS) is used and reveals (at least) two > ## breakpoints - one in 1973 associated with the oil crisis and > ## one in 1983 due to the introduction of compulsory > ## wearing of seatbelts in the UK. 
> data("UKDriverDeaths") > seatbelt <- log10(UKDriverDeaths) > seatbelt <- cbind(seatbelt, lag(seatbelt, k = -1), lag(seatbelt, k = -12)) > colnames(seatbelt) <- c("y", "ylag1", "ylag12") > seatbelt <- window(seatbelt, start = c(1970, 1), end = c(1984,12)) > plot(seatbelt[,"y"], ylab = expression(log[10](casualties))) > > ## compute F statistics for potential breakpoints between > ## 1971(6) (corresponds to from = 0.1) and 1983(6) (corresponds to > ## to = 0.9 = 1 - from, the default) > ## compute F statistics > fs <- Fstats(y ~ ylag1 + ylag12, data = seatbelt, from = 0.1) > ## this gives the same result > fs <- Fstats(y ~ ylag1 + ylag12, data = seatbelt, from = c(1971, 6), + to = c(1983, 6)) > ## plot the F statistics > plot(fs, alpha = 0.01) > ## plot F statistics with aveF boundary > plot(fs, aveF = TRUE) > ## perform the expF test > sctest(fs, type = "expF") expF test data: fs exp.F = 6.4247, p-value = 0.008093 > > > > cleanEx() > nameEx("GermanM1") > ### * GermanM1 > > flush(stderr()); flush(stdout()) > > ### Encoding: UTF-8 > > ### Name: GermanM1 > ### Title: German M1 Money Demand > ### Aliases: GermanM1 historyM1 monitorM1 > ### Keywords: datasets > > ### ** Examples > > data("GermanM1") > ## Lütkepohl et al. (1999) use the following model > LTW.model <- dm ~ dy2 + dR + dR1 + dp + m1 + y1 + R1 + season > ## Zeileis et al. (2005) use > M1.model <- dm ~ dy2 + dR + dR1 + dp + ecm.res + season > > > ## historical tests > ols <- efp(LTW.model, data = GermanM1, type = "OLS-CUSUM") > plot(ols) > re <- efp(LTW.model, data = GermanM1, type = "fluctuation") > plot(re) > fs <- Fstats(LTW.model, data = GermanM1, from = 0.1) > plot(fs) > > ## monitoring > M1 <- historyM1 > ols.efp <- efp(M1.model, type = "OLS-CUSUM", data = M1) > newborder <- function(k) 1.5778*k/118 > ols.mefp <- mefp(ols.efp, period = 2) > ols.mefp2 <- mefp(ols.efp, border = newborder) > M1 <- GermanM1 > ols.mon <- monitor(ols.mefp) Break detected at observation # 128 > ols.mon2 <- monitor(ols.mefp2) Break detected at observation # 135 > plot(ols.mon) > lines(boundary(ols.mon2), col = 2) > > ## dating > bp <- breakpoints(LTW.model, data = GermanM1) > summary(bp) Optimal (m+1)-segment partition: Call: breakpoints.formula(formula = LTW.model, data = GermanM1) Breakpoints at observation number: m = 1 119 m = 2 42 119 m = 3 48 71 119 m = 4 27 48 71 119 m = 5 27 48 71 98 119 Corresponding to breakdates: m = 1 1990(3) m = 2 1971(2) 1990(3) m = 3 1972(4) 1978(3) 1990(3) m = 4 1967(3) 1972(4) 1978(3) 1990(3) m = 5 1967(3) 1972(4) 1978(3) 1985(2) 1990(3) Fit: m 0 1 2 3 4 5 RSS 3.683e-02 1.916e-02 1.522e-02 1.301e-02 1.053e-02 9.198e-03 BIC -6.974e+02 -7.296e+02 -7.025e+02 -6.653e+02 -6.356e+02 -5.952e+02 > plot(bp) > > plot(fs) > lines(confint(bp)) > > > > cleanEx() > nameEx("Grossarl") > ### * Grossarl > > flush(stderr()); flush(stdout()) > > ### Name: Grossarl > ### Title: Marriages, Births and Deaths in Grossarl > ### Aliases: Grossarl > ### Keywords: datasets > > ### ** Examples > > data("Grossarl") > > ## time series of births, deaths, marriages > ########################################### > > with(Grossarl, plot(cbind(deaths, illegitimate + legitimate, marriages), + plot.type = "single", col = grey(c(0.7, 0, 0)), lty = c(1, 1, 3), + lwd = 1.5, ylab = "annual Grossarl series")) > legend("topright", c("deaths", "births", "marriages"), col = grey(c(0.7, 0, 0)), + lty = c(1, 1, 3), bty = "n") > > ## illegitimate births > ###################### > ## lm + MOSUM > plot(Grossarl$fraction) > fm.min <- lm(fraction ~ politics, data = 
Grossarl) > fm.ext <- lm(fraction ~ politics + morals + nuptiality + marriages, + data = Grossarl) > lines(ts(fitted(fm.min), start = 1700), col = 2) > lines(ts(fitted(fm.ext), start = 1700), col = 4) > mos.min <- efp(fraction ~ politics, data = Grossarl, type = "OLS-MOSUM") > mos.ext <- efp(fraction ~ politics + morals + nuptiality + marriages, + data = Grossarl, type = "OLS-MOSUM") > plot(mos.min) > lines(mos.ext, lty = 2) > > ## dating > bp <- breakpoints(fraction ~ 1, data = Grossarl, h = 0.1) > summary(bp) Optimal (m+1)-segment partition: Call: breakpoints.formula(formula = fraction ~ 1, h = 0.1, data = Grossarl) Breakpoints at observation number: m = 1 127 m = 2 55 122 m = 3 55 124 180 m = 4 55 122 157 179 m = 5 54 86 122 157 179 m = 6 35 55 86 122 157 179 m = 7 35 55 80 101 122 157 179 m = 8 35 55 79 99 119 139 159 179 Corresponding to breakdates: m = 1 1826 m = 2 1754 1821 m = 3 1754 1823 1879 m = 4 1754 1821 1856 1878 m = 5 1753 1785 1821 1856 1878 m = 6 1734 1754 1785 1821 1856 1878 m = 7 1734 1754 1779 1800 1821 1856 1878 m = 8 1734 1754 1778 1798 1818 1838 1858 1878 Fit: m 0 1 2 3 4 5 6 RSS 1.1088 0.8756 0.6854 0.6587 0.6279 0.6019 0.5917 BIC -460.8402 -497.4625 -535.8459 -533.1857 -532.1789 -530.0501 -522.8510 m 7 8 RSS 0.5934 0.6084 BIC -511.7017 -496.0924 > ## RSS, BIC, AIC > plot(bp) > plot(0:8, AIC(bp), type = "b") > > ## probably use 5 or 6 breakpoints and compare with > ## coding of the factors as used by us > ## > ## politics 1803 1816 1850 > ## morals 1736 1753 1771 1803 > ## nuptiality 1803 1810 1816 1883 > ## > ## m = 5 1753 1785 1821 1856 1878 > ## m = 6 1734 1754 1785 1821 1856 1878 > ## 6 2 5 1 4 3 > > ## fitted models > coef(bp, breaks = 6) (Intercept) 1700 - 1734 0.16933985 1735 - 1754 0.14078070 1755 - 1785 0.09890276 1786 - 1821 0.05955620 1822 - 1856 0.17441529 1857 - 1878 0.22425604 1879 - 1899 0.15414723 > plot(Grossarl$fraction) > lines(fitted(bp, breaks = 6), col = 2) > lines(ts(fitted(fm.ext), start = 1700), col = 4) > > > ## marriages > ############ > ## lm + MOSUM > plot(Grossarl$marriages) > fm.min <- lm(marriages ~ politics, data = Grossarl) > fm.ext <- lm(marriages ~ politics + morals + nuptiality, data = Grossarl) > lines(ts(fitted(fm.min), start = 1700), col = 2) > lines(ts(fitted(fm.ext), start = 1700), col = 4) > mos.min <- efp(marriages ~ politics, data = Grossarl, type = "OLS-MOSUM") > mos.ext <- efp(marriages ~ politics + morals + nuptiality, data = Grossarl, + type = "OLS-MOSUM") > plot(mos.min) > lines(mos.ext, lty = 2) > > ## dating > bp <- breakpoints(marriages ~ 1, data = Grossarl, h = 0.1) > summary(bp) Optimal (m+1)-segment partition: Call: breakpoints.formula(formula = marriages ~ 1, h = 0.1, data = Grossarl) Breakpoints at observation number: m = 1 114 m = 2 39 114 m = 3 39 114 176 m = 4 39 95 115 176 m = 5 39 62 95 115 176 m = 6 39 62 95 115 136 176 m = 7 39 62 95 115 136 156 176 m = 8 21 41 62 95 115 136 156 176 Corresponding to breakdates: m = 1 1813 m = 2 1738 1813 m = 3 1738 1813 1875 m = 4 1738 1794 1814 1875 m = 5 1738 1761 1794 1814 1875 m = 6 1738 1761 1794 1814 1835 1875 m = 7 1738 1761 1794 1814 1835 1855 1875 m = 8 1720 1740 1761 1794 1814 1835 1855 1875 Fit: m 0 1 2 3 4 5 6 7 8 RSS 3832 3059 2863 2723 2671 2634 2626 2626 2645 BIC 1169 1134 1132 1132 1139 1147 1157 1167 1179 > ## RSS, BIC, AIC > plot(bp) > plot(0:8, AIC(bp), type = "b") > > ## probably use 3 or 4 breakpoints and compare with > ## coding of the factors as used by us > ## > ## politics 1803 1816 1850 > ## morals 1736 1753 1771 1803 > ## nuptiality 1803 1810 
1816 1883 > ## > ## m = 3 1738 1813 1875 > ## m = 4 1738 1794 1814 1875 > ## 2 4 1 3 > > ## fitted models > coef(bp, breaks = 4) (Intercept) 1700 - 1738 13.487179 1739 - 1794 10.160714 1795 - 1814 12.150000 1815 - 1875 6.885246 1876 - 1899 9.750000 > plot(Grossarl$marriages) > lines(fitted(bp, breaks = 4), col = 2) > lines(ts(fitted(fm.ext), start = 1700), col = 4) > > > > cleanEx() > nameEx("PhillipsCurve") > ### * PhillipsCurve > > flush(stderr()); flush(stdout()) > > ### Name: PhillipsCurve > ### Title: UK Phillips Curve Equation Data > ### Aliases: PhillipsCurve > ### Keywords: datasets > > ### ** Examples > > ## load and plot data > data("PhillipsCurve") > uk <- window(PhillipsCurve, start = 1948) > plot(uk[, "dp"]) > > ## AR(1) inflation model > ## estimate breakpoints > bp.inf <- breakpoints(dp ~ dp1, data = uk, h = 8) > plot(bp.inf) > summary(bp.inf) Optimal (m+1)-segment partition: Call: breakpoints.formula(formula = dp ~ dp1, h = 8, data = uk) Breakpoints at observation number: m = 1 20 m = 2 20 28 m = 3 9 20 28 Corresponding to breakdates: m = 1 1967 m = 2 1967 1975 m = 3 1956 1967 1975 Fit: m 0 1 2 3 RSS 0.03068 0.02672 0.01838 0.01786 BIC -162.34174 -156.80265 -160.70385 -150.78479 > > ## fit segmented model with three breaks > fac.inf <- breakfactor(bp.inf, breaks = 2, label = "seg") > fm.inf <- lm(dp ~ 0 + fac.inf/dp1, data = uk) > summary(fm.inf) Call: lm(formula = dp ~ 0 + fac.inf/dp1, data = uk) Residuals: Min 1Q Median 3Q Max -0.046987 -0.014861 -0.003593 0.006286 0.058081 Coefficients: Estimate Std. Error t value Pr(>|t|) fac.infseg1 0.024501 0.011176 2.192 0.0353 * fac.infseg2 -0.000775 0.017853 -0.043 0.9656 fac.infseg3 0.017603 0.015007 1.173 0.2489 fac.infseg1:dp1 0.274012 0.269892 1.015 0.3171 fac.infseg2:dp1 1.343369 0.224521 5.983 9.05e-07 *** fac.infseg3:dp1 0.683410 0.130106 5.253 8.07e-06 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1 Residual standard error: 0.02325 on 34 degrees of freedom Multiple R-squared: 0.9237, Adjusted R-squared: 0.9103 F-statistic: 68.64 on 6 and 34 DF, p-value: < 2.2e-16 > > ## Results from Table 2 in Bai & Perron (2003): > ## coefficient estimates > coef(bp.inf, breaks = 2) (Intercept) dp1 1948 - 1967 0.0245010729 0.2740125 1968 - 1975 -0.0007750299 1.3433686 1976 - 1987 0.0176032179 0.6834098 > ## corresponding standard errors > sqrt(sapply(vcov(bp.inf, breaks = 2), diag)) 1948 - 1967 1968 - 1975 1976 - 1987 (Intercept) 0.008268814 0.01985539 0.01571339 dp1 0.199691273 0.24969992 0.13622996 > ## breakpoints and confidence intervals > confint(bp.inf, breaks = 2) Confidence intervals for breakpoints of optimal 3-segment partition: Call: confint.breakpointsfull(object = bp.inf, breaks = 2) Breakpoints at observation number: 2.5 % breakpoints 97.5 % 1 18 20 25 2 26 28 34 Corresponding to breakdates: 2.5 % breakpoints 97.5 % 1 1965 1967 1972 2 1973 1975 1981 > > ## Phillips curve equation > ## estimate breakpoints > bp.pc <- breakpoints(dw ~ dp1 + du + u1, data = uk, h = 5, breaks = 5) > ## look at RSS and BIC > plot(bp.pc) > summary(bp.pc) Optimal (m+1)-segment partition: Call: breakpoints.formula(formula = dw ~ dp1 + du + u1, h = 5, breaks = 5, data = uk) Breakpoints at observation number: m = 1 26 m = 2 20 28 m = 3 9 25 30 m = 4 11 16 25 30 m = 5 11 16 22 27 32 Corresponding to breakdates: m = 1 1973 m = 2 1967 1975 m = 3 1956 1972 1977 m = 4 1958 1963 1972 1977 m = 5 1958 1963 1969 1974 1979 Fit: m 0 1 2 3 4 5 RSS 3.409e-02 1.690e-02 1.062e-02 7.835e-03 5.183e-03 3.388e-03 BIC -1.508e+02 -1.604e+02 -1.605e+02 -1.542e+02 -1.523e+02 -1.509e+02 > > ## fit segmented model with three breaks > fac.pc <- breakfactor(bp.pc, breaks = 2, label = "seg") > fm.pc <- lm(dw ~ 0 + fac.pc/dp1 + du + u1, data = uk) > summary(fm.pc) Call: lm(formula = dw ~ 0 + fac.pc/dp1 + du + u1, data = uk) Residuals: Min 1Q Median 3Q Max -0.041392 -0.011516 0.000089 0.010036 0.044539 Coefficients: Estimate Std. Error t value Pr(>|t|) fac.pcseg1 0.06574 0.01169 5.623 3.24e-06 *** fac.pcseg2 0.06231 0.01883 3.310 0.00232 ** fac.pcseg3 0.18093 0.05388 3.358 0.00204 ** du -0.14408 0.58218 -0.247 0.80611 u1 -0.87516 0.37274 -2.348 0.02523 * fac.pcseg1:dp1 0.09373 0.24053 0.390 0.69936 fac.pcseg2:dp1 1.23143 0.20498 6.008 1.06e-06 *** fac.pcseg3:dp1 0.01618 0.25667 0.063 0.95013 --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1 Residual standard error: 0.02021 on 32 degrees of freedom Multiple R-squared: 0.9655, Adjusted R-squared: 0.9569 F-statistic: 112 on 8 and 32 DF, p-value: < 2.2e-16 > > ## Results from Table 3 in Bai & Perron (2003): > ## coefficient estimates > coef(fm.pc) fac.pcseg1 fac.pcseg2 fac.pcseg3 du u1 0.06574278 0.06231337 0.18092502 -0.14408073 -0.87515585 fac.pcseg1:dp1 fac.pcseg2:dp1 fac.pcseg3:dp1 0.09372759 1.23143008 0.01617826 > ## corresponding standard errors > sqrt(diag(vcov(fm.pc))) fac.pcseg1 fac.pcseg2 fac.pcseg3 du u1 0.01169149 0.01882668 0.05388166 0.58217571 0.37273955 fac.pcseg1:dp1 fac.pcseg2:dp1 fac.pcseg3:dp1 0.24052539 0.20497973 0.25666903 > ## breakpoints and confidence intervals > confint(bp.pc, breaks = 2, het.err = FALSE) Confidence intervals for breakpoints of optimal 3-segment partition: Call: confint.breakpointsfull(object = bp.pc, breaks = 2, het.err = FALSE) Breakpoints at observation number: 2.5 % breakpoints 97.5 % 1 19 20 21 2 27 28 29 Corresponding to breakdates: 2.5 % breakpoints 97.5 % 1 1966 1967 1968 2 1974 1975 1976 > > > > cleanEx() > nameEx("RealInt") > ### * RealInt > > flush(stderr()); flush(stdout()) > > ### Name: RealInt > ### Title: US Ex-post Real Interest Rate > ### Aliases: RealInt > ### Keywords: datasets > > ### ** Examples > > ## load and plot data > data("RealInt") > plot(RealInt) > > ## estimate breakpoints > bp.ri <- breakpoints(RealInt ~ 1, h = 15) > plot(bp.ri) > summary(bp.ri) Optimal (m+1)-segment partition: Call: breakpoints.formula(formula = RealInt ~ 1, h = 15) Breakpoints at observation number: m = 1 79 m = 2 47 79 m = 3 24 47 79 m = 4 24 47 64 79 m = 5 16 31 47 64 79 Corresponding to breakdates: m = 1 1980(3) m = 2 1972(3) 1980(3) m = 3 1966(4) 1972(3) 1980(3) m = 4 1966(4) 1972(3) 1976(4) 1980(3) m = 5 1964(4) 1968(3) 1972(3) 1976(4) 1980(3) Fit: m 0 1 2 3 4 5 RSS 1214.9 645.0 456.0 445.2 444.9 449.6 BIC 555.7 499.8 473.3 480.1 489.3 499.7 > > ## fit segmented model with three breaks > fac.ri <- breakfactor(bp.ri, breaks = 3, label = "seg") > fm.ri <- lm(RealInt ~ 0 + fac.ri) > summary(fm.ri) Call: lm(formula = RealInt ~ 0 + fac.ri) Residuals: Min 1Q Median 3Q Max -4.5157 -1.3674 -0.0578 1.3248 6.0990 Coefficients: Estimate Std. Error t value Pr(>|t|) fac.riseg1 1.8236 0.4329 4.213 5.57e-05 *** fac.riseg2 0.8661 0.4422 1.959 0.053 . fac.riseg3 -1.7961 0.3749 -4.791 5.83e-06 *** fac.riseg4 5.6429 0.4329 13.036 < 2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Residual standard error: 2.121 on 99 degrees of freedom Multiple R-squared: 0.6842, Adjusted R-squared: 0.6714 F-statistic: 53.62 on 4 and 99 DF, p-value: < 2.2e-16 > > ## setup kernel HAC estimator > vcov.ri <- function(x, ...) kernHAC(x, kernel = "Quadratic Spectral", + prewhite = 1, approx = "AR(1)", ...) > > ## Results from Table 1 in Bai & Perron (2003): > ## coefficient estimates > coef(bp.ri, breaks = 3) (Intercept) 1961(1) - 1966(4) 1.8236167 1967(1) - 1972(3) 0.8660848 1972(4) - 1980(3) -1.7961384 1980(4) - 1986(3) 5.6428896 > ## corresponding standard errors > sapply(vcov(bp.ri, breaks = 3, vcov = vcov.ri), sqrt) 1961(1) - 1966(4) 1967(1) - 1972(3) 1972(4) - 1980(3) 1980(4) - 1986(3) 0.1857577 0.1499849 0.5026749 0.5887460 > ## breakpoints and confidence intervals > confint(bp.ri, breaks = 3, vcov = vcov.ri) Confidence intervals for breakpoints of optimal 4-segment partition: Call: confint.breakpointsfull(object = bp.ri, breaks = 3, vcov. 
= vcov.ri) Breakpoints at observation number: 2.5 % breakpoints 97.5 % 1 18 24 35 2 33 47 48 3 77 79 81 Corresponding to breakdates: Warning: Overlapping confidence intervals 2.5 % breakpoints 97.5 % 1 1965(2) 1966(4) 1969(3) 2 1969(1) 1972(3) 1972(4) 3 1980(1) 1980(3) 1981(1) > > ## Visualization > plot(RealInt) > lines(as.vector(time(RealInt)), fitted(fm.ri), col = 4) > lines(confint(bp.ri, breaks = 3, vcov = vcov.ri)) Warning: Overlapping confidence intervals > > > > cleanEx() > nameEx("SP2001") > ### * SP2001 > > flush(stderr()); flush(stdout()) > > ### Name: SP2001 > ### Title: S&P 500 Stock Prices > ### Aliases: SP2001 > ### Keywords: datasets > > ### ** Examples > > ## load and transform data > ## (DAL: Delta Air Lines, LU: Lucent Technologies) > data("SP2001") > stock.prices <- SP2001[, c("DAL", "LU")] > stock.returns <- diff(log(stock.prices)) > > ## price and return series > plot(stock.prices, ylab = c("Delta Air Lines", "Lucent Technologies"), main = "") > plot(stock.returns, ylab = c("Delta Air Lines", "Lucent Technologies"), main = "") > > ## monitoring of DAL series > myborder <- function(k) 1.939*k/28 > x <- as.vector(stock.returns[, "DAL"][1:28]) > dal.cusum <- mefp(x ~ 1, type = "OLS-CUSUM", border = myborder) > dal.mosum <- mefp(x ~ 1, type = "OLS-MOSUM", h = 0.5, period = 4) > x <- as.vector(stock.returns[, "DAL"]) > dal.cusum <- monitor(dal.cusum) Break detected at observation # 29 > dal.mosum <- monitor(dal.mosum) Break detected at observation # 29 > > ## monitoring of LU series > x <- as.vector(stock.returns[, "LU"][1:28]) > lu.cusum <- mefp(x ~ 1, type = "OLS-CUSUM", border = myborder) > lu.mosum <- mefp(x ~ 1, type = "OLS-MOSUM", h = 0.5, period = 4) > x <- as.vector(stock.returns[, "LU"]) > lu.cusum <- monitor(lu.cusum) > lu.mosum <- monitor(lu.mosum) > > ## pretty plotting > ## (needs some work because lm() does not keep "zoo" attributes) > cus.bound <- zoo(c(rep(NA, 27), myborder(28:102)), index(stock.returns)) > mos.bound <- as.vector(boundary(dal.mosum)) > mos.bound <- zoo(c(rep(NA, 27), mos.bound[1], mos.bound), index(stock.returns)) > > ## Lucent Technologies: CUSUM test > plot(zoo(c(lu.cusum$efpprocess, lu.cusum$process), index(stock.prices)), + ylim = c(-1, 1) * coredata(cus.bound)[102], xlab = "Time", ylab = "empirical fluctuation process") > abline(0, 0) > abline(v = as.Date("2001-09-10"), lty = 2) > lines(cus.bound, col = 2) > lines(-cus.bound, col = 2) > > ## Lucent Technologies: MOSUM test > plot(zoo(c(lu.mosum$efpprocess, lu.mosum$process), index(stock.prices)[-(1:14)]), + ylim = c(-1, 1) * coredata(mos.bound)[102], xlab = "Time", ylab = "empirical fluctuation process") > abline(0, 0) > abline(v = as.Date("2001-09-10"), lty = 2) > lines(mos.bound, col = 2) > lines(-mos.bound, col = 2) > > ## Delta Air Lines: CUSUM test > plot(zoo(c(dal.cusum$efpprocess, dal.cusum$process), index(stock.prices)), + ylim = c(-1, 1) * coredata(cus.bound)[102], xlab = "Time", ylab = "empirical fluctuation process") > abline(0, 0) > abline(v = as.Date("2001-09-10"), lty = 2) > lines(cus.bound, col = 2) > lines(-cus.bound, col = 2) > > ## Delta Air Lines: MOSUM test > plot(zoo(c(dal.mosum$efpprocess, dal.mosum$process), index(stock.prices)[-(1:14)]), + ylim = range(dal.mosum$process), xlab = "Time", ylab = "empirical fluctuation process") > abline(0, 0) > abline(v = as.Date("2001-09-10"), lty = 2) > lines(mos.bound, col = 2) > lines(-mos.bound, col = 2) > > > > cleanEx() > nameEx("USIncExp") > ### * USIncExp > > flush(stderr()); flush(stdout()) > > ### Name: USIncExp > ### 
Title: Income and Expenditures in the US > ### Aliases: USIncExp > ### Keywords: datasets > > ### ** Examples > > ## These example are presented in the vignette distributed with this > ## package, the code was generated by Stangle("strucchange-intro.Rnw") > > ################################################### > ### chunk number 1: data > ################################################### > library("strucchange") > data("USIncExp") > plot(USIncExp, plot.type = "single", col = 1:2, ylab = "billion US$") > legend(1960, max(USIncExp), c("income", "expenditures"), + lty = c(1,1), col = 1:2, bty = "n") > > > ################################################### > ### chunk number 2: subset > ################################################### > library("strucchange") > data("USIncExp") > USIncExp2 <- window(USIncExp, start = c(1985,12)) > > > ################################################### > ### chunk number 3: ecm-setup > ################################################### > coint.res <- residuals(lm(expenditure ~ income, data = USIncExp2)) > coint.res <- lag(ts(coint.res, start = c(1985,12), freq = 12), k = -1) > USIncExp2 <- cbind(USIncExp2, diff(USIncExp2), coint.res) > USIncExp2 <- window(USIncExp2, start = c(1986,1), end = c(2001,2)) > colnames(USIncExp2) <- c("income", "expenditure", "diff.income", + "diff.expenditure", "coint.res") > ecm.model <- diff.expenditure ~ coint.res + diff.income > > > ################################################### > ### chunk number 4: ts-used > ################################################### > plot(USIncExp2[,3:5], main = "") > > > ################################################### > ### chunk number 5: efp > ################################################### > ocus <- efp(ecm.model, type="OLS-CUSUM", data=USIncExp2) > me <- efp(ecm.model, type="ME", data=USIncExp2, h=0.2) > > > ################################################### > ### chunk number 6: efp-boundary > ################################################### > bound.ocus <- boundary(ocus, alpha=0.05) > > > ################################################### > ### chunk number 7: OLS-CUSUM > ################################################### > plot(ocus) > > > ################################################### > ### chunk number 8: efp-boundary2 > ################################################### > plot(ocus, boundary = FALSE) > lines(bound.ocus, col = 4) > lines(-bound.ocus, col = 4) > > > ################################################### > ### chunk number 9: ME-null > ################################################### > plot(me, functional = NULL) > > > ################################################### > ### chunk number 10: efp-sctest > ################################################### > sctest(ocus) OLS-based CUSUM test data: ocus S0 = 1.5511, p-value = 0.01626 > > > ################################################### > ### chunk number 11: efp-sctest2 > ################################################### > sctest(ecm.model, type="OLS-CUSUM", data=USIncExp2) OLS-based CUSUM test data: ecm.model S0 = 1.5511, p-value = 0.01626 > > > ################################################### > ### chunk number 12: Fstats > ################################################### > fs <- Fstats(ecm.model, from = c(1990, 1), to = c(1999,6), data = USIncExp2) > > > ################################################### > ### chunk number 13: Fstats-plot > ################################################### > plot(fs) > > > ################################################### > ### chunk number 
14: pval-plot > ################################################### > plot(fs, pval=TRUE) > > > ################################################### > ### chunk number 15: aveF-plot > ################################################### > plot(fs, aveF=TRUE) > > > ################################################### > ### chunk number 16: Fstats-sctest > ################################################### > sctest(fs, type="expF") expF test data: fs exp.F = 8.9955, p-value = 0.001311 > > > ################################################### > ### chunk number 17: Fstats-sctest2 > ################################################### > sctest(ecm.model, type = "expF", from = 49, to = 162, data = USIncExp2) expF test data: ecm.model exp.F = 8.9955, p-value = 0.001311 > > > ################################################### > ### chunk number 18: mefp > ################################################### > USIncExp3 <- window(USIncExp2, start = c(1986, 1), end = c(1989,12)) > me.mefp <- mefp(ecm.model, type = "ME", data = USIncExp3, alpha = 0.05) > > > ################################################### > ### chunk number 19: monitor1 > ################################################### > USIncExp3 <- window(USIncExp2, start = c(1986, 1), end = c(1990,12)) > me.mefp <- monitor(me.mefp) > > > ################################################### > ### chunk number 20: monitor2 > ################################################### > USIncExp3 <- window(USIncExp2, start = c(1986, 1)) > me.mefp <- monitor(me.mefp) Break detected at observation # 72 > me.mefp Monitoring with ME test (moving estimates test) Initial call: mefp.formula(formula = ecm.model, type = "ME", data = USIncExp3, alpha = 0.05) Last call: monitor(obj = me.mefp) Significance level : 0.05 Critical value : 3.109524 History size : 48 Last point evaluated : 182 Structural break at : 72 Parameter estimate on history : (Intercept) coint.res diff.income 18.9299679 -0.3893141 0.3156597 Last parameter estimate : (Intercept) coint.res diff.income 27.94869106 0.00983451 0.13314662 > > > ################################################### > ### chunk number 21: monitor-plot > ################################################### > plot(me.mefp) > > > ################################################### > ### chunk number 22: mefp2 > ################################################### > USIncExp3 <- window(USIncExp2, start = c(1986, 1), end = c(1989,12)) > me.efp <- efp(ecm.model, type = "ME", data = USIncExp3, h = 0.5) > me.mefp <- mefp(me.efp, alpha=0.05) > > > ################################################### > ### chunk number 23: monitor3 > ################################################### > USIncExp3 <- window(USIncExp2, start = c(1986, 1)) > me.mefp <- monitor(me.mefp) Break detected at observation # 70 > > > ################################################### > ### chunk number 24: monitor-plot2 > ################################################### > plot(me.mefp) > > > > > cleanEx() > nameEx("boundary.Fstats") > ### * boundary.Fstats > > flush(stderr()); flush(stdout()) > > ### Name: boundary.Fstats > ### Title: Boundary for F Statistics > ### Aliases: boundary.Fstats > ### Keywords: regression > > ### ** Examples > > ## Load dataset "nhtemp" with average yearly temperatures in New Haven > data("nhtemp") > ## plot the data > plot(nhtemp) > > ## test the model null hypothesis that the average temperature remains > ## constant over the years for potential break points between 1941 > ## (corresponds to from = 0.5) and 1962 (corresponds to 
to = 0.85) > ## compute F statistics > fs <- Fstats(nhtemp ~ 1, from = 0.5, to = 0.85) > ## plot the p values without boundary > plot(fs, pval = TRUE, alpha = 0.01) > ## add the boundary in another colour > lines(boundary(fs, pval = TRUE, alpha = 0.01), col = 2) > > > > cleanEx() > nameEx("boundary.efp") > ### * boundary.efp > > flush(stderr()); flush(stdout()) > > ### Name: boundary.efp > ### Title: Boundary for Empirical Fluctuation Processes > ### Aliases: boundary.efp > ### Keywords: regression > > ### ** Examples > > ## Load dataset "nhtemp" with average yearly temperatures in New Haven > data("nhtemp") > ## plot the data > plot(nhtemp) > > ## test the model null hypothesis that the average temperature remains constant > ## over the years > ## compute OLS-CUSUM fluctuation process > temp.cus <- efp(nhtemp ~ 1, type = "OLS-CUSUM") > ## plot the process without boundaries > plot(temp.cus, alpha = 0.01, boundary = FALSE) > ## add the boundaries in another colour > bound <- boundary(temp.cus, alpha = 0.01) > lines(bound, col=4) > lines(-bound, col=4) > > > > cleanEx() > nameEx("boundary.mefp") > ### * boundary.mefp > > flush(stderr()); flush(stdout()) > > ### Name: boundary.mefp > ### Title: Boundary Function for Monitoring of Structural Changes > ### Aliases: boundary.mefp > ### Keywords: regression > > ### ** Examples > > df1 <- data.frame(y=rnorm(300)) > df1[150:300,"y"] <- df1[150:300,"y"]+1 > me1 <- mefp(y~1, data=df1[1:50,,drop=FALSE], type="ME", h=1, + alpha=0.05) > me2 <- monitor(me1, data=df1) Break detected at observation # 183 > > plot(me2, boundary=FALSE) > lines(boundary(me2), col="green", lty="44") > > > > cleanEx() > nameEx("breakdates") > ### * breakdates > > flush(stderr()); flush(stdout()) > > ### Name: breakdates > ### Title: Breakdates Corresponding to Breakpoints > ### Aliases: breakdates breakdates.breakpoints > ### breakdates.confint.breakpoints > ### Keywords: regression > > ### ** Examples > > ## Nile data with one breakpoint: the annual flows drop in 1898 > ## because the first Ashwan dam was built > data("Nile") > plot(Nile) > > bp.nile <- breakpoints(Nile ~ 1) > summary(bp.nile) Optimal (m+1)-segment partition: Call: breakpoints.formula(formula = Nile ~ 1) Breakpoints at observation number: m = 1 28 m = 2 28 83 m = 3 28 68 83 m = 4 28 45 68 83 m = 5 15 30 45 68 83 Corresponding to breakdates: m = 1 1898 m = 2 1898 1953 m = 3 1898 1938 1953 m = 4 1898 1915 1938 1953 m = 5 1885 1900 1915 1938 1953 Fit: m 0 1 2 3 4 5 RSS 2835157 1597457 1552924 1538097 1507888 1659994 BIC 1318 1270 1276 1285 1292 1311 > plot(bp.nile) > > ## compute breakdates corresponding to the > ## breakpoints of minimum BIC segmentation > breakdates(bp.nile) [1] 1898 > > ## confidence intervals > ci.nile <- confint(bp.nile) > breakdates(ci.nile) 2.5 % breakpoints 97.5 % 1 1895 1898 1902 > ci.nile Confidence intervals for breakpoints of optimal 2-segment partition: Call: confint.breakpointsfull(object = bp.nile) Breakpoints at observation number: 2.5 % breakpoints 97.5 % 1 25 28 32 Corresponding to breakdates: 2.5 % breakpoints 97.5 % 1 1895 1898 1902 > > plot(Nile) > lines(ci.nile) > > > > cleanEx() > nameEx("breakfactor") > ### * breakfactor > > flush(stderr()); flush(stdout()) > > ### Name: breakfactor > ### Title: Factor Coding of Segmentations > ### Aliases: breakfactor > ### Keywords: regression > > ### ** Examples > > ## Nile data with one breakpoint: the annual flows drop in 1898 > ## because the first Ashwan dam was built > data("Nile") > plot(Nile) > > ## compute breakpoints > bp.nile 
<- breakpoints(Nile ~ 1) > > ## fit and visualize segmented and unsegmented model > fm0 <- lm(Nile ~ 1) > fm1 <- lm(Nile ~ breakfactor(bp.nile, breaks = 1)) > > lines(fitted(fm0), col = 3) > lines(fitted(fm1), col = 4) > lines(bp.nile, breaks = 1) > > > > cleanEx() > nameEx("breakpoints") > ### * breakpoints > > flush(stderr()); flush(stdout()) > > ### Name: breakpoints > ### Title: Dating Breaks > ### Aliases: breakpoints breakpoints.formula breakpoints.breakpointsfull > ### breakpoints.Fstats summary.breakpoints summary.breakpointsfull > ### plot.breakpointsfull plot.summary.breakpointsfull print.breakpoints > ### print.summary.breakpointsfull lines.breakpoints coef.breakpointsfull > ### vcov.breakpointsfull fitted.breakpointsfull residuals.breakpointsfull > ### df.residual.breakpointsfull > ### Keywords: regression > > ### ** Examples > > ## Nile data with one breakpoint: the annual flows drop in 1898 > ## because the first Ashwan dam was built > data("Nile") > plot(Nile) > > ## F statistics indicate one breakpoint > fs.nile <- Fstats(Nile ~ 1) > plot(fs.nile) > breakpoints(fs.nile) Optimal 2-segment partition: Call: breakpoints.Fstats(obj = fs.nile) Breakpoints at observation number: 28 Corresponding to breakdates: 1898 > lines(breakpoints(fs.nile)) > > ## or > bp.nile <- breakpoints(Nile ~ 1) > summary(bp.nile) Optimal (m+1)-segment partition: Call: breakpoints.formula(formula = Nile ~ 1) Breakpoints at observation number: m = 1 28 m = 2 28 83 m = 3 28 68 83 m = 4 28 45 68 83 m = 5 15 30 45 68 83 Corresponding to breakdates: m = 1 1898 m = 2 1898 1953 m = 3 1898 1938 1953 m = 4 1898 1915 1938 1953 m = 5 1885 1900 1915 1938 1953 Fit: m 0 1 2 3 4 5 RSS 2835157 1597457 1552924 1538097 1507888 1659994 BIC 1318 1270 1276 1285 1292 1311 > > ## the BIC also chooses one breakpoint > plot(bp.nile) > breakpoints(bp.nile) Optimal 2-segment partition: Call: breakpoints.breakpointsfull(obj = bp.nile) Breakpoints at observation number: 28 Corresponding to breakdates: 1898 > > ## fit null hypothesis model and model with 1 breakpoint > fm0 <- lm(Nile ~ 1) > fm1 <- lm(Nile ~ breakfactor(bp.nile, breaks = 1)) > plot(Nile) > lines(ts(fitted(fm0), start = 1871), col = 3) > lines(ts(fitted(fm1), start = 1871), col = 4) > lines(bp.nile) > > ## confidence interval > ci.nile <- confint(bp.nile) > ci.nile Confidence intervals for breakpoints of optimal 2-segment partition: Call: confint.breakpointsfull(object = bp.nile) Breakpoints at observation number: 2.5 % breakpoints 97.5 % 1 25 28 32 Corresponding to breakdates: 2.5 % breakpoints 97.5 % 1 1895 1898 1902 > lines(ci.nile) > > > ## UK Seatbelt data: a SARIMA(1,0,0)(1,0,0)_12 model > ## (fitted by OLS) is used and reveals (at least) two > ## breakpoints - one in 1973 associated with the oil crisis and > ## one in 1983 due to the introduction of compulsory > ## wearing of seatbelts in the UK. 
> data("UKDriverDeaths") > seatbelt <- log10(UKDriverDeaths) > seatbelt <- cbind(seatbelt, lag(seatbelt, k = -1), lag(seatbelt, k = -12)) > colnames(seatbelt) <- c("y", "ylag1", "ylag12") > seatbelt <- window(seatbelt, start = c(1970, 1), end = c(1984,12)) > plot(seatbelt[,"y"], ylab = expression(log[10](casualties))) > > ## testing > re.seat <- efp(y ~ ylag1 + ylag12, data = seatbelt, type = "RE") > plot(re.seat) > > ## dating > bp.seat <- breakpoints(y ~ ylag1 + ylag12, data = seatbelt, h = 0.1) > summary(bp.seat) Optimal (m+1)-segment partition: Call: breakpoints.formula(formula = y ~ ylag1 + ylag12, h = 0.1, data = seatbelt) Breakpoints at observation number: m = 1 46 m = 2 46 157 m = 3 46 70 157 m = 4 46 70 108 157 m = 5 46 70 120 141 160 m = 6 46 70 89 108 141 160 m = 7 46 70 89 107 125 144 162 m = 8 18 46 70 89 107 125 144 162 Corresponding to breakdates: m = 1 1973(10) m = 2 1973(10) 1983(1) m = 3 1973(10) 1975(10) 1983(1) m = 4 1973(10) 1975(10) 1978(12) 1983(1) m = 5 1973(10) 1975(10) 1979(12) 1981(9) 1983(4) m = 6 1973(10) 1975(10) 1977(5) 1978(12) 1981(9) 1983(4) m = 7 1973(10) 1975(10) 1977(5) 1978(11) 1980(5) 1981(12) 1983(6) m = 8 1971(6) 1973(10) 1975(10) 1977(5) 1978(11) 1980(5) 1981(12) 1983(6) Fit: m 0 1 2 3 4 5 6 RSS 0.3297 0.2967 0.2676 0.2438 0.2395 0.2317 0.2258 BIC -602.8611 -601.0539 -598.9042 -594.8774 -577.2905 -562.4880 -546.3632 m 7 8 RSS 0.2244 0.2231 BIC -526.7295 -506.9886 > lines(bp.seat, breaks = 2) > > ## minimum BIC partition > plot(bp.seat) > breakpoints(bp.seat) Optimal 1-segment partition: Call: breakpoints.breakpointsfull(obj = bp.seat) Breakpoints at observation number: NA Corresponding to breakdates: NA > ## the BIC would choose 0 breakpoints although the RE and supF test > ## clearly reject the hypothesis of structural stability. Bai & > ## Perron (2003) report that the BIC has problems in dynamic regressions. 
> ## due to the shape of the RE process of the F statistics choose two > ## breakpoints and fit corresponding models > bp.seat2 <- breakpoints(bp.seat, breaks = 2) > fm0 <- lm(y ~ ylag1 + ylag12, data = seatbelt) > fm1 <- lm(y ~ breakfactor(bp.seat2)/(ylag1 + ylag12) - 1, data = seatbelt) > > ## plot > plot(seatbelt[,"y"], ylab = expression(log[10](casualties))) > time.seat <- as.vector(time(seatbelt)) > lines(time.seat, fitted(fm0), col = 3) > lines(time.seat, fitted(fm1), col = 4) > lines(bp.seat2) > > ## confidence intervals > ci.seat2 <- confint(bp.seat, breaks = 2) > ci.seat2 Confidence intervals for breakpoints of optimal 3-segment partition: Call: confint.breakpointsfull(object = bp.seat, breaks = 2) Breakpoints at observation number: 2.5 % breakpoints 97.5 % 1 33 46 56 2 144 157 171 Corresponding to breakdates: 2.5 % breakpoints 97.5 % 1 1972(9) 1973(10) 1974(8) 2 1981(12) 1983(1) 1984(3) > lines(ci.seat2) > > > > cleanEx() > nameEx("catL2BB") > ### * catL2BB > > flush(stderr()); flush(stdout()) > > ### Name: catL2BB > ### Title: Generators for efpFunctionals along Categorical Variables > ### Aliases: catL2BB ordL2BB ordwmax > ### Keywords: regression > > ### ** Examples > > ## artificial data > set.seed(1) > d <- data.frame( + x = runif(200, -1, 1), + z = factor(rep(1:4, each = 50)), + err = rnorm(200) + ) > d$y <- rep(c(0.5, -0.5), c(150, 50)) * d$x + d$err > > ## empirical fluctuation process > scus <- gefp(y ~ x, data = d, fit = lm, order.by = ~ z) > > ## chi-squared-type test (unordered LM-type test) > LMuo <- catL2BB(scus) > plot(scus, functional = LMuo) > sctest(scus, functional = LMuo) M-fluctuation test data: scus f(efp) = 12.375, p-value = 0.05411 > > ## ordinal maxLM test (with few replications only to save time) > maxLMo <- ordL2BB(scus, nrep = 10000) > plot(scus, functional = maxLMo) > sctest(scus, functional = maxLMo) M-fluctuation test data: scus f(efp) = 9.0937, p-value = 0.03173 > > ## ordinal weighted double maximum test > WDM <- ordwmax(scus) > plot(scus, functional = WDM) > sctest(scus, functional = WDM) M-fluctuation test data: scus f(efp) = 3.001, p-value = 0.01498 > > > > cleanEx() > nameEx("confint.breakpointsfull") > ### * confint.breakpointsfull > > flush(stderr()); flush(stdout()) > > ### Name: confint.breakpointsfull > ### Title: Confidence Intervals for Breakpoints > ### Aliases: confint.breakpointsfull lines.confint.breakpoints > ### print.confint.breakpoints > ### Keywords: regression > > ### ** Examples > > ## Nile data with one breakpoint: the annual flows drop in 1898 > ## because the first Ashwan dam was built > data("Nile") > plot(Nile) > > ## dating breaks > bp.nile <- breakpoints(Nile ~ 1) > ci.nile <- confint(bp.nile, breaks = 1) > lines(ci.nile) > > > > cleanEx() > nameEx("durab") > ### * durab > > flush(stderr()); flush(stdout()) > > ### Name: durab > ### Title: US Labor Productivity > ### Aliases: durab > ### Keywords: datasets > > ### ** Examples > > data("durab") > ## use AR(1) model as in Hansen (2001) and Zeileis et al. (2005) > durab.model <- y ~ lag > > ## historical tests > ## OLS-based CUSUM process > ols <- efp(durab.model, data = durab, type = "OLS-CUSUM") > plot(ols) > ## F statistics > fs <- Fstats(durab.model, data = durab, from = 0.1) > plot(fs) > > ## F statistics based on heteroskadisticy-consistent covariance matrix > fsHC <- Fstats(durab.model, data = durab, from = 0.1, + vcov = function(x, ...) 
vcovHC(x, type = "HC", ...)) > plot(fsHC) > > ## monitoring > Durab <- window(durab, start=1964, end = c(1979, 12)) > ols.efp <- efp(durab.model, type = "OLS-CUSUM", data = Durab) > newborder <- function(k) 1.723 * k/192 > ols.mefp <- mefp(ols.efp, period=2) > ols.mefp2 <- mefp(ols.efp, border=newborder) > Durab <- window(durab, start=1964) > ols.mon <- monitor(ols.mefp) Break detected at observation # 437 > ols.mon2 <- monitor(ols.mefp2) Break detected at observation # 416 > plot(ols.mon) > lines(boundary(ols.mon2), col = 2) > ## Note: critical value for linear boundary taken from Table III > ## in Zeileis et al. 2005: (1.568 + 1.896)/2 = 1.732 is a linear > ## interpolation between the values for T = 2 and T = 3 at > ## alpha = 0.05. A typo switched 1.732 to 1.723. > > ## dating > bp <- breakpoints(durab.model, data = durab) > summary(bp) Optimal (m+1)-segment partition: Call: breakpoints.formula(formula = durab.model, data = durab) Breakpoints at observation number: m = 1 418 m = 2 221 530 m = 3 114 225 530 m = 4 114 221 418 531 m = 5 114 221 319 418 531 Corresponding to breakdates: m = 1 1981(12) m = 2 1965(7) 1991(4) m = 3 1956(8) 1965(11) 1991(4) m = 4 1956(8) 1965(7) 1981(12) 1991(5) m = 5 1956(8) 1965(7) 1973(9) 1981(12) 1991(5) Fit: m 0 1 2 3 4 5 RSS 5.586e-02 5.431e-02 5.325e-02 5.220e-02 5.171e-02 5.157e-02 BIC -4.221e+03 -4.220e+03 -4.213e+03 -4.207e+03 -4.194e+03 -4.176e+03 > plot(summary(bp)) > > plot(ols) > lines(breakpoints(bp, breaks = 1), col = 3) > lines(breakpoints(bp, breaks = 2), col = 4) > plot(fs) > lines(breakpoints(bp, breaks = 1), col = 3) > lines(breakpoints(bp, breaks = 2), col = 4) > > > > cleanEx() > nameEx("efp") > ### * efp > > flush(stderr()); flush(stdout()) > > ### Name: efp > ### Title: Empirical Fluctuation Processes > ### Aliases: efp print.efp > ### Keywords: regression > > ### ** Examples > > ## Nile data with one breakpoint: the annual flows drop in 1898 > ## because the first Ashwan dam was built > data("Nile") > plot(Nile) > > ## test the null hypothesis that the annual flow remains constant > ## over the years > ## compute OLS-based CUSUM process and plot > ## with standard and alternative boundaries > ocus.nile <- efp(Nile ~ 1, type = "OLS-CUSUM") > plot(ocus.nile) > plot(ocus.nile, alpha = 0.01, alt.boundary = TRUE) > ## calculate corresponding test statistic > sctest(ocus.nile) OLS-based CUSUM test data: ocus.nile S0 = 2.9518, p-value = 5.409e-08 > > ## UK Seatbelt data: a SARIMA(1,0,0)(1,0,0)_12 model > ## (fitted by OLS) is used and reveals (at least) two > ## breakpoints - one in 1973 associated with the oil crisis and > ## one in 1983 due to the introduction of compulsory > ## wearing of seatbelts in the UK. 
> data("UKDriverDeaths") > seatbelt <- log10(UKDriverDeaths) > seatbelt <- cbind(seatbelt, lag(seatbelt, k = -1), lag(seatbelt, k = -12)) > colnames(seatbelt) <- c("y", "ylag1", "ylag12") > seatbelt <- window(seatbelt, start = c(1970, 1), end = c(1984,12)) > plot(seatbelt[,"y"], ylab = expression(log[10](casualties))) > > ## use RE process > re.seat <- efp(y ~ ylag1 + ylag12, data = seatbelt, type = "RE") > plot(re.seat) > plot(re.seat, functional = NULL) > sctest(re.seat) RE test (recursive estimates test) data: re.seat RE = 1.6311, p-value = 0.02904 > > > > cleanEx() > nameEx("efpFunctional") > ### * efpFunctional > > flush(stderr()); flush(stdout()) > > ### Name: efpFunctional > ### Title: Functionals for Fluctuation Processes > ### Aliases: efpFunctional simulateBMDist maxBM maxBB maxBMI maxBBI maxL2BB > ### meanL2BB rangeBM rangeBB rangeBMI rangeBBI > ### Keywords: regression > > ### ** Examples > > > data("BostonHomicide") > gcus <- gefp(homicides ~ 1, family = poisson, vcov = kernHAC, + data = BostonHomicide) > plot(gcus, functional = meanL2BB) > gcus Generalized Empirical M-Fluctuation Process Call: gefp(homicides ~ 1, family = poisson, vcov = kernHAC, data = BostonHomicide) Fitted model: Call: fit(formula = ..1, family = ..2, data = data) Coefficients: (Intercept) 1.017 Degrees of Freedom: 76 Total (i.e. Null); 76 Residual Null Deviance: 115.6 Residual Deviance: 115.6 AIC: 316.5 > sctest(gcus, functional = meanL2BB) M-fluctuation test data: gcus f(efp) = 0.93375, p-value = 0.005 > > y <- rnorm(1000) > x1 <- runif(1000) > x2 <- runif(1000) > > ## supWald statistic computed by Fstats() > fs <- Fstats(y ~ x1 + x2, from = 0.1) > plot(fs) > sctest(fs) supF test data: fs sup.F = 12.252, p-value = 0.1161 > > ## compare with supLM statistic > scus <- gefp(y ~ x1 + x2, fit = lm) > plot(scus, functional = supLM(0.1)) > sctest(scus, functional = supLM(0.1)) M-fluctuation test data: scus f(efp) = 12.258, p-value = 0.1158 > > ## seatbelt data > data("UKDriverDeaths") > seatbelt <- log10(UKDriverDeaths) > seatbelt <- cbind(seatbelt, lag(seatbelt, k = -1), lag(seatbelt, k = -12)) > colnames(seatbelt) <- c("y", "ylag1", "ylag12") > seatbelt <- window(seatbelt, start = c(1970, 1), end = c(1984,12)) > > scus.seat <- gefp(y ~ ylag1 + ylag12, data = seatbelt) > > ## double maximum test > plot(scus.seat) > ## range test > plot(scus.seat, functional = rangeBB) > ## Cramer-von Mises statistic (Nyblom-Hansen test) > plot(scus.seat, functional = meanL2BB) > ## supLM test > plot(scus.seat, functional = supLM(0.1)) > > > > cleanEx() > nameEx("gefp") > ### * gefp > > flush(stderr()); flush(stdout()) > > ### Name: gefp > ### Title: Generalized Empirical M-Fluctuation Processes > ### Aliases: gefp print.gefp sctest.gefp plot.gefp time.gefp print.gefp > ### Keywords: regression > > ### ** Examples > > data("BostonHomicide") > gcus <- gefp(homicides ~ 1, family = poisson, vcov = kernHAC, + data = BostonHomicide) > plot(gcus, aggregate = FALSE) > gcus Generalized Empirical M-Fluctuation Process Call: gefp(homicides ~ 1, family = poisson, vcov = kernHAC, data = BostonHomicide) Fitted model: Call: fit(formula = ..1, family = ..2, data = data) Coefficients: (Intercept) 1.017 Degrees of Freedom: 76 Total (i.e. 
Null); 76 Residual Null Deviance: 115.6 Residual Deviance: 115.6 AIC: 316.5 > sctest(gcus) M-fluctuation test data: gcus f(efp) = 1.669, p-value = 0.007613 > > > > cleanEx() > nameEx("logLik.breakpoints") > ### * logLik.breakpoints > > flush(stderr()); flush(stdout()) > > ### Name: logLik.breakpoints > ### Title: Log Likelihood and Information Criteria for Breakpoints > ### Aliases: logLik.breakpoints logLik.breakpointsfull AIC.breakpointsfull > ### Keywords: regression > > ### ** Examples > > ## Nile data with one breakpoint: the annual flows drop in 1898 > ## because the first Ashwan dam was built > data("Nile") > plot(Nile) > > bp.nile <- breakpoints(Nile ~ 1) > summary(bp.nile) Optimal (m+1)-segment partition: Call: breakpoints.formula(formula = Nile ~ 1) Breakpoints at observation number: m = 1 28 m = 2 28 83 m = 3 28 68 83 m = 4 28 45 68 83 m = 5 15 30 45 68 83 Corresponding to breakdates: m = 1 1898 m = 2 1898 1953 m = 3 1898 1938 1953 m = 4 1898 1915 1938 1953 m = 5 1885 1900 1915 1938 1953 Fit: m 0 1 2 3 4 5 RSS 2835157 1597457 1552924 1538097 1507888 1659994 BIC 1318 1270 1276 1285 1292 1311 > plot(bp.nile) > > ## BIC of partitions with0 to 5 breakpoints > plot(0:5, AIC(bp.nile, k = log(bp.nile$nobs)), type = "b") > ## AIC > plot(0:5, AIC(bp.nile), type = "b") > > ## BIC, AIC, log likelihood of a single partition > bp.nile1 <- breakpoints(bp.nile, breaks = 1) > AIC(bp.nile1, k = log(bp.nile1$nobs)) [1] 1270.084 > AIC(bp.nile1) [1] 1259.663 > logLik(bp.nile1) 'log Lik.' -625.8315 (df=4) > > > > cleanEx() > nameEx("mefp") > ### * mefp > > flush(stderr()); flush(stdout()) > > ### Name: mefp > ### Title: Monitoring of Empirical Fluctuation Processes > ### Aliases: mefp mefp.formula mefp.efp print.mefp monitor > ### Keywords: regression > > ### ** Examples > > df1 <- data.frame(y=rnorm(300)) > df1[150:300,"y"] <- df1[150:300,"y"]+1 > > ## use the first 50 observations as history period > e1 <- efp(y~1, data=df1[1:50,,drop=FALSE], type="ME", h=1) > me1 <- mefp(e1, alpha=0.05) > > ## the same in one function call > me1 <- mefp(y~1, data=df1[1:50,,drop=FALSE], type="ME", h=1, + alpha=0.05) > > ## monitor the 50 next observations > me2 <- monitor(me1, data=df1[1:100,,drop=FALSE]) > plot(me2) > > # and now monitor on all data > me3 <- monitor(me2, data=df1) Break detected at observation # 183 > plot(me3) > > > ## Load dataset "USIncExp" with income and expenditure in the US > ## and choose a suitable subset for the history period > data("USIncExp") > USIncExp3 <- window(USIncExp, start=c(1969,1), end=c(1971,12)) > ## initialize the monitoring with the formula interface > me.mefp <- mefp(expenditure~income, type="ME", rescale=TRUE, + data=USIncExp3, alpha=0.05) > > ## monitor the new observations for the year 1972 > USIncExp3 <- window(USIncExp, start=c(1969,1), end=c(1972,12)) > me.mefp <- monitor(me.mefp) > > ## monitor the new data for the years 1973-1976 > USIncExp3 <- window(USIncExp, start=c(1969,1), end=c(1976,12)) > me.mefp <- monitor(me.mefp) Break detected at observation # 58 > plot(me.mefp, functional = NULL) > > > > cleanEx() > nameEx("plot.Fstats") > ### * plot.Fstats > > flush(stderr()); flush(stdout()) > > ### Name: plot.Fstats > ### Title: Plot F Statistics > ### Aliases: plot.Fstats lines.Fstats > ### Keywords: hplot > > ### ** Examples > > ## Load dataset "nhtemp" with average yearly temperatures in New Haven > data("nhtemp") > ## plot the data > plot(nhtemp) > > ## test the model null hypothesis that the average temperature remains > ## constant over the years for 
potential break points between 1941 > ## (corresponds to from = 0.5) and 1962 (corresponds to to = 0.85) > ## compute F statistics > fs <- Fstats(nhtemp ~ 1, from = 0.5, to = 0.85) > ## plot the F statistics > plot(fs, alpha = 0.01) > ## and the corresponding p values > plot(fs, pval = TRUE, alpha = 0.01) > ## perform the aveF test > sctest(fs, type = "aveF") aveF test data: fs ave.F = 10.81, p-value = 2.059e-06 > > > > cleanEx() > nameEx("plot.efp") > ### * plot.efp > > flush(stderr()); flush(stdout()) > > ### Name: plot.efp > ### Title: Plot Empirical Fluctuation Process > ### Aliases: plot.efp lines.efp > ### Keywords: hplot > > ### ** Examples > > ## Load dataset "nhtemp" with average yearly temperatures in New Haven > data("nhtemp") > ## plot the data > plot(nhtemp) > > ## test the model null hypothesis that the average temperature remains > ## constant over the years > ## compute Rec-CUSUM fluctuation process > temp.cus <- efp(nhtemp ~ 1) > ## plot the process > plot(temp.cus, alpha = 0.01) > ## and calculate the test statistic > sctest(temp.cus) Recursive CUSUM test data: temp.cus S = 1.2724, p-value = 0.002902 > > ## compute (recursive estimates) fluctuation process > ## with an additional linear trend regressor > lin.trend <- 1:60 > temp.me <- efp(nhtemp ~ lin.trend, type = "fluctuation") > ## plot the bivariate process > plot(temp.me, functional = NULL) > ## and perform the corresponding test > sctest(temp.me) RE test (recursive estimates test) data: temp.me RE = 1.4938, p-value = 0.04558 > > > > cleanEx() > nameEx("plot.mefp") > ### * plot.mefp > > flush(stderr()); flush(stdout()) > > ### Name: plot.mefp > ### Title: Plot Methods for mefp Objects > ### Aliases: plot.mefp lines.mefp > ### Keywords: hplot > > ### ** Examples > > df1 <- data.frame(y=rnorm(300)) > df1[150:300,"y"] <- df1[150:300,"y"]+1 > me1 <- mefp(y~1, data=df1[1:50,,drop=FALSE], type="ME", h=1, + alpha=0.05) > me2 <- monitor(me1, data=df1) Break detected at observation # 183 > > plot(me2) > > > > cleanEx() > nameEx("recresid") > ### * recresid > > flush(stderr()); flush(stdout()) > > ### Name: recresid > ### Title: Recursive Residuals > ### Aliases: recresid recresid.default recresid.formula recresid.lm > ### Keywords: regression > > ### ** Examples > > x <- rnorm(100) + rep(c(0, 2), each = 50) > rr <- recresid(x ~ 1) > plot(cumsum(rr), type = "l") > > plot(efp(x ~ 1, type = "Rec-CUSUM")) > > > > cleanEx() > nameEx("root.matrix") > ### * root.matrix > > flush(stderr()); flush(stdout()) > > ### Name: root.matrix > ### Title: Root of a Matrix > ### Aliases: root.matrix > ### Keywords: algebra > > ### ** Examples > > X <- matrix(c(1,2,2,8), ncol=2) > test <- root.matrix(X) > ## control results > X [,1] [,2] [1,] 1 2 [2,] 2 8 > test %*% test [,1] [,2] [1,] 1 2 [2,] 2 8 > > > > cleanEx() > nameEx("scPublications") > ### * scPublications > > flush(stderr()); flush(stdout()) > > ### Name: scPublications > ### Title: Structural Change Publications > ### Aliases: scPublications > ### Keywords: datasets > > ### ** Examples > > ## construct time series: > ## number of sc publications in econometrics/statistics > data("scPublications") > > ## select years from 1987 and > ## `most important' journals > pub <- scPublications > pub <- subset(pub, year > 1986) > tab1 <- table(pub$journal) > nam1 <- names(tab1)[as.vector(tab1) > 9] ## at least 10 papers > tab2 <- sapply(levels(pub$journal), function(x) min(subset(pub, journal == x)$year)) > nam2 <- names(tab2)[as.vector(tab2) < 1991] ## started at least in 1990 > nam <- nam1[nam1 
> cleanEx()
> nameEx("scPublications")
> ### * scPublications
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: scPublications
> ### Title: Structural Change Publications
> ### Aliases: scPublications
> ### Keywords: datasets
> 
> ### ** Examples
> 
> ## construct time series:
> ## number of sc publications in econometrics/statistics
> data("scPublications")
> 
> ## select years from 1987 and
> ## `most important' journals
> pub <- scPublications
> pub <- subset(pub, year > 1986)
> tab1 <- table(pub$journal)
> nam1 <- names(tab1)[as.vector(tab1) > 9] ## at least 10 papers
> tab2 <- sapply(levels(pub$journal), function(x) min(subset(pub, journal == x)$year))
> nam2 <- names(tab2)[as.vector(tab2) < 1991] ## started at least in 1990
> nam <- nam1[nam1 %in% nam2]
> pub <- subset(pub, as.character(journal) %in% nam)
> pub$journal <- factor(pub$journal)
> pub_data <- pub
> 
> ## generate time series
> pub <- with(pub, tapply(type, year, table))
> pub <- zoo(t(sapply(pub, cbind)), 1987:2006)
> colnames(pub) <- levels(pub_data$type)
> 
> ## visualize
> plot(pub, ylim = c(0, 35))
> 
> 
> 
> cleanEx()
> nameEx("sctest.Fstats")
> ### * sctest.Fstats
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: sctest.Fstats
> ### Title: supF-, aveF- and expF-Test
> ### Aliases: sctest.Fstats
> ### Keywords: htest
> 
> ### ** Examples
> 
> ## Load dataset "nhtemp" with average yearly temperatures in New Haven
> data(nhtemp)
> ## plot the data
> plot(nhtemp)
> 
> ## test the model null hypothesis that the average temperature remains
> ## constant over the years for potential break points between 1941
> ## (corresponds to from = 0.5) and 1962 (corresponds to to = 0.85)
> ## compute F statistics
> fs <- Fstats(nhtemp ~ 1, from = 0.5, to = 0.85)
> ## plot the F statistics
> plot(fs, alpha = 0.01)
> ## and the corresponding p values
> plot(fs, pval = TRUE, alpha = 0.01)
> ## perform the aveF test
> sctest(fs, type = "aveF")

        aveF test

data:  fs
ave.F = 10.81, p-value = 2.059e-06

> 
> 
> 
> cleanEx()
> nameEx("sctest.default")
> ### * sctest.default
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: sctest.default
> ### Title: Structural Change Tests in Parametric Models
> ### Aliases: sctest.default
> ### Keywords: htest
> 
> ### ** Examples
> 
> ## Zeileis and Hornik (2007), Section 5.3, Figure 6
> data("Grossarl")
> m <- glm(cbind(illegitimate, legitimate) ~ 1, family = binomial, data = Grossarl,
+   subset = time(fraction) <= 1800)
> sctest(m, order.by = 1700:1800, functional = "CvM")

        M-fluctuation test

data:  m
f(efp) = 3.5363, p-value = 0.005

> 
> 
> 
> cleanEx()
> nameEx("sctest.efp")
> ### * sctest.efp
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: sctest.efp
> ### Title: Generalized Fluctuation Tests
> ### Aliases: sctest.efp
> ### Keywords: htest
> 
> ### ** Examples
> 
> ## Load dataset "nhtemp" with average yearly temperatures in New Haven
> data("nhtemp")
> ## plot the data
> plot(nhtemp)
> 
> ## test the model null hypothesis that the average temperature remains
> ## constant over the years
> ## compute OLS-CUSUM fluctuation process
> temp.cus <- efp(nhtemp ~ 1, type = "OLS-CUSUM")
> ## plot the process with alternative boundaries
> plot(temp.cus, alpha = 0.01, alt.boundary = TRUE)
> ## and calculate the test statistic
> sctest(temp.cus)

        OLS-based CUSUM test

data:  temp.cus
S0 = 2.0728, p-value = 0.0003709

> 
> ## compute moving estimates fluctuation process
> temp.me <- efp(nhtemp ~ 1, type = "ME", h = 0.2)
> ## plot the process with functional = "max"
> plot(temp.me)
> ## and perform the corresponding test
> sctest(temp.me)

        ME test (moving estimates test)

data:  temp.me
ME = 1.5627, p-value = 0.01

> 
> 
> 
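The fluctuation tests above report only a test statistic and p value, but the same decision can be read off the critical boundary that plot() draws. A minimal sketch of that duality, not part of the generated output; it assumes strucchange and the nhtemp data as in the preceding example and uses the exported boundary() method for efp objects:

## for the default maximum functional, the OLS-CUSUM process crosses its
## 5% boundary exactly when sctest() rejects at the 5% level
library("strucchange")
data("nhtemp")
ocus <- efp(nhtemp ~ 1, type = "OLS-CUSUM")
b <- boundary(ocus, alpha = 0.05)   ## boundary values drawn by plot(ocus)
any(abs(ocus$process) > b)          ## expected TRUE here, consistent with p = 0.0003709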
> cleanEx()
> nameEx("sctest.formula")
> ### * sctest.formula
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: sctest.formula
> ### Title: Structural Change Tests in Linear Regression Models
> ### Aliases: sctest.formula
> ### Keywords: htest
> 
> ### ** Examples
> 
> ## Example 7.4 from Greene (1993), "Econometric Analysis"
> ## Chow test on Longley data
> data("longley")
> sctest(Employed ~ Year + GNP.deflator + GNP + Armed.Forces, data = longley,
+   type = "Chow", point = 7)

        Chow test

data:  Employed ~ Year + GNP.deflator + GNP + Armed.Forces
F = 3.9268, p-value = 0.06307

> 
> ## which is equivalent to segmenting the regression via
> fac <- factor(c(rep(1, 7), rep(2, 9)))
> fm0 <- lm(Employed ~ Year + GNP.deflator + GNP + Armed.Forces, data = longley)
> fm1 <- lm(Employed ~ fac/(Year + GNP.deflator + GNP + Armed.Forces), data = longley)
> anova(fm0, fm1)
Analysis of Variance Table

Model 1: Employed ~ Year + GNP.deflator + GNP + Armed.Forces
Model 2: Employed ~ fac/(Year + GNP.deflator + GNP + Armed.Forces)
  Res.Df    RSS Df Sum of Sq      F  Pr(>F)  
1     11 4.8987                              
2      6 1.1466  5    3.7521 3.9268 0.06307 .
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
> 
> ## estimates from Table 7.5 in Greene (1993)
> summary(fm0)

Call:
lm(formula = Employed ~ Year + GNP.deflator + GNP + Armed.Forces,
    data = longley)

Residuals:
    Min      1Q  Median      3Q     Max 
-0.9058 -0.3427 -0.1076  0.2168  1.4377 

Coefficients:
               Estimate Std. Error t value Pr(>|t|)   
(Intercept)   1.169e+03  8.359e+02   1.399  0.18949   
Year         -5.765e-01  4.335e-01  -1.330  0.21049   
GNP.deflator -1.977e-02  1.389e-01  -0.142  0.88940   
GNP           6.439e-02  1.995e-02   3.227  0.00805 **
Armed.Forces -1.015e-04  3.086e-03  -0.033  0.97436   
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Residual standard error: 0.6673 on 11 degrees of freedom
Multiple R-squared:  0.9735,    Adjusted R-squared:  0.9639 
F-statistic: 101.1 on 4 and 11 DF,  p-value: 1.346e-08

> summary(fm1)

Call:
lm(formula = Employed ~ fac/(Year + GNP.deflator + GNP + Armed.Forces),
    data = longley)

Residuals:
     Min       1Q   Median       3Q      Max 
-0.47717 -0.18950  0.02089  0.14836  0.56493 

Coefficients:
                    Estimate Std. Error t value Pr(>|t|)   
(Intercept)        1.678e+03  9.390e+02   1.787  0.12413   
fac2               2.098e+03  1.786e+03   1.174  0.28473   
fac1:Year         -8.352e-01  4.847e-01  -1.723  0.13563   
fac2:Year         -1.914e+00  7.913e-01  -2.419  0.05194 . 
fac1:GNP.deflator -1.633e-01  1.762e-01  -0.927  0.38974   
fac2:GNP.deflator -4.247e-02  2.238e-01  -0.190  0.85576   
fac1:GNP           9.481e-02  3.815e-02   2.485  0.04747 * 
fac2:GNP           1.123e-01  2.269e-02   4.951  0.00258 **
fac1:Armed.Forces -2.467e-03  6.965e-03  -0.354  0.73532   
fac2:Armed.Forces -2.579e-02  1.259e-02  -2.049  0.08635 . 
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Residual standard error: 0.4372 on 6 degrees of freedom
Multiple R-squared:  0.9938,    Adjusted R-squared:  0.9845 
F-statistic: 106.9 on 9 and 6 DF,  p-value: 6.28e-06

> 
> 
> 
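The equivalence between the Chow test and the segmented regression can be made explicit by recomputing the F statistic by hand from the two residual sums of squares in the ANOVA table. A small worked check, not part of the generated output; the numbers are copied from the table above:

## Chow/segmentation equivalence: F = ((RSS0 - RSS1) / k) / (RSS1 / (n - 2 * k))
rss0 <- 4.8987   ## residual sum of squares of the pooled model fm0 (11 df)
rss1 <- 1.1466   ## residual sum of squares of the segmented model fm1 (6 df)
k    <- 5        ## coefficients per segment (intercept + 4 regressors)
n    <- 16       ## observations in the longley data
((rss0 - rss1) / k) / (rss1 / (n - 2 * k))   ## approximately 3.9268, as reported above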
> cleanEx()
> nameEx("solveCrossprod")
> ### * solveCrossprod
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: solveCrossprod
> ### Title: Inversion of X'X
> ### Aliases: solveCrossprod
> ### Keywords: algebra
> 
> ### ** Examples
> 
> X <- cbind(1, rnorm(100))
> solveCrossprod(X)
             [,1]         [,2]
[1,]  0.010148448 -0.001363317
[2,] -0.001363317  0.012520432
> solve(crossprod(X))
             [,1]         [,2]
[1,]  0.010148448 -0.001363317
[2,] -0.001363317  0.012520432
> 
> 
> 
> cleanEx()
> nameEx("supLM")
> ### * supLM
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: supLM
> ### Title: Generators for efpFunctionals along Continuous Variables
> ### Aliases: supLM maxMOSUM
> ### Keywords: regression
> 
> ### ** Examples
> 
> ## seatbelt data
> data("UKDriverDeaths")
> seatbelt <- log10(UKDriverDeaths)
> seatbelt <- cbind(seatbelt, lag(seatbelt, k = -1), lag(seatbelt, k = -12))
> colnames(seatbelt) <- c("y", "ylag1", "ylag12")
> seatbelt <- window(seatbelt, start = c(1970, 1), end = c(1984,12))
> 
> ## empirical fluctuation process
> scus.seat <- gefp(y ~ ylag1 + ylag12, data = seatbelt)
> 
> ## supLM test
> plot(scus.seat, functional = supLM(0.1))
> ## MOSUM test
> plot(scus.seat, functional = maxMOSUM(0.25))
> ## double maximum test
> plot(scus.seat)
> ## range test
> plot(scus.seat, functional = rangeBB)
> ## Cramer-von Mises statistic (Nyblom-Hansen test)
> plot(scus.seat, functional = meanL2BB)
> 
> 
> ### *