effects/0000755000176200001440000000000012654472141011677 5ustar liggesuserseffects/inst/0000755000176200001440000000000012654133545012656 5ustar liggesuserseffects/inst/CITATION0000644000176200001440000000271411265102577014015 0ustar liggesuserscitHeader("To cite effects in publications use:") citEntry(entry = "Article", title = "Effect Displays in {R} for Generalised Linear Models", author = as.person("John Fox"), journal = "Journal of Statistical Software", year = "2003", volume = "8", number = "15", pages = "1--27", url = "http://www.jstatsoft.org/v08/i15/", textVersion = paste("John Fox (2003).", "Effect Displays in R for Generalised Linear Models.", "Journal of Statistical Software, 8(15), 1-27.", "URL http://www.jstatsoft.org/v08/i15/.") ) citEntry(entry = "Article", title = "Effect Displays in {R} for Multinomial and Proportional-Odds Logit Models: Extensions to the {effects} Package", author = personList(as.person("John Fox"), as.person("Jangman Hong")), journal = "Journal of Statistical Software", year = "2009", volume = "32", number = "1", pages = "1--24", url = "http://www.jstatsoft.org/v32/i01/", textVersion = paste("John Fox, Jangman Hong (2009).", "Effect Displays in R for Multinomial and Proportional-Odds Logit Models: Extensions to the effects Package.", "Journal of Statistical Software, 32(1), 1-24.", "URL http://www.jstatsoft.org/v32/i01/."), header = "For usage in multinomial and proportional-odds logit models also cite:" ) effects/inst/CHANGES0000644000176200001440000001147011716766566013671 0ustar liggesusersVersion 0.9-0 initial release to CRAN Version 1.0-0 o Rewrote summary.effect method and added print.summary.effect method. Version 1.0-1 o Blanks can be inserted into or removed from effect names without causing an error; thus, e.g., "poly(education,3)" is equivalent to "poly(education, 3)". o Name spaces of lattice and grid packages are imported, as required in R 1.8.0. 
Version 1.0-2 o Added ask argument to plot.effect.list, and row, col, nrow, ncol, and more arguments to plot.effect, to support graphing an array of effect plots. o Fixed bug in plot.effect that caused xlab argument to be ignored in certain circumstances. Version 1.0-3 o effect function now works if na.action is na.exclude. Version 1.0-4 o Fixed small bug introduced in version 1.0-3. Version 1.0-5 o x.var and z.var arguments to plot.effect now take names as well as indices. Version 1.0-6 o A variable specified in xlevels can be fixed to a single value. Version 1.0-7 o Made effect() generic, with a method for lm objects that handles glm objects as well. Version 1.0-8 o Small fixes to the help files. Version 1.0-9 o Small change to compile without a warning in R 2.4.0. Version 1.0-10 o Standard errors of effects are computed using t rather than standard-normal distribution for models with estimated dispersion (as suggested by Brian Ripley). o Small fixes. o Objects are now named "eff" and "eff.list" rather than "effect" and "effect.list". o Data sets now provided by lazy data. Version 1.0-11 o Replaced obsolete \non_function{} markup in Rd files (reported by Kurt Hornik). Version 1.0-12 o key.args argument added to plot.eff() (coutesy of Michael Friendly), to allow conrol over, e.g., placement of legend. Version 2.0-0 o Jangman Hong joins project. o support added for multinomial and proportional-odds logit models, as fit by multinom() (in nnet package) and polr() (in MASS) package, following results in Fox and Andersen (2006). o added the argument given.values to effect() methods for finer-grain control of displays. Version 2.0-1 o Fixed bug in effect.polr() that prevented computation for a model with a single term (reported by Paul Prew). Version 2.0-2 o Fixed bug in print(), summary(), and plot() methods for polytomous logit models with a response with numbered levels (reported by Paul Prew). 
Version 2.0-3 o Fixed bug in all effect() methods that caused error when na.action="na.exclude" (reported by Tracy Lightcap and Rob Goedman). Version 2.0-4 o Palettes from the colorspace package are used by default for stacked plots. o Fixed bug in handling of typical= argument to effect() (argument was effectively ignored). o Added Titanic and Wells data sets. o Small changes. Version 2.0-5 o Added examples for Titanic, BEPS, and WVS data sets. o Arguments ... (e.g., digits) passed through in print() methods. Version 2.0-6 o Fixed small bugs in print.efflist(), summary.efflist(), and plot.effpoly() methods. o Corrected error in missing-data handling that sometimes surfaced in effect.multinom(). o Added .Rd file for package. Version 2.0-7 o Fixed bug in handling of given.values argument to effect(). Version 2.0-8 o The S3 method print.summary.eff is now declared in NAMESPACE (as it should have been all along). o Added CITATION.txt file (courtesy of Achim Zeileis). o Version corresponding to John Fox, Jangman Hong (2009), Effect Displays in R for Multinomial and Proportional-Odds Logit Models: Extensions to the effects Package. Journal of Statistical Software, 32(1), 1-24 . O Fixed [pgk] markup in .Rd file cross-references. Version 2.0-9 o Applied patches contributed by Ian Fellows to allow logical predictors and various coercions in model formulas to work properly. o Fixed name of CITATION file (was CITATION.txt). o Small changes to docs. Version 2.0-10 o Backed out Ian Fellows's patches because of errors. Version 2.0-11 o Small change to eliminate warnings produced in R 2.12.0. o Added nrows and ncols argument to plot.efflist() (following suggstion by Michael Friendly). o Small fix to docs. Version 2.0-12 o plot.eff() and plot.effpoly now return an object, printed by print.plot.eff() (after a question by Michael Friendly). o New effect.gls() method, various changes for compatibility (after a question by Oriol Verdeny Vilalta). 
o effect.lm() now stores the covariance matrix of the effects (after a question by Bernhard Kaess). Version 2.0-13 o effect.multinom() and effect.polr() now use update() to refit the model rather than calling multinom() or polr() directly; update for effect.multinom() sets trace=FALSE (after suggestions by David Armstrong). o Added [.efflist method (after a question by Andreas Roesch). effects/NAMESPACE0000644000176200001440000000404112633371321013110 0ustar liggesusers# last modified 2015-07-13 by J. Fox importFrom(lattice, barchart, current.panel.limits, densityplot, larrows, llines, lpoints, panel.abline, panel.grid, panel.polygon, panel.text, strip.default, trellis.par.get, trellis.par.set, xyplot) importFrom(colorspace, rainbow_hcl, sequential_hcl) importFrom(grid, grid.pretty, grid.segments, unit) importFrom(lme4, fixef) importFrom(nnet, multinom) importFrom(graphics, plot) importFrom(grDevices, gray, palette) importFrom(stats, as.formula, binomial, coef, coefficients, delete.response, family, fitted, formula, glm, lm, lm.fit, loess.smooth, model.frame, model.matrix, model.offset, model.response, na.exclude, na.omit, nlm, predict, qnorm, qt, quantile, residuals, spline, terms, update, vcov, weights, xtabs) importFrom(utils, menu) export(effect, allEffects, all.effects, Effect, setStrip, restoreStrip) S3method(Effect, default) S3method(Effect, lm) S3method(Effect, mer) S3method(Effect, merMod) S3method(Effect, lme) S3method(Effect, clm2) S3method(Effect, gls) S3method(Effect, multinom) S3method(Effect, polr) S3method(Effect, poLCA) S3method(Effect, mlm) S3method(print, eff) S3method(print, efflist) S3method(print, mlm.efflist) S3method(print, summary.eff) S3method(summary, eff) S3method(summary, efflist) S3method(summary, mlm.efflist) S3method(as.data.frame, eff) S3method(as.data.frame, effpoly) S3method(as.data.frame, efflatent) S3method(plot, eff) S3method(print, plot.eff) S3method(plot, efflist) S3method(plot, mlm.efflist) S3method(print, effpoly) 
S3method(summary, effpoly) S3method(plot, effpoly) S3method(print, efflatent) S3method(summary, efflatent) S3method(allEffects, default) S3method(allEffects, gls) S3method(allEffects, mer) S3method(allEffects, merMod) S3method(allEffects, lme) S3method(allEffects, clm2) S3method(allEffects, poLCA) S3method(allEffects, mlm) S3method(effect,default) S3method(effect,mer) S3method(effect, merMod) S3method(effect, clm2) S3method(vcov, eff) S3method(`[`, efflist) effects/NEWS0000644000176200001440000001554612654131635012411 0ustar liggesusersVersion 3.0-6 o Fix bug in Effect for mer objects with 'poly' in the formula (and related issues). o Allow "||" in variance formulae in lmer4 models. o Minor bug in handling class=="array" in the Effect() method. Version 3.0-5 o Fixed bug when the name of the data frame is the name of function like "sort" in mixed-effects models with lme4 (problem originally reported by Saudi Sadiq). o Fixed bug in predictor-name matching that could occur in names with periods (reported by Trevor Avery). o Fixed package imports to conform to new CRAN rules. o Added residuals.cex argument to plot.eff(). o Changes to work with pbkrtest 0.4-4. Version 3.0-4 o New default of KR=FALSE because KR=TRUE can be very slow. o KR argument now works correctly with allEffects(). o Mixed models with negative binomial did not work and now they do. o Added methods for ordinal mixed models using 'clmm2' for the ordinal package. o Moved pbkrtest to Suggests (wasn't done properly previously). o Tweak to handling key.args (suggestion of Michael Friendly). o Use non-robust loess smooth for partial residuals from non-Gaussian GLMs. o Rationalized type and rescale.axis arguments to plot.eff(); scale.axis argument is deprecated. o Added setStrip() and restoreStrip() to control colors of lattice strips and make beginning and ending conditioning lines clearer. o Added residuals.smooth.color argument to plot.eff(). o Cleaned up sources to conform to CRAN requirements. 
Version 3.0-3 o Made key.args argument to plot.eff() and plot.effpoly() more flexible (suggestion of Ian Kyle). o Moved pbkrtest package to Suggests and adjusted code for mixed models accordingly, to accomodate Debian (request of Dirk Eddelbuettel). o Fixed \dont-test{} examples. Version 3.0-2 o plot.eff() honors rescale.axis=FALSE when plotting partial residuals (bug reported by Alexander Wietzke). o Effect.mer() can use KR coefficient covariances to construct CIs for effects in LMMs. o Modernized package dependencies/namespace. Version 3.0-1 o Added an argument vcov. to Effect and effect (and allEffects) to select a function for computing the variance covariance matrix of the coefficient estimates. The default is the usual `vcov` fucntion. o Added a warning to the documentation for effect for using predictors of class "dates" or "times". o Fixed bug in Effect.gls() when var or cor function depends on variables in the data set (reported by Felipe Albornoz). o Small fixes/improvements. Version 3.0-0 o Added partial residuals for multidimensional component+residual plots to Effect.lm(). o Small bug fixes. Version 2.3-0 o removed stray character in a the mixed models file o ci.style="bands" is now the default for variates on the horizontal axis and can also be used with multiline=TRUE o Added ci.style='bands', band.transparency, band.colors, and lwd to plot.effpoly() for line plots to give filled confidence bands and control line width o Added Effect.mlm() for multivariate linear models o Interpolating splines are now used by default when drawing lines in effects plots unless the argument use.splines=FALSE o effect() now calls Effect(); all effect() methods are removed, but effect() will continue to work as before. o Various methods for effect objects now handle factors with a "valid" NA level (fixing bug reported by Joseph Larmarange). o Further bug fixes in effects.mer() and effects.lme() (following bug report by Felipe E. Albornoz). 
Version 2.2-6 o bug fixes in effects.mer and effects.lme. o added terms.gls() to make effect.gls() and Effect.gls() work again. o plot.eff() gains an lwd= option to control the width of fitted lines. o Added ci.style='bands' and alpha= to plot.eff() for non-multiline plots to give filled confidence bands. Version 2.2-5 o Added support for polytomous latent class analysis based on the poLCA package. o Modified mixed-model methods to all use in user-functions. o Changed the default method for determining number of levels for a continuous predictor; see help page for 'effect' and discussion of the 'xlevels' argument for details. Argument 'default.levels', while still included for compatibility, is depricated. o Added .merMod methods for development version of lme4. o Added support for lme4.0. o Fixed bug preventing restoration of warn option (reported by Kenneth Knoblauch). o Fixed handling of ticks.x argument to plot.eff() and plot.effpoly(), now works as advertized. o Adjusted package dependencies, imports for CRAN checks. o Changed name of Titanic dataset to TitanicSurvival to avoid name clash (request of Michael Friendly). o Minor fixes. Version 2.2-4 o Add argument 'ci.style' to plot.eff() and plot.eff() to allow confidence intervals to be displayed as lines or using error bars. Confidence bars are permitted on multiline plots (after suggestion by Steve Taylor). o Allow empty cells with crossed factors for lm, glm and multinom. o Added warning about logical predictors (suggestion of Kazuki Yoshida). Version 2.2-3 o Fixed bugs in axis scaling and xlim specification (reported by Achim Zeileis). o Small changes for compatability with R 2.16.0. Version 2.2-2 o Use asymptotic normal to get confidence limits for mer and lme objects o Correct effects.lme to work with weights o Added Effect.mer(), Effect.lme(), Effect.gls(), Effect.multinom(), and Effect.polr() methods. o Safe predictions simplified in effect.multinom() and effect.polr(). 
o plot() methods for eff and effpoly objects permit predictor transformations. o Added as.data.frame.eff(), as.data.frame.effpoly(), and as.data.frame.efflatent (suggestion of Steve Taylor). o Small bug fixes. Version 2.2-1 o Some examples wrapped in \donttest{} to decrease package check time. Version 2.2-0 o Introduced more flexible Effect() generic, along with Effect.lm() method for linear and generalized linear models. o Default is now ask=FALSE for plot.efflist(). o globalVariables("wt") declared for R 2.15.1 and above. o Small bug fixes. Version 2.1-2 o Offsets for linear and generalized linear (and mixed) models are now supported. o cbind(successes, failures) now supported for binomial generalized linear (and mixed) models. Version 2.1-1 o plot.effpoly() now honors ylim argument when no confidence intervals are plotted (fixing problem reported by Achim Zeileis). o safe predictions simplified in effect.lm(), producing correct results for mixed models (other methods to follow). o plot.eff() now honors type argument. o nlme and lme4 moved to Suggests. o effect() now works when options(OutDec= ',') (suggestion of Guomundur Arnkelsson). Version 2.1-0 o added support for 'mer' objects from lme4 and 'lme' objects from 'nlme'. Added 'rotx', 'roty' and 'grid' arguments to the plot methods. o See CHANGES file for changes to older versions. effects/data/0000755000176200001440000000000012654133545012612 5ustar liggesuserseffects/data/Cowles.rda0000644000176200001440000000701412654134353014536 0ustar liggesusersBZh91AY&SY?L$ @/'} RABa DB""T4" *?UTU(E0LL&0SUSJT zFiM&Jh=UG*?U?UJ ި Il$,-)!DBH*DDTP$HR" ! 
$, ID$!DEBTP*ED"HDBE XQB"*$@@(D!"(RŞ%XT(@BT!H{|g'$  XI * U@HHE*DUXR!T*āUD *)DHHRT( @""$DR(H H*DH" J $$*$EH)b!HDR"X$^u翖^+z"A@^x*fk.T/[mh_נ^ɿW̊sW|q}J`筅_ei3oXxdD/JWM&L-0t!Xő0&~L{9vX &ޏ`$[ygs'4#]z8*"G-SlV7b򴓬\Db1:vQ#6}g&#E?b0{ql)j ӡ0ּgpzC3Xx(PΒ{.m] cc5\r%5^KljzYb?cYUje֫*ST8Ak+!dW NF|}<[9d[(4xH>8k=哴ㅞ2}{Qm=zއ># 6B)S%z ,eMq);-zf+7ܽ<6ڙH!qMt ⽴|@;:}IQd+۪M(5P a_׀r+1sub6*m\5zlVbZTibHCBb1Ś.vMl/[bM4ұu+m/y@qyku&&B5gcw0;5hrib =||㣖50m$RN㽝w7UMxȾڦ-д*7|.% `5̑?w{4KyXʘ|021|2] Bp;mCo1]lca`/55hiw{.-zh77BHxL̓LBwso9_;ĖK0ys1gYRF ¥^yG*{}Y a1e-P^ǂ/&Qs~u(a5#[[p.O}3g2NLJ;^+)[0FX8 7eW ~}~gD/`;ŶDELHx"0H\[9z?^z=zR}@D 7UPDwNN''N/Ofm$H~Ŷmz ~?MQ>_V-Q-KUmʱ[$D[IdoKlʼneZ|-dKd?lY$Ķ!,EK,-I->>>>>>>;{@09\oz8s{9޹ps7s9o\S<IǏ)„effects/data/WVS.rda0000644000176200001440000002620012654134354013760 0ustar liggesusers7zXZi"6!XP,D])TW"nRʟ)-t^JiȖo @(==ᣑZGCs,; 3Kp2hͽDc&qݤۙ}Iv?}ːfA!P1vnqwUT؂e b s8KM ȎH=1Α-@*| :T\B# ϰء"SmOq8_~#tpB.V+z'tdڡ0b`l@.R%`~x„Z.=)YL{R6NK`כx(,&U ~!* q+s,t48mj*>!6qqڤY? da"2Ww qyO6cHh٫zGf!_0;eh5IjćjbҸָbyFk){ ݲ"ﱛFo59v%%;]4"`*g(O+g@=!HJq1(0{Z&@0ӟ]aS"2T'r!oerQo)"Ot92LK+f2^duXKt–AfE"'kCP@E=>smg1&̆'ĤB4*QsZm]X91 x.U/ʋb 3 rr_Sr'2 {G$D3A$D#j`e؞&7 2}N7f caJޛ}gA]o $j<,_ ?) 
z]ERspl\GEi?ih6la`P\BNIV°(_'ײ:J8bخ]x17@QԇWYS{ % Kё1mx Q*S/x /l:YT@mEr@g8af<<0;r<nckS!qxRz`Jiը!8^Mj!LW 3+ӗ'pn5* djzXY.ēH Y}Q]dF.YFgЏW537z+a#m\+k(EÁqk}Vl-*Dtbjp 6]i=rhwL%wByvʾ<$}rFE`1;#غvKJy;zQ0x]bm; ];\шF[;KJg%bbjbXGY> ZWT}(zm6k 7bponu/,ƺD9<@ ^~]9{I_5!Zpm F7zΝ=P >1 Z`VU ㅜ Co@!Dz+Bc?%bm Xql#@c;#uyϺ62hD"bH?z7g^|o8gy$RQrND`B?tS ,W=x bdJPMh )-n֢0- Eۑ$< 5 BU pqe%rkӲhFIm6sbRkV/]MD3)\UqvՆ<"Cτ"`N&1IҰ~yf` c#.~}Pe@vnН0\; H sڻ4 gYאg@Iyz`-Qg'SQC{AKe\'mEs#L㛠~ &!j'`@kGvQZS24@@4p!X޹6@棁LWa#GQRV[uyrjًԶ1.'Ȋq <ƾUWmGi-󿫺rXY4}QL`ДPdKɮD+E3=w)TMu%EM kACShԄIJzJ>[ehEN-}(zėyc:NcA50p}bě o6nHF@1@X>6hސeķ}x 1ZfiԔaeRǤ$2lGmrX̄azKZ!gK9!iU|MVB;H]׋ *Ǎ rV>E2vD&ܻk Ye^~fӚE_omj*JXl/chXTW1"R~'B"[yu6ve@MOؾ?E/%?JaxKIm!&ސP:շ`C?Xfjy$9KK79 ¡U˖%M_PJuVV} UO99o kQM`,34pن>,¶N 2?RfAi08͑ ",;I8tʳS#$p·Kq {14~a 6lUM|BN"YȇDJ_zi ɿGH٤^{cJ"ΛGUtYeT+ꂦJpW Ǫ?F͗KU-'Ylv(B6 D2ylo~Ȝ $ԟc)y'_rLAjx&Q;tU9Ȯa&MY>ByG/ԣG tI봝XP669e֞b*ji1kJnNjs;3{[*g&uι6/Wt U} ,f5 D]&3%S,`ҮۑE &#g#7@VRWc|nR1&Z1.UrN0y̿!LkbW15i:3akDpR= HXbv.eZz앎zqNQqbćAx&e!'psѐA 6V$`0[o+n.]ʕrZj.!f!i!RI|}fxt,xToBbVp J,ރflG\ ]1+|SA t6Q6a8AA+('jTXɬ$nIceD.6xaʕʵ=i7~(oD%Q.G ek@d8d tAod  ?>q| ?[eQA׺2iC'M鰬d͉wV^$|,B2S$VYMQia=S)q#0v>pZy!4ٓtďtcQak)sΡJgߎ.iY4SED+ NkziktWt3ӢU QнZ1^{)J'DX+0h'4a]kc~q]qu2" g]@Hd/WB.>#7{{(d6g' Ɇ꺸76f|ӭlCZh;.٤OXƙ?=;ܬQe]v9X4y4w6|tWO_WKUtcHOT 5p4ZR6D8O]r^ /p[s+-9ٙ32hQ4|qqԤșgrZDz9S<ڢdC-nkw)@$Їcٙz*NNH4Ȣ!rڪnp2'?H+ 32aGY9Y άNԘ+.jrwM_< v2o2"qF>37CbXPYk>ϖkͫHx-Le*>ۢ:s,;[)j1| 7d9 "aʘ/ v "6/t`2-Q41bAԯ/+*c-N( d $R.&Һ[Wk{\0NOjm#F}Z3H-!u{fc<:]=tjBJѩ G}dELIwㅅm@$ÉY} 6YH!^snM~w\ aK)x*O)kR֨~r7?iElPg،(8d$}'p#`mFh{Ie+@Mifi"okDP|m788 *jRbGLD\ ֗7@ 039txf\驟6 :D[q|XnL4bCHA,![i(ƆCD΍̴YDd!ی$=jTL9pz̃gFDACg }Ӭ>{"[ml  5K-6r 5M&#h_ݲyoƩFczş:d6d*(ggַ:Z].'m޷ & 3J޹䤝BL 2 I~[?rGũI927Zd3gWeEVW@3I%js;좔0[5o@%ƼKXY><;r51 awÚԛR8MSWF|Vh"j f} d_,q;h#Qgw9TANJ /ֶDSmuHd_}@8zgC8Pq Cvȁ.jZǎHJ_yMԐ樵wi*NM-1szFjr*0 ߗ(lB ^A"@X3ϋBm(X㵇s_FB6E5HlÕNԿ 8 h?($0Ig]cK2";+=n@!V-κ̀Z yz!0eJ -Z7+[e:a c$)=qvoѣ- cnŒewMsMp}J5,Urf[^[ . 
#ZYFJP>]j=~Kdsj\hGGw=䟟\^~_/r4->l,f|k2)'[ ;BuP\ H1q҈ F֎vnԚ/0l1[hC_qs#n!0݊1H4sN\ĀD8 0P 8ISi`pk»Ƃ`d{u>K4U ̟/IuY)H {d݊P>f@ǡ㩹 KDC(; ]2I&-kAc_c_$c5#.;PksXL X>$:|p1@(RHw߳کlCQ/L6c)f(+')zEuYq$0wr=Μ^awa8aakG_wSPe4̥ g1(tD#F*AwpsOBBp??m #VH}aVP{,DR>xi yL%FNc{&S=w!fa)?2<^nϲ!t;vQMH3 YgLƪ=פT-xkGpz8y|Ӳvjo.hZ wM1 v[W{.3]&Q:ɔnax1n8'}{uű |9I]. jAHߧAP@(q${e-r&dZr1b lxvR]8'EoV ZbRxǙʲ›:FSbNwGİu4'ė#5,>U" DBe`y]F%FZq&~BF?R#Y~ݱAE;{T-Ǐt/DCFtt9Ew@1ޭLhNdu& bƠwl]' V{ؗ4Ѿ@3qbdI|L07I4D(jZ4^Ze@ |~^b~М<t@=5p(DL>Sc4!k]0uS:/Petj.V7 ,"c>o˿@ 15 _Mbil[Nmy+,D*>J=ܝv|4c<]~9uMl _7'H9ήFZ6(! X~]Ô+ǥf?o ~BXd>2F)^lm|W/?ksFBeTÅܲ/iElԹ=[-~g^vpZ 0pYɜ Bch酟b;x {{`|oBu7Ĵhqmr%D%,c5"ȍ0dx=JtWcgZS;֜5rٷ$=="©PT'1_.?+vd~/OBŮ(E(13<\#3HKi˼/J6[;" i뾔KCГ}?ڦban (po"S:%V[iio k& 0)#4h;k^P>Ø쌞0?">g+-U6ez !,=ʒ]`  X1d~+ŀeVK!cykLd8sYEQV<9{U.'JSj]l)Ss lA)%N^ջ>v=,cqc^ΐ ,^[RN`v,"CAj$"n %c="jb0cnlJx"^M0x@&)  8#?\cP-a&NB?Ya. @$v|٩Ʈ TlS-U!`:IPfJA:eG|1g)U-M3|G z ۭwdKƼ]3ǐ`uFЖ56D .'a?]$VgG`҈ F1_z#6nOe`ED=VY@Ѥ!Hk/t8wH d_b5c9+kAċ8~gڙ,w#\註+ZTX|`VvRR/o&X\>$z7`(B>\lژ~ r'' タt#ߘd=O*p Ttaԯx{ JݢZ6PE2/U3 :$4WfU4}PfrJ= |JmR^P]&kdݛΧ{tzЀȍ+I  )\`Y> ND4s:G|R^K^3{% Nܻ\+Ƨ|&STs*#`4xa8+6H bx:,pTDteh;sl35D@w8y *\lO86Gʾ5"40V&np ' T, Y?)W'֑9;6T3bjU3R[lشzyvl_$pjyzUH漉T 3O =s{#W`M0+|N3|7d\sV `s|܈^:po` 8I(Eߡ˦W.RBHa#.QRdĉrdkw i"KvR#ARFKvM1, n"LŴa7$[XHfG([d͕(PKcA^~y6QOHF7HFWy I_ɆZ1xOiRCqJpuӟX#J^Nm1h9LPSsq.qEKA|ƏȍaIx|<=|jźaaPPJ'A;̖1AX{eI*3N $2ٙqK6Й=W֮YXS1z Uԙ)}#ǰrAQI k|I"W,u3?-ƺihzllXQp67S,x(^#0uQ3%7tF2 F`Da|%%fͿFWN@P9d5@9Bxt) pMB,}bt.0ACGHah52`y;ӈ4]*"ƝE1r>Ȕ*aN[Qauf^]Ty.} /Q=i22H!LɔXXݺy+}7ȴwM*)Ɩq> X4(,>szG١*FFx6&tk# ~mWvVFG?[C fZ w+rڀ1@ 8`*_]&X$~~ 4 *ѹ\nlDw5pO8ᾠ&b5"PMX '~>0 YZeffects/data/TitanicSurvival.rda0000644000176200001440000003301412654134353016430 0ustar liggesusersBZh91AY&SYBXZ?P RPAE:7OB;Dx}P^>UU'`=rev -&*nZ;2]5Y 67 zQs888豄Pl44)zOPcSGyFi !DқS'=CO(ɓF44Qj= <(&@JDMO(4@hhh  h4h$m5h4hƣe4HD@@4ORzF&3=|TVɹcFV-^5EKt £/L·+[fʹJ+֕+86iZuZ{yZ~M?5??p:ٶz|߰ϕR0e+bg%o[1ޭ]SڗJYNNd5σP w[G՞Ϗw sU#rznݙݝ9mkm^feoio Xf|[[U_L!y18tmwŬsu}30Ǟ AZ5gLyu|SWMo!~:|Ս/+2X G۔6*_),~vO?\8uo. 
s| WUvow=gO~µ^~qSö?̕[Om^w{+:UW]_ſ?,7|/ũgg=x]:Dsk L~+)_ݕ 1|rYkW_ Ϸ%0j߭!kJl~].i<-g9ۖVb1a2QbA|-Yk?/!]a+mipz?>P8_폃^8^2Q_myry~w-f+):pqeL!j;ЉWκ'#p8]ô>jR&u*W-BGmWkVi|mH"‚{q`LHRhI$( JB F 3x v_Zpc7JDʾ='5Ug[S橾n[2_]}`c3~hfS|0^ 1XFSEvtdhֽ.71Ϊ .q:OZHumRgl񎖍M\Ѿзh6ѥ#:*,'J_u̱pwtaM}gQ: 0hn}t\FnlAYz F 8{R ro2p` ű503T 4)eV-QZI(Q̄m9@uhޚЧMhTkV47رxm#m[bN{A#VT)DrMu#!~'/"lhWǐn*/F`imK7UxGÙG; ݈g JXr::ځ~]ΗeLE֝eE.P&JQ@EJ(:_$ {H"9΄zgDʈ ͚TְKcB4)biS!Ael`JbݮcU6C*j[2%U/+A~#OrhSm4C 5J>R X,ZX - <H[~rq0cEC J#0ERY>L`8ijUeII.Uxe8[Iz n]xTnWz~KH"uWYYfoX`)8tw6&=fyleƝy^ٰ;Iٳ^/1Ǎ"YnlC-ߚŝ5xomcz9 Ὄj{) KhݷkOgVioW6ehyP!uM[[1}XkL Nvo <2ܙBrؘ K/25)O5;hj{LXmb:מu7Q2䔘 ǍfiV"& r7-G8m3HB"$\9#^$gukk7~z7puęw[ƹ|{sEtxyMs]Aʹ"l20$I̚tq]ܢ1U  W1$eT"I `&HbtЌ7urNۆ仍@5tFF\wu޺dCVp1@ w|@l֌AiRT?oF*齵=מϧ{#d&JaֶHqg/%~],/Dwgwnlk~HkUieP_`sdDعQH p".anWp;p}C^2} HR֏`bclXCDD ,v,VzbM$2 y|kVsЙ{r >̴E ۷] `Rt nr-gH;`[s(1SIR@!:xDk~X1_Q%M#-Fh;6Ɔlm 'f@<vXSX|%j2c5l[8nHFP7K݇UDvcY,@6snoDCʉY'8[`Ǭ( J<@I+IHSхw8u]apc}CG&g}k:BPnKaBEC>|xVM/cxS -3"}?iŻr>oag^M-LHӘ̉(*yx~4IEYXD@b ַi 6'9+ۧēmoU 0==jF'@ Z'QG ىA-Q-컔˻צݴ&EPI%,ܲE™=my P JڥkȀb GɘaL1fPCN{Mjolh,Qss[;o&goZzio%6~u"8J)DXZ+J92*rRt<%#$,]6n(6-lla!#pwJr XԢi>eQlaaI:Щ#BU#c'y|=zˠ$PĹr){njpUA>ob$> g:l0q!.$[ohĖH50'uvQoQ]nIq`jx ޢ DZ= L]8@!X vt8+lk73bgLdVpʦQή,A&E`;i@'\cA5`60| χ>M6CL1{ǟl4Zg8,aGA+JZpyUQk* ѿ0_܎0<$"<$= P%LN'1ӿ#Q6uUdI"r7)IEQ;qӠڧ fuB9Fz(PY_G")X( z<{Ү\~<Ӱ*312)јJi`:ǦK @a#P ta5f[ŬnMsSlcn4unso6mg@jJX:Djbn͒ Z@qlX]TUK.XX"~1 Tsה_l>]jTo#²=]Ki',c7=~4Oj9<!4dZ+*HZ+N0,=1$q,e"݆ujѧxh^APX]A1 `֥S@+jfPNiM`UPa… /k.,&`0jٵɤίK}EwҳY -e %&.hUV9Pg8ƚ-j߻emS>? }Sag]Ta/çuA.\mP!̄ :]Gx/nNk/91K#\FW尢ȏ*'ǺH.INU_iM6t3e=٦G O*sU2Z_㛇w OwMq=&pA@e+HIZ]\apQdU5-Hk0o&;ХɯؤBepx5t&ɼ  >f z*;C00|QpIQul]muk{Z&&=Xx-}r+DQ: j{=uzDڥX-IA`޾zLNKz' x=w/i@ia{SMxt{t߿S 8"{]0/9֮5J[|${2%aR#R]eo8`MyˌNuvZy,we8}΋ѕe-\6LӿȌDD WۺV@nD55h{ Xd-xc;rw;]KY#T9UTSKF`jwzg^~MYe|ƛLkC}#ʖQNccvқz9sk7ZVrxHbV[h߫ 7wsٝv){ǝF@^MFm@ک_l% }7<FnxO 54ETr6寗 $r+YplHс®c)3Jԑczj(P|~CSNg''XC˕ӑ *ݺd`F:uL79oк`'yCbKH8aӆ,k+W ѻFӬc`Mo8W6՞lh F|LU*qx0!  
xb0SImnĴ$)@^:)lm0NYx)p&4sx8es`8 殺#:Y؀ݡbh]$Q1gJ0~P00IN^ L÷ =Aa901Ǹ_U~TYo͢1xf ,"Jd!>4ӳ>~K(٢rK{^n|)1<j[՝KJH{[Hr fߚE4GKzK:NKFO|7Cwn*֍/d4~h;a%EJMr>H-쨺 ]m;Ir?)&-޽=߂[㾝mnUF(<7 /%1)XqGs,w$֧8^ʰe(4Yxt;Ӧ$QCch:(NtbEA)'6'?Umeg$C/?&5 x\}ֻJaG}y.).xyz[[ R\+8vHYDs;zFLǖ_#VTI4zN6"mw.um \L|ajnʗz*@c1kȾpҬs?ds(SÖU=%o5 ~5lN -;p:ۮr 8E<;) RfOZ6<?tKWxy5DTBK!:9u tBZe@,GSfމ];*!v]e11÷ Ω]iv6-rp#e }'2n|}>֨i33xKkT_8kt&)R^ִCPjPpAp$QSwy ='CX@DSgW>0)bӲCrq$Lx1]d#c!DLrhk bm"*gt 3۝/ׇ SI  ϝV= 5*2op0=vBJERXڅ^T-[ 7 0sHLp`@#${8aȶŷ Zp Db?5pqH6 )qu4.rR̢%xps*yDLf Dw&ЫpS!2UZfL3Εb٪ gj7zkVASeeFt!!VKX"RqRY8Al ۩]3 g>1n^aԬZ Knwk]M2G4^Cutj]w[>)}[۬l 3a&ѻʼ%ꙣL0ݖ.+2jؓ`5IdؖeToOC$BTa Neg;4 &e;h!5:gu"wnmb 005}+63qi \'Fb:ԛJΠ+,"mPT]c[_-5-5 o1@E%3:Γa @1[\ "Viջ R6in ;dӚx @I_qG6aaoVSE4F+cgb dZ)o [%:)Jҡg8mF[i$ `d)9#̀d0eDӆrYT!8)4.;l 3$s7@\=؁ 09f<) !U.]fRux` ]>߯ Z.3V{W5g"`hv[T!M}ᅮ֫U8JZ }zH@s!#p γ񤬬_3cm0\"B!_/Vdc,yu =#|=#a؆FϠ6aGJyJ4Ami\¥uYֿS6(1E!M ,*ƪҡszBo\۟232|kiIg˒]bv-=m〶DԢر(ĒLE-9':啷-7KUKe|* QAY8I˭XW#OeJXo=Z |9'25Q#L˸}ezSys_Bcr )^cD:ϋ m(dB.s #!D&8W-C^芆 B*m<$RӫȎ\ܼĒzaf5^)^"?$eiAqCX՟`7Q,ZsDQКj>7lW@c@隿_J3~Zn+ F׹qh?k 8䀌1u `H`pr&3/;Əux #E 𐼃\/N#O3{OO ۤ .@lAeZO]( M 0~"Ajl;~_16#?^&2ML1%s4 m;o-F&WHW}Y(BAa4ܺgc ZZ4ΉXx7uOބ"!Jxs]((Z4+gT^4\_RǝyVoJKǧrmF`}o xB,^_pN–|_L]B0D2FZ 936z|_~υdXImq'!l:{.q۞e }؝Bey#N ITQ{E v6n—8 !KU2\Ȼ3,TA1pjjfug JțULV9M։.f6+4`LFq0m Ý󉏖Mtf H3@+0`(]G!X[6?]݆^Zv6Klbm0_RUa]h0b]hclvEl7y&iZ,Ù/+k˅Ydm6j?O6uqQ}4Ap܏y׏~XVu>&1_=؜>Y_" Gi^jT=|J`EDYp2\c9.C.t:nHnjKb/^>}${Aki1gߴ~O_uqO뻉S6Ӯ.qJ'Xqq[WܿKe[U׃P^2xo ɦ?-/ⷆsb5ͤڴ\&^Pn"(HGS,effects/data/Wells.rda0000644000176200001440000003730012654134354014372 0ustar liggesusers7zXZi"6!XL>])TW"nRʟ)-t^JiȖo Hq9F2)\N^&Z zxZ:r\` ͓NTZfNGO[/vXLO}j 6d2/q^Dzȳvj&֘!)X̧3 Ng#~"⡿D2S9n5xI٠6RUWM)+˖~*tvX doGf3&`V8ٮi[|@Z1XTvȃ@RFL"Bd/ٚLF*blx9C(iwb1Ĵyɒ%Bx%?$h\d i`mdKfm٠9xð O鵎rHajTP \(ǩŝ JLj=}]M󺓠a-*}RkB&LH쐭+oe_@T?JJ: l݋>X\_4o3S*Ǹ]>u/=|OA{y~9ABOk~K)i5WA2M$C`X# /A&}L~nqzaO-I<][d߯LMRi/@ iA奃XLPo3~V3s](- V;i#@csfco^J4Hm%yݽ3QpC *bycӬ[K}#K<n}`KfuH4"UCu T$k XtșR A2L D~4yn)mCc,p1ePi0tLXRpxɛbCYhfX!ͦ27B?Yyh{yq98 ` g d<7rیRG\їJ؜mSf- 
d"_Ut'lU=crݾA@!,(D¹OfZD]DkQlKO#rB:&`-\k׌D\qn*REx" [>l}LDuFv8T,d?< b=.&EKsIy@Y{4vY+?ڵ:jBaɠILZdF ! Np6ԡt5`MLXd,ʸsl}Ɩ~]>؇.a"Skǔ~-e8IeğE!*pSE\zan2ʏb֚<'+"o^pz!?x5'ɉs Aī @.q4ߍCӡYet-oԻ*KwU7F}}el%Z ,Q~KV ZPss|1cڎ+O! ^W X̬UQewB>lƽV* {W (S6ag+5|x.RFH ^ꐺNllmc0~4^bGݢ ʥJX7AtQ}0?kZzH 8i"pS1$@vkX@2la֫iNkL{Pbr@Tmxi !d~P%8SիPVn2NqsBYs6jf*p+ kvĵ42CrK|+ȳu#z`Ah}؇xE6c7kX4+#stsT3eQj/ NU~`UZh-@v)/KtA:aVp|@h7x}s*$'ů2`Mސq6id]t,(u940b_RV/'^'2"X#ڡ?z24/oI͏)sjƅQ8(ѲbL7d)rwpQ%cWJStr0rM[!+GlV=׽Y:!Ôǚ+)4N R2WTڏ%o#!C+{' HцV+vލWS [ tWo[~A >ޛ`;RRSAQK{A! {H*RgB:]tώ /F'$fBaHr@ä)ai+fTs::z2G?;ql @1N$ :`4g'j)ƅOтfZVLӴ,6r> uO.oVp^oL6{h *yņ -!Rx.Kezu|uLS.OLR;!*dJ<:V\& qX,l( |ң5_/UafhQSB"fe~J2&OCXt1@)eڛԊU![3)1n HwŜOĒtZ`B$uvKʌGsZ>AĢXh  EF:IfY` b#w,@Hr&27vLKYn@WhXG+Ni՚{Ҵcx[w%p-2}#.}}.oIuS1 ٷ.Jl/sG`\.uݳlWuɢˬ^Pqy6vm(Ҽ/Uo3t ^7fM:&kh֖p GN'QOrd}w9~.Њо^U)G_WPnտϞ⃗zD Y%$O/߹c' W/U"Xm~P.Q w>co%N! 7Mlp.f\1~?ı=;_I9o<U䌾DUg4$#Qo4xY$吤-"^-D8|25CqtV`F8?J akܙ#Y7Ĝ,)@QQsG3eY@xq?eʠn)Ci@z msF O4&0U@p32Vf`d _o$?]6cJF$+9XyuX d$`(Rp'5Ym*Cm``S F2ԧ8\saVl5@V褥W:Hr] m]]y$KJšP}Di)d};})q tyՑiY/7, -FÛPBzfgfuMyGD}-B(h(M==gT\vO@QVs1 ԉkoI$A>eΔ>81%JɶXQˬH`m 4r֊*2<-[EHSdәAEe؉Q2b/P\B0S"T1,{"~A)`3 {OV# \ L߫З[LzA/w;e KOVt](t"taA_'x7wg,ާ2l@ 20,̅V;"}9d9Mda =xT4{6@/`M/7)K˲(ဇ0z>_5~yDLhRSvG:%1qT12X(-כ0sYr =VD,͍Etz|swY43>*P,i}R[V 20dҹcӊ&CycSҕujZr*ެEMw+6 PHi|nalMBi(D,$ ~?g4 p(;hNkMcY}y161#A#_( r!;kGΚ; ۾cjYkc~1S?h` )ZuL\,[]S 8iս0$W!> nc $9V#z'OՌ<8A bP;{ ffv>pϊ0 㻨؍0H|WgHkqܓc,| 7-ta"bp+ Ŋ~޲y[x˩.w.UXsޞ`|7VF ^4+K\/3RuID#WpmD5H5b mApP2TOʧ~7 pj3']``hG?3xf >$eAɺSR4 c+u6t߈0DpKeA&'B4#F<Ǩ_+ϑqzBEdDs椓ȩƚgFPyrz$C/(4~+|5&qrb|BI޷nMp͗gM7H~2RN .ɱ¿dy\ \')Tw](Fڀ-6le03oN1˛`c͝MOs8p}i3j[S- ua@U-%Dԭ^PN M @ML47aA jL\LnBNČ/P˝[o wt8d">6TxJvw,Ot6uyVJ F`(n7b!ƒ$Y1Fr;h C,4$iǝ \!;/*gZ`,'"#%ћ]$יTxvnU7 Ha ۃ&1Q쯯9EG.k,eظMx>rF->$O\& ս/mxŀk״r*p cL7=%}}zFqpg4vkh]+T\b;ε0 2 r91AP|rSKI}ǥC&Nzi6pOT[gJBЀu[y2WD=7BLS8U[ZZz- ,Rxe`# 6Voލx1W3JϨz؊nCCS=QgRapϐ|DkI5O% k˷_T,1Dod vuxqЬ;DTPLm.0MBXFW~{ 7^2u[ёF6 KV47;A>#W %Zu[}LOa$3Up(5Zlat̡E]BÌмi^N7u$/%۪Q]'uoY' 
){F=2&#Y+[QV7T*i!%G0`LxLeJ$m0B=IZ*fz7:aYBQgkѯ6lp75.Q͘aPC}h@-&i[pR'*\ayaw'bCm$,E˦\ȋZ"SXN 2\ pg`Y$~e<Z `+,}4;o-0aIv1Ti8VAσ̮8vب۳։w87Y4!`n} Z]={M$4I& 7#_qE1mC)f uq;*OM卐7,PY%֩n.v9vZmѯd wW>2&_qŲɂPi%L!-w59Eڙ`zbw"a6p̷~$+ϟyָ/U'r4{+%(z\ɒb+[gj1KtOdYc CM'<|ވaġkM@#'1cC-԰[蕷ID&KT@qn$=y\-;9 .s ޢ}&C6E4aSF${MygQ^w@[_{&e]CgjoP{aD'G- -,nf_]b(Xfq%fGR@_RHZgd+}@IV ^r~B\CJܿ ,(e/( ٴCP7)&1.nW S|Lo ,\標Ѹ^c>KqldZLi ~E/M C#~mc^L-tܲnUzJ{[H%l:X/,8H$b$1$p>rEP^L9:ɹ6g3ovP2R_3R :~9`\~]pl%m\'73UD`B±GNt)JHm<<BՕ??+BP ̘LRS_ݩTM/l@ }'g冏@$ZD:6E:t=.~U}xV Y>}qfc;=*~or,Q7w $Zf.t+.s!+y,;@ZBr!;l< vBe|oU{SX|T~ݝG~?-:~ Ǧݎj Ej% ggu8apIʎ(#v^5p&BsUGV;Vl^|KHxj$Upee;,SϢ12u yK]J?m+bI%4鉴Mc; ~__ Tw0 %x *KW sU{ok?:Z7Z#_KYM> ;=P>nHDW/g2b'8R`b߼*Q>$4J^LN0z^`Uښ<'dHڅVf ʘrɧY2BѲ>6@8jQЩbG F}#T暥J=n|UJtx=! nBi RRPΘJO xS;Ftн=_BB+'?RaQFt-8k~+/O F_+H!"Vf,kA]zbv!0=CSBA<'9_fSN.@ZQ"xL3.`pz%uoĿ 7Ie<]zvMΓ^qHYPMHע3c?βj]&?ցZ&8m~[]Kn~· I.aq捷 ytQ':^FZ%mmS'Qȴ$)7i69څ\$WʓDV- 3 >/ úeK&L$wռQN_` wUbr.l2폌a$)pܬbfc-XSpx1r (ySTk6Zĵ{WJ[s s V68(&bS5n}J `k){/U r3"IYS"'Lx =ςD qb:|Ԃ cW^:_du>YHIcUꗹWv_ܾ (BW$B9n`׬ hCEtd1-p>+nάXZ<SKJPuwD&f9^iUXt2W^ꜽt.ek žAيuf;[[+7d(vsgMJ1ԑa 07*L J8[kʩoW݀QVTa2YYrBŲ\ IVpwN˖Kw̶RhzC$CYZ-ܦWyK"n";@wFH/oݿXΧ'"``^#/alq:L|.w?¼$@*eN^_An~fؓ]g^~HlNV\fH= g<68ںtzkshv Q+dsb "YAORc"W=_U"߮h&\u XcuȼeHrh%Bu/NRg|z k(m$ 0ܩj,c^ۙ't^6UWvӑ7R`*n0 ]ּIUntCKTDz 0txPܑR`գGF% k(pX tgO0qO|gGs&H< sfۃN܇?[؟A!/}u9S`ObOՠC5HÁl #=7V~w"iJ+QƜ>v'cVPSe rICV2&I ^h'WޣxJo+*B6q;$ts!vxM*ə~?prgJ;݄bxfj`A\05(ߛM/7˲Jhnp)dnI$w"R@"T_m,>hjM|i3xpzt`rX-Kd>fjy`]Tݹ3Keuܶ8B4yxp pHXdOkM*n0XMH 栬97m)0G+o풾0<h8r@Յm8L0v]E=D=3dqܦnkAgg{^ bk@~@ +RʥRHH wێT`>OcyLw܈Pgk@#ps73 e(2xW9}q[5mES’)gXY'wHŒf#aw2uY%&hm dBE@b Ɲ;(fm 5AT ^.Z&. E3eWYU< K.\)KqɀS !uMɲHQB则w]7~ e jQ橀z Zf&S&ko h`'I,f/%mLwU[KAYJ= lr\4M"6}@Z -^$ Mx4;2_=NVu" ..EG$^W:VXvw=Y5Q8!gKVȲ&KC.kL `^l䠄z+dPTS0B VFr f~ŬxZ.R%f4=Wc]WmJ?Qy(5ziN&͌E0ORǴ0 ? 
pDZv*♇tKEʥ AG?oƗSJa/9k^>Zjnϡ4(te<"UkT(J ݸdۻ#\ h %f OnN>W< V6ub w$rTha!pz[M{xYm7Ƃ̍ӱM'SX\sXE$@ y.e@d[ /eT>/a,5fa+e*lȮ  d3Ks6Z&M {XRcm~ c< sO?ܕp|ȴpŞ $/:We1~@h";`J|{dkvoRx lLƲ (Qe{F&lcY>d~9exw(LJ8Pn8ё#ydI@ܒ+ʧ^-B8n<&5ۈwGz31v ;jLQbh-t߭"9nFvrnrs0I03m#κTmD,>pJ1X|SnzsIx2pui Á|@?,H\BئPN3t^fPkֳZ6SL3<)|pxثlI?gyc!s.zlkDƑ8Lq過!O$]llVLa~Ǝ;qQ^:RR3f=,Wn+ 4DJܚKH+ˉwW<ʼn+R+%MxbRT! =PlzO#yn.{r"jv=w/j-u]{ҁ6>F*0 tI_'cB05* j0tQ3wr|LZ}& .*Ǝ6Ru8ɣD 8="ueBWgQ D3z5zw~(z>" [-n$ աҗzh^d!WGY$j~=xW>X4} t #%_~%EL_V^m*e +YsQ7 cNhh=m(@PcAH 缴ghšO[R}`O.z;Fέ xW6Ci5U .2C79m\5=П1'k9R&Ss-.#mxsFa}=5&S {֕GG'Իw -_*Nx~~Vѱ:{> L eRb8p&<2%Ro( H|?@:^͆'}rq-㤷C &A>3чkk&BSKҐ5lyvVToS`gMKxXaaM76:zH=k싥lUFэ8薩<;JRVEԙ.F޼ZZBdHQnt>oboykVF5Z;WZHǨhvW!4tK<@+2e?H9kvG`ܦ&'};6(z!0S7mV\єfL@? 3r&cNp:~@WNi}%-)EC, ? zNNS؏Hn1O qURsq%hLFl=ջ]R+bQ_G3-:!_`v0gݟ㙃61P݀+ )($z,0%FITkxS K%.`ܐi. SD&< B~?w=u-t=#Vk).ξWkvEZ߅xA:9`*u;e%vcm3n"A=Xp~k5Lo\+1՛S54xp8 %+gEt]<8Pe5% ,&uCUpIǏXP9``\JR'=LFÂ5 Iǽ笈_% 2.\qr*8bU} @a>0 YZeffects/data/Prestige.rda0000644000176200001440000000526412654134353015071 0ustar liggesusersWklW?k{mo<ݵqgL'MRٻΌgf6UhSB ?T*PT$(/*DxTD- *D B{fgn!u{=9;cMᏱ&[ZX'[bVH7dYGco'y嘚}9Qw׆Gԉ7JQ*7j uwLMc?%YunpNu֏_I 5|~*5wgݬQ'^]Cx7>Axc~v4S>}$}pv>|IefVg,X?2Lve\˒O6wح+Ye¡2~[g{j4JcTӄ?J|2m86;HǤ}_|)๝xou[K&;DS3M8if=}&xQRT197?| cbliI2_`, vm3ؤY{9$16ݶ?3*cӰCnck3v{04c265ð' 17KOG\b%H k8? n.pm,؃Ћk!߇8փh1Nu؇N>ؖk}uKi>z+ݏA6R>D7#}sG2cc9>#$te) C$rfoTo8ANJtfx}Z򱞖8{~F窋tԏ&e?fzs4n"+㥾$MS9tE?JyCq gDqRWNp мEF8ge]&d_%ԫ ZꡙGVW}}E|; [996h= bShLvELz{٢|W>?&=)TOewB2&` !|{\wZ+uˠpx.ח(x%}r3G89h|Ӿ9pJ~iw="ǻ gwH8 {}iA⽏!}@|N8g:LB<Ƃ^ÄO"y~gGȟ }נJ OO>Ђ-O{;nmǽlL@f!?Ý !'㘋8688]q7{~xHB!`&B>1 p'`tbIHb?I'n I|g]?66Agx؆^2;=2@*=))45҈cI]%y'dw gM23*~Ɔaj`ÿ;3"͔ʺ)+655Jd[!izUS[O.}h$ImEr@{azɅx9|ݸKVRv,O'N-~`)TW5-]\:JCg6ݻp~AY> :ΝD+ZU˺=qi&QpɋfUL^+VnPеpuVe@]wu-a4Z]ӣUPYM'|٨p+NU̇{;R,ynYg*O8XEC,ӊy@۵4񙵜и,C9ѹh6V-{e+ #d!3WL+g}jv2w,qz# \ܺusMX6J8p-R(fqEIzLU`ǡ1C[cKaZ`ażZ.sv[LEmn*^_`R> U-se{SĿ^)Ѐ9f'r#@#Fʺj+Suf2} qj߻Pg]̵..0tŋIɃ%\o*awwV=sr+_,eLMd5;n m YyqŕnmJ˕ ϖ~5Wr2N\ǢSc}cF7:M8nu&4Uy3I|/Lb  <d[\i&. 
"P=L=U*UDd-̨9ׯP:wRʪ -|a+]Ί/wDvw(ԫ3.뎺E_l``b? HYIUr ʿ{gj|ZM\w:s 5 S*9 G=+7캺>|k}ض>%̎>yW1Fi*MĮ\>dRΙ^/1z6GN$d 96큑o]u`=YRinX5f6.keKQUʤ%y<ͤZWn0]Ic&p*uH$. ?#߂` >]˿`D"4+7U) "3Rw -te7Q>Kr6 V sU({dmjǻl{ HAoB:F ~ -@ldhKXMK5nJ7s􏦥KIetHn##,փhIոΖwMfkdRо;k=r~-֥eNZQ<}r:?YžM2MD7J[/M303-Knd_3 -5o4.ibz!cO%zYnĞ1?H?@I7sV0ȯLc^gqOo*=E:qdj((%_Hx`%;$W,*Q}Sm?[%2J2{-@s`9) Rʏ%#()@Ʉ < I 2Z(]0Ct[=4`5/JQZGaɻ0<2eϬDľcNơ^`M@; 2>&CK#-aA;-ΤpM{iG[[ưv2m銈({܆e׉KoLX*~W2#Q>/-d@tY$( nP@7SS>.eQQR(T1a ]vÜ]{#buT6.c:~Î[ #Azyx:Ђভ$ipD>$qx`v%LښWvŋ]$Q!oCQkZ8u8uWc287vxRkPĥ(9kkRћxcx $jd-kA_qRCX*:xq"( *M C]0&^Aobn;XCz<]霮tu[Y܁峓5VGe7HAA#SM@G*}g}Yrh2L| yk~s}@5(}CX#y"R".yGĻ0gEBgS xCIby)ӬRx G$ADgI17RI}S[s0b-30gK- Mg-9Z JTktx(nm !ဨ3]ߤ!+Ts<48Bvl&*u&{}Cd>U ͨlJ{gqX`_--;^}Sq6% 4@;\'Wo_PQjox?Tg찤Nb} W'Ti[ࢨyh\ q\,2%pFhBM?0rTR \6c`F7lW;Q4sŵn @j@-T\, bj {jy)4:` D$ps)5Hޟp@+DͤEiەRə3 ^]"o׉I 0}JDQ8wIX1hQ2R(b́~W_k$=MhFNm*J=5[V '>gpCvyI0VӉ,>ĿIufLjE 00ODk@|,zEU1d, ϗ53Oo0!ωVf4z`TK?{!u-O% wSdJbʢmxytXzygל6!v3{IK`dAXP GgE~ GMz_vU|^Q6ZGk#Wn DY97Gwy*FL}((560&qӭhqTaXt:Ek>:pil,v2q@%}&ǜl*N=Mܜ'k:S7qKiU Q*ׅ\>o33:_%%cTe$Ek)O1:ЂS,9QGüYSʝڧ]' X\g8( c;fwU?u::.ru\eX8k̈́JYs;>eOiL:ލiHb{fwϬ8UYyzqSoŝ$r3!,n(7@:ŕc u×?UOO9GQ#&l4- 5E(& dQD[COPA骓L@H 5ѧ}#<G擲b;+ %3J<N)1Ż[fKx݈R:,VD zפBrmca:ȍ]œA!4|ˢh0@i) $ {l >nHtƞ_IlA2mc=sz}3/5UTx ReR-@&I{5zzE.E߸z,L>`xJe3o0q.&ğgDzb4{0ctBn| lm3 DmRwƓ2~ݬx⏻qIy]!N6 Ї `=<ٞFR%T)R WkW0H;YȂs?ʍu}+})\VtѦk%~V]1a<ۧkvJ:W|<2lQaTfc-&PrA|3 R+I| ׉uv*$ARQ [C8h(ubose&u:`>?1/H2#, ,~a sȶbax>A( )y%E޾w$LIm CU#v&}qg6~WqN@CIr.*b --Ñ ] A0 װ^?@x7YzdNh# X[͔\Z=+tC{f6]*JRNu=-y(3_2,5H'G #m$=Yi*+/3_kv3De] yx%>ae>)K(8Ď@7IxUG_}!0@MhRֻa5 Wפg E&tɨfa7)RW'P v=ʗԚSᾳDo<΂NÌבjz@sS*Hjg q&xUNx0,=bAg `G^^Tױ9JtR?X~Ҽ3L}mBw ^LUa=IW̠iXըG{e{[wu8]1>]2Lc {\v\L$஻ͦ(h [葨EREH+z1RRFr>ngZg@u١&‡@[G}DH(>KğH-<\l};*h80[Ȁ͟7lB|3 h[g+yk>r[ j@(': ooǠ~I8~$8(gMavXUt^&iAa?q R)X[_ ,u)N.)LnIOԭw|#1}rͱvNh ,ڹ3v+*|(E6+/4?c5Ec7H~~]2RaU`n^BgYS0Gl^j*ow'o|]p;0c;7'4azQ*φ/ 1k4duF>;3R Ty9ZsS/+"\W4pX\Z9xyƛ;{~@iL0w-fқEv+7X9bg?U0O.=Kg䃁DKs xhUA]9@ f|Ȱ7G%FjʆÙNdIjBvxE<|0/^u&bCՙrU*=$Ws·LQ#9 3O"} x~+'jW~G XlqI gnD 
(OSOyN`c{/y5bpynDG=|zS^MݢR8u07Q%4ƞڹsZ|*@c}(K@hs9>{Jv>u)(rΘ9%Oc#gVXn\~Z2[iY/ \^6AG?sKܣ?իDW.8b<͂@ͨ 0В撆pٴW`/iIFx9us=Mpgx ں~/_6Q-`\ԭkdSi}rU_&/p*0qW3;wq/VQO(]B<]gܽ\x}19ɨ iWRGJI(Kި_FG2HQs$/5- ɪ{0n8eD~_5lvR$t Ŝ YAv@805k9K˾iK.\'9a0KFkQ}PU_Ys,@x) Z2ֹ~u~,ў{^iIO }Fl hCB~ *\6}P͛wf@'+#;Mދ5_.bUmO Oo :7@36̡זv+^1̔QGDPx^B"Y* ^tQ68}7YCGmwL yɎr~1^, ?Nӭg|#6D|Ǫٙ[޲^3TD#-%u/N涇WNe#}V(z0a"alS< aR4ȒG}KVfC1-Fěp 9#[KïM 08XR/ju *ʽ{Q. l) v\7";Ce 4O;v35JpQMq0C0^^9.WΔka]ϵ.L7"= a1eL P6NHS3q!E}}QHJGjmbOR ]62NC\EpKYw:ݘ4< 8i 7Qfsco>QIӜZ4n2|n1E)QHwYv,S'ptS~s'5Ǖ3lpC3e 1jUR:6X|1U,&i/_,U:X}c+Yk0Z 7*T7D?r0 xGoW1`9 BeB= nu9Jq!euYApx"8[^G? և@{KD?k_ *FJH͏h/€rP%m}5*!`+D"QpOʩ( 3U)Ҽۙ3VӳʹӗV 2<3Ң 6uH͐6!eFuuڀY2ڳew3y47#R"ZW?Ba8 ߱񀒤vQ',{#cE0MmaXT`2~~SsA zh p2<4!\% SRn = ,"WW 0PE.W(^=,er&)+"l>HVzᕵ`+ )!$y,Q't;}XN}; NeD![_ ӅVU4z!V:5DT/2Z=^#ښb :X``K0ٟM>M7#CZ︊~1lx+dqkПh]c5J{ iTۨirM*Y #&~#[ʑIL7#YĘsDpbäx!ڨ @1KXzӤ򎫗473qQ:XsupsSo}V I4U=f8"CB>Dr(bQgDyneg- jz-wr6 ;%}Ļp%3Jd ׵'َ+dLM5M"kuYKDerC^YNjL > $j}.>O14g, a=M8k68<Tp8Y@`Ry5tIݓkY#'_"8:s_ql;E<[F7>*Z$ҍ/ȲS-_шhR%_F+Тɺ\~X+&6r_ z#bŋcz^F*cnsqWj@?T$o2wmuݻu!m 0 ,;C/t.Y9*(.q\ĥȲ;?`-A9}vK*LlAP1f NUQh 7ΰۜ  0%\0u:V<]=j/9ÎKEU`qz(t8햂@0}Tҥc,lƄx%!ߣbj+P*Ȃ1 9zpTOVUVH2vِ?s1lBkI.z XT?%> cf&! a]U9%DF}y'g7R^8xKj&|/E֪>/ML$€P$2^."9ңcOdҒC!7=`mXL+:]KW1yp" =)Q^HNr ;Q?"KdL6I}drsM"$-bO@ʻ/6H_4.G|V "^oqO.t|gM7&e#ᦏvmlzҾj*n2lxYhZEx"-`1?@{>ӏ㕟 Q\%N=,>O,1I6푕%<;?#5y7;(|46RDU =ב a.FMoRk!.ek='b&) pI_[/ڻhREG{D8(aӭiGٍfQDs>0 YZeffects/data/Hartnagel.rda0000644000176200001440000000213212654134353015203 0ustar liggesusers]lSe߶ ^hflb#0܂9k׮}vMDPDL1zCx K!$t\x “<>_=YNbU+))UŏGU :RP/3~j@=hTz &$x ow. 
{`/8>C08>G8> p| Nn)Uۢ* g g"@ OV m` Tj}`hA+@{=DAUr L_ + }xb9=Ӕnp\x< ~kYKܒd{/㾓g{._ӝ"S+yNJޤ9ř⿆/^W3*~:o'c<Go-tc/=./2Y]}迤s^0i繜'Ħ-.tE%"l O?xas:jB];TYgD՛ @PѧK+NA꺢&Vca<2""vЯ+gO&è_^샼?u{,qxGô∷Ds'et5%WWT"D `,v_R47ݱT([O>@!F͑Ң_oAsR[4@[ ֿk*6Gg_+:m̽RB:[ P5@ -@_xI\>&+*p.3qn&(Jr=ڝ5&ϵ=6͇dtWQ*伆_k̥0su'Sw_G'gJqB:߂`i6W.u})T!dL,~})<4I}` f AĎ7plqhŴ nW.u,='VQynA)A_Bcx>&fsEyd`2࠷"MXd~WDAS{JEhqx DaGm4I[^Js!C- j U$lo]nl0 fφ\k6S]6YO]:*Zհ&l |%(a/0N;׿FcmIVVXuIr&<6:˥g>BR HfnE]' ^77YNpgF -DIӑ}WΎ3@v#)I(S_zsjDUzՎ?Kf&LI]ŗa."Sx‡*gႶ| f 1(U{͈؎&i!Pc(JO g┸ SH>Wf>+GO d<[6", )M>53&$ $HsnDf7 $F_3{5"^]>6g8&Q]\.8g}_ bv&D)ML/t{P\Vn=Lcuc- +,Hמf[12" npZ-./L;zڝv?:oB(v}\/W$h>D6u^tGy:jUgT,Q=e)S<9wY<g/6t3Sh>r+.^χĪ\VV4TaA'۝ތb*e-I%tEĮ+یn?'a[oo'7|"ѩZo#tYT}MS"$jhsڅlxO:S1+oā܍J#{lˢS 9Uyh[cL''TA&V_uK~?LǶHNq̙(F.mXQ>M,N{ eo:IH}Zat z n6HZ 'yFŽΔDmF*F|AgC VeK砛.0張ծ3YѻJá9J%vW3?۱ƚkH=9)|#V9-c;4@jo4Jfd{”t*9_Dq,5tGZ.K;GH G+o6v |`1q+LԦ#Ed*|:h?e2Cz/O8'}V^o@";@yy9 vqLŵ*P58 ' d63A:Q8_~𶚂P;%pPpр}Va8Kdy~ExఫV୛yP֠%?v͙g4b~%#ӎ6O޾&b$D%Gl$` <~W@ퟗǝ0C-/oF>j2[CIֳ2P.^ld{@5\Q.]cC9ff_PI6rd^<m,#@6u$bXv|k蟕7] f=369=MܓW['[WAlsKUW+KstRv6}0{QhLifzy{<%gV,Owy!RI|3W605NXQ2dndD~`EWfJ`;guַ9F2ެqpѻBFSq|Z 7Fi g$i ?qF1_#2}aiT -" =Q 917L~#h2nL+]a;ؠ7V$xNq&YvjW#[}#Ԛb&x,W -&.Yˋngz; )wGFZ^AWQ=D_G&SY; 5GK ڑVU|R@YY)M t7g*|!@.,4XUx#|@@Y3] 8'~&nF~B\&HMƒ5C=WTKZXs^m0u} e7eZ,`(Ot2шv$պ[</FlUt > Xu*D9q>g"ݣjcpoҨRo4Bq)&&ݸbc wfMS>u;/ R,묥,vE r%;yfWW"˂ Q*y, Nүbo`Yg5:J5_$ѿ/3}47nS"JfNFyB>롪ppBsYGeH]1"E %E[Yv:W~wA <-ܵ ̂k]/ !U9Xfev\I, ‰bwO>}}NsI]ZQZ\Bh"qlR r-!u]X3)TT+p#})ʍ/I'c Wi*(cWaE4b[D9UWg3 [ >.T/T :180,%Z-jߢN 3Ac/OݥڽFH/:p6y*EkH'-ĝpK뇣g]Bt܀Эԩ I՞so_lvvDJySt̊JB p B~hjR_~' !¹1B`77\3qCaژcJ$[pbˇsT30LD~x~¥M ((@Qwipaf8P4S`㞅j8AIHź6!AR J3(VIgDvTnM-eibޅoKM!oN/a& a-#oYq`I^fnz[T(ڲ:g1t0HAU[D a!B C_ٖE8YQTqV&72>pʵA#@@ }sf CiK1CiRߜsVl,wa篝 $&୉{hJg] H?Ȯ{ si"A^ڱ~F:I/}Bk@;;=£B?zkº$>ү'=ELUEXH2\#$ſT5K(n &Sbyv+WMלPo>[DWuC5}kyF]DeR 3c,X"J[.|QiF`6i!hіAm/m Ε֠M$E=Nق%->n2c{ u #VʆlkdMNAmf8)KlhlC5 e\w-:dpQKZQmW +bx{8M w}#qKܶz5LE5`(DMھik3u #ʑ`MWQYJgzL#ͯ +܋*CI?*@ϼ;.+_k:('rxRwM<2l8҄c>הeשy4ωb:-EZd 
ZTfAF.jFfhij\p(~G[Z_S/ی80(/M# ] H51n+-1Bܗܩ!_( 8[sIn; JΗVWcjRue͠L21jZkqoB恙=?`)?&<#н\d}%qID,;m+,7IRj!{` kx޹h7߃=?l%"#~}UN]Ȣ POZ{I+:/.R3ny@x3ۻəh*]ױxou^cS 4mJj#V b3e=?l;ig̡-h9 9" 6=>0 YZeffects/R/0000755000176200001440000000000012654133545012102 5ustar liggesuserseffects/R/effectspoLCA.R0000644000176200001440000000360612415772752014534 0ustar liggesusers# 2013-07-31: extend effects to poLCA objects. S. Weisberg # 2013-10-15: removed effect.poLCA. J. Fox #The next two functions should be exported to the namespace allEffects.poLCA <- function(mod, ...){ allEffects(poLCA.to.fake(mod), ...) } Effect.poLCA <- function(focal.predictors, mod, ...) { result <- Effect(focal.predictors, poLCA.to.fake(mod), ...) result$formula <- as.formula(formula(mod)) result } # this function makes a 'fake' multinom object or 'glm' object so # effect.mulitnom or effect.glm can be used. # effect.multinom requires at least 3 classes, so if classes=2 use # effect.glm poLCA.to.fake <- function(mod) { dta <- eval(mod$call$data) form <- as.formula(eval(mod$call$formula)) # find the missing data: omit <- attr(model.frame(form, dta), "na.action") if(length(omit) == 0) dta$.class <- factor(mod$predclass) else{ dta$.class <- rep(NA, dim(dta)[1]) dta$.class[-omit] <- mod$predclass dta$.class <- factor(dta$.class) } # end of missing data correction formula1 <- update(form, .class ~ .) 
if(length(mod$P) == 2L){
    # only two latent classes: effect.multinom requires at least 3 response
    # levels, so build a binomial glm ('fakeglm') instead
    mod1 <- glm(formula1, family=binomial, data=dta)
    mod1$call$data <- dta
    mod1$call$formula <- formula1
    mod1$coef <- mod$coeff[, 1]
    mod1$vcov <- mod$coeff.V
    class(mod1) <- c("fakeglm", class(mod1))
  } else {
    # three or more classes: a one-iteration multinom() fit supplies the right
    # object structure; the poLCA coefficients and vcov are attached below
    mod1 <- multinom(formula1, dta, Hess=TRUE, trace=FALSE, maxit=1)
    mod1$call$data <- dta
    mod1$call$formula <- formula1
    mod1$coeff <- mod$coeff
    mod1$coeff.V <- mod$coeff.V
    class(mod1) <- c("fakemultinom", class(mod1))
  }
  mod1
}

# coef() method for the 'fakemultinom' objects made by poLCA.to.fake. Do not export
coef.fakemultinom <- function(mod){
  coef <- t(mod$coeff)
  dimnames(coef) <- list(mod$lab[-1L], mod$vcoefnames)
  coef
}

# vcov() method for 'fakemultinom' objects. Do not export
vcov.fakemultinom <- function(mod){mod$coeff.V}
effects/R/effectsclmm.R0000644000176200001440000000416012536037056014515 0ustar liggesusers
# 2014-12-11 Effects plots for ordinal and ordinal mixed models from the 'ordinal' package
# 2014-12-11 effect.clm built from effect.mer as modified 2014-12-07, by S. Weisberg
# 2015-06-10: requireNamespace("MASS") rather than require("MASS)

# Translate an ordinal::clm2 fit into a MASS::polr-like object (class 'fakeclm2')
# so the existing effect()/Effect() machinery for polr objects can be reused.
# Only the default model can be translated: flexible thresholds, logistic link,
# and no 'scale' or 'nominal' components; anything else is an error.
clm2.to.polr <- function(mod) {
  if (requireNamespace("MASS", quietly=TRUE)){
    polr <- MASS::polr
  } else stop("The MASS package is needed for this function")
  cl <- mod$call
  present <- match(c("scale", "nominal", "link", "threshold"), names(cl), 0L)
  if(any(present != 0)) {
    # bug fix: error message previously read 'logisitic'
    if(present[3] != 0){if(cl$link != "logistic") stop("'link' must be 'logistic' for use with effects")}
    if(present[4] != 0){if(cl$threshold != "flexible") stop("'threshold' must be 'flexible' for use with effects")}
    if(present[1] != 0){if(!is.null(cl$scale)) stop("'scale' must be NULL for use with effects")}
    if(present[2] != 0){if(!is.null(cl$nominal)) stop("'nominal' must be NULL for use with effects")}
  }
  if(is.null(mod$Hessian)){
    message("\nRe-fitting to get Hessian\n")
    mod <- update(mod, Hess=TRUE)
  }
  # rebuild the clm2 call as a polr() call, started at the clm2 estimates
  cl$formula <- cl$location
  cl$method <- cl$link
  m <- match(c("formula", "data", "subset","weights", "na.action",
               "contrasts", "method"), names(cl), 0L)
  cl <- cl[c(1L, m)]
  cl$start <- c(mod$beta, mod$Theta)
  cl[[1L]] <- as.name("polr")
  mod2 <- eval(cl)
  mod2$coefficients <- mod$beta
  # reorder the clm2 covariance matrix (thresholds first) to polr order (betas first)
  numTheta <- length(mod$Theta)
  numBeta <- length(mod$beta)
  or <- c( (numTheta+1):(numTheta + numBeta), 1:(numTheta))
  mod2$vcov <- as.matrix(vcov(mod)[or, or])
  class(mod2) <- c("fakeclm2", class(mod2))
  mod2
}

# vcov() method for 'fakeclm2' objects. Do not export
vcov.fakeclm2 <- function(object, ...) object$vcov

# The next three functions should be exported
effect.clm2 <- function(term, mod, ...) {
  # bug fix: forward ... so arguments such as xlevels or given.values reach effect()
  effect(term, clm2.to.polr(mod), ...)
}

allEffects.clm2 <- function(mod, ...){
  allEffects(clm2.to.polr(mod), ...)
}

Effect.clm2 <- function(focal.predictors, mod, ...){
  # bug fix: forward ... (was dropped), consistent with allEffects.clm2
  Effect(focal.predictors, clm2.to.polr(mod), ...)
}
effects/R/effectsmer.R0000644000176200001440000001606712647513565014360 0ustar liggesusers
# effect.mer and effect.lme built from effect.lm by S. Weisberg 29 June 2011
# last modified 2012-03-08 to require() lme4 or nlme. J. Fox
# 2012-10-05 effect.lme didn't work with 'weights', now corrected. S. Weisberg
# 2013-03-05: introduced merMod methods for development version of lme4. J. Fox
# 2013-04-06: added support for lme4.0, J. Fox
# 2013-07-30: added 'data' argument to lme.to.glm and mer.to.glm to allow
# calling effect from within a subroutine.
# 2013-09-25: removed the 'data' argument as it make the functions fail with
# logs, splines and polynomials
# 2014-09-24: added option for KR cov matrix to mer.to.glm(). J. Fox
# 2014-12-07: don't assume that pbkrtest is installed. J. Fox
# 2014-12-20: mer.to.glm failed for negative.binomial() because the link has an argument
# that was handled incorrectly by the family.glmResp function. This function is no longer
# used by mer.to.glm. The same error will recur in any link with an argument.
# 2015-06-10: requireNamespace("pbkrtest") rather than require("pbkrtest)
# 2015-07-02: fixed bug when the name of the data frame was the name of a function (e.g., sort, or lm)
# 2015-12-13: make it work with pbkrtest 0.4-3. J. Fox
# 2016-01-07: modified 'fixmod' to allow "||" in variance formulae
# 2016-01-19: Fixed bug in glm.to.mer when 'poly' is used in a model.
# the function lm.wfit fit gets the hessian wrong for mer's. Get the variance
# from the vcov method applied to the mer object.

# Strip random-effect terms -- (... | g) and (... || g) -- from a mixed-model
# formula, returning the fixed-effects-only part for use in a glm() call.
# Returns NULL for a (sub)term that is entirely a random effect.
fixmod <- function (term)
{
    if (!("|" %in% all.names(term)) && !("||" %in% all.names(term)))
        return(term)
    if ((is.call(term) && term[[1]] == as.name("|")) ||
        (is.call(term) && term[[1]] == as.name("||")))
        return(NULL)
    if (length(term) == 2) {
        # unary call (e.g. parentheses): recurse into its single argument
        nb <- fixmod(term[[2]])
        if (is.null(nb))
            return(NULL)
        term[[2]] <- nb
        return(term)
    }
    nb2 <- fixmod(term[[2]])
    nb3 <- fixmod(term[[3]])
    if (is.null(nb2))
        return(nb3)
    if (is.null(nb3))
        return(nb2)
    term[[2]] <- nb2
    term[[3]] <- nb3
    term
}

# lme.to.glm evaluates a 'glm' model that is as similar to a given 'lme'
# model, in the same pattern as mer.to.glm. This could be speeded up
# slightly by using 'lm' rather than 'glm' but I use 'glm' to parallel
# mer.to.glm more closely. The differences are: (1) match fewer args
# in the call; (2) different def of mod2$coefficients; no other
# changes
# The argument 'data' to lme.to.glm and to mer.to.glm copies the data
# from the object into the local environment and makes it visible when 'effect'
# is called from within another function.
# Evaluate a glm() that mimics a given nlme::lme fit, returning an object of
# class c("fakeglm", "glm", "lm") whose coefficients and vcov come from the
# lme fixed effects. Do not export.
lme.to.glm <- function(mod) {
  cl <- mod$call
  cl$formula <- cl$fixed
  m <- match(c("formula", "data", "subset", "na.action", "contrasts"), names(cl), 0L)
  cl <- cl[c(1L, m)]
  cl[[1L]] <- as.name("glm")
  mod2 <- eval(cl)
  # carry over variance-function weights from the lme fit, if any
  pw <- attr(mod$modelStruct$varStruct, "weights")
  if(!is.null(pw)) mod2$prior.weights <- pw
  # replace the glm estimates with the lme fixed effects and their covariance,
  # and recompute the quantities that depend on them
  mod2$coefficients <- mod$coefficients$fixed
  mod2$vcov <- as.matrix(vcov(mod))
  mod2$linear.predictors <- model.matrix(mod2) %*% mod2$coefficients
  mod2$fitted.values <- mod2$family$linkinv(mod2$linear.predictors)
  mod2$weights <- as.vector(with(mod2, prior.weights *
      (family$mu.eta(linear.predictors)^2 / family$variance(fitted.values))))
  mod2$residuals <- with(mod2, prior.weights * (y - fitted.values)/weights )
  class(mod2) <- c("fakeglm", class(mod2))
  mod2
}

# mer.to.glm evaluates a 'glm' model that is as similar to a given 'mer'
# model as follows. It is of class c("fakeglm", "glm", "lm")
# several items are added to the created objects. Do not export
mer.to.glm <- function(mod, KR=FALSE) {
  # Kenward-Roger covariance needs pbkrtest; fall back gracefully if absent
  if (KR && !requireNamespace("pbkrtest", quietly=TRUE)){
    KR <- FALSE
    warning("pbkrtest is not available, KR set to FALSE")
  }
  # object$family$family doesn't work correctly with the negative binomial family because of the
  # argument in the family function, so the old line
  #    family <- family(mod)
  # returns an error message for these models. The following kluge fixes this.
  # If this bug is fixed in lme4, this code may break because it expects resp$family$family
  # to return "Link Name(arg)" with ONE argument, and so spaces between Name and "(arg)"
  family1 <- function(object, ...) {UseMethod("family1", object@resp)}
  family1.lmResp <- function(object, ...) family(object, ...)
  family1.glmResp <- function(object, ...){
    famname <- object@resp$family$family
    open.paren <- regexpr("\\(", famname)
    if(open.paren==-1) {
      name <- famname
      arg <- list()
    } else {
      # e.g. "Negative Binomial(1.5)" -> name "negative.binomial", arg list(1.5)
      name <- sub(" ", ".", tolower(substr(famname, 1, -1 + open.paren)))
      arg <- list(as.numeric(gsub("\\)", "", substr(famname, 1 + open.paren, 100))))
    }
    if(is.null(object@resp$family$initialize)) do.call(name, arg) else
      object@resp$family
  }
  family <- family1(mod)
  # end
  link <- family$link
  family <- family$family
  cl <- mod@call
  if(cl[[1]] =="nlmer") stop("effects package does not support 'nlmer' objects")
  m <- match(c("formula", "family", "data", "weights", "subset", "na.action",
               "start", "offset", "model", "contrasts"), names(cl), 0L)
  cl <- cl[c(1L, m)]
  cl[[1L]] <- as.name("glm")
  # drop the random-effect terms so glm() can evaluate the formula
  cl$formula <- fixmod(as.formula(cl$formula))
  # cl$data <- mod@frame # caused bug with a 'poly' in the formula
  mod2 <- eval(cl)
  # replace the glm estimates with the mixed-model fixed effects and covariance
  mod2$coefficients <- lme4::fixef(mod) #mod@fixef
  mod2$vcov <- if (family == "gaussian" && link == "identity" && KR)
    as.matrix(pbkrtest::vcovAdj(mod)) else as.matrix(vcov(mod))
  mod2$linear.predictors <- model.matrix(mod2) %*% mod2$coefficients
  mod2$fitted.values <- mod2$family$linkinv(mod2$linear.predictors)
  mod2$weights <- as.vector(with(mod2, prior.weights *
      (family$mu.eta(linear.predictors)^2 / family$variance(fitted.values))))
  mod2$residuals <- with(mod2, prior.weights * (y - fitted.values)/weights )
  class(mod2) <- c("fakeglm", class(mod2))
  mod2
}

# vcov() method for 'fakeglm' objects. Do not export
vcov.fakeglm <- function(object, ...) object$vcov

# The next six functions should be exported as S3 methods
effect.mer <- function(term, mod, vcov.=vcov, KR=FALSE, ...)
{
  result <- effect(term, mer.to.glm(mod, KR=KR), vcov., ...)
  result$formula <- as.formula(formula(mod))
  result
}

effect.merMod <- function(term, mod, vcov.=vcov, KR=FALSE, ...){
  # bug fix: forward the user-supplied vcov. function (was hard-coded to vcov,
  # silently ignoring a vcov. argument passed by the caller)
  effect.mer(term, mod, vcov.=vcov., KR=KR, ...)
}

effect.lme <- function(term, mod, ...)
{ mod1 <- lme.to.glm(mod) result <- effect(term, mod1) result$formula <- as.formula(formula(mod)) result } allEffects.mer <- function(mod, KR=FALSE,...){ allEffects(mer.to.glm(mod,KR=KR), ...) } allEffects.merMod <- function(mod, KR=FALSE,...){ allEffects(mer.to.glm(mod,KR=KR), ...) } allEffects.lme <- function(mod, ...){ allEffects(lme.to.glm(mod), ...) } effects/R/plot.effpoly.R0000644000176200001440000005541112503556754014660 0ustar liggesusers# Plot method for effpoly objects # modified by Michael Friendly: added ci.style="bands" & alpha.band= arg # modified by Michael Friendly: added lwd= argument for llines (was lwd=2) # 2013-11-06: fixed drop dimension when only one focal predictor. John # 2014-10-10: namespace fixes. John # 2014-12-05: made key.args more flexible. John # 2014-03-22: use wide columns by default only when x for legend not set. J. Fox plot.effpoly <- function(x, type=c("probability", "logit"), x.var=which.max(levels), rug=TRUE, xlab, ylab=paste(x$response, " (", type, ")", sep=""), main=paste(effect, "effect plot"), colors, symbols, lines, cex=1.5, lwd=2, factor.names=TRUE, ci.style, band.colors, band.transparency=0.3, style=c("lines", "stacked"), confint=(style == "lines" && !is.null(x$confidence.level)), transform.x=NULL, ticks.x=NULL, xlim=NULL, ylim, rotx=0, alternating=TRUE, roty=0, grid=FALSE, layout, key.args=NULL, row=1, col=1, nrow=1, ncol=1, more=FALSE, use.splines=TRUE, ...){ ci.style <- if(missing(ci.style)) NULL else match.arg(ci.style, c("bars", "lines", "bands", "none")) type <- match.arg(type) style <- match.arg(style) effect.llines <- llines has.se <- !is.null(x$confidence.level) if (confint && !has.se) stop("there are no confidence limits to plot") if (style == "stacked"){ if (type != "probability"){ type <- "probability" warning('type set to "probability" for stacked plot') } if (confint){ confint <- FALSE warning('confint set to FALSE for stacked plot') } } if (missing(colors)){ if (style == "stacked"){ colors <- if 
(x$model == "multinom") rainbow_hcl(length(x$y.levels)) else sequential_hcl(length(x$y.levels)) } else colors <- palette() } if (missing(band.colors)) band.colors <- colors if (missing(symbols)) symbols <- 1:length(colors) if (missing(lines)) lines <- 1:length(colors) .mod <- function(a, b) ifelse( (d <- a %% b) == 0, b, d) .modc <- function(a) .mod(a, length(colors)) .mods <- function(a) .mod(a, length(symbols)) .modl <- function(a) .mod(a, length(lines)) effect <- paste(sapply(x$variables, "[[", "name"), collapse="*") split <- c(col, row, ncol, nrow) n.predictors <- length(names(x$x)) y.lev <- x$y.lev n.y.lev <- length(y.lev) ylevel.names <- make.names(paste("prob",y.lev)) colnames(x$prob) <- colnames(x$logit) <- colnames(x$lower.logit) <- colnames(x$upper.logit) <- colnames(x$lower.prob) <- colnames(x$upper.prob)<- ylevel.names x.frame <-as.data.frame(x) predictors <- names(x.frame)[1:n.predictors] levels <- if (n.predictors==1) length (x.frame[,predictors]) else sapply(apply(x.frame[, predictors, drop=FALSE], 2, unique), length) if (is.character(x.var)) { which.x <- which(x.var == predictors) if (length(which.x) == 0) stop(paste("x.var = '", x.var, "' is not in the effect.", sep="")) x.var <- which.x } x.vals <- x.frame[, names(x.frame)[x.var]] response <-matrix(0, nrow=nrow(x.frame), ncol=n.y.lev) for (i in 1:length(x$y.lev)){ level <- which(colnames(x$prob)[i] == ylevel.names) response[,i] <- rep(x$y.lev[level], length(response[,i])) } prob <- as.vector(x$prob) logit <- as.vector(x$logit) response <- as.vector(response) if (has.se){ lower.prob <- as.vector(x$lower.prob) upper.prob <- as.vector(x$upper.prob) lower.logit <- as.vector(x$lower.logit) upper.logit <- as.vector(x$upper.logit) } response <- factor(response, levels=y.lev) data <- data.frame(prob, logit) if (has.se) data <- cbind(data, data.frame(lower.prob, upper.prob, lower.logit, upper.logit)) data[[x$response]] <- response for (i in 1:length(predictors)){ data <-cbind(data, x.frame[predictors[i]]) 
} levs <- levels(x$data[[predictors[x.var]]]) n.predictor.cats <- sapply(data[, predictors[-c(x.var)], drop=FALSE], function(x) length(unique(x))) if (length(n.predictor.cats) == 0) n.predictor.cats <- 1 ci.style <- if(is.null(ci.style)) { if(is.factor(x$data[[predictors[x.var]]])) "bars" else "bands"} else ci.style if( ci.style=="none" ) confint <- FALSE ### no confidence intervals if confint == FALSE or ci.style=="none" if (!confint){ # plot without confidence bands layout <- if (missing(layout)){ lay <- c(prod(n.predictor.cats[-(n.predictors - 1)]), prod(n.predictor.cats[(n.predictors - 1)]), 1) if (lay[1] > 1) lay else lay[c(2, 1, 3)] } else layout if (style == "lines"){ # line plot if (n.y.lev > min(c(length(colors), length(lines), length(symbols)))) warning('Colors, lines and symbols may have been recycled') if (is.factor(x$data[[predictors[x.var]]])){ # x-variable a factor key <- list(title=x$response, cex.title=1, border=TRUE, text=list(as.character(unique(response))), lines=list(col=colors[.modc(1:n.y.lev)], lty=lines[.modl(1:n.y.lev)], lwd=lwd), points=list(pch=symbols[.mods(1:n.y.lev)], col=colors[.modc(1:n.y.lev)]), columns = if ("x" %in% names(key.args)) 1 else find.legend.columns(n.y.lev)) for (k in names(key.args)) key[k] <- key.args[k] result <- xyplot(eval(if (type=="probability") parse(text=if (n.predictors==1) paste("prob ~ as.numeric(", predictors[x.var], ")") else paste("prob ~ as.numeric(", predictors[x.var],") | ", paste(predictors[-x.var], collapse="*"))) else parse(text=if (n.predictors==1) paste("logit ~ as.numeric(", predictors[x.var], ")") else paste("logit ~ as.numeric(", predictors[x.var],") | ", paste(predictors[-x.var], collapse="*")))), strip=function(...) 
strip.default(..., strip.names=c(factor.names, TRUE)), panel=function(x, y, subscripts, rug, z, x.vals, ...){ if (grid) panel.grid() for (i in 1:n.y.lev){ sub <- z[subscripts] == y.lev[i] good <- !is.na(y[sub]) effect.llines(x[sub][good], y[sub][good], lwd=lwd, type="b", col=colors[.modc(i)], lty=lines[.modl(i)], pch=symbols[i], cex=cex, ...) } }, ylab=ylab, ylim= if (missing(ylim)) if (type == "probability") range(prob) else range(logit) else ylim, xlab=if (missing(xlab)) predictors[x.var] else xlab, x.vals=x$data[[predictors[x.var]]], rug=rug, z=response, scales=list(x=list(at=1:length(levs), labels=levs, rot=rotx), y=list(rot=roty), alternating=alternating), main=main, # key=c(key, key.args), key=key, layout=layout, data=data, ...) result$split <- split result$more <- more class(result) <- c("plot.eff", class(result)) } else { # x-variable numeric if(use.splines) effect.llines <- spline.llines # added 10/17/13 nm <- predictors[x.var] x.vals <- x$data[[nm]] if (nm %in% names(ticks.x)){ at <- ticks.x[[nm]]$at n <- ticks.x[[nm]]$n } else{ at <- NULL n <- 5 } xlm <- if (nm %in% names(xlim)){ xlim[[nm]] } else range.adj(data[nm]) # range(x.vals) tickmarks.x <- if ((nm %in% names(transform.x)) && !(is.null(transform.x))){ trans <- transform.x[[nm]]$trans make.ticks(trans(xlm), link=transform.x[[nm]]$trans, inverse=transform.x[[nm]]$inverse, at=at, n=n) } else { trans <- I make.ticks(xlm, link=I, inverse=I, at=at, n=n) } key <- list(title=x$response, cex.title=1, border=TRUE, text=list(as.character(unique(response))), lines=list(col=colors[.modc(1:n.y.lev)], lty=lines[.modl(1:n.y.lev)], lwd=lwd), columns = if ("x" %in% names(key.args)) 1 else find.legend.columns(n.y.lev)) for (k in names(key.args)) key[k] <- key.args[k] result <- xyplot(eval(if (type=="probability") parse(text=if (n.predictors==1) paste("prob ~ trans(", predictors[x.var], ")") else paste("prob ~ trans(", predictors[x.var],") |", paste(predictors[-x.var], collapse="*"))) else parse(text=if 
(n.predictors==1) paste("logit ~ trans(", predictors[x.var], ")") else paste("logit ~ trans(", predictors[x.var],") | ", paste(predictors[-x.var], collapse="*")))), strip=function(...) strip.default(..., strip.names=c(factor.names, TRUE)), panel=function(x, y, subscripts, rug, z, x.vals, ...){ if (grid) panel.grid() if (rug) lrug(trans(x.vals)) for (i in 1:n.y.lev){ sub <- z[subscripts] == y.lev[i] good <- !is.na(y[sub]) effect.llines(x[sub][good], y[sub][good], lwd=lwd, type="l", col=colors[.modc(i)], lty=lines[.modl(i)], ...) } }, ylab=ylab, xlim=suppressWarnings(trans(xlm)), ylim= if (missing(ylim)) if (type == "probability") range(prob) else range(logit) else ylim, xlab=if (missing(xlab)) predictors[x.var] else xlab, x.vals=x$data[[predictors[x.var]]], rug=rug, z=response, scales=list(x=list(at=tickmarks.x$at, labels=tickmarks.x$labels, rot=rotx), y=list(rot=roty), alternating=alternating), main=main, # key=c(key, key.args), key=key, layout=layout, data=data, ...) result$split <- split result$more <- more class(result) <- c("plot.eff", class(result)) } } else { # stacked plot if (n.y.lev > length(colors)) stop(paste('Not enough colors to plot', n.y.lev, 'regions')) key <- list(text=list(lab=rev(y.lev)), rectangle=list(col=rev(colors[1:n.y.lev]))) for (k in names(key.args)) key[k] <- key.args[k] if (is.factor(x$data[[predictors[x.var]]])){ # x-variable a factor result <- barchart(eval(parse(text=if (n.predictors == 1) paste("prob ~ ", predictors[x.var], sep="") else paste("prob ~ ", predictors[x.var]," | ", paste(predictors[-x.var], collapse="*")))), strip=function(...) 
strip.default(..., strip.names=c(factor.names, TRUE)), groups = response, col=colors, horizontal=FALSE, stack=TRUE, data=data, ylim=if (missing(ylim)) 0:1 else ylim, ylab=ylab, xlab=if (missing(xlab)) predictors[x.var] else xlab, scales=list(x=list(rot=rotx), y=list(rot=roty), alternating=alternating), main=main, # key=c(key, key.args), key=key, layout=layout) result$split <- split result$more <- more class(result) <- c("plot.eff", class(result)) } else { # x-variable numeric if(use.splines) effect.llines <- spline.llines # added 10/17/13 nm <- predictors[x.var] x.vals <- x$data[[nm]] if (nm %in% names(ticks.x)){ at <- ticks.x[[nm]]$at n <- ticks.x[[nm]]$n } else{ at <- NULL n <- 5 } xlm <- if (nm %in% names(xlim)){ xlim[[nm]] } else range.adj(data[nm]) # range(x.vals) tickmarks.x <- if ((nm %in% names(transform.x)) && !(is.null(transform.x))){ trans <- transform.x[[nm]]$trans make.ticks(trans(xlm), link=transform.x[[nm]]$trans, inverse=transform.x[[nm]]$inverse, at=at, n=n) } else { trans <- I make.ticks(xlm, link=I, inverse=I, at=at, n=n) } result <- densityplot(eval(parse(text=if (n.predictors == 1) paste("~ trans(", predictors[x.var], ")", sep="") else paste("~ trans(", predictors[x.var], ") | ", paste(predictors[-x.var], collapse="*")))), probs=x$prob, strip=function(...) 
strip.default(..., strip.names=c(factor.names, TRUE)), panel = function(x, subscripts, rug, x.vals, probs=probs, col=colors, ...){ fill <- function(x, y1, y2, col){ if (length(y2) == 1) y2 <- rep(y2, length(y1)) if (length(y1) == 1) y1 <- rep(y1, length(y2)) panel.polygon(c(x, rev(x)), c(y1, rev(y2)), col=col) } n <- ncol(probs) Y <- t(apply(probs[subscripts,], 1, cumsum)) fill(x, 0, Y[,1], col=col[1]) for (i in 2:n){ fill(x, Y[,i-1], Y[,i], col=col[i]) } if (rug) lrug(trans(x.vals)) }, rug=rug, x.vals=x$data[[predictors[x.var]]], data=x$x, xlim=suppressWarnings(trans(xlm)), ylim=if (missing(ylim)) 0:1 else ylim, ylab=ylab, xlab=if (missing(xlab)) predictors[x.var] else xlab, scales=list(x=list(at=tickmarks.x$at, labels=tickmarks.x$labels, rot=rotx), y=list(rot=roty), alternating=alternating), main=main, # key=c(key, key.args), key=key, layout=layout, ...) result$split <- split result$more <- more class(result) <- c("plot.eff", class(result)) } } } ### with confidence bands else{ # plot with confidence bands layout <- if(missing(layout)) c(prod(n.predictor.cats), length(levels(response)), 1) else layout if (type == "probability"){ lower <- lower.prob upper <- upper.prob } else { lower <- lower.logit upper <- upper.logit } ### factor if (is.factor(x$data[[predictors[x.var]]])){ # x-variable a factor levs <- levels(x$data[[predictors[x.var]]]) result <- xyplot(eval(if (type=="probability") parse(text=if (n.predictors==1) paste("prob ~ as.numeric(", predictors[x.var],") |", x$response) else paste("prob ~ as.numeric(", predictors[x.var],") |", paste(predictors[-x.var], collapse="*"), paste("*", x$response))) else parse(text=if (n.predictors==1) paste("logit ~ as.numeric(", predictors[x.var],") |", x$response) else paste("logit ~ as.numeric(", predictors[x.var],")|", paste(predictors[-x.var], collapse="*"), paste("*", x$response)))), par.strip.text=list(cex=0.8), strip=function(...) 
strip.default(..., strip.names=c(factor.names, TRUE)), panel=function(x, y, subscripts, x.vals, rug, lower, upper, ... ){ if (grid) panel.grid() good <- !is.na(y) effect.llines(x[good], y[good], lwd=lwd, type="b", pch=19, col=colors[1], cex=cex, ...) subs <- subscripts+as.numeric(rownames(data)[1])-1 if (ci.style == "bars"){ larrows(x0=x[good], y0=lower[subs][good], x1=x[good], y1=upper[subs][good], angle=90, code=3, col=colors[.modc(2)], length=0.125*cex/1.5) } else if(ci.style == "lines"){ effect.llines(x[good], lower[subs][good], lty=2, col=colors[.modc(2)]) effect.llines(x[good], upper[subs][good], lty=2, col=colors[.modc(2)]) } else { if(ci.style == "bands") { panel.bands(x[good], y[good], lower[subs][good], upper[subs][good], fill=band.colors[1], alpha=band.transparency) }} }, ylab=ylab, ylim= if (missing(ylim)) c(min(lower), max(upper)) else ylim, xlab=if (missing(xlab)) predictors[x.var] else xlab, main=main, x.vals=x$data[[predictors[x.var]]], rug=rug, lower=lower, upper=upper, scales=list(x=list(at=1:length(levs), labels=levs, rot=rotx), y=list(rot=roty), alternating=alternating), layout=layout, data=data, ...) 
result$split <- split result$more <- more class(result) <- c("plot.eff", class(result)) } else { # x-variable numeric if(use.splines) effect.llines <- spline.llines # added 10/17/13 nm <- predictors[x.var] x.vals <- x$data[[nm]] if (nm %in% names(ticks.x)){ at <- ticks.x[[nm]]$at n <- ticks.x[[nm]]$n } else{ at <- NULL n <- 5 } xlm <- if (nm %in% names(xlim)){ xlim[[nm]] } else range.adj(data[nm]) # range(x.vals) tickmarks.x <- if ((nm %in% names(transform.x)) && !(is.null(transform.x))){ trans <- transform.x[[nm]]$trans make.ticks(trans(xlm), link=transform.x[[nm]]$trans, inverse=transform.x[[nm]]$inverse, at=at, n=n) } else { trans <- I make.ticks(xlm, link=I, inverse=I, at=at, n=n) } result <- xyplot(eval(if (type=="probability") parse(text=if (n.predictors==1) paste("prob ~ trans(", predictors[x.var],") |", x$response) else paste("prob ~ trans(", predictors[x.var],") |", paste(predictors[-x.var], collapse="*"), paste("*", x$response))) else parse(text=if (n.predictors==1) paste("logit ~ trans(", predictors[x.var],") |", x$response) else paste("logit ~ trans(", predictors[x.var],") |", paste(predictors[-x.var], collapse="*"), paste("*", x$response))) ), par.strip.text=list(cex=0.8), strip=function(...) strip.default(..., strip.names=c(factor.names, TRUE)), panel=function(x, y, subscripts, x.vals, rug, lower, upper, ... ){ if (grid) panel.grid() if (rug) lrug(trans(x.vals)) good <- !is.na(y) effect.llines(x[good], y[good], lwd=lwd, col=colors[1], ...) 
subs <- subscripts+as.numeric(rownames(data)[1])-1 if (ci.style == "bars"){ larrows(x0=x[good], y0=lower[subs][good], x1=x[good], y1=upper[subs][good], angle=90, code=3, col=colors[.modc(2)], length=0.125*cex/1.5) } else if(ci.style == "lines"){ effect.llines(x[good], lower[subs][good], lty=2, col=colors[.modc(2)]) effect.llines(x[good], upper[subs][good], lty=2, col=colors[.modc(2)]) } else { if(ci.style == "bands") { panel.bands(x[good], y[good], lower[subs][good], upper[subs][good], fill=band.colors[1], alpha=band.transparency) }} }, ylab=ylab, xlim=suppressWarnings(trans(xlm)), ylim= if (missing(ylim)) c(min(lower), max(upper)) else ylim, xlab=if (missing(xlab)) predictors[x.var] else xlab, main=main, x.vals=x$data[[predictors[x.var]]], rug=rug, lower=lower, upper=upper, scales=list(y=list(rot=roty), x=list(at=tickmarks.x$at, labels=tickmarks.x$labels, rot=rotx), alternating=alternating), layout=layout, data=data, ...) result$split <- split result$more <- more class(result) <- c("plot.eff", class(result)) } } result } effects/R/utilities.R0000644000176200001440000006274612647513565014265 0ustar liggesusers# utilities and common functions for effects package # John Fox, Jangman Hong, and Sanford Weisberg # 7-25-2013 S. Weisberg modified analyze.model and Analyze.model to ignore # default.levels, and use xlevels to set default. Use grid.pretty by default # 11-09-2013: fixed error message in Analyze.model(), bug reported by Joris Meys. J. Fox # 2013-10-15: eliminated functions not needed after effect() methods removed. J. Fox # 2013-10-29: fixed as.data.frame.*() to handle NA levels. J. Fox # 2014-03-13: modified Fixup.model.matrix() and Analyze.model() to handle partial residuals; # added is.factor.predictor() and is.numeric.predictor(). J. Fox # 2014-03-14: error message for non-factor, non-numeric predictor # 2014-07-08: if no numeric predictor, partial residuals suppressed with warning rather than an error # 2014-10-09: namespace fixes. J. 
# Utility functions for the effects package (effects/R/utilities.R, part 1).
# 2015-04-08: added setStrip(), restoreStrip().  J. Fox
# 2015-07-07: fixed matchVarName() so that it handles periods in names properly.  J. Fox
# 2015-09-10: added a fix for class = 'array' in Analyze.model.  S. Weisberg

# Does the fitted model contain an "(Intercept)" coefficient?
has.intercept <- function(model, ...) any(names(coefficients(model)) == "(Intercept)")

# Term labels of the model with embedded blanks removed, prepending
# "(Intercept)" when the model has one.
term.names <- function(model, ...) {
  term.names <- gsub(" ", "", labels(terms(model)))
  if (has.intercept(model)) c("(Intercept)", term.names)
  else term.names
}

# Name of the model's response variable, as a character string.
response.name <- function(model, ...) deparse(attr(terms(model), "variables")[[2]])

# Number of rows and columns for an array of n plots; errors if n exceeds
# max.plots (0 means "no maximum").
mfrow <- function(n, max.plots = 0) {
  # scalar condition: use && (was the elementwise &)
  if (max.plots != 0 && n > max.plots)
    stop(paste("number of plots =", n, " exceeds maximum =", max.plots))
  rows <- round(sqrt(n))
  cols <- ceiling(n/rows)
  c(rows, cols)
}

# Modified version of the R base function: rebuilds the model frame with the
# variables named in extras added to the right-hand side, optionally
# re-applying the original na.action and subset.
expand.model.frame <- function(model, extras, envir = environment(formula(model)),
                               na.expand = FALSE) {
  f <- formula(model)
  data <- eval(model$call$data, envir)
  ff <- foo ~ bar + baz   # template formula whose pieces are replaced below
  if (is.call(extras)) gg <- extras
  else gg <- parse(text = paste("~", paste(extras, collapse = "+")))[[1]]
  ff[[2]] <- f[[2]]
  ff[[3]][[2]] <- f[[3]]
  ff[[3]][[3]] <- gg[[2]]
  if (!na.expand) {
    naa <- model$call$na.action
    subset <- model$call$subset
    rval <- if (is.null(data)) eval(call("model.frame", ff,                 # modified
                                         subset = subset, na.action = naa), envir) # lines
    else eval(call("model.frame", ff, data = data,                          #
                   subset = subset, na.action = naa), envir)                #
  }
  else {
    subset <- model$call$subset
    rval <- eval(call("model.frame", ff, data = data, subset = subset,
                      na.action = I), envir)
    oldmf <- model.frame(model)
    keep <- match(rownames(oldmf), rownames(rval))
    rval <- rval[keep, ]
    class(rval) <- "data.frame"
  }
  return(rval)
}

# Is term1 marginal to term2, according to the terms "factors" matrix?
is.relative <- function(term1, term2, factors) {
  all(!(factors[, term1] & (!factors[, term2])))
}

# Indices of the terms that are higher-order relatives of term.  If term is
# not itself in the model, the ... argument is expected to carry an augmented
# formula/terms object (see effect.default), and terms(...) is used instead.
descendants <- function(term, mod, ...) {
  names <- term.names(mod)
  if (has.intercept(mod)) names <- names[-1]
  if (length(names) == 1) return(NULL)
  which.term <- which(term == names)
  if (length(which.term) == 0) {
    factors <- attr(terms(...), "factors")
    rownames(factors) <- gsub(" ", "", rownames(factors))
    colnames(factors) <- gsub(" ", "", colnames(factors))
    (1:length(names))[sapply(names, function(term2) is.relative(term, term2, factors))]
  }
  else {
    factors <- attr(terms(mod), "factors")
    rownames(factors) <- gsub(" ", "", rownames(factors))
    colnames(factors) <- gsub(" ", "", colnames(factors))
    (1:length(names))[-which.term][sapply(names[-which.term],
                                          function(term2) is.relative(term, term2, factors))]
  }
}

# A term is of the highest order in the model if it has no descendants.
is.high.order.term <- function(term, mod, ...) {
  0 == length(descendants(term, mod, ...))
}

# Convert a 1-based linear index into array subscripts for dimensions dims
# (column-major, like arrayInd but recursive).
subscripts <- function(index, dims) {
  subs <- function(dims, index) {
    dim <- length(dims)
    if (dim == 0) return(NULL)
    cum <- c(1, cumprod(dims))[dim]
    i <- index %/% cum
    if (index %% cum != 0) i <- i + 1
    c(i, subs(dims[-dim], index - (i - 1)*cum))
  }
  rev(subs(dims, index))
}

# Convert a character matrix to a data frame, coercing each column according
# to colclasses ("numeric"/"integer", "ordered", "factor", else character).
# Respects the OutDec option when parsing numbers.
matrix.to.df <- function(matrix, colclasses) {
  opt <- options(warn = -1)   # suppress coercion warnings
  on.exit(options(opt))
  ncol <- ncol(matrix)
  colnames <- colnames(matrix)
  colclasses[sapply(colclasses, function(x) "integer" %in% x)] <- "numeric"
  result <- vector(mode = "list", length = ncol)
  names(result) <- colnames
  for (j in 1:ncol) {
    result[[j]] <- matrix[, j]
    class <- colclasses[[colnames[j]]]
    result[[colnames[j]]] <- if ("numeric" %in% class) {
      decChar <- getOption('OutDec')
      if (decChar == '.') as.numeric(result[[colnames[j]]])
      else as.numeric(gsub(decChar, '.', matrix[, j]))
    }
    else if ("ordered" %in% class) ordered(result[[colnames[j]]])
    else if ("factor" %in% class) factor(result[[colnames[j]]])
    else result[[colnames[j]]]
  }
  as.data.frame(result)
}

# The following function is a modification of code contributed by Steve Taylor.
# Turn an "eff" object into a data frame of the predictor grid plus the fit
# (and se/lower/upper when available), applying the inverse transformation.
as.data.frame.eff <- function(x, row.names = NULL, optional = TRUE,
                              transform = x$transformation$inverse, ...) {
  xx <- x$x
  for (var in names(xx)) {
    if (is.factor(xx[[var]])) {
      xx[[var]] <- addNA(xx[[var]])  # handle factors with "valid" NA level
    }
  }
  x$x <- xx
  result <- if (is.null(x$se)) data.frame(x$x, fit = transform(x$fit))
  else data.frame(x$x, fit = transform(x$fit), se = x$se,
                  lower = transform(x$lower), upper = transform(x$upper))
  attr(result, "transformation") <- transform
  result
}

# Data-frame conversion for "effpoly" (polytomous-response) effect objects:
# predictor grid plus fitted probabilities and logits (and their se's and
# confidence limits when a confidence level was set).
as.data.frame.effpoly <- function(x, row.names = NULL, optional = TRUE, ...) {
  factors <- sapply(x$variables, function(x) x$is.factor)
  factor.levels <- lapply(x$variables[factors], function(x) x$levels)
  if (!length(factor.levels) == 0) {
    factor.names <- names(factor.levels)
    for (fac in factor.names) {
      x$x[[fac]] <- factor(x$x[[fac]], levels = factor.levels[[fac]], exclude = NULL)
    }
  }
  result <- data.frame(x$x, x$prob, x$logit)
  if (!is.null(x$confidence.level)) result <- cbind(result,
                                                    x$se.prob, x$se.logit,
                                                    x$lower.prob, x$upper.prob,
                                                    x$lower.logit, x$upper.logit)
  result
}

# Data-frame conversion for "efflatent" effect objects.
as.data.frame.efflatent <- function(x, row.names = NULL, optional = TRUE, ...) {
  xx <- x$x
  for (var in names(xx)) {
    # bug fix: was xx$var, which looked for a literal column named "var"
    # rather than the variable named by the loop index
    if (is.factor(xx[[var]])) {
      xx[[var]] <- addNA(xx[[var]])  # handle factors with "valid" NA level
    }
  }
  x$x <- xx
  if (is.null(x$se)) data.frame(x$x, fit = x$fit)
  else data.frame(x$x, fit = x$fit, se = x$se, lower = x$lower, upper = x$upper)
}

# Inverse-logit and logit transformations.
logit2p <- function(logit) 1/(1 + exp(-logit))
p2logit <- function(p) log(p/(1 - p))

# Draw a rug of data values on a lattice panel, jittering when many are tied.
lrug <- function(x) {
  if (length(unique(x)) < 0.8 * length(x)) x <- jitter(x)
  grid.segments(x, unit(0, "npc"), x, unit(0.5, "lines"), default.units = "native")
}

## model.response is not generic; this extracts the response from a gls fit.
model.response.gls <- function(model) {
  model.response(model.frame(as.formula(model$call$model),
                             data = eval(model$call$data)))
}

terms.gls <- function(x, ...) terms(formula(x))

## vcov method for eff objects
vcov.eff <- function(object, ...) object$vcov

## [ method for efflist objects: subset while preserving the class
`[.efflist` <- function(x, ...) {
  y <- NextMethod("[")
  class(y) <- class(x)
  y
}

### The following functions are for use by Effect() methods.

# Work out everything needed to build the prediction grid for an effect:
# levels/values for the focal predictors, typical values for the excluded
# predictors, and bookkeeping (model matrix, column classifications, etc.).
Analyze.model <- function(focal.predictors, mod, xlevels, default.levels = NULL,
                          formula.rhs, partial.residuals = FALSE, quantiles,
                          x.var = NULL, data = NULL) {
  # Convert na.action "exclude" to "omit" so that fitted values and the model
  # frame line up.  Bug fix: the original tested mod$nan.action (a typo), so
  # this conversion never ran; also use inherits() rather than class() ==.
  if ((!is.null(mod$na.action)) && inherits(mod$na.action, "exclude"))
    class(mod$na.action) <- "omit"
  all.predictors <- all.vars(formula.rhs)
  check.vars <- !(focal.predictors %in% all.predictors)
  excluded.predictors <- setdiff(all.predictors, focal.predictors)
  number.bad <- sum(check.vars)
  if (any(check.vars)) {
    message <- if (number.bad == 1) paste("the following predictor is not in the model:",
                                          focal.predictors[check.vars])
    else paste("the following predictors are not in the model:",
               paste(focal.predictors[check.vars], collapse = ", "))
    stop(message)
  }
  X.mod <- model.matrix(mod)
  cnames <- colnames(X.mod)
  factor.cols <- rep(FALSE, length(cnames))
  names(factor.cols) <- cnames
  for (name in all.predictors) {
    if (is.factor.predictor(name, mod))
      factor.cols[grep(paste("^", name, sep = ""), cnames)] <- TRUE
  }
  factor.cols[grep(":", cnames)] <- FALSE   # interaction columns handled separately
  X <- na.omit(expand.model.frame(mod, all.predictors))
  bad <- sapply(X[, all.predictors, drop = FALSE],
                function(x) !(is.factor(x) || is.numeric(x)))
  if (any(bad)) {
    message <- if (sum(bad) == 1) paste("the following predictor isn't a factor or numeric:",
                                        all.predictors[bad])
    else paste("the following predictors aren't factors or numeric:",
               paste(all.predictors[bad], collapse = ", "))
    stop(message)
  }
  x <- list()
  factor.levels <- list()
  # scalar conditions: use && (were elementwise &)
  if (length(xlevels) == 0 && length(default.levels) == 1L) xlevels <- default.levels
  if (is.numeric(xlevels) && length(xlevels) == 1L) {
    levs <- xlevels
    for (name in focal.predictors) xlevels[[name]] <- levs
  }
  for (name in focal.predictors) {
    levels <- mod$xlevels[[name]]
    if (is.null(levels)) levels <- mod$xlevels[[paste("factor(", name, ")", sep = "")]]
    fac <- !is.null(levels)
    if (!fac) {
      # numeric predictor: take levels from xlevels, or quantiles (for partial
      # residuals), or a pretty grid over the observed range
      levels <- if (is.null(xlevels[[name]])) {
        if (partial.residuals) {
          quantile(X[, name], quantiles)
        }
        else {
          grid.pretty(range(X[, name]))
        }
      }
      else {
        if (length(xlevels[[name]]) == 1L) {
          seq(min(X[, name]), max(X[, name]), length = xlevels[[name]])
        }
        else xlevels[[name]]
      }
    }
    else factor.levels[[name]] <- levels
    x[[name]] <- list(name = name, is.factor = fac, levels = levels)
  }
  if (partial.residuals) {
    numeric.predictors <- sapply(focal.predictors,
                                 function(predictor) is.numeric.predictor(predictor, mod))
    if (!any(numeric.predictors))
      warning("there are no numeric focal predictors",
              "\n  partial residuals suppressed")
    else {
      # use the first numeric focal predictor as the horizontal axis, on a
      # fine grid of 100 values
      x.var <- which(numeric.predictors)[1]
      x.var.name <- focal.predictors[x.var]
      if (is.null(mod$xlevels[[x.var.name]])) {
        x.var.levels <- x[[x.var]][["levels"]]
        x.var.range <- range(X[, focal.predictors[x.var]])
        x[[x.var]][["levels"]] <- seq(from = x.var.range[1], to = x.var.range[2],
                                      length = 100)
      }
    }
  }
  x.excluded <- list()
  for (name in excluded.predictors) {
    levels <- mod$xlevels[[name]]
    fac <- !is.null(levels)
    level <- if (fac) levels[1] else mean(X[, name])
    if (fac) factor.levels[[name]] <- levels
    x.excluded[[name]] <- list(name = name, is.factor = fac, level = level)
  }
  dims <- sapply(x, function(x) length(x$levels))
  len <- prod(dims)
  n.focal <- length(focal.predictors)
  n.excluded <- length(excluded.predictors)
  n.vars <- n.focal + n.excluded
  # build the prediction grid as a character matrix, then coerce columns back
  predict.data <- matrix('', len, n.vars)
  excluded <- sapply(x.excluded, function(x) x$level)
  for (i in 1:len) {
    subs <- subscripts(i, dims)
    for (j in 1:n.focal) {
      predict.data[i, j] <- x[[j]]$levels[subs[j]]
    }
    if (n.excluded > 0)
      predict.data[i, (n.focal + 1):n.vars] <- excluded
  }
  colnames(predict.data) <- c(sapply(x, function(x) x$name),
                              sapply(x.excluded, function(x) x$name))
  colclasses <- lapply(X, class)
  colclasses[colclasses == "matrix"] <- "numeric"
  colclasses[colclasses == "array"] <- "numeric"
  predict.data <- matrix.to.df(predict.data, colclasses = colclasses)
  list(predict.data = predict.data, factor.levels = factor.levels,
       factor.cols = factor.cols, focal.predictors = focal.predictors,
       n.focal = n.focal, excluded.predictors = excluded.predictors,
       n.excluded = n.excluded, x = x, X.mod = X.mod, cnames = cnames, X = X,
       x.var = x.var)
}

# (A superseded, commented-out version of Analyze.model that appeared here
# has been removed.)
# (The remainder of a superseded, commented-out copy of Analyze.model that
# continued here has been removed.)

# Adjust the prediction model matrix: columns belonging to "stranger" terms
# (terms involving excluded predictors, plus the intercept) are replaced by
# typical values -- column means for factor dummies, typical() for covariates,
# or user-supplied given.values -- and interaction columns are then recomputed
# as row-wise products of their component columns.  When partial.residuals is
# TRUE the same fix-ups are applied to mod.matrix.all and
# mod.matrix.all.rounded and all three matrices are returned in a list;
# otherwise only mod.matrix is returned.
Fixup.model.matrix <- function(mod, mod.matrix, mod.matrix.all, X.mod,
    factor.cols, cnames, focal.predictors, excluded.predictors, typical,
    given.values, partial.residuals = FALSE, mod.matrix.all.rounded) {
  # (dead locals 'vars' and 'sel', computed but never used, were removed)
  attr(mod.matrix, "assign") <- attr(mod.matrix.all, "assign")
  if (length(excluded.predictors) > 0) {
    strangers <- Strangers(mod, focal.predictors, excluded.predictors)
    stranger.cols <- apply(outer(strangers, attr(mod.matrix, 'assign'), '=='), 2, any)
  }
  else stranger.cols <- rep(FALSE, ncol(mod.matrix))
  if (has.intercept(mod)) stranger.cols[1] <- TRUE
  if (any(stranger.cols)) {
    facs <- factor.cols & stranger.cols
    covs <- (!factor.cols) & stranger.cols
    if (any(facs)) {
      mod.matrix[, facs] <- matrix(apply(as.matrix(X.mod[, facs]), 2, mean),
                                   nrow = nrow(mod.matrix), ncol = sum(facs), byrow = TRUE)
      if (partial.residuals) {
        mod.matrix.all.rounded[, facs] <- mod.matrix.all[, facs] <-
          matrix(apply(as.matrix(X.mod[, facs]), 2, mean),
                 nrow = nrow(mod.matrix.all), ncol = sum(facs), byrow = TRUE)
      }
    }
    if (any(covs)) {
      mod.matrix[, covs] <- matrix(apply(as.matrix(X.mod[, covs]), 2, typical),
                                   nrow = nrow(mod.matrix), ncol = sum(covs), byrow = TRUE)
      if (partial.residuals) {
        mod.matrix.all.rounded[, covs] <- mod.matrix.all[, covs] <-
          matrix(apply(as.matrix(X.mod[, covs]), 2, typical),
                 nrow = nrow(mod.matrix.all), ncol = sum(covs), byrow = TRUE)
      }
    }
    if (!is.null(given.values)) {
      stranger.names <- cnames[stranger.cols]
      given <- stranger.names %in% names(given.values)
      if (any(given)) {
        mod.matrix[, stranger.names[given]] <-
          matrix(given.values[stranger.names[given]], nrow = nrow(mod.matrix),
                 ncol = length(stranger.names[given]), byrow = TRUE)
        if (partial.residuals) {
          mod.matrix.all.rounded[, stranger.names[given]] <-
            mod.matrix.all[, stranger.names[given]] <-
            matrix(given.values[stranger.names[given]], nrow = nrow(mod.matrix.all),
                   ncol = length(stranger.names[given]), byrow = TRUE)
        }
      }
    }
    # recompute interaction columns as products of their component columns
    for (name in cnames) {
      components <- unlist(strsplit(name, ':'))
      components <- components[components %in% cnames]
      if (length(components) > 1) {
        mod.matrix[, name] <- apply(mod.matrix[, components], 1, prod)
        if (partial.residuals) {
          mod.matrix.all[, name] <- apply(mod.matrix.all[, components], 1, prod)
          mod.matrix.all.rounded[, name] <-
            apply(mod.matrix.all.rounded[, components], 1, prod)
        }
      }
    }
  }
  if (partial.residuals)
    list(mod.matrix = mod.matrix, mod.matrix.all = mod.matrix.all,
         mod.matrix.all.rounded = mod.matrix.all.rounded)
  else mod.matrix
}

# (A superseded, commented-out version of Fixup.model.matrix and an earlier
# commented-out matchVarName that appeared here have been removed.)

# Match a variable name as a whole word within term-label expressions.
# Periods are first replaced by a sentinel string so that "\\b" word
# boundaries treat names like "data.x" correctly (2015-07-07 fix).
matchVarName <- function(name, expressions) {
  scratch <- "zAMIjw4RN3"  # randomly generated string
  name <- gsub("\\.", scratch, name)
  expressions <- gsub("\\.", scratch, as.character(expressions))
  a <- !grepl(paste("[.]+", name, sep = ""), expressions)
  b <- !grepl(paste(name, "[.]+", sep = ""), expressions)
  c <- grepl(paste("\\b", name, "\\b", sep = ""), expressions)
  a & b & c
}

# Indices of model terms that involve any of the excluded predictors.
Strangers <- function(mod, focal.predictors, excluded.predictors) {
  names <- term.names(mod)
  if (has.intercept(mod)) names <- names[-1]
  sel <- apply(sapply(excluded.predictors, matchVarName, expressions = names), 1, any)
  (1:length(sel))[sel]
}

# The following is used by effect.multinom() and Effect.multinom():
# fitted probabilities and logits (and, if se, their delta-method standard
# errors) for a multinomial logit model at covariate vector x0; B is the
# (p x m-1) coefficient matrix, m the number of response levels, r = p*(m-1),
# V the coefficient covariance matrix.
eff.mul <- function(x0, B, se, m, p, r, V) {
  mu <- exp(x0 %*% B)
  mu <- mu/(1 + sum(mu))
  mu[m] <- 1 - sum(mu)
  logits <- log(mu/(1 - mu))
  if (!se) return(list(p = mu, logits = logits))
  # d holds the derivatives of each fitted probability with respect to the
  # coefficients, used in the delta-method variance below
  d <- array(0, c(m, m - 1, p))
  exp.x0.B <- as.vector(exp(x0 %*% B))
  sum.exp.x0.B <- sum(exp.x0.B)
  for (j in 1:(m-1)) {
    d[m, j, ] <- - exp.x0.B[j]*x0
    for (jj in 1:(m-1)) {
      d[j, jj, ] <- if (jj != j) - exp(x0 %*% (B[, jj] + B[, j]))*x0
      else exp.x0.B[j]*(1 + sum.exp.x0.B - exp.x0.B[j])*x0
    }
  }
  d <- d/(1 + sum.exp.x0.B)^2
  V.mu <- rep(0, m)
  for (j in 1:m) {
    dd <- as.vector(t(d[j, , ]))
    for (s in 1:r) {
      for (t in 1:r) {
        V.mu[j] <- V.mu[j] + V[s, t]*dd[s]*dd[t]
      }
    }
  }
  V.logits <- V.mu/(mu^2 * (1 - mu)^2)
  list(p = mu, std.err.p = sqrt(V.mu), logits = logits,
       std.error.logits = sqrt(V.logits))
}

# The following are used by effect.polr() and Effect.polr():
# fitted category probabilities and logits (and, if se, delta-method standard
# errors) for a proportional-odds logit model at covariate vector x0, with
# slope coefficients b, thresholds alpha, coefficient covariance V, m response
# levels, and r = (m - 1) + length(b) parameters.
eff.polr <- function(x0, b, alpha, V, m, r, se) {
  eta0 <- x0 %*% b
  mu <- rep(0, m)
  mu[1] <- 1/(1 + exp(alpha[1] + eta0))
  for (j in 2:(m-1)) {
    mu[j] <- exp(eta0)*(exp(alpha[j - 1]) - exp(alpha[j]))/
      ((1 + exp(alpha[j - 1] + eta0))*(1 + exp(alpha[j] + eta0)))
  }
  mu[m] <- 1 - sum(mu)
  logits <- log(mu/(1 - mu))
  if (!se) return(list(p = mu, logits = logits))
  # derivatives of each probability with respect to (alpha, b)
  d <- matrix(0, m, r)
  d[1, 1] <- - exp(alpha[1] + eta0)/(1 + exp(alpha[1] + eta0))^2
  d[1, m:r] <- - exp(alpha[1] + eta0)*x0/(1 + exp(alpha[1] + eta0))^2
  for (j in 2:(m-1)) {
    d[j, j-1] <- exp(alpha[j-1] + eta0)/(1 + exp(alpha[j-1] + eta0))^2
    d[j, j] <- - exp(alpha[j] + eta0)/(1 + exp(alpha[j] + eta0))^2
    d[j, m:r] <- exp(eta0)*(exp(alpha[j]) - exp(alpha[j-1]))*
      (exp(alpha[j-1] + alpha[j] + 2*eta0) - 1) * x0 /
      (((1 + exp(alpha[j-1] + eta0))^2)*((1 + exp(alpha[j] + eta0))^2))
  }
  d[m, m-1] <- exp(alpha[m-1] + eta0)/(1 + exp(alpha[m-1] + eta0))^2
  d[m, m:r] <- exp(alpha[m-1] + eta0)*x0/(1 + exp(alpha[m-1] + eta0))^2
  V.mu <- rep(0, m)
  for (j in 1:m) {
    dd <- d[j, ]
    for (s in 1:r) {
      for (t in 1:r) {
        V.mu[j] <- V.mu[j] + V[s, t]*dd[s]*dd[t]
      }
    }
  }
  V.logits <- V.mu/(mu^2 * (1 - mu)^2)
  list(p = mu, std.err.p = sqrt(V.mu), logits = logits,
       std.error.logits = sqrt(V.logits))
}

# Fitted values on the latent-response scale (and, if se, their standard
# errors) for rows of the model matrix X0, coefficients b, covariance V.
eff.latent <- function(X0, b, V, se) {
  eta <- X0 %*% b
  if (!se) return(list(fit = eta))
  var <- diag(X0 %*% V %*% t(X0))
  list(fit = eta, se = sqrt(var))
}

# Determine the class of a predictor: factors are recorded in mod$xlevels.
is.factor.predictor <- function(predictor, model) {
  !is.null(model$xlevels[[predictor]])
}

is.numeric.predictor <- function(predictor, model) {
  is.null(model$xlevels[[predictor]])
}

# Manage lattice strips: set strip background/foreground colors and clipping,
# returning the previous settings (for restoreStrip) invisibly.  bg may be a
# vector of colors or a positive integer giving a number of gray levels.
setStrip <- function(bg = 3, fg = "black", clip = c("off", "on")) {
  clip <- match.arg(clip)
  bg.save <- strip.background <- trellis.par.get("strip.background")
  if (is.numeric(bg) && length(bg) == 1) {
    if (bg <= 0) stop("bg should be a positive integer or vector of colors")
    bg <- gray(seq(.95, .5, length = round(bg)))
  }
  strip.background$col <- bg
  fg.save <- strip.shingle <- trellis.par.get("strip.shingle")
  trellis.par.set("strip.background", strip.background)
  if (length(fg) != 1 && length(fg) != length(bg))
    stop("lengths of fg and bg incompatible")
  strip.shingle$col <- fg
  trellis.par.set("strip.shingle", strip.shingle)
  clip.save <- .clip <- trellis.par.get("clip")
  .clip$strip <- clip
  trellis.par.set("clip", .clip)
  invisible(list(strip.background = bg.save, strip.shingle = fg.save,
                 clip = clip.save))
}

# Restore lattice strip settings previously saved by setStrip().
restoreStrip <- function(saved) {
  if (!identical(names(saved), c("strip.background", "strip.shingle", "clip")))
    stop("argument saved does not contain strip parameters")
  trellis.par.set("strip.background", saved$strip.background)
  trellis.par.set("strip.shingle", saved$strip.shingle)
  trellis.par.set("clip", saved$clip)
}

## ---- effects/R/effects.R ----
# effect generic and methods; allEffects
# John Fox, Sanford
# effects.R (continued): effect() generic and methods; allEffects()
# John Fox, Sanford Weisberg, and Jangman Hong
# last modified 2012-12-08 by J. Fox
# 10/31/2012 modifed effect.lm to use z distn for ses with mer and nlme objects
# 12-21-2012 Allow for empty cells in factor interactions, S. Weisberg
# 7-15-2013: S. Weisberg: deleted 'default.levels' argument. Changed and
#   generalized xlevels argument to include the function of default.levels.
# 2013-10-15: eliminated generic effect() and all its methods. J. Fox
# 2014-07-02: added vcov. argument to effect
# 2014-12-10: Changed 'effect' back to a generic function. S. Weisberg

# Generic: compute the effect of a model term, dispatching on the model.
effect <- function(term, mod, vcov. = vcov, ...) {
  UseMethod("effect", mod)
}

# Default method: normalize the term label (strip blanks, "*" -> ":"),
# warn when the term is absent or not of the highest order, and delegate to
# Effect() on the term's predictors.
effect.default <- function(term, mod, vcov. = vcov, ...) {
  term <- gsub(" ", "", gsub("\\*", ":", term))
  tlabels <- term.names(mod)
  if (has.intercept(mod)) {
    tlabels <- tlabels[-1]
  }
  idx <- which(term == tlabels)
  augmented <- list()
  if (length(idx) == 0) {
    message("NOTE: ", term, " does not appear in the model")
    # build an augmented formula so descendants() can still be computed
    augmented <- update(formula(mod), eval(parse(text = paste(". ~ . +", term))))
  }
  if (!is.high.order.term(term, mod, augmented)) {
    message("NOTE: ", term, " is not a high-order term in the model")
  }
  focal <- all.vars(parse(text = term))
  Effect(focal, mod, vcov. = vcov., ...)
}

# Generic: effects for all high-order terms in a model.
allEffects <- function(mod, ...) UseMethod("allEffects")

# Default method: find the terms with no higher-order relatives and compute
# an effect for each, returning an "efflist" object.
allEffects.default <- function(mod, ...) {
  highest.order <- function(mod) {
    labs <- term.names(mod)
    if (has.intercept(mod)) {
      labs <- labs[-1]
    }
    desc <- lapply(labs, descendants, mod = mod)
    which(vapply(desc, length, integer(1)) == 0L)
  }
  labs <- term.names(mod)
  if (has.intercept(mod)) {
    labs <- labs[-1]
  }
  if (length(labs) == 0) {
    stop("the model contains no terms (beyond a constant)")
  }
  top.terms <- labs[highest.order(mod)]
  result <- lapply(top.terms, effect, mod = mod, ...)
  names(result) <- top.terms
  class(result) <- 'efflist'
  result
}

# gls method: refit the mean model as an lm to determine the high-order
# terms, then compute effects as in the default method.
allEffects.gls <- function(mod, ...) {
  highest.order <- function(mod) {
    mod <- lm(as.formula(mod$call$model), data = eval(mod$call$data))
    labs <- term.names(mod)
    if (has.intercept(mod)) {
      labs <- labs[-1]
    }
    desc <- lapply(labs, descendants, mod = mod)
    which(vapply(desc, length, integer(1)) == 0L)
  }
  labs <- term.names(mod)
  if (has.intercept(mod)) {
    labs <- labs[-1]
  }
  if (length(labs) == 0) {
    stop("the model contains no terms (beyond a constant)")
  }
  top.terms <- labs[highest.order(mod)]
  result <- lapply(top.terms, effect, mod = mod, ...)
  names(result) <- top.terms
  class(result) <- 'efflist'
  result
}

# Deprecated alias for allEffects().
all.effects <- function(...) {
  .Deprecated("allEffects")
  allEffects(...)
}

## ---- effects/R/plot-methods.R ----
# plot.eff method for the effects package, moved here from
# plot-summary-print-methods.R; the plot.effpoly method remains there for now.
# Change log (2013-2015): use.splines keyword; ci.style="bands" default for
# variates; factors with "valid" NA level; partial residuals (adjusted/raw,
# smoothing, colors, cex); flexible key.args; type/rescale.axis rationalized
# (rescale.axis deprecated).  J. Fox, S. Weisberg, M. Friendly.

# The following functions aren't exported.

# Number of columns for a horizontal plot legend with n entries: start from
# target (at most 4) and drop one column when the remainder row would be
# less than half full.
find.legend.columns <- function(n, target = min(4, n)) {
  leftover <- n %% target
  if (leftover != 0 && leftover < target / 2) {
    target <- target - 1
  }
  target
}

# Compute axis tick positions and labels: pretty labels on the inverse
# (response) scale, mapped back through link to plotting coordinates.  When
# link is NULL it is approximated by numerically inverting inverse via nlm().
make.ticks <- function(range, link, inverse, at, n) {
  warn <- options(warn = -1)
  on.exit(options(warn))
  if (is.null(link)) {
    link <- function(x) nlm(function(y) (inverse(y) - x)^2, mean(range))$estimate
  }
  if (is.null(n)) {
    n <- 5
  }
  if (is.null(at)) {
    labels <- pretty(sapply(range, inverse), n = n + 1)
  } else {
    labels <- at
  }
  ticks <- sapply(labels, link)
  list(at = ticks, labels = format(labels))
}

# Expand a data range by 2.5% on each side.
range.adj <- function(x) {
  r <- range(x, na.rm = TRUE)
  pad <- .025 * (r[2] - r[1])
  c(r[1] - pad, r[2] + pad)
}

# Lattice panel function drawing a confidence band as a filled polygon
# between lower and upper; optionally smooths the band edges with splines.
# (Adapted from http://www.r-bloggers.com/confidence-bands-with-lattice-and-r/)
panel.bands <- function(x, y, upper, lower, fill, col, subscripts, ...,
                        font, fontface, use.splines = FALSE) {
  if (!missing(subscripts)) {
    upper <- upper[subscripts]
    lower <- lower[subscripts]
  }
  if (use.splines) {
    top <- spline(x, upper)
    bottom <- spline(x, lower)
    x <- top$x
    upper <- top$y
    lower <- bottom$y
  }
  panel.polygon(c(x, rev(x)), c(upper, rev(lower)),
                col = fill, fill = fill, border = FALSE, ...)
}

# Draw lattice lines through spline-interpolated points.
spline.llines <- function(x, y, ...) {
  llines(spline(x, y), ...)
}
plot.eff <- function(x, x.var, z.var=which.min(levels), multiline=is.null(x$se), rug=TRUE, xlab, ylab, main=paste(effect, "effect plot"), colors=palette(), symbols=1:length(colors), lines=1:length(colors), cex=1.5, lwd=2, ylim, xlim=NULL, factor.names=TRUE, ci.style, band.transparency=0.15, band.colors=colors, type=c("rescale", "response", "link"), ticks=list(at=NULL, n=5), alternating=TRUE, rotx=0, roty=0, grid=FALSE, layout, rescale.axis, transform.x=NULL, ticks.x=NULL, key.args=NULL, row=1, col=1, nrow=1, ncol=1, more=FALSE, use.splines=TRUE, partial.residuals=c("adjusted", "raw"), show.fitted=FALSE, residuals.color="blue", residuals.pch=1, residuals.cex=1, smooth.residuals=TRUE, residuals.smooth.color=residuals.color, span=2/3, ...) { .mod <- function(a, b) ifelse( (d <- a %% b) == 0, b, d) .modc <- function(a) .mod(a, length(colors)) .mods <- function(a) .mod(a, length(symbols)) .modl <- function(a) .mod(a, length(lines)) .modb <- function(a) .mod(a, length(band.colors)) ci.style <- if(missing(ci.style)) NULL else match.arg(ci.style, c("bars", "lines", "bands", "none")) if (smooth.residuals && !is.null(x$family)){ loess.family <- if (x$family == "gaussian") "symmetric" else "gaussian" } type <- if (!missing(rescale.axis)){ message("NOTE: the rescale.axis argument is deprecated; use type instead") if (!is.logical(rescale.axis)) stop ("rescale.axis must be logical (TRUE or FALSE)") if (rescale.axis) "rescale" else "response" } else match.arg(type) switch(type, rescale = { type <- "response" rescale.axis <- TRUE }, response = { type <- "response" rescale.axis <- FALSE }, link = { type <- "link" rescale.axis <- TRUE } ) partial.residuals <- match.arg(partial.residuals) levels <- sapply(x$variables, function(z) length(as.vector(z[["levels"]]))) thresholds <- x$thresholds has.thresholds <- !is.null(thresholds) effect.llines <- llines if (missing(ylab)){ ylab <- if (has.thresholds) paste(x$response, ": ", paste(x$y.levels, collapse=", "), sep="") else x$response } if 
(has.thresholds){ threshold.labels <- abbreviate(x$y.levels, minlength=1) threshold.labels <- paste(" ", paste(threshold.labels[-length(threshold.labels)], threshold.labels[-1], sep=" - "), " ", sep="") } trans.link <- x$transformation$link trans.inverse <- x$transformation$inverse residuals <- if (partial.residuals == "adjusted") x$partial.residuals.adjusted else x$partial.residuals.raw fitted <- x$fitted.rounded if (!is.null(residuals) && !rescale.axis) { residuals <- trans.inverse(residuals) fitted <- trans.inverse(fitted) } if (!rescale.axis){ x$lower[!is.na(x$lower)] <- trans.inverse(x$lower[!is.na(x$lower)]) x$upper[!is.na(x$upper)] <- trans.inverse(x$upper[!is.na(x$upper)]) x$fit[!is.na(x$fit)] <- trans.inverse(x$fit)[!is.na(x$fit)] trans.link <- trans.inverse <- I } x.all <- x$x.all mod.matrix.all <- x$mod.matrix.all split <- c(col, row, ncol, nrow) ylab # force evaluation if (missing(x.var)) x.var <- x$x.var if (!is.null(x.var) && is.numeric(x.var)) x.var <- names(x.var) x.data <- x$data effect <- paste(sapply(x$variables, "[[", "name"), collapse="*") vars <- x$variables x <- as.data.frame(x, transform=I) for (i in 1:length(vars)){ if (!(vars[[i]]$is.factor)) next x[,i] <- factor(x[,i], levels=vars[[i]]$levels, exclude=NULL) } has.se <- !is.null(x$se) n.predictors <- ncol(x) - 1 - 3*has.se if (n.predictors == 1){ predictor <- names(x)[1] ### factor no other predictors if (is.factor(x[,1])){ ci.style <- if(is.null(ci.style)) "bars" else ci.style range <- if(has.se & ci.style!="none") range(c(x$lower, x$upper), na.rm=TRUE) else range(x$fit, na.rm=TRUE) ylim <- if (!missing(ylim)) ylim else c(range[1] - .025*(range[2] - range[1]), range[2] + .025*(range[2] - range[1])) tickmarks <- if (type == "response") make.ticks(ylim, link=trans.link, inverse=trans.inverse, at=ticks$at, n=ticks$n) else make.ticks(ylim, link=I, inverse=I, at=ticks$at, n=ticks$n) levs <- levels(x[,1]) plot <- xyplot(eval(parse( text=paste("fit ~ as.numeric(", names(x)[1], ")"))), 
strip=function(...) strip.default(..., strip.names=c(factor.names, TRUE)), panel=function(x, y, lower, upper, has.se, ...){ if (grid) panel.grid() good <- !is.na(y) if (has.se){ if (ci.style == "bars"){ larrows(x0=x[good], y0=lower[good], x1=x[good], y1=upper[good], angle=90, code=3, col=colors[.modc(2)], length=0.125*cex/1.5) } else if(ci.style == "lines") { effect.llines(x[good], lower[good], lty=2, col=colors[.modc(2)]) effect.llines(x[good], upper[good], lty=2, col=colors[.modc(2)]) } else{ if(ci.style == "bands") { panel.bands(x[good], y[good], upper[good], lower[good], fill=band.colors[1], alpha=band.transparency, use.splines=FALSE) }} } effect.llines(x[good], y[good], lwd=lwd, col=colors[1], type='b', pch=19, cex=cex, ...) if (has.thresholds){ panel.abline(h=thresholds, lty=3) panel.text(rep(current.panel.limits()$xlim[1], length(thresholds)), thresholds, threshold.labels, adj=c(0,0), cex=0.75) panel.text(rep(current.panel.limits()$xlim[2], length(thresholds)), thresholds, threshold.labels, adj=c(1,0), cex=0.75) } }, ylim=ylim, ylab=ylab, xlab=if (missing(xlab)) names(x)[1] else xlab, scales=list(x=list(at=1:length(levs), labels=levs, rot=rotx), y=list(at=tickmarks$at, labels=tickmarks$labels, rot=roty), alternating=alternating, y=roty), main=main, lower=x$lower, upper=x$upper, has.se=has.se, data=x, ...) 
result <- update(plot, layout = if (missing(layout)) c(0, prod(dim(plot))) else layout) result$split <- split result$more <- more class(result) <- c("plot.eff", class(result)) } ### variate, no other predictors *** else { effect.llines <- if(use.splines) spline.llines else effect.llines ci.style <- if(is.null(ci.style)) "bands" else ci.style range <- if(has.se & ci.style!="none") range(c(x$lower, x$upper), na.rm=TRUE) else range(x$fit, na.rm=TRUE) ylim <- if (!missing(ylim)) ylim else if (is.null(residuals)) c(range[1] - .025*(range[2] - range[1]), range[2] + .025*(range[2] - range[1])) else c(min(min(residuals), range[1] - .025*(range[2] - range[1])), max(max(residuals), range[2] + .025*(range[2] - range[1]))) tickmarks <- if (type == "response") make.ticks(ylim, link=trans.link, inverse=trans.inverse, at=ticks$at, n=ticks$n) else make.ticks(ylim, link=I, inverse=I, at=ticks$at, n=ticks$n) nm <- names(x)[1] x.vals <- x.data[, nm] if (nm %in% names(ticks.x)){ at <- ticks.x[[nm]]$at n <- ticks.x[[nm]]$n } else{ at <- NULL n <- 5 } xlm <- if (nm %in% names(xlim)){ xlim[[nm]] } else range.adj(x[nm]) # range(x.vals) tickmarks.x <- if ((nm %in% names(transform.x)) && !(is.null(transform.x))){ trans <- transform.x[[nm]]$trans make.ticks(trans(xlm), link=transform.x[[nm]]$trans, inverse=transform.x[[nm]]$inverse, at=at, n=n) } else { trans <- I make.ticks(xlm, link=I, inverse=I, at=at, n=n) } if (is.null(x.var)){ if (!is.null(residuals)){ x.var <- names(x)[1] } else x.var <- which.max(levels) } if (!is.null(residuals)) x.fit <- x.data[, predictor] if (is.numeric(x.var)) x.var <- predictor plot <- xyplot(eval(parse( text=paste("fit ~ trans(", x.var, ")"))), strip=function(...) strip.default(..., strip.names=c(factor.names, TRUE)), panel=function(x, y, x.vals, rug, lower, upper, has.se, ...){ if (grid) panel.grid() good <- !is.na(y) axis.length <- diff(range(x)) effect.llines(x[good], y[good], lwd=lwd, col=colors[1], ...) 
if (rug && is.null(residuals)) lrug(trans(x.vals)) if (has.se){ if (ci.style == "bars"){ larrows(x0=x[good], y0=lower[good], x1=x[good], y1=upper[good], angle=90, code=3, col=eval(colors[.modc(2)]), length=.125*cex/1.5) } else if(ci.style == "lines") { effect.llines(x[good], lower[good], lty=2, col=colors[.modc(2)]) effect.llines(x[good], upper[good], lty=2, col=colors[.modc(2)]) } else{ if(ci.style == "bands") { panel.bands(x[good], y[good], upper[good], lower[good], fill=band.colors[1], alpha=band.transparency, use.splines=use.splines) }} } if (has.thresholds){ panel.abline(h=thresholds, lty=3) panel.text(rep(current.panel.limits()$xlim[1], length(thresholds)), thresholds, threshold.labels, adj=c(0,0), cex=0.75) panel.text(rep(current.panel.limits()$xlim[2], length(thresholds)), thresholds, threshold.labels, adj=c(1,0), cex=0.75) } if (!is.null(residuals)){ lpoints(trans(x.fit), residuals, col=residuals.color, pch=residuals.pch, cex=residuals.cex) if (show.fitted) lpoints(trans(x.fit), fitted, pch=16, col=residuals.color) # REMOVE ME if (smooth.residuals){ llines(loess.smooth(trans(x.fit), residuals, span=span, family=loess.family), lwd=lwd, lty=2, col=residuals.smooth.color) } } }, ylim=ylim, xlim=suppressWarnings(trans(xlm)), ylab=ylab, xlab=if (missing(xlab)) names(x)[1] else xlab, x.vals=x.vals, rug=rug, main=main, lower=x$lower, upper=x$upper, has.se=has.se, data=x, scales=list(y=list(at=tickmarks$at, labels=tickmarks$labels, rot=roty), x=list(at=tickmarks.x$at, labels=tickmarks.x$labels, rot=rotx), alternating=alternating), ...) 
result <- update(plot, layout = if (missing(layout)) c(0, prod(dim(plot))) else layout) result$split <- split result$more <- more class(result) <- c("plot.eff", class(result)) } return(result) } ### more than one variate predictors <- names(x)[1:n.predictors] levels <- sapply(apply(x[,predictors], 2, unique), length) if (is.null(x.var)){ if (!is.null(residuals)){ x.var <- names(x)[1] } else x.var <- which.max(levels) } if (!is.null(residuals)) x.fit <- x.data[, x.var] if (is.character(x.var)) { which.x <- which(x.var == predictors) if (length(which.x) == 0) stop(paste("x.var = '", x.var, "' is not in the effect.", sep="")) x.var <- which.x } if (is.character(z.var)) { which.z <- which(z.var == predictors) if (length(which.z) == 0) stop(paste("z.var = '", z.var, "' is not in the effect.", sep="")) z.var <- which.z } if (x.var == z.var) z.var <- z.var + 1 ### multiline if (multiline){ ci.style <- if(is.null(ci.style)) "none" else ci.style if(ci.style == "lines") { cat("Confidence interval style 'lines' changed to 'bars'\n") ci.style <- "bars"} range <- if (has.se && ci.style !="none") range(c(x$lower, x$upper), na.rm=TRUE) else range(x$fit, na.rm=TRUE) ylim <- if (!missing(ylim)) ylim else c(range[1] - .025*(range[2] - range[1]), range[2] + .025*(range[2] - range[1])) tickmarks <- if (type == "response") make.ticks(ylim, link=trans.link, inverse=trans.inverse, at=ticks$at, n=ticks$n) else make.ticks(ylim, link=I, inverse=I, at=ticks$at, n=ticks$n) zvals <- unique(x[, z.var]) ### multiline factor if (is.factor(x[,x.var])){ levs <- levels(x[,x.var]) key <- list(title=predictors[z.var], cex.title=1, border=TRUE, text=list(as.character(zvals)), lines=list(col=colors[.modc(1:length(zvals))], lty=lines[.modl(1:length(zvals))], lwd=lwd), points=list(col=colors[.modc(1:length(zvals))], pch=symbols[.mods(1:length(zvals))]), columns = if ("x" %in% names(key.args)) 1 else find.legend.columns(length(zvals))) for (k in names(key.args)) key[k] <- key.args[k] plot <- 
xyplot(eval(parse( text=paste("fit ~ as.numeric(", predictors[x.var], ")", if (n.predictors > 2) paste(" |", paste(predictors[-c(x.var, z.var)])), collapse="*"))), strip=function(...) strip.default(..., strip.names=c(factor.names, TRUE)), panel=function(x, y, subscripts, z, lower, upper, show.se, ...){ if (grid) panel.grid() for (i in 1:length(zvals)){ sub <- z[subscripts] == zvals[i] good <- !is.na(y[sub]) os <- if(show.se) (i - (length(zvals) + 1)/2) * (2/(length(zvals)-1)) * .01 * (length(zvals) - 1) else 0 effect.llines(x[sub][good]+os, y[sub][good], lwd=lwd, type='b', col=colors[.modc(i)], pch=symbols[.mods(i)], lty=lines[.modl(i)], cex=cex, ...) if (show.se){ larrows(x0=x[sub][good]+os, y0=lower[subscripts][sub][good], x1=x[sub][good]+os, y1=upper[subscripts][sub][good], angle=90, code=3, col=eval(colors[.modc(i)]), length=.125*cex/1.5) } } if (has.thresholds){ panel.abline(h=thresholds, lty=3) panel.text(rep(current.panel.limits()$xlim[1], length(thresholds)), thresholds, threshold.labels, adj=c(0,0), cex=0.75) panel.text(rep(current.panel.limits()$xlim[2], length(thresholds)), thresholds, threshold.labels, adj=c(1,0), cex=0.75) } }, ylim=ylim, ylab=ylab, xlab=if (missing(xlab)) predictors[x.var] else xlab, z=x[,z.var], scales=list(x=list(at=1:length(levs), labels=levs, rot=rotx), y=list(at=tickmarks$at, labels=tickmarks$labels, rot=roty), alternating=alternating), zvals=zvals, main=main, key=key, lower=x$lower, upper=x$upper, show.se=has.se && ci.style=="bars", data=x, ...) 
result <- update(plot, layout = if (missing(layout)) c(0, prod(dim(plot))) else layout) result$split <- split result$more <- more class(result) <- c("plot.eff", class(result)) } ### multiline variate else{ effect.llines <- if(use.splines) spline.llines else effect.llines nm <- names(x)[x.var] x.vals <- x.data[, nm] if (nm %in% names(ticks.x)){ at <- ticks.x[[nm]]$at n <- ticks.x[[nm]]$n } else{ at <- NULL n <- 5 } xlm <- if (nm %in% names(xlim)){ xlim[[nm]] } else range.adj(x[nm]) # range(x.vals) tickmarks.x <- if ((nm %in% names(transform.x)) && !(is.null(transform.x))){ trans <- transform.x[[nm]]$trans # make.ticks(trans(xlm), link=transform.x[[nm]]$trans, inverse=transform.x[[nm]]$inverse, at=ticks.x$at, n=ticks.x$n) make.ticks(trans(xlm), link=transform.x[[nm]]$trans, inverse=transform.x[[nm]]$inverse, at=at, n=n) } else { trans <- I make.ticks(xlm, link=I, inverse=I, at=at, n=n) } key <- list(title=predictors[z.var], cex.title=1, border=TRUE, text=list(as.character(zvals)), lines=list(col=colors[.modc(1:length(zvals))], lty=lines[.modl(1:length(zvals))], lwd=lwd), columns = if ("x" %in% names(key.args)) 1 else find.legend.columns(length(zvals))) for (k in names(key.args)) key[k] <- key.args[k] plot <- xyplot(eval(parse( text=paste("fit ~trans(", predictors[x.var], ")", if (n.predictors > 2) paste(" |", paste(predictors[-c(x.var, z.var)])), collapse="*"))), strip=function(...) strip.default(..., strip.names=c(factor.names, TRUE)), panel=function(x, y, subscripts, x.vals, rug, z, lower, upper, show.se, ...){ if (grid) panel.grid() if (rug && is.null(residuals)) lrug(trans(x.vals)) axis.length <- diff(range(x)) for (i in 1:length(zvals)){ sub <- z[subscripts] == zvals[i] good <- !is.na(y[sub]) effect.llines(x[sub][good], y[sub][good], lwd=lwd, type='l', col=colors[.modc(i)], lty=lines[.modl(i)], cex=cex, ...) 
if(show.se){ if(ci.style == "bars"){ os <- (i - (length(zvals) + 1)/2) * (2/(length(zvals)-1)) * .01 * axis.length larrows(x0=x[sub][good]+os, y0=lower[subscripts][sub][good], x1=x[sub][good]+os, y1=upper[subscripts][sub][good], angle=90, code=3, col=eval(colors[.modc(i)]), length=.125*cex/1.5) } if(ci.style == "bands"){ panel.bands(x[sub][good], y[sub][good], upper[subscripts][sub][good], lower[subscripts][sub][good], fill=eval(band.colors[.modb(i)]), alpha=band.transparency, use.splines=use.splines) } } } if (has.thresholds){ panel.abline(h=thresholds, lty=3) panel.text(rep(current.panel.limits()$xlim[1], length(thresholds)), thresholds, threshold.labels, adj=c(0,0), cex=0.75) panel.text(rep(current.panel.limits()$xlim[2], length(thresholds)), thresholds, threshold.labels, adj=c(1,0), cex=0.75) } }, ylim=ylim, xlim=suppressWarnings(trans(xlm)), ylab=ylab, xlab=if (missing(xlab)) predictors[x.var] else xlab, x.vals=x.vals, rug=rug, z=x[,z.var], zvals=zvals, main=main, key=key, # lower=x$lower, upper=x$upper, show.se=has.se && ci.style %in% c("bars", "bands"), # data=x, scales=list(y=list(at=tickmarks$at, labels=tickmarks$labels), rot=roty, x=list(at=tickmarks.x$at, labels=tickmarks.x$labels, rot=rotx), alternating=alternating), ...) 
result <- update(plot, layout = if (missing(layout)) c(0, prod(dim(plot))) else layout) result$split <- split result$more <- more class(result) <- c("plot.eff", class(result)) } return(result) } # multiplot ci.style <- if(is.null(ci.style)){ if(is.factor(x[, x.var])) "bars" else "bands"} else ci.style range <- if (has.se && ci.style !="none") range(c(x$lower, x$upper), na.rm=TRUE) else range(x$fit, na.rm=TRUE) # multiplot factor if (is.factor(x[,x.var])){ ylim <- if (!missing(ylim)) ylim else c(range[1] - .025*(range[2] - range[1]), range[2] + .025*(range[2] - range[1])) tickmarks <- if (type == "response") make.ticks(ylim, link=trans.link, inverse=trans.inverse, at=ticks$at, n=ticks$n) else make.ticks(ylim, link=I, inverse=I, at=ticks$at, n=ticks$n) levs <- levels(x[,x.var]) plot <- xyplot(eval(parse( text=paste("fit ~ as.numeric(", predictors[x.var], ") |", paste(predictors[-x.var], collapse="*")))), strip=function(...) strip.default(..., strip.names=c(factor.names, TRUE)), panel=function(x, y, subscripts, lower, upper, has.se, ...){ if (grid) panel.grid() good <- !is.na(y) if (has.se){ if (ci.style == "bars"){ larrows(x0=x[good], y0=lower[subscripts][good], x1=x[good], y1=upper[subscripts][good], angle=90, code=3, col=colors[.modc(2)], length=0.125*cex/1.5) } else if(ci.style == "lines") { effect.llines(x[good], lower[subscripts][good], lty=2, col=colors[.modc(2)]) effect.llines(x[good], upper[subscripts][good], lty=2, col=colors[.modc(2)]) } else{ if(ci.style == "bands") { panel.bands(x[good], y[good], upper[subscripts][good], lower[subscripts][good], fill=band.colors[1], alpha=band.transparency, use.splines=FALSE) }} } effect.llines(x[good], y[good], lwd=lwd, type='b', col=colors[1], pch=19, cex=cex, ...) 
if (has.thresholds){ panel.abline(h=thresholds, lty=3) panel.text(rep(current.panel.limits()$xlim[1], length(thresholds)), thresholds, threshold.labels, adj=c(0,0), cex=0.75) panel.text(rep(current.panel.limits()$xlim[2], length(thresholds)), thresholds, threshold.labels, adj=c(1,0), cex=0.75) } }, ylim=ylim, ylab=ylab, xlab=if (missing(xlab)) predictors[x.var] else xlab, scales=list(x=list(at=1:length(levs), labels=levs, rot=rotx), y=list(at=tickmarks$at, labels=tickmarks$labels, rot=roty), alternating=alternating), main=main, lower=x$lower, upper=x$upper, has.se=has.se, data=x, ...) result <- update(plot, layout = if (missing(layout)) c(0, prod(dim(plot))) else layout) result$split <- split result$more <- more class(result) <- c("plot.eff", class(result)) } ### multiplot variate *** else{ effect.llines <- if(use.splines) spline.llines else effect.llines nm <- names(x)[x.var] x.vals <- x.data[, nm] if (nm %in% names(ticks.x)){ at <- ticks.x[[nm]]$at n <- ticks.x[[nm]]$n } else{ at <- NULL n <- 5 } xlm <- if (nm %in% names(xlim)){ xlim[[nm]] } else range.adj(x[nm]) tickmarks.x <- if ((nm %in% names(transform.x)) && !(is.null(transform.x))){ trans <- transform.x[[nm]]$trans make.ticks(trans(xlm), link=transform.x[[nm]]$trans, inverse=transform.x[[nm]]$inverse, at=at, n=n) } else { trans <- I make.ticks(xlm, link=I, inverse=I, at=at, n=n) } ylim <- if (!missing(ylim)) ylim else if (is.null(residuals)) c(range[1] - .025*(range[2] - range[1]), range[2] + .025*(range[2] - range[1])) else c(min(min(residuals), range[1] - .025*(range[2] - range[1])), max(max(residuals), range[2] + .025*(range[2] - range[1]))) tickmarks <- if (type == "response") make.ticks(ylim, link=trans.link, inverse=trans.inverse, at=ticks$at, n=ticks$n) else make.ticks(ylim, link=I, inverse=I, at=ticks$at, n=ticks$n) x.fit <- x.data[, predictors[x.var]] use <- rep(TRUE, length(residuals)) xx <- x[, predictors[-x.var], drop=FALSE] plot <- xyplot(eval(parse( text=paste("fit ~ trans(", 
predictors[x.var], ") |", paste(predictors[-x.var], collapse="*")))), strip=function(...) strip.default(..., strip.names=c(factor.names, TRUE)), panel=function(x, y, subscripts, x.vals, rug, lower, upper, has.se, ...){ if (grid) panel.grid() good <- !is.na(y) effect.llines(x[good], y[good], lwd=lwd, col=colors[1], ...) if (rug && is.null(residuals)) lrug(trans(x.vals)) if (has.se){ if (ci.style == "bars"){ larrows(x0=x[good], y0=lower[subscripts][good], x1=x[good], y1=upper[subscripts][good], angle=90, code=3, col=eval(colors[.modc(2)]), length=.125*cex/1.5) } else if(ci.style == "lines") { effect.llines(x[good], lower[subscripts][good], lty=2, col=colors[.modc(2)]) effect.llines(x[good], upper[subscripts][good], lty=2, col=colors[.modc(2)]) } else if(ci.style == "bands") { panel.bands(x[good], y[good], upper[subscripts][good], lower[subscripts][good], fill=band.colors[1], alpha=band.transparency, use.splines=use.splines) } if (!is.null(residuals)){ predictors <- predictors[-x.var] factors <- sapply(xx, is.factor) for (predictor in predictors){ use <- use & if(factors[predictor]) x.all[, predictor] == xx[subscripts[1], predictor] else x.all[, predictor] == xx[subscripts[1], predictor] } n.in.panel <- sum(use) if (n.in.panel > 0){ lpoints(trans(x.fit[use]), residuals[use], col=residuals.color, pch=residuals.pch, cex=residuals.cex) if (show.fitted) lpoints(trans(x.fit[use]), fitted[use], pch=16, col=residuals.color) # REMOVE ME if (smooth.residuals && n.in.panel >= 10) { llines(loess.smooth(x.fit[use], residuals[use], span=span, family=loess.family), lwd=lwd, lty=2, col=residuals.smooth.color) } } } } if (has.thresholds){ panel.abline(h=thresholds, lty=3) panel.text(rep(current.panel.limits()$xlim[1], length(thresholds)), thresholds, threshold.labels, adj=c(0,0), cex=0.75) panel.text(rep(current.panel.limits()$xlim[2], length(thresholds)), thresholds, threshold.labels, adj=c(1,0), cex=0.75) } }, ylim=ylim, xlim=suppressWarnings(trans(xlm)), ylab=ylab, xlab=if 
(missing(xlab)) predictors[x.var] else xlab,
    # remaining arguments of the xyplot() call begun above: data passed through
    # to the panel function, plus tick positions computed earlier via make.ticks()
    x.vals=x.vals, rug=rug, main=main,
    lower=x$lower, upper=x$upper, has.se=has.se, data=x,
    scales=list(y=list(at=tickmarks$at, labels=tickmarks$labels, rot=roty),
        x=list(at=tickmarks.x$at, labels=tickmarks.x$labels, rot=rotx),
        alternating=alternating), ...)
# honor a user-supplied layout; otherwise let lattice stack all panels
result <- update(plot, layout = if (missing(layout)) c(0, prod(dim(plot))) else layout)
result$split <- split
result$more <- more
class(result) <- c("plot.eff", class(result))
}
return(result)
}

# Print method for "plot.eff" objects: forwards the split/more settings stored
# on the object to the underlying lattice print method, then returns the
# object invisibly so it prints cleanly at top level.
print.plot.eff <- function(x, ...){
    NextMethod(split=x$split, more=x$more, ...)
    invisible(x)
}

# Plot the effects in an "efflist" object.
#
# selection: a single term to plot, by name (blanks ignored) or index.
# ask:       if TRUE, present an interactive menu of terms and plot one at a time.
# rows, cols: grid dimensions when all effects are plotted together; defaults
#             come from mfrow() (a package helper -- presumably chooses a
#             near-square layout; confirm in the package source).
# graphics:  passed to menu() to select a graphical menu where available.
plot.efflist <- function(x, selection, rows, cols, ask=FALSE, graphics=TRUE, ...){
    if (!missing(selection)){
        # term names are matched with blanks removed, so "poly(age, 2)"
        # and "poly(age,2)" are equivalent
        if (is.character(selection)) selection <- gsub(" ", "", selection)
        return(plot(x[[selection]], ...))
    }
    # display interaction terms with "*" rather than ":" in the menu
    effects <- gsub(":", "*", names(x))
    if (ask){
        repeat {
            selection <- menu(effects, graphics=graphics, title="Select Term to Plot")
            if (selection == 0) break
            else print(plot(x[[selection]], ...))
        }
    }
    else {
        neffects <- length(x)
        mfrow <- mfrow(neffects)
        if (missing(rows) || missing(cols)){
            rows <- mfrow[1]
            cols <- mfrow[2]
        }
        for (i in 1:rows) {
            for (j in 1:cols){
                if ((i-1)*cols + j > neffects) break
                # more=TRUE for every plot but the last keeps lattice from
                # starting a new page between panels
                more <- !((i-1)*cols + j == neffects)
                print(plot(x[[(i-1)*cols + j]], row=i, col=j, nrow=rows, ncol=cols,
                    more=more, ...))
            }
        }
    }
}
effects/R/summary-print-methods.R0000644000176200001440000002246612230233005016505 0ustar liggesusers# plot, summary, and print methods for effects package
# John Fox and Jangman Hong
# last modified 2012-11-30 by J. Fox
# 29 June 2011 added grid, rotx and roty arguments to the two plot methods
# by S. Weisberg
# 21 Dec 2012 modest modification of empty cells with crossed factors
# 2013-01-17: Added factor.ci.style arg to plot.eff() and plot.effpoly(). J. Fox
# 2013-01-18: Added CI bars to multiline plots with factor.ci.style="bars"
# 2013-01-19: Renamed 'factor.ci.style' to 'ci.style'.
Added a 'none' option
# extended to variate terms if multiline=TRUE, ci.style="bars"
# 2013-01-30: scale arrow "heads" for error bars relative to cex
# 2013-05-31: fixed symbol colors in legends in plot.eff(). J. Fox
# 2013-08-14: fixed bug in restoring warn option. J. Fox
# 2013-08-27: fixed symbols argument for multiline plot in plot.eff(), reported by Ulrike Gromping. J. Fox
# 2013-08-31: fixed handling of ticks.x argument. John
# 2013-09-25: moved plot.eff methods to plot.methods.R for easier work. Michael
# 2013-10-17: added use.splines argument to plot.effpoly. Sandy

# Summary method for "eff" objects.
#
# type="response" back-transforms the fit (and any confidence limits) through
# the inverse link stored on the object; type="link" leaves them on the linear
# predictor scale.  Returns a "summary.eff" list holding the effect and its
# confidence limits as arrays indexed by the levels of the focal predictors;
# printing is handled by print.summary.eff().
summary.eff <- function(object, type=c("response", "link"), ...){
    result <- list()
    result$header <- paste("\n", gsub(":", "*", object$term), 'effect\n')
    result$offset <- object$offset
    type <- match.arg(type)
    if (type == "response") {
        object$fit <- object$transformation$inverse(object$fit)
        # confidence limits are only present when they were requested at
        # Effect() time, signalled by a non-NULL confidence.level
        if (!is.null(object$confidence.level)){
            object$lower <- object$transformation$inverse(object$lower)
            object$upper <- object$transformation$inverse(object$upper)
        }
    }
    # reshape the vector of fitted values into an array with one dimension
    # per focal predictor, labelled by that predictor's levels
    result$effect <- array(object$fit,
        dim=sapply(object$variables, function(x) length(x$levels)),
        dimnames=lapply(object$variables, function(x) x$levels))
    if (!is.null(object$se)){
        result$lower.header <- paste('\n Lower', round(100*object$confidence.level, 2),
            'Percent Confidence Limits\n')
        result$lower <- array(object$lower,
            dim=sapply(object$variables, function(x) length(x$levels)),
            dimnames=lapply(object$variables, function(x) x$levels))
        result$upper.header <- paste('\n Upper', round(100*object$confidence.level, 2),
            'Percent Confidence Limits\n')
        result$upper <- array(object$upper,
            dim=sapply(object$variables, function(x) length(x$levels)),
            dimnames=lapply(object$variables, function(x) x$levels))
    }
    # warn when the "safe" predictions disagree appreciably with the model
    if (object$discrepancy > 1e-3)
        result$warning <- paste("\nWarning: There is an average discrepancy of",
            round(object$discrepancy, 3),
            "percent \n in the 'safe' predictions for effect", object$term, '\n')
    class(result) <- "summary.eff"
    result
}
# Print method for "summary.eff" objects produced by summary.eff(): emits the
# header, the effect array, optional confidence-limit arrays, optional
# thresholds (for ordinal-response models), and any discrepancy warning.
print.summary.eff <- function(x, ...){
    cat(x$header)
    if (x$offset != 0) cat("\noffset = ", x$offset, "\n\n")
    print(x$effect, ...)
    if (!is.null(x$lower)){
        cat(x$lower.header)
        print(x$lower, ...)
        cat(x$upper.header)
        print(x$upper, ...)
    }
    if (!is.null(x$thresholds)){
        cat("\nThresholds:\n")
        print(x$thresholds, ...)
    }
    if (!is.null(x$warning)) cat(x$warning)
    invisible(x)
}

# Print method for "eff" objects: prints the fitted effect as an array indexed
# by the focal predictors' levels, on the response scale (the default, via the
# stored inverse link) or on the link scale.
print.eff <- function(x, type=c("response", "link"), ...){
    cat(paste("\n", gsub(":", "*", x$term), 'effect\n'))
    if (x$offset != 0) cat("\noffset = ", x$offset, "\n\n")
    type <- match.arg(type)
    if (type == "response") x$fit <- x$transformation$inverse(x$fit)
    table <- array(x$fit,
        dim=sapply(x$variables, function(x) length(x$levels)),
        dimnames=lapply(x$variables, function(x) x$levels))
    print(table, ...)
    if (x$discrepancy > 1e-3)
        cat(paste("\nWarning: There is an average discrepancy of", round(x$discrepancy, 3),
            "percent \n in the 'safe' predictions for effect", x$term, '\n'))
    invisible(x)
}

# Print method for "efflist" objects: shows the model formula once, then
# prints each effect in the list in turn.
print.efflist <- function(x, ...){
    cat(" model: ")
    form <- x[[1]]$formula
    # strip environment/attributes so the formula prints bare
    attributes(form) <- NULL
    print(form)
    for (effect in names(x)){
        print(x[[effect]], ...)
    }
    invisible(x)
}

# Summary method for "efflist" objects: shows the model formula once, then
# prints the summary of each effect in the list.
summary.efflist <- function(object, ...){
    cat(" model: ")
    form <- object[[1]]$formula
    attributes(form) <- NULL
    print(form)
    for (effect in names(object)){
        print(summary(object[[effect]], ...))
    }
    invisible(NULL)
}

# Print method for "effpoly" objects (polytomous-response effects, e.g. from
# multinom or polr fits).  For each response level it prints an array of
# fitted probabilities (type="probability") or logits (type="logits") indexed
# by the focal predictors' levels.
print.effpoly <- function(x, type=c("probability", "logits"), ...){
    type <- match.arg(type)
    x.frame <-as.data.frame(x)
    n.predictors <- length(names(x$x))
    predictors <- names(x.frame)[1:n.predictors]
    y.lev <- x$y.lev
    # column names of x$prob/x$logit are matched via make.names() because the
    # response levels may not be syntactically valid names
    ylevel.names <- make.names(paste("prob",y.lev))
    colnames(x$prob) <- colnames(x$logit) <- ylevel.names
    # build a factor mapping each row of the stacked prob/logit matrices back
    # to its response level
    y.categories <- matrix(0, nrow=length(x.frame[,predictors[1]]), ncol=length(y.lev))
    for (i in 1:length(y.lev)){
        level <- which(colnames(x$prob)[i] == ylevel.names)
        y.categories[,i] <- rep(y.lev[level], length(y.categories[,i]))
    }
    y.categories <- as.vector(y.categories)
    y.categories <- factor(y.categories)
    for (i in 1:length(y.lev)){
        cat(paste("\n", gsub(":", "*", x$term), " effect (", type,") for ", y.lev[i], "\n", sep=""))
        table <- array(if (type == "probability") {x$prob[y.categories==y.lev[i]]} else {x$logit[y.categories==y.lev[i]]},
            dim=sapply(x$variables, function(x) length(x$levels)),
            dimnames=lapply(x$variables, function(x) x$levels))
        print(table, ...)
}
    if (x$discrepancy > 0.1)
        cat(paste("\nWarning: There is an average discrepancy of", round(x$discrepancy, 2),
            "percent \n in the 'safe' predictions for effect", x$term, '\n'))
    invisible(x)
}

# Summary method for "effpoly" objects: like print.effpoly() but, when
# confidence limits were computed, also prints lower and upper confidence
# limits for each response level (as probabilities or logits per 'type').
summary.effpoly <- function(object, type=c("probability", "logits"), ...){
    type <- match.arg(type)
    x.frame <-as.data.frame(object)
    n.predictors <- length(names(object$x))
    predictors <- names(x.frame)[1:n.predictors]
    y.lev <- object$y.lev
    ylevel.names <- make.names(paste("prob",y.lev))
    # align the column names of all six matrices (point estimates and limits)
    # with the sanitized response-level names
    colnames(object$prob) <- colnames(object$logit) <-
        colnames(object$lower.logit) <- colnames(object$upper.logit) <-
        colnames(object$lower.prob) <- colnames(object$upper.prob)<- ylevel.names
    # factor mapping stacked rows back to their response level, as in print.effpoly()
    y.categories <-matrix(0, nrow=length(x.frame[,predictors[1]]), ncol=length(y.lev))
    for (i in 1:length(y.lev)){
        level <- which(colnames(object$prob)[i] == ylevel.names)
        y.categories[,i] <- rep(y.lev[level], length(y.categories[,i]))
    }
    y.categories <- as.vector(y.categories)
    y.categories <- factor(y.categories)
    for (i in 1:length(y.lev)){
        cat(paste("\n", gsub(":", "*", object$term), " effect (" , type, ") for ", y.lev[i], "\n", sep=""))
        table <- array(if (type == "probability") {object$prob[y.categories==y.lev[i]]} else {object$logit[y.categories==y.lev[i]]},
            dim=sapply(object$variables, function(x) length(x$levels)),
            dimnames=lapply(object$variables, function(x) x$levels))
        print(table, ...)
    }
    # no confidence limits were computed: stop after the point estimates
    if (is.null(object$confidence.level)) return(invisible(NULL))
    for (i in 1:length(y.lev)){
        cat(paste("\n", 'Lower', object$confidence.level*100, 'Percent Confidence Limits for' , y.lev[i],'\n'))
        table <- if (type == "probability") object$lower.prob else object$lower.logit
        table <- array(table[y.categories==y.lev[i]],
            dim=sapply(object$variables, function(x) length(x$levels)),
            dimnames=lapply(object$variables, function(x) x$levels))
        print(table, ...)
    }
    for (i in 1:length(y.lev)){
        cat(paste("\n", 'Upper', object$confidence.level*100, 'Percent Confidence Limits for' , y.lev[i],'\n'))
        table <- if (type == "probability") object$upper.prob else object$upper.logit
        table <- array(table[y.categories==y.lev[i]],
            dim=sapply(object$variables, function(x) length(x$levels)),
            dimnames=lapply(object$variables, function(x) x$levels))
        print(table, ...)
    }
    if (object$discrepancy > 0.1)
        cat(paste("\nWarning: There is an average discrepancy of", round(object$discrepancy, 2),
            "percent \n in the 'safe' predictions for effect", object$term, '\n'))
    invisible(NULL)
}

# Print method for "efflatent" objects (effects on the latent-response scale
# of an ordinal model): prints the effect array followed by the estimated
# category thresholds.
print.efflatent <- function(x, ...){
    cat(paste("\n", gsub(":", "*", x$term), 'effect\n'))
    table <- array(x$fit,
        dim=sapply(x$variables, function(x) length(x$levels)),
        dimnames=lapply(x$variables, function(x) x$levels))
    print(table, ...)
    cat("\nThresholds:\n")
    print(x$thresholds, ...)
    if (x$discrepancy > 0.1)
        cat(paste("\nWarning: There is an average discrepancy of", round(x$discrepancy, 3),
            "percent \n in the 'safe' predictions for effect", x$term, '\n'))
    invisible(x)
}

# Summary method for "efflatent" objects: builds a "summary.eff" list (so
# print.summary.eff() handles the output) with the effect array, optional
# confidence-limit arrays, and the thresholds.
summary.efflatent <- function(object, ...){
    result <- list()
    result$header <- paste("\n", gsub(":", "*", object$term), 'effect\n')
    result$effect <- array(object$fit,
        dim=sapply(object$variables, function(x) length(x$levels)),
        dimnames=lapply(object$variables, function(x) x$levels))
    if (!is.null(object$se)){
        result$lower.header <- paste('\n Lower', round(100*object$confidence.level, 2),
            'Percent Confidence Limits\n')
        result$lower <- array(object$lower,
            dim=sapply(object$variables, function(x) length(x$levels)),
            dimnames=lapply(object$variables, function(x) x$levels))
        result$upper.header <- paste('\n Upper', round(100*object$confidence.level, 2),
            'Percent Confidence Limits\n')
        result$upper <- array(object$upper,
            dim=sapply(object$variables, function(x) length(x$levels)),
            dimnames=lapply(object$variables, function(x) x$levels))
    }
    result$thresholds <- object$thresholds
    if (object$discrepancy > 0.1)
        result$warning <- paste("\nWarning: There is an average discrepancy of",
            round(object$discrepancy, 3),
            "percent \n in the 'safe' predictions for effect", object$term, '\n')
    class(result) <- "summary.eff"
    result
}
effects/R/Effect.R0000644000176200001440000006143412504572513013425 0ustar liggesusers# Effect generic and methods
# John Fox and Sanford Weisberg
# 12-21-2012 Allow for empty cells in factor interactions, S. Weisberg
# 2012-03-05: Added .merMod method for development version of lme4, J. Fox
# 2012-04-06: Added support for lme4.0, J. Fox
# 2013-07-15: Changed default xlevels and default.levels
# 2013-10-15: Added Effect.default(). J. Fox
# 2013-10-22: fixed bug in Effect.lm() when na.action=na.exclude. J. Fox
# 2013-10-29: code to handle "valid" NAs in factors. J. Fox
# 2013-11-06: fixed bug in Effect.multinom() in construction of effect object
# when there is only one focal predictor; caused as.data.frame.effpoly() to fail
# 2014-03-13: modified Effect.lm() to compute partial residuals. J. Fox
# 2014-05-06: fixed bug in Effect.gls() when cor or var structure depends on variables in the data set. J. Fox
# 2014-08-02: added vcov.=vcov argument to allow other methods of estimating var(coef.estimates)
# 2014-09-25: added KR argument to Effect.mer() and Effect.merMod(). J. Fox
# 2014-12-07: don't assume that pbkrtest is installed. J. Fox
# 2015-03-25: added "family" element to eff objects returned by Effect.lm(). J. Fox

# S3 generic: dispatch on the class of the fitted model 'mod'
Effect <- function(focal.predictors, mod, ...){
    UseMethod("Effect", mod)
}

# Effect method for lm/glm fits.  Computes fitted values (and, optionally,
# pointwise confidence limits and partial residuals) over a grid of the focal
# predictors, holding other predictors at typical values.
Effect.lm <- function (focal.predictors, mod, xlevels = list(), default.levels = NULL, given.values, vcov.
= vcov, se = TRUE, confidence.level = 0.95,
    transformation = list(link = family(mod)$linkfun, inverse = family(mod)$linkinv),
    typical = mean, offset = mean, partial.residuals=FALSE,
    quantiles=seq(0.2, 0.8, by=0.2), x.var=NULL, ...){
    # when partial residuals are requested, recover the full model frame so
    # the observed predictor values are available
    data <- if (partial.residuals){
        all.vars <- all.vars(formula(mod))
        expand.model.frame(mod, all.vars)[, all.vars]
    }
    else NULL
    if (missing(given.values)) given.values <- NULL
    else if (!all(which <- names(given.values) %in% names(coef(mod))))
        stop("given.values (", names(given.values[!which]), ") not in the model")
    # offset may be a single number or a function applied to the model's offset
    off <- if (is.numeric(offset) && length(offset) == 1) offset
    else if (is.function(offset)) {
        mod.off <- model.offset(model.frame(mod))
        if (is.null(mod.off)) 0 else offset(mod.off)
    }
    else stop("offset must be a function or a number")
    formula.rhs <- formula(mod)[[3]]
    # Analyze.model() is a package helper -- presumably builds the prediction
    # grid over the focal predictors; confirm in the package source
    model.components <- Analyze.model(focal.predictors, mod, xlevels, default.levels, formula.rhs,
        partial.residuals=partial.residuals, quantiles=quantiles, x.var=x.var, data=data)
    excluded.predictors <- model.components$excluded.predictors
    predict.data <- model.components$predict.data
    predict.data.all.rounded <- predict.data.all <- if (partial.residuals) na.omit(data[, all.vars(formula(mod))]) else NULL
    factor.levels <- model.components$factor.levels
    factor.cols <- model.components$factor.cols
    n.focal <- model.components$n.focal
    x <- model.components$x
    X.mod <- model.components$X.mod
    cnames <- model.components$cnames
    X <- model.components$X
    x.var <- model.components$x.var
    formula.rhs <- formula(mod)[c(1, 3)]
    Terms <- delete.response(terms(mod))
    mf <- model.frame(Terms, predict.data, xlev = factor.levels, na.action=NULL)
    mod.matrix <- model.matrix(formula.rhs, data = mf, contrasts.arg = mod$contrasts)
    # partial residuals require a numeric horizontal variable
    if (is.null(x.var)) partial.residuals <- FALSE
    factors <- sapply(predict.data, is.factor)
    if (partial.residuals){
        # snap each observed value of the non-horizontal numeric focal
        # predictors to the nearest grid value (minimum squared distance)
        for (predictor in focal.predictors[-x.var]){
            if (!factors[predictor]){
                values <- unique(predict.data[, predictor])
                predict.data.all.rounded[, predictor] <- values[apply(outer(predict.data.all[, predictor], values, function(x, y) (x - y)^2), 1, which.min)]
            }
        }
        mf.all <- model.frame(Terms, predict.data.all, xlev = factor.levels, na.action=NULL)
        mod.matrix.all <- model.matrix(formula.rhs, data = mf.all, contrasts.arg = mod$contrasts)
        mf.all.rounded <- model.frame(Terms, predict.data.all.rounded, xlev = factor.levels, na.action=NULL)
        mod.matrix.all.rounded <- model.matrix(formula.rhs, data = mf.all.rounded, contrasts.arg = mod$contrasts)
    }
    else mod.matrix.all <- model.matrix(mod)
    wts <- weights(mod)
    if (is.null(wts)) wts <- rep(1, length(residuals(mod)))
    # Fixup.model.matrix() is a package helper -- presumably sets columns for
    # excluded predictors to typical values; confirm in the package source
    res <- Fixup.model.matrix(mod, mod.matrix, mod.matrix.all, X.mod, factor.cols, cnames, focal.predictors, excluded.predictors, typical, given.values,
        partial.residuals=partial.residuals, mod.matrix.all.rounded)
    mod.matrix <- if (partial.residuals) res$mod.matrix else res
    mod.matrix.cases <- if (partial.residuals) res$mod.matrix.all else NULL
    mod.matrix.cases.rounded <- if (partial.residuals) res$mod.matrix.all.rounded else NULL
    # look for aliased coefficients and remove those columns from mod.matrix
    mod.matrix <- mod.matrix[, !is.na(mod$coefficients)]
    effect <- off + mod.matrix %*% mod$coefficients[!is.na(mod$coefficients)]
    if (partial.residuals){
        mod.matrix.cases <- na.omit(mod.matrix.cases[, !is.na(mod$coefficients)])
        fitted <- as.vector(off + mod.matrix.cases %*% mod$coefficients[!is.na(mod$coefficients)])
        mod.matrix.cases.rounded <- na.omit(mod.matrix.cases.rounded[, !is.na(mod$coefficients)])
        fitted.rounded <- as.vector(off + mod.matrix.cases.rounded %*% mod$coefficients[!is.na(mod$coefficients)])
        # partial residuals = case-wise fitted values + working residuals
        res <- na.omit(residuals(mod, type="working"))
        partial.residuals.raw <- fitted + res
        partial.residuals.adjusted <- fitted.rounded + res
    }
    else {
        partial.residuals.raw <- partial.residuals.adjusted <- fitted.rounded <- fitted <- NULL
    }
    result <- list(term = paste(focal.predictors, collapse="*"), formula = formula(mod), response = response.name(mod),
        variables = x, fit = effect, x = predict.data[, 1:n.focal, drop=FALSE],
        x.all=predict.data.all.rounded, #[, 1:n.focal, drop=FALSE],
        model.matrix = mod.matrix, mod.matrix.all=mod.matrix.cases, data = X, discrepancy = 0, offset=off,
        fitted.rounded=fitted.rounded, fitted=fitted, partial.residuals.raw=partial.residuals.raw,
        partial.residuals.adjusted=partial.residuals.adjusted, x.var=x.var)
    # find empty cells, if any, and correct
    whichFact <- unlist(lapply(result$variables, function(x) x$is.factor))
    zeroes <- NULL
    if(sum(whichFact) > 1){
        nameFact <- names(whichFact)[whichFact]
        counts <- xtabs(as.formula( paste("~", paste(nameFact, collapse="+"))), model.frame(mod))
        zeroes <- which(counts == 0)
    }
    if(length(zeroes) > 0){
        # set fitted values to NA for factor combinations unobserved in the data
        levs <- expand.grid(lapply(result$variables, function(x) x$levels))
        good <- rep(TRUE, dim(levs)[1])
        for(z in zeroes){
            good <- good & apply(levs, 1, function(x) !all(x == levs[z, whichFact]))
        }
        result$fit[!good] <- NA
    }
    if (se) {
        # binomial/poisson families have fixed dispersion: use normal
        # quantiles; otherwise use t quantiles on the residual df
        if (any(family(mod)$family == c("binomial", "poisson"))) {
            # dispersion <- 1
            z <- qnorm(1 - (1 - confidence.level)/2)
        }
        else {
            # dispersion <- sum(wts * (residuals(mod))^2, na.rm=TRUE)/mod$df.residual
            z <- qt(1 - (1 - confidence.level)/2, df = mod$df.residual)
        }
        # old
        # V2 <- dispersion * summary.lm(mod)$cov
        # V1 <- vcov(mod)
        # V <- if (inherits(mod, "fakeglm"))
        # V1
        # else V2
        # end old, begin new August 2, 2014
        V <- vcov.(mod)
        # I can see no reason to use dispersion * summary.lm(mod)$cov
        # end new
        # delta-method covariance of the effects: X V X'
        eff.vcov <- mod.matrix %*% V %*% t(mod.matrix)
        rownames(eff.vcov) <- colnames(eff.vcov) <- NULL
        var <- diag(eff.vcov)
        result$vcov <- eff.vcov
        result$se <- sqrt(var)
        result$lower <- effect - z * result$se
        result$upper <- effect + z * result$se
        result$confidence.level <- confidence.level
        if(length(zeroes) > 0){
            result$se[!good] <- NA
            result$lower[!good] <- NA
            result$upper[!good] <- NA
        }
    }
    # default to the identity transformation when no link is supplied
    if (is.null(transformation$link) && is.null(transformation$inverse)) {
        transformation$link <- I
        transformation$inverse <- I
    }
    result$transformation <- transformation
    result$family <- family(mod)$family
    class(result) <- "eff"
    result
}

# Effect method for lme4 "mer" fits: converts the mixed model to a glm-like
# object via the package helper mer.to.glm() (KR selects the Kenward-Roger
# adjustment -- confirm in the helper's source), then delegates to Effect().
Effect.mer <- function(focal.predictors, mod, KR=FALSE, ...) {
    result <- Effect(focal.predictors, mer.to.glm(mod, KR=KR), ...)
    result$formula <- as.formula(formula(mod))
    result
}

# Effect method for modern lme4 "merMod" fits: same as Effect.mer().
Effect.merMod <- function(focal.predictors, mod, KR=FALSE, ...){
    Effect.mer(focal.predictors, mod, KR=KR, ...)
}

# Effect method for nlme "lme" fits: converts via the package helper
# lme.to.glm(), then delegates to Effect().
Effect.lme <- function(focal.predictors, mod, ...) {
    result <- Effect(focal.predictors, lme.to.glm(mod), ...)
    result$formula <- as.formula(formula(mod))
    result
}

# Effect method for nlme "gls" fits.  Fits a parallel lm to obtain the model
# structure, builds the prediction grid, and refits the gls on the effect
# model matrix to obtain a covariance matrix for the effects.
Effect.gls <- function (focal.predictors, mod, xlevels = list(), default.levels = NULL, given.values, vcov. = vcov,
    se = TRUE, confidence.level = 0.95, transformation = NULL, typical = mean, ...){
    if (missing(given.values)) given.values <- NULL
    else if (!all(which <- names(given.values) %in% names(coef(mod))))
        stop("given.values (", names(given.values[!which]), ") not in the model")
    formula.rhs <- formula(mod)[[3]]
    # re-evaluate the data used to fit the model from its call
    .data <- eval(mod$call$data)
    mod.lm <- lm(as.formula(mod$call$model), data=.data, na.action=na.exclude)
    model.components <- Analyze.model(focal.predictors, mod.lm, xlevels, default.levels, formula.rhs)
    excluded.predictors <- model.components$excluded.predictors
    predict.data <- model.components$predict.data
    factor.levels <- model.components$factor.levels
    factor.cols <- model.components$factor.cols
    n.focal <- model.components$n.focal
    x <- model.components$x
    X.mod <- model.components$X.mod
    cnames <- model.components$cnames
    X <- model.components$X
    formula.rhs <- formula(mod)[c(1, 3)]
    nrow.X <- nrow(X)
    # stack the observed data on top of the prediction grid so both share
    # factor codings, then split the model matrix back apart
    mf <- model.frame(formula.rhs, data=rbind(X[,names(predict.data),drop=FALSE], predict.data), xlev=factor.levels)
    mod.matrix.all <- model.matrix(formula.rhs, data=mf, contrasts.arg=mod$contrasts)
    mod.matrix <- mod.matrix.all[-(1:nrow.X),]
    mod.matrix <- Fixup.model.matrix(mod.lm, mod.matrix, model.matrix(mod.lm), X.mod, factor.cols, cnames, focal.predictors, excluded.predictors, typical, given.values)
    fit.1 <- na.omit(predict(mod))
    mod.2 <-
lm.fit(mod.matrix.all[1:nrow.X,], fit.1)
    class(mod.2) <- "lm"
    use <- !is.na(residuals(mod.lm))
    .data <- .data[use, ]
    # refit the gls with the effect model matrix as sole predictor so that
    # vcov() of the refit gives the coefficient covariance on this basis;
    # model.response.gls() is a package helper -- presumably extracts the
    # response; confirm in the package source
    .data$.y <- model.response.gls(mod)
    .data$.X <- mod.matrix.all[1:nrow.X, ]
    mod.3 <- update(mod, .y ~ .X - 1, data=.data)
    # check that the "safe" predictions from the refit track the original fit
    discrepancy <- 100*mean(abs(fitted(mod.2)- fit.1)/(1e-10 + mean(abs(fit.1))))
    if (discrepancy > 1e-3) warning(paste("There is a discrepancy of", round(discrepancy, 3),
        "percent \n in the 'safe' predictions used to generate effect", paste(focal.predictors, collapse="*")))
    effect <- mod.matrix %*% mod$coefficients
    result <- list(term = paste(focal.predictors, collapse="*"), formula = formula(mod), response = response.name(mod),
        variables = x, fit = effect, x = predict.data[, 1:n.focal, drop=FALSE],
        model.matrix = mod.matrix, data = X, discrepancy = discrepancy, offset=0)
    if (se){
        # residual df from the gls dimensions: N observations minus p coefficients
        df.res <- mod$dims[["N"]] - mod$dims[["p"]]
        z <- qt(1 - (1 - confidence.level)/2, df=df.res)
        mod.2$terms <- terms(mod)
        V <- vcov.(mod.3)
        # delta-method covariance of the effects: X V X'
        eff.vcov <- mod.matrix %*% V %*% t(mod.matrix)
        rownames(eff.vcov) <- colnames(eff.vcov) <- NULL
        var <- diag(eff.vcov)
        result$vcov <- eff.vcov
        result$se <- sqrt(var)
        result$lower <- effect - z*result$se
        result$upper <- effect + z*result$se
        result$confidence.level <- confidence.level
    }
    if (is.null(transformation$link) && is.null(transformation$inverse)){
        transformation$link <- I
        transformation$inverse <- I
    }
    result$transformation <- transformation
    class(result) <- "eff"
    result
}

# Effect method for nnet::multinom fits (multinomial logit models); computes
# fitted probabilities and logits (with optional confidence limits) for each
# response level over the focal-predictor grid.
Effect.multinom <- function(focal.predictors, mod, confidence.level=.95, xlevels=list(), default.levels=NULL, given.values, vcov.
= vcov, se=TRUE, typical=mean, ...){
    # (continuation of the Effect.multinom signature begun on the previous
    # original line)
    # Effects are computed on the individual-level logit scale and mapped to
    # probabilities; see Fox & Hong (2009), JSS 32(1).
    if (length(mod$lev) < 3) stop("effects for multinomial logit model only available for response levels > 2")
    # validate user-supplied fixed values for non-focal predictors
    if (missing(given.values)) given.values <- NULL
    else if (!all(which <- colnames(given.values) %in% names(coef(mod))))
        stop("given.values (", colnames(given.values[!which]),") not in the model")
    formula.rhs <- formula(mod)[c(1, 3)]
    # set up the grid of focal-predictor values and the fixed ("typical")
    # values of the remaining predictors
    model.components <- Analyze.model(focal.predictors, mod, xlevels, default.levels, formula.rhs)
    excluded.predictors <- model.components$excluded.predictors
    predict.data <- model.components$predict.data
    factor.levels <- model.components$factor.levels
    factor.cols <- model.components$factor.cols
    n.focal <- model.components$n.focal
    x <- model.components$x
    X.mod <- model.components$X.mod
    cnames <- model.components$cnames
    X <- model.components$X
    formula.rhs <- formula(mod)[c(1, 3)]
    Terms <- delete.response(terms(mod))
    mf <- model.frame(Terms, predict.data, xlev = factor.levels)
    mod.matrix <- model.matrix(formula.rhs, data = mf, contrasts.arg = mod$contrasts)
    # hold non-focal predictors at typical/given values
    X0 <- Fixup.model.matrix(mod, mod.matrix, model.matrix(mod), X.mod,
        factor.cols, cnames, focal.predictors,
        excluded.predictors, typical, given.values)
    resp.names <- make.names(mod$lev, unique=TRUE)
    resp.names <- c(resp.names[-1], resp.names[1]) # make the last level the reference level
    B <- t(coef(mod))    # model-matrix-columns x (levels - 1) coefficient matrix
    V <- vcov.(mod)
    m <- ncol(B) + 1     # number of response levels
    p <- nrow(B)         # number of model-matrix columns
    r <- p*(m - 1)       # total number of coefficients
    n <- nrow(X0)
    P <- Logit <- matrix(0, n, m)
    colnames(P) <- paste("prob.", resp.names, sep="")
    colnames(Logit) <- paste("logit.", resp.names, sep="")
    if (se){
        z <- qnorm(1 - (1 - confidence.level)/2)
        Lower.P <- Upper.P <- Lower.logit <- Upper.logit <- SE.P <- SE.logit <- matrix(0, n, m)
        colnames(Lower.logit) <- paste("L.logit.", resp.names, sep="")
        colnames(Upper.logit) <- paste("U.logit.", resp.names, sep="")
        colnames(Lower.P) <- paste("L.prob.", resp.names, sep="")
        colnames(Upper.P) <- paste("U.prob.", resp.names, sep="")
        colnames(SE.P) <- paste("se.prob.", resp.names, sep="")
        colnames(SE.logit) <- paste("se.logit.", resp.names, sep="")
    }
    # compute fitted logits/probabilities (and standard errors) one grid row
    # at a time via the package helper eff.mul()
    for (i in 1:n){
        res <- eff.mul(X0[i,], B, se, m, p, r, V) # compute effects
        P[i,] <- prob <- res$p # fitted probabilities
        Logit[i,] <- logit <- res$logits # fitted logits
        if (se){
            SE.P[i,] <- se.p <- res$std.err.p # std. errors of fitted probs
            SE.logit[i,] <- se.logit <- res$std.error.logits # std. errors of logits
            # confidence limits are formed on the logit scale and transformed
            Lower.P[i,] <- logit2p(logit - z*se.logit)
            Upper.P[i,] <- logit2p(logit + z*se.logit)
            Lower.logit[i,] <- logit - z*se.logit
            Upper.logit[i,] <- logit + z*se.logit
        }
    }
    resp.levs <- c(m, 1:(m-1)) # restore the order of the levels
    P <- P[, resp.levs]
    Logit <- Logit[, resp.levs]
    if (se){
        Lower.P <- Lower.P[, resp.levs]
        Upper.P <- Upper.P[, resp.levs]
        Lower.logit <- Lower.logit[, resp.levs]
        Upper.logit <- Upper.logit[, resp.levs]
        SE.P <- SE.P[, resp.levs]
        SE.logit <- SE.logit[, resp.levs]
    }
    result <- list(term=paste(focal.predictors, collapse="*"),
        formula=formula(mod), response=response.name(mod),
        y.levels=mod$lev, variables=x,
        x=predict.data[, focal.predictors, drop=FALSE],
        model.matrix=X0, data=X, discrepancy=0, model="multinom",
        prob=P, logit=Logit)
    if (se) result <- c(result, list(se.prob=SE.P, se.logit=SE.logit,
        lower.logit=Lower.logit, upper.logit=Upper.logit,
        lower.prob=Lower.P, upper.prob=Upper.P,
        confidence.level=confidence.level))
    # find empty cells, if any, and correct
    whichFact <- unlist(lapply(result$variables, function(x) x$is.factor))
    zeroes <- NULL
    if(sum(whichFact) > 1){
        nameFact <- names(whichFact)[whichFact]
        counts <- xtabs(as.formula( paste("~", paste(nameFact, collapse="+"))),
            model.frame(mod))
        zeroes <- which(counts == 0)
    }
    if(length(zeroes) > 0){
        # blank out fitted values for factor combinations unobserved in the data
        levs <- expand.grid(lapply(result$variables, function(x) x$levels))
        good <- rep(TRUE, dim(levs)[1])
        for(z in zeroes){
            good <- good & apply(levs, 1, function(x) !all(x == levs[z, whichFact]))
        }
        result$prob[!good, ] <- NA
        result$logit[!good, ] <- NA
        if (se){
            result$se.prob[!good, ] <- NA
            result$se.logit[!good, ] <- NA
# (continuation of Effect.multinom: still inside the empty-cell correction)
            result$lower.prob[!good, ] <- NA
            result$upper.prob[!good, ] <- NA
        }
    }
    class(result) <-'effpoly'
    result
}

# Effect() method for proportional-odds logit models from MASS::polr()
# (logistic method only).  Computes fitted category probabilities and
# cumulative logits over the focal-predictor grid; latent=TRUE instead
# returns effects on the underlying latent-response scale.
Effect.polr <- function(focal.predictors, mod, confidence.level=.95,
    xlevels=list(), default.levels=NULL, given.values, vcov.=vcov,
    se=TRUE, typical=mean, latent=FALSE, ...){
    if (mod$method != "logistic") stop('method argument to polr must be "logistic"')
    # validate user-supplied fixed values for non-focal predictors
    if (missing(given.values)) given.values <- NULL
    else if (!all(which <- names(given.values) %in% names(coef(mod))))
        stop("given.values (", names(given.values[!which]),") not in the model")
    formula.rhs <- formula(mod)[c(1, 3)]
    # set up the grid of focal-predictor values and the fixed ("typical")
    # values of the remaining predictors
    model.components <- Analyze.model(focal.predictors, mod, xlevels, default.levels, formula.rhs)
    excluded.predictors <- model.components$excluded.predictors
    predict.data <- model.components$predict.data
    factor.levels <- model.components$factor.levels
    factor.cols <- model.components$factor.cols
    n.focal <- model.components$n.focal
    x <- model.components$x
    X.mod <- model.components$X.mod
    cnames <- model.components$cnames
    X <- model.components$X
    Terms <- delete.response(terms(mod))
    mf <- model.frame(Terms, predict.data, xlev = factor.levels, na.action=NULL)
    mod.matrix <- model.matrix(formula.rhs, data = mf, contrasts.arg = mod$contrasts)
    X0 <- Fixup.model.matrix(mod, mod.matrix, model.matrix(mod), X.mod,
        factor.cols, cnames, focal.predictors,
        excluded.predictors, typical, given.values)
    resp.names <- make.names(mod$lev, unique=TRUE)
    # drop the intercept column: polr parameterizes thresholds (zeta) instead
    X0 <- X0[,-1, drop=FALSE]
    b <- coef(mod)
    p <- length(b) # corresponds to p - 1 in the text
    alpha <- - mod$zeta # intercepts are negatives of thresholds
    z <- qnorm(1 - (1 - confidence.level)/2)
    result <- list(term=paste(focal.predictors, collapse="*"),
        formula=formula(mod), response=response.name(mod),
        y.levels=mod$lev, variables=x,
        x=predict.data[, focal.predictors, drop=FALSE],
        model.matrix=X0, data=X, discrepancy=0, model="polr")
    if (latent){
        # effects on the latent continuous response underlying the ordinal one
        res <- eff.latent(X0, b, vcov.(mod)[1:p, 1:p], se)
        result$fit <- res$fit
        if (se){
            result$se <- res$se
            result$lower <- result$fit - z*result$se
            result$upper <- result$fit + z*result$se
            result$confidence.level <- confidence.level
        }
        transformation <- list()
        transformation$link <- I
        transformation$inverse <- I
        result$transformation <- transformation
        result$thresholds <- -alpha
        class(result) <- c("efflatent", "eff")
        return(result)
    }
    m <- length(alpha) + 1 # number of response levels
    r <- m + p - 1         # total number of parameters
    # reorder the covariance matrix so intercepts precede slopes
    indices <- c((p+1):r, 1:p)
    V <- vcov.(mod)[indices, indices]
    for (j in 1:(m-1)){ # fix up the signs of the covariances
        V[j,] <- -V[j,] # for the intercepts
        V[,j] <- -V[,j]}
    n <- nrow(X0)
    P <- Logit <- matrix(0, n, m)
    colnames(P) <- paste("prob.", resp.names, sep="")
    colnames(Logit) <- paste("logit.", resp.names, sep="")
    if (se){
        Lower.logit <- Upper.logit <- Lower.P <- Upper.P <- SE.P <- SE.Logit <- matrix(0, n, m)
        colnames(Lower.logit) <- paste("L.logit.", resp.names, sep="")
        colnames(Upper.logit) <- paste("U.logit.", resp.names, sep="")
        colnames(Lower.P) <- paste("L.prob.", resp.names, sep="")
        colnames(Upper.P) <- paste("U.prob.", resp.names, sep="")
        colnames(SE.P) <- paste("se.prob.", resp.names, sep="")
        colnames(SE.Logit) <- paste("se.logit.", resp.names, sep="")
    }
    # compute fitted logits/probabilities (and standard errors) one grid row
    # at a time via the package helper eff.polr()
    for (i in 1:n){
        res <- eff.polr(X0[i,], b, alpha, V, m, r, se) # compute effects
        P[i,] <- prob <- res$p # fitted probabilities
        Logit[i,] <- logit <- res$logits # fitted logits
        if (se){
            SE.P[i,] <- se.p <- res$std.err.p # std. errors of fitted probs
            SE.Logit[i,] <- se.logit <- res$std.error.logits # std. errors of logits
            # confidence limits are formed on the logit scale and transformed
            Lower.P[i,] <- logit2p(logit - z*se.logit)
            Upper.P[i,] <- logit2p(logit + z*se.logit)
            Lower.logit[i,] <- logit - z*se.logit
            Upper.logit[i,] <- logit + z*se.logit
        }
    }
    result$prob <- P
    result$logit <- Logit
    if (se) result <- c(result, list(se.prob=SE.P, se.logit=SE.Logit,
        lower.logit=Lower.logit, upper.logit=Upper.logit,
        lower.prob=Lower.P, upper.prob=Upper.P,
        confidence.level=confidence.level))
    class(result) <-'effpoly'
    result
}

# Default Effect() method for models with linear predictors (lm, glm, and
# model classes without their own method); the signature continues on the
# next original line.
Effect.default <- function(focal.predictors, mod, xlevels = list(),
    default.levels = NULL, given.values, vcov.
= vcov, se = TRUE, confidence.level = 0.95,
    transformation = list(link = I, inverse = I), typical = mean,
    offset = mean, ...){
    # (continuation of the Effect.default signature begun on the previous
    # original line)
    # validate user-supplied fixed values for non-focal predictors
    if (missing(given.values)) given.values <- NULL
    else if (!all(which <- names(given.values) %in% names(coef(mod))))
        stop("given.values (", names(given.values[!which]), ") not in the model")
    # resolve the offset: either a fixed number, or a summary function applied
    # to the model's offset (0 when the model has none)
    off <- if (is.numeric(offset) && length(offset) == 1) offset
    else if (is.function(offset)) {
        mod.off <- model.offset(model.frame(mod))
        if (is.null(mod.off)) 0 else offset(mod.off)
    }
    else stop("offset must be a function or a number")
    formula.rhs <- formula(mod)[[3]]
    # set up the grid of focal-predictor values and the fixed ("typical")
    # values of the remaining predictors
    model.components <- Analyze.model(focal.predictors, mod, xlevels, default.levels, formula.rhs)
    excluded.predictors <- model.components$excluded.predictors
    predict.data <- model.components$predict.data
    factor.levels <- model.components$factor.levels
    factor.cols <- model.components$factor.cols
    n.focal <- model.components$n.focal
    x <- model.components$x
    X.mod <- model.components$X.mod
    cnames <- model.components$cnames
    X <- model.components$X
    formula.rhs <- formula(mod)[c(1, 3)]
    Terms <- delete.response(terms(mod))
    mf <- model.frame(Terms, predict.data, xlev = factor.levels, na.action=NULL)
    mod.matrix <- model.matrix(formula.rhs, data = mf, contrasts.arg = mod$contrasts)
    mod.matrix <- Fixup.model.matrix(mod, mod.matrix, model.matrix(mod), X.mod,
        factor.cols, cnames, focal.predictors,
        excluded.predictors, typical, given.values)
    # drop columns for aliased (NA) coefficients before forming the linear predictor
    mod.matrix <- mod.matrix[, !is.na(coef(mod))]
    effect <- off + mod.matrix %*% mod$coefficients[!is.na(coef(mod))]
    result <- list(term = paste(focal.predictors, collapse="*"),
        formula = formula(mod), response = response.name(mod),
        variables = x, fit = effect,
        x = predict.data[, 1:n.focal, drop=FALSE],
        model.matrix = mod.matrix, data = X, discrepancy = 0, offset=off)
    # find empty factor-combination cells, if any, and blank out their fits
    whichFact <- unlist(lapply(result$variables, function(x) x$is.factor))
    zeroes <- NULL
    if(sum(whichFact) > 1){
        nameFact <- names(whichFact)[whichFact]
        counts <- xtabs(as.formula( paste("~", paste(nameFact, collapse="+"))),
            model.frame(mod))
        zeroes <- which(counts == 0)
    }
    if(length(zeroes) > 0){
        levs <- expand.grid(lapply(result$variables, function(x) x$levels))
        good <- rep(TRUE, dim(levs)[1])
        for(z in zeroes){
            good <- good & apply(levs, 1, function(x) !all(x == levs[z, whichFact]))
        }
        result$fit[!good] <- NA
    }
    if (se) {
        # normal-based pointwise confidence limits on the linear-predictor scale
        z <- qnorm(1 - (1 - confidence.level)/2)
        V <- vcov.(mod)
        eff.vcov <- mod.matrix %*% V %*% t(mod.matrix)
        rownames(eff.vcov) <- colnames(eff.vcov) <- NULL
        var <- diag(eff.vcov)
        result$vcov <- eff.vcov
        result$se <- sqrt(var)
        result$lower <- effect - z * result$se
        result$upper <- effect + z * result$se
        result$confidence.level <- confidence.level
        if(length(zeroes) > 0){
            result$se[!good] <- NA
            result$lower[!good] <- NA
            result$upper[!good] <- NA
        }
    }
    result$transformation <- transformation
    class(result) <- "eff"
    result
}
effects/R/Effect.mlm.R0000644000176200001440000000303512310147362014175 0ustar liggesusers
# Calculate Effects for term(s) in a Multivariate Linear Model
# 2014-03-12: Introduced allEffects.mlm(). J. Fox

# Effect() method for multivariate linear models: computes the effect for one
# or more response columns by refitting a univariate model per response.
# Returns a single effect object for one response, or an "efflist" of
# per-response effects otherwise.
Effect.mlm <- function(focal.predictors, mod, response, ...) {
    if (missing(response)) {
        # default: all response columns
        mod.frame <- model.frame(mod)
        response <- colnames(model.response(mod.frame))
    }
    else if (is.numeric(response)) {
        # numeric indices are translated to response-column names
        mod.frame <- model.frame(mod)
        response.names <- colnames(model.response(mod.frame))
        response <- response.names[response]
    }
    if (length(response)==1) {
        # refit as a univariate model for the selected response
        mod.1 <- update(mod, as.formula(paste(response, " ~ .")))
        result <- Effect(focal.predictors, mod.1, ...)
    }
    else {
        result <- as.list(NULL)
        for (resp in response) {
            mod.1 <- update(mod, as.formula(paste(resp, " ~ .")))
            lab <- resp
            result[[lab]] <- Effect(focal.predictors, mod.1, ...)
        }
        class(result) <- "efflist"
    }
    result
}

# allEffects() method for mlm objects: dispatches to the default method and
# reclasses the result so mlm-specific print/summary/plot methods apply.
allEffects.mlm <- function(mod, ...){
    result <- NextMethod()
    class(result) <- "mlm.efflist"
    result
}

# plot() method for "mlm.efflist": flattens to an ordinary "efflist" and
# plots it (the closing brace of this function is supplied by the next
# original line)
plot.mlm.efflist <- function(x, ...){
    x <- do.call(c, x)
    class(x) <- "efflist"
    plot(x, ...)
}

# summary() method for "mlm.efflist": flattens to an "efflist" and prints a
# summary for each per-response effect, labelled by its response.
summary.mlm.efflist <- function(object, ...){
    object <- do.call(c, object)
    for (effect in names(object)){
        cat("\n\nResponse:", object[[effect]]$response, "\n")
        print(summary(object[[effect]], ...))
    }
}

# print() method for "mlm.efflist": prints each per-response effect, labelled
# by its response; returns the flattened list invisibly.
print.mlm.efflist <- function(x, ...){
    x <- do.call(c, x)
    for (effect in names(x)){
        cat("\n\nResponse:", x[[effect]]$response, "\n")
        print(x[[effect]], ...)
    }
    invisible(x)
}
effects/MD50000644000176200001440000000331312654472141012207 0ustar liggesusers
9babab4452646b150015dec3888e93d1 *DESCRIPTION
1abbab63bfd9f409265bf62f443bb09e *NAMESPACE
3c18e4d569c5078df67b72d66914a07d *NEWS
5813199c8d50d68dd051d43339e7adf9 *R/Effect.R
fd5534d1b50f0256571b3e20a4211520 *R/Effect.mlm.R
c020ab794be53beba8b33f354c0fa04f *R/effects.R
333a68e50375fc19512396135be9ed3c *R/effectsclmm.R
82d6651fdaa4d2eedf50cd9242d3f712 *R/effectsmer.R
1403f40a00195b3a06fdcb8c680d2fdf *R/effectspoLCA.R
4485bfda834494fd0c85641d0652c36e *R/plot-methods.R
b210448fcd1e33be31f94bc8316cdc05 *R/plot.effpoly.R
0bb21fdd0b65a9093bdc583428a82600 *R/summary-print-methods.R
9c18eaddb0e3e864e33bdf5969d11717 *R/utilities.R
b68033b2f2ddc5020875693045fec265 *data/Arrests.rda
a545d973c61588f5ca78894cb38b4319 *data/BEPS.rda
cb8193a7b56ada2c8482825b80317cd3 *data/Cowles.rda
ff3a8d5a0670d05b1a1b36a6315ec18c *data/Hartnagel.rda
eb70cef30f5e1f72b0f52145b8119517 *data/Prestige.rda
67035a606a0f9a31a23a5196e2873a6f *data/TitanicSurvival.rda
68b5153cab85b6f874edcf7abd941f4a *data/WVS.rda
3e49132323090d5f7aac873dc4a78b39 *data/Wells.rda
809fa4f25bbbd3e6d146c26d7ae3907e *inst/CHANGES
e6b77c2338408b6fdf1a617eaa014a2b *inst/CITATION
2cf51c803a105bfe15c89e1d38075dd4 *man/Arrests.Rd
8a5faaf5977b8922131c28944958e8bf *man/BEPS.Rd
8751e2c091279ca02cfac5845b196c77 *man/Cowles.Rd
73d76fd4ac46673ccbdbd07b8e199300 *man/Hartnagel.Rd
3f0741a1feb35e53cdc4fc6da672c28e *man/Prestige.Rd
a2697326c6634deeba023eb2db80ffea *man/Titanic.Rd
3ea67e3d3d057bd0eae08ca5e4f2a0ad *man/WVS.Rd
18f83ed88afe8b1ecae2dbc3dc986b66 *man/Wells.Rd
50cea9d1ad78eccee1fe51783ea0933c *man/effect.Rd 03336c030e40258fc3316c4cb0c707d1 *man/effects-deprecated.Rd e4354fddbebb2b2d2b05767af3592d92 *man/effects-package.Rd 69766f376830496012f1f0865b4e0f5a *man/summary.effect.Rd effects/DESCRIPTION0000644000176200001440000000264112654472141013410 0ustar liggesusersPackage: effects Version: 3.0-6 Date: 2016-02-02 Title: Effect Displays for Linear, Generalized Linear, and Other Models Authors@R: c(person("John", "Fox", role = c("aut", "cre"), email = "jfox@mcmaster.ca"), person("Sanford", "Weisberg", role = "aut", email = "sandy@umn.edu"), person("Michael", "Friendly", role = "aut", email = "friendly@yorku.ca"), person("Jangman", "Hong", role = "aut"), person("Robert", "Andersen", role = "ctb"), person("David", "Firth", role = "ctb"), person("Steve", "Taylor", role = "ctb")) Suggests: pbkrtest (>= 0.4-4), nlme, MASS, poLCA, heplots, splines, ordinal Imports: lme4, nnet, lattice, grid, colorspace, graphics, grDevices, stats, utils LazyLoad: yes LazyData: yes Description: Graphical and tabular effect displays, e.g., of interactions, for various statistical models with linear predictors. 
License: GPL (>= 2) URL: http://www.r-project.org, http://socserv.socsci.mcmaster.ca/jfox/ Author: John Fox [aut, cre], Sanford Weisberg [aut], Michael Friendly [aut], Jangman Hong [aut], Robert Andersen [ctb], David Firth [ctb], Steve Taylor [ctb] Maintainer: John Fox Repository: CRAN Repository/R-Forge/Project: effects Repository/R-Forge/Revision: 215 Repository/R-Forge/DateTimeStamp: 2016-02-02 13:48:13 Date/Publication: 2016-02-03 22:42:57 NeedsCompilation: no Packaged: 2016-02-02 14:10:51 UTC; rforge Depends: R (>= 2.10) effects/man/0000755000176200001440000000000012654133545012454 5ustar liggesuserseffects/man/summary.effect.Rd0000644000176200001440000005516312570102253015672 0ustar liggesusers\name{summary.eff} \alias{print.eff} \alias{print.effpoly} \alias{print.efflatent} \alias{print.efflist} \alias{print.mlm.efflist} \alias{print.summary.eff} \alias{summary.eff} \alias{summary.effpoly} \alias{summary.efflatent} \alias{summary.efflist} \alias{summary.mlm.efflist} \alias{plot.eff} \alias{plot.effpoly} \alias{plot.efflist} \alias{plot.mlm.efflist} \alias{setStrip} \alias{restoreStrip} \alias{[.efflist} \title{Summarizing, Printing, and Plotting Effects} \description{ \code{summary}, \code{print}, \code{plot}, and \code{[} methods for \code{eff}, \code{effpoly}, \code{efflist}, and \code{mlm.efflist} objects. } \usage{ \method{print}{eff}(x, type=c("response", "link"), ...) \method{print}{effpoly}(x, type=c("probability", "logits"), ...) \method{print}{efflatent}(x, ...) \method{print}{efflist}(x, ...) \method{print}{mlm.efflist}(x, ...) \method{print}{summary.eff}(x, ...) \method{summary}{eff}(object, type=c("response", "link"), ...) \method{summary}{effpoly}(object, type=c("probability", "logits"), ...) \method{summary}{efflatent}(object, ...) \method{summary}{efflist}(object, ...) \method{summary}{mlm.efflist}(object, ...) 
\method{plot}{eff}(x, x.var, z.var=which.min(levels), multiline=is.null(x$se), rug=TRUE, xlab, ylab, main=paste(effect, "effect plot"), colors=palette(), symbols=1:length(colors), lines=1:length(colors), cex=1.5, lwd=2, ylim, xlim=NULL, factor.names=TRUE, ci.style, band.transparency=0.15, band.colors=colors, type=c("rescale", "response", "link"), ticks=list(at=NULL, n=5), alternating=TRUE, rotx=0, roty=0, grid=FALSE, layout, rescale.axis, transform.x=NULL, ticks.x=NULL, key.args=NULL, row=1, col=1, nrow=1, ncol=1, more=FALSE, use.splines=TRUE, partial.residuals=c("adjusted", "raw"), show.fitted=FALSE, residuals.color="blue", residuals.pch=1, residuals.cex=1, smooth.residuals=TRUE, residuals.smooth.color=residuals.color, span=2/3, ...) \method{plot}{effpoly}(x, type=c("probability", "logit"), x.var=which.max(levels), rug=TRUE, xlab, ylab=paste(x$response, " (", type, ")", sep=""), main=paste(effect, "effect plot"), colors, symbols, lines, cex=1.5, lwd=2, factor.names=TRUE, ci.style, band.colors, band.transparency=0.3, style=c("lines", "stacked"), confint=(style == "lines" && !is.null(x$confidence.level)), transform.x=NULL, ticks.x=NULL, xlim=NULL, ylim, rotx=0, alternating=TRUE, roty=0, grid=FALSE, layout, key.args=NULL, row=1, col=1, nrow=1, ncol=1, more=FALSE, use.splines=TRUE, ...) \method{plot}{efflist}(x, selection, rows, cols, ask=FALSE, graphics=TRUE, ...) \method{plot}{mlm.efflist}(x, ...) setStrip(bg=3, fg="black", clip=c("off", "on")) restoreStrip(saved) \method{[}{efflist}(x, ...) 
} \arguments{ \item{x}{an object of class \code{"eff"}, \code{"effpoly"}, \code{"efflist"}, \code{"mlm.efflist"}, or \code{"summary.eff"}, as appropriate.} \item{object}{an object of class \code{"eff"}, \code{"effpoly"}, \code{"efflist"}, or \code{"mlm.efflist"}, as appropriate.} \item{type}{for printing or summarizing linear and generalized linear models, if \code{"response"} (the default), effects are printed on the scale of the response variable; if \code{"link"}, effects are printed on the scale of the linear predictor. For plotting linear or generalized linear models, \code{"rescale"} (the default) plots the vertical axis on the link scale (e.g., the logit scale for a logit model) but labels the axis on the response scale (e.g., the probability scale for a logit model); \code{"response"} plots and labels the vertical axis on the scale of the response (e.g., the probability scale for a logit model); and \code{"link"} plots and labels the vertical axis on the scale of the link (e.g., the logit scale for a logit model). For polytomous logit models, this argument takes either \code{"probability"} or \code{"logit"}, with the former as the default.} \item{rescale.axis}{this argument is deprecated --- use the \code{type} argument instead. Setting \code{rescale.axis=TRUE} is equivalent to \code{type="rescale"}; setting \code{rescale.axis=FALSE} is equivalent to \code{type="response"}. If specified, \code{rescale.axis} will override \code{type}.} \item{x.var}{the index (number) or quoted name of the covariate or factor to place on the horizontal axis of each panel of the effect plot. The default is the predictor with the largest number of levels or values.} \item{z.var}{for linear, generalized linear or mixed models, the index (number) or quoted name of the covariate or factor for which individual lines are to be drawn in each panel of the effect plot. The default is the predictor with the smallest number of levels or values.
This argument is only used if \code{multiline = TRUE}.} \item{multiline}{for linear, generalized linear or mixed models, if \code{TRUE}, each panel of the display represents combinations of values of two predictors, with one predictor (corresponding to \code{x.var}) on the horizontal axis, and the other (corresponding to \code{z.var}) used to define lines in the graph; defaults to \code{TRUE} if there are no standard errors in the object being plotted, and \code{FALSE} otherwise.} \item{confint}{plot point-wise confidence bands around fitted effects (for multinomial and proportional-odds logit models); defaults to \code{TRUE}, in which case separate panels are used for different response levels.} \item{rug}{if \code{TRUE}, the default, a rug plot is shown giving the \emph{marginal} distribution of the predictor on the horizontal axis, if this predictor is a covariate. The rug plot is suppressed if partial residuals are plotted.} \item{xlab}{the label for the horizontal axis of the effect plot; if missing, the function will use the name of the predictor on the horizontal axis.} \item{ylab}{the label for the vertical axis of the effect plot; the default is constructed from the name of the response variable for the model from which the effect was computed.} \item{main}{the title for the plot, printed at the top; the default title is constructed from the name of the effect.} \item{colors}{\code{colors[1]} is used to plot effects, \code{colors[2]} to plot confidence limits when \code{ci.style} is not equal to \code{"bands"}. In a multiline plot, the successive \code{colors} correspond to the levels of the \code{z.var} covariate or factor. In a stacked plot or a plot without confidence bands for a multinomial or proportional-odds logit model, the successive \code{colors} correspond to the levels of the response factor. In all but stacked plots, \code{colors} defaults to \code{palette()}.
If \code{colors} is shorter than the number of levels, then it is recycled, so \code{colors="black"} will use black for all levels. For stacked multinomial-logit plots, \code{colors} defaults to \code{rainbow_hcl(levels)}, where \code{levels} is the number of levels of the response variable; for stacked proportional-odds model plots, \code{colors} defaults to \code{sequential_hcl(levels)}. \code{colors} does not recycle for stacked plots. \bold{Warning:} This argument \emph{cannot} be abbreviated to \code{col}, which is used for a different purpose (see below).} \item{symbols, lines}{corresponding to the levels of the \code{z.var} covariate or factor on a multiline plot, or to the successive levels of the response factor in a line plot for a polytomous logit model. These arguments are used only if \code{multiline = TRUE} or for polytomous logit models where the effects are plotted without confidence bands; in these cases a legend is drawn at the top of the display. If these arguments are too short they are recycled.} \item{cex}{character expansion for plotted symbols; default is \code{1.5}.} \item{lwd}{line width for fitted lines.} \item{ylim}{2-element vector containing the lower and upper limits of the vertical axes; if \code{NULL}, the default, then the vertical axes are scaled from the data.} \item{xlim}{a named list of 2-element vectors, with the names corresponding to numeric predictors; if a numeric predictor is in the list, then when it appears on the horizontal axis, the axis limits will be taken from the corresponding vector; if a predictor is not in the list, or if the argument is \code{NULL} (the default), then the horizontal axis limits are computed from the data.} \item{factor.names}{a logical value, default \code{TRUE}, that controls the inclusion of factor names in conditioning-variable labels.} \item{ci.style}{confidence bounds can be indicated using error bars, using lines or confidence bands, depending on the plot type.
For single line plots the default is \code{"bars"} for factors and \code{"bands"} for variates. Style \code{"lines"} can also be used for either of these. For multiline plots, the default is \code{"none"} for no confidence bounds, but style \code{"bars"} or \code{"bands"} can also be used. For a variate the option \code{"bars"} displays the error bars at each of the \code{levels} points for which the horizontal variable was evaluated.} \item{band.colors}{A vector of colors for the color of the confidence band with \code{ci.style="bands"}. The default is \code{band.colors=colors}. For plots with one line, setting \code{band.colors="red"} produces a pleasing result, even if color provides no additional information in this case. If this argument is too short it is recycled.} \item{band.transparency}{For \code{ci.style="bands"}, the alpha transparency of the fill color. Not all graphic devices support transparency.} \item{style}{(for multinomial or proportional-odds logit models) \code{"lines"} (the default) for a line plot, or \code{"stacked"} for a stacked-bar or stacked-area plot. In the latter case only fitted probabilities may be plotted and confidence envelopes cannot be shown.} \item{ticks}{a two-item list controlling the placement of tick marks on the vertical axis, with elements \code{at} and \code{n}. If \code{at=NULL} (the default), the program attempts to find `nice' locations for the ticks, and the value of \code{n} (default, \code{5}) gives the approximate number of tick marks desired; if \code{at} is non-\code{NULL}, then the value of \code{n} is ignored.} \item{ticks.x}{a named list of two-item lists controlling the placement of tick marks on the horizontal axis. Each list element is named for a numeric predictor in the model, and each sublist has elements \code{at} or \code{n}, as for the \code{ticks} argument.
If a predictor doesn't appear in the list, or if \code{ticks.x} is \code{NULL} (the default), then the tick marks are computed by the function.} \item{transform.x}{transformations to be applied to the horizontal axis, in the form of a named list, each of whose elements is itself a list of two functions, with sublist element names \code{trans} and \code{inverse}. The names of the list elements are numeric predictors in the model whose axes are to be transformed; the \code{trans} function is applied to the values of the predictor, and \code{inverse} is used for computing proper axis tick labels. If a numeric predictor is missing from \code{transform.x} then its axis is not transformed; if the argument is \code{NULL} (the default), then no predictor axes are transformed.} \item{alternating}{if \code{TRUE} (the default), the tick labels alternate by panels in multi-panel displays from left to right and top to bottom; if \code{FALSE}, tick labels appear at the bottom and on the left.} \item{rotx, roty}{rotation angles for the horizontal and vertical tick marks, respectively. Default is 0.} \item{grid}{if \code{TRUE}, add grid lines to the plot. Default is FALSE.} \item{layout}{the \code{layout} argument to the \pkg{lattice} function \code{\link{xyplot}} (or, in some cases \code{\link{densityplot}}), which is used to draw the effect display; if not specified, the plot will be formatted so that it appears on a single page.} \item{key.args}{additional arguments to be passed to the \code{key} trellis argument to \code{\link{xyplot}} or \code{\link{densityplot}}, e.g., to position the key (legend) in the plotting region; may also be used to modify the default behavior of the key.} \item{row, col, nrow, ncol, more}{These arguments are used to graph an effect as part of an array of plots; \code{row}, \code{col}, \code{nrow}, and \code{ncol} are used to compose the \code{split} argument and \code{more} the \code{more} argument to \code{\link{print.trellis}}. 
Normally these arguments are not set by the user, but by \code{plot.efflist}. \bold{Warning:} Note that \code{col} is \emph{not} used to specify colors; use \code{colors} instead (see above).} \item{selection}{the optional index (number) or quoted name of the effect in an effect list to be plotted; if not supplied, a menu of high-order terms is presented or all effects are plotted.} \item{rows, cols}{Number of rows and columns in the ``meta-array'' of plots produced for an \code{efflist} object; if either argument is missing, then the meta-layout will be computed by the \code{plot} method.} \item{ask}{if \code{selection} is not supplied and \code{ask} is \code{TRUE}, a menu of high-order terms is presented; if \code{ask} is \code{FALSE} (the default), effects for all high-order terms are plotted in an array.} \item{graphics}{if \code{TRUE} (the default), then the menu of terms to plot is presented in a dialog box rather than as a text menu.} \item{use.splines}{If \code{TRUE}, the default, then any lines drawn when the horizontal axis is not a factor use interpolating splines computed by the \code{\link{spline}} function. If FALSE, then linear interpolation is used. This argument is ignored if the horizontal axis is a factor.} \item{partial.residuals}{use \code{"adjusted"} partial residuals, computed at the panel-rounded values of the focal predictors not on the horizontal axis of each panel, or \code{"raw"} partial residuals, computed at the actual values of all focal predictors; the default is \code{"adjusted"}, and this argument is effective only when partial residuals are included in the effect object to be plotted --- see \code{\link{Effect}}.} \item{smooth.residuals}{whether to show a \code{\link{loess}} smooth of the partial residuals, if they are present; the default is \code{TRUE}.
For a non-Gaussian \code{glm} model, a non-robust \code{loess} smooth is used; for a \code{lm} model or a Gaussian \code{glm} model, a robust smooth is employed.} \item{span}{of the \code{\link{xyplot}} smoother to be applied to partial residuals; the default is \code{2/3}.} \item{show.fitted}{if partial residuals are present in the effect object, also plot the partial fitted values (which will be shown as filled circles).} \item{residuals.color}{color for plotting partial residuals (default \code{"blue"}).} \item{residuals.smooth.color}{color for plotting the smooth of the partial residuals; the default is the \code{residuals.color}.} \item{residuals.pch}{plotting symbol for partial residuals (default \code{1}, open circles).} \item{residuals.cex}{character expansion (relative size) for symbols plotting partial residuals (default is \code{1}).} \item{bg}{if a single numeric value (the default is \code{3}), the color of the strips at the tops of lattice panels are set to gray scale, with the number of gradations, if there is more than one conditioning variable, corresponding to the value given (which will be rounded to a whole number). This argument can also be a vector of colors, specified in any manner recognized in R (e.g., by name).} \item{fg}{foreground color or colors for the strips at the top of lattice panels (the default is \code{"black"}); can be a single value or a vector of values of the same length as \code{bg}.} \item{clip}{\code{"off"} or \code{"on"}, determines whether or not conditioning values in the strips at the top of lattice panels are clipped on the left and right.
The normal lattice default is \code{"on"}; the default in \code{setStrip} is \code{"off"}, allowing the lines representing numeric conditioning values to be displayed more clearly at the extreme left and right of strips.} \item{saved}{a set of lattice strip specifications returned by \code{setStrip}.} \item{...}{arguments to be passed down.} } \details{ In a generalized linear model, by default, the \code{print} and \code{summary} methods for \code{eff} objects print the computed effects on the scale of the response variable using the inverse of the link function. In a logit model, for example, this means that the effects are expressed on the probability scale. By default, effects in a GLM are plotted on the scale of the linear predictor, but the vertical axis is labelled on the response scale. This preserves the linear structure of the model while permitting interpretation on what is usually a more familiar scale. This approach may also be used with linear models, for example to display effects on the scale of the response even if the data are analyzed on a transformed scale, such as log or square-root. When a factor is on the x-axis, the \code{plot} method for \code{eff} objects connects the points representing the effect by line segments, creating a response ``profile.'' If you wish to suppress these lines, add the argument \code{lty=0} to the call to \code{plot} (see the examples). In a polytomous (multinomial or proportional-odds) logit model, by default effects are plotted on the probability scale; they may be alternatively plotted on the scale of the individual-level logits. The \code{setStrip} and \code{restoreStrip} functions modify the strips that appear in subsequent lattice plots, including those produced by functions in the \pkg{effects} package. 
The default call \code{setStrip()} provides monochrome (rather than the lattice-default colored) strips with up to 3 gray-scale values corresponding to 3 conditioning variables; clipping at the left and right of strips is also turned off by default by \code{setStrip}. \code{restoreStrip} may be used to reset lattice strips to previously saved parameters returned by \code{setStrip}. } \value{ The \code{summary} method for \code{"eff"} objects returns a \code{"summary.eff"} object with the following components (those pertaining to confidence limits need not be present): \item{header}{a character string to label the effect.} \item{effect}{an array containing the estimated effect.} \item{lower.header}{a character string to label the lower confidence limits.} \item{lower}{an array containing the lower confidence limits.} \item{upper.header}{a character string to label the upper confidence limits.} \item{upper}{an array containing the upper confidence limits.} The \code{setStrip} function invisibly returns a list that can supply the argument of the \code{restoreStrip} function to restore the previous lattice strip specification. The \code{[} method for \code{"efflist"} objects is used to subset an \code{"efflist"} object and returns an object of the same class. 
} \author{John Fox \email{jfox@mcmaster.ca} and Jangman Hong.} \seealso{\code{\link{effect}}, \code{\link{allEffects}}, \code{\link{xyplot}}, \code{\link{densityplot}}, \code{\link{print.trellis}}, \code{\link{loess}}, \code{\link[colorspace]{rainbow_hcl}}, \code{\link[colorspace:rainbow_hcl]{sequential_hcl}}} \examples{ # also see examples in ?effect mod.cowles <- glm(volunteer ~ sex + neuroticism*extraversion, data=Cowles, family=binomial) eff.cowles <- allEffects(mod.cowles, xlevels=list(extraversion=seq(0, 24, 6))) eff.cowles as.data.frame(eff.cowles[[2]]) # neuroticism*extraversion interaction plot(eff.cowles, 'sex', ylab="Prob(Volunteer)", grid=TRUE, rotx=90, lty=0) .save.strip <- setStrip() plot(eff.cowles, 'neuroticism:extraversion', ylab="Prob(Volunteer)", ticks=list(at=c(.1,.25,.5,.75,.9))) \donttest{ # change color of the confidence bands to 'black' with .15 transparency plot(eff.cowles, 'neuroticism:extraversion', ylab="Prob(Volunteer)", ticks=list(at=c(.1,.25,.5,.75,.9)), band.colors="red", band.transparency=.3) plot(eff.cowles, 'neuroticism:extraversion', multiline=TRUE, ylab="Prob(Volunteer)", key.args = list(x = 0.75, y = 0.75, corner = c(0, 0))) # use probability scale in place of logit scale, all lines are black. 
plot(eff.cowles, 'neuroticism:extraversion', multiline=TRUE, ylab="Prob(Volunteer)", key.args = list(x = 0.75, y = 0.75, corner = c(0, 0)), colors="black", lines=1:8, ci.style="bands", type="response", band.colors=palette()) plot(effect('sex:neuroticism:extraversion', mod.cowles, xlevels=list(extraversion=seq(0, 24, 6))), multiline=TRUE) plot(effect('sex:neuroticism:extraversion', mod.cowles, xlevels=list(extraversion=seq(0, 24, 6))), multiline=TRUE, type="response", ci.style="bands") } if (require(nnet)){ mod.beps <- multinom(vote ~ age + gender + economic.cond.national + economic.cond.household + Blair + Hague + Kennedy + Europe*political.knowledge, data=BEPS) \donttest{ plot(effect("Europe*political.knowledge", mod.beps, xlevels=list(political.knowledge=0:3))) } plot(effect("Europe*political.knowledge", mod.beps, xlevels=list(political.knowledge=0:3), given.values=c(gendermale=0.5)), style="stacked", colors=c("blue", "red", "orange"), rug=FALSE) } if (require(MASS)){ mod.wvs <- polr(poverty ~ gender + religion + degree + country*poly(age,3), data=WVS) plot(effect("country*poly(age, 3)", mod.wvs)) \donttest{ plot(effect("country*poly(age, 3)", mod.wvs), style="stacked", colors=c("gray75", "gray50", "gray25")) plot(effect("country*poly(age, 3)", latent=TRUE, mod.wvs)) } } mod.pres <- lm(prestige ~ log(income, 10) + poly(education, 3) + poly(women, 2), data=Prestige) eff.pres <- allEffects(mod.pres, default.levels=50) \donttest{ plot(eff.pres) plot(eff.pres[1:2]) } plot(eff.pres[1], transform.x=list(income=list(trans=log10, inverse=function(x) 10^x)), ticks.x=list(income=list(at=c(1000, 2000, 5000, 10000, 20000)))) restoreStrip(.save.strip) remove(.save.strip) } \keyword{hplot} \keyword{models} effects/man/BEPS.Rd0000644000176200001440000000370112441350057013466 0ustar liggesusers\name{BEPS} \alias{BEPS} \docType{data} \title{British Election Panel Study} \description{ These data are drawn from the 1997-2001 British Election Panel Study (BEPS). 
} \usage{BEPS} \format{ A data frame with 1525 observations on the following 10 variables. \describe{ \item{\code{vote}}{Party choice: \code{Conservative}, \code{Labour}, or \code{Liberal Democrat}} \item{\code{age}}{in years} \item{\code{economic.cond.national}}{Assessment of current national economic conditions, 1 to 5.} \item{\code{economic.cond.household}}{Assessment of current household economic conditions, 1 to 5.} \item{\code{Blair}}{Assessment of the Labour leader, 1 to 5.} \item{\code{Hague}}{Assessment of the Conservative leader, 1 to 5.} \item{\code{Kennedy}}{Assessment of the leader of the Liberal Democrats, 1 to 5.} \item{\code{Europe}}{an 11-point scale that measures respondents' attitudes toward European integration. High scores represent `Eurosceptic' sentiment.} \item{\code{political.knowledge}}{Knowledge of parties' positions on European integration, 0 to 3.} \item{\code{gender}}{\code{female} or \code{male}.} } } \references{ J. Fox and R. Andersen (2006) Effect displays for multinomial and proportional-odds logit models. \emph{Sociological Methodology} \bold{36}, 225--255. } \examples{ summary(BEPS) \donttest{ if (require(splines) && require(nnet)){ # for bs() and multinom() beps <- multinom(vote ~ age + gender + economic.cond.national + economic.cond.household + Blair + Hague + Kennedy + bs(Europe, 3)*political.knowledge, data=BEPS) europe.knowledge <- effect("bs(Europe, 3)*political.knowledge", beps, xlevels=list(Europe=seq(1, 11, length=50), political.knowledge=0:3), given.values=c(gendermale=0.5)) plot(europe.knowledge) plot(europe.knowledge, style="stacked", colors=c("blue", "red", "orange"), rug=FALSE) } } } \keyword{datasets} effects/man/Wells.Rd0000644000176200001440000000327611163026512014026 0ustar liggesusers\name{Wells} \alias{Wells} \docType{data} \title{Well Switching in Bangladesh} \description{ Data on whether or not households in Bangladesh changed the wells that they were using. 
} \usage{Wells} \format{ A data frame with 3020 observations on the following 5 variables. \describe{ \item{\code{switch}}{whether or not the household switched to another well from an unsafe well: \code{no} or \code{yes}.} \item{\code{arsenic}}{the level of arsenic contamination in the household's original well, in hundreds of micrograms per liter; all are above 0.5, which was the level identified as ``safe''.} \item{\code{distance}}{in meters to the closest known safe well.} \item{\code{education}}{in years of the head of the household.} \item{\code{association}}{whether or not any members of the household participated in any community organizations: \code{no} or \code{yes}.} } } \details{ The data are for an area of Arahazar upazila, Bangladesh. The researchers labelled each well with its level of arsenic and an indication of whether the well was ``safe'' or ``unsafe.'' Those using unsafe wells were encouraged to switch. After several years, it was determined whether each household using an unsafe well had changed its well. These data are used by Gelman and Hill (2007) for a logistic-regression example. } \source{ \url{http://www.stat.columbia.edu/~gelman/arm/examples/arsenic/wells.dat}. } \references{ A. Gelman and J. Hill (2007) \emph{Data Analysis Using Regression and Multilevel/Hierarchical Models.} Cambridge: Cambridge University Press. } \examples{ summary(Wells) } \keyword{datasets} effects/man/effects-deprecated.Rd0000644000176200001440000000100111074677173016455 0ustar liggesusers\name{effects-deprecated} \alias{effects-deprecated} \alias{all.effects} \title{Deprecated Function in effects Package} \description{ The \code{all.effects} function is provided only for compatibility with older versions of the effects package and may be removed; use \code{\link{allEffects}} instead. } \usage{ all.effects(...) 
} \arguments{ \item{...}{arguments to be passed to \code{allEffects}.} } \author{John Fox \email{jfox@mcmaster.ca}.} \seealso{\code{\link{allEffects}}} effects/man/Prestige.Rd0000644000176200001440000000261011065527636014527 0ustar liggesusers\name{Prestige} \alias{Prestige} \docType{data} \title{Prestige of Canadian Occupations} \description{ The \code{Prestige} data frame has 102 rows and 6 columns. The observations are occupations. } \format{ This data frame contains the following columns: \describe{ \item{education}{ Average education (years) of occupational incumbents, in 1971. } \item{income}{ Average income (dollars) of incumbents, 1971. } \item{women}{ Percentage of incumbents who are women, 1971. } \item{prestige}{ Pineo-Porter prestige score for occupation, from a social survey conducted in the mid-1960s. } \item{census}{ Canadian Census occupational code. } \item{type}{ Type of occupation. A factor with levels (note: out of order): \code{bc}, Blue Collar; \code{prof}, Professional, Managerial, and Technical; \code{wc}, White Collar. } } } \source{ Canada (1971) \emph{Census of Canada}. Vol. 3, Part 6. Statistics Canada [pp. 19-1--19-21]. Personal communication from B. Blishen, W. Carroll, and C. Moore, Departments of Sociology, York University and University of Victoria. } \references{ Fox, J. (1997) \emph{Applied Regression, Linear Models, and Related Methods.} Sage. } \usage{ Prestige } \keyword{datasets} effects/man/Arrests.Rd0000644000176200001440000000257711162233541014367 0ustar liggesusers\name{Arrests} \alias{Arrests} \docType{data} \title{Arrests for Marijuana Possession} \description{ Data on police treatment of individuals arrested in Toronto for simple possession of small quantities of marijuana. The data are part of a larger data set featured in a series of articles in the Toronto Star newspaper. } \usage{Arrests} \format{ A data frame with 5226 observations on the following 8 variables. 
\describe{ \item{released}{Whether or not the arrestee was released with a summons; a factor with levels: \code{No}; \code{Yes}. } \item{colour}{The arrestee's race; a factor with levels: \code{Black}; \code{White}. } \item{year}{1997 through 2002; a numeric vector.} \item{age}{in years; a numeric vector.} \item{sex}{a factor with levels: \code{Female}; \code{Male}. } \item{employed}{a factor with levels: \code{No}; \code{Yes}. } \item{citizen}{a factor with levels: \code{No}; \code{Yes}. } \item{checks}{Number of police data bases (of previous arrests, previous convictions, parole status, etc. -- 6 in all) on which the arrestee's name appeared; a numeric vector} } } \source{ Personal communication from Michael Friendly, York University. } \examples{ summary(Arrests) } \keyword{datasets} effects/man/WVS.Rd0000644000176200001440000000317012441350057013414 0ustar liggesusers\name{WVS} \alias{WVS} \docType{data} \title{World Values Surveys} \description{ Data from the World Values Surveys 1995-1997 for Australia, Norway, Sweden, and the United States. } \usage{WVS} \format{ A data frame with 5381 observations on the following 6 variables. \describe{ \item{\code{poverty}}{``Do you think that what the government is doing for people in poverty in this country is about the right amount, too much, or too little?'' (ordered): \code{Too Little}, \code{About Right}, \code{Too Much}}. \item{\code{religion}}{Member of a religion: \code{no} or \code{yes}.} \item{\code{degree}}{Held a university degree: \code{no} or \code{yes}.} \item{\code{country}}{\code{Australia}, \code{Norway}, \code{Sweden}, or \code{USA}.} \item{\code{age}}{in years.} \item{\code{gender}}{\code{male} or \code{female}.} } } \references{ J. Fox and R. Andersen (2006) Effect displays for multinomial and proportional-odds logit models. \emph{Sociological Methodology} \bold{36}, 225--255. 
} \examples{ summary(WVS) \donttest{ if (require(splines) && require(MASS)){ # for bs() wvs <- polr(poverty ~ gender + country*(religion + degree + bs(age, 4)), data=WVS) plot(effect("country*bs(age,4)", wvs, xlevels=list(age=18:83), given.values=c(gendermale=0.5)), rug=FALSE) plot(effect("country*bs(age,4)", wvs, xlevels=list(age=18:83), given.values=c(gendermale=0.5)), rug=FALSE, style="stacked") plot(effect("country*bs(age,4)", wvs, xlevels=list(age=18:83), given.values=c(gendermale=0.5), latent=TRUE), rug=FALSE) } } } \keyword{datasets} effects/man/Hartnagel.Rd0000644000176200001440000000334011507364001014635 0ustar liggesusers\name{Hartnagel} \alias{Hartnagel} \docType{data} \title{Canadian Crime-Rates Time Series} \description{ The \code{Hartnagel} data frame has 38 rows and 7 columns. The data are an annual time-series from 1931 to 1968. There are some missing data. } \format{ This data frame contains the following columns: \describe{ \item{year}{ 1931--1968. } \item{tfr}{ Total fertility rate per 1000 women. } \item{partic}{ Women's labor-force participation rate per 1000. } \item{degrees}{ Women's post-secondary degree rate per 10,000. } \item{fconvict}{ Female indictable-offense conviction rate per 100,000. } \item{ftheft}{ Female theft conviction rate per 100,000. } \item{mconvict}{ Male indictable-offense conviction rate per 100,000. } \item{mtheft}{ Male theft conviction rate per 100,000. } } } \details{ The post-1948 crime rates have been adjusted to account for a difference in method of recording. Some of your results will differ in the last decimal place from those in Table 14.1 of Fox (1997) due to rounding of the data. Missing values for 1950 were interpolated. } \source{ Personal communication from T. Hartnagel, Department of Sociology, University of Alberta. } \references{ Fox, J., and Hartnagel, T. F (1979) Changing social roles and female crime in Canada: A time series analysis. 
\emph{Canadian Review of Sociology and Anthroplogy}, \bold{16}, 96--104. Fox, J. (2008) \emph{Applied Regression Analysis and Generalized Linear Models}, Second Edition. Sage. } \usage{ Hartnagel } \keyword{datasets} effects/man/Titanic.Rd0000644000176200001440000000324312216200646014327 0ustar liggesusers\name{TitanicSurvival} \alias{TitanicSurvival} \docType{data} \title{Survival of Passengers on the Titanic} \description{ Information on the survival status, sex, age, and passenger class of 1309 passengers in the Titanic disaster of 1912. } \usage{TitanicSurvival} \format{ A data frame with 1309 observations on the following 4 variables. \describe{ \item{\code{survived}}{\code{no} or \code{yes}.} \item{\code{sex}}{\code{female} or \code{male}} \item{\code{age}}{in years (and for some children, fractions of a year); age is missing for 263 of the passengers.} \item{\code{passengerClass}}{\code{1st}, \code{2nd}, or \code{3rd} class.} } } \details{ This is part of a larger data set compiled by Thomas Cason. Many additional details are given in the sources cited below. } \source{ Data set \code{titanic3} from \url{http://biostat.mc.vanderbilt.edu/twiki/bin/view/Main/DataSets}. } \references{ \url{http://www.encyclopedia-titanica.org/} F. E. Harrell, Jr. (2001) \emph{Regression Modeling Strategies} New York: Springer. 
} \examples{ summary(TitanicSurvival) titanic <- glm(survived ~ (passengerClass + sex + age)^2, data=TitanicSurvival, family=binomial) titanic.all <- allEffects(titanic, typical=median, given.values=c(passengerClass2nd=1/3, passengerClass3rd=1/3, sexmale=0.5)) plot(titanic.all, ticks=list(at=c(.01, .05, seq(.1, .9, by=.2), .95, .99)), ask=FALSE) plot(effect("passengerClass*sex*age", titanic, xlevels=list(age=0:65)), ticks=list(at=c(.001, .005, .01, .05, seq(.1, .9, by=.2), .95, .99, .995))) } \keyword{datasets} effects/man/effects-package.Rd0000644000176200001440000000543512633371321015753 0ustar liggesusers\name{effects-package} \Rdversion{1.1} \alias{effects-package} \alias{effects} \docType{package} \title{ Effect Displays for Linear, Generalized Linear, and Other Models } \description{ Graphical and tabular effect displays, e.g., of interactions, for linear (including fit via \code{\link[nlme]{gls}}), multivariate-linear, generalized linear, multinomial-logit, proportional-odds logit, mixed-effect, polytomous latent-class, and some other models; (multidimensional) component+residual plots for linear and generalized linear models. } \details{ \tabular{ll}{ Package: \tab effects\cr Version: \tab 3.0-5\cr Date: \tab 2015/12/13\cr Suggests: \tab pbkrtest (>= 0.4-4), nlme, MASS, poLCA, heplots, splines\cr Imports: \tab lme4, nnet, lattice, grid, colorspace, graphics, grDevices, stats, utils\cr LazyLoad: \tab yes\cr LazyData: \tab yes\cr License: \tab GPL (>= 2)\cr URL: \tab http://www.r-project.org, http://socserv.socsci.mcmaster.ca/jfox/\cr } This package creates effect displays for various kinds of models, as partly explained in the references. Typical usage is \code{plot(allEffects(model))}, where \code{model} is an appropriate fitted-model object. Additional arguments to \code{allEffects} and \code{plot} can be used to customize the resulting displays. 
The function \code{effect} can be employed to produce an effect display for a particular term in the model, or to which terms in the model are marginal. The function \code{Effect} may similarly be used to produce an effect display for any combination of predictors. For linear and generalized linear models it is also possible to plot partial residuals to obtain (multidimensional) component+residual plots. See \code{?effect}, \code{?Effect}, and \code{?plot.eff} for details. } \author{ John Fox , Sanford Weisberg, Michael Friendly, and Jangman Hong. We are grateful to Robert Andersen, David Firth, and for various suggestions. Maintainer: John Fox } \references{ Fox, J. (1987) Effect displays for generalized linear models. \emph{Sociological Methodology} \bold{17}, 347--361. Fox, J. (2003) Effect displays in R for generalised linear models. \emph{Journal of Statistical Software} \bold{8:15}, 1--27, <\url{http://www.jstatsoft.org/v08/i15/}>. Fox, J. and R. Andersen (2006) Effect displays for multinomial and proportional-odds logit models. \emph{Sociological Methodology} \bold{36}, 225--255. Fox, J. and J. Hong (2009). Effect displays in R for multinomial and proportional-odds logit models: Extensions to the effects package. \emph{Journal of Statistical Software} \bold{32:1}, 1--24, <\url{http://www.jstatsoft.org/v32/i01/}>. } \keyword{ package } effects/man/Cowles.Rd0000644000176200001440000000156111162233541014170 0ustar liggesusers\name{Cowles} \alias{Cowles} \docType{data} \title{Cowles and Davis's Data on Volunteering} \usage{Cowles} \description{ The \code{Cowles} data frame has 1421 rows and 4 columns. These data come from a study of the personality determinants of volunteering for psychological research. 
} \format{ This data frame contains the following columns: \describe{ \item{neuroticism}{scale from Eysenck personality inventory.} \item{extraversion}{scale from Eysenck personality inventory.} \item{sex}{a factor with levels: \code{female}; \code{male.}} \item{volunteer}{volunteeing, a factor with levels: \code{no}; \code{yes}.} } } \source{ Cowles, M. and C. Davis (1987) The subject matter of psychology: Volunteers. \emph{British Journal of Social Psychology} \bold{26}, 97--102. } \examples{ summary(Cowles) } \keyword{datasets} effects/man/effect.Rd0000644000176200001440000007354112511301532014172 0ustar liggesusers\name{effect} \alias{effect} \alias{Effect} \alias{Effect.default} \alias{Effect.lm} \alias{Effect.mer} \alias{Effect.merMod} \alias{Effect.lme} \alias{Effect.gls} \alias{Effect.multinom} \alias{Effect.polr} \alias{Effect.poLCA} \alias{Effect.clm2} \alias{Effect.mlm} \alias{allEffects} \alias{as.data.frame.eff} \alias{as.data.frame.effpoly} \alias{as.data.frame.efflatent} \alias{vcov.eff} \title{Functions For Constructing Effect Plots} \description{ \code{effect} and \code{Effect} construct an \code{"eff"} object for a term (usually a high-order term) in a linear (fit by \code{lm} or \code{gls}) or generalized linear model (fit by \code{glm}), or an \code{"effpoly"} object for a term in a multinomial or proportional-odds logit model (fit respectively by \code{multinom} or \code{polr}), absorbing the lower-order terms marginal to the term in question, and averaging over other terms in the model. For multivariate linear models (\code{mlm}), the function constructs a list of \code{"eff"} objects separately for the various response variables. \code{effect} builds the required object by specifying explicity a focal term like \code{"a:b"} for an \code{a} by \code{b} interaction. \code{Effect} requires specifying a the components of the term, for example \code{c("a", "b")} rather than the term itself. 
\code{Effect} is consequently more flexible and robust than \code{effect}, and will work with some models for which \code{effect} fails, such as models with nested terms (see the examples). The \code{effect} function works by constructing a call to \code{Effect}. The function can also be used with some mixed-effects models, including linear and generalized linear model fits from \code{lmer} and \code{glmer} from the \pkg{lme4} package and \code{lme} from the \pkg{nlme} package; ordinal logistic mixed effects fit with \code{clmm2} from the \pkg{ordinal} package, and polytomous latent-class models fit by the \code{poLCA} function in the \pkg{poLCA} package. The analysis in mixed effects is for the fixed effects only, not for random effects. There is a default method for \code{Effect} that should work with any model object that has a linear predictor and responds to the \code{\link{coef}}, \code{\link{model.frame}}, \code{\link{formula}}, and \code{\link{vcov}} functions. \code{allEffects} identifies all of the high-order terms in a model and returns a list of \code{"eff"} or \code{"effpoly"} objects (i.e., an object of type \code{"efflist"}). } \usage{ effect(term, mod, vcov.=vcov, ...) Effect(focal.predictors, mod, ...) \method{Effect}{lm}(focal.predictors, mod, xlevels = list(), default.levels = NULL, given.values, vcov.=vcov, se = TRUE, confidence.level = 0.95, transformation = list(link = family(mod)$linkfun, inverse = family(mod)$linkinv), typical = mean, offset = mean, partial.residuals=FALSE, quantiles=seq(0.2, 0.8, by=0.2), x.var=NULL, ...) \method{Effect}{gls}(focal.predictors, mod, xlevels = list(), default.levels=NULL, given.values, vcov.=vcov, se = TRUE, confidence.level = 0.95, transformation = NULL, typical = mean, ...) \method{Effect}{multinom}(focal.predictors, mod, confidence.level=.95, xlevels=list(), default.levels=NULL, given.values, vcov.=vcov, se=TRUE, typical=mean, ...) 
\method{Effect}{polr}(focal.predictors, mod, confidence.level=.95, xlevels=list(), default.levels=NULL, given.values, vcov.=vcov, se=TRUE, typical=mean, latent=FALSE, ...) \method{Effect}{clm2}(focal.predictors, mod, ...) \method{Effect}{mer}(focal.predictors, mod, KR=FALSE, ...) \method{Effect}{merMod}(focal.predictors, mod, KR=FALSE, ...) \method{Effect}{lme}(focal.predictors, mod, ...) \method{Effect}{poLCA}(focal.predictors, mod, ...) \method{Effect}{mlm}(focal.predictors, mod, response, ...) \method{Effect}{default}(focal.predictors, mod, xlevels = list(), default.levels = NULL, given.values, vcov. = vcov, se = TRUE, confidence.level = 0.95, transformation = list(link = I, inverse = I), typical = mean, offset = mean, ...) allEffects(mod, ...) \method{as.data.frame}{eff}(x, row.names=NULL, optional=TRUE, transform=x$transformation$inverse, ...) \method{as.data.frame}{effpoly}(x, row.names=NULL, optional=TRUE, ...) \method{as.data.frame}{efflatent}(x, row.names=NULL, optional=TRUE, ...) \method{vcov}{eff}(object, ...) } \arguments{ \item{term}{the quoted name of a term, usually, but not necessarily, a high-order term in the model. The term must be given exactly as it appears in the printed model, although either colons (\code{:}) or asterisks (\code{*}) may be used for interactions.} \item{focal.predictors}{a character vector of one or more predictors in the model.} \item{mod}{an object of class \code{"lm"}, \code{"gls"}, \code{"glm"}, \code{"multinom"}, \code{"polr"}, \code{"mer"} (or \code{"merMod"}), \code{"lme"} or \code{"poLCA"}. } \item{xlevels}{this argument is used to set the number of levels for any focal predictor that is not a factor. If \code{xlevels=NULL}, the default, then the number and values of levels for any numeric predictor is determined by \code{\link{grid.pretty}}. If \code{xlevels=n} is an integer, then each numeric predictor is represented by \code{n} equally spaced levels. 
More generally, \code{xlevels} can be a named list of values at which to set each numeric predictor. For example, \code{xlevels=list(x1=c(2, 4, 7), x2=5)} would use the values 2, 4 and 7 for the levels of \code{x1}, 5 equally spaced levels for the levels of \code{x2}, and use the default for any other numeric predictors. If partial residuals are computed, then the focal predictor that is to appear on the horizontal axis of an effect plot is evaluated at 100 equally spaced values along its full range, and, by default, other numeric predictors are evaluated at the quantiles specified in the \code{quantiles} argument, unless their values are given explicitly in \code{xlevels}.} \item{default.levels}{ignored, but included for compatibility with pre-July 2013 versions of this package. Use \code{xlevels} instead.} \item{given.values}{a numeric vector of named elements, setting particular columns of the model matrix to specific values for predictors that are \emph{not} focal predictors; if specified, this argument takes precedence over the application of the function given in the \code{typical} argument (below). Care must be taken in specifying these values --- e.g., for a factor, the values of all contrasts should be given and these should be consistent with each other.} \item{vcov.}{A function or the name of a function that will be used to get the estimated variance covariance matrix of the estimated coefficients. This will ordinarily be the default, \code{vcov}, which will result in the function call \code{vcov(mod)} to get the variance covariance matrix. You can use the name of any function with \code{mod} as the value of its first argument that returns an estimated sample covariance matrix, such as the \code{hccm} function in the \pkg{car} package that returns a heteroscedasticity corrected estimate with linear models.} \item{se}{if \code{TRUE}, the default, calculate standard errors and confidence limits for the effects. 
For \code{mer}, \code{merMod}, and \code{lme} objects, the normal distribution is used to get confidence limits.} \item{confidence.level}{level at which to compute confidence limits based on the standard-normal distribution; the default is \code{0.95}.} \item{KR}{if \code{TRUE} and the \pkg{pbkrtest} package is installed, use the Kenward-Roger coefficient covariance matrix to compute effect standard errors for linear mixed models fit with \code{lmer} in the \pkg{lme4} package. The default is \code{FALSE} because the computation can be very slow.} \item{transformation}{a two-element list with elements \code{link} and \code{inverse}. For a generalized linear model, these are by default the link function and inverse-link (mean) function. For a linear model, these default to \code{NULL}. If \code{NULL}, the identify function, \code{I}, is used; this effect can also be achieved by setting the argument to \code{NULL}. The inverse-link may be used to transform effects when they are printed or plotted; the link may be used in positioning axis labels (see below). If the link is not given, an attempt will be made to approximate it from the inverse-link.} \item{typical}{a function to be applied to the columns of the model matrix over which the effect is "averaged"; the default is \code{mean}.} \item{offset}{a function to be applied to the offset values (if there is an offset) in a linear or generalized linear model, or a mixed-effects model fit by \code{lmer} or \code{glmer}; or a numeric value, to which the offset will be set. The default is the \code{mean} function, and thus the offset will be set to its mean. 
\emph{Note:} Only offsets defined by the \code{offset} argument to \code{lm}, \code{glm}, \code{lmer}, or \code{glmer} will be handled correctly; use of the \code{offset} function in the model formula is not supported.} \item{partial.residuals}{if \code{TRUE}, partial residuals will be computed for an effect in a linear or generalized linear model; if \code{FALSE} (the default), partial residuals are suppressed.} \item{quantiles}{quantiles at which to evaluate numeric focal predictors \emph{not} on the horizontal axis, used only when partial residuals are displayed; superceded if the \code{xlevels} argument gives specific values for a predictor.} \item{x.var}{the name or index of the numeric predictor to define the horizontal axis of an effect plot for a linear or generalized linear model; the default is \code{NULL}, in which case the first numeric predictor in the effect will be used \emph{if} partial residuals are to be computed. This argument is intended to be used when \code{partial.residuals} is \code{TRUE}; otherwise, the variable on the horizontal axis can be chosen when the effect object is plotted: see \code{\link{plot.eff}}.} \item{latent}{if \code{TRUE}, effects in a proportional-odds logit model are computed on the scale of the latent response; if \code{FALSE} (the default) effects are computed as individual-level probabilities and logits.} \item{x}{an object of class \code{"eff"}, \code{"effpoly"}, or \code{"efflatent"}.} \item{transform}{a transformation to be applied to the effects and confidence limits, by default taken from the inverse link function saved in the \code{"eff"} object.} \item{row.names, optional}{not used.} \item{response}{for an \code{mlm}, a vector containing the name(s) or indices of one or more response variable(s). 
The default is to use all responses in the model.} \item{object}{an object of class \code{"eff"} for which the covariance matrix of the effects is desired.} \item{...}{arguments to be passed down.} } \details{ Normally, the functions to be used directly are \code{allEffects}, to return a list of high-order effects, and the generic \code{plot} function to plot the effects. (see \code{\link{plot.efflist}}, \code{\link{plot.eff}}, and \code{\link{plot.effpoly}}). Alternatively, \code{Effect} can be used to vary a subset of predictors over their ranges, while other predictors are held to typical values. Plots are drawn using the \code{\link{xyplot}} (or in some cases, the \code{\link{densityplot}}) function in the \code{lattice} package. Effects may also be printed (implicitly or explicitly via \code{print}) or summarized (using \code{summary}) (see \code{\link{print.efflist}}, \code{\link{summary.efflist}}, \code{\link{print.eff}}, \code{\link{summary.eff}}, \code{\link{print.effpoly}}, and \code{\link{summary.effpoly}}). If asked, the \code{effect} function will compute effects for terms that have higher-order relatives in the model, averaging over those terms (which rarely makes sense), or for terms that do not appear in the model but are higher-order relatives of terms that do. For example, for the model \code{Y ~ A*B + A*C + B*C}, one could compute the effect corresponding to the absent term \code{A:B:C}, which absorbs the constant, the \code{A}, \code{B}, and \code{C} main effects, and the three two-way interactions. In either of these cases, a warning is printed. The \code{as.data.frame} methods convert effect objects to data frames to facilitate the construction of custom displays. In the case of \code{"eff"} objects, the \code{se} element in the data frame is always on the scale of the linear predictor, and the transformation used for the fit and confidence limits is saved in a \code{"transformation"} attribute. 
} \value{ For \code{lm}, \code{glm}, \code{mer} and \code{lme}, \code{effect} and \code{Effect} return an \code{"eff"} object, and for \code{multinom}, \code{polr} and \code{clmm2}, an \code{"effpoly"} object, with the components listed below. For \code{mlm} with one response specified, an \code{"eff"} object, otherwise a class \code{"efflist"} object, containing one \code{"eff"} object for each \code{response}. \item{term}{the term to which the effect pertains.} \item{formula}{the complete model formula.} \item{response}{a character string giving the name of the response variable.} \item{y.levels}{(for \code{"effpoly"} objects) levels of the polytomous response variable.} \item{variables}{a list with information about each predictor, including its name, whether it is a factor, and its levels or values.} \item{fit}{(for \code{"eff"} objects) a one-column matrix of fitted values, representing the effect on the scale of the linear predictor; this is a ravelled table, representing all combinations of predictor values.} \item{prob}{(for \code{"effpoly"} objects) a matrix giving fitted probabilities for the effect for the various levels of the response (columns) and combinations of the focal predictors (rows).} \item{logit}{(for \code{"effpoly"} objects) a matrix giving fitted logits for the effect for the various levels of the response (columns) and combinations of the focal predictors (rows).} \item{x}{a data frame, the columns of which are the predictors in the effect, and the rows of which give all combinations of values of these predictors.} \item{model.matrix}{the model matrix from which the effect was calculated.} \item{data}{a data frame with the data on which the fitted model was based.} \item{discrepancy}{the percentage discrepancy for the `safe' predictions of the original fit; should be very close to 0.
Note: except for \code{gls} models, this is now necessarily 0.} \item{offset}{value to which the offset is fixed; \code{0} if there is no offset.} \item{model}{(for \code{"effpoly"} objects) \code{"multinom"} or \code{"polr"}, as appropriate.} \item{vcov}{(for \code{"eff"} objects) a covariance matrix for the effect, on the scale of the linear predictor.} \item{se}{(for \code{"eff"} objects) a vector of standard errors for the effect, on the scale of the linear predictor.} \item{se.prob, se.logit}{(for \code{"effpoly"} objects) matrices of standard errors for the effect, on the probability and logit scales.} \item{lower, upper}{(for \code{"eff"} objects) one-column matrices of confidence limits, on the scale of the linear predictor.} \item{lower.prob, upper.prob, lower.logit, upper.logit}{(for \code{"effpoly"} objects) matrices of confidence limits for the fitted logits and probabilities; the latter are computed by transforming the former.} \item{confidence.level}{for the confidence limits.} \item{transformation}{(for \code{"eff"} objects) a two-element list, with element \code{link} giving the link function, and element \code{inverse} giving the inverse-link (mean) function.} \item{fitted.rounded}{partial fitted values at the observed values of the predictor \code{x.var} and the values of the other predictors that appear in the effect display; predictors not in the effect are held constant to typical values. 
This and the following two elements are \code{NULL} if partial residuals aren't computed and pertain only to linear or generalized linear models.} \item{fitted}{partial fitted values for the observed values of all predictors that appear in the effect display; predictors not in the effect are held constant to typical values.} \item{partial.residuals.raw}{partial residuals for the effect computed at the actual values of all focal predictors.} \item{partial.residuals.adjusted}{partial residuals for the effect computed at the panel-rounded values of focal predictors, except for the predictor corresponding to \code{x.var}, which will appear on the horizontal axis of the plotted effect.} \item{x.var}{the name of the predictor to appear on the horizontal axis of an effect plot made from the returned object; will usually be \code{NULL} if partial residuals aren't computed.} \item{family}{for a \code{"glm"} model, the name of the distributional family of the model; for an \code{"lm"} model, this is \code{"gaussian"}; otherwise \code{NULL}. The \code{family} controls how partial residuals are smoothed in plots.} \code{effectList} returns a list of \code{"eff"} or \code{"effpoly"} objects corresponding to the high-order terms of the model. If \code{mod} is of class \code{poLCA} (from the \code{poLCA} package) to fit a polytomous latent class model, effects are computed for the predictors given the estimated latent classes. The result is of class \code{eff} if the latent class model has 2 categories and of class \code{effpoly} with more than 2 categories. } \section{Warnings and Limitations}{ The \code{Effect} function handles factors and covariates differently, and is likely to be confused if one is changed to the other in a model formula. Consequently, formulas that include calls to \code{as.factor}, \code{factor}, or \code{numeric} (as, e.g., in \code{y ~ as.factor(income)}) will cause errors.
Instead, create the modified variables outside of the model formula (e.g., \code{fincome <- as.factor(income)}) and use these in the model formula. Similarly, variables of class \code{date} or \code{"times"}, which are usually differences between \code{"dates"} variables, should be converted to numeric variables outside the model formula. Factors cannot have colons in level names (e.g., \code{"level:A"}); the \code{effect} function will confuse the colons with interactions; rename levels to remove or replace the colons (e.g., \code{"level.A"}). In addition, factors cannot be declared on the fly (e.g., using \code{y ~ a + factor(b)}). The functions in the \pkg{effects} package work properly with predictors that are numeric or factors; consequently, e.g., convert logical predictors to factors, and dates to numeric. Empty cells in crossed-factors are now permitted for lm, glm and multinom models. With multinom models with two or more crossed factors with an empty cell, the 'plot' command with \code{style="stacked"} apparently does not work because of a bug in the \code{barchart} function in \code{lattice}. However, the default \code{style="lines"} does work. Offsets in linear and generalized linear models are supported, as are offsets in mixed models fit by \code{lmer} or \code{glmer}, but must be supplied through the \code{offset} argument to \code{lm}, \code{glm}, \code{lmer} or \code{glmer}; offsets supplied via calls to the \code{offset} function on the right-hand side of the model formula are not supported. Fitting ordinal mixed-models using \code{clmm2} permits many options, including a variety of link functions, scale functions, nominal regressors, and various methods for setting thresholds. Effects are currently generated only for the default values of the arguments \code{scale}, \code{nominal}, \code{link} and \code{threshold}, which is equivalent to fitting an ordinal response mixed effects model with a logistic link.
The effect methods can also be used with objects created using \code{clm2} fitting ordinal response logistic models with no random effects, with results similar to those from \code{polr} in the \pkg{MASS} package. Calling any of these functions from within a user-written function may result in errors due to R's scoping rules. See the vignette \code{embedding.pdf} for the \pkg{car} package for a solution to this problem. } \references{ Fox, J. (1987). Effect displays for generalized linear models. \emph{Sociological Methodology} \bold{17}, 347--361. Fox, J. (2003) Effect displays in R for generalised linear models. \emph{Journal of Statistical Software} \bold{8:15}, 1--27, <\url{http://www.jstatsoft.org/v08/i15/}>. Fox, J. and R. Andersen (2006). Effect displays for multinomial and proportional-odds logit models. \emph{Sociological Methodology} \bold{36}, 225--255. Fox, J. and J. Hong (2009). Effect displays in R for multinomial and proportional-odds logit models: Extensions to the effects package. \emph{Journal of Statistical Software} \bold{32:1}, 1--24, <\url{http://www.jstatsoft.org/v32/i01/}>. Hastie, T. J. (1992). Generalized additive models. In Chambers, J. M., and Hastie, T. J. (eds.) \emph{Statistical Models in S}, Wadsworth. Weisberg, S. (2014). \emph{Applied Linear Regression}, 4th edition, Wiley, \url{http://z.umn.edu/alr4ed}. } \author{John Fox \email{jfox@mcmaster.ca}, Sanford Weisberg \email{sandy@umn.edu} and Jangman Hong.} \seealso{\code{\link{print.eff}}, \code{\link{summary.eff}}, \code{\link{plot.eff}}, \code{\link{print.summary.eff}}, \code{\link{print.effpoly}}, \code{\link{summary.effpoly}}, \code{\link{plot.effpoly}}, \code{\link{print.efflist}}, \code{\link{summary.efflist}}, \code{\link{plot.efflist}}, \code{\link{xyplot}}, \code{\link{densityplot}}} \examples{ # Note: Some of these examples are marked as "don't test" # to reduce the execution times of the examples # when the package is checked. 
mod.cowles <- glm(volunteer ~ sex + neuroticism*extraversion, data=Cowles, family=binomial) eff.cowles <- allEffects(mod.cowles, xlevels=list(extraversion=seq(0, 24, 6)), given.values=c(sexmale=0.5)) eff.cowles as.data.frame(eff.cowles[[2]]) \donttest{ # the following are equivalent: eff.ne <- effect("neuroticism*extraversion", mod.cowles) Eff.ne <- Effect(c("neuroticism", "extraversion"), mod.cowles) all.equal(eff.ne$fit, Eff.ne$fit) plot(eff.cowles, 'sex', ylab="Prob(Volunteer)") plot(eff.cowles, 'neuroticism:extraversion', ylab="Prob(Volunteer)", ticks=list(at=c(.1,.25,.5,.75,.9))) plot(eff.cowles, 'neuroticism:extraversion', multiline=TRUE, ylab="Prob(Volunteer)") plot(effect('sex:neuroticism:extraversion', mod.cowles, xlevels=list(extraversion=seq(0, 24, 6))), multiline=TRUE) } # a nested model: mod <- lm(log(prestige) ~ income:type + education, data=Prestige) # does not work: effect("income:type", mod, transformation=list(link=log, inverse=exp)) plot(Effect(c("income", "type"), mod, transformation=list(link=log, inverse=exp), ylab="prestige")) # works if (require(nnet)){ mod.beps <- multinom(vote ~ age + gender + economic.cond.national + economic.cond.household + Blair + Hague + Kennedy + Europe*political.knowledge, data=BEPS) \donttest{ plot(effect("Europe*political.knowledge", mod.beps, xlevels=list(political.knowledge=0:3))) } plot(Effect(c("Europe", "political.knowledge"), mod.beps, xlevels=list(Europe=1:11, political.knowledge=0:3), given.values=c(gendermale=0.5)), style="stacked", colors=c("blue", "red", "orange"), rug=FALSE) \donttest{ plot(effect("Europe*political.knowledge", mod.beps, # equivalent xlevels=list(political.knowledge=0:3), given.values=c(gendermale=0.5)), style="stacked", colors=c("blue", "red", "orange"), rug=FALSE) } } if (require(MASS)){ mod.wvs <- polr(poverty ~ gender + religion + degree + country*poly(age,3), data=WVS) \donttest{ plot(effect("country*poly(age, 3)", mod.wvs)) } plot(Effect(c("country", "age"), mod.wvs), 
style="stacked") \donttest{ plot(effect("country*poly(age, 3)", mod.wvs), style="stacked") # equivalent plot(effect("country*poly(age, 3)", latent=TRUE, mod.wvs)) } } mod.pres <- lm(prestige ~ log(income, 10) + poly(education, 3) + poly(women, 2), data=Prestige) eff.pres <- allEffects(mod.pres, xlevels=50) plot(eff.pres) plot(eff.pres[1], transform.x=list(income=list(trans=log10, inverse=function(x) 10^x)), ticks.x=list(income=list(at=c(1000, 2000, 5000, 10000, 20000)))) \donttest{ # linear model with log-response and log-predictor # to illustrate transforming axes and setting tick labels mod.pres1 <- lm(log(prestige) ~ log(income) + poly(education, 3) + poly(women, 2), data=Prestige) # effect of the log-predictor eff.log <- Effect("income", mod.pres1) # effect of the log-predictor transformed to the arithmetic scale eff.trans <- Effect("income", mod.pres1, transformation=list(link=log, inverse=exp)) #variations: # y-axis: scale is log, tick labels are log # x-axis: scale is arithmetic, tick labels are arithmetic plot(eff.log) # y-axis: scale is log, tick labels are log # x-axis: scale is log, tick labels are arithmetic plot(eff.log, transform.x=list(income=c(trans=log, inverse=exp)), ticks.x=list(income=list(at=c(1000, 2000, 5000, 10000, 20000))), xlab="income, log-scale") # y-axis: scale is log, tick labels are arithmetic # x-axis: scale is arithmetic, tick labels are arithmetic plot(eff.trans, ylab="prestige") # y-axis: scale is arithmetic, tick labels are arithmetic # x-axis: scale is arithmetic, tick labels are arithmetic plot(eff.trans, type="response", ylab="prestige") # y-axis: scale is log, tick labels are arithmetic # x-axis: scale is log, tick labels are arithmetic plot(eff.trans, transform.x=list(income=c(trans=log, inverse=exp)), ticks.x=list(income=list(at=c(1000, 2000, 5000, 10000, 20000))), xlab="income, log-scale", ylab="prestige, log-scale", main="Both effect and X in log-scale") # y-axis: scale is arithmetic, tick labels are arithmetic # x-axis:
scale is log, tick labels are arithmetic plot(eff.trans, transform.x=list(income=c(trans=log, inverse=exp)), ticks.x=list(income=list(at=c(1000, 2000, 5000, 10000, 20000))), type="link", xlab="income, log-scale", ylab="prestige") } if (require(nlme)){ # for gls() mod.hart <- gls(fconvict ~ mconvict + tfr + partic + degrees, data=Hartnagel, correlation=corARMA(p=2, q=0), method="ML") plot(allEffects(mod.hart)) detach(package:nlme) } if (require(lme4)){ data(cake, package="lme4") fm1 <- lmer(angle ~ recipe * temperature + (1|recipe:replicate), cake, REML = FALSE) plot(Effect(c("recipe", "temperature"), fm1)) \donttest{ plot(effect("recipe:temperature", fm1), grid=TRUE) # equivalent } if (any(grepl("pbkrtest", search()))) detach(package:pbkrtest) detach(package:lme4) } \donttest{ if (require(nlme) && length(find.package("lme4", quiet=TRUE)) > 0){ data(cake, package="lme4") cake$rep <- with(cake, paste( as.character(recipe), as.character(replicate), sep="")) fm2 <- lme(angle ~ recipe * temperature, data=cake, random = ~ 1 | rep, method="ML") plot(Effect(c("recipe", "temperature"), fm2)) plot(effect("recipe:temperature", fm2), grid=TRUE) # equivalent } detach(package:nlme) } \donttest{ if (require(poLCA)){ data(election) f2a <- cbind(MORALG,CARESG,KNOWG,LEADG,DISHONG,INTELG, MORALB,CARESB,KNOWB,LEADB,DISHONB,INTELB)~PARTY*AGE nes2a <- poLCA(f2a,election,nclass=3,nrep=5) plot(Effect(c("PARTY", "AGE"), nes2a), style="stacked") } } # mlm example if (require(heplots)) { data(NLSY, package="heplots") mod <- lm(cbind(read,math) ~ income+educ, data=NLSY) eff.inc <- Effect("income", mod) plot(eff.inc) eff.edu <- Effect("educ", mod) plot(eff.edu, rug=FALSE, grid=TRUE) \donttest{ plot(Effect("educ", mod, response="read")) } detach(package:heplots) } # component + residual plot examples \donttest{ Prestige$type <- factor(Prestige$type, levels=c("bc", "wc", "prof")) mod.prestige.1 <- lm(prestige ~ income + education, data=Prestige) plot(allEffects(mod.prestige.1, 
partial.residuals=TRUE)) # standard C+R plots mod.prestige.2 <- lm(prestige ~ type*(income + education), data=Prestige) plot(allEffects(mod.prestige.2, partial.residuals=TRUE)) mod.prestige.3 <- lm(prestige ~ type + income*education, data=Prestige) plot(Effect(c("income", "education"), mod.prestige.3, partial.residuals=TRUE), span=1) } # artificial data set.seed(12345) x1 <- runif(500, -75, 100) x2 <- runif(500, -75, 100) y <- 10 + 5*x1 + 5*x2 + x1^2 + x2^2 + x1*x2 + rnorm(500, 0, 1e3) Data <- data.frame(y, x1, x2) mod.1 <- lm(y ~ poly(x1, x2, degree=2, raw=TRUE), data=Data) # raw=TRUE necessary for safe prediction mod.2 <- lm(y ~ x1*x2, data=Data) mod.3 <- lm(y ~ x1 + x2, data=Data) .save.strip <- setStrip() # change color of lattice strips plot(Effect(c("x1", "x2"), mod.1, partial.residuals=TRUE)) # correct model plot(Effect(c("x1", "x2"), mod.2, partial.residuals=TRUE)) # wrong model plot(Effect(c("x1", "x2"), mod.3, partial.residuals=TRUE)) # wrong model restoreStrip(.save.strip) remove(.save.strip) } \keyword{hplot} \keyword{models}