eRm/0000755000176000001440000000000011706463107011032 5ustar ripleyuserseRm/MD50000644000176000001440000002152511706463107011347 0ustar ripleyusers94d55d512a9ba36caa9b7df079bae19f *COPYING d86eed66bba841658207fd609fdd3d4a *COPYRIGHTS c824379f1e7400c15cea5f79c12608b4 *DESCRIPTION 2fa574614347ba33bf658a819b4fe98a *NAMESPACE c3686599805d8b0a1fcd21e60698b311 *NEWS d2d22f8b2176c97ca7df720f7a089794 *R/IC.default.R 2bedf16f88332f159bfad9cdb36f4689 *R/IC.ppar.r 2e4b547352dd7af179635bc46f9cdf87 *R/IC.r 4bf43e52d582082bb387c70910851f74 *R/LLRA.R 7826640f34c84fbb51764108b0961824 *R/LLTM.R bf407bc699aa2a8a6bb1e507cc32f049 *R/LPCM.R 559e1ab0a83b6e78e7ae67ddb4a41b1e *R/LRSM.R e09054c9e41da89f00cf09d2ca0b67b2 *R/LRtest.R 2d59903e184e948d35803dc9745d1044 *R/LRtest.Rm.R c4570ad02cb02e2263ff64507e5aa619 *R/MLoef.R 85771f7f5668ecde577700138d893add *R/NPtest.R bbd93fc79991f98878c94fff0bcfaf30 *R/PCM.R 136ebfb819fe8287b48d0d33c194815d *R/RM.R 8f3a897135420e5fb99b1adebc4c8765 *R/ROCR_aux.R 83e714c0d5bd37f8299292e3bf574a09 *R/RSM.R bc7be7af543fb355b3452bade6befcef *R/Rsquared.R f2527f30f48fcf4cfda14c9951186ead *R/Waldtest.R 80e1701b16f045aaf746f28dc808c98e *R/Waldtest.Rm.R 4de7e90e5eba8d4d90d7005bc6b9c38d *R/anova.llra.R b2e5064d8aa93edded5747f9f64f315c *R/build_W.R a8aa6e591ab2b76117211cdef4cdcf52 *R/checkdata.R 40e9100130fde20aa73a04818d9909b3 *R/cldeviance.R 013c3532078f0f62c326f5f7fe10e23e *R/cmlprep.R 9f91686f144add619bbaaec474974fbb *R/coef.eRm.R 0218542e5f566c443de1054b43d1dc6e *R/coef.ppar.R 2a665e30eb32b2b8b9a7ee2cb846831b *R/collapse_W.R 323a5c6fe9d49f18031aeeff22de0ee5 *R/confint.eRm.r a98e2b8f8c0dc9f184159e94c210611b *R/confint.ppar.r fd0612d832c6044df83689d42631a9af *R/confint.threshold.r b9da6dc9d83605723db2e81c69fd81b8 *R/cwdeviance.r 9f6fff516f1257a642e18175e82ec53d *R/datcheck.LRtest.r 4f7c2902266ffc2b21916d2791287ef9 *R/datcheck.R 3ed2f9de5c5ec9a4d3dd66b40e0425d1 *R/datprep_LLTM.R b9868aa42173751e8d9fd8a1a16c49e1 *R/datprep_LPCM.R 5bd2486b21ae4a9f196c7445aea4c958 
*R/datprep_LRSM.R 8eff0f65d596844d9810c3cfa58bc0a2 *R/datprep_PCM.R ca0af91c768a48fff0782a8b92d5ed9d *R/datprep_RM.R 977fc96896e111fcf2692e274d9be346 *R/datprep_RSM.R 39b6410834977409a957f4dc85fb8aaf *R/fitcml.R 4878fc213dcd9d9a0b3b59353f06eebe *R/gofIRT.R 57242fed1520c63de85bdc52e6a70c31 *R/gofIRT.ppar.R f9850156706dcad20dfedfc80c220b04 *R/hoslem.R 71212f4c14003458fc3e30638f339629 *R/invalid.R e2acfcf0f69a54046e7500f37b11cce7 *R/itemfit.R f242e9d1dc269c42da573caeb5beb426 *R/itemfit.ppar.R d12c6019315fbc11dca8ce109e1883ab *R/labeling.internal.r c6cc30c6760a3e4fd1213ec55bd086e9 *R/likLR.R 9c56aa9ca79069fd2fa2a02f14d8e7b4 *R/llra.datprep.R b120a9958f01935b8829c7473345e9de *R/llra.internals.R 5bea9ef9bfb310d9d57960b335cc6e2f *R/logLik.eRm.r 6f889b8ad497988f236902afa009cba3 *R/logLik.ppar.r a4b58be8d00e8a61a442e6e7f5709138 *R/model.matrix.eRm.R 08c4ee8081fb13b7da90b8a6faa28ea0 *R/performance.R ad14b6669d085f886f032ff60fd2643b *R/performance_measures.R 166e801703a416aa2a2cdbde298651db *R/performance_plots.R adabedf84b1cecfce267c7be91d7cbbb *R/person.parameter.R e91f0d25dcaa66df375fa34312e0f36c *R/person.parameter.eRm.R 1580451632297ddae0e4ae6c262a9bb0 *R/personfit.R 68cc2204b1c470c10a57948e6629d542 *R/personfit.ppar.R 5211c838890a01fe791e26e5f2ba10c8 *R/pifit.internal.r 6dc484eac9810ce58aa2a06fab305a56 *R/plist.internal.R e050e7af6f9bb0a52bf55026d3101d27 *R/plot.ppar.r fa5b8a3513fcdf05e0533c7549d7adb7 *R/plotCI.R 5b20ac29faf764d0e70892b578012162 *R/plotDIF.R a8afc0168d588f3380174f8e7b6d37cd *R/plotGOF.LR.R c535a897ffcc871b893cbbee237baf70 *R/plotGOF.R c0c259828a7490f7cf87888394e31214 *R/plotGR.R e5df0fcbc0cf84479dd1af053994135c *R/plotICC.R 0add981ba89f86649c26ca8c5bd818b6 *R/plotICC.Rm.R e9b4f3c4e961531fbd204c4e52e056a4 *R/plotPImap.R 53328e5a237f91c7f03e1447430a3580 *R/plotPWmap.R 9f9956d4213bf12105e1985ff7c2fc28 *R/plotTR.R 817427f50be483fdf3dc5be097121c7f *R/plotjointICC.R 30e0c7cadcb3d360f9373b6ffdc58912 *R/plotjointICC.dRm.R 503d39882ff1519f9a9c822525df881b 
*R/pmat.R 978c189f51d4c525f6a1d53377c72fab *R/pmat.default.R 3b8b125515c56d967e0184d9bae4fe87 *R/pmat.ppar.R 75667db3c392cce69583eb02f93739bb *R/predict.ppar.R c6520d9937a5432bba0921b1c1ddc154 *R/prediction.R 2ef33bcb8ff18bada70cc9cd28167b6b *R/print.ICr.r e419bb90c13b3e34d60f809d2aab7a8c *R/print.LR.R baa854c72760319d47628d9cdeab5847 *R/print.MLoef.r 063e72adfa562645ff3510ff2f93fc8a *R/print.eRm.R c5312a2efff02316933e78574a2d7072 *R/print.gof.R 1a8e26e12c84b76aac1cccc7ecafd6c0 *R/print.ifit.R eceef6e388090c985b69bb7495fd898b *R/print.llra.R 37817e03ebd0e3395fb011928892e97b *R/print.logLik.eRm.R e7e35e2b4d5cfd70f341b852793c0e4b *R/print.logLik.ppar.r ae0c91d5cdfde5d20945fc0e804708f7 *R/print.pfit.R 22c7359fd707bd836a258c6e02f484b3 *R/print.ppar.R cb2c9116b9ede352bb567fa9eb8ac393 *R/print.resid.R 45ce31571c765ec1a1bb87a16ff82184 *R/print.step.r 22252f71f8a50bf9726528501c70b443 *R/print.summary.llra.R 5aa80f82e5e72c24602c8525b6389d71 *R/print.threshold.r 2ba6872ce486a3b88889f50ab34046f1 *R/print.wald.R 681692c0b23c20c07fd6e27eb6823d03 *R/residuals.ppar.R 7a34777d662d5c0f5c9c1e473bf52746 *R/rostdeviance.r b3f9c25354a6b9bddf925bcab2648687 *R/sim.2pl.R 4b54d34fbb18f74fac3b93da2c11a84f *R/sim.locdep.R f882b1817e7bbcef9b27c276f257a626 *R/sim.rasch.R 60c5d7095026f9b3f7ee2dc6321fbc25 *R/sim.xdim.R 50fba0ca19951546155a7d644228d24f *R/stepwiseIt.R ec78daaa3f45ed12a62e483f3ee1ce63 *R/stepwiseIt.eRm.R 913ccfc27b8531d6efe242b877b45d30 *R/summary.LR.r 65d0e9b84e5f96d02e9de47a6e8a5a90 *R/summary.MLoef.r dca2b5cf6a23b084d1fcee50a51050b6 *R/summary.eRm.R a201d1040cd729f62e09a45ff0402957 *R/summary.gof.R 91593cc56050024103f88648bd3fd9d6 *R/summary.llra.R bde67b4f83ca342816e87e9c39fde213 *R/summary.ppar.R d28668718027ba4f7d4210dcac0c6b76 *R/summary.threshold.r b80b7dbe79cca5167e85e53674887e40 *R/thresholds.eRm.r 3c6f6ec631aeacf22e73fadf8074fc12 *R/thresholds.r 54259c5861ddc4a41474b0f47dc7ff13 *R/vcov.eRm.R e7182e7050856376cc68a090c3fc962d *R/zzz.R 17e5211154d381fb70abc1eddbccb9d7 
*data/llraDat1.rda 04e416237eb526a153a793cec2988809 *data/llraDat2.rda 9346da0bb55f95db5ed71f73d050f2bd *data/llradat3.rda ed5e5d66de298a34f143f361ee0722fa *data/lltmdat1.rda b66d20feef24d2792b7e496bf30dbb7e *data/lltmdat2.rda d064d228919bf4bec3f96f3bdc73309a *data/lpcmdat.rda 1ae66d8c67c396f15799b2b8defd9447 *data/lrsmdat.rda edd14b4be4ee7329f78e2ec1555a4ad4 *data/pcmdat.rda 3694b4d3076bbbb26f6734c06e923f90 *data/pcmdat2.rda 257737a046f48862ca6f26fc8f5bc94f *data/raschdat1.rda 18d2da8d7902349b513004d1342fbd3d *data/raschdat2.rda c42bff96b6fd7f98d0b86bd6b058b367 *data/rsmdat.rda c58787b71a1f1d3c295a4bfa20737306 *inst/doc/UCML.jpg 859438dab150c3e7762183e79cb4e6ec *inst/doc/Z.cls 6c088c475da8ad1998ff6abf0550a717 *inst/doc/eRm.Rnw 53f32799d25b50133b6ff217357184ae *inst/doc/eRm.pdf 61d680540f93d252561fa615bd95511b *inst/doc/eRm_object_tree.pdf 4252672da961d1370a22bc901d68cc5d *inst/doc/eRmvig.bib 405e1ee0b6273a4d992299ddc0f9233c *inst/doc/index.html.old 92e903f33d4067a7fbc89fa4e7571c92 *inst/doc/jss.bst e97dfac8265ca8a3cbae7ff1d64ac832 *inst/doc/modelhierarchy.pdf 3ccae9025b31a4c6c2a4a0c38fadcc60 *man/IC.Rd 346aa4dad160d49be05a16a4f6fae983 *man/LLRA.Rd 3ff5fc2b6804ccec50788a81ab024ad3 *man/LLTM.Rd b2674dd262ed08acf9973e028b6c9062 *man/LPCM.Rd 2fe4a48d3db49e43f8ca765b3724a23b *man/LRSM.Rd 21a5eee857676dadca51fce6f58438bb *man/LRtest.Rd 145b3975b694fcb6040bb7ece98b4962 *man/MLoef.Rd 07664ccf23e9418d1a3e2ae70572aa82 *man/NPtest.Rd 49e40371caf75ce2b50dde4fce78fa83 *man/PCM.Rd ba1bc663be054dca6f66956e83296d9f *man/RM.Rd 81a06eda5b851e2551d0c069132bd601 *man/RSM.Rd 1fa68e025a4ece034a6e9719c58f26e7 *man/Waldtest.Rd 7a672a0bdbee1d6dfcddd76a77e7f384 *man/anova.llra.Rd 61d45eca1d3602e0910c562444a5a91a *man/build_W.Rd 5646ca17db14129c73ecc8e2c059f0d6 *man/collapse_W.Rd 3df6fa3e7648ed587015d46e61ff43f3 *man/eRm-package.Rd e61327803e5e6b7d2d3ccc944e37aabb *man/gofIRT.Rd 76cea65dee359343eacaf632bda3ea52 *man/itemfit.ppar.Rd 07fa1d3822aa64ac126dd6ea09fbd896 *man/llra.datprep.Rd 
3b7d3041daf6dff31a863240a7d66c43 *man/llraDat1.Rd 6c2f252c609fbf7bf5e909c7aaae01f3 *man/llraDat2.Rd 4e6086c12e5f1af87effae54623069b8 *man/llradat3.Rd 5858e38d4bd68f93c3eb4961423f2aae *man/person.parameter.Rd cbae5267dff3868ef0cd2c382a9968da *man/plotDIF.Rd cc89a7415fd66ead9a764e66e190ebb8 *man/plotGR.Rd ebc0198383f388c7f118bf004e1ab47a *man/plotICC.Rd fb7839cc73bd28943352b8af8601cb87 *man/plotPImap.Rd db3395d4738132591b935af5dc7809ac *man/plotPWmap.Rd bb2e316fb66fbc62d5d9d86db9ced96a *man/plotTR.Rd 5c7fab3317a8fcc01c2ff0a6fe1824a9 *man/predict.ppar.Rd 9e66621272c2cf652b1396428edf94f8 *man/print.eRm.Rd d96cb1ddd85fd3b7ecdfe095550dc027 *man/raschdat.Rd 8cd62fbd653f0036bdc4ff0a97c6c77d *man/sim.2pl.Rd 91b5aef01c0f9142c7970c6b75727034 *man/sim.locdep.Rd b937031cca9d299b55d3932f299983af *man/sim.rasch.Rd 242e82abb41d92f5926ec9ff2b037e1b *man/sim.xdim.Rd 77f43c8fcfd4ff3af7f4351419fde0ff *man/stepwiseIt.Rd a62ad6ab0c17c4af3ea1de449c0cc388 *man/summary.llra.Rd eb6aee99b123cc957f76c938c624f264 *man/thresholds.Rd d0d66f1a61eae0e016a6e9401d0e5917 *src/components.c ce4282b827566f0ef0572ae06eb7bf04 *src/components.h b318d7bfff0ba1eccadc86d34e1f0c5f *src/geodist.c c44d6148a344eb9e3deb558f5d453d8c *src/geodist.h eRm/src/0000755000176000001440000000000011572663364011631 5ustar ripleyuserseRm/src/geodist.h0000744000176000001440000000144211572663364013442 0ustar ripleyusers/* ###################################################################### # # geodist.h # # copyright (c) 2004, Carter T. Butts # Last Modified 11/21/04 # Licensed under the GNU General Public License version 2 (June, 1991) # # Part of the R/sna package # # This file contains headers for geodist.c. 
# ###################################################################### */ #ifndef GEODIST_H #define GEODIST_H /*DECLARATIONS/INCLUSIONS---------------------------------------------------*/ #include #include #include /*INTERNAL ROUTINES---------------------------------------------------------*/ /*R-CALLABLE ROUTINES-------------------------------------------------------*/ void geodist_R(double *g, double *pn, double *gd, double *sigma); #endif eRm/src/geodist.c0000744000176000001440000000424211572663364013436 0ustar ripleyusers/* ###################################################################### # # geodist.c # # copyright (c) 2004, Carter T. Butts # Last Modified 11/21/04 # Licensed under the GNU General Public License version 2 (June, 1991) # # Part of the R/sna package # # This file contains routines related to the computation of geodesics. # ###################################################################### */ #include #include #include #include "geodist.h" void geodist_R(double *g, double *pn, double *gd, double *sigma) /* Compute geodesics for the graph in g. The geodesic distances are stored in gd, and the path counts in sigma (both being nxn matrices). Note that these should be initialized to all infs and all 0s, respectively. 
*/ { char *visited; long int n,v,i,nod,s1count; /*Set up stuff*/ n=*pn; /*Allocate memory for visited list*/ visited=(char *)R_alloc(n,sizeof(char)); /*Cycle through each node, performing a BFS*/ for(v=0;v0){ while(s1count>0){ /*Find the next visitable node, and change its state*/ for(nod=0;visited[nod]!=1;nod++); /*Only OK b/c s1count>0*/ visited[nod]=3; s1count--; for(i=0;i=g[nod+i*n]){ gd[v+i*n]=gd[v+nod*n]+g[nod+i*n]; /*Geodist is nod's+g*/ sigma[v+i*n]+=sigma[v+nod*n]; /*Add to path count*/ } } } /*Continue until we run out of nodes for this iteration*/ /*Mark all "to-be-visited" nodes as visitable*/ for(i=0;i # Last Modified 11/26/04 # Licensed under the GNU General Public License version 2 (June, 1991) # # Part of the R/sna package # # This file contains headers for components.c. # ###################################################################### */ #ifndef COMPONENTS_H #define COMPONENTS_H /*DECLARATIONS/INCLUSIONS---------------------------------------------------*/ #include #include #include /*INTERNAL ROUTINES---------------------------------------------------------*/ /*R-CALLABLE ROUTINES-------------------------------------------------------*/ void component_dist_R(double *g, double *pn, double *memb); #endif eRm/src/components.c0000744000176000001440000000411111572663364014160 0ustar ripleyusers/* ###################################################################### # # components.c # # copyright (c) 2004, Carter T. Butts # Last Modified 11/26/04 # Licensed under the GNU General Public License version 2 (June, 1991) # # Part of the R/sna package # # This file contains routines related to the identification of # components. # ###################################################################### */ #include #include #include #include "components.h" void component_dist_R(double *g, double *pn, double *memb) /* Determine component memberships in g. The memberships are stored in memb, which must be a zero-initialized vector of length *pn. 
*/ { char *visited; long int n,v,nod,i,s1count; double comp=0.0; /*Set up stuff*/ n=*pn; /*Allocate memory for visited list*/ visited=(char *)R_alloc(n,sizeof(char)); /*Cycle through each node, performing a BFS*/ for(v=0;v0*/ visited[nod]=3; /*Mark as visited*/ s1count--; memb[nod]=comp; /*Set membership to comp*/ for(i=v+1;i1 && is.character(splitcr)){ # if splitcr is character vector, treated as factor splitcr<-as.factor(splitcr) } if (is.factor(splitcr)){ spl.nam<-deparse(substitute(splitcr)) spl.lev<-levels(splitcr) spl.gr<-paste(spl.nam,spl.lev,sep=" ") splitcr<-unclass(splitcr) } numsplit<-is.numeric(splitcr) if (any(is.na(object$X))) { if (!numsplit && splitcr=="mean") { #mean split spl.gr<-c("Raw Scores < Mean", "Raw Scores >= Mean") X<-object$X # calculates index for NA groups # from person.parameter.eRm dichX <- ifelse(is.na(X),1,0) strdata <- apply(dichX,1,function(x) {paste(x,collapse="")}) gmemb <- as.vector(data.matrix(data.frame(strdata))) gindx<-unique(gmemb) rsum.all<-rowSums(X,na.rm=TRUE) grmeans<-tapply(rsum.all,gmemb,mean) #sorted ngr<-table(gmemb) #sorted m.all<-rep(grmeans,ngr) #sorted,expanded rsum.all<-rsum.all[order(gmemb)] spl<-ifelse(rsum.all Median") #removed rh 2010-12-17 #cat("Warning message: Persons with median raw scores are assigned to the lower raw score group!\n") X<-object$X # calculates index for NA groups # from person.parameter.eRm dichX <- ifelse(is.na(X),1,0) strdata <- apply(dichX,1,function(x) {paste(x,collapse="")}) gmemb <- as.vector(data.matrix(data.frame(strdata))) gindx<-unique(gmemb) rsum.all<-rowSums(X,na.rm=TRUE) grmed<-tapply(rsum.all,gmemb,median) #sorted ngr<-table(gmemb) #sorted m.all<-rep(grmed,ngr) #sorted,expanded rsum.all<-rsum.all[order(gmemb)] spl<-ifelse(rsum.all<=m.all,1,2) splitcr<-spl object$X<-X[order(gmemb),] } } if (is.numeric(splitcr)){ spl.nam<-deparse(substitute(splitcr)) if (length(table(splitcr)) > 2) stop("Dichotomous person split required!") if (length(splitcr) != dim(object$X)[1]) { 
stop("Mismatch between length of split vector and number of persons!") } else { rvind <- splitcr Xlist <- by(object$X,rvind, function(x) x) names(Xlist) <- as.list(sort(unique(splitcr))) if(is.null(spl.gr)){ spl.lev<-names(Xlist) spl.gr<-paste(spl.nam,spl.lev,sep=" ") } }} if (!is.numeric(splitcr)) { if (splitcr=="median") { #median split rv <- apply(object$X,1,sum,na.rm=TRUE) rvsplit <- median(rv) rvind <- rep(0,length(rv)) rvind[rv > rvsplit] <- 1 #group with high raw score object Xlist <- by(object$X,rvind,function(x) x) names(Xlist) <- list("low","high") } if (splitcr=="mean") { #mean split rv <- apply(object$X,1,sum,na.rm=TRUE) rvsplit <- mean(rv) rvind <- rep(0,length(rv)) rvind[rv > rvsplit] <- 1 #group with highraw scoobject Xlist <- by(object$X,rvind,function(x) x) names(Xlist) <- list("low","high") } } del.pos.l <- lapply(Xlist, function(x) { it.sub <- datcheck.LRtest(x,object$X,object$model) #items to be removed within subgroup }) del.pos <- unique(unlist(del.pos.l)) if ((length(del.pos)) >= (dim(object$X)[2]-1)) { stop("\nNo items with appropriate response patterns left to perform Wald-test!\n") } if (length(del.pos) > 0) { warning("\nThe following items were excluded due to inappropriate response patterns within subgroups: ",immediate.=TRUE) cat(colnames(object$X)[del.pos], sep=" ","\n") cat("Subgroup models are estimated without these items!\n") } if (length(del.pos) > 0) { X.el <- object$X[,-(del.pos)] } else { X.el <- object$X } Xlist.n <- by(X.el,rvind,function(y) y) names(Xlist.n) <- names(Xlist) if (object$model=="RM") { likpar <- sapply(Xlist.n,function(x) { #matrix with loglik and npar for each subgroup objectg <- RM(x) parg <- objectg$etapar seg <- objectg$se.eta list(parg,seg,objectg$betapar,objectg$se.beta) }) } if (object$model=="PCM") { likpar <- sapply(Xlist.n,function(x) { #matrix with loglik and npar for each subgroup objectg <- PCM(x) parg <- objectg$etapar seg <- objectg$se.eta list(parg,seg,objectg$betapar,objectg$se.beta) }) } if 
(object$model=="RSM") { likpar <- sapply(Xlist.n,function(x) { #matrix with loglik and npar for each subgroup objectg <- RSM(x) parg <- objectg$etapar seg <- objectg$se.eta list(parg,seg,objectg$betapar,objectg$se.beta) }) } betapar1 <- likpar[3,][[1]] beta1.se <- likpar[4,][[1]] betapar2 <- likpar[3,][[2]] beta2.se <- likpar[4,][[2]] num <- (betapar1-betapar2) denom <- sqrt(beta1.se^2 + beta2.se^2) W.i <- num/denom pvalues <- (1-pnorm(abs(W.i)))*2 coef.table <- cbind(W.i,pvalues) dimnames(coef.table) <- list(names(betapar1),c("z-statistic","p-value")) result <- list(coef.table=coef.table,betapar1=betapar1,se.beta1=beta1.se,betapar2=betapar2, se.beta2=beta2.se, spl.gr=spl.gr, call=call, it.ex = del.pos) class(result) <- "wald" result } eRm/R/Waldtest.R0000744000176000001440000000011111572663323013142 0ustar ripleyusers`Waldtest` <- function(object,splitcr="median")UseMethod("Waldtest") eRm/R/vcov.eRm.R0000744000176000001440000000032711572663323013063 0ustar ripleyusers`vcov.eRm` <- function(object,...) 
{ if (any(is.na(object$se.eta))) { vcmat <- NA } else { vcmat <- (solve(object$hessian)) #VC-matrix of the parameter estimates } return(vcmat) } eRm/R/thresholds.r0000744000176000001440000000006711572663323013604 0ustar ripleyusersthresholds <- function(object)UseMethod("thresholds") eRm/R/thresholds.eRm.r0000744000176000001440000000516611572663323014333 0ustar ripleyusersthresholds.eRm <- function(object) # uses matrix approach { #Computation of threshold parameters for polytomous models #object of class "eRm" (but not "dRm") if ((object$model == "LLTM") || (object$model == "RM")) stop("Threshold parameters are computed only for polytomous models!") if ((object$model == "LRSM") || (object$model == "LPCM")) { mpoints <- object$mpoints ngroups <- object$ngroups vecrep <- mpoints * ngroups } else { mpoints <- 1 ngroups <- 1 vecrep <- 1 } betapar <- object$betapar indmt <- apply(object$X,2,max,na.rm=TRUE) #number of categories per item mt_vek1 <- sequence(indmt[1:(length(indmt)/mpoints)]) #1 block of beta-items mt_vek <- rep(mt_vek1, vecrep) sq<-ifelse(mt_vek > 1,-1,0) d1<-diag(sq[-1]) k<-length(betapar) d2<-diag(k) d2[-k,-1]<-d2[-k,-1]+d1 threshpar <-as.vector(crossprod(betapar,d2)*-1) #vector with threshold parameters names(threshpar) <- paste("thresh",names(betapar)) vc.beta <- (object$W%*%solve(object$hessian)%*%t(object$W)) #VC matrix beta's se.thresh <- sqrt(diag(d2%*%(vc.beta)%*%t(d2))) #standard errors of thresholds names(se.thresh) <- names(threshpar) blocks <- rep(1:vecrep, each = length(mt_vek1)) thblock <- split(threshpar,blocks) #block of threshholds (as in design matrix) indmt1 <- indmt[1:(length(indmt)/mpoints)] indvec <- rep(1:length(indmt1),indmt1) threshtab.l <- lapply(thblock, function(x) { #list of table-blocks Location <- tapply(x,indvec,mean) #location parameters thresh.l <- split(x, indvec) threshmat <- t(sapply(thresh.l,"[",1:max(mt_vek))) colnames(threshmat) <- paste("Threshold", 1:dim(threshmat)[2]) parmat <- cbind(Location,threshmat) }) 
#determine item names for block-table cnames <- colnames(object$X) ind.it <- rep(1:mpoints,each = length(cnames)/mpoints) #item label index itnames1 <- as.vector(unlist(tapply(cnames, ind.it, function(x) rep(x, ngroups)))) rep.ind <- sapply(threshtab.l, function(x) dim(x)[1]) sp.ind <- rep(1:length(rep.ind), rep.ind) names.l <- split(itnames1, sp.ind) #names as list for (i in 1:length(threshtab.l)) rownames(threshtab.l[[i]]) <- names.l[[i]] #name the items result <- list(threshpar = threshpar,se.thresh = se.thresh, threshtable = threshtab.l) class(result) <- "threshold" result }eRm/R/summary.threshold.r0000744000176000001440000000052011572663323015107 0ustar ripleyuserssummary.threshold <- function(object,...) { #object of class "threshold" coef.table <- cbind(round(object$threshpar,5),round(object$se.thresh,5),round(confint(object),5)) dimnames(coef.table) <- list(names(object$threshpar),c("Estimate","Std. Err.",colnames(confint(object)))) cat("\n") print(coef.table) cat("\n") }eRm/R/summary.ppar.R0000744000176000001440000000267211572663323014027 0ustar ripleyusers`summary.ppar` <- function(object,...) 
# summary method for object of class "ppar" { if (length(object$pers.ex) > 0) { thetaind <- rownames(object$X)[-object$pers.ex] } else { thetaind <- rownames(object$X) } if (any(is.na(object$X))) { #recompute gmemb without persons excluded dichX <- ifelse(is.na(object$X),1,0) strdata <- apply(dichX,1,function(x) {paste(x,collapse="")}) gmemb <- as.vector(data.matrix(data.frame(strdata))) } else { gmemb <- rep(1,dim(object$X)[1]) } cat("\n") cat("Estimation of Ability Parameters") for (i in 1:length(object$thetapar)) { cat("\n\n") if (length(object$thetapar) > 1) { cat("Subject NA Group:",i,"\n") xvec <- rbind(object$X[gmemb==i,])[1,] #determine NA pattern xvec[!is.na(xvec)] <- "x" cat("NA pattern:",xvec,"\n") } cat("Collapsed log-likelihood:",object$loglik[[i]],"\n") cat("Number of iterations:",object$iter[[i]],"\n") cat("Number of parameters:",object$npar[[i]],"\n") cat("\n") cat("ML estimated ability parameters (without spline interpolated values): \n") coef.table <- cbind(object$thetapar[[i]],object$se.theta[[i]],confint(object)[[i]]) dimnames(coef.table) <- list(paste("theta",thetaind[object$gmemb==i]),c("Estimate","Std. Err.",colnames(confint(object)[[i]]))) print(coef.table) } } eRm/R/summary.MLoef.r0000744000176000001440000000247011572663323014123 0ustar ripleyuserssummary.MLoef <- function(object, ...) 
{ # summary method for object of class "MLoef" (MLoef) # prepare message for split criteria if( length(object$splitcr) == 1){ if( (object$splitcr == "median") | (object$splitcr == "mean") ){ spl <- object$splitcr } } else { spl <- "user-defined" } # if(!is.null(object$warning)){ # if(object$splitcr == "median") cat("Warning: Item(s)",paste(names(object$warning),collapse=", "),"with raw score equal to the median assigned to the lower raw score group!\n") # if(object$splitcr == "mean") cat("Warning: Item(s)",paste(names(object$warning),collapse=", "),"with raw score equal to the mean assigned to the lower raw score group!\n") # } cat("\n") cat("Martin-Loef-Test (split criterion: ",spl,")\n",sep="") for(i in 1:length(object$i.groups)){ cat("\n") cat("Group ",i,":\nItems: ",sep="") cat(paste(object$i.groups[[i]]),sep=", ") cat("\nLog-Likelihood:",round(object$subModels[[i]]$loglik,3),"\n") } cat("\n") cat("Overall Rasch-Model:\n") cat("Log-Likelihood:",round(object$fullModel$loglik,3),"\n") cat("\n") cat(paste("LR-value:",round(object$LR,3),"\n")) cat(paste("Chi-square df:",round(object$df,3),"\n")) cat(paste("p-value:",round(object$p.value,3)),"\n") cat("\n") } eRm/R/summary.LR.r0000744000176000001440000000152111572663323013432 0ustar ripleyuserssummary.LR <- function(object,...) 
# summary method for objects of class "LR" (from LRtest") { cat("\n") cat("Andersen LR-test: \n") cat("LR-value:", round(object$LR,3),"\n") cat("Chi-square df:",object$df,"\n") cat("p-value: ",round(object$pvalue,3),"\n") cat("\n") mt_vek <- apply(object$X,2,max,na.rm=TRUE) for (i in 1:length(object$betalist)) { cat("\n") cat("Subject subgroup ",object$spl.gr[i],":",sep="") cat("\n") cat("Log-likelihood: ",object$likgroup[i]) cat("\n\n") cat("Beta Parameters: \n") betavec <- object$betalist[[i]] if (!all(is.na(object$selist[[i]]))) { coeftable <- rbind(betavec,object$selist[[i]]) rownames(coeftable) <- c("Estimate","Std.Err.") print(coeftable) } else { print(betavec) } cat("\n") } } eRm/R/summary.llra.R0000744000176000001440000000112211572663323014004 0ustar ripleyuserssummary.llra <- function(object, ...) UseMethod("summary.llra") summary.llra <- function(object, gamma=0.95, ...) #summary for class llra { modi <- object$model calli <- deparse(object$call) logli <- object$loglik iti <- object$iter pari <- object$npar cii <- confint(object, "eta", level=gamma) se.eta <- object$se.eta names(se.eta) <- names(object$etapar) res <- list(etapar=object$etapar,se.eta=se.eta,ci=cii,iter=iti,model=modi,call=calli,npar=pari,loglik=logli,refGroup=object$refGroup) class(res) <- "summary.llra" res } eRm/R/summary.gof.R0000744000176000001440000000151211572663323013630 0ustar ripleyuserssummary.gof <- function(object, ...) 
{ #summary method for objects of class "gof" (from gofIRT.ppar) cat("\nGoodness-of-Fit Tests\n") print(round(object$test.table, 3)) cat("\nR-Squared Measures") cat("\nPearson R2:", round(object$R2$R2.P, 3)) cat("\nSum-of-Squares R2:", round(object$R2$R2.SS, 3)) cat("\nMcFadden R2:", round(object$R2$R2.MF, 3)) cat("\n\nClassifier Results - Confusion Matrix (relative frequencies)\n") print(round(object$classifier$confmat/sum(object$classifier$confmat), 3)) cat("\nAccuracy:", round(object$classifier$accuracy, 3)) cat("\nSensitivity:", round(object$classifier$sensitivity, 3)) cat("\nSpecificity:", round(object$classifier$specificity, 3)) cat("\nArea under ROC:", round(object$AUC, 3)) cat("\nGini coefficient:", round(object$Gini, 3)) cat("\n\n") }eRm/R/summary.eRm.R0000744000176000001440000000300211572663323013574 0ustar ripleyusers`summary.eRm` <- function(object,...) { #labels...whether the item parameters should be labelled cat("\n") cat("Results of",object$model,"estimation: \n") cat("\n") cat("Call: ", deparse(object$call), "\n") cat("\n") cat("Conditional log-likelihood:",object$loglik,"\n") cat("Number of iterations:",object$iter,"\n") cat("Number of parameters:",object$npar,"\n") cat("\n") X <- object$X X01 <- object$X01 mt_vek <- apply(X,2,max,na.rm=TRUE) ci <- confint(object,"eta") # eta parameters: if (object$model %in% c("RM","RSM","PCM")) # now difficulty for RM, RSM, PCM cat("Item (Category) Difficulty Parameters (eta) ") # new labelling rh 25-03-2010 else cat("Basic Parameters eta ") cat("with 0.95 CI:\n") coeftable <- as.data.frame(cbind(round(object$etapar,3), round(object$se.eta,3),round(ci,3))) colnames(coeftable) <- c("Estimate","Std. 
Error","lower CI","upper CI") rownames(coeftable) <- names(object$etapar) print(coeftable) ci <- confint(object,"beta") cat("\nItem Easiness Parameters (beta) with 0.95 CI:\n") #coeftable <- as.data.frame(cbind(round(object$betapar),3), # round(object$se.beta,3),round(ci,3)) coeftable <- cbind(round(object$betapar,3), round(object$se.beta,3), round(ci,3)) colnames(coeftable) <- c("Estimate","Std. Error","lower CI","upper CI") rownames(coeftable) <- names(object$betapar) print(coeftable) cat("\n") } eRm/R/stepwiseIt.R0000744000176000001440000000020411572663323013516 0ustar ripleyusers`stepwiseIt` <- function(object, criterion = list("itemfit"), alpha = 0.05, verbose = TRUE, maxstep = NA)UseMethod("stepwiseIt") eRm/R/stepwiseIt.eRm.R0000744000176000001440000001352711572663323014254 0ustar ripleyusers#function for stepwise item elimination stepwiseIt.eRm <- function(object, criterion = list("itemfit"), alpha = 0.05, verbose = TRUE, maxstep = NA) { # object of class dRm # criterion: either list("itemfit") or list("LRtest", splitcr) od list("Waldtest", splitcr) #-------- sanity checks --------- dummy <- match.arg(criterion[[1]], c("itemfit","LRtest","Waldtest")) if (!is.list(criterion)) stop("Criterion must be provided as list!") if (!any(class(object) == "dRm")) stop("Stepwise elimination implemented for dichotomous Rasch models only!") #------- end sanity checks ------ X.new <- object$X K <- dim(X.new)[2] if (is.na(maxstep)) maxstep <- K if (length(criterion) == 2) { splitcr <- criterion[[2]] } else { splitcr <- "median" } #---------------- start elimination ---------------- i <- 0 it.el <- rep(NA, K) #initialize outputs el.names <- rep(NA, K) wald.mat <- matrix(NA, ncol = 2, nrow = K) itemfit.mat <- matrix(NA, ncol = 3, nrow = K) LR.mat <- matrix(NA, ncol = 3, nrow = K) repeat { if((dim(X.new)[2]) == 2) { warning("Only 2 items left: No Rasch homogeneous itemset found!", call. = FALSE) break } if (i == maxstep) { warning("Maximum number of steps reached!", call. 
= FALSE) break } i <- i + 1 res <- RM(X.new) #fit Rasch #---------------- itemfit criterion ------------ if (criterion[[1]] == "itemfit") { pres <- person.parameter(res) #person parameters it.res <- itemfit(pres) #compute itemfit pvalvec <- 1-pchisq(it.res$i.fit, it.res$i.df) #vector with pvalues pvalsig <- which(pvalvec < alpha) #significant p-values if (length(pvalsig) > 0) { it.el[i] <- which(it.res$i.fit == max(it.res$i.fit))[1] ie <- it.el[i] itemfit.mat[i,] <- c(it.res$i.fit[ie], it.res$i.df[ie], pvalvec[ie]) if (verbose) cat("Eliminated item - Step ",i,": ",colnames(X.new)[it.el[i]],"\n", sep = "") el.names[i] <- colnames(X.new)[it.el[i]] X.new <- X.new[,-it.el[i]] } else break } #-------------- end itemfit criterion ----------- #------------------ Waldtest criterion ---------- if (criterion[[1]] == "Waldtest") { wald.res <- Waldtest(res, splitcr = splitcr) #compute Waldtest zvalvec <- abs(wald.res$coef.table[,1]) #absolute z-values pvalvec <- wald.res$coef.table[,2] #vector with pvalues pvalsig <- which(pvalvec < alpha) #significant p-values if (length(pvalsig) > 0) { elpos <- which(zvalvec == max(zvalvec))[1] #exclude maximum z-value Waldtest wald.mat[i,] <- wald.res$coef.table[elpos,] if (length(wald.res$it.ex) > 0) elpos <- elpos + sum(wald.res$it.ex <= elpos) #if items couldn't computed in Waldtest it.el[i] <- elpos el.names[i] <- colnames(X.new)[it.el[i]] if (verbose) cat("Eliminated item - Step ",i,": ",el.names[i],"\n", sep = "") X.new <- X.new[,-it.el[i]] } else break } #-------------- LRtest criterion ---------------- if (criterion[[1]] == "LRtest") #uses Waldtest but stops when LRtest is sig. 
{ lr.res <- LRtest(res, splitcr = splitcr) if(lr.res$pvalue < alpha) { wald.res <- Waldtest(res, splitcr = splitcr) #compute Waldtest zvalvec <- abs(wald.res$coef.table[,1]) #absolute z-values elpos <- which(zvalvec == max(zvalvec))[1] #exclude maximum z-value Waldtest if (length(wald.res$it.ex) > 0) elpos <- elpos + sum(wald.res$it.ex <= elpos) #if items couldn't computed in Waldtest it.el[i] <- elpos LR.mat[i,] <- c(lr.res$LR, lr.res$df, lr.res$pvalue) el.names[i] <- colnames(X.new)[it.el[i]] if (verbose) cat("Eliminated item - Step ",i,": ",el.names[i],"\n", sep = "") X.new <- X.new[,-it.el[i]] } else break } #----------- end LRtest criterion --------- } #--------------------- end stepwise------------------ #labeling el.names <- el.names[!is.na(el.names)] if (all(is.na(el.names))) { warning("No items eliminated! Each of them fits the Rasch model!", call. = FALSE) itemfit.mat <- NULL LR.mat <- NULL wald.mat <- NULL criterion[[1]] <- "none" } if (criterion[[1]] == "itemfit") { itemfit.mat <- rbind(itemfit.mat[!is.na(rowSums(itemfit.mat)),]) rownames(itemfit.mat) <- paste("Step ",1:length(el.names),": ",el.names,sep = "") colnames(itemfit.mat) <- c("Chisq", "df","p-value") } else { itemfit.mat <- NULL } if (criterion[[1]] == "Waldtest") { wald.mat <- rbind(wald.mat[!is.na(rowSums(wald.mat)),]) rownames(wald.mat) <- paste("Step ",1:length(el.names),": ",el.names,sep = "") colnames(wald.mat) <- c("z-statistic", "p-value") } else { wald.mat <- NULL } if (criterion[[1]] == "LRtest") { if (i == maxstep) { LR.mat <- rbind(LR.mat[!is.na(rowSums(LR.mat)),]) rownames(LR.mat) <- paste("Step ",1:length(el.names),": ",el.names,sep = "") } else { LR.mat <- rbind(LR.mat[!is.na(rowSums(LR.mat)),], c(lr.res$LR, lr.res$df, lr.res$pvalue)) rownames(LR.mat) <- c(paste("Step ",1:length(el.names),": ",el.names,sep = ""), paste("Step ", i,": None", sep = "")) } colnames(LR.mat) <- c("LR-value", "Chisq df", "p-value") } else { LR.mat <- NULL } result <- list(X = X.new, fit = res, it.elim 
= el.names, res.wald = wald.mat, res.itemfit = itemfit.mat, res.LR = LR.mat, nsteps = i-1) class(result) <- "step" result } eRm/R/sim.xdim.R0000744000176000001440000000461611572663323013121 0ustar ripleyuserssim.xdim <- function(persons, items, Sigma, weightmat, seed = NULL, cutpoint = "randomized") { # Sigma ... VC matrix for multinormal distribution # weightmat ... matrix of dimension k times D with weights. If omitted, equal weights are used. ## function from MASS mvrnorm<-function (n = 1, mu, Sigma, tol = 1e-06, empirical = FALSE) { p <- length(mu) if (!all(dim(Sigma) == c(p, p))) stop("incompatible arguments") eS <- eigen(Sigma, symmetric = TRUE, EISPACK = TRUE) ev <- eS$values if (!all(ev >= -tol * abs(ev[1]))) stop("'Sigma' is not positive definite") X <- matrix(rnorm(p * n), n) if (empirical) { X <- scale(X, TRUE, FALSE) X <- X %*% svd(X, nu = 0)$v X <- scale(X, FALSE, TRUE) } X <- drop(mu) + eS$vectors %*% diag(sqrt(pmax(ev, 0)), p) %*% t(X) nm <- names(mu) if (is.null(nm) && !is.null(dn <- dimnames(Sigma))) nm <- dn[[1]] dimnames(X) <- list(nm, NULL) if (n == 1) drop(X) else t(X) } if (missing(Sigma)) { ndim <- ncol(persons) } else { ndim <- nrow(Sigma) #number of dimensions } if (length(persons) == 1) { #simulating if (!is.null(seed)) set.seed(seed) faehig <- mvrnorm(persons, mu = rep(0, nrow(Sigma)), Sigma = Sigma) } else { faehig <- persons } if (length(items) == 1) { if (!is.null(seed)) set.seed(seed) schwierig <- rnorm(items) } else { schwierig <- items } n.persons <- nrow(faehig) n.items <- length(schwierig) if (missing(weightmat)) { #specifying the weight matrix weightmat <- matrix(0, ncol = ndim, nrow = n.items) if (!is.null(seed)) set.seed(seed) indvec <- sample(1:ndim, n.items, replace = TRUE) for (i in 1:n.items) weightmat[i,indvec[i]] <- 1 } Wp <- apply(weightmat, 1, function(wi) { #n.persons times n.items matrix Xw <- t(wi) %*% t(faehig)}) psolve <- matrix(0,n.persons,n.items) #class<-rep(1,n.persons) 
sim.rasch <- function(persons, items, seed = NULL, cutpoint = "randomized")
{
  ## Simulate a 0/1 data matrix conforming to the dichotomous Rasch model.
  ##
  ## persons:  number of persons (abilities drawn from N(0,1)) or a vector
  ##           of ability parameters.
  ## items:    number of items (difficulties drawn from N(0,1)) or a vector
  ##           of difficulty parameters.
  ## seed:     optional RNG seed; NOTE it is re-set before every random draw,
  ##           reproducing the original implementation's behaviour.
  ## cutpoint: "randomized" for probabilistic responses, otherwise a fixed
  ##           probability threshold.

  ## item difficulties (drawn first, matching the original RNG call order)
  if (length(items) != 1) {
    schwierig <- items
    n.items <- length(items)
  } else {
    if (!is.null(seed)) set.seed(seed)
    schwierig <- rnorm(items)            # standard normal difficulties
    n.items <- items
  }

  ## person abilities
  if (length(persons) != 1) {
    faehig <- persons
    n.persons <- length(persons)
  } else {
    if (!is.null(seed)) set.seed(seed)
    faehig <- rnorm(persons)
    n.persons <- persons
  }

  ## Rasch solving probabilities P = exp(theta - beta) / (1 + exp(theta - beta))
  eta <- outer(faehig, schwierig, "-")
  psolve <- exp(eta) / (1 + exp(eta))

  ## dichotomize: random thresholds or one fixed cutpoint
  if (cutpoint == "randomized") {
    if (!is.null(seed)) set.seed(seed)
    noise <- matrix(runif(n.items * n.persons), n.persons, n.items)
    R <- (noise < psolve) * 1
  } else {
    R <- (cutpoint < psolve) * 1
  }
  return(R)
}
if (length(items) == 1) { if (!is.null(seed)) set.seed(seed) schwierig <- rnorm(items) #standard normal distributed n.items <- items } else { schwierig <- items n.items <- length(items) } if (length(persons) == 1) { if (!is.null(seed)) set.seed(seed) faehig <- rnorm(persons) n.persons <- persons } else { faehig <- persons n.persons <- length(persons) } if (is.matrix(it.cor)) { #if (dim(it.cor)!= c(n.items, n.items)) stop("it.cor must be symmetric and of dimension number of items") delta <- it.cor } else { delta <- matrix(it.cor, ncol = n.items, nrow = n.items) } Loesprob<-matrix(0,n.persons,n.items) if (!is.null(seed)) set.seed(seed) Random.numbers<-matrix(runif(n.items*n.persons),n.persons,n.items) R<-matrix(-5,n.persons,n.items) for (j in 1:n.items) { for (i in 1:n.persons) { if ((j %% 2) == 0) { Loesprob[i,j]<-exp(faehig[i]-schwierig[j]+(R[i,j-1]-0.5)*delta[j,j-1])/(1+exp(faehig[i]-schwierig[j]+(R[i,j-1]-0.5)*delta[j,j-1])) } else { Loesprob[i,j]<-exp(faehig[i]-schwierig[j])/(1+exp(faehig[i]-schwierig[j])) }} R[,j]<-(Random.numbers[,j] 1) { alpha <- discrim } else { if (!is.null(seed)) set.seed(seed) alpha <- rlnorm(n.items, 0, sdlog = discrim) #discrimination parameter } psolve <- matrix(0, n.persons, n.items) for (i in 1:n.persons) for (j in 1:n.items) psolve[i,j]<-exp(alpha[j]*(faehig[i]-schwierig[j]))/(1+exp(alpha[j]*(faehig[i]-schwierig[j]))) if (cutpoint == "randomized") { if (!is.null(seed)) set.seed(seed) R <-(matrix(runif(n.items*n.persons),n.persons,n.items) < psolve)*1 } else { R <- (cutpoint < psolve)*1 } return(R) } eRm/R/Rsquared.R0000744000176000001440000000151411572663323013151 0ustar ripleyusersRsquared <- function(object, pi.hat) { # objects of class ppar # computes Pearson R^2 and SS R^2 for objects of class ppar #Pi <- pmat(object) #expected values if (length(object$pers.ex) > 0){ y <- as.vector(t(object$X[-object$pers.ex,])) #observed values } else { y <- as.vector(t(object$X)) } pi.hat <- as.vector(t(pi.hat)) R.P <- cor(y, pi.hat)^2 
#Squared Pearson correlation R.SS <- 1-(sum((y - pi.hat)^2)/sum((y - mean(y))^2)) #SS-R^2 loglik.full <- sum(y*log(pi.hat) + (1-y)*log(1-pi.hat), na.rm = TRUE) #full likelihood loglik.0 <- sum(y*log(mean(y)) + (1-y)*log(1-mean(y))) #L0 (Agresti, Sec. 6.2.5) R.MF <- (loglik.0 - loglik.full)/loglik.full result <- list(R2.P = R.P, R2.SS = R.SS, R2.MF = R.MF) result }eRm/R/RSM.R0000744000176000001440000000352311572663323012026 0ustar ripleyusers`RSM` <- function(X, W, se = TRUE, sum0 = TRUE, etaStart) { #...X: person*item scores matrix (starting from 0) #-------------------main programm------------------- call<-match.call() groupvec <- 1 mpoints <- 1 model <- "RSM" if (missing(W)) W <- NA else W <- as.matrix(W) if (missing(etaStart)) etaStart <- NA else etaStart <- as.vector(etaStart) XWcheck <- datcheck(X,W,mpoints,groupvec,model) #inital check of X and W X <- XWcheck$X lres <- likLR(X,W,mpoints,groupvec,model,st.err=se,sum0,etaStart) parest <- lres$parest #full groups for parameter estimation loglik <- -parest$minimum #log-likelihood value iter <- parest$iterations #number of iterations convergence <- parest$code etapar <- parest$estimate #eta estimates betapar <- as.vector(lres$W%*% etapar) #beta estimates if (se) { se.eta <- sqrt(diag(solve(parest$hessian))) #standard errors se.beta <- sqrt(diag(lres$W%*%solve(parest$hessian)%*%t(lres$W))) #se beta } else { se.eta <- rep(NA,length(etapar)) se.beta <- rep(NA,length(betapar)) } X01 <- lres$X01 labs <- labeling.internal(model,X,X01,lres$W,etapar,betapar,mpoints,max(groupvec)) #labeling for L-models W <- labs$W etapar <- labs$etapar betapar <- labs$betapar etapar <- -etapar # output difficulty rh 25-03-2010 npar <- dim(lres$W)[2] #number of parameters result <- list(X=X,X01=X01,model=model,loglik=loglik,npar=npar,iter=iter,convergence=convergence, etapar=etapar,se.eta=se.eta,hessian=parest$hessian,betapar=betapar, se.beta=se.beta,W=W,call=call) class(result) <- c("Rm","eRm") #classes: simple RM and extended RM result } 
rostdeviance <- function(object)
{
  ## Analysis-of-deviance table: tests an MML-type likelihood against the
  ## saturated multinomial model (Rost, p. 334).
  ## object: object of class "ppar"; reads $X, $loglik.cml and $W.
  ## Returns list(value = deviance, df, p.value).

  X <- object$X
  N <- nrow(X)                                   # number of subjects

  ## ---- saturated model ----
  pattern <- apply(X, 1, toString)               # response pattern per person
  freq <- as.vector(table(pattern))              # pattern frequencies
  lsat <- sum(freq * log(freq / N))              # saturated log-likelihood
  ## number of possible response patterns minus 1
  npar.sat <- prod(apply(X, 2, max) + 1) - 1

  ## ---- MML likelihood ----
  rv <- rowSums(X, na.rm = TRUE)                 # person raw scores
  rv.tab <- table(rv)
  lmml <- sum(rv.tab * log(rv.tab / N)) + object$loglik.cml
  npar.mml <- ncol(object$W)                     # NOTE(review): raw-score
                                                 # parameters not counted here,
                                                 # as in the original

  ## ---- deviance test ----
  dev <- -2 * (lmml - lsat)
  df.chi <- npar.sat - npar.mml
  p.value <- 1 - pchisq(dev, df.chi)

  return(list(value = dev, df = df.chi, p.value = p.value))
}
## Subset an argument list by name, silently skipping names that are not
## present; with complement=TRUE, drop the named arguments instead of
## keeping them.
.select.args <- function(arglist, args.to.select, complement = FALSE) {
  keep <- names(arglist) %in% args.to.select
  if (complement == TRUE) {
    keep <- !keep
  }
  arglist[keep]
}
## Construct the linear function through the points (x1, y1) and (x2, y2).
## Returns a function of x. A vertical line (x1 == x2) has no functional
## representation and raises an error.
##
## Fix: the previous implementation pasted the coefficients into a source
## string and eval(parse())-ed it, which round-trips the numbers through
## their decimal text representation and can lose precision. A closure
## keeps the exact numeric values and avoids eval/parse entirely.
.construct.linefunct <- function(x1, y1, x2, y2) {
  if (x1 == x2) {
    stop("Cannot construct a function from data.")
  }
  slope <- (y2 - y1) / (x2 - x1)
  intercept <- y1 - slope * x1
  function(x) slope * x + intercept
}
`print.wald` <- function(x, ...)
{
  ## Print method for objects of class "wald" (from Waldtest):
  ## shows the item-level z-value table rounded to 3 decimals and
  ## returns the rounded table invisibly.
  tab <- round(x$coef.table, 3)
  cat("\nWald test on item level (z-values):\n\n")
  print(tab)
  cat("\n")
  invisible(tab)
}
print.step <- function(x, ...) {
  ## Print method for stepwise item elimination results (class "step").
  ## Reports the number of steps and the result table of whichever
  ## criterion (Waldtest / itemfit / LR-test) was used; returns x invisibly.
  cat("\nResults for stepwise item elimination:\n")
  cat("Number of steps:", x$nsteps, "\n")

  ## header text per criterion slot, printed in this fixed order
  sections <- list(
    res.wald    = "Criterion: Waldtest\n\n",
    res.itemfit = "Criterion: Itemfit\n\n",
    res.LR      = "Criterion: Andersen's LR-test\n\n"
  )
  for (nm in names(sections)) {
    tab <- x[[nm]]
    if (!is.null(tab)) {
      cat(sections[[nm]])
      print(round(tab, 3))
      cat("\n")
    }
  }
  invisible(x)
}
# print method for person.parameter # x...object of class ppar { cat("\n") cat("Person Parameters:") cat("\n") if (length(x$pers.ex) > 0) { X <- x$X[-x$pers.ex,] #list with raw scores sumlist <- by(x$X[-x$pers.ex,],x$gmemb,rowSums,na.rm=TRUE) } else { X <- x$X sumlist <- by(x$X,x$gmemb,rowSums,na.rm=TRUE) } if (is.null(x$pred.list)) { #no spline Interpolation coef.list <- mapply(function(sm,th,se) { th.u <- tapply(th,sm, function(tm) {tm[1]}) #due to rounding errors, pck out first one se.u <- tapply(se,sm, function(ss) {ss[1]}) sm.u <- unique(sort(sm)) smth <- cbind(sm.u,th.u,se.u) return(smth) },sumlist,x$thetapar,x$se,SIMPLIFY=FALSE) } else { #if spline Interpolation #TFvec <- sapply(x$pred.list,is.null) #for these NA groups no spline interpolation was computed #predind <- (1:length(x$pred.list))[!TFvec] #x$pred.list <- x$pred.list[predind] coef.list <- mapply(function(sm,pl,se) { se.u <- tapply(se,sm, function(ss) {ss[1]}) sm.u <- unique(sort(sm)) TFvec <- pl$x %in% sm.u se.ind <- 1:length(TFvec) se.all <- rep(NA,length(se.ind)) se.all[se.ind[TFvec]] <- se.u cbind(pl$x,pl$y,se.all) },sumlist,x$pred.list,x$se,SIMPLIFY=FALSE) } if (dim(coef.list[[1]])[2] == 2) { #if no standard errors were computed coef.list <- lapply(coef.list,function(cl) {cbind(cl,NA)}) } # if (any(is.na(x$X))) { #recompute gmemb without persons excluded # dichX <- ifelse(is.na(x$X),1,0) # strdata <- apply(dichX,1,function(x) {paste(x,collapse="")}) # gmemb <- as.vector(data.matrix(data.frame(strdata))) # } else { # gmemb <- rep(1,dim(x$X)[1]) # } for (i in 1:length(x$thetapar)) { cat("\n") if (length(x$thetapar) > 1) { cat("Person NA Group:",i,"\n") xvec <- rep(NA, (dim(x$X)[2])) notNApos <- which(!is.na(as.vector(rbind(X[x$gmemb == i,])[1,]))) xvec[notNApos] <- "x" cat("NA pattern:",xvec,"\n") } colnames(coef.list[[i]]) <- c("Raw Score","Estimate","Std.Error") rownames(coef.list[[i]]) <- rep("",dim(coef.list[[i]])[1]) print(coef.list[[i]]) } invisible(coef.list) } 
`print.pfit` <- function(x, visible=TRUE, ...)
{
  ## Print method for personfit objects (class "pfit").
  ## Builds the personfit statistics table (chi-square with df = p.df - 1,
  ## MSQ and t statistics); printed when visible=TRUE, always returned
  ## invisibly.
  dfree <- x$p.df - 1                                 # df correction rh 10-01-20
  pvals <- 1 - pchisq(x$p.fit, dfree)
  tab <- cbind(round(x$p.fit, 3), dfree, round(pvals, 3),
               round(x$p.outfitMSQ, 3), round(x$p.infitMSQ, 3),
               round(x$p.outfitZ, 2), round(x$p.infitZ, 2))
  dimnames(tab) <- list(names(x$p.fit),
                        c("Chisq", "df", "p-value", "Outfit MSQ",
                          "Infit MSQ", "Outfit t", "Infit t"))
  if (visible) {                                      # added rh 10-01-20
    cat("\nPersonfit Statistics: \n")
    print(tab)
    cat("\n")
  }
  invisible(tab)
}
`print.ifit` <- function(x, visible=TRUE, ...)
{
  ## Print method for itemfit objects (class "ifit").
  ## Builds the itemfit statistics table (chi-square with df = i.df - 1,
  ## MSQ and t statistics); printed when visible=TRUE, always returned
  ## invisibly.
  dfree <- x$i.df - 1                                 # df correction rh 10-01-20
  pvals <- 1 - pchisq(x$i.fit, dfree)
  tab <- cbind(round(x$i.fit, 3), dfree, round(pvals, 3),
               round(x$i.outfitMSQ, 3), round(x$i.infitMSQ, 3),
               round(x$i.outfitZ, 2), round(x$i.infitZ, 2))
  dimnames(tab) <- list(names(x$i.fit),
                        c("Chisq", "df", "p-value", "Outfit MSQ",
                          "Infit MSQ", "Outfit t", "Infit t"))
  if (visible) {                                      # added rh 10-01-20
    cat("\nItemfit Statistics: \n")
    print(tab)
    cat("\n")
  }
  invisible(tab)
}
`print.eRm` <- function(x, ...)
{
  ## Print method shared by all eRm model objects.
  ## Shows call, conditional log-likelihood, iteration/parameter counts and
  ## the eta parameter table (estimate + SE), which is returned invisibly.
  cat("\n")
  cat("Results of", x$model, "estimation: \n")
  cat("\n")
  cat("Call: ", deparse(x$call), "\n")
  cat("\n")
  cat("Conditional log-likelihood:", x$loglik, "\n")
  cat("Number of iterations:", x$iter, "\n")
  cat("Number of parameters:", x$npar, "\n")
  cat("\n")
  ## RM/RSM/PCM report difficulties; other models report basic parameters
  header <- if (x$model %in% c("RM", "RSM", "PCM")) {
    "Item (Category) Difficulty Parameters (eta):"    # new labelling rh 25-03-2010
  } else {
    "Basic Parameters eta:"
  }
  cat(header)
  cat("\n")
  out <- rbind(x$etapar, x$se.eta)
  rownames(out) <- c("Estimate", "Std.Err")
  print(out)
  cat("\n\n")
  invisible(out)
}
list(predictions) } else if (!is.list(predictions)) { stop("Format of predictions is invalid.") } ## if predictions is a list -> keep unaltered ## convert labels into canonical list format if (is.data.frame(labels)) { names(labels) <- c() labels <- as.list( labels) } else if (is.matrix(labels)) { labels <- as.list( data.frame( labels)) names(labels) <- c() } else if ((is.vector(labels) || is.ordered(labels) || is.factor(labels)) && !is.list(labels)) { labels <- list( labels) } else if (!is.list(labels)) { stop("Format of labels is invalid.") } ## if labels is a list -> keep unaltered ## Length consistency checks if (length(predictions) != length(labels)) stop(paste("Number of cross-validation runs must be equal", "for predictions and labels.")) if (! all(sapply(predictions, length) == sapply(labels, length))) stop(paste("Number of predictions in each run must be equal", "to the number of labels for each run.")) ## only keep prediction/label pairs that are finite numbers for (i in 1:length(predictions)) { finite.bool <- is.finite( predictions[[i]] ) predictions[[i]] <- predictions[[i]][ finite.bool ] labels[[i]] <- labels[[i]][ finite.bool ] } ## abort if 'labels' format is inconsistent across ## different cross-validation runs label.format="" ## one of 'normal','factor','ordered' if (all(sapply( labels, is.factor)) && !any(sapply(labels, is.ordered))) { label.format <- "factor" } else if (all(sapply( labels, is.ordered))) { label.format <- "ordered" } else if (all(sapply( labels, is.character)) || all(sapply( labels, is.numeric)) || all(sapply( labels, is.logical))) { label.format <- "normal" } else { stop(paste("Inconsistent label data type across different", "cross-validation runs.")) } ## abort if levels are not consistent across different ## cross-validation runs if (! 
all(sapply(labels, levels)==levels(labels[[1]])) ) { stop(paste("Inconsistent factor levels across different", "cross-validation runs.")) } ## convert 'labels' into ordered factors, aborting if the number ## of classes is not equal to 2. levels <- c() if ( label.format == "ordered" ) { if (!is.null(label.ordering)) { stop(paste("'labels' is already ordered. No additional", "'label.ordering' must be supplied.")) } else { levels <- levels(labels[[1]]) } } else { if ( is.null( label.ordering )) { if ( label.format == "factor" ) levels <- sort(levels(labels[[1]])) else levels <- sort( unique( unlist( labels))) } else { ## if (!setequal( levels, label.ordering)) { if (!setequal( unique(unlist(labels)), label.ordering )) { stop("Label ordering does not match class labels.") } levels <- label.ordering } for (i in 1:length(labels)) { if (is.factor(labels)) labels[[i]] <- ordered(as.character(labels[[i]]), levels=levels) else labels[[i]] <- ordered( labels[[i]], levels=levels) } } if (length(levels) != 2) { message <- paste("Number of classes is not equal to 2.\n", "ROCR currently supports only evaluation of ", "binary classification tasks.",sep="") stop(message) } ## determine whether predictions are continuous or categorical ## (in the latter case stop; scheduled for the next ROCR version) if (!is.numeric( unlist( predictions ))) { stop("Currently, only continuous predictions are supported by ROCR.") } ## compute cutoff/fp/tp data cutoffs <- list() fp <- list() tp <- list() fn <- list() tn <- list() n.pos <- list() n.neg <- list() n.pos.pred <- list() n.neg.pred <- list() for (i in 1:length(predictions)) { n.pos <- c( n.pos, sum( labels[[i]] == levels[2] )) n.neg <- c( n.neg, sum( labels[[i]] == levels[1] )) ans <- .compute.unnormalized.roc.curve( predictions[[i]], labels[[i]] ) cutoffs <- c( cutoffs, list( ans$cutoffs )) fp <- c( fp, list( ans$fp )) tp <- c( tp, list( ans$tp )) fn <- c( fn, list( n.pos[[i]] - tp[[i]] )) tn <- c( tn, list( n.neg[[i]] - fp[[i]] )) 
## Fast fp/tp computation via cumulative sums along decreasing cutoffs.
## 'labels' must be an (ordered) factor with exactly two levels:
## level 1 = negative class, level 2 = positive class.
## Returns list(cutoffs, fp, tp) with a leading Inf cutoff (tp = fp = 0).
.compute.unnormalized.roc.curve <- function( predictions, labels ) {
  neg.label <- levels(labels)[1]
  pos.label <- levels(labels)[2]

  ord <- order(predictions, decreasing = TRUE)
  sorted.preds <- predictions[ord]
  sorted.labels <- labels[ord]

  tp.cum <- cumsum(sorted.labels == pos.label)
  fp.cum <- cumsum(sorted.labels == neg.label)

  ## For tied predictions keep only the LAST occurrence: duplicated() keeps
  ## the first, hence the double rev(). The Inf cutoff is prepended so the
  ## curve starts at (fp, tp) = (0, 0).
  last.of.tie <- !rev(duplicated(rev(sorted.preds)))
  return(list(cutoffs = c(Inf, sorted.preds[last.of.tie]),
              fp = c(0, fp.cum[last.of.tie]),
              tp = c(0, tp.cum[last.of.tie])))
}
either value between 0 and 1, or randomized assignment Pi <- pmat(object) #expected values X <- object$X.ex if (max(X, na.rm = TRUE) > 1) stop("Available for dichotomous models only!") K <- dim(X)[2] N <- dim(X)[1] y <- as.vector(t(X)) #observed values pi.hat <- as.vector(t(Pi)) if (cutpoint == "randomized") { pvec <- runif(length(y)) } else { pvec <- rep(cutpoint, length(y)) } classvec <- (pvec < pi.hat)*1 #expected 0/1 vector classmat <- matrix(classvec, ncol = K, nrow = N, byrow = TRUE) dimnames(classmat) <- list(rownames(X), colnames(X)) return(classmat) } eRm/R/pmat.R0000744000176000001440000000006011572663323012317 0ustar ripleyusers`pmat` <- function(object)UseMethod("pmat") eRm/R/pmat.ppar.R0000744000176000001440000000462211572663323013270 0ustar ripleyusers`pmat.ppar` <- function(object) # computes a list of expected probabilities for objects of class "ppar" for each NA-subgroup # without category! { X <- object$X mt_vek <- apply(X,2,max,na.rm=TRUE) #number of categories - 1 for each item mt_ind <- rep(1:length(mt_vek),mt_vek) rp <- rowSums(X,na.rm=TRUE) maxrp <- sum(mt_vek) TFrow <- ((rp==maxrp) | (rp==0)) pmat.l <- lapply(object$thetapar, function(theta1) { #runs over missing structures theta <- theta1 p.list <- tapply(object$betapar,mt_ind,function(beta.i) { #matrices of expected prob as list (over items) beta.i <- c(0,beta.i) ind.h <- 0:(length(beta.i)-1) theta.h <- ind.h %*% t(theta) tb <- exp(theta.h+beta.i) denom <- colSums(tb) pi.mat <- apply(tb,1,function(y) {y/denom}) return(pi.mat) }) p.list0 <- lapply(p.list,function(pl) {rbind(pl)[,-1]}) #delete 0th category pmat <- matrix(unlist(p.list0),nrow=length(theta1)) #save as matrix return(pmat) }) #----------item-category labels---------- cnames <- substr(names(object$betapar),6,40) for (i in 1:length(pmat.l)) dimnames(pmat.l[[i]]) <- list(names(object$thetapar[[i]]),cnames) #-----------end labels------- if (length(object$pers.ex) > 0) { X <- object$X[-object$pers.ex,] #list with raw scores X01 <- 
`pmat.default` <- function(object)
# Fallback method for the pmat() generic: reached whenever 'object' is not
# of class "ppar". Unconditionally raises an error directing the user to
# person.parameter(), which produces the required "ppar" objects.
{
  stop("pmat() requires object of class 'ppar', obtained from person.parameter()")
}
{ #TODO : *Add CI around point estimates
  require(lattice)
  ## plot trend over time for all items
  itms <- object$itms
  tps <- object$mpoints
  pplgrps <- object$ngroups/itms
  trend <- object$etapar[((pplgrps-1)*itms*(tps-1)+1):((pplgrps-1)*itms*(tps-1)+(itms*(tps-1)))]
  tips <- rep(paste("t",1:tps,sep=""),each=itms)
  items <- rep(paste("Item",1:itms),tps)
  tr0 <- rep(0,itms)                    # trend is fixed to 0 at t1 (reference)
  trend <- c(tr0,trend)
  plotdats <- data.frame(trend,items,tips)
  key.items <- list(space = "right",
                    text = list(levels(plotdats$items)),
                    points = list(pch = 1:length(levels(plotdats$items)), col = "black"))
  plotout <- xyplot(trend~tips, data = plotdats, aspect = "fill", type = "o",
                    groups = items, key = key.items, lty = 1,
                    pch = 1:length(levels(plotdats$items)),
                    col.line = "darkgrey", col.symbol = "black",
                    xlab = "Time", ylab = "Effect",
                    main = "Trend effect plot for LLRA")
  print(plotout)
}
eRm/R/plotPWmap.R0000744000176000001440000002354411572663323013315 0ustar ripleyusers
`plotPWmap` <- function(object, pmap=FALSE, imap=TRUE,
                        item.subset="all", person.subset="all",
                        mainitem="Item Map", mainperson="Person Map",
                        mainboth="Item/Person Map",
                        latdim="Latent Dimension", tlab="Infit t statistic",
                        pp=NULL, cex.gen=0.6, cex.pch=1,
                        person.pch=1, item.pch=16,
                        personCI=NULL, itemCI=NULL, horiz=FALSE)
{
  ## Pathway map: plots item (and/or person) locations against their infit
  ## t statistics for RM, RSM and PCM fits, optionally with approximate
  ## normal-theory confidence intervals around each point estimate.
  ##
  ## object ......... fitted eRm model object (RM, RSM or PCM)
  ## pmap/imap ...... which map(s) to draw (person and/or item)
  ## pp ............. optional precomputed person.parameter object
  ## personCI/itemCI  NULL (no CI) or list(clevel=, col=, lty=)
  ## horiz .......... if TRUE, latent dimension is drawn on the x axis

  def.par <- par(no.readonly = TRUE) ## save default, for resetting...

  ## Pathway map currently only for RM, PCM and RSM
  if ((object$model == "LLTM") || (object$model == "LRSM") || (object$model == "LPCM"))
    stop("Pathway Map can only be computed for RM, RSM, and PCM!")
  if (!pmap && !imap)
    stop("Pathway Map requires you to request at least one map (item or person)!")

  ## compute threshtable (from betapars for dichotomous models) and item names
  if (object$model == "RM" || max(object$X, na.rm=TRUE) < 2) {
    ## dichotomous model
    dRm <- TRUE
    ## betapars are easiness parameters; only the pars need negating
    threshtable <- cbind(object$betapar * -1, object$se.beta)
    rownames(threshtable) <- colnames(object$X)
    tt <- threshtable ## shorter synonym
  } else {
    ## polytomous model: locations/SEs come from the threshold parameters
    dRm <- FALSE
    thresh <- thresholds(object)
    threshtable <- cbind(thresh$threshpar, thresh$se.thresh)
    tlevels <- apply(thresh$threshtable[[1]], 1, function(x) length(na.exclude(x))) - 1
    if (!(sum(tlevels) == nrow(threshtable)))
      stop("Threshtable rows not equal to number of thresholds - oops!")
    ttl <- NULL ## threshtable labels: "item" or "item:threshold"
    for (i in rownames(as.matrix(tlevels)))
      if (tlevels[i] == 1) ttl <- c(ttl, i)
      else ttl <- c(ttl, paste(i, 1:tlevels[i], sep=":"))
    rownames(threshtable) <- ttl
    ## shorter synonyms
    tt <- threshtable
    tl <- tlevels
  }

  if (is.null(pp))
    suppressWarnings(pp <- person.parameter(object))
  else if (!inherits(pp, "ppar") || !identical(pp$X, object$X))
    ## FIX: inherits() instead of class(pp) != "ppar" -- comparing class()
    ## with != breaks when the class attribute has length > 1
    stop("pp is not a person.parameter object which matches the main Rasch data object!")

  ## We will be plotting the infit data versus the parameters for
  ## both items and persons
  iloc <- tt[,1]
  ise  <- tt[,2]
  ifit  <- itemfit(pp)
  ifitZ <- ifit$i.infitZ
  ploc <- as.matrix(pp$theta.table['Person Parameter'])[,1]
  pse  <- unlist(pp$se.theta, recursive=FALSE)
  names(pse) <- sub("^NAgroup[0-9]*\\.", "", names(pse)) ## strip NA-group prefix
  pse  <- pse[names(ploc)]
  pfit  <- personfit(pp)
  pfitZ <- pfit$p.infitZ

  ## Item subsetting; ugly as there are multiple cases (we must subset
  ## all of iloc, ise and ifitZ consistently).
  if (imap && is.character(item.subset)) {
    ## Case 1: item subsetting by item names
    if (dRm) {
      if (length(item.subset) > 1 && all(item.subset %in% rownames(tt))) {
        iloc  <- iloc[item.subset]
        ise   <- ise[item.subset]
        ifitZ <- ifitZ[item.subset]
        tt    <- tt[item.subset,]
      }
      else if (length(item.subset) != 1 || !(item.subset == "all"))
        stop("item.subset misspecified. Use 'all' or vector of at least two valid item indices/names.")
    } else {
      if (length(item.subset) > 1 && all(item.subset %in% rownames(as.matrix(tl)))) {
        iloc  <- iloc[item.subset]
        ise   <- ise[item.subset]
        ifitZ <- ifitZ[item.subset]
        tl    <- tl[item.subset]
        keep.subset <- NULL ## BUGFIX: was used below without initialisation
        for (i in rownames(as.matrix(tl)))
          if (tl[i] == 1) keep.subset <- c(keep.subset, i)
          else keep.subset <- c(keep.subset, paste(i, 1:tl[i], sep=":"))
        tt <- tt[keep.subset,]
      }
      else if (length(item.subset) != 1 || !(item.subset == "all"))
        stop("item.subset misspecified. Use 'all' or vector of at least two valid item indices/names.")
    }
  } else if (imap) {
    ## Case 2: item subsetting by item numbers
    if (dRm) {
      if (length(item.subset) > 1 && all(item.subset %in% 1:nrow(tt))) {
        iloc  <- iloc[item.subset]
        ise   <- ise[item.subset]
        ifitZ <- ifitZ[item.subset]
        tt    <- tt[item.subset,]
      }
      else stop("item.subset misspecified. Use 'all' or vector of at least two valid item indices/names.")
    } else {
      if (length(item.subset) > 1 && all(item.subset %in% 1:length(tl))) {
        iloc  <- iloc[item.subset]
        ise   <- ise[item.subset]
        ifitZ <- ifitZ[item.subset]
        tl    <- tl[item.subset]
        keep.subset <- NULL ## BUGFIX: was used below without initialisation
        for (i in rownames(as.matrix(tl)))
          if (tl[i] == 1) keep.subset <- c(keep.subset, i)
          else keep.subset <- c(keep.subset, paste(i, 1:tl[i], sep=":"))
        tt <- tt[keep.subset,]
      }
      else stop("item.subset misspecified. Use 'all' or vector of at least two valid item indices/names.")
    }
  }

  ## Person subsetting; significantly easier than item subsetting, as
  ## there is no dRm/eRm distinction.
  if (pmap && is.character(person.subset)) {
    ## Case 1: person subsetting by person names
    if (length(person.subset) > 1 && all(person.subset %in% names(ploc))) {
      ploc  <- ploc[person.subset]
      pse   <- pse[person.subset]
      pfitZ <- pfitZ[person.subset]
    }
    else if (length(person.subset) != 1 || !(person.subset == "all"))
      stop("person.subset misspecified. Use 'all' or vector of at least two valid person indices/names.")
  } else if (pmap) {
    ## Case 2: person subsetting by person numbers
    if (length(person.subset) > 1 && all(person.subset %in% 1:length(ploc))) {
      ploc  <- ploc[person.subset]
      pse   <- pse[person.subset]
      pfitZ <- pfitZ[person.subset]
    }
    else stop("person.subset misspecified. Use 'all' or vector of at least two valid person indices/names.")
  }

  ## Confidence intervals for persons and items.
  ## pci/ici are the standard-error multipliers used both for drawing CIs
  ## and for widening the plot range; zero means "no CI requested".
  ## The calculation is simplistic (normal approximation from the SE),
  ## but the intervals are only meant to be indicative.
  pci <- 0
  ici <- 0
  if (pmap && !is.null(personCI)) {
    if (is.null(personCI$clevel)) personCI$clevel <- 0.95
    if (is.null(personCI$col))    personCI$col   <- "orange"
    if (is.null(personCI$lty))    personCI$lty   <- "dotted"
    pci <- qnorm((1-personCI$clevel)/2, lower.tail=FALSE)
  }
  if (imap && !is.null(itemCI)) {
    if (is.null(itemCI$clevel)) itemCI$clevel <- 0.95
    if (is.null(itemCI$col))    itemCI$col   <- "red"
    if (is.null(itemCI$lty))    itemCI$lty   <- "dotted"
    ici <- qnorm((1-itemCI$clevel)/2, lower.tail=FALSE)
  }

  ## Now we can plot the Pathway Map
  if (pmap) { ## person map ranges
    xrange.pmap <- range(pfitZ, na.rm=TRUE)
    xrange.pmap[1] <- min(-2.5, xrange.pmap[1])
    xrange.pmap[2] <- max(2.5, xrange.pmap[2]+1) ## need space for labels
    yrange.pmap <- range(ploc, na.rm=TRUE)
    yrange.pmap[1] <- yrange.pmap[1] - pci*max(pse)
    yrange.pmap[2] <- yrange.pmap[2] + pci*max(pse)
  }
  if (imap) { ## item map ranges
    xrange.imap <- range(ifitZ, na.rm=TRUE)
    xrange.imap[1] <- min(-2.5, xrange.imap[1])
    xrange.imap[2] <- max(2.5, xrange.imap[2]+1) ## need space for labels
    yrange.imap <- range(iloc, na.rm=TRUE)
    yrange.imap[1] <- yrange.imap[1] - ici*max(ise)
    yrange.imap[2] <- yrange.imap[2] + ici*max(ise)
  }
  if (pmap && !imap) {
    xrange <- xrange.pmap
    yrange <- yrange.pmap
    maintitle <- mainperson
  } else if (!pmap && imap) {
    xrange <- xrange.imap
    yrange <- yrange.imap
    maintitle <- mainitem
  } else { ## both maps: union of the two ranges
    xrange <- numeric(2)
    yrange <- numeric(2)
    xrange[1] <- min(xrange.pmap[1], xrange.imap[1])
    xrange[2] <- max(xrange.pmap[2], xrange.imap[2])
    yrange[1] <- min(yrange.pmap[1], yrange.imap[1])
    yrange[2] <- max(yrange.pmap[2], yrange.imap[2])
    maintitle <- mainboth
  }

  par(mar=c(5,4,4,2))
  if (!horiz) { # rh 2010-12-09
    plot(xrange, yrange, xlim=xrange, ylim=yrange, main=maintitle,
         ylab=latdim, xlab=tlab, type="n")
    abline(v=c(-2,2), col="lightgreen") ## conventional |t| = 2 guides
  } else {
    plot(yrange, xrange, xlim=yrange, ylim=xrange, main=maintitle,
         ylab=tlab, xlab=latdim, type="n")
    abline(h=c(-2,2), col="lightgreen")
  }

  if (pmap) { ## person map
    zt <- pfitZ
    if (!horiz) {
      if (pci > 0) ## draw confidence intervals
        arrows(zt, ploc+pci*pse, zt, ploc-pci*pse, angle=90, code=3,
               length=0.04, col=personCI$col, lty=personCI$lty)
      points(zt, ploc, pch=person.pch, cex=cex.pch)
      text(zt, ploc, names(ploc), cex=cex.gen, pos=4)
    } else {
      if (pci > 0) ## draw confidence intervals
        arrows(ploc+pci*pse, zt, ploc-pci*pse, zt, angle=90, code=3,
               length=0.04, col=personCI$col, lty=personCI$lty)
      points(ploc, zt, pch=person.pch, cex=cex.pch)
      text(ploc, zt, names(ploc), cex=cex.gen, pos=4)
    }
  }

  if (imap) { ## item map
    ## polytomous items contribute one point per threshold but share one infit
    if (dRm) zt <- ifitZ else zt <- rep(ifitZ, times=tl)
    if (!horiz) {
      if (ici > 0) ## draw confidence intervals
        arrows(zt, iloc+ici*ise, zt, iloc-ici*ise, angle=90, code=3,
               length=0.04, col=itemCI$col, lty=itemCI$lty)
      points(zt, iloc, pch=item.pch, cex=cex.pch)
      text(zt, iloc, rownames(tt), cex=cex.gen, pos=4)
    } else {
      if (ici > 0) ## draw confidence intervals
        arrows(iloc+ici*ise, zt, iloc-ici*ise, zt, angle=90, code=3,
               length=0.04, col=itemCI$col, lty=itemCI$lty)
      points(iloc, zt, pch=item.pch, cex=cex.pch)
      text(iloc, zt, rownames(tt), cex=cex.gen, pos=4)
    }
  }

  par(def.par)
  invisible(NULL)
}
eRm/R/plotPImap.R0000744000176000001440000001116711572663323013275 0ustar ripleyusers
`plotPImap` <- function(object, item.subset="all", sorted = FALSE,
                        main="Person-Item Map", latdim="Latent Dimension",
                        pplabel="Person\nParameter\nDistribution", cex.gen=0.7,
                        xrange=NULL, warn.ord=TRUE, warn.ord.colour="black",
                        irug=TRUE, pp=NULL)
{
  def.par <- par(no.readonly = TRUE) # save default, for resetting...
## --- continuation of plotPImap() body (signature precedes this span) ---
  # Item-Person map currently only for RM, PCM and RSM
  if ((object$model == "LLTM") || (object$model == "LRSM") || (object$model == "LPCM"))
    stop("Item-Person Map are computed only for RM, RSM, and PCM!")

  # compute threshtable (from betapars for dichotomous models) and item names
  if (object$model == "RM" || max(object$X, na.rm=TRUE) < 2) {
    dRm <- TRUE
    threshtable <- cbind(object$betapar, object$betapar) * -1 # betapars are easiness parameteres
    rownames(threshtable) <- substring(rownames(threshtable), first=6, last=9999)
  } else {
    dRm <- FALSE
    threshtable <- thresholds(object)$threshtable[[1]]
  }
  tr <- as.matrix(threshtable)

  # item subsetting by names ("all" keeps everything) or by numbers
  if (is.character(item.subset)) {
    if (length(item.subset) > 1 && all(item.subset %in% rownames(threshtable)))
      tr <- tr[item.subset,]
    else if (length(item.subset) != 1 || !(item.subset == "all"))
      stop("item.subset misspecified. Use 'all' or vector of at least two valid item indices/names.")
  } else {
    if (length(item.subset) > 1 && all(item.subset %in% 1:nrow(tr)))
      tr <- tr[item.subset,]
    else
      stop("item.subset misspecified. Use 'all' or vector of at least two valid item indices/names.")
  }
  if (sorted)
    tr <- tr[order(tr[,1], decreasing=FALSE),]

  loc <- as.matrix(tr[,1])  # item locations (first column)
  tr  <- as.matrix(tr[,-1]) # threshold columns only

  # person parameters; unlist in case of several for NA groups
  if (is.null(pp))
    suppressWarnings(pp <- person.parameter(object))
  else if (!inherits(pp, "ppar") || !identical(pp$X, object$X))
    ## FIX: inherits() instead of class(pp) != "ppar" -- comparing class()
    ## with != breaks when the class attribute has length > 1
    stop("pp is not a person.parameter object which matches the main Rasch data object!")
  theta <- unlist(pp$thetapar)
  tt  <- table(theta)
  ttx <- as.numeric(names(tt))

  yrange <- c(0, nrow(tr)+1)
  if (is.null(xrange)) xrange <- range(c(tr, theta), na.rm=TRUE)

  ## lower panel (items) gets 3/4 of the height, upper panel (persons) 1/4
  nf <- layout(matrix(c(2,1), 2, 1, byrow=TRUE), heights = c(1,3), TRUE)
  par(mar=c(2.5,4,0,1))
  plot(xrange, yrange, xlim=xrange, ylim=yrange, main="", ylab="",
       type="n", yaxt="n", xaxt="n")
  axis(2, at=1:nrow(tr), labels=rev(rownames(tr)), las=2, cex.axis=cex.gen)
  axis(1, at=seq(floor(xrange[1]), ceiling(xrange[2])), cex.axis=cex.gen, padj=-1.5)
  mtext(latdim, 1, 1.2, cex=cex.gen+.1)

  ### BEGIN irug: rug of all threshold locations along the top edge
  if (irug == TRUE) {
    y.offset <- nrow(tr)*.0275
    tr.rug <- as.numeric(tr)
    if (any(is.na(tr.rug))) tr.rug <- tr.rug[-which(is.na(tr.rug))]
    segments(tr.rug, rep(yrange[2], length(tr.rug))+y.offset,
             tr.rug, rep(yrange[2], length(tr.rug))+100)
  }
  ### END irug

  ## one row per item (drawn bottom-up); '*' marks disordered thresholds
  warn <- rep(" ", nrow(tr))
  for (j in 1:nrow(tr)) {
    i <- nrow(tr)+1-j
    trpoints <- tr[i, !is.na(tr[i,])]
    npnts <- length(trpoints)
    ## disordered thresholds are highlighted in warn.ord.colour
    if (!dRm && !all(sort(trpoints) == trpoints)) ptcol <- warn.ord.colour
    else ptcol <- "black"
    if (npnts > 1)
      points(sort(trpoints), rep(j, npnts), type="b", cex=1, col=ptcol)
    if (dRm) {
      lines(xrange*1.5, rep(j,2), lty="dotted")
    } else {
      if (npnts > 1)
        text(sort(trpoints), rep(j, npnts), (1:npnts)[order(trpoints)],
             cex=cex.gen, pos=1, col=ptcol)
      if (!all(sort(trpoints) == trpoints)) warn[j] <- "*"
    }
    points(loc[i], j, pch=20, cex=1.5, col=ptcol) # plot item locations
  }
  if (warn.ord)
    axis(4, at=1:nrow(tr), tick=FALSE, labels=warn, hadj=2.5, padj=0.7, las=2)

  # person parameters (upper panel): spike plot of the theta distribution
  par(mar=c(0,4,3,1))
  plot(ttx, tt, type="n", main=main, axes=FALSE, ylab="",
       xlim=xrange, ylim=c(0, max(tt)))
  points(ttx, tt, type="h", col="gray", lend=2, lwd=5)
  mtext(pplabel, 2, 0.5, las=2, cex=cex.gen)
  box()
  par(def.par)
}
eRm/R/plotjointICC.R0000744000176000001440000000010411572663323013716 0ustar ripleyusers
`plotjointICC` <- function(object,...)UseMethod("plotjointICC")
eRm/R/plotjointICC.dRm.R0000744000176000001440000000357111572663323014452 0ustar ripleyusers
`plotjointICC.dRm` <- function(object, item.subset = "all", legend=TRUE,
                               xlim=c(-4,4),ylim=c(0,1),
                               xlab="Latent Dimension",ylab="Probability to Solve",lty=1,legpos="left",
                               main="ICC plot",col=NULL,...)
#produces one common ICC plot for Rasch models only #object of class "dRm" #item.subset...specify items that have to be plotted; if NA, all items are used #legend...if legend should be plotted { theta <- seq(xlim[1],xlim[2],by=0.1) if (any(item.subset=="all")) { it.legend <- 1:dim(object$X)[2] } else { if (is.character(item.subset)) { it.legend <- item.subset betatemp <- t(as.matrix(object$betapar)) colnames(betatemp) <- colnames(object$X) object$betapar <- betatemp[,item.subset] } else { it.legend <- colnames(object$X)[item.subset] object$betapar <- object$betapar[item.subset] } object$X <- object$X[,item.subset] #pick out items defined in itemvec } th.ord <- order(theta) p.list <- plist.internal(object,theta) p.list <- lapply(p.list,function(x) {x[,-1]}) #Delete 0-probabilites p.mat <- matrix(unlist(p.list),ncol=length(p.list)) text.ylab <- p.mat[(1:length(theta))[theta==median(theta)],] #dev.new() if(is.null(main)) main="" if(is.null(col)) col=1:(dim(p.mat)[2]) #pmICCs<-cbind(sort(theta),p.mat[th.ord,]) matplot(sort(theta),p.mat[th.ord,],type="l",lty=lty,col=col, main=main,xlim=xlim,ylim=ylim,xlab=xlab,ylab=ylab,...) if (is.character(legpos)){ if (!legend) { #text(x=median(theta),y=text.ylab,labels=paste("I",1:(dim(p.mat)[2]),sep=""),col=1:(dim(p.mat)[2])) text(x=median(theta),y=text.ylab,labels=it.legend,col=1:(dim(p.mat)[2])) } else { legend(legpos,legend=paste("Item",it.legend),col=1:(dim(p.mat)[2]),lty=lty,...) } } } eRm/R/plotICC.Rm.R0000744000176000001440000001562611572663323013246 0ustar ripleyusers`plotICC.Rm` <- function(object, item.subset = "all", empICC = NULL, empCI = NULL, mplot = NULL, # ask,mplot added rh 2007-12-01 xlim = c(-4,4), ylim = c(0,1), xlab = "Latent Dimension", ylab = "Probability to Solve", main=NULL, # main rh 2010-03-06 col = NULL, lty = 1, legpos = "left", ask = TRUE, ...) 
# produces ICC plots # object of class Rm { X <- object$X if (is.null(col)) col <- 1:(max(apply(X,2,max,na.rm=TRUE))+1) main.arg <- main # rh added 2010-11-23 otherwise always same item in title if NULL # some sanity checks if (is.null(empICC)) { emp.plot <- FALSE } else if (!is.element(empICC[[1]], c("raw","loess","tukey","kernel"))) { ##empirical[[1]] <- "none" emp.plot <- FALSE warning('empICC must be one of "raw","loess","tukey","kernel"!\n') } else if (object$model != "RM") { warning("Empirical ICCs can only be plotted for a dichotomous Rasch model!\n") emp.plot <- FALSE } else { th.est <- person.parameter(object) thetapar <- th.est$thetapar if (length(thetapar)!=1) { #Too complicated with NA'groups (for each NAgroup separate plots...) warning("Empirical ICCs are not produced for different NA groups!\n") emp.plot <- FALSE } else { thetapar.u <- unique(round(unlist(thetapar),5)) if (length(thetapar.u)<4) { warning("No empirical ICCs for less the 4 different person parameters!\n") emp.plot <- FALSE } else emp.plot <- TRUE } } theta <- seq(xlim[1],xlim[2],by=0.1) #x-axis p.list <- plist.internal(object,theta) #matrix of probabilities th.ord <- order(theta) if (any(item.subset=="all")) { textlab <- colnames(object$X) ivec <- 1:length(p.list) } else { if (is.character(item.subset)) { #item names specified ivectemp <- t(as.matrix(1:length(p.list))) colnames(ivectemp) <- colnames(object$X) ivec <- ivectemp[,item.subset] textlab <- item.subset textlab[ivec] <- textlab it.legend <- item.subset } else { #numeric vector specified textlab <- colnames(object$X)[item.subset] textlab[item.subset] <- textlab ivec <- item.subset } } if (object$model=="RM") { #Rasch model p.list <- lapply(p.list,function(x) {x[,-1]}) #Delete 0-probabilites p.mat <- matrix(unlist(p.list),ncol=length(p.list)) #matrix with solving probabilities text.ylab <- p.mat[(1:length(theta))[theta==median(theta)],] } ## plot for non RMs ################# if (object$model != "RM"){ if (ask) par("ask"=TRUE) # 
added rh 2007-12-01 if (is.null(mplot)) mplot<-FALSE if (mplot) par(mfrow=c(2,2)) for (j in 1:length(ivec)) { # loop for items i <- ivec[j] yp <- as.matrix(p.list[[i]]) yy <- yp[th.ord,] if(is.null(main.arg)) main<-paste("ICC plot for item ",textlab[i]) # rh 2010-03-06 matplot(sort(theta),yy,type="l",lty=lty,col=col, #main=paste("ICC plot for item ",textlab[i]),xlim=xlim, # replaced to allow for user titles rh 2010-03-06 main=main, xlim=xlim, ylim=ylim,xlab=xlab,ylab=ylab,...) if (is.character(legpos)) legend(legpos,legend=paste(c("Category"),0:(dim(yp)[2]-1)), col=col,lty=lty, ...) # added rh 2007-12-01 } ## plot for RMs ##################### } else { if (is.null(mplot) && length(ivec)>1) mplot<-TRUE else mplot<-FALSE # rh 2010-03-06 no mfrow(2,2) if only 1 item if (mplot) par(mfrow=c(2,2)) if (ask) par("ask"=TRUE) # added rh 2007-12-01 for (j in 1:length(ivec)) { #runs over items i <- ivec[j] yp <- as.matrix(p.list[[i]]) yy <- yp[th.ord,] if(is.null(main.arg)) main<-paste("ICC plot for item ",textlab[i]) # rh 2010-03-06 matplot(sort(theta),yy,type="l",lty=lty,col=col, #main=paste("ICC plot for item ",textlab[i]),xlim=xlim, # replaced to allow for user titles rh 2010-03-06 main=main, xlim=xlim, ylim=ylim,xlab=xlab,ylab=ylab,...) ##ylim=ylim,xlab=xlab,ylab=ylab,"ask"=TRUE,...) 
## empirical ICC if (emp.plot) { freq.table <- as.matrix(table(rowSums(X),X[,i])) rel.freq <- freq.table[,2]/rowSums(freq.table) idx <- as.numeric(rownames(freq.table)) xy<-cbind(th.est$pred.list[[1]]$y[idx+1],rel.freq) if(empICC[[1]]=="loess") if(!is.null(empICC$smooth)) smooth<-empICC$smooth else smooth<-0.75 if(empICC[[1]]=="kernel") if(!is.null(empICC$smooth)) smooth<-empICC$smooth else smooth<-0.5 nn <- rowSums(freq.table) switch(empICC[[1]], "raw"={}, "loess"={xy[,2]<-loess(xy[,2]~xy[,1],span=smooth)$fitted},#+;cyf<-cbind(xy[,2] * nn, nn)}, "tukey"={xy[,2]<-smooth(xy[,2])},#;cyf<-cbind(xy[,2] * nn, nn)} "kernel"={xy[,2]<-ksmooth(xy[,1],xy[,2],bandwidth=smooth,x.points=xy[,1])[[2]]} ) xy[,2] <- ifelse(xy[,2]>1,1,ifelse(xy[,2]<0,0,xy[,2])) # bounding p in [0,1] if(is.null(empICC$type)) empICC$type <- "p" if(is.null(empICC$pch)) empICC$pch <- 1 if(is.null(empICC$col)) empICC$col <- "black" if(is.null(empICC$lty)) empICC$lty <- "solid" # confidence intervals for empirical ICC if(!is.null(empCI)) { # functions from prop.test() p.L <- function(x, n, alpha) { if (x <= 0) 0 else qbeta(alpha, x, n - x + 1)} p.U <- function(x, n, alpha) { if (x >= n) 1 else qbeta(1 - alpha, x + 1, n - x)} CINT <- function(x, n, conf.level){ alpha <- (1 - conf.level)/2 c(p.L(x,n, alpha), p.U(x,n, alpha)) } if(is.null(empCI$clevel)) empCI$clevel <- 0.95 if(is.null(empCI$col)) empCI$col <- "red" if(is.null(empCI$lty)) empCI$lty <- "dotted" cyf<-cbind(xy[,2] * nn, nn) cy<-apply(cyf,1,function(x) CINT(x[1],x[2],empCI$clevel)) apply(cbind(xy[,1],t(cy)),1,function(x)segments(x[1],x[2],x[1],x[3],lty=empCI$lty,col=empCI$col)) } # plots the point estimates of the empirical ICC lines(xy[,1],xy[,2],type=empICC$type, pch=empICC$pch, col=empICC$col, lty=empICC$lty, ...) 
} # end if(emp.plot) } } ## reset graphics parameters par("ask"=FALSE) # added rh 2007-12-01 par(mfrow=c(1,1)) } eRm/R/plotICC.R0000744000176000001440000000007211572663323012656 0ustar ripleyusers`plotICC` <- function(object,...)UseMethod("plotICC") eRm/R/plotGR.R0000744000176000001440000000434411572663323012576 0ustar ripleyusersplotGR <- function(object,...) { #TODO: *Add CI around point estimates require(lattice) itms <- object$itms tps <- object$mpoints pplgrps <- object$ngroups/itms if(pplgrps<2) stop("There are no treatment effects in this analysis.") #treatment effects for all treatment groups at tps>1 treat <- object$etapar[1:((pplgrps-1)*itms*(tps-1))] time <- factor(rep(paste("t",2:tps,sep=""),each=itms*(pplgrps-1))) item <- factor(rep(rep(paste("Item",1:itms),each=pplgrps-1),tps-1)) names1 <- unique(names(object$groupvec))[1:(length(unique(names(object$groupvec))))-1] #labeling group <- factor(rep(names1,itms*(tps-1))) plotdats1 <- data.frame(treat,group,item,time) #effects (i.e. zeros) for all treatment groups at tp=1 treat0 <- rep(0,itms*(pplgrps-1)) time0 <- factor(rep("t1",each=itms*(pplgrps-1))) item0 <- factor(rep(paste("Item",1:itms),each=(pplgrps-1))) #labeling group0 <- factor(rep(names1,itms)) plotdats0 <- data.frame(treat0,group0,item0,time0) names(plotdats0) <- c("treat","group","item","time") #effects (i.e. 
zeros) for control or baseline group for all tps treat00 <- rep(0,itms*tps) time00 <- factor(rep(paste("t",1:tps,sep=""),each=itms)) item00 <- factor(rep(paste("Item",1:itms),tps)) group00 <- factor(rep(unique(names(object$groupvec))[length(unique(names(object$groupvec)))],itms*tps)) plotdats00 <- data.frame(treat00,group00,item00,time00) names(plotdats00) <- c("treat","group","item","time") #all together plotdats <- rbind(plotdats00,plotdats0,plotdats1) #plot key.group <- list(space = "right", text = list(levels(plotdats$group)), points = list(pch = 1:length(levels(plotdats$group)), col = "black") ) plotout <- xyplot(treat ~ time | item, plotdats, aspect = "xy", type = "o", groups = group, key = key.group, lty = 1, pch = 1:length(levels(plotdats$group)), col.line = "darkgrey", col.symbol = "black", xlab = "Time", ylab = "Effect", main = "Treatment effect plot for LLRA" ) print(plotout) } eRm/R/plotGOF.R0000744000176000001440000000006511572663323012675 0ustar ripleyusers`plotGOF` <- function(x,...)UseMethod("plotGOF") eRm/R/plotGOF.LR.R0000744000176000001440000001744611572663323013224 0ustar ripleyusers`plotGOF.LR` <- function(x,beta.subset="all", main="Graphical Model Check", xlab=NULL,ylab=NULL,tlab="item", ylim=c(-3,3),xlim=c(-3,3),type="p",pos="4", conf=NULL, ctrline=NULL,...) { # graphical model check # beta.subset...plot only a subset of beta-parameters; either "all" or an index vector # x...object of class LR (from LRtest) # tlab ... labelling: "item" abbreviated beta parameter name, "number" number from beta par list, # "identify" interactive, "none" # pos ... (where the textlabel appears) # conf ... confidence ellipses: NULL or # list(gamma=0.95, col="red", ia=TRUE, lty="dashed", which=all items in beta.subset) # ctrline ... control lines (confidence bands): NULL or list(gamma=0.95,lty="solid", col="blue") # ... 
additional graphic parameters if (length(x$likgroup) > 2) warning("Only the parameters for the first two subgroups are plotted!") if (is.null(xlab)) xlab<-paste("Beta for Group: ",x$spl.gr[1],sep="") if (is.null(ylab)) ylab<-paste("Beta for Group: ",x$spl.gr[2],sep="") nparg1 <- length(x$betalist[[1]]) nparg2 <- length(x$betalist[[2]]) if (nparg1 != nparg2) stop("Unequal number of parameters in the subgroups! Plot cannot be produced, choose another split in LRtest!") beta1 <- x$betalist[[1]] * -1 # -1 to obtain difficulty parameters beta2 <- x$betalist[[2]] * -1 if (is.character(beta.subset)) { if (beta.subset=="all") { beta.subset <- 1:length(beta1) #textlab <- names(beta1) switch(EXPR=tlab, item=textlab <- substr(names(beta1),6,100), #remove "beta " from names number=textlab <- 1:length(beta1), identify=labs <- substr(names(beta1),6,100) ) } else { textlab <- beta.subset } } else { #textlab <- names(beta1)[beta.subset] ##beta.subset<-sort(beta.subset) switch(EXPR=tlab, item=textlab <- substr(names(beta1)[beta.subset],6,100), #remove "beta " from names number=textlab <- beta.subset, identify=labs <- substr(names(beta1)[beta.subset],6,100) ) } #yshift <- (ylim[2]-ylim[1])/30 yshift<-0 plot(beta1[beta.subset],beta2[beta.subset],main=main,xlab=xlab, ylab=ylab,ylim=ylim,xlim=xlim,type=type,...) abline(0,1) if(exists("textlab")) { text(beta1[beta.subset],beta2[beta.subset]+yshift,labels=textlab,pos=pos,...) } if(exists("labs")) { options(locatorBell = FALSE) xycoords <- cbind(beta1[beta.subset], beta2[beta.subset]) nothing<-identify(xycoords,labels = labs,atpen=TRUE,offset=1) } # se's needed for ellipses and control lines if(is.list(conf) || is.list(ctrline)){ if(any(is.na(unlist(x$selist)))) { warning("Confidence ellipses or control lines cannot be plotted.\n LR object without standard errors. 
Use option 'se=TRUE' in LRtest()") conf <- ctrline <- NULL } else { s1 <- x$selist[[1]] s2 <- x$selist[[2]] v1 <- s1^2 v2 <- s2^2 suspicious.se<-any(cbind(s1,s2)[beta.subset]>10) if(suspicious.se){ warning("Suspicious size of standard error(s).\n Check model specification, split criterion, data.") } } if(any(abs(cbind(beta1,beta2)[beta.subset])>8)){ warning("Suspicious size of parameter estimate(s).\n Check model specification, split criterion, data.") if(is.null(conf)) conf$ia <- FALSE } } # confidence ellipses if(is.list(conf)){ # (interactive) plot of confidence ellipses ## function ellipse() from package car ellipse <- function (center, shape, radius, center.pch = 19, center.cex = 1.5, segments = 51, add = TRUE, xlab = "", ylab = "", las = par("las"), col = palette()[2], lwd = 2, lty = 1, ...) { if (!(is.vector(center) && 2 == length(center))) stop("center must be a vector of length 2") if (!(is.matrix(shape) && all(2 == dim(shape)))) stop("shape must be a 2 by 2 matrix") angles <- (0:segments) * 2 * pi/segments unit.circle <- cbind(cos(angles), sin(angles)) ellipse <- t(center + radius * t(unit.circle %*% chol(shape))) if (add) lines(ellipse, col = col, lwd = lwd, lty = lty, ...) else plot(ellipse, xlab = xlab, ylab = ylab, type = "l", col = col, lwd = lwd, lty = lty, las = las, ...) 
if (center.pch) points(center[1], center[2], pch = center.pch, cex = center.cex, col = col) } # select items for which ellipses are drawn ## rh 2011-05-31 if(is.null(conf$which)) conf$which<-beta.subset#seq_along(beta.subset) ##conf$which <- sort(conf$which) if(!all(conf$which %in% beta.subset)) stop("Incorrect item number(s) for which ellipses are to be drawn") if(is.null(conf$col)) { conf$c <- rep("red",length.out=length(beta1)) } else if (!is.null(conf$which)){ ## conf$c <- rep(NA,length.out=length(beta.subset)) conf$c <- rep(NA,length.out=length(conf$which)) if (length(conf$c)!=length(conf$which)) stop("which and col must have the same length in specification of conf") else conf$c[conf$which]<-conf$col } conf$col <- conf$c if(is.null(conf$gamma)) conf$gamma <- 0.95 if(is.null(conf$lty)) conf$lty <- "dotted" if(is.null(conf$ia)) conf$ia <- FALSE z <- qnorm((conf$gamma+1)/2) ci1u <- beta1 + z*s1 ci1l <- beta1 - z*s1 ci2u <- beta2 + z*s2 ci2l <- beta2 - z*s2 if(conf$ia) { identifyEll <- function(x, y, ci1u, ci1l, ci2u,ci2l, v1, v2, conf, n=length(x), ...) ## source: example from help("identify") ## a function to use identify to select points, and overplot the ## points with a cofidence ellipse as they are selected { xy <- xy.coords(x, y); x <- xy$x; y <- xy$y sel <- rep(FALSE, length(x)); res <- integer(0) while(sum(sel) < n) { ans <- identify(x[!sel], y[!sel], n=1, plot=FALSE, ...) 
if(!length(ans)) break ans <- which(!sel)[ans] i <- ans lines(rep(x[i],2),c(ci2u[i],ci2l[i]),col=conf$col[1], lty=conf$lty) lines(c(ci1u[i],ci1l[i]), rep(y[i],2),col=conf$col[1],lty=conf$lty) ellipse(center=c(x[i],y[i]),matrix(c(v1[i],0,0,v2[i]),2),z,segments=200,center.cex=0.5,lwd=1, col=conf$col[1]) #points(x[ans], y[ans], pch = pch) sel[ans] <- TRUE res <- c(res, ans) } #res } identifyEll(beta1[beta.subset],beta2[beta.subset], ci1u[beta.subset], ci1l[beta.subset], ci2u[beta.subset], ci2l[beta.subset], v1[beta.subset], v2[beta.subset], conf) } else { # non-interactive: plot of all ellipses at once x<-beta1 y<-beta2 for (i in beta.subset) { if(i %in% conf$which){ lines(rep(x[i],2),c(ci2u[i],ci2l[i]),col=conf$col[i], lty=conf$lty) lines(c(ci1u[i],ci1l[i]), rep(y[i],2),col=conf$col[i],lty=conf$lty) ellipse(center=c(x[i],y[i]),matrix(c(v1[i],0,0,v2[i]),2),z,segments=200,center.cex=0.5,lwd=1, col=conf$col[i]) } } } } # 95% control lines (Wright) if(is.list(ctrline)){ if(is.null(ctrline$gamma)) ctrline$gamma <- 0.95 if(is.null(ctrline$col)) ctrline$col <- "blue" if(is.null(ctrline$lty)) ctrline$lty <- "solid" z <- qnorm((ctrline$gamma+1)/2) d<-(beta1+beta2)/2 se.d<-sqrt(v1+v2) d<-sort(d) se.d<-se.d[order(d)] upperx<-d-z*se.d/2 uppery<-d+z*se.d/2 lines(upperx,uppery, col=ctrline$col, lty=ctrline$lty) lines(uppery,upperx, col=ctrline$col, lty=ctrline$lty) } } eRm/R/plotDIF.R0000744000176000001440000001460411572663323012670 0ustar ripleyusersplotDIF <- function(object, item.subset=NULL, gamma = 0.95, main=NULL, xlim=NULL, xlab=" ", ylab=" ", col=NULL, distance, splitnames=NULL, leg=FALSE, legpos="bottomleft", ...){ if(class(object)=="LR"){ ## added rh 11-03-17 object <- list(object) } else if(is.list(object)) { checklr <- sapply(object,class) if(!all(checklr=="LR")) stop("Elements of '",deparse(substitute(object)), "' must must be LRtest objects!") } else if(!is.list(object)) { stop(deparse(substitute(object)), "must be a list of LRtest objects!") } # extract number of 
LRtest objects M <- length(sapply(object, function(x) length(x))) # Confidence plot only for LRtest objects for(p in 1:M){ if((object[[p]]$model == "LLTM") || (object[[p]]$model == "LRSM") || (object[[p]]$model == "LPCM")){ stop("Confidence Plot is computed only for LRtest objects (RM, PCM, RSM)!") } else if(is.na(sum(unlist(object[[p]]$selist))) == TRUE){ stop("Confidence Plot is computed only for LRtest objects (RM) with standard errors (se=TRUE)!") } } # confidences list for storing confints confidences1 <- vector("list") # for labeling list entries nam.lab <- vector("character") # subgroups splits n2 <- sapply(object, function(x) length(x$spl.gr)) # loops for computing thresholds on LRtest objects for(m in 1:M){ # confidences for dichotomous items if(object[[m]]$model == "RM"){ confidences1[[m]] <- lapply(object[[m]]$fitobj,function(x){confint(x, level=gamma)}) } else { # confidences for polytomous items confidences1[[m]] <- lapply(object[[m]]$fitobj, function(x){confint(thresholds(x),level=gamma)}) } } if(is.null(names(object)) == TRUE){ names(confidences1) <- paste("LRtest", 1:M, sep="") } else { names(confidences1) <- names(object) } confidences <- do.call(c,lapply(confidences1,function(x) x[1:length(x)])) if(missing(distance)) distance <- .7/(length(confidences)) if((distance <= 0) | (distance >= .5)) stop("distance must not be >= .5 or <= 0") model.vec <- vector("character") for(p in 1:M) model.vec[p] <- object[[p]]$model if(any(model.vec == "PCM") || any(model.vec == "RSM")){ model <- "PCM" } else { model <- "RM" } # extracting the longest element of confidences for definition of tickpositions and ticklabels # (snatches at the confidences-object index) factorlist <- (unique(unlist(lapply(confidences, function(x) dimnames(x)[[1]])))) #maxlist <- max(order(factorlist)) if(is.null(item.subset)){ if(model == "PCM"){ y.lab <- sub("thresh beta ", "", factorlist) } else { y.lab <- sub("beta ", "", factorlist) } } else if(is.character(item.subset)){ # item subset 
specified as character if(model == "PCM"){ y.lab <- sub("(.+)[.][^.]+$", "\\1", sub("thresh beta ", "", factorlist)) # search only for a "." separation after item label categ <- gsub("^.*\\.(.*)$","\\1", factorlist) # extract item categories - search only for a "." separation y.lab.id <- y.lab %in% item.subset y.lab1 <- y.lab[y.lab.id] categ1 <- categ[y.lab.id] y.lab <- paste(y.lab1, categ1, sep=".") # stick item categories and names together again factorlist <- factorlist[y.lab.id] } else { y.lab <- sub("beta ", "", factorlist) # search only for a "." separation after item label y.lab.id <- y.lab %in% item.subset y.lab <- y.lab[y.lab.id] factorlist <- factorlist[y.lab.id] } } else { # item subset specified as position number (index in data matrix) if(model == "PCM"){ y.lab <- sub("(.+)[.][^.]+$", "\\1", sub("thresh beta ", "", factorlist)) # search only for a "." separation after item label categ <- gsub("^.*\\.(.*)$","\\1", factorlist) # extract item categories - search only for a "." separation y.lab2 <- unique(y.lab)[item.subset] y.lab.id <- y.lab %in% y.lab2 y.lab1 <- y.lab[y.lab.id] categ1 <- categ[y.lab.id] y.lab <- paste(y.lab1, categ1, sep=".") # stick item categories and names together again factorlist <- factorlist[y.lab.id] } else { y.lab <- sub("beta ", "", factorlist) y.lab2 <- unique(y.lab)[item.subset] y.lab.id <- y.lab %in% y.lab2 y.lab <- y.lab[y.lab.id] factorlist <- factorlist[y.lab.id] } } # setting range of xaxis if(is.null(xlim)){ xlim <- range(unlist(confidences)) } # setting tickpositions tickpos <- 1:(length(y.lab)) # + 3.5/distance mm 2011-06-03 lty <- unlist(lapply(n2, function(i)1:i)) # defining the plot if(is.null(main)){ main<-paste("Confidence plot") } plot(xlim, xlim=xlim, ylim=c(1,length(factorlist))+c(-.5,+.5), type="n", yaxt="n", main=main, xlab=xlab,ylab=ylab,...) 
# rh 2011-03-23 reverse ylim added
axis(2, at=tickpos, labels=y.lab, cex.axis=0.7, las=2)
# one confidence interval per split group (k) and parameter (l); the k intervals
# for a parameter are stacked vertically inside the band reserved for it
if(is.null(col)){
  for(k in 1:length(confidences)) {
    for(l in 1:length(factorlist)) {
      lines(as.data.frame(confidences[[k]])[factorlist[l],],
            rep(seq(l-.5+distance, l+.5-distance, length.out=length(confidences))[k], 2),
            type="b", pch=c("[","]"),
            col=length(cumsum(n2)[cumsum(n2) < k])+1, lty=lty[k])
    }
  }
} else {
  col <- rep(col, n2)
  for(k in 1:length(confidences)) {
    for(l in 1:length(factorlist)) {
      lines(as.data.frame(confidences[[k]])[factorlist[l],],
            rep(seq(l-.5+distance, l+.5-distance, length.out=length(confidences))[k], 2),
            type="b", pch=c("[","]"), col=col[k], lty=lty[k])
    }
  }
}
# doing nicer legend labels
if(is.null(splitnames)==FALSE){
  names(confidences) <- splitnames
}
if(leg == TRUE){
  linespread <- .7 + .3 * (1/length(confidences))
  if(is.null(col)){
    #col <- rep(1:length(n2), each=n2) rh 2011-03-18
    col <- rep(1:length(n2), n2)
    # legend(legpos, rev(paste(names(confidences))), col=rev(col), lty=rev(lty))
    legend(legpos, rev(paste(names(confidences))), y.intersp=linespread, col=rev(col), lty=rev(lty))
  } else {
    # legend(legpos, rev(paste(names(confidences))), col=rev(col), lty=rev(lty))
    legend(legpos, rev(paste(names(confidences))), y.intersp=linespread, col=rev(col), lty=rev(lty))
  }
}
invisible(list(confints=confidences1)) #rh 2011-03-18
}
eRm/R/plotCI.R0000744000176000001440000000737611572663323012561 0ustar ripleyusers# $Id: plotCI.R 1318 2009-05-08 21:56:38Z warnes $
# plotCI: scatter plot with confidence/error bars (adapted from the gplots
# package).  err selects bar direction ('y' vertical, 'x' horizontal); ui/li
# are explicit upper/lower limits, uiw/liw half-widths around the estimate;
# minbar/maxbar clip the limits.  NOTE(review): relies on the package-internal
# helper invalid() (R/invalid.R) to test for missing/invalid arguments.
plotCI <- function (x, y = NULL, uiw, liw = uiw, ui, li, err='y',
                    ylim=NULL, xlim=NULL, type="p",
                    col=par("col"), barcol=col, pt.bg = par("bg"),
                    sfrac = 0.01, gap=1, lwd=par("lwd"), lty=par("lty"),
                    labels=FALSE, add=FALSE, xlab, ylab, minbar, maxbar, ...
                    )
{
  # accept a list with $x/$y components as first argument
  if (is.list(x)) {
    y <- x$y
    x <- x$x
  }
  if(invalid(xlab)) xlab <- deparse(substitute(x))
  if(invalid(ylab)) {
    if(is.null(y)) {
      xlab <- ""
      ylab <- deparse(substitute(x))
    }
    else ylab <- deparse(substitute(y))
  }
  if (is.null(y)) {
    if (is.null(x))
      stop("both x and y NULL")
    y <- as.numeric(x)
    x <- seq(along = x)
  }
  # z is the coordinate the bars are attached to
  if(err=="y") z <- y else z <- x
  if(invalid(uiw)) uiw <- NA
  if(invalid(liw)) liw <- NA
  if(invalid(ui)) ui <- z + uiw
  if(invalid(li)) li <- z - liw
  if(!invalid(minbar)) li <- ifelse( li < minbar, minbar, li)
  if(!invalid(maxbar)) ui <- ifelse( ui > maxbar, maxbar, ui)
  if(err=="y") {
    if(is.null(ylim)) ylim <- range(c(y, ui, li), na.rm=TRUE)
    if(is.null(xlim) && !is.R() ) xlim <- range( x, na.rm=TRUE)
  }
  else if(err=="x") {
    if(is.null(xlim)) xlim <- range(c(x, ui, li), na.rm=TRUE)
    if(is.null(ylim) && !is.R() ) ylim <- range( x, na.rm=TRUE)
  }
  if(!add) {
    if(invalid(labels) || labels==FALSE )
      plot(x, y, ylim = ylim, xlim=xlim, col=col,
           xlab=xlab, ylab=ylab, ...)
    else {
      plot(x, y, ylim = ylim, xlim=xlim, col=col, type="n",
           xlab=xlab, ylab=ylab, ...)
      text(x, y, label=labels, col=col, ... )
    }
  }
  if(is.R()) myarrows <- function(...) arrows(...) # works only using R!!
  # else
  # myarrows <- function(x1,y1,x2,y2,angle,code,length,...)
  # {
  # segments(x1,y1,x2,y2,open=TRUE,...)
  # if(code==1)
  # segments(x1-length/2,y1,x1+length/2,y1,...)
  # else
  # segments(x2-length/2,y2,x2+length/2,y2,...)
  # }
  if(err=="y") {
    if(gap!=FALSE) gap <- strheight("O") * gap
    smidge <- par("fin")[1] * sfrac
    # draw lower bar (li is the lower limit; comment previously said "upper")
    if(!is.null(li))
      myarrows(x , li, x, pmax(y-gap,li), col=barcol,
               lwd=lwd, lty=lty, angle=90, length=smidge, code=1)
    # draw upper bar (ui is the upper limit; comment previously said "lower")
    if(!is.null(ui))
      myarrows(x , ui, x, pmin(y+gap,ui), col=barcol,
               lwd=lwd, lty=lty, angle=90, length=smidge, code=1)
  }
  else {
    if(gap!=FALSE) gap <- strwidth("O") * gap
    smidge <- par("fin")[2] * sfrac
    # draw left bar
    if(!is.null(li))
      myarrows(li, y, pmax(x-gap,li), y, col=barcol, lwd=lwd,
               lty=lty, angle=90, length=smidge, code=1)
    # draw right bar
    if(!is.null(ui))
      myarrows(ui, y, pmin(x+gap,ui), y, col=barcol, lwd=lwd,
               lty=lty, angle=90, length=smidge, code=1)
  }
  ## _now_ draw the points (to avoid having lines drawn 'through' points)
  points(x, y, col = col, lwd = lwd, bg = pt.bg, type = type, ...)
  invisible(list(x = x, y = y))
}
eRm/R/plot.ppar.r0000744000176000001440000000260111572663323013340 0ustar ripleyusersplot.ppar <- function(x,xlab="Person Raw Scores",ylab="Person Parameters (Theta)",main=NULL,...)
### function (x, y = NULL, type = "p", xlim = NULL, ylim = NULL,
### log = "", main = NULL, sub = NULL,
### xlab = "Person Raw Scores",
### ylab = "Person Parameters (Theta)",
### ann = par("ann"), axes = TRUE, frame.plot = axes, panel.first = NULL,
### panel.last = NULL, asp = NA, ...)
# plot of the person raw scores against the person parameters
# x...object of class "ppar" (resulting from person.parameter.eRm)
# One scatter plot per NA-group, with the spline interpolation overlaid.
# FIX: the original computed the plot title in a separate loop
#   for (i in 1:length(pl)) main.text <- paste("Person Parameter Plot of Group", i)
# which overwrites main.text each iteration, so with several groups EVERY plot
# was titled with the last group number.  The title is now built per plot.
{
  pl <- x$pred.list  #list with spline interpolations
  if (is.null(pl)) stop("Spline interpolation required in person.parameter.eRm!")
  X <- x$X
  if (length(x$pers.ex) > 0) {
    X <- X[-x$pers.ex, ]  #drop 0/full-score persons, as in person.parameter.eRm
    #gmemb <- x$gmemb[-x$pers.ex]
  }
  gmemb <- x$gmemb
  X.list <- split(as.data.frame(X), as.factor(gmemb))  #one data block per NA-group
  for (i in 1:length(pl)) {
    #dev.new()
    if (!is.null(main)) {
      main.text <- main                                        #user-supplied title wins
    } else if (length(pl) > 1) {
      main.text <- paste("Person Parameter Plot of Group", i)  #per-group title (bug fix)
    } else {
      main.text <- "Plot of the Person Parameters"
    }
    plot(rowSums(X.list[[i]], na.rm = TRUE), x$thetapar[[i]],
         xlim = c(min(pl[[i]]$x), max(pl[[i]]$x)),
         ylim = c(min(pl[[i]]$y), max(pl[[i]]$y)),
         xlab = xlab, ylab = ylab, main = main.text, ...)
    lines(pl[[i]]$x, pl[[i]]$y)
  }
}
eRm/R/plist.internal.R0000744000176000001440000000152611572663323014334 0ustar ripleyusers`plist.internal` <- function(object,theta)
# computes a list of expected probabilities for objects of class Rm
# with 0th category included!
{
  X <- object$X
  mt_vek <- apply(X,2,max,na.rm=TRUE) #number of categories - 1 for each item
  mt_ind <- rep(1:length(mt_vek),mt_vek)
  #--------compute list matrix of probabilites for fixed theta)
  # one list element per item: a (length(theta) x (categories+1)) matrix of
  # category probabilities under the model, 0th category included
  p.list <- tapply(object$betapar,mt_ind,function(beta.i) {
    beta.i <- c(0,beta.i)
    ind.h <- 0:(length(beta.i)-1)
    theta.h <- ind.h %*% t(theta) #multiply category with
    #tb <- exp(theta.h-beta.i)
    tb <- exp(theta.h+beta.i)     # betapar are easiness parameters, hence "+"
    denom <- colSums(tb)
    pi.mat <- apply(tb,1,function(y) {y/denom})
    return(pi.mat)
  })
  return(p.list)
}
eRm/R/pifit.internal.r0000744000176000001440000000406311572663323014353 0ustar ripleyusers# pifit.internal: expectation (Emat), variance (Vmat) and kurtosis (Cmat)
# terms of the standardized residuals, person x item.
pifit.internal <- function(object)
{ #object of class ppar
  #function is called in itemfit.ppar and personfit.ppar
  X <- object$X
  mt_vek <- apply(X,2,max,na.rm=TRUE) #number of categories - 1 for each item
  mt_ind <- rep(1:length(mt_vek),mt_vek)
  mt_seq <- sequence(mt_vek)
  gmemb <- object$gmemb
  pmat <- pmat(object) #matrix with model probabilites
  #-----------------matrix with expected response patterns--------------
  Emat.cat <- t(apply(pmat,1,function(x) x*mt_seq))
  if ((object$model == "RM") || (object$model == "LLTM")) {
    Emat <- Emat.cat
  } else {
    # polytomous models: sum category-wise expectations into one column per item
    E.list <- tapply(1:length(mt_ind),mt_ind, function(ind) {rowSums(cbind(Emat.cat[,ind]),na.rm=TRUE)})
    Emat <- matrix(unlist(E.list),ncol=dim(X)[2],dimnames=list(rownames(pmat),colnames(X)))
  }
  #------------------------variance term for standardized residuals------
  pmat.l0 <- tapply(1:length(mt_ind),mt_ind, function(ind) {
    vec0 <- 1-rowSums(as.matrix(pmat[,ind])) #prob for 0th category
    cbind(vec0,pmat[,ind])
  })
  pmat0 <- matrix(unlist(pmat.l0),nrow=length(gmemb)) #prob matrix 0th category included
  mt_vek0 <- integer(0) #add 0th category to all indices
  for (i in mt_vek) mt_vek0 <- c(mt_vek0, 0:i)
  mt_ind0 <- rep(1:length(mt_vek),mt_vek+1)
  colnames(Emat) <- NULL
  # deviation of each category score from the item expectation
  Emat0 <- t(apply(Emat[,mt_ind0],1,function(x) {mt_vek0 - x}))
  Vmat.cat <- (Emat0)^2*pmat0
  V.list <- tapply(1:length(mt_ind0),mt_ind0, function(ind) {rowSums(Vmat.cat[,ind],na.rm=TRUE)})
  Vmat <- matrix(unlist(V.list),ncol=dim(X)[2],dimnames=list(rownames(pmat),colnames(X)))
  #------------------------kurtosis term for standardized residuals------
  Cmat.cat <- (Emat0)^4*pmat0
  C.list <- tapply(1:length(mt_ind0),mt_ind0, function(ind) {rowSums(Cmat.cat[,ind],na.rm=TRUE)})
  Cmat <- matrix(unlist(C.list),ncol=dim(X)[2],dimnames=list(rownames(pmat),colnames(X)))
  result <- list(Emat=Emat,Vmat=Vmat,Cmat=Cmat)
}
eRm/R/personfit.R0000744000176000001440000000007211572663323013372 0ustar ripleyusers`personfit` <- function(object)UseMethod("personfit")
eRm/R/personfit.ppar.R0000744000176000001440000000321111572663323014331 0ustar ripleyusers`personfit.ppar` <- function(object)
# computes Chi-square based itemfit statistics (Smith, p.77ff)
# for object of class "ppar" (from person.parameter)
{
  if (length(object$pers.ex)==0) {
    X <- object$X
  } else {
    X <- object$X[-object$pers.ex,]
  }
  #rp <- rowSums(X,na.rm=TRUE)
  #mt_vek <- apply(X,2,max,na.rm=TRUE)
  #maxrp <- sum(mt_vek)
  #TFrow <- ((rp==maxrp) | (rp==0)) #exclude full and 0 responses
  VE <- pifit.internal(object) #compute expectation and variance term
  Emat <- VE$Emat
  Vmat <- VE$Vmat
  Cmat <- VE$Cmat
  st.res <- (X-Emat)/sqrt(Vmat)
  #st.res <- (X[!TFrow,]-Emat)/sqrt(Vmat)
  sq.res <- st.res^2 #squared standardized residuals
  pfit <- rowSums(sq.res,na.rm=TRUE)
  pdf <- apply(X,1,function(x) {length(na.exclude(x))})
  #pdf <- apply(X[!TFrow,],1,function(x) {length(na.exclude(x))}) #degress of freedom (#of persons per item)
  # outfit/infit mean squares and their standardized (Z) versions
  p.outfitMSQ <- pfit/pdf
  qsq.outfitMSQ <- (rowSums(Cmat/Vmat^2, na.rm=TRUE)/pdf^2) - 1/pdf
  q.outfitMSQ <- sqrt(qsq.outfitMSQ)
  psumVmat<-rowSums(Vmat)
  p.infitMSQ <- rowSums(sq.res*Vmat, na.rm = TRUE)/psumVmat
  qsq.infitMSQ <- rowSums(Cmat-Vmat^2, na.rm=TRUE)/psumVmat^2
  q.infitMSQ <- sqrt(qsq.infitMSQ)
  p.outfitZ <- (sqrt(p.outfitMSQ)-1)*(3/q.outfitMSQ)+(q.outfitMSQ/3)
  p.infitZ <- (sqrt(p.infitMSQ)-1)*(3/q.infitMSQ)+(q.infitMSQ/3)
  result <- list(p.fit = pfit, p.df = pdf, st.res = st.res, p.outfitMSQ = p.outfitMSQ,
                 p.infitMSQ = p.infitMSQ, p.outfitZ = p.outfitZ, p.infitZ = p.infitZ)
  class(result) <- "pfit"
  result
}
eRm/R/person.parameter.R0000744000176000001440000000010111572663323014641 0ustar ripleyusersperson.parameter <- function(object)UseMethod("person.parameter")
eRm/R/person.parameter.eRm.R0000744000176000001440000002316611572663323015401 0ustar ripleyusers`person.parameter.eRm` <- function(object)
# estimation of the person parameters with jml
# object of class eRm
# se... whether standard errors should be computed
# splineInt... whether spline interpolation should be carried out
{
  se <- TRUE
  splineInt <- TRUE
  options(warn=0)
  X <- object$X
  #collapse X
  #X.full <- object$X
  max.it <- apply(X,2,max,na.rm=TRUE) #maximum item raw score without NA
  rp <- rowSums(X,na.rm=TRUE) #person raw scores
  maxrp <- apply(X,1,function(x.i) {sum(max.it[!is.na(x.i)])}) #maximum item raw score for person i
  TFrow <- ((rp==maxrp) | (rp==0))
  pers.exe <- (1:dim(X)[1])[TFrow] #persons excluded from estimation due to 0/full
  pers.exe.names<-rownames(X)[pers.exe]
  pers.in<-(1:dim(X)[1])[-pers.exe] #persons in estimation
  if (length(pers.exe) > 0) { #data matrix persons (full/0) excluded)
    X.ex <- object$X[-pers.exe,]
  } else {
    X.ex <- object$X
  }
  # gmemb.X: NA-pattern group membership on the full data matrix
  if (any(is.na(X))) {
    dichX <- ifelse(is.na(X),1,0)
    strdata <- apply(dichX,1,function(x) {paste(x,collapse="")})
    gmemb.X <- as.vector(data.matrix(data.frame(strdata)))
  } else {
    gmemb.X <- rep(1,dim(X)[1])
  }
  if (length(pers.exe) > 0) X <- X[-pers.exe,]
  X.dummy <- X
  # gmemb/gmemb1: NA-pattern groups after exclusion of 0/full-score persons
  if (any(is.na(X))) {
    dichX <- ifelse(is.na(X),1,0)
    strdata <- apply(dichX,1,function(x) {paste(x,collapse="")})
    gmemb <- as.vector(data.matrix(data.frame(strdata)))
    gmemb1 <- gmemb
  } else {
    gmemb <- rep(1,dim(X)[1])
    gmemb1 <- gmemb
  }
  mt_vek <- apply(X,2,max,na.rm=TRUE) #number of categories - 1 for each item
  mt_ind <- rep(1:length(mt_vek),mt_vek) #index i for items
  indvec <- NULL #establish matrix with unique raw scores
  for (i in unique(gmemb)) {
    gmemb.ind <- which(gmemb == i)
    collapse.vec <- which(!duplicated(rowSums(rbind(X[gmemb==i,]),na.rm = TRUE)))
    indvec <- c(indvec, gmemb.ind[collapse.vec])
  }
  #for (i in unique(gmemb)) indvec <- c(indvec,!duplicated(rowSums(rbind(X[gmemb==i,]),na.rm = TRUE)))
  indvec <- sort(indvec)
  X <- X[indvec,] #collapsing X
  beta.all <- object$betapar
  if (!is.null(object$ngroups)) if (object$ngroups > 1) stop("Estimation of person parameters for models with group contrasts not possible!")
  if (is.null(object$mpoints)) {
    mpoints <- 1
  } else {mpoints <- object$mpoints}
  r.pall <- rowSums(X,na.rm=TRUE) #person raw scores
  X01 <- object$X01
  if (length(pers.exe) > 0) X01 <- X01[-pers.exe,] #if persons excluded due to 0/full response
  X01 <- X01[indvec,] #collapsed version
  gmemb <- gmemb[indvec] #collapsed version
  rownames(X01) <- rownames(X)
  rowvec <- 1:(dim(X01)[1])
  # one JML fit per NA-pattern group
  fitlist <- tapply(rowvec,gmemb,function(rind) { #list with nlm outputs
    if (length(rind) > 1) {
      ivec <- !is.na(X[rind[1],]) #non-NA elements
      r.i <- colSums(X[rind,ivec],na.rm=TRUE) #item raw scores
    } else { #if only one person belongs to raw score group
      ivec <- !is.na(X[rind[1],])
      r.i <- X[rind,ivec]
      # r.i <- X[rind,]
      # r.i[is.na(r.i)] <- 0
    }
    #r.i <- colSums(object$X[rind,],na.rm=TRUE) #item raw scores
    r.p <- r.pall[rind] #person raw scores for current NA group
    X01g <- rbind(X01[rind,])
    beta <- beta.all[!is.na(X01g[1,])]
    X01beta <- rbind(X01g,beta.all) #matrix with unique 0/1 response patterns and beta vector in the last row
    theta <- rep(0,length(r.p))
    #==================== ML routines ===================================
    jml.rasch <- function(theta) #fast ML for RM only
    {
      ksi <- exp(theta)
      denom <- 1/exp(-beta) #-beta instead of beta since beta are easiness parameter
      lnL <- sum(r.p*theta)-sum(r.i*(-beta))-sum(log(1+outer(ksi,denom)))
      -lnL
    }
    jml <- function(theta) #ML for all other models
    {
      t1t2.list <- tapply(1:(dim(X01beta)[2]),mt_ind, function(xin) {
        #xb <- (t(X01beta)[xin,])
        xb <- rbind(t(X01beta)[xin,]) #0/1 responses and beta parameters for one item
        beta.i <- c(0,xb[,dim(xb)[2]]) #item parameter with 0
        #person responses (row-wise) on each category for current item
        # NOTE(review): "(length(xin == 1))" is length of a logical vector and is
        # always >= 1; almost surely "(length(xin) == 1)" was intended -- confirm
        if ((dim(xb)[1] > 1) && (length(xin == 1))) {
          x01.i <- as.matrix(xb[,1:(dim(xb)[2]-1)])
        } else {
          x01.i <- rbind(xb[,1:(dim(xb)[2]-1)]) #0/1 matrix for item i without beta
        }
        cat0 <- rep(0,dim(x01.i)[2])
        cat0[colSums(x01.i)==0] <- 1 #those with 0 on the 1-kth category get a 1
        x01.i0 <- rbind(cat0,x01.i) #appending response vector for 0th category
        ind.h <- 0:(length(beta.i)-1)
        theta.h <- ind.h %*% t(theta) #n. categories times theta #!!!FIXME
        term1 <- (theta.h+beta.i)*x01.i0 #category-person matrix
        t1.i <- sum(colSums(term1)) #sum over categories and persons
        #print(t1.i)
        term2 <- exp(theta.h+beta.i)
        t2.i <- sum(log(colSums(term2))) #sum over categories and persons
        #print(t2.i)
        return(c(t1.i,t2.i))
      })
      termlist <- matrix(unlist(t1t2.list),ncol=2,byrow=TRUE)
      termlist <- termlist[!is.na(rowSums(termlist)),]
      st1st2 <- colSums(termlist, na.rm = TRUE) #sum term1, term2
      lnL <- st1st2[1]-st1st2[2]
      -lnL
    }
    #==================== end ML routines ================================
    #==================== call optimizer =================================
    if (object$model == "RM") {
      fit <- nlm(jml.rasch,theta,hessian=se,iterlim=1000)
    } else {
      fit <- nlm(jml,theta,hessian=se,iterlim=1000)
    }
    #fit2 <- optim(theta,jml.rasch,method="BFGS",hessian=TRUE)
    #=================== end call optimizer ==============================
    loglik <- -fit$minimum
    niter <- fit$iterations
    thetapar <- fit$estimate
    if (se) {
      se <- sqrt(diag(solve(fit$hessian)))
    } else {
      se <- NA
      fit$hessian <- NA
    }
    list(loglik=loglik,niter=niter,thetapar=thetapar,se=se,hessian=fit$hessian)
  })
  # collect the per-group fit results
  loglik <- NULL
  niter <- NULL
  npar <- NULL
  thetapar <- list(NULL)
  se.theta <- list(NULL)
  hessian <- list(NULL)
  for (i in 1:length(fitlist)) {
    loglik <- c(loglik,fitlist[[i]]$loglik)
    niter <- c(niter,fitlist[[i]]$niter)
    npar <- c(npar,length(fitlist[[i]]$thetapar))
    thetapar[[i]] <- fitlist[[i]]$thetapar
    se.theta[[i]] <- fitlist[[i]]$se
    hessian[[i]] <- fitlist[[i]]$hessian
  }
  if (splineInt) { #cubic spline interpolation for missing, 0, full raw scores
    x <- rowSums(X,na.rm=TRUE)
    xlist <- split(x,gmemb)
    pred.list <- mapply(function(xx,yy) {
      y <- tapply(yy,xx, function(xy) {xy[1]})
      x <- unique(sort(xx))
      if ((length(x) > 3) || (length(y) > 3)) { #otherwise splinereg is not admissible
        fm1 <- interpSpline(x,y)
        pred.val <- predict(fm1, 0:sum(max.it))
      } else {
        warning("Spline interpolation is not performed!\n Less than 4 different person parameters estimable!\n Perhaps in (NA) subgroup(s).")
        NULL
      }},xlist,thetapar,SIMPLIFY=FALSE)
    X.n <- object$X
    if (any(sapply(pred.list,is.null))) pred.list <- NULL #no spline interpolation applicable
  }
  names(thetapar) <- names(se.theta) <- paste("NAgroup",1:length(thetapar),sep="")
  #---------expand theta and se.theta, labeling -------------------
  for (i in unique(gmemb)) {
    o.r <- rowSums(rbind(X.dummy[gmemb1==i,]), na.rm = TRUE) #orginal raw scores
    names(o.r) <- rownames(X.dummy)[gmemb1 == i]
    c.r <- rowSums(rbind(X[gmemb==i,]), na.rm = TRUE) #collapsed raw scores
    match.ind <- match(o.r, c.r)
    thetapar[[i]] <- thetapar[[i]][match.ind] #de-collapse theta's
    se.theta[[i]] <- se.theta[[i]][match.ind] #de-collapse se's
    names(thetapar[[i]]) <- names(se.theta[[i]]) <- names(o.r)
  }
  #--------------- end expand, labeling ---------------------------
  #---------------------- theta.table new ----------------------
  thetavec <- unlist(thetapar)
  ind.orig <- as.vector(unlist(tapply(1:length(gmemb1), gmemb1, function(ind) {ind})))
  theta.orig <- tapply(thetavec, ind.orig, function(ii) return(ii)) #original order theta parameter
  theta.table <- data.frame(theta.orig, gmemb1)
  colnames(theta.table) <- c("Person Parameter","NAgroup")
  rownames(theta.table) <- rownames(X.ex)
  result <- list(X = X.n, X01 = object$X01, X.ex = X.ex, W = object$W, model = object$model,
                 loglik = loglik, loglik.cml = object$loglik, npar = npar, iter = niter,
                 betapar = object$betapar, thetapar = thetapar, se.theta = se.theta,
                 theta.table = theta.table, pred.list = pred.list, hessian = hessian,
                 mpoints = mpoints, pers.ex = pers.exe, gmemb = gmemb1)
  class(result) <- "ppar"
  result
}
eRm/R/performance_plots.R0000744000176000001440000006030011572663323015103 0ustar ripleyusers## ----------------------------------------------------------------------------
## plot method for objects of class 'performance'
## ----------------------------------------------------------------------------
# .get.arglist: pick out of arglist the arguments relevant for function fname,
# either by matching formals/par() names or by prefix.
.get.arglist <- function( fname, arglist ) {
  if (fname=='plot') return(.select.args(arglist, union(names(formals(plot.default)), names(par()))))
  else if (fname=='plot.xy') return(.select.args(arglist, union( names(formals(plot.xy)), names(par()))))
  else return( .select.prefix( arglist, fname) )
}
# .downsample: thin the alpha/x/y value vectors of a performance object;
# downsampling in (0,1) is a fraction, > 1 an absolute number of points.
.downsample <- function( perf, downsampling ) {
  for (i in 1:length(perf@alpha.values)) {
    if (downsampling < 1 && downsampling > 0)
      ind <- round(seq(1, length(perf@alpha.values[[i]]), length=(length(perf@alpha.values[[i]]) * downsampling)))
    else if (downsampling > 1)
      ind <- round(seq(1, length(perf@alpha.values[[i]]), length=downsampling))
    else ind <- 1:length(perf@alpha.values[[i]])
    perf@alpha.values[[i]] <- perf@alpha.values[[i]][ind]
    perf@x.values[[i]] <- perf@x.values[[i]][ind]
    perf@y.values[[i]] <- perf@y.values[[i]][ind]
  }
  return(perf)
}
# .plot.performance: dispatcher that validates the performance object and
# delegates to the averaging-specific plot routines below.
.plot.performance <- function(perf, ..., avg="none", spread.estimate="none", spread.scale=1, show.spread.at=c(), colorize=FALSE, colorize.palette=rev(rainbow(256,start=0, end=4/6)), colorkey=colorize, colorkey.relwidth=0.25, colorkey.pos="right", print.cutoffs.at=c(), cutoff.label.function=function(x) { round(x,2) }, downsampling=0, add=FALSE) {
  arglist <- c(lapply( as.list(environment()), eval ), list(...
  ) )
  {
  if (length(perf@y.values) != length(perf@x.values)) {
    stop("Performance object cannot be plotted.")
  }
  if (is.null(perf@alpha.values) && (colorize==TRUE || length(print.cutoffs.at)>0)) {
    stop(paste("Threshold coloring or labeling cannot be performed:",
               "performance object has no threshold information."))
  }
  if ((avg=="vertical" || avg=="horizontal") && (colorize==TRUE || length(print.cutoffs.at)>0)) {
    stop(paste("Threshold coloring or labeling is only well-defined for",
               "'no' or 'threshold' averaging."))
  }
  if (downsampling >0 ) perf <- .downsample( perf, downsampling)
  ## for infinite cutoff, assign maximal finite cutoff + mean difference
  ## between adjacent cutoff pairs
  if (length(perf@alpha.values)!=0) perf@alpha.values <-
    lapply(perf@alpha.values,
           function(x) { isfin <- is.finite(x);
             x[is.infinite(x)] <- (max(x[isfin]) +
                                     mean(abs(x[isfin][-1] -
                                                x[isfin][-length(x[isfin])])));
             x } )
  ## remove samples with x or y not finite
  for (i in 1:length(perf@x.values)) {
    ind.bool <- (is.finite(perf@x.values[[i]]) & is.finite(perf@y.values[[i]]))
    if (length(perf@alpha.values)>0) perf@alpha.values[[i]] <- perf@alpha.values[[i]][ind.bool]
    perf@x.values[[i]] <- perf@x.values[[i]][ind.bool]
    perf@y.values[[i]] <- perf@y.values[[i]][ind.bool]
  }
  arglist <- .sarg( arglist, perf=perf)
  if (add==FALSE) do.call( ".performance.plot.canvas", arglist )
  # dispatch on the requested averaging mode
  if (avg=="none") do.call(".performance.plot.no.avg", arglist)
  else if (avg=="vertical") do.call(".performance.plot.vertical.avg", arglist)
  else if (avg=="horizontal") do.call(".performance.plot.horizontal.avg", arglist)
  else if (avg=="threshold") do.call(".performance.plot.threshold.avg", arglist)
}
## ---------------------------------------------------------------------------
## initializing plots and plotting a canvas
## (can be skipped using 'plot( ..., add=TRUE)'
## ---------------------------------------------------------------------------
# .performance.plot.canvas: set up axes, labels and (optionally) the cutoff
# colorkey on the right or top margin.
.performance.plot.canvas <- function(perf, avg, ...) {
  arglist <- list(...)
  axis.names <- list(x=perf@x.name, y=perf@y.name)
  if (avg=="horizontal" || avg=="threshold") axis.names$x <- paste("Average", tolower(axis.names$x))
  if (avg=="vertical" || avg=="threshold") axis.names$y <- paste("Average", tolower(axis.names$y))
  arglist <- .farg(arglist, xlab=axis.names$x, ylab=axis.names$y)
  arglist <- .farg(arglist, xlim=c(min(unlist(perf@x.values)), max(unlist(perf@x.values))),
                   ylim=c(min(unlist(perf@y.values)), max(unlist(perf@y.values))))
  do.call("plot", .sarg(.slice.run(.get.arglist('plot', arglist)), x=0.5, y=0.5, type='n', axes=FALSE))
  do.call( "axis", .sarg(.slice.run(.get.arglist('xaxis', arglist)), side=1))
  do.call( "axis", .sarg(.slice.run(.get.arglist('yaxis', arglist)), side=2))
  if (.garg(arglist,'colorkey')==TRUE) {
    colors <- rev( .garg(arglist,'colorize.palette') )
    max.alpha <- max(unlist(perf@alpha.values))
    min.alpha <- min(unlist(perf@alpha.values))
    col.cutoffs <- rev(seq(min.alpha,max.alpha, length=length( colors )))
    if ( .garg(arglist,'colorkey.pos')=="right") {
      ## axis drawing (ticks + labels)
      ## The interval [min.alpha,max.alpha] needs to be mapped onto
      ## the interval [min.y,max.y], rather than onto the interval
      ## [ylim[1],ylim[2]] ! In the latter case, NAs could occur in
      ## approxfun below, because axTicks can be out of the ylim-range
      ## ('yxaxs': 4%region)
      max.y <- max(axTicks(4))
      min.y <- min(axTicks(4))
      alpha.ticks <- .garg( arglist, c("coloraxis.at"))
      if (length(alpha.ticks)==0) alpha.ticks <- approxfun(c(min.y, max.y), c(min.alpha, max.alpha)) ( axTicks(4))
      alpha2y <- approxfun(c(min(alpha.ticks), max(alpha.ticks)), c(min.y,max.y))
      arglist <- .sarg(arglist, coloraxis.labels=.garg(arglist, 'cutoff.label.function')(alpha.ticks),
                       coloraxis.at=alpha2y(alpha.ticks))
      do.call("axis", .sarg(.slice.run(.get.arglist('coloraxis', arglist)), side=4))
      ## draw colorkey
      ## each entry in display.bool corresponds to one rectangle of
      ## the colorkey.
      ## Only rectangles within the alpha.ticks range are plotted.
      ## y.lower, y.upper, and colors, are the attributes of the visible
      ## rectangles (those for which display.bool=TRUE)
      display.bool <- (col.cutoffs >= min(alpha.ticks) &
                         col.cutoffs < max(alpha.ticks))
      y.lower <- alpha2y( col.cutoffs )[display.bool]
      colors <- colors[display.bool]
      # NOTE(review): 'length(y.lower>=2)' is the length of a logical vector and
      # is almost always truthy; 'length(y.lower) >= 2' was probably intended.
      if (length(y.lower>=2)) {
        y.width <- y.lower[2] - y.lower[1]
        y.upper <- y.lower + y.width
        x.left <- .garg(arglist,'xlim')[2] + ((.garg(arglist,'xlim')[2] - .garg(arglist,'xlim')[1]) *
                                                (1-.garg(arglist,'colorkey.relwidth'))*0.04)
        x.right <- .garg(arglist,'xlim')[2] + (.garg(arglist,'xlim')[2] -.garg(arglist,'xlim')[1]) * 0.04
        rect(x.left, y.lower, x.right, y.upper, col=colors, border=colors,xpd=NA)
      }
    } else if (.garg(arglist, 'colorkey.pos') == "top") {
      ## axis drawing (ticks + labels)
      max.x <- max(axTicks(3))
      min.x <- min(axTicks(3))
      alpha.ticks <- .garg( arglist, c("coloraxis.at"))
      if (length(alpha.ticks)==0) {
        alpha.ticks <- approxfun(c(min.x, max.x), c(min.alpha, max.alpha))(axTicks(3))
      }
      alpha2x <- approxfun(c( min(alpha.ticks), max(alpha.ticks)), c( min.x, max.x))
      arglist <- .sarg(arglist, coloraxis.labels=.garg(arglist, 'cutoff.label.function')(alpha.ticks),
                       coloraxis.at= alpha2x(alpha.ticks))
      do.call("axis", .sarg(.slice.run( .get.arglist('coloraxis', arglist)), side=3))
      ## draw colorkey
      display.bool <- (col.cutoffs >= min(alpha.ticks) &
                         col.cutoffs < max(alpha.ticks))
      x.left <- alpha2x( col.cutoffs )[display.bool]
      colors <- colors[display.bool]
      if (length(x.left)>=2) {
        x.width <- x.left[2] - x.left[1]
        x.right <- x.left + x.width
        y.lower <- .garg(arglist,'ylim')[2] + (.garg(arglist,'ylim')[2] - .garg(arglist,'ylim')[1]) *
          (1-.garg(arglist,'colorkey.relwidth'))*0.04
        y.upper <- .garg(arglist,'ylim')[2] + (.garg(arglist,'ylim')[2] - .garg(arglist,'ylim')[1]) * 0.04
        rect(x.left, y.lower, x.right, y.upper, col=colors, border=colors, xpd=NA)
      }
    }
  }
  do.call( "box", .slice.run( .get.arglist( 'box', arglist)))
}
## ----------------------------------------------------------------------------
##
## plotting performance objects when no curve averaging is wanted
## ----------------------------------------------------------------------------
# .performance.plot.no.avg: draw each run's curve as-is, optionally coloring
# segments by cutoff value and labeling selected cutoffs.
.performance.plot.no.avg <- function( perf, ... ) {
  arglist <- list(...)
  arglist <- .farg(arglist, type= 'l')
  if (.garg(arglist, 'colorize') == TRUE) {
    colors <- rev( .garg( arglist, 'colorize.palette') )
    max.alpha <- max(unlist(perf@alpha.values))
    min.alpha <- min(unlist(perf@alpha.values))
    col.cutoffs <- rev(seq(min.alpha,max.alpha, length=length(colors)+1))
    col.cutoffs <- col.cutoffs[2:length(col.cutoffs)]
  }
  for (i in 1:length(perf@x.values)) {
    if (.garg(arglist, 'colorize') == FALSE) {
      do.call("plot.xy", .sarg(.slice.run(.get.arglist('plot.xy', arglist), i),
                               xy=(xy.coords(perf@x.values[[i]], perf@y.values[[i]]))))
    } else {
      # colorized: draw the curve segment by segment
      for (j in 1:(length(perf@x.values[[i]])-1)) {
        segment.coloring <- colors[min(which(col.cutoffs <= perf@alpha.values[[i]][j]))]
        do.call("plot.xy", .sarg(.slice.run(.get.arglist('plot.xy', arglist), i),
                                 xy=(xy.coords(perf@x.values[[i]][j:(j+1)], perf@y.values[[i]][j:(j+1)])),
                                 col= segment.coloring))
      }
    }
    print.cutoffs.at <- .garg(arglist, 'print.cutoffs.at',i)
    if (! is.null(print.cutoffs.at)) {
      text.x <- approxfun(perf@alpha.values[[i]], perf@x.values[[i]], rule=2, ties=mean)(print.cutoffs.at)
      text.y <- approxfun(perf@alpha.values[[i]], perf@y.values[[i]], rule=2, ties=mean)(print.cutoffs.at)
      do.call("points", .sarg(.slice.run(.get.arglist('points', arglist),i),
                              x= text.x, y= text.y))
      do.call("text", .farg(.slice.run( .get.arglist('text', arglist),i),
                            x= text.x, y= text.y,
                            labels=(.garg(arglist, 'cutoff.label.function', i)(print.cutoffs.at))))
    }
  }
}
## ----------------------------------------------------------------------------
## plotting performance objects when vertical curve averaging is wanted
## ----------------------------------------------------------------------------
# .performance.plot.vertical.avg: average the curves' y values over a common
# x grid and optionally show the spread (error bars or boxplots).
.performance.plot.vertical.avg <- function( perf, ...) {
  arglist <- list(...)
  arglist <- .farg(arglist, show.spread.at= (seq(min(unlist(perf@x.values)),
                                                 max(unlist(perf@x.values)),
                                                 length=11)))
  perf.avg <- perf
  x.values <- seq(min(unlist(perf@x.values)), max(unlist(perf@x.values)),
                  length=max( sapply(perf@x.values, length)))
  for (i in 1:length(perf@y.values)) {
    perf.avg@y.values[[i]] <- approxfun(perf@x.values[[i]], perf@y.values[[i]],
                                        ties=mean, rule=2)(x.values)
  }
  perf.avg@y.values <- list(rowMeans( data.frame( perf.avg@y.values )))
  perf.avg@x.values <- list(x.values)
  perf.avg@alpha.values <- list()
  ## y.values at show.spread.at (midpoint of error bars )
  show.spread.at.y.values <- lapply(as.list(1:length(perf@x.values)),
                                    function(i) {
                                      approxfun(perf@x.values[[i]], perf@y.values[[i]],
                                                rule=2, ties=mean)( .garg(arglist, 'show.spread.at')) })
  show.spread.at.y.values <- as.matrix(data.frame(show.spread.at.y.values ))
  colnames(show.spread.at.y.values) <- c()
  ## now, show.spread.at.y.values[i,] contains the curve y values at the
  ## sampling x value .garg(arglist,'show.spread.at')[i]
  if (.garg(arglist, 'spread.estimate') == "stddev" ||
      .garg(arglist, 'spread.estimate') == "stderror") {
    bar.width <- apply(show.spread.at.y.values, 1, sd)
    if (.garg(arglist, 'spread.estimate') == "stderror") {
      bar.width <- bar.width / sqrt( ncol(show.spread.at.y.values) )
    }
    bar.width <- .garg(arglist, 'spread.scale') * bar.width
    suppressWarnings(do.call("plotCI",
                             .farg(.sarg(.get.arglist( 'plotCI', arglist),
                                         x=.garg(arglist, 'show.spread.at'),
                                         y=rowMeans( show.spread.at.y.values),
                                         uiw= bar.width, liw= bar.width,
                                         err= 'y', add= TRUE),
                                   gap= 0, type= 'n')))
  }
  if (.garg(arglist, 'spread.estimate') == "boxplot") {
    do.call("boxplot", .farg(.sarg(.get.arglist( 'boxplot', arglist),
                                   x= data.frame(t(show.spread.at.y.values)),
                                   at= .garg(arglist, 'show.spread.at'),
                                   add= TRUE, axes= FALSE),
                             boxwex= (1/(2*(length(.garg(arglist, 'show.spread.at')))))))
    do.call("points", .sarg(.get.arglist( 'points', arglist),
                            x= .garg(arglist, 'show.spread.at'),
                            y=rowMeans(show.spread.at.y.values)))
  }
  do.call( ".plot.performance", .sarg(arglist, perf= perf.avg, avg= 'none', add= TRUE))
}
## ----------------------------------------------------------------------------
## plotting performance objects when horizontal curve averaging is wanted
## ----------------------------------------------------------------------------
# .performance.plot.horizontal.avg: average the curves' x values over a common
# y grid (the transpose of vertical averaging).
.performance.plot.horizontal.avg <- function( perf, ...) {
  arglist <- list(...)
  arglist <- .farg(arglist, show.spread.at= seq(min(unlist(perf@y.values)),
                                                max(unlist(perf@y.values)),
                                                length=11))
  perf.avg <- perf
  y.values <- seq(min(unlist(perf@y.values)), max(unlist(perf@y.values)),
                  length=max( sapply(perf@y.values, length)))
  for (i in 1:length(perf@x.values)) {
    perf.avg@x.values[[i]] <- approxfun(perf@y.values[[i]], perf@x.values[[i]],
                                        ties=mean, rule=2)(y.values)
  }
  perf.avg@x.values <- list(rowMeans( data.frame( perf.avg@x.values )))
  perf.avg@y.values <- list(y.values)
  perf.avg@alpha.values <- list()
  ## x.values at show.spread.at (midpoint of error bars )
  show.spread.at.x.values <- lapply(as.list(1:length(perf@y.values)),
                                    function(i) {
                                      approxfun(perf@y.values[[i]], perf@x.values[[i]],
                                                rule=2, ties=mean)(.garg(arglist,'show.spread.at')) } )
  show.spread.at.x.values <- as.matrix(data.frame(show.spread.at.x.values))
  colnames(show.spread.at.x.values) <- c()
  ## now, show.spread.at.x.values[i,] contains the curve x values at the
  ## sampling y value .garg(arglist,'show.spread.at')[i]
  if (.garg(arglist,'spread.estimate') == 'stddev' ||
      .garg(arglist,'spread.estimate') == 'stderror') {
    bar.width <- apply(show.spread.at.x.values, 1, sd)
    if (.garg(arglist,'spread.estimate')== 'stderror') {
      bar.width <- bar.width / sqrt( ncol(show.spread.at.x.values) )
    }
    bar.width <- .garg(arglist,'spread.scale') * bar.width
    suppressWarnings(do.call("plotCI",
                             .farg(.sarg(.get.arglist( 'plotCI', arglist),
                                         x= rowMeans( show.spread.at.x.values),
                                         y= .garg(arglist, 'show.spread.at'),
                                         uiw= bar.width, liw= bar.width,
                                         err= 'x', add= TRUE),
                                   gap= 0,
                                   type= 'n')))
  }
  if (.garg(arglist,'spread.estimate') == "boxplot") {
    do.call("boxplot", .farg(.sarg(.get.arglist( 'boxplot', arglist),
                                   x= data.frame(t(show.spread.at.x.values)),
                                   at= .garg(arglist,'show.spread.at'),
                                   add= TRUE, axes= FALSE, horizontal= TRUE),
                             boxwex= 1/(2*(length(.garg(arglist,'show.spread.at'))))))
    do.call("points", .sarg(.get.arglist( 'points', arglist),
                            x= rowMeans(show.spread.at.x.values),
                            y= .garg(arglist,'show.spread.at')))
  }
  do.call( ".plot.performance", .sarg(arglist, perf= perf.avg, avg= 'none', add= TRUE))
}
## ----------------------------------------------------------------------------
## plotting performance objects when threshold curve averaging is wanted
## ----------------------------------------------------------------------------
# .performance.plot.threshold.avg: resample all curves on a common grid of
# cutoff (alpha) values, then average x and y jointly; spread can be shown in
# both directions.
.performance.plot.threshold.avg <- function( perf, ...) {
  arglist <- list(...)
  arglist <- .farg(arglist, show.spread.at= seq(min(unlist(perf@x.values)),
                                                max(unlist(perf@x.values)),
                                                length=11))
  perf.sampled <- perf
  alpha.values <- rev(seq(min(unlist(perf@alpha.values)),
                          max(unlist(perf@alpha.values)),
                          length=max( sapply(perf@alpha.values, length))))
  for (i in 1:length(perf.sampled@y.values)) {
    perf.sampled@x.values[[i]] <- approxfun(perf@alpha.values[[i]],perf@x.values[[i]],
                                            rule=2, ties=mean)(alpha.values)
    perf.sampled@y.values[[i]] <- approxfun(perf@alpha.values[[i]], perf@y.values[[i]],
                                            rule=2, ties=mean)(alpha.values)
  }
  ## compute average curve
  perf.avg <- perf.sampled
  perf.avg@x.values <- list( rowMeans( data.frame( perf.avg@x.values)))
  perf.avg@y.values <- list(rowMeans( data.frame( perf.avg@y.values)))
  perf.avg@alpha.values <- list( alpha.values )
  x.values.spread <- lapply(as.list(1:length(perf@x.values)),
                            function(i) {
                              approxfun(perf@alpha.values[[i]], perf@x.values[[i]],
                                        rule=2, ties=mean)(.garg(arglist,'show.spread.at')) } )
  x.values.spread <- as.matrix(data.frame( x.values.spread ))
  y.values.spread <- lapply(as.list(1:length(perf@y.values)),
                            function(i) {
                              approxfun(perf@alpha.values[[i]], perf@y.values[[i]],
                                        rule=2, ties=mean)(.garg(arglist,'show.spread.at')) } )
  y.values.spread <- as.matrix(data.frame( y.values.spread ))
  if (.garg(arglist,'spread.estimate')=="stddev" ||
      .garg(arglist,'spread.estimate')=="stderror") {
    x.bar.width <- apply(x.values.spread, 1, sd)
    y.bar.width <- apply(y.values.spread, 1, sd)
    if (.garg(arglist,'spread.estimate')=="stderror") {
      x.bar.width <- x.bar.width / sqrt( ncol(x.values.spread) )
      y.bar.width <- y.bar.width / sqrt( ncol(x.values.spread) )
    }
    x.bar.width <- .garg(arglist,'spread.scale') * x.bar.width
    y.bar.width <- .garg(arglist,'spread.scale') * y.bar.width
    suppressWarnings( do.call("plotCI",
                              .farg(.sarg(.get.arglist( 'plotCI', arglist),
                                          x= rowMeans(x.values.spread),
                                          y= rowMeans(y.values.spread),
                                          uiw= x.bar.width, liw= x.bar.width,
                                          err= 'x', add= TRUE),
                                    gap= 0, type= 'n')))
    suppressWarnings( do.call("plotCI",
                              .farg(.sarg(.get.arglist( 'plotCI', arglist),
                                          x= rowMeans(x.values.spread),
                                          y= rowMeans(y.values.spread),
                                          uiw= y.bar.width, liw= y.bar.width,
                                          err= 'y', add= TRUE),
                                    gap= 0, type= 'n')))
  }
  if (.garg(arglist,'spread.estimate')=="boxplot") {
    do.call("boxplot", .farg(.sarg(.get.arglist('boxplot', arglist),
                                   x= data.frame(t(x.values.spread)),
                                   at= rowMeans(y.values.spread),
                                   add= TRUE, axes= FALSE, horizontal= TRUE),
                             boxwex= 1/(2*(length(.garg(arglist,'show.spread.at'))))))
    do.call("boxplot", .farg(.sarg(.get.arglist('boxplot', arglist),
                                   x= data.frame(t(y.values.spread)),
                                   at= rowMeans(x.values.spread),
                                   add= TRUE, axes= FALSE),
                             boxwex= 1/(2*(length(.garg(arglist,'show.spread.at'))))))
    do.call("points", .sarg(.get.arglist('points', arglist),
                            x= rowMeans(x.values.spread),
                            y= rowMeans(y.values.spread)))
  }
  do.call( ".plot.performance", .sarg(arglist, perf= perf.avg, avg= 'none', add= TRUE))
}
eRm/R/performance_measures.R0000744000176000001440000003740511572663323015574 0ustar ripleyusers## ------------------------------------------------------------------------
## classical machine learning contingency table measures
##
## ------------------------------------------------------------------------
## classical machine learning contingency table measures
## ------------------------------------------------------------------------

## All .performance.* measures below share one calling convention: they
## receive the raw 'predictions' and 'labels' plus, aligned with 'cutoffs',
## the per-cutoff confusion counts (fp, tp, fn, tn) and the marginals
## (n.pos, n.neg, n.pos.pred, n.neg.pred).  Each returns list(x, y) where
## x is usually 'cutoffs' (or c() for cutoff-independent measures) and y
## is the measure evaluated at each cutoff.

## Accuracy: fraction of correctly classified samples at each cutoff.
.performance.accuracy <- function(predictions, labels, cutoffs, fp, tp,
                                  fn, tn, n.pos, n.neg, n.pos.pred,
                                  n.neg.pred) {
    list( cutoffs, (tn+tp) / length(predictions) )
}

## Error rate: fraction of misclassified samples (1 - accuracy).
.performance.error.rate <- function(predictions, labels, cutoffs, fp, tp,
                                    fn, tn, n.pos, n.neg, n.pos.pred,
                                    n.neg.pred) {
    list( cutoffs, (fn+fp) / length(predictions) )
}

## False positive rate (fallout): fp / #negatives.
.performance.false.positive.rate <- function(predictions, labels, cutoffs,
                                             fp, tp, fn, tn, n.pos, n.neg,
                                             n.pos.pred, n.neg.pred) {
    list( cutoffs, fp / n.neg )
}

## True positive rate (recall / sensitivity): tp / #positives.
.performance.true.positive.rate <- function(predictions, labels, cutoffs,
                                            fp, tp, fn, tn, n.pos, n.neg,
                                            n.pos.pred, n.neg.pred) {
    list( cutoffs, tp / n.pos )
}

## False negative rate (miss): fn / #positives.
.performance.false.negative.rate <- function(predictions, labels, cutoffs,
                                             fp, tp, fn, tn, n.pos, n.neg,
                                             n.pos.pred, n.neg.pred) {
    list( cutoffs, fn / n.pos )
}

## True negative rate (specificity): tn / #negatives.
.performance.true.negative.rate <- function(predictions, labels, cutoffs,
                                            fp, tp, fn, tn, n.pos, n.neg,
                                            n.pos.pred, n.neg.pred) {
    list( cutoffs, tn / n.neg )
}

## Positive predictive value (precision): tp / #positive predictions.
.performance.positive.predictive.value <- function(predictions, labels,
                                                   cutoffs, fp, tp, fn, tn,
                                                   n.pos, n.neg, n.pos.pred,
                                                   n.neg.pred) {
    ppv <- tp / (fp + tp)
    list( cutoffs, ppv )
}

## Negative predictive value: tn / #negative predictions.
.performance.negative.predictive.value <- function(predictions, labels,
                                                   cutoffs, fp, tp, fn, tn,
                                                   n.pos, n.neg, n.pos.pred,
                                                   n.neg.pred) {
    npv <- tn / (tn + fn)
    list( cutoffs, npv )
}

## Prediction-conditioned fallout: 1 - ppv.
.performance.prediction.conditioned.fallout <- function(predictions, labels,
                                                        cutoffs, fp, tp, fn,
                                                        tn, n.pos, n.neg,
                                                        n.pos.pred,
                                                        n.neg.pred) {
    ppv <- .performance.positive.predictive.value(predictions, labels,
                                                  cutoffs, fp, tp, fn, tn,
                                                  n.pos, n.neg, n.pos.pred,
                                                  n.neg.pred)[[2]]
    list( cutoffs, 1 - ppv )
}

## Prediction-conditioned miss: 1 - npv.
.performance.prediction.conditioned.miss <- function(predictions, labels,
                                                     cutoffs, fp, tp, fn, tn,
                                                     n.pos, n.neg, n.pos.pred,
                                                     n.neg.pred) {
    npv <- .performance.negative.predictive.value(predictions, labels,
                                                  cutoffs, fp, tp, fn, tn,
                                                  n.pos, n.neg, n.pos.pred,
                                                  n.neg.pred)[[2]]
    list( cutoffs, 1 - npv )
}

## ------------------------------------------------------------------------
## ...not actually performance measures, but very useful as a second axis
## against which to plot a "real" performance measure
## (popular example: lift charts)
## ------------------------------------------------------------------------

## Rate of positive predictions: #predicted positive / #samples.
.performance.rate.of.positive.predictions <- function(predictions, labels,
                                                      cutoffs, fp, tp, fn,
                                                      tn, n.pos, n.neg,
                                                      n.pos.pred,
                                                      n.neg.pred) {
    list( cutoffs, n.pos.pred / (n.pos + n.neg) )
}

## Rate of negative predictions: #predicted negative / #samples.
.performance.rate.of.negative.predictions <- function(predictions, labels,
                                                      cutoffs, fp, tp, fn,
                                                      tn, n.pos, n.neg,
                                                      n.pos.pred,
                                                      n.neg.pred) {
    list( cutoffs, n.neg.pred / (n.pos + n.neg) )
}

## ------------------------------------------------------------------------
## Classical statistical contingency table measures
## ------------------------------------------------------------------------

## Phi (Matthews) correlation coefficient of the 2x2 table at each cutoff.
.performance.phi <- function(predictions, labels, cutoffs, fp, tp, fn, tn,
                             n.pos, n.neg, n.pos.pred, n.neg.pred) {
    list(cutoffs,
         (tn*tp - fn*fp) / sqrt(n.pos * n.neg * n.pos.pred * n.neg.pred) )
}

## Mutual information (in bits) between prediction and label at each cutoff.
.performance.mutual.information <- function(predictions, labels, cutoffs,
                                            fp, tp, fn, tn, n.pos, n.neg,
                                            n.pos.pred, n.neg.pred) {
    n.samples <- n.pos + n.neg
    mi <- c()
    for (k in 1:length(cutoffs)) {
        ## observed 2x2 table and the table of marginal products
        kij <- rbind( c(tn[k],fn[k]), c(fp[k],tp[k]) )
        ki.j. <- rbind(c(n.neg * n.neg.pred[k], n.neg.pred[k] * n.pos),
                       c(n.neg * n.pos.pred[k], n.pos * n.pos.pred[k]))
        log.matrix <- log2( kij / ki.j.)
        ## 0 * log(0) is defined as 0 for entropy-type sums
        log.matrix[kij/ki.j.==0] <- 0
        mi <- c(mi, log2(n.samples) + sum( kij * log.matrix) / n.samples )
    }
    list( cutoffs, mi )
}

## Pearson chi-square statistic of the 2x2 table at each cutoff
## (without continuity correction).
.performance.chisq <- function(predictions, labels, cutoffs, fp, tp, fn,
                               tn, n.pos, n.neg, n.pos.pred, n.neg.pred) {
    chisq <- c()
    for (i in 1:length(cutoffs)) {
        A <- rbind( c( tn[i], fn[i]), c(fp[i], tp[i]) )
        ## use FALSE, not the reassignable shorthand F
        chisq <- c(chisq, chisq.test(A, correct=FALSE)$statistic )
    }
    list( cutoffs, chisq )
}

## Odds ratio of the 2x2 table; can be Inf/NaN if fn*fp == 0.
.performance.odds.ratio <- function(predictions, labels, cutoffs, fp, tp,
                                    fn, tn, n.pos, n.neg, n.pos.pred,
                                    n.neg.pred) {
    list( cutoffs, tp * tn / (fn * fp) )
}

## ------------------------------------------------------------------------
## Other measures based on contingency tables
## ------------------------------------------------------------------------

## Lift: recall relative to the rate of positive predictions.
.performance.lift <- function(predictions, labels, cutoffs, fp, tp, fn, tn,
                              n.pos, n.neg, n.pos.pred, n.neg.pred) {
    n.samples <- n.pos + n.neg
    list( cutoffs, (tp / n.pos) / (n.pos.pred / n.samples) )
}

## F measure: weighted harmonic mean of precision and recall; 'alpha' is
## the precision weight (alpha = 0.5 gives the usual F1).
.performance.f <- function(predictions, labels, cutoffs, fp, tp, fn, tn,
                           n.pos, n.neg, n.pos.pred, n.neg.pred, alpha) {
    prec <- .performance.positive.predictive.value(predictions, labels,
                                                   cutoffs, fp, tp, fn, tn,
                                                   n.pos, n.neg, n.pos.pred,
                                                   n.neg.pred)[[2]]
    list( cutoffs, 1/ ( alpha*(1/prec) + (1-alpha)*(1/(tp/n.pos)) ) )
}

## Upper convex hull of the ROC curve; returns list(x, y) of hull points
## sorted by ascending fpr, always including (0,0) and (1,1).
.performance.rocconvexhull <- function(predictions, labels, cutoffs, fp,
                                       tp, fn, tn, n.pos, n.neg, n.pos.pred,
                                       n.neg.pred) {
    x <- fp / n.neg
    y <- tp / n.pos
    finite.bool <- is.finite(x) & is.finite(y)
    x <- x[ finite.bool ]
    y <- y[ finite.bool ]
    if (length(x) < 2) {
        stop("Not enough distinct predictions to compute ROC convex hull.")
    }
    ## keep only points on the convex hull
    ind <- chull(x, y)
    x.ch <- x[ind]
    y.ch <- y[ind]
    ## keep only convex hull points above the diagonal, except (0,0)
    ## and (1,1)
    ind.upper.triangle <- x.ch < y.ch
    x.ch <- c(0, x.ch[ind.upper.triangle], 1)
    y.ch <- c(0, y.ch[ind.upper.triangle], 1)
    ## sort remaining points by ascending x value
    ind <- order(x.ch)
    x.ch <- x.ch[ind]
    y.ch <- y.ch[ind]
    list( x.ch, y.ch )
}

## ----------------------------------------------------------------------------
## Cutoff-independent measures
## ----------------------------------------------------------------------------

## Area under the ROC curve via trapezoidal integration; 'fpr.stop' < 1
## computes the partial AUC up to that false positive rate.
.performance.auc <- function(predictions, labels, cutoffs, fp, tp, fn, tn,
                             n.pos, n.neg, n.pos.pred, n.neg.pred,
                             fpr.stop) {
    x <- fp / n.neg
    y <- tp / n.pos
    finite.bool <- is.finite(x) & is.finite(y)
    x <- x[ finite.bool ]
    y <- y[ finite.bool ]
    if (length(x) < 2) {
        stop(paste("Not enough distinct predictions to compute area",
                   "under the ROC curve."))
    }
    if (fpr.stop < 1) {
        ## truncate the curve at fpr.stop, interpolating the tpr there
        ind <- max(which( x <= fpr.stop ))
        tpr.stop <- approxfun( x[ind:(ind+1)], y[ind:(ind+1)] )(fpr.stop)
        x <- c(x[1:ind], fpr.stop)
        y <- c(y[1:ind], tpr.stop)
    }
    ans <- list()
    auc <- 0
    ## trapezoidal rule over consecutive ROC points
    for (i in 2:length(x)) {
        auc <- auc + 0.5 * (x[i] - x[i-1]) * (y[i] + y[i-1])
    }
    ans <- list( c(), auc)
    names(ans) <- c("x.values","y.values")
    return(ans)
}

## Precision/recall break-even points: cutoffs where precision == recall.
## Relies on the package's prediction()/performance() machinery.
.performance.precision.recall.break.even.point <- function(predictions,
                                                           labels, cutoffs,
                                                           fp, tp, fn, tn,
                                                           n.pos, n.neg,
                                                           n.pos.pred,
                                                           n.neg.pred) {
    pred <- prediction( predictions, labels)
    perf <- performance( pred, measure="prec", x.measure="rec")
    x <- rev(perf@x.values[[1]])
    y <- rev(perf@y.values[[1]])
    alpha <- rev(perf@alpha.values[[1]])
    finite.bool <- is.finite(alpha) & is.finite(x) & is.finite(y)
    x <- x[ finite.bool ]
    y <- y[ finite.bool ]
    alpha <- alpha[ finite.bool ]
    if (length(x) < 2) {
        stop(paste("Not enough distinct predictions to compute",
                   "precision/recall intersections."))
    }
    intersection.cutoff <- c()
    intersection.pr <- c()
    ## find all intersection points by looking at all intervals (i,i+1):
    ## if the difference function between x and y has different signs at the
    ## interval boundaries, then an intersection point is in the interval;
    ## compute as the root of the difference function
    if ( (x[1]-y[1]) == 0) {
        intersection.cutoff <- c( alpha[1] )
        intersection.pr <- c( x[1] )
    }
    for (i in (1:(length(alpha)-1))) {
        if ((x[i+1]-y[i+1]) == 0) {
            intersection.cutoff <- c( intersection.cutoff, alpha[i+1] )
            intersection.pr <- c( intersection.pr, x[i+1] )
        } else if ((x[i]-y[i])*(x[i+1]-y[i+1]) < 0 ) {
            ans <- uniroot(approxfun(c(alpha[i], alpha[i+1] ),
                                     c(x[i]-y[i], x[i+1]-y[i+1])),
                           c(alpha[i],alpha[i+1]))
            ## NOTE(review): ans$f.root is the residual of the difference
            ## function (~0), not the precision at the root — looks
            ## suspicious, but kept as-is; verify against upstream ROCR.
            intersection.cutoff <- c(intersection.cutoff, ans$root)
            intersection.pr <- c(intersection.pr, ans$f.root)
        }
    }
    list( rev(intersection.cutoff), rev(intersection.pr) )
}

## Sliding-window calibration error: |fraction of positives - mean
## prediction| over windows of 'window.size' sorted predictions; x values
## are the window medians.  Requires predictions in [0,1].
.performance.calibration.error <- function(predictions, labels, cutoffs,
                                           fp, tp, fn, tn, n.pos, n.neg,
                                           n.pos.pred, n.neg.pred,
                                           window.size) {
    if (window.size > length(predictions)) {
        stop("Window size exceeds number of predictions.")
    }
    if (min(predictions)<0 || max(predictions)>1) {
        stop("Calibration error needs predictions between 0 and 1")
    }
    ## second factor level is treated as the positive class
    pos.label <- levels(labels)[2]
    ## sort predictions (and labels alongside) in decreasing order
    ordering <- rev(order( predictions ))
    predictions <- predictions[ ordering ]
    labels <- labels[ ordering ]
    median.cutoffs <- c()
    calibration.errors <- c()
    for (left.index in 1 : (length(predictions) - window.size+1) ) {
        right.index <- left.index + window.size - 1
        pos.fraction <-
            sum(labels[left.index : right.index] == pos.label) / window.size
        mean.prediction <- mean( predictions[ left.index : right.index ] )
        calibration.errors <- c(calibration.errors,
                                abs(pos.fraction - mean.prediction))
        median.cutoffs <- c(median.cutoffs,
                            median(predictions[left.index:right.index]))
    }
    list( median.cutoffs, calibration.errors )
}

## Mean cross-entropy; requires factor labels with levels 0/1 and
## predictions in [0,1].
.performance.mean.cross.entropy <- function(predictions, labels, cutoffs,
                                            fp, tp, fn, tn, n.pos, n.neg,
                                            n.pos.pred, n.neg.pred) {
    if (! all(levels(labels)==c(0,1)) || any(predictions<0) ||
        any(predictions>1) ) {
        stop(paste("Class labels need to be 0 and 1 and predictions between",
                   "0 and 1 for mean cross entropy."))
    }
    pos.label <- levels(labels)[2]
    neg.label <- levels(labels)[1]
    list( c(),
          - 1/length(predictions) *
          (sum( log( predictions[which(labels==pos.label)] )) +
           sum( log( 1 - predictions[which(labels==neg.label)] ))) )
}

## Root-mean-square error between predictions and (numeric-coded) labels.
.performance.root.mean.squared.error <- function(predictions, labels,
                                                 cutoffs, fp, tp, fn, tn,
                                                 n.pos, n.neg, n.pos.pred,
                                                 n.neg.pred) {
    ## convert labels from factor to numeric values
    labels <- as.numeric(levels(labels))[labels]
    if (any(is.na(labels))) {
        ## message corrected: it is the labels that failed numeric conversion
        stop("For rmse labels have to be numeric.")
    }
    list( c(),
          sqrt( 1/length(predictions) * sum( (predictions - labels)^2 )) )
}

## ----------------------------------------------------------------------------
## Derived measures:
## ----------------------------------------------------------------------------

## SAR: mean of accuracy, (1 - rmse) and AUC; delegates to performance().
.performance.sar <- function( predictions, labels, cutoffs, fp, tp, fn, tn,
                              n.pos, n.neg, n.pos.pred, n.neg.pred) {
    pred <- prediction( predictions, labels)
    perf.acc <- performance( pred, measure="acc")
    perf.rmse <- performance( pred, measure="rmse")
    perf.auc <- performance( pred, measure="auc")
    list(cutoffs,
         1/3 * (perf.acc@y.values[[1]] + (1 - perf.rmse@y.values[[1]]) +
                perf.auc@y.values[[1]]))
}

## ----------------------------------------------------------------------------
## Measures taking into account actual cost considerations
## ----------------------------------------------------------------------------

## Lower envelope of the cost lines (cost curve) over all ROC points.
## Uses the helpers .construct.linefunct / .intersection.point defined
## elsewhere in the package.
.performance.expected.cost <- function(predictions, labels, cutoffs, fp,
                                       tp, fn, tn, n.pos, n.neg, n.pos.pred,
                                       n.neg.pred) {
    ## kick out suboptimal values (i.e. fpr/tpr pair for which another one
    ## with same fpr and higher tpr exists,
    ## or one for which one with same tpr but lower fpr exists
    if (n.neg==0 || n.pos==0) {
        stop(paste("At least one positive and one negative sample are",
                   "needed to compute a cost curve."))
    }
    fpr <- fp / n.neg
    tpr <- tp / n.pos
    ## sort by fpr (ascending), in case of ties by descending tpr
    ind <- order(fpr,-tpr)
    fpr <- fpr[ind]
    tpr <- tpr[ind]
    ## for tied fprs, only the one with the highest tpr is kept
    ind <- !duplicated(fpr)
    fpr <- fpr[ind]
    tpr <- tpr[ind]
    ## for tied tprs, only keep the one with the lowest fpr
    ind <- order(-tpr,fpr)
    fpr <- fpr[ind]
    tpr <- tpr[ind]
    ind <- !duplicated(tpr)
    fpr <- fpr[ind]
    tpr <- tpr[ind]
    ## make sure the trivial endpoints (0,0) and (1,1) are present
    if (!any(0==fpr & 0==tpr)) {
        fpr <- c(0,fpr)
        tpr <- c(0,tpr)
    }
    if (!any(1==fpr & 1==tpr)) {
        fpr <- c(fpr,1)
        tpr <- c(tpr,1)
    }
    ## compute all functions
    f <- list()
    for (i in 1:length(fpr)) {
        f <- c(f, .construct.linefunct( 0, fpr[i], 1, 1-tpr[i] ))
    }
    ## compute all intersection points; keep only those lying on the
    ## lower envelope of all cost lines
    x.values <- c()
    y.values <- c()
    for (i in 1:(length(fpr)-1)) {
        for (j in (i+1):length(fpr)) {
            ans <- .intersection.point( f[[i]], f[[j]] )
            if (all(is.finite(ans))) {
                y.values.at.current.x <- c()
                for (k in 1:length(f)) {
                    y.values.at.current.x <- c(y.values.at.current.x,
                                               f[[k]](ans[1]))
                }
                if (abs(ans[2] - min(y.values.at.current.x )) <
                    sqrt(.Machine$double.eps)) {
                    x.values <- c(x.values, ans[1])
                    y.values <- c(y.values, ans[2])
                }
            }
        }
    }
    if (!any(0==x.values & 0==y.values)) {
        x.values <- c(0,x.values)
        y.values <- c(0,y.values)
    }
    if (!any(1==x.values & 0==y.values)) {
        x.values <- c(x.values,1)
        y.values <- c(y.values,0)
    }
    ind <- order( x.values)
    list( x.values[ind], y.values[ind] )
}

## Explicit misclassification cost per cutoff, with unit costs 'cost.fp'
## and 'cost.fn' for false positives and false negatives.
.performance.cost <- function(predictions, labels, cutoffs, fp, tp, fn, tn,
                              n.pos, n.neg, n.pos.pred, n.neg.pred,
                              cost.fp, cost.fn) {
    n.samples <- n.pos + n.neg
    cost <- ((n.pos / n.samples) * (fn / n.pos) * cost.fn +
             (n.neg / n.samples) * (fp / n.neg) * cost.fp)
    list( cutoffs, cost )
}
eRm/R/performance.R0000744000176000001440000003474511572663323013700 0ustar ripleyusersperformance <- function(prediction.obj, measure, x.measure="cutoff", ...) { ## define the needed environments envir.list <- .define.environments() long.unit.names <- envir.list$long.unit.names function.names <- envir.list$function.names obligatory.x.axis <- envir.list$obligatory.x.axis optional.arguments <- envir.list$optional.arguments default.values <- envir.list$default.values ## abort in case of misuse if (class(prediction.obj) != 'prediction' || !exists(measure, where=long.unit.names, inherits=FALSE) || !exists(x.measure, where=long.unit.names, inherits=FALSE)) { stop(paste("Wrong argument types: First argument must be of type", "'prediction'; second and optional third argument must", "be available performance measures!")) } ## abort, if attempt is made to use a measure that has an obligatory ## x.axis as the x.measure (cannot be combined) if (exists( x.measure, where=obligatory.x.axis, inherits=FALSE )) { message <- paste("The performance measure", x.measure, "can only be used as 'measure', because it has", "the following obligatory 'x.measure':\n", get( x.measure, envir=obligatory.x.axis)) stop(message) } ## if measure is a performance measure with obligatory x.axis, then ## enforce this axis: if (exists( measure, where=obligatory.x.axis, inherits=FALSE )) { x.measure <- get( measure, envir=obligatory.x.axis ) } if (x.measure == "cutoff" || exists( measure, where=obligatory.x.axis, inherits=FALSE )) { ## fetch from '...' any optional arguments for the performance ## measure at hand that are given, otherwise fill up the default values optional.args <- list(...) 
argnames <- c() if ( exists( measure, where=optional.arguments, inherits=FALSE )) { argnames <- get( measure, envir=optional.arguments ) default.arglist <- list() for (i in 1:length(argnames)) { default.arglist <- c(default.arglist, get(paste(measure,":",argnames[i],sep=""), envir=default.values, inherits=FALSE)) } names(default.arglist) <- argnames for (i in 1:length(argnames)) { templist <- list(optional.args, default.arglist[[i]]) names(templist) <- c('arglist', argnames[i]) optional.args <- do.call('.farg', templist) } } optional.args <- .select.args( optional.args, argnames ) ## determine function name function.name <- get( measure, envir=function.names ) ## for each x-validation run, compute the requested performance measure x.values <- list() y.values <- list() for (i in 1:length( prediction.obj@predictions )) { argumentlist <- .sarg(optional.args, predictions= prediction.obj@predictions[[i]], labels= prediction.obj@labels[[i]], cutoffs= prediction.obj@cutoffs[[i]], fp= prediction.obj@fp[[i]], tp= prediction.obj@tp[[i]], fn= prediction.obj@fn[[i]], tn= prediction.obj@tn[[i]], n.pos= prediction.obj@n.pos[[i]], n.neg= prediction.obj@n.neg[[i]], n.pos.pred= prediction.obj@n.pos.pred[[i]], n.neg.pred= prediction.obj@n.neg.pred[[i]]) ans <- do.call( function.name, argumentlist ) if (!is.null(ans[[1]])) x.values <- c( x.values, list( ans[[1]] )) y.values <- c( y.values, list( ans[[2]] )) } if (! (length(x.values)==0 || length(x.values)==length(y.values)) ) { stop("Consistency error.") } ## create a new performance object return( new("performance", x.name = get( x.measure, envir=long.unit.names ), y.name = get( measure, envir=long.unit.names ), alpha.name = "none", x.values = x.values, y.values = y.values, alpha.values = list() )) } else { perf.obj.1 <- performance( prediction.obj, measure=x.measure, ... ) perf.obj.2 <- performance( prediction.obj, measure=measure, ... 
) return( .combine.performance.objects( perf.obj.1, perf.obj.2 ) ) } } .combine.performance.objects <- function( p.obj.1, p.obj.2 ) { ## some checks for misusage (in any way, this function is ## only for internal use) if ( p.obj.1@x.name != p.obj.2@x.name ) { stop("Error: Objects need to have identical x axis.") } if ( p.obj.1@alpha.name != "none" || p.obj.2@alpha.name != "none") { stop("Error: At least one of the two objects has already been merged.") } if (length(p.obj.1@x.values) != length(p.obj.2@x.values)) { stop(paste("Only performance objects with identical number of", "cross-validation runs can be combined.")) } x.values <- list() x.name <- p.obj.1@y.name y.values <- list() y.name <- p.obj.2@y.name alpha.values <- list() alpha.name <- p.obj.1@x.name for (i in 1:length( p.obj.1@x.values )) { x.values.1 <- p.obj.1@x.values[[i]] y.values.1 <- p.obj.1@y.values[[i]] x.values.2 <- p.obj.2@x.values[[i]] y.values.2 <- p.obj.2@y.values[[i]] ## cutoffs of combined object = merged cutoffs of simple objects cutoffs <- sort( unique( c(x.values.1, x.values.2)), decreasing=TRUE ) ## calculate y.values at cutoffs using step function y.values.int.1 <- approxfun(x.values.1, y.values.1, method="constant",f=1,rule=2)(cutoffs) y.values.int.2 <- approxfun(x.values.2, y.values.2, method="constant",f=1,rule=2)(cutoffs) ## 'approxfun' ignores NA and NaN objs <- list( y.values.int.1, y.values.int.2) objs.x <- list( x.values.1, x.values.2 ) na.cutoffs.1.bool <- is.na( y.values.1) & !is.nan( y.values.1 ) nan.cutoffs.1.bool <- is.nan( y.values.1) na.cutoffs.2.bool <- is.na( y.values.2) & !is.nan( y.values.2 ) nan.cutoffs.2.bool <- is.nan( y.values.2) bools <- list(na.cutoffs.1.bool, nan.cutoffs.1.bool, na.cutoffs.2.bool, nan.cutoffs.2.bool) values <- c(NA,NaN,NA,NaN) for (j in 1:4) { for (k in which(bools[[j]])) { interval.max <- objs.x[[ ceiling(j/2) ]][k] interval.min <- -Inf if (k < length(objs.x[[ ceiling(j/2) ]])) { interval.min <- objs.x[[ ceiling(j/2) ]][k+1] } objs[[ 
ceiling(j/2) ]][cutoffs <= interval.max & cutoffs > interval.min ] <- values[j] } } alpha.values <- c(alpha.values, list(cutoffs)) x.values <- c(x.values, list(objs[[1]])) y.values <- c(y.values, list(objs[[2]])) } return( new("performance", x.name=x.name, y.name=y.name, alpha.name=alpha.name, x.values=x.values, y.values=y.values, alpha.values=alpha.values)) } .define.environments <- function() { ## There are five environments: long.unit.names, function.names, ## obligatory.x.axis, optional.arguments, default.values ## Define long names corresponding to the measure abbreviations. long.unit.names <- new.env() assign("none","None", envir=long.unit.names) assign("cutoff", "Cutoff", envir=long.unit.names) assign("acc", "Accuracy", envir=long.unit.names) assign("err", "Error Rate", envir=long.unit.names) assign("fpr", "False positive rate", envir=long.unit.names) assign("tpr", "True positive rate", envir=long.unit.names) assign("rec", "Recall", envir=long.unit.names) assign("sens", "Sensitivity", envir=long.unit.names) assign("fnr", "False negative rate", envir=long.unit.names) assign("tnr", "True negative rate", envir=long.unit.names) assign("spec", "Specificity", envir=long.unit.names) assign("ppv", "Positive predictive value", envir=long.unit.names) assign("prec", "Precision", envir=long.unit.names) assign("npv", "Negative predictive value", envir=long.unit.names) assign("fall", "Fallout", envir=long.unit.names) assign("miss", "Miss", envir=long.unit.names) assign("pcfall", "Prediction-conditioned fallout", envir=long.unit.names) assign("pcmiss", "Prediction-conditioned miss", envir=long.unit.names) assign("rpp", "Rate of positive predictions", envir=long.unit.names) assign("rnp", "Rate of negative predictions", envir=long.unit.names) assign("auc","Area under the ROC curve", envir=long.unit.names) assign("cal", "Calibration error", envir=long.unit.names) assign("mwp", "Median window position", envir=long.unit.names) assign("prbe","Precision/recall break-even point", 
envir=long.unit.names) assign("rch", "ROC convex hull", envir=long.unit.names) assign("mxe", "Mean cross-entropy", envir=long.unit.names) assign("rmse","Root-mean-square error", envir=long.unit.names) assign("phi", "Phi correlation coefficient", envir=long.unit.names) assign("mat","Matthews correlation coefficient", envir=long.unit.names) assign("mi", "Mutual information", envir=long.unit.names) assign("chisq", "Chi-square test statistic", envir=long.unit.names) assign("odds","Odds ratio", envir=long.unit.names) assign("lift", "Lift value", envir=long.unit.names) assign("f","Precision-Recall F measure", envir=long.unit.names) assign("sar", "SAR", envir=long.unit.names) assign("ecost", "Expected cost", envir=long.unit.names) assign("cost", "Explicit cost", envir=long.unit.names) ## Define function names corresponding to the measure abbreviations. function.names <- new.env() assign("acc", ".performance.accuracy", envir=function.names) assign("err", ".performance.error.rate", envir=function.names) assign("fpr", ".performance.false.positive.rate", envir=function.names) assign("tpr", ".performance.true.positive.rate", envir=function.names) assign("rec", ".performance.true.positive.rate", envir=function.names) assign("sens", ".performance.true.positive.rate", envir=function.names) assign("fnr", ".performance.false.negative.rate", envir=function.names) assign("tnr", ".performance.true.negative.rate", envir=function.names) assign("spec", ".performance.true.negative.rate", envir=function.names) assign("ppv", ".performance.positive.predictive.value", envir=function.names) assign("prec", ".performance.positive.predictive.value", envir=function.names) assign("npv", ".performance.negative.predictive.value", envir=function.names) assign("fall", ".performance.false.positive.rate", envir=function.names) assign("miss", ".performance.false.negative.rate", envir=function.names) assign("pcfall", ".performance.prediction.conditioned.fallout", envir=function.names) assign("pcmiss", 
".performance.prediction.conditioned.miss", envir=function.names) assign("rpp", ".performance.rate.of.positive.predictions", envir=function.names) assign("rnp", ".performance.rate.of.negative.predictions", envir=function.names) assign("auc", ".performance.auc", envir=function.names) assign("cal", ".performance.calibration.error", envir=function.names) assign("prbe", ".performance.precision.recall.break.even.point", envir=function.names) assign("rch", ".performance.rocconvexhull", envir=function.names) assign("mxe", ".performance.mean.cross.entropy", envir=function.names) assign("rmse", ".performance.root.mean.squared.error", envir=function.names) assign("phi", ".performance.phi", envir=function.names) assign("mat", ".performance.phi", envir=function.names) assign("mi", ".performance.mutual.information", envir=function.names) assign("chisq", ".performance.chisq", envir=function.names) assign("odds", ".performance.odds.ratio", envir=function.names) assign("lift", ".performance.lift", envir=function.names) assign("f", ".performance.f", envir=function.names) assign("sar", ".performance.sar", envir=function.names) assign("ecost", ".performance.expected.cost", envir=function.names) assign("cost", ".performance.cost", envir=function.names) ## If a measure comes along with an obligatory x axis (including "none"), ## list it here. obligatory.x.axis <- new.env() assign("mxe", "none", envir=obligatory.x.axis) assign("rmse", "none", envir=obligatory.x.axis) assign("prbe", "none", envir=obligatory.x.axis) assign("auc", "none", envir=obligatory.x.axis) assign("rch","none", envir=obligatory.x.axis) ## ecost requires probability cost function as x axis, which is handled ## implicitly, not as an explicit performance measure. assign("ecost","none", envir=obligatory.x.axis) ## If a measure has optional arguments, list the names of the ## arguments here. 
optional.arguments <- new.env() assign("cal", "window.size", envir=optional.arguments) assign("f", "alpha", envir=optional.arguments) assign("cost", c("cost.fp", "cost.fn"), envir=optional.arguments) assign("auc", "fpr.stop", envir=optional.arguments) ## If a measure has additional arguments, list the default values ## for them here. Naming convention: e.g. "cal" has an optional ## argument "window.size" the key to use here is "cal:window.size" ## (colon as separator) default.values <- new.env() assign("cal:window.size", 100, envir=default.values) assign("f:alpha", 0.5, envir=default.values) assign("cost:cost.fp", 1, envir=default.values) assign("cost:cost.fn", 1, envir=default.values) assign("auc:fpr.stop", 1, envir=default.values) list(long.unit.names=long.unit.names, function.names=function.names, obligatory.x.axis=obligatory.x.axis, optional.arguments=optional.arguments, default.values=default.values) } eRm/R/PCM.R0000744000176000001440000000352111572663323012002 0ustar ripleyusers`PCM` <- function(X, W, se = TRUE, sum0 = TRUE, etaStart) { #...X: person*item scores matrix (starting from 0) #-------------------main programm------------------- call<-match.call() model <- "PCM" groupvec <- 1 mpoints <- 1 if (missing(W)) W <- NA else W <- as.matrix(W) if (missing(etaStart)) etaStart <- NA else etaStart <- as.vector(etaStart) XWcheck <- datcheck(X,W,mpoints,groupvec,model) #inital check of X and W X <- XWcheck$X lres <- likLR(X,W,mpoints,groupvec,model,st.err=se,sum0,etaStart) parest <- lres$parest #full groups for parameter estimation loglik <- -parest$minimum #log-likelihood value iter <- parest$iterations #number of iterations convergence <- parest$code etapar <- parest$estimate #eta estimates betapar <- as.vector(lres$W%*% etapar) #beta estimates if (se) { se.eta <- sqrt(diag(solve(parest$hessian))) #standard errors se.beta <- sqrt(diag(lres$W%*%solve(parest$hessian)%*%t(lres$W))) #se beta } else { se.eta <- rep(NA,length(etapar)) se.beta <- 
rep(NA,length(betapar)) } X01 <- lres$X01 labs <- labeling.internal(model,X,X01,lres$W,etapar,betapar,mpoints,max(groupvec)) #labeling for L-models W <- labs$W etapar <- labs$etapar betapar <- labs$betapar etapar <- -etapar # output difficulty rh 25-03-2010 npar <- dim(lres$W)[2] #number of parameters result <- list(X=X,X01=X01,model=model,loglik=loglik,npar=npar,iter=iter,convergence=convergence, etapar=etapar,se.eta=se.eta,hessian=parest$hessian,betapar=betapar, se.beta=se.beta,W=W,call=call) class(result) <- c("Rm","eRm") #classes: simple RM and extended RM result } eRm/R/NPtest.R0000744000176000001440000003750211572663323012606 0ustar ripleyusersNPtest<-function(obj, n=NULL, method="T1", ...){ if (is.matrix(obj) || is.data.frame(obj)){ # input is datamatrix - RaschSampler object is generated if (!all(obj %in% 0:1)) stop("Data matrix must be binary, NAs not allowed") itscor<-colSums(obj) # rh 2011-03-03 itcol<-(itscor==0|itscor==nrow(obj)) if (any(itcol)){ cat("The following columns in the data show complete 0/full responses: \n") cat((1:ncol(obj))[itcol],sep=", ") cat("\n") stop("NPtest using these items is meaningless. Delete them first!") } if (is.null(n)) n <- 500 obj<-rsampler(obj,rsctrl(burn_in=256, n_eff=n, step=32)) } switch(method, "T1"=T1(obj), "T2"=T2(obj, ...), "T4"=T4(obj, ...), "T7"=T7(obj, ...), "T7a"=T7a(obj, ...), "T10"=T10(obj, ...), "T11"=T11(obj), "MLoef"=MLoef.x(obj, ...) 
############################################### ) } MLoef.x<-function(rsobj, splitcr=NULL){ # user function MLexact<-function(X,splitcr){ rmod<-RM(X) LR<-MLoef(rmod,splitcr)$LR LR } #if(!exists("splitcr")) splitcr="median" if(is.null(splitcr)) splitcr="median" res <- rstats(rsextrobj(rsobj, 2), MLexact, splitcr) rmod<-RM(rsextrmat(rsobj,1)) # MLoef for original data MLres<-MLoef(rmod,splitcr) class(MLres)<-c(class(MLres),"MLx") # for printing without blank line res1<-MLres$LR n_eff<-rsobj$n_eff # number of simulated matrices res<-unlist(res) prop<-sum((res[1:n_eff]>=res1)/n_eff) result<-list(MLres=MLres, n_eff=n_eff, prop=prop, MLoefvec=res) # MLobj class(result)<-"MLobj" result } T1<-function(rsobj){ T1stat<-function(x){ # calculates statistic T1 unlist(lapply(1:(k-1),function(i) lapply((i+1):k, function(j) sum(x[,i]==x[,j])))) } n_eff<-rsobj$n_eff # number of simulated matrices n_tot<-rsobj$n_tot # number of simulated matrices k<-rsobj$k # number of columns of matrices res<-rstats(rsobj,T1stat) # calculates statistic for each matrix res<-do.call(cbind, lapply(res,as.vector)) # converts result list to matrix T1vec<-apply(res, 1, function(x) sum(x[2:(n_tot)]>=x[1])/n_eff) T1mat<-matrix(,k,k) T1mat[lower.tri(T1mat)] <- T1vec # lower triangular matrix of p-values result<-list(n_eff=n_eff, prop=T1vec, T1mat=T1mat) # T1obj class(result)<-"T1obj" result } T2<-function(rsobj,idx=NULL,stat="var"){ T2.Var.stat<-function(x){ # calculates statistic T2 var(rowSums(x[,idx, drop=FALSE])) } T2.MAD1.stat<-function(x){ # calculates statistic T2 y<-rowSums(x[,idx, drop=FALSE]) # mean absolute deviation mean(abs(y-mean(y))) } T2.MAD2.stat<-function(x){ # calculates statistic T2 mad(rowSums(x[,idx, drop=FALSE]),constant=1) # unscaled median absolute deviation } T2.Range.stat<-function(x){ # calculates statistic T2 diff(range(rowSums(x[,idx, drop=FALSE]))) } n<-rsobj$n n_eff<-rsobj$n_eff k<-rsobj$k # number of columns of matrices if(is.null(idx)) stop("No item(s) for subscale 
specified (use idx!)") res<-switch(stat, "var"=rstats(rsobj,T2.Var.stat), "mad1"=rstats(rsobj,T2.MAD1.stat), "mad2"=rstats(rsobj,T2.MAD2.stat), "range"=rstats(rsobj,T2.Range.stat), stop("stat must be one of \"var\", \"mad1\", \"mad2\", \"range\"") ) res<-unlist(res) prop<-sum(res[2:(n_eff+1)]>=res[1])/n_eff result<-list(n_eff=n_eff, prop=prop, idx=idx, stat=stat, T2vec=res) # T2obj class(result)<-"T2obj" result } T4<-function(rsobj,idx=NULL,group=NULL,alternative="high"){ T4.stat<-function(x){ # calculates statistic T4 sign*sum(rowSums(x[gr,idx,drop=FALSE])) } n_eff<-rsobj$n_eff # number of simulated matrices n_tot<-rsobj$n_tot # number of all matrices k<-rsobj$k # number of items if(is.null(idx)) stop("No item(s) for subscale specified (use idx!)") if(length(idx)==k) # rh 2011-03-03 stop("Subscale containing all items gives meaningless results for T4.") if(is.null(group)) stop("No group specified (use group!)") if(!is.logical(group)) # added rh 2011-03-03 stop("group must be of type \"logical\" (e.g., group = (age==1) )") if(alternative=="high") sign <- 1 else if(alternative=="low") sign <- -1 else stop("alternative incorrectly specified! 
(use either \"high\" or \"low\")") gr<-as.logical(group) # group definition (logical) res<-rstats(rsobj,T4.stat) res<-unlist(res) prop<-sum(res[2:(n_tot)]>=res[1])/n_eff gr.nam <- deparse(substitute(group)) gr.n <- sum(group) result<-list(n_eff=n_eff, prop=prop, idx=idx, gr.nam=gr.nam, gr.n=gr.n, T4vec=res, alternative=alternative) # T4obj class(result)<-"T4obj" result } T7<-function(rsobj,idx=NULL){ T7.stat<-function(x){ # calculates statistic T7 calcT7<-function(i,j){ # calculates sum for all items in subscale if(sitscor[i]>sitscor[j]){ sum(submat[,j]>submat[,i]) # # t<-table(submat[,i],submat[,j]) # odds ratio gives the same result # OR<-t[1]*t[4]/(t[2]*t[3]) # 1/OR } else NA } submat<-x[,idx] submat<-submat[,order(itscor,decreasing=TRUE)] RET<-unlist(lapply(1:(m-1), function(i) lapply((i+1):m, function(j) calcT7(i,j)))) RET } n_eff<-rsobj$n_eff # number of simulated matrices n_tot<-rsobj$n_tot # number of all matrices k<-rsobj$k # number of items if(is.null(idx)) stop("No items for subscale specified (use idx!)") else if (length(idx)<2) stop("At least 2 items have to be specified with idx!") submat<-rsextrmat(rsobj,1)[,idx] itscor<-colSums(submat) names(itscor)<-colnames(submat)<-idx submat<-submat[,order(itscor,decreasing=TRUE)] sitscor<-sort(itscor,decreasing=TRUE) # sorted itemscore m<-length(itscor) resList<-rstats(rsobj,T7.stat) res<-sapply(resList,sum,na.rm=TRUE) prop<-sum(res[2:(n_eff+1)]>=res[1])/n_eff result<-list(n_eff=n_eff, prop=prop, itscor=itscor, T7vec=res) # T7obj class(result)<-"T7obj" result } T7a<-function(rsobj,idx=NULL){ T7a.stat<-function(x){ # calculates statistic T7a calcT7a<-function(i,j){ # calculates sum for single Itempair if(sitscor[i]>sitscor[j]){ sum(submat[,j]>submat[,i]) # # t<-table(submat[,i],submat[,j]) # odds ratio gives the same result # OR<-t[1]*t[4]/(t[2]*t[3]) # 1/OR } else NA } submat<-x[,idx] submat<-submat[,order(itscor,decreasing=TRUE)] RET<-unlist(lapply(1:(m-1), function(i) lapply((i+1):m, function(j) 
calcT7a(i,j)))) RET } n_eff<-rsobj$n_eff # number of simulated matrices n_tot<-rsobj$n_tot # number of all matrices k<-rsobj$k # number of items if(is.null(idx)) stop("No items for subscale specified (use idx!)") else if (length(idx)<2) stop("At least 2 items have to be specified with idx!") submat<-rsextrmat(rsobj,1)[,idx] itscor<-colSums(submat) names(itscor)<-colnames(submat)<-idx submat<-submat[,order(itscor,decreasing=TRUE)] sitscor<-sort(itscor,decreasing=TRUE) # sorted itemscore m<-length(itscor) res<-rstats(rsobj,T7a.stat) res<-do.call(cbind, lapply(res,as.vector)) # converts result list to matrix T7avec<-apply(res, 1, function(x) sum(x[2:(n_tot)]>=x[1])/n_eff) T7anam<-NULL for (i in 1:(m-1)) for(j in (i+1):m ) T7anam<-c(T7anam, paste("(",names(sitscor[i]),">",names(sitscor[j]),")",sep="",collapse="")) names(T7avec)<-T7anam result<-list(n_eff=n_eff, prop=T7avec,itscor=itscor) # T7aobj class(result)<-"T7aobj" result } T10<-function(rsobj, splitcr="median"){ calc.groups<-function(x,splitcr){ if (length(splitcr) > 1) { # numeric vectors converted to factors if (length(splitcr) != nrow(x)) { stop("Mismatch between length of split vector and number of persons!") } splitcr <- as.factor(splitcr) if (length(levels(splitcr))>2) { stop("Split vector defines more than 2 groups (only two allowed)!") } spl.lev <- levels(splitcr) #spl.gr <- paste(spl.nam, spl.lev, sep = " ") # not necessary for the time being hi <- splitcr==spl.lev[1] # first level is high group } else if (!is.numeric(splitcr)) { spl.nam <- splitcr if (splitcr == "median") { spl.gr <- c("Raw Scores <= Median", "Raw Scores > Median") rv <- rowSums(x) rvsplit <- median(rv) hi <- rv > rvsplit } if (splitcr == "mean") { spl.gr <- c("Raw Scores < Mean", "Raw Scores >= Mean") rv <- rowSums(x) rvsplit <- mean(rv) hi <- rv > rvsplit } } list(hi=hi,spl.nam=spl.nam) # spl.nam is returned due to lex scoping even if not defined here } T10.stat<-function(x){ # calculates statistic T10 for one matrix 
nij.hi<-unlist(lapply(1:k,function(i) lapply(1:k, function(j) sum(x[hi,i]>x[hi,j])))) nij.low<-unlist(lapply(1:k,function(i) lapply(1:k, function(j) sum(x[!hi,i]>x[!hi,j])))) nji.hi<- unlist(lapply(1:k,function(i) lapply(1:k, function(j) sum(x[hi,i]=res[1])/n_eff result<-list(n_eff=n_eff, prop=prop,spl.nam=ans$spl.nam,hi.n=hi.n,low.n=low.n,T10vec=res) # T10obj class(result)<-"T10obj" result } T11<-function(rsobj){ T11.stat<-function(x){ as.vector(cor(x)) } calc.T11<-function(x){ # calculates statistic T11 for one matrix sum(abs(x-rho)) } n_eff<-rsobj$n_eff # number of simulated matrices n_tot<-rsobj$n_tot # number of all matrices k<-rsobj$k # number of columns of matrices res<-rstats(rsobj,T11.stat) # for each matrix calculate all r_ij's cormats <- matrix(unlist(res),nrow=k*k) # k*k x n_tot matrix, each colum contains one corr matrix rho<-apply(cormats[,2:n_tot],1,mean) # vector of estimated "real" rho_ij's T11obs<-calc.T11(cormats[,1]) # vector of observed r_ij's prop<-sum(apply(cormats[, 2:n_tot],2,calc.T11)>=T11obs)/n_eff result<-list(n_eff=n_eff, prop=prop, T11r=cormats[,1], T11rho=rho) # T11obj class(result)<-"T11obj" result } print.MLobj<-function(x,...){ print(x$MLres) cat("'exact' p-value =", x$prop, " (based on", x$n_eff, "sampled matrices)\n\n") } print.T1obj<-function(x,alpha=0.05,...){ txt1<-"\nNonparametric RM model test: T1 (local dependence - increased inter-item correlations)\n" writeLines(strwrap(txt1, exdent=5)) cat(" (counting cases with equal responses on both items)\n") cat("Number of sampled matrices:", x$n_eff,"\n") cat("Number of Item-Pairs tested:", length(x$prop),"\n") cat("Item-Pairs with one-sided p <", alpha,"\n") T1mat<-x$T1mat idx<-which(T1mat0) print(round(val,digits=3)) else cat("none\n\n") } print.T2obj<-function(x,...){ prop<-x$prop idx<-x$idx stat<-x$stat statnam<-switch(stat, "var"="variance", "mad1"="mean absolute deviation", "mad2"="median absolute deviation", "range"="range" ) txt<-"\nNonparametric RM model test: T2 (local 
dependence - model deviating subscales)\n" writeLines(strwrap(txt, exdent=5)) cat(" (dispersion of subscale person rawscores)\n") cat("Number of sampled matrices:", x$n_eff,"\n") cat("Items in subscale:", idx,"\n") cat("Statistic:", statnam,"\n") cat("one-sided p-value:",prop,"\n\n") # cat(" (proportion of sampled",statnam," GE observed)\n\n") } print.T4obj<-function(x,...){ prop<-x$prop idx<-x$idx gr.nam<-x$gr.nam gr.n<-x$gr.n alternative<-x$alternative cat("\nNonparametric RM model test: T4 (Group anomalies - DIF)\n") cat(" (counting", alternative, "raw scores on item(s) for specified group)\n") cat("Number of sampled matrices:", x$n_eff,"\n") cat("Items in Subscale:", idx,"\n") cat("Group:",gr.nam," n =",gr.n,"\n") cat("one-sided p-value:",prop,"\n\n") # cat(" (proportion of sampled raw scores GE observed)\n\n") } print.T7obj<-function(x,...){ prop<-x$prop cat("\nNonparametric RM model test: T7 (different discrimination - 2PL)\n") cat(" (counting cases with response 1 on more difficult and 0 on easier item)\n") cat("Number of sampled matrices:", x$n_eff,"\n") cat("Item Scores:\n") print(x$itscor) cat("one-sided p-value:",prop,"\n\n") } print.T7aobj<-function(x,...){ prop<-x$prop cat("\nNonparametric RM model test: T7a (different discrimination - 2PL)\n") cat(" (counting cases with response 1 on more difficult and 0 on easier item)\n") cat("Number of sampled matrices:", x$n_eff,"\n") cat("Item Scores:\n") print(x$itscor) cat("\nItem-Pairs: (i>j ... 
i easier than j)\n\n") print(round(prop,digits=3)) } print.T10obj<-function(x,...){ spl.nam<-x$spl.nam prop<-x$prop hi.n<-x$hi.n low.n<-x$low.n cat("\nNonparametric RM model test: T10 (global test - subgroup-invariance)\n") cat("Number of sampled matrices:", x$n_eff,"\n") cat("Split:",spl.nam,"\n") cat("Group 1: n = ",hi.n," Group 2: n =",low.n,"\n") cat("one-sided p-value:",prop,"\n\n") # cat(" (proportion of sampled statistics GE observed)\n\n") } print.T11obj<-function(x,...){ prop<-x$prop cat("\nNonparametric RM model test: T11 (global test - local dependence)\n") cat(" (sum of deviations between observed and expected inter-item correlations)\n") cat("Number of sampled matrices:", x$n_eff,"\n") cat("one-sided p-value:",prop,"\n\n") # cat(" (proportion of sampled sums GE observed)\n\n") } eRm/R/model.matrix.eRm.R0000744000176000001440000000012711572663323014507 0ustar ripleyusers`model.matrix.eRm` <- function(object,...) object$W #design matrix eRm/R/MLoef.R0000744000176000001440000001342411572663323012370 0ustar ripleyusersMLoef <- function(robj, splitcr="median") { # performs the Martin-Loef LR-test # robj... object of class RM # splitcr... splitting criterion for two groups. "median" (default) and "mean" # split items in two groups according to the median/mean or item raw # scores. # a vector of length k (number of items) containing two different # elements signifying group membership of items can be supplied. 
### no test with missing values rh 19-05-10 if(any(is.na(robj$X))) stop("Martin-Loef Test with NA currently not available\n") wrning <- NULL # initialize an object for warnings if(length(splitcr) == 1){ # generate split-vector if "mean" or "median" if(splitcr == "median"){ raw.scores <- colSums(robj$X,na.rm=T) numsplit <- as.numeric(raw.scores > median(raw.scores,na.rm=T)) if( any(raw.scores == median(raw.scores,na.rm=T)) ){ # Only if one item's raw score == the median, a warning is issued wrning <- which(raw.scores == median(raw.scores,na.rm=T)) # append a warning-slot to the object for print and summary methods cat("Item(s)",paste(names(wrning),collapse=", "),"with raw score equal to the median assigned to the lower raw score group!\n") } } if(splitcr=="mean"){ raw.scores <- colSums(robj$X,na.rm=T) numsplit <- as.numeric(raw.scores > mean(raw.scores,na.rm=T)) if( any(raw.scores == mean(raw.scores,na.rm=T)) ){ # Only if one item's raw score == the mean, a warning is issued wrning <- which(raw.scores == mean(raw.scores,na.rm=T)) # append a warning-slot to the object for print and summary methods cat("Item(s)",paste(names(wrning),collapse=", "),"with raw score equal to the mean assigned to the lower raw score group!\n") } } } else { # check if the submitted split-vector is appropriate if(length(splitcr) != ncol(robj$X)) stop("Split vector too long/short.") # if(length(unique(splitcr)) > 2) stop("Only two groups allowed.") if(length(unique(splitcr)) < 2) stop("Split vector must contain at least two groups.") numsplit <- splitcr } sp.groups <- unique(numsplit) i.groups <- sapply(sp.groups, function(g){ which(numsplit == g) }, simplify=F) # check if any group countains less than 2 items if( any(unlist(lapply(i.groups, length)) < 2) ){ stop("Each group of items must contain at least 2 items.") } # check if one group contains subject with <=1 valid responses if(any(unlist(lapply(i.groups, function(g){ any(rowSums(!is.na(robj$X[,g])) <= 1) })))) stop("Groups contain 
subjects with less than two valid responses.") ### possible missing patterns and classification of persons into groups # MV.X <- apply(matrix(as.numeric(is.na(robj$X01)),ncol=ncol(robj$X01)),1,paste,collapse="") # MV.p <- sort(unique(MV.X)) # MV.g <- numeric(length=length(MV.X)) # g <- 1 # for(i in MV.p){ # MV.g[MV.X == i] <- g; # g <- g + 1 # } # na.X01 <- list() # for(i in 1:length(MV.p)){ # na.X01[[i]] <- matrix(robj$X01[which(MV.g == i),], ncol=ncol(robj$X01)) # } # res1 <- RM(robj$X01[,i.groups[[1]]]) # res2 <- RM(robj$X01[,i.groups[[2]]]) # fitting the submodels subModels <- lapply(i.groups, function(g){ PCM(robj$X[,g]) }) ### calculating the numerator and denominator sub.tabs <- as.data.frame(sapply(subModels, function(M){ rowSums(M$X, na.rm=T) })) sub.tabs <- table(sub.tabs) sub.term <- sub.tabs * (log(sub.tabs) - log(nrow(robj$X))) sub.term <- sum(na.omit(as.numeric(sub.term))) sub.max <- lapply(i.groups, function(g){ sum(apply(robj$X[,g], 2, max)) }) full.tab <- table(rowSums(robj$X, na.rm=T)) full.term <- sum(na.omit(as.numeric( full.tab * (log(full.tab) - log(nrow(robj$X))) ))) ML.LR <- 2 * ( sub.term + sum(unlist(lapply(subModels, `[[`, "loglik"))) - full.term - robj$loglik ) df <- prod(unlist(sub.max)+1) - (sum(apply(robj$X, 2, max))+1) - length(sp.groups) + 1 # ml.num <- ml.den <- df <- numeric() # for(i in 1:length(MV.p)){ # .temp.num <- table(rowSums(na.X01[[i]],na.rm=T)) # .temp.num <- .temp.num[.temp.num > 0] ### rh # ml.num[i] <- sum( (log(.temp.num)-log(sum(.temp.num)))*.temp.num ) # # if(nrow(na.X01[[i]]) > 1){ # .temp.den <- table(rowSums(na.X01[[i]][,i.groups[[1]]],na.rm=T), # rowSums(na.X01[[i]][,i.groups[[2]]],na.rm=T)) # } # else{ # .temp.den <- table(sum(na.X01[[i]][,i.groups[[1]]],na.rm=T), # sum(na.X01[[i]][,i.groups[[2]]],na.rm=T)) # } # .temp.den <- .temp.den[.temp.den > 0] ### rh # ml.den[i] <- sum( (log(.temp.den)-log(sum(.temp.den)))*.temp.den ) # # k1 <- sum(!is.na(na.X01[[i]][1,i.groups[[1]]])) ### rh # k2 <- 
sum(!is.na(na.X01[[i]][1,i.groups[[2]]])) ### rh # df[i] <- k1 * k2 -1 ### rh # } # # a <- sum(ml.num) # b <- sum(ml.den) # k <- c(length(i.groups[[1]]),length(i.groups[[2]])) # # ML.LR <- -2*( (a + robj$loglik) - (b + res1$loglik + res2$loglik) ) # DF <- prod(k) - 1 p.value <- 1 - pchisq(ML.LR, df) result <- list(LR=ML.LR, df=df, p.value=p.value, fullModel=robj, subModels=subModels, Lf=robj$loglik, Ls=lapply(subModels, `[[`, "loglik"), # theta.table.RM=table(rowSums(robj$X01)), # both used for the plotting # theta.table.MLoef=table(rowSums(res1$X01),rowSums(res2$X01)), # routine plot.MLoef i.groups=i.groups, # items1=i.groups[[1]], items2=i.groups[[2]], k=k, splitcr=splitcr, split.vector=numsplit, warning=wrning, call=match.call()) class(result) <- "MLoef" return(result) } eRm/R/LRtest.Rm.R0000744000176000001440000002047411572663323013163 0ustar ripleyusers`LRtest.Rm` <- function(object, splitcr="median", se=FALSE) { # performs Andersen LR-test # object... object of class RM # splitcr... splitting criterion for LR-groups. "all.r" corresponds to a complete # raw score split (r=1,...,k-1), "median" to a median raw score split, # "mean" corresponds to the mean raw score split. # optionally also a vector of length n for group split can be submitted. 
# se...whether standard errors should be computed call<-match.call() spl.gr<-NULL X.original<-object$X if (length(splitcr)>1 && is.character(splitcr)){ # if splitcr is character vector, treated as factor splitcr<-as.factor(splitcr) } if (is.factor(splitcr)){ spl.nam<-deparse(substitute(splitcr)) spl.lev<-levels(splitcr) spl.gr<-paste(spl.nam,spl.lev,sep=" ") splitcr<-unclass(splitcr) } numsplit<-is.numeric(splitcr) if (any(is.na(object$X))) { if (!numsplit && splitcr=="mean") { #mean split spl.gr<-c("Raw Scores < Mean", "Raw Scores >= Mean") X<-object$X # calculates index for NA groups # from person.parameter.eRm dichX <- ifelse(is.na(X),1,0) strdata <- apply(dichX,1,function(x) {paste(x,collapse="")}) gmemb <- as.vector(data.matrix(data.frame(strdata))) gindx<-unique(gmemb) rsum.all<-rowSums(X,na.rm=T) grmeans<-tapply(rsum.all,gmemb,mean) #sorted ngr<-table(gmemb) #sorted m.all<-rep(grmeans,ngr) #sorted,expanded rsum.all<-rsum.all[order(gmemb)] spl<-ifelse(rsum.all Median") # cat("Warning message: Persons with median raw scores are assigned to the lower raw score group!\n") X<-object$X # calculates index for NA groups # from person.parameter.eRm dichX <- ifelse(is.na(X),1,0) strdata <- apply(dichX,1,function(x) {paste(x,collapse="")}) gmemb <- as.vector(data.matrix(data.frame(strdata))) gindx<-unique(gmemb) rsum.all<-rowSums(X,na.rm=T) grmed<-tapply(rsum.all,gmemb,median) #sorted ngr<-table(gmemb) #sorted m.all<-rep(grmed,ngr) #sorted,expanded rsum.all<-rsum.all[order(gmemb)] spl<-ifelse(rsum.all<=m.all,1,2) splitcr<-spl object$X<-X[order(gmemb),] } } if (!is.numeric(splitcr)) { if (splitcr=="all.r") { #full raw score split rvind <- apply(object$X,1,sum,na.rm=TRUE) #person raw scoobject Xlist <- by(object$X,rvind,function(x) x) names(Xlist) <- as.list(sort(unique(rv))) } if (splitcr=="median") { #median split spl.gr<-c("Raw Scores <= Median", "Raw Scores > Median") #removed rh 2010-12-17 #cat("Warning message: Persons with median raw scores are assigned to the 
lower raw score group!\n") rv <- apply(object$X,1,sum,na.rm=TRUE) rvsplit <- median(rv) rvind <- rep(0,length(rv)) rvind[rv > rvsplit] <- 1 #group with highraw scoobject Xlist <- by(object$X,rvind,function(x) x) names(Xlist) <- list("low","high") } if (splitcr=="mean") { #mean split spl.gr<-c("Raw Scores < Mean", "Raw Scores >= Mean") rv <- apply(object$X,1,sum,na.rm=TRUE) rvsplit <- mean(rv) rvind <- rep(0,length(rv)) rvind[rv > rvsplit] <- 1 #group with highraw scoobject Xlist <- by(object$X,rvind,function(x) x) names(Xlist) <- list("low","high") } } if (is.numeric(splitcr)) { #manual raw score split spl.nam<-deparse(substitute(splitcr)) if (length(splitcr)!=dim(object$X)[1]){ stop("Mismatch between length of split vector and number of persons!") } else { rvind <- splitcr Xlist <- by(object$X,rvind, function(x) x) names(Xlist) <- as.list(sort(unique(splitcr))) if(is.null(spl.gr)){ spl.lev<-names(Xlist) spl.gr<-paste(spl.nam,spl.lev,sep=" ") } } } #----------item to be deleted--------------- del.pos.l <- lapply(Xlist, function(x) { it.sub <- datcheck.LRtest(x,object$X,object$model) #items to be removed within subgroup }) del.pos <- unique(unlist(del.pos.l)) if ((length(del.pos)) >= (dim(object$X)[2]-1)) { stop("\nNo items with appropriate response patterns left to perform LR-test!\n") } if (length(del.pos) > 0) { warning("\nThe following items were excluded due to inappropriate response patterns within subgroups: ",immediate.=TRUE) cat(colnames(object$X)[del.pos], sep=" ","\n") cat("Full and subgroup models are estimated without these items!\n") } if (length(del.pos) > 0) { X.el <- object$X[,-(del.pos)] } else { X.el <- object$X } Xlist.n <- by(X.el,rvind,function(y) y) names(Xlist.n) <- names(Xlist) if (length(del.pos) > 0) Xlist.n <- c(Xlist.n,list(X.el)) # X.el added since we must refit whole group without del.pos items if (object$model=="RM") { likpar <- sapply(Xlist.n,function(x) { #matrix with loglik and npar for each subgroup objectg <- RM(x,se=se) likg <- 
objectg$loglik nparg <- length(objectg$etapar) # betalab <- colnames(objectg$X) list(likg,nparg,objectg$betapar,objectg$etapar,objectg$se.beta,outobj=objectg) # rh outobj added ###list(likg,nparg,objectg$betapar,objectg$etapar,objectg$se.beta) # rh outobj added }) } if (object$model=="PCM") { likpar <- sapply(Xlist.n,function(x) { #matrix with loglik and npar for each subgroup objectg <- PCM(x,se=se) likg <- objectg$loglik nparg <- length(objectg$etapar) list(likg,nparg,objectg$betapar,objectg$etapar,objectg$se.beta,outobj=objectg) # rh outobj added ###list(likg,nparg,objectg$betapar,objectg$etapar,objectg$se.beta) # rh outobj added }) } if (object$model=="RSM") { likpar <- sapply(Xlist.n,function(x) { #matrix with loglik and npar for each subgroup objectg <- RSM(x,se=se) likg <- objectg$loglik nparg <- length(objectg$etapar) list(likg,nparg,objectg$betapar,objectg$etapar,objectg$se.beta,outobj=objectg) # rh outobj added ###list(likg,nparg,objectg$betapar,objectg$etapar,objectg$se.beta) # rh outobj added }) } ## extract fitted splitgroup models # rh 02-05-2010 fitobj<-likpar[6,1:length(unique(rvind))] likpar<-likpar[-6,] if (length(del.pos) > 0) { #re-estimate full model pos <- length(Xlist.n) #position of the full model loglik.all <- likpar[1,pos][[1]] #loglik full model etapar.all <- rep(0,likpar[2,pos]) #etapar full model (filled with 0 for df computation) likpar <- likpar[,-pos] Xlist.n <- Xlist.n[-pos] } else { loglik.all <- object$loglik etapar.all <- object$etapar } loglikg <- sum(unlist(likpar[1,])) #sum of likelihood value for subgroups LR <- 2*(abs(loglikg-loglik.all)) #LR value df = sum(unlist(likpar[2,]))-(length(etapar.all)) #final degrees of freedom pvalue <- 1-pchisq(LR,df) #pvalue betalist <- likpar[3,] #organizing betalist result <- list(X=X.original, X.list=Xlist.n, model=object$model,LR=LR, df=df, pvalue=pvalue, likgroup=unlist(likpar[1,],use.names=FALSE), betalist=betalist, etalist=likpar[4,],selist=likpar[5,], spl.gr=spl.gr, call=call, 
fitobj=fitobj) ## rh fitobj added class(result) <- "LR" result } eRm/R/LRtest.R0000744000176000001440000000011611572663323012575 0ustar ripleyusers`LRtest` <- function(object,splitcr="median",se=FALSE)UseMethod("LRtest") eRm/R/LRSM.R0000744000176000001440000000333611572663323012144 0ustar ripleyusers`LRSM` <- function(X, W, mpoints = 1, groupvec = 1, se = TRUE, sum0 = TRUE, etaStart) { model <- "LRSM" call<-match.call() if (missing(W)) W <- NA else W <- as.matrix(W) if (missing(etaStart)) etaStart <- NA else etaStart <- as.vector(etaStart) XWcheck <- datcheck(X,W,mpoints,groupvec,model) #inital check of X and W X <- XWcheck$X lres <- likLR(X,W,mpoints,groupvec,model,st.err=se,sum0,etaStart) parest <- lres$parest #full groups for parameter estimation loglik <- -parest$minimum #log-likelihood value iter <- parest$iterations #number of iterations convergence <- parest$code etapar <- parest$estimate #eta estimates betapar <- as.vector(lres$W%*% etapar) #beta estimates if (se) { se.eta <- sqrt(diag(solve(parest$hessian))) #standard errors se.beta <- sqrt(diag(lres$W%*%solve(parest$hessian)%*%t(lres$W))) #se beta } else { se.eta <- rep(NA,length(etapar)) se.beta <- rep(NA,length(betapar)) } X01 <- lres$X01 labs <- labeling.internal(model,X,X01,lres$W,etapar,betapar,mpoints,max(groupvec)) #labeling for L-models W <- labs$W etapar <- labs$etapar betapar <- labs$betapar npar <- dim(lres$W)[2] #number of parameters result <- list(X=X,X01=X01,model=model,loglik=loglik,npar=npar,iter=iter,convergence=convergence, etapar=etapar,se.eta=se.eta,hessian=parest$hessian,betapar=betapar, se.beta=se.beta,W=W,mpoints=mpoints,ngroups=max(groupvec),groupvec=groupvec,call=call) class(result) <- "eRm" #classes: simple RM and extended RM result } eRm/R/LPCM.R0000744000176000001440000000346611572663323012126 0ustar ripleyusers`LPCM` <- function(X, W, mpoints = 1, groupvec = 1, se = TRUE, sum0 = TRUE, etaStart) { #-------------------main programm------------------- model <- "LPCM" 
call<-match.call() if (missing(W)) W <- NA else W <- as.matrix(W) if (missing(etaStart)) etaStart <- NA else etaStart <- as.vector(etaStart) XWcheck <- datcheck(X,W,mpoints,groupvec,model) #inital check of X and W groupvec <- XWcheck$groupvec X <- XWcheck$X lres <- likLR(X,W,mpoints,groupvec,model,st.err=se,sum0,etaStart) parest <- lres$parest #full groups for parameter estimation loglik <- -parest$minimum #log-likelihood value iter <- parest$iterations #number of iterations convergence <- parest$code etapar <- parest$estimate #eta estimates betapar <- as.vector(lres$W%*% etapar) #beta estimates if (se) { se.eta <- sqrt(diag(solve(parest$hessian))) #standard errors se.beta <- sqrt(diag(lres$W%*%solve(parest$hessian)%*%t(lres$W))) #se beta } else { se.eta <- rep(NA,length(etapar)) se.beta <- rep(NA,length(betapar)) } X01 <- lres$X01 labs <- labeling.internal(model,X,X01,lres$W,etapar,betapar,mpoints,max(groupvec)) #labeling for L-models W <- labs$W etapar <- labs$etapar betapar <- labs$betapar npar <- dim(lres$W)[2] #number of parameters result <- list(X=X,X01=X01,model=model,loglik=loglik,npar=npar,iter=iter,convergence=convergence, etapar=etapar,se.eta=se.eta,hessian=parest$hessian,betapar=betapar, se.beta=se.beta,W=W,mpoints=mpoints,ngroups=max(groupvec),groupvec=groupvec,call=call) class(result) <- "eRm" #classes: simple RM and extended RM result } eRm/R/logLik.ppar.r0000744000176000001440000000036011572663323013603 0ustar ripleyuserslogLik.ppar <- function(object,...) { #object of class ppar # val <- object$loglik # attr(val, "df") <- object$npar val <- list(loglik = object$loglik, df = object$npar) # rh 26-03-2010 class(val) <- "logLik.ppar" val } eRm/R/logLik.eRm.r0000744000176000001440000000036111572663323013365 0ustar ripleyuserslogLik.eRm <- function(object,...) 
{ #object of class eRm # val <- object$loglik # attr(val, "df") <- object$npar val <- list(loglik = object$loglik, df = object$npar) # rh 26-03-2010 class(val) <- "logLik.eRm" val } eRm/R/LLTM.R0000744000176000001440000000340511572663323012134 0ustar ripleyusers`LLTM` <- function(X, W, mpoints = 1, groupvec = 1, se = TRUE, sum0 = TRUE, etaStart) { #...X: person*(item*times) matrix (T1|T2|...) model <- "LLTM" call<-match.call() if (missing(W)) W <- NA else W <- as.matrix(W) if (missing(etaStart)) etaStart <- NA else etaStart <- as.vector(etaStart) XWcheck <- datcheck(X,W,mpoints,groupvec,model) #inital check of X and W X <- XWcheck$X lres <- likLR(X,W,mpoints,groupvec,model,st.err=se,sum0,etaStart) parest <- lres$parest #full groups for parameter estimation loglik <- -parest$minimum #log-likelihood value iter <- parest$iterations #number of iterations convergence <- parest$code etapar <- parest$estimate #eta estimates betapar <- as.vector(lres$W%*% etapar) #beta estimates if (se) { se.eta <- sqrt(diag(solve(parest$hessian))) #standard errors se.beta <- sqrt(diag(lres$W%*%solve(parest$hessian)%*%t(lres$W))) #se beta } else { se.eta <- rep(NA,length(etapar)) se.beta <- rep(NA,length(betapar)) } X01 <- lres$X01 labs <- labeling.internal(model,X,X01,lres$W,etapar,betapar,mpoints,max(groupvec)) #labeling for L-models W <- labs$W etapar <- labs$etapar betapar <- labs$betapar npar <- dim(lres$W)[2] #number of parameters result <- list(X=X,X01=X01,model=model,loglik=loglik,npar=npar,iter=iter,convergence=convergence, etapar=etapar,se.eta=se.eta,hessian=parest$hessian,betapar=betapar, se.beta=se.beta,W=W,mpoints=mpoints,ngroups=max(groupvec),groupvec=groupvec,call=call) class(result) <- "eRm" #classes: simple RM and extended RM result } eRm/R/LLRA.R0000744000176000001440000000136011572663323012114 0ustar ripleyusersLLRA <- function(X, W, mpoints, groups, baseline=NULL, itmgrps=NULL,...) { if(missing(mpoints)) stop("Please specify the number of time points. 
If there are none, you might want to try PCM() or LPCM().") Xprep <- llra.datprep(X,mpoints,groups,baseline) itmgrps <- rep(1:Xprep$nitems) groupvec <- Xprep$assign.vec pplgrps <- length(Xprep$grp_n) if(missing(W)) W <- build_W(Xprep$X,length(unique(itmgrps)),mpoints,Xprep$grp_n,groupvec,itmgrps) fit <- LPCM(Xprep$X,W,mpoints=mpoints,groupvec=groupvec,sum0=FALSE) refg <- unique(names(which(groupvec==max(groupvec)))) out <- c(fit,"itms"=Xprep$nitems,"refGroup"=refg) out$call <- match.call() class(out) <- c("llra","Rm","eRm") cat("Reference group: ",refg,"\n\n") return(out) } eRm/R/llra.internals.R0000744000176000001440000001216211572663323014314 0ustar ripleyusers#internal functions get_item_cats <- function(X,nitems,grp_n) { # returns list of vectors with length max(categories) for each item; # 1:number categories are the first few entries and the rest is filed with zeros # This later corresponds to the necessary setup in LPCM where the superfluous categories must be set to 0 its <- rep(1:nitems,each=sum(grp_n)) cats <- lapply(split(X,its),max) #splits the data matrix according to items and finds the maximum category max.cat <- max(X) #overall maximum category vec.cat <- lapply(cats,function(x) c(1:x,rep(0,max.cat-x))) vec.cat #the ominous list of form c(1:categories,0,0,0) } build_effdes <- function(nitems,mpoints,pplgrps,categos,groupvec) { #builds treatment design structure for W # #mpoints>nitems>treat>catego #build group design tmp1 <- diag(pplgrps) tmp1[pplgrps,pplgrps] <- 0 eff.tmp1 <- lapply(categos,function(x)(tmp1%x%x)) #list with categories per item, replicated per group eff.tmp2 <- as.matrix(bdiag(eff.tmp1)) #blockdiagonal with blocks equal to the categories eff.tmp3 <- diag(mpoints-1)%x%eff.tmp2 #blow up to mpoints nuller <- matrix(0,nrow=dim(eff.tmp2)[1],ncol=dim(eff.tmp3)[2]) #baseline (tp=1) gr.bu <- rbind(nuller,eff.tmp3) #combine baseline and effects #labelling of effects names1 <- unique(names(groupvec)) #names1 <- paste("G",pplgrps:1,sep="") 
names2 <- paste(names1,"I",sep=".") names3 <- paste(names2,rep(1:nitems,each=pplgrps),sep="") names4 <- paste(names3,"t",sep=".") names5 <- paste(names4,rep(2:mpoints,each=pplgrps*nitems),sep="") colnames(gr.bu) <- names5 #columns with zeros (baseline group) are removed now rem.0 <- NA for(i in 1:dim(gr.bu)[2]) {rem.0[i] <- all(gr.bu[,i]==0)} gr.bu.red <- gr.bu[,which(rem.0==0)] return(gr.bu.red) } build_trdes <- function(nitems,mpoints,pplgrps,categos) { #builds trend design structure for W # #mpoints>nitems>treat>catego tr.tmp1 <- lapply(categos,function(x) rep(x,pplgrps)) #replicate number of categories per item times the groups tr.tmp2 <- as.matrix(bdiag(tr.tmp1)) #build the blockdiaginal for all items tr.tmp3 <- diag(mpoints-1)%x%tr.tmp2 #blow it up to the time points necessary nuller <- matrix(0,nrow=dim(tr.tmp2)[1],ncol=dim(tr.tmp3)[2]) #baseline tr.bu <- rbind(nuller,tr.tmp3) #combine mpoints and baseline #structure: for each category multiply it with a vector of group indicators #hence the grouping is: #tau1 t2-t1, tau2 t2-t1, ..., tauk t2-t1, tau1 t3-t1, tau2 t3-t1, .. 
tauk t3-t1 #cat("Design matrix columns are:","\n","tau_1^(t2-t1), tau_2^(t2-t1), ..., tau_k^(t2-t1), tau_1^(t3-t1), tau_2(t3-t1), ..., tau_k^(t3-t1), etc.","\n") #labeling names1 <- paste("trend.I",1:nitems,sep="") names2 <- paste(names1,"t",sep=".") names3 <- paste(names2,rep(2:mpoints,each=nitems),sep="") colnames(tr.bu) <- names3 return(tr.bu) } build_catdes <- function(nitems,mpoints,pplgrps,categos) { #builds category design matrix #FIX ME: is a bit ugly, we might get the loops out somehow # #check if there are just binary items if(max(unlist(categos))<2) stop("items are (at most) binary and need no design") #currently equates cat.0 and cat.1 warning("Currently c0 and c1 are equated for each item","\n") max.all <- max(unlist(categos)) #maximum category number ls.ct.des <- list() #list of designs for each item #here we walk through each item and build up the category design for(i in 1:nitems) { max.it <- sum(categos[[i]]!=0) #maximum category number of item i ct.des <- rbind(rep(0,dim(diag(max.all-1))[2]),diag(max.all-1)) #the design for the maximum number of categories in X rems <- max.all-max.it #the number of superfluous columns #here the superfluous columns are removed as the step from W to W* #the necessary rows with zeros however are maintained: #for a dichotomous item the structure is slightly different than for any other, since it returns an empty matrix of appropriate dimensions #for a polytomous item the superfluous columns are removed from the back ifelse(rems==max.all-1, ct.des<- as.matrix(ct.des[,-(1:max.all-1)]), ct.des<- as.matrix(ct.des[,1:((max.all-1)-rems)])) ct.des.gr <- rep(1,pplgrps)%x%ct.des #blow it up to the number of groups ls.ct.des[[i]] <- ct.des.gr #list with all category designs for each item } ct.tmp2 <- as.matrix(bdiag(ls.ct.des)) #blockdiagonal matrix for a single mpoints ct.bu <- rep(1,mpoints)%x%ct.tmp2 #blow up to number of times points #try to first build first item, then second and so on, then blow up #labeling: pretty 
unelegant too names <- NA for(i in 1:nitems) { cat <- max(categos[[i]]) ifelse(cat==1,names1 <- "remove",names1 <- paste("c",2:cat,sep="")) names2 <- paste("I",i,sep="") names3 <- paste(names1,names2,sep=".") names<- c(names,names3) } names <- names[-1] if(length(grep("remove",names)>0)) names <- names[-grep("remove",names)] colnames(ct.bu) <- names return(ct.bu) } eRm/R/llra.datprep.R0000744000176000001440000000263211572663323013755 0ustar ripleyusersllra.datprep<-function(X, mpoints, groups, baseline=NULL){ Xwide <- X if (ncol(Xwide) %% mpoints > 0) stop("Number of items must be the same for each timepoint.") nitems <- dim(Xwide)[2]/mpoints if(missing(groups)) groups <- rep("CG",dim(Xwide)[1]) covs.prep<-function(groups,baseline){ groups<-as.matrix(groups) grstr<-apply(groups,1,paste,collapse=":") grstr <- factor(grstr) if(!is.null(baseline)) { basel <- paste(baseline,collapse=":") grstr <- relevel(grstr,basel) } cov.groupvec<-as.numeric(grstr) names(cov.groupvec)<-grstr cov.groupvec } # sort data according to cov.groupvec cov.groupvec<-covs.prep(groups,baseline) Xwide<-Xwide[order(cov.groupvec,decreasing=TRUE),] cov.groupvec<-sort(cov.groupvec) # number of people per group grp_n<-table(cov.groupvec) names(grp_n)<-unique(names(cov.groupvec)) # convert to long format Xlong <- matrix(unlist(Xwide), ncol = mpoints) # assignment vector item x treatment assign.vec <- as.vector(sapply(1:nitems, function(i) cov.groupvec + (i-1)*max(cov.groupvec))) assign.vec <- rev(assign.vec) assign.vec <- abs(assign.vec-max(assign.vec))+1 names(assign.vec)<-rev(rep(names(cov.groupvec),nitems)) list(X=Xlong, assign.vec=assign.vec, grp_n=grp_n, nitems=nitems) } eRm/R/likLR.R0000744000176000001440000000323711572663323012404 0ustar ripleyusers`likLR` <- function (X,W,mpoints,Groups,model,st.err,sum0,etaStart) { if (any(is.na(X))) { dichX <- ifelse(is.na(X),1,0) strdata <- apply(dichX,1,function(x) {paste(x,collapse="")}) gmemb <- as.vector(data.matrix(data.frame(strdata))) } else { gmemb 
<- rep(1,dim(X)[1]) } #data preparation, design matrix generation for various models if (model=="RM") { Xprep <- datprep_RM(X,W,sum0) } else if (model=="LLTM") { Xprep <- datprep_LLTM(X,W,mpoints,Groups,sum0) } else if (model=="RSM") { Xprep <- datprep_RSM(X,W,sum0) } else if (model=="PCM") { Xprep <- datprep_PCM(X,W,sum0) } else if (model=="LRSM") { Xprep <- datprep_LRSM(X,W,mpoints,Groups,sum0) } else if (model=="LPCM") {Xprep <- datprep_LPCM(X,W,mpoints,Groups,sum0) } if (any(is.na(etaStart))) etaStart <- rep(0,dim(Xprep$W)[2]) #check starting vector if (length(etaStart) != dim(Xprep$W)[2]) stop("Vector with starting values does not match number of parameters!") ng <- max(Groups) if ((dim(Xprep$W)[1]) != ((dim(Xprep$X01)[2])*ng)) stop("Mismatch between number of rows (beta's) in W and number of items (categories) in X!") Lprep <- cmlprep(Xprep$X01,Xprep$mt_vek,mpoints,Groups,Xprep$W,gmemb) parest <- fitcml(Lprep$mt_ind,Lprep$nrlist,Lprep$x_mt,Lprep$rtot,Xprep$W, max(Groups),gind=Lprep$gind,x_mtlist=Lprep$x_mtlist, Lprep$NAstruc,g_NA=Lprep$g_NA,st.err,etaStart,gby=Lprep$gby) W1 <- Xprep$W #rownames(W1) <- NULL #colnames(W1) <- paste("eta",1:dim(W1)[2],sep="") options(warn=0) list(W=W1,parest=parest,X01=Xprep$X01) #returns design matrix and results } eRm/R/labeling.internal.r0000744000176000001440000001146411572663323015020 0ustar ripleyuserslabeling.internal <- function(model,X,X01,W,etapar,betapar,mpoints,ngroups) { #labeling for W, eta, beta. 
if (is.null(colnames(W))) { #eta labels names(etapar) <- paste("eta",1:dim(W)[2]) colnames(W) <- names(etapar) } else { names(etapar) <- colnames(W) } if(model=="RM"){ # new labelling of if (!is.null(colnames(X))) # eta parameters for names(etapar) <- colnames(X)[2:ncol(X)] # RM, RSM, PCM else # rh, 25-03-2010 names(etapar) <- paste("I",2:ncol(X),sep="") # } # gives estimated # item (RM) if(model=="RSM"){ # item + category (RSM) if (!is.null(colnames(X))) { # item x category (PCM) names(etapar)[1:(ncol(X)-1)] <- colnames(X)[2:ncol(X)] # parameters } else { # names(etapar[1:(ncol(X)-1)]) <- paste("I",2:ncol(X),sep="") # } # maxcat <- max(X,na.rm=TRUE) # if (maxcat>1) # names(etapar)[ncol(X):length(etapar)] <- paste("Cat ",2:maxcat,sep="") # } # # # if(model=="PCM"){ # indmt <- apply(X,2,max,na.rm=TRUE) # number of categories # catnames <- sequence(indmt) # # if (!is.null(colnames(X))) { # itnames <- colnames(X) # } else { # itnames <- paste("I",1:ncol(X),sep="") # } # etanames <- rep(itnames, indmt) # etanames <- paste(etanames[-1],catnames[-1],sep=".c") # names(etapar) <- etanames # } # if (mpoints == 1) { #no mpoints labels if ((model=="RM") || (model=="LLTM")) { #no category labels betanames <- paste("beta",colnames(X)) } else { indmt <- apply(X,2,max,na.rm=TRUE) catnames <- sequence(indmt) itnames <- rep(colnames(X),indmt) betanames <- paste("beta",paste(itnames,catnames,sep=".c")) } } else { #repeated measurement models indmt0 <- apply(X,2,max,na.rm=TRUE) indmt <- rep(apply(X,2,max,na.rm=TRUE),ngroups) catnames <- sequence(indmt) #category names if (substr(colnames(X)[1],1,2)=="I1") { #if item names specified by user itemind <- rep(paste("I",1:(dim(X)[2]/mpoints),sep=""),mpoints) #item labels } else { itemind <- colnames(X) } itnames <- rep(itemind,indmt0) if (ngroups > 1) { ind.it <- rep(1:mpoints,each = length(itnames)/mpoints) #item label index itnames <- as.vector(unlist(tapply(itnames, ind.it, function(x) rep(x, ngroups)))) } if (model == "LLTM") { icnames 
<- rep(itnames,(dim(W)[1]/length(itnames))) } else { icnames <- paste(itnames,catnames,sep=".c") } t.lab <- paste("t",rep(1:mpoints,each=length(icnames)/mpoints),sep="") #time labels if (ngroups > 1) { g.lab <- rep(paste("g",rep(1:ngroups,each=length(icnames)/mpoints/ngroups),sep=""),mpoints) betanames <- paste(icnames,t.lab,g.lab) } else { betanames <- paste(icnames,t.lab) } } if (is.null(rownames(W))) { #no labels provided rownames(W) <- betanames names(betapar) <- betanames } else { names(betapar) <- rownames(W) } list(W=W,etapar=etapar,betapar=betapar) } eRm/R/itemfit.R0000744000176000001440000000006611572663323013025 0ustar ripleyusers`itemfit` <- function(object)UseMethod("itemfit") eRm/R/itemfit.ppar.R0000744000176000001440000000235311572663323013767 0ustar ripleyusers`itemfit.ppar` <- function(object) # computes Chi-square based itemfit statistics # for object of class "ppar" (from person.parameter) { if (length(object$pers.ex)==0) { X <- object$X } else { X <- object$X[-object$pers.ex,] } VE <- pifit.internal(object) #compute expectation and variance term Emat <- VE$Emat Vmat <- VE$Vmat Cmat <- VE$Cmat st.res <- (X-Emat)/sqrt(Vmat) sq.res <- st.res^2 #squared standardized residuals ifit <- colSums(sq.res,na.rm=TRUE) idf <- apply(X,2,function(x) {length(na.exclude(x))}) i.outfitMSQ <- ifit/idf qsq.outfitMSQ <- (colSums(Cmat/Vmat^2, na.rm=TRUE)/idf^2) - 1/idf q.outfitMSQ <- sqrt(qsq.outfitMSQ) isumVmat<-colSums(Vmat) i.infitMSQ <- colSums(sq.res*Vmat, na.rm = TRUE)/isumVmat qsq.infitMSQ <- colSums(Cmat-Vmat^2, na.rm=TRUE)/isumVmat^2 q.infitMSQ <- sqrt(qsq.infitMSQ) i.outfitZ <- (sqrt(i.outfitMSQ)-1)*(3/q.outfitMSQ)+(q.outfitMSQ/3) i.infitZ <- (sqrt(i.infitMSQ)-1)*(3/q.infitMSQ)+(q.infitMSQ/3) result <- list(i.fit=ifit,i.df=idf,st.res=st.res,i.outfitMSQ=i.outfitMSQ,i.infitMSQ=i.infitMSQ,i.outfitZ=i.outfitZ,i.infitZ=i.infitZ) class(result) <- "ifit" result } eRm/R/invalid.R0000744000176000001440000000044211572663323013010 0ustar ripleyusers# $Id: invalid.R 625 
2005-06-09 14:20:30Z nj7w $ invalid <- function(x) { if( missing(x) || is.null(x) || length(x)==0 ) return(TRUE) if(is.list(x)) return(all(sapply(x,invalid))) else if(is.vector(x)) return(all(is.na(x))) else return(FALSE) } eRm/R/IC.r0000744000176000001440000000005211572663323011712 0ustar ripleyusers`IC` <- function(object)UseMethod("IC") eRm/R/IC.ppar.r0000744000176000001440000000533611572663323012665 0ustar ripleyusersIC.ppar <- function(object) { #computes loglik, AIC, BIC, and cAIC for JML, MML, CML #object of class ppar #---------- full likelihood ---------- X <- object$X if (length(object$pers.ex) > 0) X01 <- object$X01[-object$pers.ex,] else X01 <- object$X01 mt_vek <- apply(X,2,max,na.rm=TRUE) #number of categories - 1 for each item mt_ind <- rep(1:length(mt_vek),mt_vek) mt_seq <- sequence(mt_vek) gmemb <- object$gmemb pmat <- pmat(object) pmat.l0 <- tapply(1:length(mt_ind),mt_ind, function(ind) { #expand pmat for 0-th category vec0 <- 1-rowSums(as.matrix(pmat[,ind])) #prob for 0th category cbind(vec0,pmat[,ind]) }) pmat0 <- matrix(unlist(pmat.l0),nrow=length(gmemb)) #X01 matrix 0th category included X01.l0 <- tapply(1:length(mt_ind), mt_ind, function(ind) { #expand X01 for 0-th category vec0 <- 1-rowSums(as.matrix(X01[,ind])) #prob for 0th category cbind(vec0,X01[,ind]) }) X010 <- matrix(unlist(X01.l0),nrow=length(gmemb)) #X01 matrix 0th category included loglik.full <- sum(log(na.exclude(pmat0[X010 == 1]))) #vector of "observed" solving probabilities N.ex <- dim(object$X.ex)[1] #number of persons (excluded) npar.full <- (dim(object$W)[2])+sum(object$npar) #number of item + person parameters AIC.full <- -2*loglik.full + 2*npar.full BIC.full <- -2*loglik.full + log(N.ex)*npar.full cAIC.full <- -2*loglik.full + log(N.ex)*npar.full + npar.full fullvec <- c(loglik.full, npar.full, AIC.full, BIC.full, cAIC.full) #------------ MML ----------- N <- dim(object$X)[1] rv <- rowSums(object$X, na.rm = TRUE) #person raw scores npar.mml <- 
(dim(object$W)[2])#+(length(table(rv))) lmml <- sum(table(rv)*log(table(rv)/N))+object$loglik.cml #MML likelihood AIC.mml <- -2*lmml + 2*npar.mml BIC.mml <- -2*lmml + log(N)*npar.mml cAIC.mml <- -2*lmml + log(N)*npar.mml + npar.mml mmlvec <- c(lmml, npar.mml, AIC.mml, BIC.mml, cAIC.mml) #------------- CML --------------- npar.cml <- dim(object$W)[2] lcml <- object$loglik.cml AIC.cml <- -2*lcml + 2*npar.cml BIC.cml <- -2*lcml + log(N)*npar.cml cAIC.cml <- -2*lcml + log(N)*npar.cml + npar.cml cmlvec <- c(lcml, npar.cml, AIC.cml, BIC.cml, cAIC.cml) ICtable <- rbind(fullvec, mmlvec, cmlvec) rownames(ICtable) <- c("joint log-lik", "marginal log-lik", "conditional log-lik") colnames(ICtable) <- c("value", "npar", "AIC", "BIC", "cAIC") result <- list(ICtable = ICtable) class(result) <- "ICr" result } eRm/R/IC.default.R0000744000176000001440000000024311572663323013277 0ustar ripleyusers`IC.default` <- function(object) # error message for using incorrect object { stop("IC() requires object of class 'ppar', obtained from person.parameter()") } eRm/R/hoslem.R0000744000176000001440000000216011572663323012650 0ustar ripleyusershoslem <- function(object, groups.hl = 10, pi.hat) { # computes the Hosmer-Lemeshow test for objects of class "ppar" # groups.hl ... 
number of groups for percentile splitting K <- dim(object$X)[2] N <- dim(object$X.ex)[1] #Pi <- pmat(object) #expected values if (length(object$pers.ex) > 0) { y <- as.vector(t(object$X[-object$pers.ex,])) #observed values } else { y <- as.vector(t(object$X)) } pi.hat <- as.vector(t(pi.hat)) cutpoints <- quantile(pi.hat, probs = seq(0, 1, 1/groups.hl)) #perzentiles groupvec <- cut(pi.hat, cutpoints, include.lowest = TRUE, labels = 1:groups.hl) #recode ph.hat o.g <- tapply(y, groupvec, sum) #number of 1-responses in group n.g <- table(groupvec) #number of responses in group pi.mean <- tapply(pi.hat, groupvec, mean) #average response probabilites value <- sum((o.g - n.g*pi.mean)^2/(n.g *pi.mean*(1-pi.mean))) #HM-test statistic df <- groups.hl - 2 p.value <- 1 - pchisq(value, df) result <- list(value = value, df = df, p.value = p.value) result } eRm/R/gofIRT.R0000744000176000001440000000011711572663323012513 0ustar ripleyusersgofIRT <- function(object, groups.hl = 10, cutpoint = 0.5)UseMethod("gofIRT") eRm/R/gofIRT.ppar.R0000744000176000001440000000540411572663323013460 0ustar ripleyusersgofIRT.ppar <- function(object, groups.hl = 10, cutpoint = 0.5) { #S3 method for computing 3 deviances and hosmer-lemeshow test #object ... object of class ppar #ngroups.hl ... 
number of percentile groups for Hosmer-Lemeshow Test if (max(object$X, na.rm = TRUE) > 1) stop("Tests for polytomous models not implemented yet!") if (any(is.na(object$X))) stop("Test for data with missings not implemented yet!") pi.hat <- pmat(object) groups.cldev <- "rawscore" #---------------- compute test statistics ---------------------------- res.cl <- unlist(cldeviance(object, groups.gr = groups.cldev, pi.hat = pi.hat)) res.hl <- unlist(hoslem(object, groups.hl = groups.hl, pi.hat = pi.hat)) res.rost <- unlist(rostdeviance(object)) res.cw <- unlist(cwdeviance(object, pi.hat)) res.table <- rbind(res.cl, res.hl, res.rost, res.cw) colnames(res.table) <- c("value","df","p-value") rownames(res.table) <- c("Collapsed Deviance", "Hosmer-Lemeshow", "Rost Deviance", "Casewise Deviance") #------------------- end test statistics ---------------------------- #---------------------- R-squared ----------------------------------- res.r2 <- Rsquared(object, pi.hat = pi.hat) #---------------------- end R-squared ------------------------------- #--------------------------- classifier stuff ----------------------- pred.X <- predict(object, cutpoint = cutpoint) #predicted data matrix observed <- as.vector(object$X.ex) predicted <- as.vector(pred.X) confmat <- table(predicted, observed) accuracy <- sum(diag(confmat))/sum(confmat) sens <- as.vector((confmat[2,2])/(colSums(confmat)[2])) spez <- as.vector((confmat[1,1])/(colSums(confmat)[1])) cl.list <- list(confmat = confmat, accuracy = accuracy, sensitivity = sens, specificity = spez) probvec <- as.vector(pi.hat) rocpr.res <- prediction(probvec[!is.na(probvec)], observed[!is.na(observed)]) roc.res <- performance(rocpr.res, "tpr","fpr") #produce ROC output spezvec <- 1-(roc.res@x.values[[1]]) #vector of specificities (different cuts) sensvec <- roc.res@y.values[[1]] #vector of sensitivities (different cuts) cutvec <- roc.res@alpha.values[[1]] #vector with thresholds sscmat <- cbind(cutvec, sensvec - 
spezvec)[order(abs(sensvec-spezvec), decreasing = FALSE),] thresh.opt <- mean(sscmat[1:2,1]) auc.all <- performance(rocpr.res, "auc") #area under ROC auc.res <- auc.all@y.values[[1]] gini <- (2*auc.res)-1 #----------------------- end classifier ---------------------------------- result <- list(test.table = res.table, R2 = res.r2, classifier = cl.list, AUC = auc.res, Gini = gini, ROC = roc.res, opt.cut = thresh.opt, predobj = rocpr.res) class(result) <- "gof" result } eRm/R/fitcml.R0000744000176000001440000001014411572663323012640 0ustar ripleyusers`fitcml` <- function (mt_ind,nrlist,x_mt,rtot,W,ngroups,gind,x_mtlist,NAstruc,g_NA,st.err,etaStart,gby) { #cml function for call in nlm cml <- function(eta) { beta <- as.vector(W%*%eta) #FIXME!!! gby?? beta.list <- split(beta,gind) #gind index for treatment groups beta.list1 <- beta.list #beta and NAstructure (over Groups): 1st line parameter values, 2nd line which item NA betaNA <- mapply(function(x,y) {rbind(x,y)},beta.list1,NAstruc,SIMPLIFY=FALSE) #likelihood term based on gamma functions for each Group x NAgroup combination Lg <- lapply(betaNA, function(betaNAmat) { beta.vec <- betaNAmat[1,] #get parameter vector beta #gamma functions for each NAgroup within Groups Lg.NA <- apply(matrix(betaNAmat[-1,],ncol=length(beta.vec)),1, function(NAvec) { #list of virtual item-category parameters per item beta_list <- as.list(split(beta.vec[NAvec==1],mt_ind[1:(length(beta.vec[NAvec==1]))])) parlist <- lapply(beta_list,exp) #initial epsilon as list #------------------gamma functions---------------------- g_iter <- NULL #computation of the gamma functions K <- length(parlist) for (t in 1:(K-1)) { #building up J1,...,Jt,...,Js if (t==1) { #first iteration step gterm <- c(1,parlist[[t]]) #0th element included }else { gterm <- g_iter #gamma previous iteration with 0th el g_iter <- NULL } parvek <- c(1,parlist[[t+1]]) #eps vector in current iteration with 0th el h <- length(parvek) #dimensions for matrix mt <- length(gterm) rtot1 <- 
h+mt-1 #number of possible raw scores (0 included) gtermvek <- rep(c(gterm,rep(0,h)),h) #building up matrix for gamma term gtermvek <- gtermvek[-((length(gtermvek)-h+1):length(gtermvek))] #eliminating last h 0's gmat <- matrix(gtermvek,nrow=rtot1,ncol=h) emat <- matrix(rep(parvek,rep(rtot1,h)),ncol=h,nrow=rtot1) #building up matrix for eps term gmat_new <- gmat*emat #merge matrices g_iter <- rowSums(gmat_new) #gamma functions in current iteration are rowsums } #----------------- end gamma functions ------------------ Lg.NA <- as.vector(g_iter[2:(rtot+1)]) #final gamma vector stored in gamma (without gamma0) return(Lg.NA) }) }) #----------------- compute likelihood components ----------------------- L1 <- sum(mapply(function(x,z) { x[!is.na(z)]%*%na.exclude(z) },nrlist,lapply(Lg,log))) #sum up L1-terms (group-wise) L2 <- sum(mapply("%*%",x_mtlist,beta.list1)) #sum up L2-terms (group-wise) L1-L2 #final likelihood value } #----------------- end likelihood ----------------------- eta <- etaStart #starting values for eta parameters err<-try(exists(fitctrl), TRUE) # check if fitctrl is defined if(class(err)=="try-error") fitctrl <- "nlm" # if undefined set it to "nlm" if(fitctrl=="nlm"){ options(warn=-1) #turn off warnings for NA/Inf fit <- nlm(cml,eta,hessian=st.err,iterlim=5000) #NLM optimizer } else if(fitctrl=="optim"){ options(warn=0) fit <- optim(eta,cml,method="BFGS",hessian=TRUE,control=list(maxit=5000)) fit$counts<-fit$counts[1] names(fit)<-c("estimate","minimum","iterations","code","message","hessian") } else stop("optimizer misspecified in fitctrl\n") fit } eRm/R/datprep_RSM.R0000744000176000001440000000632111572663323013544 0ustar ripleyusers`datprep_RSM` <- function(X,W,sum0) { #... 
X: data matrix with response categories to be converted into 0/1 matrix max.it <- apply(X,2,max,na.rm=TRUE) #RSM check for equal number of categories if (length(table(max.it)) > 1) stop("RSM can not be computed since number of categories are not the same for each item!\n") N <- dim(X)[1] #number of persons K <- dim(X)[2] #number of items hmax <- max(X,na.rm=TRUE) #highest category mt_vek <- rep(hmax,K) #vector with number of categories - 1 for each item mt_vek_0 <- mt_vek+1 #number of categories for each item X01_0 <- matrix(rep(0,(N*sum(mt_vek_0))),nrow=N) #empty 0/1 matrix K <- length(mt_vek) cummt0 <- c(0,cumsum(mt_vek_0)[1:(K-1)])+1 #index vector for 0th category indmatp <- apply(X,1,function(xi) {xi+cummt0}) #preparing index matrix for 1 responses imp1 <- as.vector(indmatp) imp2 <- rep(1:N,rep(K,N)) indmat <- cbind(imp2,imp1) #final index matrix for 1 responses X01_0[indmat] <- 1 #0/1 matrix with 0th category NAindmat <- rbind(imp2,rep(1:K,N),c(t(X))) #impose NA structure rownames(NAindmat) <- NULL NAind <- t(NAindmat[1:2,is.na(NAindmat[3,])]) #index matrix for NA's in X if (length(NAind) > 0) { NAindlist <- apply(NAind,1,function(x){ co <- seq(cummt0[x[2]],cummt0[x[2]]+mt_vek[x[2]]) NAind01 <- cbind(rep(x[1],length(co)),co) data.frame(NAind01,row.names=NULL) #list with NA indices }) indmatNA <- matrix(unlist(lapply(NAindlist, function(x) {t(as.matrix(x))})),ncol=2,byrow=TRUE) #matrix with NA indices X01_0[indmatNA] <- NA } X01 <- X01_0[,-cummt0] #delete 0-category answers --> final 0/1 pattern matrix (dim N*sum(mt_vek)) #automatized generation of the design matrix W if (length(W)==1) { e_it <- gl(K,hmax) #factor for item parameters e_cat <- gl(hmax,1,K*hmax) #factor for category par if (sum0) { Xm <- model.matrix(~e_it+e_cat)[,-1] #dummy coding Xm[1:hmax,1:(K-1)] <- -1 #first item to be sum0 normalized } else { Xm <- model.matrix(~e_it+e_cat)[,-1] #design matrix with 0/1 contrasts (without intercept) } catvek <- 1:hmax #preparing the item design vectors 
e_itnew <- catvek*Xm[,1:(K-1)] Xm[,1:(K-1)] <- e_itnew W <- Xm #final design matrix colnames(W) <- NULL rownames(W) <- NULL } list(X=X,X01=X01,mt_vek=mt_vek,W=W) #Output: X01 ... 0/1 response matrix of dimension N*rtot # mt_vek ... vector of length K with number of categories - 1 (for each item) # W ... design matrix of dimension sum(mt_vek)*((K-1)+(hmax-1)) } eRm/R/datprep_RM.R0000744000176000001440000000177011572663323013424 0ustar ripleyusers`datprep_RM` <- function(X,W,sum0) #prepares data matrix for Rasch model { X01 <- X #X is already X(0,1) mt_vek <- rep(1,dim(X01)[2]) #number of categories for each item K <- length(mt_vek) #automatized generation of the design matrix W if (length(W)==1) { W1 <- diag(1,(K-1)) #build up design matrix if (sum0) { w1 <- rep(-1,(K-1)) #sum0 restriction } else { w1 <- rep(0,(K-1)) #first item parameter set to 0 } W <- rbind(w1,W1) #RM design matrix colnames(W) <- NULL rownames(W) <- NULL } list(X=X,X01=X01,mt_vek=mt_vek,W=W) #Output: X01 ... 0/1 response matrix of dimension N*rtot # mt_vek ... 1-vector of length K # W ... design matrix of dimension K*K } eRm/R/datprep_PCM.R0000744000176000001440000000511311572663323013520 0ustar ripleyusers`datprep_PCM` <- function(X,W,sum0) { #... X: data matrix with response categories to be converted into 0/1 matrix #TFrow <- (rowSums(X)==0) #el. 
persons with 0/K rawscore #X <- X[!TFrow,] #converting into 0/1 matrix N <- dim(X)[1] #number of persons mt_vek <- apply(X,2,max,na.rm=TRUE) #number of categories - 1 for each item mt_vek_0 <- mt_vek+1 #number of categories for each item X01_0 <- matrix(rep(0,(N*sum(mt_vek_0))),nrow=N)#empty 0/1 matrix K <- length(mt_vek) #number of items cummt0 <- c(0,cumsum(mt_vek_0)[1:(K-1)])+1 #index vector for 0th category indmatp <- apply(X,1,function(xi) {xi+cummt0}) #preparing index matrix for 1 responses imp1 <- as.vector(indmatp) imp2 <- rep(1:N,rep(K,N)) indmat <- cbind(imp2,imp1) #final index matrix for 1 responses X01_0[indmat] <- 1 #0/1 matrix with 0th category NAindmat <- rbind(imp2,rep(1:K,N),c(t(X))) #impose NA structure rownames(NAindmat) <- NULL NAind <- t(NAindmat[1:2,is.na(NAindmat[3,])]) #index matrix for NA's in X if (length(NAind) > 0) { NAindlist <- apply(NAind,1,function(x){ co <- seq(cummt0[x[2]],cummt0[x[2]]+mt_vek[x[2]]) NAind01 <- cbind(rep(x[1],length(co)),co) data.frame(NAind01,row.names=NULL) #list with NA indices }) indmatNA <- matrix(unlist(lapply(NAindlist, function(x) {t(as.matrix(x))})),ncol=2,byrow=TRUE) #matrix with NA indices X01_0[indmatNA] <- NA } X01 <- X01_0[,-cummt0] #delete 0-category answers --> final 0/1 pattern matrix (dim N*sum(mt_vek)) #automatized generation of the design matrix W if (length(W)==1) { W1 <- diag(1,(sum(mt_vek)-1)) #build up design matrix if (sum0) { w1 <- rep(-1,(sum(mt_vek)-1)) #sum0 restriction } else { w1 <- rep(0,(sum(mt_vek)-1)) #first item parameter set to 0 } W <- rbind(w1,W1) #PCM design matrix colnames(W) <- NULL rownames(W) <- NULL } list(X=X,X01=X01,mt_vek=mt_vek,W=W) #Output: X01 ... 0/1 response matrix of dimension N*rtot # mt_vek ... vector of length K with number of categories - 1 (for each item) # W ... 
design matrix of dimension sum(mt_vek)*sum(mt_vek) } eRm/R/datprep_LRSM.R0000744000176000001440000001066411572663323013665 0ustar ripleyusers`datprep_LRSM` <- function(X,W,mpoints,Groups,sum0) { #TFrow <- (rowSums(X)==0) #el. persons with 0 rawscore #X <- X[!TFrow,] ngroups <- max(Groups) #number of groups N <- dim(X)[1] #number of persons K <- dim(X)[2]/mpoints #number of items hmax <- max(X,na.rm=TRUE) #highest category mt_vek <- rep(hmax,K) #number of categories - 1 for each item mt_vek_0 <- mt_vek+1 #number of categories for each item X01_0 <- matrix(rep(0,(N*sum(mt_vek_0)*mpoints)),nrow=N) #empty 0/1 matrix K1 <- dim(X)[2] cummt0 <- c(0,cumsum(rep(mt_vek_0,mpoints))[1:(K1-1)])+1 #index vector for 0th category indmatp <- apply(X,1,function(xi) {xi+cummt0}) #preparing index matrix for 1 responses imp1 <- as.vector(indmatp) imp2 <- rep(1:N,rep(K1,N)) indmat <- cbind(imp2,imp1) #final index matrix for 1 responses X01_0[indmat] <- 1 #0/1 matrix with 0th category d1 <- 1:N d2 <- 1:K1 coor <- expand.grid(d2,d1)[,c(2:1)] #X coordinates resvec <- as.vector(t(X)) #X as vector (rowwise) NAind <- as.matrix(coor[is.na(resvec),]) #index matrix for NA's in X mt_vek.t <- rep(mt_vek,mpoints) if (length(NAind) > 0) { NAindlist <- apply(NAind,1,function(x){ co <- seq(cummt0[x[2]],cummt0[x[2]]+mt_vek.t[x[2]]) NAind01 <- cbind(rep(x[1],length(co)),co) rownames(NAind01) <- NULL data.frame(NAind01,row.names=NULL) #list with NA indices }) indmatNA <- matrix(unlist(lapply(NAindlist, function(x) {t(as.matrix(x))})),ncol=2,byrow=TRUE) #matrix with NA indices X01_0[indmatNA] <- NA } X01 <- X01_0[,-cummt0] #automatized generation of the design matrix W if (length(W)==1) { #generating design matrix e_it <- gl(K,hmax) #factor for item parameters e_cat <- gl(hmax,1,K*hmax) #factor for category par if (sum0) { Xm <- model.matrix(~e_it+e_cat)[,-1] #dummy coding Xm[1:hmax,1:(K-1)] <- -1 #first item to be sum0 normalized } else { Xm <- model.matrix(~e_it+e_cat)[,-1] #design matrix with 0/1 
contrasts (without intercept) } catvek <- 1:hmax #preparing the item design vectors e_itnew <- catvek*Xm[,1:(K-1)] Xm[,1:(K-1)] <- e_itnew W11 <- Xm #first part (same as RSM) without virtual items ZW <- dim(W11)[1] W1 <- NULL for (i in 1:(mpoints*ngroups)) W1 <- rbind(W1,W11) #first part with virtual items if (mpoints > 1) { #more than 1 measurement points if (ngroups > 1) { #more than 1 group/more mpoints t_mp1 <- rep(1:mpoints,rep(ZW*ngroups,mpoints)) t_mp <- factor(t_mp1) g_ng1 <- rep(rep(1:ngroups,rep(ZW,ngroups)),mpoints) g_ng <- factor(g_ng1) W2 <- model.matrix(~t_mp+g_ng)[,-1] #main effects g and mp W2[1:(ZW*ngroups),] <- 0 #remove main effects for the first test occasion } else { #1 group/more mpoints mp <- gl(mpoints,ZW) #factor for measurement points W2 <- model.matrix(~mp)[,-1] } } else if (ngroups > 1) { #1 mpoint/more groups g <- gl(ngroups,ZW) W2 <- model.matrix(~g)[,-1] warning("Group contrasts without repeated measures can not be estimated!") } else if (ngroups == 1) W2 <- NULL #1 mpoint/1 group contr <- W2*catvek #imposing item categories if (is.matrix(contr)==TRUE) { contrrow <- apply(contr,1,function(x) {x*1:dim(contr)[2]}) #imposing multiplicative factor over time & group contrasts W <- cbind(W1,t(contrrow)) #design matrix completed } else {W <- cbind(W1,contr)} colnames(W) <- NULL rownames(W) <- NULL } list(X=X,X01=X01,mt_vek=mt_vek,W=W) } eRm/R/datprep_LPCM.R0000744000176000001440000000745511572663323013647 0ustar ripleyusers`datprep_LPCM` <- function(X,W,mpoints,Groups,sum0) { #TFrow <- (rowSums(X)==0) #el. 
persons with 0 rawscore #X <- X[!TFrow,] ngroups <- max(Groups) N <- dim(X)[1] #number of persons K <- dim(X)[2]/mpoints #number of items mt_vek <- apply(X,2,max,na.rm=TRUE)[1:K] #number of categories - 1 for each item mt_vek_0 <- mt_vek+1 #number of categories for each item X01_0 <- matrix(rep(0,(N*sum(mt_vek_0)*mpoints)),nrow=N) #empty 0/1 matrix K1 <- dim(X)[2] cummt0 <- c(0,cumsum(rep(mt_vek_0,mpoints))[1:(K1-1)])+1 #index vector for 0th category indmatp <- apply(X,1,function(xi) {xi+cummt0}) #preparing index matrix for 1 responses imp1 <- as.vector(indmatp) imp2 <- rep(1:N,rep(K1,N)) indmat <- cbind(imp2,imp1) #final index matrix for 1 responses X01_0[indmat] <- 1 #0/1 matrix with 0th category d1 <- 1:N d2 <- 1:K1 coor <- expand.grid(d2,d1)[,c(2:1)] #X coordinates resvec <- as.vector(t(X)) #X as vector (rowwise) NAind <- as.matrix(coor[is.na(resvec),]) #index matrix for NA's in X mt_vek.t <- rep(mt_vek,mpoints) if (length(NAind) > 0) { NAindlist <- apply(NAind,1,function(x){ #x <- unlist(x) co <- seq(cummt0[x[2]],cummt0[x[2]]+mt_vek.t[x[2]]) NAind01 <- cbind(rep(x[1],length(co)),co) rownames(NAind01) <- NULL data.frame(NAind01,row.names=NULL) #list with NA indices }) indmatNA <- matrix(unlist(lapply(NAindlist, function(x) {t(as.matrix(x))})),ncol=2,byrow=TRUE) #matrix with NA indices X01_0[indmatNA] <- NA } X01 <- X01_0[,-cummt0] #automatized generation of the design matrix W if (length(W)==1) { W11diag <- diag(1,(sum(mt_vek)-1)) #build up design matrix if (sum0) { w110 <- rep(-1,(sum(mt_vek)-1)) #sum0 restriction } else { w110 <- rep(0,(sum(mt_vek)-1)) #first item category parameter set to 0 } W11 <- rbind(w110,W11diag) #PCM design matrix ZW <- dim(W11)[1] W1 <- NULL for (i in 1:(mpoints*ngroups)) W1 <- rbind(W1,W11) #first part with virtual items if (mpoints > 1) { #more than 1 measurement points if (ngroups > 1) { #more than 1 group/more mpoints t_mp1 <- rep(1:mpoints,rep(ZW*ngroups,mpoints)) t_mp <- factor(t_mp1) g_ng1 <- 
rep(rep(1:ngroups,rep(ZW,ngroups)),mpoints) g_ng <- factor(g_ng1) W2 <- model.matrix(~t_mp+g_ng)[,-1] #main effects g and mp W2[1:(ZW*ngroups),] <- 0 #remove main effects for the first test occasion } else { #1 group/more mpoints t_mp <- gl(mpoints,ZW) #factor for measurement points W2 <- model.matrix(~t_mp)[,-1] } } else if (ngroups > 1) { #1 mpoint/more groups g_ng <- gl(ngroups,ZW) W2 <- model.matrix(~g_ng)[,-1] warning("Group contrasts without repeated measures can not be estimated!") } else if (ngroups == 1) W2 <- NULL #1 mpoint/1 group catvek <- sequence(mt_vek) W2_cat <- W2*catvek #imposing item categories W <- cbind(W1,W2_cat) #design matrix completed colnames(W) <- NULL rownames(W) <- NULL } list(X=X,X01=X01,mt_vek=mt_vek,W=W) } eRm/R/datprep_LLTM.R0000744000176000001440000000460311572663323013654 0ustar ripleyusers`datprep_LLTM` <- function(X,W,mpoints,Groups,sum0) { # Design matrix see Fischer & Molenaar, p. 159 #TFrow <- (rowSums(X)==0 | rowSums(X)==(dim(X)[2])) #el. persons with 0/K rawscore #X <- X[!TFrow,] ngroups <- max(Groups) X01 <- X N <- dim(X)[1] #number of persons K <- dim(X)[2]/mpoints #number of items mt_vek <- rep(1,K) #automatized generation of the design matrix W if (length(W)==1) { W11diag <- diag(1,(sum(mt_vek)-1)) #build up design matrix if (sum0) { w110 <- rep(-1,(sum(mt_vek)-1)) #sum0 restriction } else { w110 <- rep(0,(sum(mt_vek)-1)) #first item category parameter set to 0 } W11 <- rbind(w110,W11diag) #RM design matrix ZW <- dim(W11)[1] W1 <- NULL for (i in 1:(mpoints*ngroups)) W1 <- rbind(W1,W11) #first part with virtual items if (mpoints > 1) { #more than 1 measurement points if (ngroups > 1) { #more than 1 group/more mpoints t_mp1 <- rep(1:mpoints,rep(ZW*ngroups,mpoints)) t_mp <- factor(t_mp1) g_ng1 <- rep(rep(1:ngroups,rep(ZW,ngroups)),mpoints) g_ng <- factor(g_ng1) W2 <- model.matrix(~t_mp+g_ng)[,-1] #main effects g and mp W2[1:(ZW*ngroups),] <- 0 #remove main effects for the first test occasion } else { #1 group/more mpoints 
t_mp <- gl(mpoints,ZW) #factor for measurement points W2 <- model.matrix(~t_mp)[,-1] } } else if (ngroups > 1) { #1 mpoint/more groups g_ng <- gl(ngroups,ZW) W2 <- model.matrix(~g_ng)[,-1] warning("Group contrasts without repeated measures can not be estimated!") } else if (ngroups == 1) W2 <- NULL #1 mpoint/1 group W <- cbind(W1,W2) colnames(W) <- NULL rownames(W) <- NULL } list(X=X,X01=X01,mt_vek=mt_vek,W=W) #Output: X01 ... 0/1 response matrix of dimension N*rtot # mt_vek ... vector of length K with number of categories - 1 (for each item) # W ... design matrix of dimension (K*T)*((K-1)*(T-1)+1) } eRm/R/datcheck.R0000744000176000001440000001244111572663323013132 0ustar ripleyusers`datcheck` <- function(X,W,mpoints,groupvec,model) { if (is.data.frame(X)) {X <- as.matrix(X)} #X as data frame allowed if (is.null(colnames(X))) { #determine item names if (mpoints > 1) { mpind <- paste("t",rep(1:mpoints,each=(dim(X)[2]/mpoints),1),sep="") #time points itemind <- paste("I",1:(dim(X)[2]/mpoints),sep="") colnames(X) <- paste(itemind,mpind) } else { colnames(X) <- paste("I",1:dim(X)[2],sep="") #item labels }} if (is.null(rownames(X))) rownames(X) <- paste("P",1:dim(X)[1],sep="") #person labels #----------------------- check groupvec -------------------------- if ((length(groupvec) > 1) && (length(groupvec) != dim(X)[1])) { stop("Wrong specification of groupvec!")} if (min(groupvec)!=1) { stop("Group specification must start with 1!")} if (length(unique(groupvec))!=(max(groupvec))) { stop("Group vector is incorrectly specified (perhaps a category is missing)!")} # rh 2011-03-03 if ((max(groupvec) > 1) && (mpoints==1)) { stop("Model not identifiable! 
Group contrasts can only be imposed for repeated measurement designs.") } # if ((length(groupvec) > 1) && any(is.na(X))) { # stop("Model with repeated measures, group specification and NAs cannot be computed!") } #----------------------- check X -------------------------------- allna.vec <- apply(X,2,function(y) {all(is.na(y))}) #eliminate items with all NA's if (any(allna.vec)) {stop("There are items with full NA responses which must be deleted!")} allna.vec <- apply(X,1,function(y) {all(is.na(y))}) #eliminate items with all NA's if (any(allna.vec)) {stop("There are persons with full NA responses which must be deleted!")} allna.vec <- apply(X,1,function(y) {sum(is.na(y))}) if (any(allna.vec == (dim(X)[2]-1))) {stop("Subjects with only 1 valid response must be removed!")} ri.min <- apply(X,2,min,na.rm=TRUE) #if no 0 responses if (any(ri.min > 0)) { cat("Warning message: The following items have no 0-responses: \n") cat(colnames(X)[ri.min>0],sep=", ") cat("\n") cat("Responses are shifted such that lowest category is 0. \n") cat("\n") } X <- t(apply(X,1,function(y) {y-ri.min})) #shift down to 0 ri <- apply(X,2,sum,na.rm=TRUE) #item raw scores n.NA <- colSums(apply(X,2,is.na)) #number of NA's per column maxri <- (dim(X)[1]*(apply(X,2,max,na.rm=TRUE)))-n.NA #maximum item raw scores with NA TFcol <- ((ri==maxri) | (ri==0)) X.n <- X[,!TFcol] #new matrix with excluded items item.ex <- (1:dim(X)[2])[TFcol] #excluded items if (length(item.ex) > 0) { if (mpoints == 1) { cat("Warning message: The following items were excluded due to complete 0/full responses: \n") cat(colnames(X)[item.ex],sep=", ") cat("\n") } else { cat("The following items show complete 0/full responses: \n") cat(colnames(X)[item.ex],sep=", ") cat("\n") stop("Estimation cannot be performed! Delete the correponding items for the other measurement points as well! 
\n") }} if ((model=="PCM") || (model=="LPCM")) { #check if there are missing categories for PCM (for RSM doesn't matter) tablist <- apply(X,2,function(x) list(as.vector(table(x)))) tablen <- sapply(tablist,function(x) length(x[[1]])) xmax <- apply(X,2,max)+1 indwrong <- which(tablen != xmax) if (length(indwrong) > 0) { cat("The following items do not have responses on each category: \n") cat(colnames(X)[indwrong],sep=", ") cat("\n") cat("Warning message: Estimation may not be feasible. Please check data matrix! \n") cat("\n") } } #-------------------------- ill conditioned for RM and LLTM -------------- if ((model=="RM") || (model=="LLTM")) { if (length(table(X.n)) != 2) stop("Dichotomous data matrix required!") k.t <- dim(X.n)[2]/mpoints #check for each mpoint separately t.ind <- rep(1:mpoints,1,each=k.t) X.nlv <- split(t(X.n),t.ind) #split X due to mpoints cn.lv <- split(colnames(X.n),t.ind) X.nl <- lapply(X.nlv,matrix,ncol=k.t,byrow=TRUE) for (i in 1:length(X.nl)) colnames(X.nl[[i]]) <- cn.lv[[i]] for (l in 1:mpoints) { #check within mpoint X.nll <- X.nl[[l]] k <- ncol(X.nll) adj <- matrix(0,nc=k,nr=k) for (i in 1:k) for(j in 1:k) { adj[i,j]<- 1*any(X.nll[,i]> X.nll[,j],na.rm=TRUE) } cd <- component.dist(adj, connected = "strong") cm <- cd$membership cmp <- max(cm) if(cmp>1) { cmtab <- table(cm) maxcm.n <- as.numeric(names(cmtab)[cmtab!=max(cmtab)]) suspcol <- (1:length(cm))[tapply(cm,1:length(cm),function(x) any(maxcm.n==x))] n.suspcol <- colnames(X.nll)[suspcol] cat("Suspicious items:",n.suspcol,"\n") stop("Estimation stopped due to ill-conditioned data matrix X!") } }} #----------------------- end ill-conditioned check ------------------------------- list(X=X.n,groupvec=groupvec) } eRm/R/datcheck.LRtest.r0000744000176000001440000000405411572663323014407 0ustar ripleyusersdatcheck.LRtest <- function(x, X, model) { #sanity checks for LRtest (internal function of LRtest.R) #x...submatrix (splitted with "splitcr" and called within Xlist) #X...original data matrix 
(from model fit) exclude <- NULL #vector with items to be excluded #----check full/0 responses------ n.NA <- colSums(apply(X,2,is.na)) #number of NA's per column maxri <- (dim(X)[1]*(apply(X,2,max,na.rm=TRUE)))-n.NA #maximum item raw scores with NA ri <- apply(x,2,sum,na.rm=TRUE) #item raw scores exclude <- c(exclude,which((ri==maxri) | (ri==0))) #----check full(-1) NA's--------- allna.vec <- apply(x,2,function(y) { naTF <- is.na(y) (sum(naTF) >= length(y-1)) }) exclude <- c(exclude,which(allna.vec)) #----minimum category = 0-------- ri.min <- apply(x,2,min,na.rm=TRUE) #if no 0 responses exclude <- c(exclude,which(ri.min!=0)) #----RSM-checks for same number of categories-------- if ((model == "RSM") || (model == "LRSM")) { highcat <- max(X, na.rm=TRUE) #highest category in original data highcat.sub <- apply(x,2,max,na.rm=TRUE) #RSM check for equal number of categories exclude <- c(exclude,which(highcat.sub != highcat)) } #---PCM checks for all categories responses--------- if ((model=="PCM") || (model=="LPCM")) { #check if there are missing categories for PCM (for RSM doesn't matter) cat.data <- apply(X,2,function(y) list(unique(na.exclude(y)))) #categories of orginal data cat.sub <- apply(x,2,function(y) list(unique(na.exclude(y)))) #categories of subgroup data catcomp <- mapply(function(y.s,y.d) { (length(y.s[[1]]) == (length(y.d[[1]]))) },cat.sub,cat.data) exclude <- c(exclude,which(!catcomp)) } return(unique(exclude)) #return vector with items to be eliminated }eRm/R/cwdeviance.r0000744000176000001440000000105111572663323013527 0ustar ripleyuserscwdeviance <- function(object, pi.hat) { # computes casewise deviance for objects of class ppar X <- object$X.ex loglik.full <- sum(X*log(pi.hat)+(1-X)*log(1-pi.hat), na.rm = TRUE) #for ordinary logistic regression npar.full <- (dim(object$W)[2])+sum(object$npar) #number of estimated item + person parameters npar.sat <- sum(nrow(pi.hat)*ncol(pi.hat)) value <- -2*loglik.full df <- npar.sat-npar.full p.value <- 
1-pchisq(value, df = df) result <- list(value = value, df = df, p.value = p.value) result }eRm/R/confint.threshold.r0000744000176000001440000000055211572663323015057 0ustar ripleyusersconfint.threshold <- function(object, parm, level = 0.95, ...) { #object of class "threshold" a <- (1 - level)/2 a <- c(a, 1 - a) pct <- paste(a*100,"%") fac <- qnorm(a) cf <- object$threshpar ses <- object$se.thresh dn <- names(object$threshpar) ci <- array(NA, dim = c(length(cf), 2), dimnames = list(dn,pct)) ci[] <- cf + ses %o% fac ci }eRm/R/confint.ppar.r0000744000176000001440000000077011572663323014027 0ustar ripleyusersconfint.ppar <- function(object, parm, level = 0.95, ...) { #parm...either "beta" or "eta" #object of class "ppar" a <- (1 - level)/2 a <- c(a, 1 - a) pct <- paste(a*100,"%") fac <- qnorm(a) cf <- object$thetapar ses <- object$se.theta ci <- list(NULL) for (i in 1:length(cf)) { ci[[i]] <- array(NA, dim = c(length(cf[[i]]), 2), dimnames = list(names(object$thetapar[[i]]),pct)) ci[[i]][] <- cf[[i]] + ses[[i]] %o% fac } names(ci) <- paste("NAgroup",1:length(ci),sep="") ci }eRm/R/confint.eRm.r0000744000176000001440000000100711572663323013602 0ustar ripleyusersconfint.eRm <- function(object, parm="beta", level = 0.95, ...) 
{ #parm...either "beta" or "eta" #object of class "eRm" a <- (1 - level)/2 a <- c(a, 1 - a) pct <- paste(a*100,"%") fac <- qnorm(a) if (parm=="eta") { cf <- object$etapar ses <- object$se.eta dn <- names(object$etapar) } if (parm=="beta") { cf <- object$betapar ses <- object$se.beta dn <- names(object$betapar) } ci <- array(NA, dim = c(length(cf), 2), dimnames = list(dn,pct)) ci[] <- cf + ses %o% fac ci } eRm/R/collapse_W.R0000744000176000001440000000061211572663323013451 0ustar ripleyuserscollapse_W <- function(W,listItems,newNames) { if(missing(newNames)) newNames <- paste("collapsedEffect",1:length(listItems),sep="") Wtmp1 <- W[,-unlist(listItems)] collapsed <- lapply(listItems, function(x) rowSums(W[,x])) Wtmp2 <- matrix(unlist(collapsed),ncol=length(collapsed)) Wout <- cbind(Wtmp1,Wtmp2) colnames(Wout) <- c(colnames(Wtmp1),newNames) Wout } eRm/R/coef.ppar.R0000744000176000001440000000017711572663323013244 0ustar ripleyusers`coef.ppar` <- function(object, ...) { x <- object$theta.table[,1] names(x) <- rownames(object$theta.table) x } eRm/R/coef.eRm.R0000744000176000001440000000037311572663323013023 0ustar ripleyusers`coef.eRm` <- function(object, parm = "beta", ...) 
{ # option "beta" added rh 2010-03-07 if(parm == "beta") object$betapar else if(parm == "eta") object$etapar else stop("'parm' incorrectly specified") } eRm/R/cmlprep.R0000744000176000001440000000765611572663323013042 0ustar ripleyusers`cmlprep` <- function(X01,mt_vek,mpoints,Groups,W,gmemb) { levs <- (gmemb-1)*max(Groups)+Groups #merge Groups and gmemb vector into level vector if (length(Groups)==1) { #if no group contrast x_mt <- colSums(X01,na.rm=TRUE) #item category raw scores as vector #eventuell x_mtlist auf NA gruppen aufbrechen x_mtlist <- list(x_mt) ngroups <- 1 } else { #if groups defined ngroups <- max(Groups) #number of groups x_mtlist <- by(X01,levs,colSums,na.rm=TRUE) #item-category raw scores for each group (as list) x_mtlist.G <- by(X01,Groups,colSums,na.rm=TRUE) #item-category raw scores for each group (as list) #FIXME!!! use x_mtlist?? x_mt <- as.vector(unlist(x_mtlist.G)) #as vector: g1|g2|... } end1 <- length(mt_vek)*mpoints*ngroups mt_ind <- rep(1:end1,rep(mt_vek,mpoints*ngroups)) #category index vector (for converting x_mt into list) x_tmt <- split(x_mt,mt_ind) #list for likelihood: item-wise * ngroups rtot <- sum(mt_vek)*mpoints ics <- rep(sequence(mt_vek),mpoints) #item category scores for each item as vector rv <- apply(X01,1,function(x) { #person raw scores of 0/1 matrix ics[!is.na(x)]%*%na.exclude(x)}) #--- preparing index vector for item parameters --- if (ngroups > 1) { #groups seglen <- sum(mt_vek) #length of beta vector (segment) gind <- rep(rep(1:ngroups,rep(seglen,ngroups)),mpoints) #parameter index vector for group extraction } else { gind <- rep(1,dim(W)[1]) } #--- preparing lists for person splits --- rvlist <- split(rv,levs) #split person raw scores due to levels (NAgroup AND treatment) nrlist <- lapply(rvlist,function(rvel) { #list with item raw score frequencies for each group (transposed) rvtab <- table(rvel) #raw score frequencies dnamevek <- as.numeric(unlist(dimnames(rvtab))) #different raw scores for 0 fill up nr <- rep 
(0,rtot+1) #setting 0 raw score frequencies nr[dnamevek+1] <- rvtab #vector with person raw scores from 1:rtot (with 0 fill up) nr <- nr[-1] return(nr) }) if ((ngroups > 1) && (length(unique(gmemb)) > 1)) { #NA groups AND Groups gg <- table(Groups,gmemb) #gg[gg > 0] <- 1 g_NA <- as.vector(rowSums(gg)) #How many NA-sub groups in each Group #grgm <- cbind(Groups, gmemb) #grgmst <- apply(grgm,1,function(x) { #merge indexes to characters # paste(x[1],x[2]) }) #GGind <- rank(unique(grgmst)) #levtab <- table(levs) #frequencies of levels #FIXME!!! following line wrong index #gby <- rep(GGind,levtab) #ordering by NAgroups nested in Group #this probably does the job gby <- levs } else { g_NA <- 1 gby <- gmemb } NAstruc <- by(!is.na(X01),gby,function(x) { #list of unique NA structures for each Group x.u <- unique(x) as.numeric(as.matrix(x.u))}) #NA's are coded with 0 NAcheck <- sapply(NAstruc,sum) #if for certain NAgroups only 1 item was presented list(x_mt=x_mt,mt_ind=mt_ind,x_tmt=x_tmt,rtot=rtot,nrlist=nrlist,gind=gind,x_mtlist=x_mtlist, NAstruc=NAstruc,g_NA=g_NA,gby=gby) } eRm/R/cldeviance.R0000744000176000001440000000526611572663323013470 0ustar ripleyuserscldeviance <- function(object, groups.gr = "rawscore", pi.hat) { # computes the collapsed deviance of # object of class ppar k <- dim(object$X)[2] #number of items N <- dim(object$X)[1] #number of persons (full) #----------- define group vector --------------- if (groups.gr == "rawscore") indvec.full <- rowSums(object$X, na.rm = TRUE) #person raw scores #if (groups.gr == "pattern") { #pattern-wise # X.string <- apply(object$X, 1, paste, collapse = "") # indvec.full <- rank(X.string, ties.method = "min") #} if (is.numeric(groups.gr)) { if (length(groups.gr) != dim(object$X)[1]) stop("Group vector must be of length N (number of subjects in object$X)!") indvec.full <- groups.gr } #---------- end define group vector ----------- #---- reduce group vector (pers.ex)------ if (length(object$pers.ex) > 0) { #persons eliminated 
y <- object$X[-object$pers.ex,] #observed values indvec.red <- indvec.full[-object$pers.ex] } else { y <- (object$X) indvec.red <- indvec.full } #pi.hat <- pmat(object) #gmemb.ext <- rep(object$gmemb, each = k) #gmemb extended to response vector #pi.hat <- as.vector(t(pmat(object))) #fitted values dev.g <- tapply(1:length(indvec.red), indvec.red, function(ii) { #D component for each group n.g <- length(ii) #number of group subjects y.g <- colSums(rbind(y[ii,])) #group responses pi.g <- rbind(pi.hat[ii,])[1,] #vector with fitted values devvec <- mapply(function(yy, pp) { #compute deviance for each item if ((yy > 0) && (yy < n.g)) { term1 <- yy*log(yy/(n.g*pp)) term2 <- (n.g-yy)*log((n.g-yy)/(n.g*(1-pp))) dev <- sign(yy-n.g*pp)*sqrt(2*(term1+term2)) } if (yy == 0) dev <- -sqrt(2*n.g*abs(log(1-pp))) if (yy == n.g) dev <- sqrt(2*n.g*abs(log(pp))) return(dev) },y.g, pi.g) return(sum(devvec^2)) #item-wise sum of squared devres }) value <- sum(dev.g) df <- (length(unique(indvec.red)))*k p.value <- 1-pchisq(value, df = df) result <- list(value = value, df = df, p.value = p.value) return(result) } eRm/R/checkdata.R0000744000176000001440000001224011572663323013270 0ustar ripleyusers# uses # component.dist # reachability # geodist # symmetrize # components.c # geodist.c # # from R package sna # function to check for ill-conditioned data in the RM # requires package sna ##checkdata<-function(x) ##{ ## k<-ncol(x) ## adj<-matrix(0,nc=k,nr=k) ## for (i in 1:k) for(j in 1:k) { ## adj[i,j]<- 1*any(x[,i]>x[,j],na.rm=TRUE) ## } ## ## #library(sna) ## #adj <- diag.remove(adj) ## # %print(adj) # adjacency marix ## cd <- component.dist(adj, connected = "strong") ## cm <- cd$membership ## cmp <- max(cm) ## ## ## if(cmp>1) { ## cat("Data:",deparse(substitute(x)),"are ill-conditioned\n") ## cat("Number of strong components",cmp,"\n") ## cat("Component membership of items: ",cm,"\n") ## } else ## cat("Data:",deparse(substitute(x)),"are well-conditioned\n") ##} ## 
###################################################### component.dist<- function (dat, connected = c("strong", "weak", "unilateral", "recursive")) { # dat <- as.sociomatrix.sna(dat) # if (is.list(dat)) # return(lapply(dat, component.dist, connected = connected)) # else if (length(dim(dat)) > 2) # return(apply(dat, 1, component.dist, connected = connected)) n <- dim(dat)[2] if (any(dat != t(dat))) dat <- switch(match.arg(connected), weak = symmetrize(dat, rule = "weak"), unilateral = reachability(dat), strong = symmetrize(reachability(dat), rule = "strong"), recursive = symmetrize(dat, rule = "strong")) # if (match.arg(connected) == "unilateral") # if (any(dat != t(dat))) # warning("Nonunique unilateral component partition detected in component.dist. Problem vertices will be arbitrarily assigned to one of their components.\n") membership <- rep(0, n) membership <- .C("component_dist_R", as.double(dat), as.double(n), membership = as.double(membership), PACKAGE="eRm")$membership o <- list() o$membership <- membership o$csize <- vector() for (i in 1:max(membership)) o$csize[i] <- length(membership[membership == i]) o$cdist <- vector() for (i in 1:n) o$cdist[i] <- length(o$csize[o$csize == i]) o } #reachability - Find the reachability matrix of a graph. 
reachability<-function(dat,geodist.precomp=NULL){ #Pre-process the raw input # dat<-as.sociomatrix.sna(dat) # if(is.list(dat)) # return(lapply(dat,reachability,geodist.precomp=geodist.precomp)) # else if(length(dim(dat))>2) # return(apply(dat,1,reachability,geodist.precomp=geodist.precomp)) # return(unlist(apply(dat,1,function(x,geodist.precomp){list(reachability(x, geodist.precomp=geodist.precomp))},geodist.precomp=geodist.precomp),recursive=FALSE)) #End pre-processing #Get the counts matrix if(is.null(geodist.precomp)) cnt<-geodist(dat)$counts else cnt<-geodist.precomp$counts #Dichotomize and return apply(cnt>0,c(1,2),as.numeric) } #geodist - Find the numbers and lengths of geodesics among nodes in a graph #using a BFS, a la Brandes (2000). (Thanks, Ulrik!) geodist<-function(dat,inf.replace=Inf){ #Pre-process the raw input # dat<-as.sociomatrix.sna(dat) # if(is.list(dat)) # return(lapply(dat,geodist,inf.replace=inf.replace)) # else if(length(dim(dat))>2) # return(apply(dat,1,geodist,inf.replace=inf.replace)) #End pre-processing n<-dim(dat)[2] #Initialize the matrices sigma<-matrix(0,nrow=n,ncol=n) gd<-matrix(Inf,nrow=n,ncol=n) #Perform the calculation geo<-.C("geodist_R",as.double(dat),as.double(n),gd=as.double(gd), sigma=as.double(sigma),NAOK=TRUE,PACKAGE="eRm") #Return the results o<-list() o$counts<-matrix(geo$sigma,n,n) o$gdist<-matrix(geo$gd,n,n) o$gdist[o$gdist==Inf]<-inf.replace #Patch Infs, if desired o } #symmetrize - Convert a graph or graph stack to a symmetric form. Current rules #for symmetrizing include "upper" and "lower" diagonals, "weak" connectedness #rule, and a "strong" connectedness rule. 
symmetrize<-function(mats,rule="weak"){ #Pre-process the raw input # mats<-as.sociomatrix.sna(mats) # if(is.list(mats)) # return(lapply(mats,symmetrize,rule=rule)) #End pre-processing #Build the input data structures # if(length(dim(mats))>2){ # m<-dim(mats)[1] # n<-dim(mats)[2] # o<-dim(mats)[3] # d<-mats # }else{ m<-1 n<-dim(mats)[1] o<-dim(mats)[2] d<-array(dim=c(1,n,o)) d[1,,]<-mats # } #Apply the symmetry rule for(i in 1:m){ if(rule=="upper"){ # temp<-d[i,,] # for(j in 1:n) # temp[j:n,j]<-temp[j,j:n] # d[i,,]<-temp # }else if(rule=="lower"){ # temp<-d[i,,] # for(j in 1:n) # temp[j,j:n]<-temp[j:n,j] # d[i,,]<-temp # }else if(rule=="weak"){ # d[i,,]<-matrix(as.numeric(d[i,,]|t(d[i,,])),nrow=n,ncol=o) }else if(rule=="strong"){ d[i,,]<-matrix(as.numeric(d[i,,]&t(d[i,,])),nrow=n,ncol=o) } } #Return the symmetrized matrix if(m==1) out<-d[1,,] else out<-d out } eRm/R/build_W.R0000744000176000001440000000201111572663323012741 0ustar ripleyusersbuild_W <- function(X,nitems,mpoints,grp_n,groupvec,itmgrps) { if(missing(grp_n)) grp_n<- table(groupvec) if(!is.numeric(grp_n)) stop("Please specify the number of subjects per group.") if(missing(nitems))stop("Please specify the number of items.") if(any(grp_n==0)) stop("There are groups with zero sample size.") if(missing(mpoints)) stop("Please specify the number of time points. If there are none, you might want to use PCM() or LPCM().") pplgrps <- length(grp_n) #builds the LLRA design matrix from scratch categos <- get_item_cats(X,nitems,grp_n) #trend effects design tr.des <- build_trdes(nitems,mpoints,pplgrps,categos) #tretment effects design gr.des <- build_effdes(nitems,mpoints,pplgrps,categos,groupvec) #category design if(length(unique(unlist(categos)))==1&&sum(unique(unlist(categos)))==1) return(cbind(gr.des,tr.des)) ct.des <- build_catdes(nitems,mpoints,pplgrps,categos) #all together now! 
des <-cbind(gr.des,tr.des,ct.des) des } eRm/R/anova.llra.R0000744000176000001440000000222611572663323013421 0ustar ripleyusersanova.llra <- function(object, ...) UseMethod("anova.llra") anova.llra.default <- function(object,...) { objets <- list(object, ...) isllra <- unlist(lapply(objets, function(x) "llra" %in% class(x))) ## checks if (!all(isllra)) { objets <- objets[isllra] warning("non-LLRA-model(s) removed") } dimdata <- dim(objets[[1L]]$X) samedata <- unlist(lapply(objets, function(x) all(dim(x$X)==dimdata))) if (!all(samedata)) stop("models were not all fitted to the same size of dataset") nmodels <- length(objets) logliks <- as.numeric(lapply(objets, function(x) x$loglik)) npars <- as.numeric(lapply(objets, function(x) x$npar)) nparsS <- npars[order(npars)] logliksS <- logliks[order(npars)] lrstat <-c(NA,2*abs(diff(logliksS))) dfs <- c(NA,abs(diff(nparsS))) ps <- 1-pchisq(lrstat,dfs) tbl <- data.frame(nparsS, logliksS, dfs, lrstat, ps) dimnames(tbl) <- list(1L:nmodels, c("Npar", "logLik", "df", "-2LR","Asymp.p-Value")) title <- "Analysis of Deviance Table\n" structure(tbl, heading = title, class = c("anova", "data.frame")) } eRm/NEWS0000744000176000001440000001103111572663323011531 0ustar ripleyusersChanges in Version 0.14-0 o new (wrapper) function LLRA for fitting linear logistic models with relaxed assumptions including utilities for preparing data (llra.datprep), setting up (build_W) and modifying (collapse_W) design matrices, comparing llra models (anova) and plotting results (plotTR and plotGR) o 'exact' version of the Martin-Loef test for binary items and arbitrary splits added as method to NPtest o in plotGOF confidence ellipses can now be drawn for subsets of items, optionally using different colours o new function plotDIF (by Kathrin Gruber): plots confidence intervals for item parameters estimated separately in subgroups, uses LR objects as input o adapted the MLoef function to work with polytomous data and more than two item groups o error 
checks in NPtest: (i) 0/full resposes for items meaningless for NPtest, (ii) group in method="T4" must be of type logical, (iii) specifying all items for T4 gives meaningless results. o warning regarding group assignment when using median split removed from LRtest and Waldtest o some modifications in plotPWmap: horizontal plotting, different default plotting symbols, option to change size of plotting symbols o bug in MLoef fixed (now using logs in calculating the person contributions) o eRm now depends on R >= 2.12.0 o Latin1 encoding removed o bug in plotICC (always same title) fixed Changes in Version 0.13-0 o LLTM, LRSM, and LPCM work now for repeated measurement designs with treatment groups and missing values. o Rename vignette to 'eRm'. Changes in Version 0.12-2 o new function plotPWmap to plot Bond-and-Fox style pathway maps for the data by Julian Gilbey. Since calculation of the t-statistics requires calculation of the kurtosis of the standardized residuals, according changes to itemfit.ppar, personfit.ppar, pifit.internal, print.ifit, and print.pfit. o plotPImap patched by Julian Gilbey: length of item.subset did not match the documentation, warning stars did not all appear, pre-calculated person.parameter data can be passed to the function via pp, mis-ordered items can be coloured. some minor bugs fixed. o the optimizer can be changed to optim using fitctrl<-"optim" and reset to nlm (the default) with fitctrl<-"nlm" o value of LRtest now countains the list fitobj which contains the model objects according to the subgroups specified by splitcr o MLoef no longer supports missings values Changes in Version 0.12-1 o function invalid from package gtools integrated into eRm eRm no longer depends on gtools Changes in Version 0.12-0 o for RM, RSM, and PCM: eta parameters are now diplayed as difficulty parameters print and summary methods changed accordingly o new labeling of eta parameters in RM, RSM, and PCM. 
they now are labeled according to the estimated parameters for items (RM), items + categories (RSM), items x categories (PCM) o function MLoef for Martin-Loef-Test added o df in personfit and itemfit corrected o the logLik functions now extract the log-likelhood and df into objects of class logLik.eRm and loglik.ppar with elements loglik and df. the corresponding print methods have been modified accordingly. o function coef.ppar to extract person parameter estimates added o option for beta parameters added to coef.eRm o in confint.eRm: default parm = "beta" o minor modifications in the help file for IC() o plotPImap: revised rug added, bug concerning item.subset fixed, minor modifications to enhance readability o minor modifications in plotjointICC: allows for main title and colors, option legpos = FALSE suppresses legends, dev.new removed, legend = FALSE produced incorrect labeling o minor modifications in plotICC: allows for main title and colors, default coloring with col = NULL instead of NA for compatibility, option legpos = FALSE suppresses legends, mplot is now FALSE if only one item is specified o plot.ppar: dev.new removed o option 'visible' in print.ifit und print.pfit to allow for avoiding overly long output and for extraction of infit and outfit values (maybe changed to a coef method later) o strwrap() for NPtest print methods to break long lines o new methods IC.default and pmat.default for enhanced error messages o lazy loading package and datafiles eRm/NAMESPACE0000744000176000001440000000376111572663323012264 0ustar ripleyusersuseDynLib(eRm) import("stats", "graphics") importFrom(Matrix, bdiag) export(RM) export(LLTM) export(RSM) export(LRSM) export(PCM) export(LPCM) export(LRtest) export(MLoef) export(itemfit) export(person.parameter) export(personfit) export(plotDIF) export(plotGOF) export(plotICC) export(plotjointICC) export(plotPImap) export(plotPWmap) export(pmat) export(Waldtest) export(IC) export(thresholds) export(sim.2pl) export(sim.rasch) 
export(sim.xdim) export(sim.locdep) export(stepwiseIt) export(gofIRT) export(NPtest) export(llra.datprep) export(build_W) export(collapse_W) export(LLRA) export(plotGR) export(plotTR) S3method(print, eRm) S3method(summary, eRm) S3method(summary, ppar) S3method(summary, LR) S3method(summary, MLoef) S3method(model.matrix, eRm) S3method(coef, eRm) S3method(coef, ppar) S3method(vcov, eRm) S3method(print, LR) S3method(print, MLobj) S3method(print, MLoef) S3method(print, ifit) S3method(print, wald) S3method(print, pfit) S3method(print, ppar) S3method(print, step) S3method(itemfit, ppar) S3method(LRtest, Rm) S3method(person.parameter, eRm) S3method(personfit, ppar) S3method(plotGOF, LR) S3method(plotICC, Rm) S3method(plotjointICC, dRm) S3method(plot, ppar) S3method(pmat, ppar) S3method(residuals, ppar) S3method(Waldtest, Rm) S3method(logLik, eRm) S3method(logLik, ppar) S3method(print, logLik.eRm) S3method(print, logLik.ppar) S3method(print, threshold) S3method(summary, threshold) S3method(thresholds, eRm) S3method(print, ICr) S3method(confint, eRm) S3method(confint, ppar) S3method(confint, threshold) S3method(IC, ppar) S3method(stepwiseIt, eRm) S3method(gofIRT, ppar) S3method(predict, ppar) S3method(summary, gof) S3method(print, gof) S3method(print, T1obj) S3method(print, T2obj) S3method(print, T4obj) S3method(print, T7obj) S3method(print, T7aobj) S3method(print, T10obj) S3method(print, T11obj) S3method(anova, llra) S3method(print, llra) S3method(print,summary.llra) S3method(summary, llra) eRm/man/0000755000176000001440000000000011572663323011610 5ustar ripleyuserseRm/man/Waldtest.Rd0000744000176000001440000000516111572663323013672 0ustar ripleyusers\name{Waldtest} \alias{Waldtest} \alias{Waldtest.Rm} \alias{print.wald} %- Also NEED an '\alias' for EACH other topic documented here. \title{Item-Specific Wald Test} \description{Performs a Wald test on item-level by splitting subjects into subgroups. 
} \usage{ \method{Waldtest}{Rm}(object, splitcr = "median") \method{print}{wald}(x,...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{object}{Object of class \code{RM}.} \item{splitcr}{Split criterion for subject raw score splitting. \code{median} uses the median as split criterion, \code{mean} performs a mean-split. Optionally \code{splitcr} can also be a dichotomous vector which assigns each person to a certain subgroup (e.g., following an external criterion). This vector can be numeric, character or a factor. } \item{x}{Object of class \code{wald}.} \item{...}{Further arguments passed to or from other methods. They are ignored in this function.} } \details{Items are eliminated if they not have the same number of categories in each subgroup. To avoid this problem, for RSM and PCM it is considered to use a random or another user-defined split. If the data set contains missing values and \code{mean} or \code{median} is specified as splitcriterion, means or medians are calculated for each missing value subgroup and consequently used for raw score splitting.} \value{ Returns an object of class \code{wald} containing: \item{coef.table}{Data frame with test statistics, z- and p-values.} \item{betapar1}{Beta parameters for first subgroup} \item{se.beta1}{Standard errors for first subgroup} \item{betapar2}{Beta parameters for second subgroup} \item{se.beta2}{Standard errors for second subgroup} \item{se.beta2}{Standard errors for second subgroup} \item{spl.gr}{Names and levels for \code{splitcr}.} \item{call}{The matched call.} } \references{ Fischer, G. H., and Molenaar, I. (1995). Rasch Models - Foundations, Recent Developements, and Applications. Springer. Fischer, G. H., and Scheiblechner, H. (1970). Algorithmen und Programme fuer das probabilistische Testmodell von Rasch [Algorithms and programs for Rasch's probabilistic test model]. Psychologische Beitraege, 12, 23-51. 
} \author{Patrick Mair, Reinhold Hatzinger} %\note{} \seealso{\code{\link{LRtest}}} \examples{ #Wald test for Rasch model with user-defined subject split data(raschdat2) res <- RM(raschdat2) splitvec <- sample(1:2,25,replace=TRUE) Waldtest(res, splitcr = splitvec) #Wald test for RSM eliminates 4 items (with median split) data(rsmdat) res <- RSM(rsmdat) Waldtest(res) } \keyword{models} eRm/man/thresholds.Rd0000744000176000001440000000465111572663323014265 0ustar ripleyusers\name{thresholds} \alias{thresholds} \alias{thresholds.eRm} \alias{print.threshold} \alias{summary.threshold} \alias{confint.threshold} %- Also NEED an '\alias' for EACH other topic documented here. \title{Computation of item-category treshold parameters.} \description{This function transforms the beta parameters into threshold parameters. These can be interpreted by means of log-odds as visualized in ICC plots. } \usage{ \method{thresholds}{eRm}(object) \method{print}{threshold}(x, ...) \method{summary}{threshold}(object, ...) \method{confint}{threshold}(object, parm, level = 0.95, ...) } \arguments{ Arguments for \code{thresholds}: \item{object}{Object of class \code{eRm}.} Arguments for \code{print}, \code{summary}, and \code{confint} methods: \item{x}{Object of class \code{threshold}.} \item{parm}{Parameter specification (ignored).} \item{level}{Alpha-level.} \item{...}{Further arguments to be passed to methods. They are ignored.} } \details{For dichotomous models (i.e., RM and LLTM) threshold parameters are not computed. The \code{print} method returns a location parameter for each item which is the mean of the corresponding threshold parameters. 
For LPCM and LRSM the thresholds are computed for each design matrix block (i.e., measurement point/group) separately (PCM and RSM have only 1 block).} \value{ The function \code{thresholds} returns an object of class \code{threshold} containing: \item{threshpar}{Vector with threshold parameters.} \item{se.thresh}{Vector with standard errors.} \item{threshtable}{Data frame with location and threshold parameters.} } \references{ Andrich, D. (1978). Application of a psychometric rating model to ordered categories which are scored with successive integers. Applied Psychological Measurement, 2, 581-594. } \seealso{ \code{\link{plotICC.Rm}} } \examples{ #Threshold parameterization for a rating scale model data(rsmdat) res <- RSM(rsmdat) th.res <- thresholds(res) th.res confint(th.res) summary(th.res) #Threshold parameters for a PCM with ICC plot data(pcmdat) res <- PCM(pcmdat) th.res <- thresholds(res) th.res plotICC(res) #Threshold parameters for a LPCM: #Block 1: t1, g1; Block 2: t1, g2; ...; Block 6: t2,g3 data(lpcmdat) G <- c(rep(1,7),rep(2,7),rep(3,6)) # group vector for 3 groups res <- LPCM(lpcmdat, mpoints = 2, groupvec = G) th.res <- thresholds(res) th.res } \keyword{models} eRm/man/summary.llra.Rd0000744000176000001440000000370211572663323014530 0ustar ripleyusers\name{summary.llra} \alias{summary.llra} \alias{print.summary.llra} \title{Summarizing Linear Logistic Models with Relaxed Assumptions (LLRA) } \description{ \code{summary} method for class \code{"llra"} } \usage{ \method{summary}{llra}(object, gamma, ...) \method{print}{summary.llra}(x, ...) } \arguments{ \item{object}{an object of class "llra", typically result of a call to \code{\link{LLRA}}. } \item{x}{an object of class "summary.llra", usually, a result of a call to \code{summary.llra}. } \item{gamma}{The level of confidence for the confidence intervals. Default is 0.95.} \item{\dots}{further arguments passed to or from other methods. 
} } \details{ Objects of class \code{"summary.llra"} contain all parameters of interest plus the confidence intervals. \code{print.summary.llra} rounds the values to 3 digits and displays them nicely. } \value{ The function \code{summary.lllra} computes and returns a list of summary statistics of the fitted LLRA given in object, reusing the components (list elements) \code{call}, \code{etapar}, \code{iter}, \code{loglik}, \code{model}, \code{npar} and \code{se.etapar} from its argument, plus \item{ci}{The upper and lower confidence interval borders.} } \author{ Thomas Rusch } \seealso{ The model fitting function \code{\link{LLRA}}. } \examples{ ##Example 6 from Hatzinger & Rusch (2009) data("llradat3") groups <- c(rep("TG",30),rep("CG",30)) llra1 <- LLRA(llradat3,mpoints=2,groups=groups) summary(llra1) ##An LLRA with 2 treatment groups and 1 baseline group, 5 items and 4 ##time points. Item 1 is dichotomous, all others have 3, 4, 5, 6 ##categories respectively. \dontrun{ data("llraDat2") ex2 <- LLRA(llraDat2[1:20],mpoints=4,llraDat2[21]) sumEx2 <- summary(ex2, gamma=0.95) #print the summary sumEx2 #get confidence intervals sumEx2$ci } } eRm/man/stepwiseIt.Rd0000744000176000001440000000520511572663323014242 0ustar ripleyusers\name{stepwiseIt} \alias{stepwiseIt} \alias{stepwiseIt.eRm} \alias{print.step} %- Also NEED an '\alias' for EACH other topic documented here. \title{Stepwise item elimination} \description{This function eliminates items stepwise according to one of the following criteria: itemfit, Wald test, Andersen's LR-test } \usage{ \method{stepwiseIt}{eRm}(object, criterion = list("itemfit"), alpha = 0.05, verbose = TRUE, maxstep = NA) } \arguments{ \item{object}{Object of class \code{eRm}.} \item{criterion}{List with either \code{"itemfit"}, \code{"Waldtest"} or \code{"LRtest"} as first element. 
Optionally, for the Waldtest and LRtest a second element containing the split criterion can be specified (see details).} \item{alpha}{Significance level.} \item{verbose}{If \code{TRUE} intermediate results are printed out. } \item{maxstep}{Maximum number of elimination steps. If \code{NA} the procedure stops when the itemset is Rasch homogeneous.} } \details{If \code{criterion = list("itemfit")} the elimination stops when none of the p-values in itemfit is significant. Within each step the item with the largest chi-squared itemfit value is excluded. If \code{criterion = list("Waldtest")} the elimination stops when none of the p-values resulting from the Wald test is significant. Within each step the item with the largest z-value in Wald test is excluded. If \code{criterion = list("LRtest")} the elimination stops when Andersen's LR-test is not significant. Within each step the item with the largest z-value in Wald test is excluded. } \value{ The function returns an object of class \code{step} containing: \item{X}{Reduced data matrix (bad items eliminated)} \item{fit}{Object of class \code{eRm} with the final item parameter elimination} \item{it.elim}{Vector contaning the names of the eliminated items} \item{res.wald}{Elimination results for Wald test criterion} \item{res.itemfit}{Elimination results for itemfit criterion} \item{res.LR}{Elimination results for LR-test criterion} \item{nsteps}{Number of elimination steps} } \seealso{ \code{\link{LRtest.Rm}}, \code{\link{Waldtest.Rm}}, \code{\link{itemfit.ppar}} } \examples{ ## 2pl-data, 100 persons, 10 items set.seed(123) X <- sim.2pl(500, 10, 0.4) res <- RM(X) ## elimination according to itemfit stepwiseIt(res, criterion = list("itemfit")) ## Wald test based on mean splitting stepwiseIt(res, criterion = list("Waldtest","mean")) ## Andersen LR-test based on random split set.seed(123) groupvec <- sample(1:3, 500, replace = TRUE) stepwiseIt(res, criterion = list("LRtest",groupvec)) } \keyword{models} 
eRm/man/sim.xdim.Rd0000744000176000001440000000620611572663323013634 0ustar ripleyusers\name{sim.xdim} \alias{sim.xdim} \title{Simulation of multidimensional binary data} \description{This utility function simulates a 0-1 matrix violating the unidimensionality assumption in the Rasch model. } \usage{ sim.xdim(persons, items, Sigma, weightmat, seed = NULL, cutpoint = "randomized") } \arguments{ \item{persons}{Either a matrix (each column corresponds to a dimension) of person parameters or an integer indicating the number of persons (see details).} \item{items}{Either a vector of item parameters or an integer indicating the number of items (see details).} \item{Sigma}{A positive-definite symmetric matrix specifying the covariance matrix of the variables.} \item{weightmat}{Matrix for item-weights for each dimension (columns).} \item{seed}{A seed for the random number generated can be set.} \item{cutpoint}{Either \code{"randomized"} for a randomized tranformation of the model probability matrix into the model 0-1 matrix or an integer value between 0 and 1 (see details).} } \details{If \code{persons} is specified as matrix, \code{Sigma} is ignored. If \code{items} is an integer value, the corresponding parameter vector is drawn from N(0,1). The \code{cutpoint} argument refers to the transformation of the theoretical probabilities into a 0-1 data matrix. A randomized assingment implies that for each cell an additional random number is drawn. If the model probability is larger than this value, the person gets 1 on this particular item, if smaller, 0 is assigned. Alternatively, a numeric probability cutpoint can be assigned and the 0-1 scoring is carried out according to the same rule. If \code{weightmat} is not specified, a random indicator matrix is generated where each item is a measurement of only one dimension. For instance, the first row for a 3D-model could be (0,1,0) which means that the first item measures the second dimension only. 
This corresponds to the between-item multidimensional model presented by Adams et al. (1997). \code{Sigma} reflects the VC-structure for the person parameters drawn from a multivariate standard normal distribution. Thus, the diagonal elements are typically 1 and the lower the covariances in the off-diagonal, the stronger the model violation. } \references{ Adams, R. J., Wilson, M., & Wang, W. C. (1997). The multidimensional random coefficients multinomial logit model. Applied Psychological Measurement, 21, 1-23. Glas, C. A. W. (1992). A Rasch model with a multivariate distribution of ability. In M. Wilson (Ed.), Objective Measurement: Foundations, Recent Developments, and Applications (pp. 236-258). Norwood, NJ: Ablex. } \seealso{\code{\link{sim.rasch}}, \code{\link{sim.locdep}}, \code{\link{sim.2pl}}} \examples{ # 500 persons, 10 items, 3 dimensions, random weights. Sigma <- matrix(c(1, 0.01, 0.01, 0.01, 1, 0.01, 0.01, 0.01, 1), 3) X <- sim.xdim(500, 10, Sigma) #500 persons, 10 items, 2 dimensions, weights fixed to 0.5 itemvec <- runif(10, -2, 2) Sigma <- matrix(c(1, 0.05, 0.05, 1), 2) weights <- matrix(0.5, ncol = 2, nrow = 10) X <- sim.xdim(500, itemvec, Sigma, weightmat = weights) } \keyword{models} eRm/man/sim.rasch.Rd0000744000176000001440000000360311572663323013771 0ustar ripleyusers\name{sim.rasch} \alias{sim.rasch} \title{Simulation of Rasch homogeneous data} \description{This utility function returns a 0-1 matrix which fits the Rasch model. 
} \usage{ sim.rasch(persons, items, seed = NULL, cutpoint = "randomized") } \arguments{ \item{persons}{Either a vector of person parameters or an integer indicating the number of persons (see details)} \item{items}{Either a vector of item parameters or an integer indicating the number of items (see details)} \item{seed}{A seed for the random number generated can be set.} \item{cutpoint}{Either \code{"randomized"} for a randomized tranformation of the model probability matrix into the model 0-1 matrix or an integer value between 0 and 1 (see details)} } \details{If \code{persons} or \code{items} is an integer value, the corresponding parameter vector is drawn from N(0,1). The \code{cutpoint} argument refers to the transformation of the theoretical probabilities into a 0-1 data matrix. A randomized assingment implies that for each cell an additional random number is drawn. If the model probability is larger than this value, the person gets 1 on this particular item, if smaller, 0 is assigned. Alternatively, a numeric probability cutpoint can be assigned and the 0-1 scoring is carried out according to the same rule. } \references{ Su\'arez-Falc\'on, J. C., & Glas, C. A. W. (2003). Evaluation of global testing procedures for item fit to the Rasch model. British Journal of Mathematical and Statistical Society, 56, 127-143. } \seealso{\code{\link{sim.xdim}}, \code{\link{sim.locdep}}, \code{\link{sim.2pl}}} \examples{ #simulating Rasch homogenous data #100 persons, 10 items, parameter drawn from N(0,1) X <- sim.rasch(100, 10) #person parameters drawn from uniform distribution, fixed cutpoint ppar <- runif(100,-2,2) X <- sim.rasch(ppar, 10, cutpoint = 0.5) } \keyword{models} eRm/man/sim.locdep.Rd0000744000176000001440000000474611572663323014150 0ustar ripleyusers\name{sim.locdep} \alias{sim.locdep} \title{Simulation locally dependent items} \description{This utility function returns a 0-1 matrix violating the local independence assumption. 
} \usage{ sim.locdep(persons, items, it.cor = 0.25, seed = NULL, cutpoint = "randomized") } \arguments{ \item{persons}{Either a vector of person parameters or an integer indicating the number of persons (see details).} \item{items}{Either a vector of item parameters or an integer indicating the number of items (see details).} \item{it.cor}{Either a single correlation value between 0 and 1 or a positive semi-definite VC matrix.} \item{seed}{A seed for the random number generated can be set.} \item{cutpoint}{Either \code{"randomized"} for a randomized tranformation of the model probability matrix into the model 0-1 matrix or an integer value between 0 and 1 (see details).} } \details{If \code{persons} or \code{items} is an integer value, the corresponding parameter vector is drawn from N(0,1). The \code{cutpoint} argument refers to the transformation of the theoretical probabilities into a 0-1 data matrix. A randomized assingment implies that for each cell an additional random number is drawn. If the model probability is larger than this value, the person gets 1 on this particular item, if smaller, 0 is assigned. Alternatively, a numeric probability cutpoint can be assigned and the 0-1 scoring is carried out according to the same rule. The argument \code{it.cor} reflects the pair-wise inter-item correlation. If this should be constant across the items, a single value between 0 (i.e. Rasch model) and 1 (strong violation) can be specified. Alternatively, a symmetric VC-matrix of dimension number of items can be defined. } \references{ Jannarone, R. J. (1986). Conjunctive item response theory kernels. Psychometrika, 51, 357-373. Su\'arez-Falc\'on, J. C., & Glas, C. A. W. (2003). Evaluation of global testing procedures for item fit to the Rasch model. British Journal of Mathematical and Statistical Society, 56, 127-143. 
} \seealso{\code{\link{sim.rasch}}, \code{\link{sim.2pl}}, \code{\link{sim.xdim}}} \examples{ #simulating locally-dependent data #500 persons, 10 items, inter-item correlation of 0.5 X <- sim.locdep(500, 10, it.cor = 0.5) #500 persons, 4 items, correlation matrix specified sigma <- matrix(c(1,0.2,0.2,0.3,0.2,1,0.4,0.1,0.2,0.4,1,0.8,0.3,0.1,0.8,1), ncol = 4) X <- sim.locdep(500, 4, it.cor = sigma) } \keyword{models} eRm/man/sim.2pl.Rd0000744000176000001440000000521311572663323013365 0ustar ripleyusers\name{sim.2pl} \alias{sim.2pl} \title{Simulation of 2-pl data} \description{This utility function returns a 0-1 matrix violating the parallel ICC assumption in the Rasch model. } \usage{ sim.2pl(persons, items, discrim = 0.25, seed = NULL, cutpoint = "randomized") } \arguments{ \item{persons}{Either a vector of person parameters or an integer indicating the number of persons (see details).} \item{items}{Either a vector of item parameters or an integer indicating the number of items (see details).} \item{discrim}{Standard deviation on the log scale.} \item{seed}{A seed for the random number generated can be set.} \item{cutpoint}{Either \code{"randomized"} for a randomized tranformation of the model probability matrix into the model 0-1 matrix or an integer value between 0 and 1 (see details).} } \details{If \code{persons} and/or \code{items} (using single integers) are specified to determine the number of subjects or items, the corresponding parameter vector is drawn from N(0,1). The \code{cutpoint} argument refers to the transformation of the theoretical probabilities into a 0-1 data matrix. A randomized assingment implies that for each cell an additional random number is drawn. If the model probability is larger than this value, the person gets 1 on this particular item, if smaller, 0 is assigned. Alternatively, a numeric probability cutpoint can be assigned and the 0-1 scoring is carried out according to the same rule. 
The \code{discrim} argument can be specified either as a vector of length \code{items} defining the item discrimination parameters in the 2-PL (e.g., \code{c(1,1,0.5,1,1.5)}), or as a single value. In that case, the discrimination parameters are drawn from a lognormal distribution with \code{meanlog = 0}, where the specified value in \code{discrim} refers to the standard deviation on the log-scale. The larger the values, the stronger the degree of Rasch violation. Reasonable values are up to 0.5. If 0, the data are Rasch homogeneous. } \references{ Su\'arez-Falc\'on, J. C., & Glas, C. A. W. (2003). Evaluation of global testing procedures for item fit to the Rasch model. British Journal of Mathematical and Statistical Society, 56, 127-143. } \seealso{\code{\link{sim.rasch}}, \code{\link{sim.locdep}}, \code{\link{sim.xdim}}} \examples{ #simulating 2-PL data #500 persons, 10 items, sdlog = 0.30, randomized cutpoint X <- sim.2pl(500, 10, discrim = 0.30) #item and discrimination parameters from uniform distribution, #cutpoint fixed dpar <- runif(50, 0, 2) ipar <- runif(50, -1.5, 1.5) X <- sim.2pl(500, ipar, dpar, cutpoint = 0.5) } \keyword{models} eRm/man/RSM.Rd0000744000176000001440000000571411572663323012550 0ustar ripleyusers\name{RSM} \alias{RSM} %- Also NEED an '\alias' for EACH other topic documented here. \title{Estimation of rating scale models} \description{ This function computes the parameter estimates of a rating scale model for polytomous item responses by using CML estimation. } \usage{ RSM(X, W, se = TRUE, sum0 = TRUE, etaStart) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{X}{Input data matrix or data frame with item responses (starting from 0); rows represent individuals, columns represent items. Missing values are inserted as \code{NA}.} \item{W}{Design matrix for the RSM. 
If omitted, the function will compute W automatically.} \item{se}{If \code{TRUE}, the standard errors are computed.} \item{sum0}{If \code{TRUE}, the parameters are normed to sum-0 by specifying an appropriate \code{W}. If \code{FALSE}, the first parameter is restricted to 0.} \item{etaStart}{A vector of starting values for the eta parameters can be specified. If missing, the 0-vector is used.} } \details{ The design matrix approach transforms the RSM into a partial credit model and estimates the corresponding basic parameters by using CML. Available methods for RSM-objects are \code{print}, \code{coef}, \code{model.matrix}, \code{vcov}, \code{summary}, \code{logLik}, \code{person.parameters}, \code{plotICC}, \code{LRtest}. } \value{ Returns an object of class \code{Rm, eRm} and contains the log-likelihood value, the parameter estimates and their standard errors. \item{loglik}{Conditional log-likelihood.} \item{iter}{Number of iterations.} \item{npar}{Number of parameters.} \item{convergence}{See \code{code} output in \code{\link{nlm}}.} \item{etapar}{Estimated basic item difficulty parameters (item and category parameters).} \item{se.eta}{Standard errors of the estimated basic item parameters.} \item{betapar}{Estimated item-category (easiness) parameters.} \item{se.beta}{Standard errors of item parameters.} \item{hessian}{Hessian matrix if \code{se = TRUE}.} \item{W}{Design matrix.} \item{X}{Data matrix.} \item{X01}{Dichotomized data matrix.} \item{call}{The matched call.} } \references{ Fischer, G. H., and Molenaar, I. (1995). Rasch Models - Foundations, Recent Developements, and Applications. Springer. Mair, P., and Hatzinger, R. (2007). Extended Rasch modeling: The eRm package for the application of IRT models in R. Journal of Statistical Software, 20(9), 1-20. Mair, P., and Hatzinger, R. (2007). CML based estimation of extended Rasch models with the eRm package in R. Psychology Science, 49, 26-43. 
} \author{Patrick Mair, Reinhold Hatzinger} %\note{} \seealso{\code{\link{RM}},\code{\link{PCM}},\code{\link{LRtest}} } \examples{ ##RSM with 10 subjects, 3 items data(rsmdat) res <- RSM(rsmdat) res summary(res) #eta and beta parameters with CI thresholds(res) #threshold parameters } \keyword{models} eRm/man/RM.Rd0000744000176000001440000000614111572663323012420 0ustar ripleyusers\name{RM} \alias{RM} %- Also NEED an '\alias' for EACH other topic documented here. \title{Estimation of Rasch Models} \description{ This function computes the parameter estimates of a Rasch model for binary item responses by using CML estimation. } \usage{ RM(X, W, se = TRUE, sum0 = TRUE, etaStart) } \arguments{ \item{X}{Input 0/1 data matrix or data frame; rows represent individuals, columns represent items. Missing values are inserted as \code{NA}.} \item{W}{Design matrix for the Rasch model. If omitted, the function will compute W automatically.} \item{se}{If \code{TRUE}, the standard errors are computed.} \item{sum0}{If \code{TRUE}, the parameters are normed to sum-0 by specifying an appropriate \code{W}. If \code{FALSE}, the first parameter is restricted to 0.} \item{etaStart}{A vector of starting values for the eta parameters can be specified. If missing, the 0-vector is used.} } \details{ For estimating the item parameters the CML method is used. Available methods for RM-objects are:\cr \code{print}, \code{coef}, \code{model.matrix}, \code{vcov}, \code{summary}, \code{logLik}, \code{person.parameter}, \code{LRtest}, \code{Waldtest}, \code{plotICC}, \code{plotjointICC}. } \value{ Returns an object of class \code{dRm, Rm, eRm} and contains the log-likelihood value, the parameter estimates and their standard errors. 
\item{loglik}{Conditional log-likelihood.} \item{iter}{Number of iterations.} \item{npar}{Number of parameters.} \item{convergence}{See \code{code} output in \code{\link{nlm}}.} \item{etapar}{Estimated basic item difficulty parameters.} \item{se.eta}{Standard errors of the estimated basic item parameters.} \item{betapar}{Estimated item (easiness) parameters.} \item{se.beta}{Standard errors of item parameters.} \item{hessian}{Hessian matrix if \code{se = TRUE}.} \item{W}{Design matrix.} \item{X}{Data matrix.} \item{X01}{Dichotomized data matrix.} \item{call}{The matched call.} } \references{ Fischer, G. H., and Molenaar, I. (1995). Rasch Models - Foundations, Recent Developements, and Applications. Springer. Mair, P., and Hatzinger, R. (2007). Extended Rasch modeling: The eRm package for the application of IRT models in R. Journal of Statistical Software, 20(9), 1-20. Mair, P., and Hatzinger, R. (2007). CML based estimation of extended Rasch models with the eRm package in R. Psychology Science, 49, 26-43. } \author{Patrick Mair, Reinhold Hatzinger} %\note{} \seealso{\code{\link{RSM}},\code{\link{PCM}}, \code{\link{LRtest}}, \code{\link{Waldtest}} } \examples{ # Rasch model with beta.1 restricted to 0 data(raschdat1) res <- RM(raschdat1, sum0 = FALSE) print(res) summary(res) res$W #generated design matrix # Rasch model with sum-0 beta restriction; no standard errors computed res <- RM(raschdat1, se = FALSE, sum0 = TRUE) print(res) summary(res) res$W #generated design matrix #Rasch model with missing values data(raschdat2) res <- RM(raschdat2) print(res) summary(res) } \keyword{models} eRm/man/raschdat.Rd0000744000176000001440000000115011572663323013666 0ustar ripleyusers\name{raschdat1} \alias{raschdat1} \alias{raschdat2} \alias{lltmdat1} \alias{lltmdat2} \alias{pcmdat} \alias{pcmdat2} \alias{lpcmdat} \alias{rsmdat} \alias{lrsmdat} \docType{data} \title{Data for Computing Extended Rasch Models} \description{Artificial data sets for computing extended Rasch models. 
} \usage{data(raschdat1)} \format{Numeric matrices with subjects as rows, items as columns, missing values as \code{NA}. } \examples{ data(raschdat1) data(raschdat2) data(lltmdat1) data(lltmdat2) data(pcmdat) data(pcmdat2) data(lpcmdat) data(rsmdat) data(lrsmdat) } \keyword{datasets} eRm/man/print.eRm.Rd0000744000176000001440000000427311572663323013764 0ustar ripleyusers\name{print.eRm} \alias{print.eRm} \alias{summary.eRm} \alias{vcov.eRm} \alias{model.matrix.eRm} \alias{coef.eRm} \alias{logLik.eRm} \alias{confint.eRm} %- Also NEED an '\alias' for EACH other topic documented here. \title{Methods for extended Rasch models} \description{Several methods for objects of class \code{eRm}.} \usage{ \method{print}{eRm}(x, ...) \method{summary}{eRm}(object, ...) \method{coef}{eRm}(object, parm="beta", ...) \method{model.matrix}{eRm}(object, ...) \method{vcov}{eRm}(object, ...) \method{logLik}{eRm}(object, ...) \method{confint}{eRm}(object, parm = "beta", level = 0.95, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x}{Object of class \code{eRm}.} \item{object}{Object of class \code{eRm}.} \item{parm}{Either \code{"eta"} or \code{"beta"}.} \item{level}{Alpha-level.} \item{...}{Further arguments to be passed to or from other methods. They are ignored in this function.} } \details{ The \code{print} method displays the value of the log-likelihood, parameter estimates (basic parameters eta) and their standard errors. For RM, RSM, and PCM models, the etas are difficulty parameters, for the LLTM, LRSM, LPCM the sign of the parameters depend on the design matrix and are easiness effects by default. The \code{summary} method additionally gives the full set of item parameters beta as easiness parameters for all models. Print methods are also available for the functions \code{logLik} and \code{confint} (see below). 
} \value{ The methods below are extractor functions and return various quantities: \code{vcov} returns the variance-covariance matrix of the parameter estimates, \code{coef} a vector of estimates of the eta or beta basic parameters, \code{model.matrix} the design matrix, \code{logLik} an object with elements \code{loglik} and \code{df} containing the log-likelihood value and df. \code{confint} a matrix of confidence interval values for eta or beta. } \author{Patrick Mair, Reinhold Hatzinger} \examples{ data(raschdat1) res <- RM(raschdat1) print(res) summary(res) coef(res) vcov(res) model.matrix(res) logLik(res) } \keyword{models} eRm/man/predict.ppar.Rd0000744000176000001440000000226211572663323014475 0ustar ripleyusers\name{predict.ppar} \alias{predict.ppar} \title{Predict methods} \description{Returns data matrix based on model probabilites. So far implemented for dichotomous models only.} \usage{ \method{predict}{ppar}(object, cutpoint = "randomized", ...) } \arguments{ \item{object}{Object of class \code{ppar} (from \code{person.parameter()}).} \item{cutpoint}{Either single integer value between 0 and 1 or \code{"randomized"} for randomized 0-1 assignment (see details)} \item{...}{Additional arguments ignored} } \details{ A randomized assignment implies that for each cell an additional random number is drawn. If the model probability is larger than this value, the person gets 1 on this particular item, if smaller, 0 is assigned. Alternatively, a numeric probability cutpoint can be assigned and the 0-1 scoring is carried out according to the same rule. 
} \value{ Returns data matrix based on model probabilities } \author{Patrick Mair, Reinhold Hatzinger} %\note{} \seealso{ \code{\link{gofIRT.ppar}} } \examples{ #Model-based data matrix for RSM data(raschdat2) res <- RM(raschdat2) pres <- person.parameter(res) predict(pres) } \keyword{models} eRm/man/plotTR.Rd0000744000176000001440000000255411572663323013332 0ustar ripleyusers\name{plotTR} \alias{plotTR} \title{Plot Trend Effects for LLRA } \description{ Plots trend effects over time. } \usage{ plotTR(object, ...) } \arguments{ \item{object}{an object of class \code{"llra"} } \item{\dots}{Additional parameters to be passed to and from other methods} } \details{ The plot is a lattice plot with one panel. The effects for each items are plotted over the different time points. Please note that all effects are to be interpreted relative to the baseline (i.e. t1). Currently, this function only works for a full item x treatment x timepoints LLRA. Collapsed effects will not be displayed properly. } \author{ Thomas Rusch } \seealso{ The plot method for treatment effects \code{"plotGR"}. } \section{Warning:}{ Objects of class \code{"llra"} that contain estimates from a collapsed data matrix will not be displayed correctly. } \examples{ ##Example 6 from Hatzinger & Rusch (2009) data("llradat3") groups <- c(rep("TG",30),rep("CG",30)) llra1 <- LLRA(llradat3,mpoints=2,groups=groups) summary(llra1) plotTR(llra1) ##An LLRA with 2 treatment groups and 1 baseline group, 5 items and 4 ##time points. Item 1 is dichotomous, all others have 3, 4, 5, 6 ##categories respectively. \dontrun{ data("llraDat2") ex2 <- LLRA(llraDat2[1:20],mpoints=4,groups=llraDat2[21]) plotTR(ex2) } } eRm/man/plotPWmap.Rd0000744000176000001440000001177011572663323014031 0ustar ripleyusers\name{plotPWmap} \alias{plotPWmap} \title{Pathway Map} \description{ A Bond-and-Fox Pathway Map displays the location of each item or each person against its infit t-statistic. 
Pathway maps are useful for identifying misfitting items or misfitting persons. Items or people should ideally have a infit t-statistic lying between about -2 and +2, and these values are marked. } \usage{ plotPWmap(object, pmap = FALSE, imap=TRUE, item.subset = "all", person.subset = "all", mainitem = "Item Map", mainperson = "Person Map", mainboth="Item/Person Map", latdim = "Latent Dimension", tlab = "Infit t statistic", pp = NULL, cex.gen = 0.6, cex.pch=1, person.pch = 1, item.pch = 16, personCI = NULL, itemCI = NULL, horiz=FALSE) } \arguments{ \item{object}{Object of class \code{Rm} or \code{dRm}} \item{pmap}{Plot a person map if \code{TRUE}; the default is \code{FALSE}.} \item{imap}{Plot an item map if \code{TRUE} (the default); do not plot if \code{FALSE}. At least one of \code{pmap} and \code{imap} must be \code{TRUE}.} \item{item.subset}{Subset of items to be plotted for an item map. Either a numeric vector indicating the item numbers or a character vector indicating the item names. If \code{"all"}, all items are plotted. The number of items to be plotted must be > 1.} \item{person.subset}{Subset of persons to be plotted for a person map. Either a numeric vector indicating the person numbers or a character vector indicating the person names. If \code{"all"}, all persons are plotted. The number of persons to be plotted must be > 1.} \item{mainitem}{Main title of an item plot.} \item{mainperson}{Main title of a person plot.} \item{mainboth}{Main title of a person/item joint plot.} \item{latdim}{Label of the y-axis, i.e., the latent dimension.} \item{tlab}{Label of the x-axis, i.e., the t-statistic dimension.} \item{pp}{If non-\code{NULL}, this contains the \code{person.parameter} data of the data object, avoiding the need to recalculate it.} \item{cex.gen}{\code{cex} as a graphical parameter specifies a numerical value giving the amount by which plotting text and symbols should be magnified relative to the default. 
Here \code{cex.gen} applies to all text labels. The default is 0.6.} \item{cex.pch}{applies to all plotting symbols. The default is 1.} \item{person.pch, item.pch}{Specifies the symbol used for plotting person data and item data respectively; the defaults are 1 and 16 respectively. See \code{\link{points}} for more information about \code{pch} values.} \item{personCI, itemCI}{Plotting confidence intervals for the the person abilities and item difficulties. If \code{personCI=NULL} (the default) no confidence intervals are drawn for person abilities. Otherwise, specifying \code{personCI} draws approximate confidence intervals for each person's ability. \code{personCI} must be specified as a list, and the optional elements of this list are \code{gamma}, the confidence level, \code{col}, colour, and \code{lty}, line type. If \code{personCI} is specified as an empty list, or not all of the list items are specified, the default values \code{personCI=list(gamma=0.95,col="orange",lty="dotted")} will be used. The same goes for \code{itemCI}, except that the default settings are \code{itemCI=list(gamma=0.95,col="red",lty="dotted")}.} \item{horiz}{if \code{TRUE}, the plot is horizontal, i.e., the latent dimension is on the x-axis. The default is \code{FALSE}.} } \details{ This code uses vertical(horizontal) error bars rather than circles or boxes to indicate standard errors. It also offers the possibility of plotting item or person data on its own; this can considerably simplify the reading of the plots for large datasets. } %\value{} \references{ Bond T.G., Fox C.M. (2007) \emph{Applying the Rasch Model: Fundamental Measurement in the Human Sciences} (2nd ed.) chapter 3, Lawrence Erlbaum Associates, Inc. Linacre J.M., Wright B.D. (1994) Dichotomous Infit and Outfit Mean-Square Fit Statistics / Chi-Square Fit Statistics. \emph{Rasch Measurement Transactions} \bold{8:2} p. 350, \url{http://www.rasch.org/rmt/rmt82a.htm} Linacre J.M. 
(2002) What do Infit and Outfit, Mean-square and Standardized mean? \emph{Rasch Measurement Transactions} \bold{16:2} p. 878, \url{http://www.rasch.org/rmt/rmt162f.htm} Wright B.D., Masters G.N. (1990) Computation of OUTFIT and INFIT Statistics. \emph{Rasch Measurement Transactions} \bold{3:4} p. 84--85, \url{http://www.rasch.org/rmt/rmt34e.htm} } \author{Julian Gilbey} %\note{} %\seealso{} \examples{ data(pcmdat) res<-PCM(pcmdat) pparm<-person.parameter(res) plotPWmap(res, pp=pparm) plotPWmap(res, pp=pparm, pmap=TRUE) } \keyword{models} eRm/man/plotPImap.Rd0000744000176000001440000000613711572663323014014 0ustar ripleyusers\name{plotPImap} \alias{plotPImap} %- Also NEED an '\alias' for EACH other topic documented here. \title{Person-Item Map} \description{ A person-item map displays the location of item (and threshold) parameters as well as the distribution of person parameters.along the latent dimension. Person-item maps are useful to compare the range and position of the item measure distribution (lower panel) to the range and position of the person measure distribution (upper panel). Items should ideally be located along the whole scale to meaningfully measure the `ability' of all persons. } \usage{ plotPImap(object, item.subset = "all", sorted = FALSE, main = "Person-Item Map", latdim = "Latent Dimension", pplabel = "Person\nParameter\nDistribution", cex.gen = 0.7, xrange = NULL, warn.ord = TRUE, warn.ord.colour = "black", irug = TRUE, pp = NULL) } \arguments{ \item{object}{Object of class \code{Rm} or \code{dRm}} \item{item.subset}{Subset of items to be plotted. Either a numeric vector indicating the column in \code{X} or a character vector indicating the column name. If \code{"all"}, all items are plotted. 
The number of items to be plotted must be > 1.} \item{sorted}{ If \code{TRUE}, the items are sorted in increasing order according to their location on the latent dimension.} \item{main}{Main title of the plot.} \item{latdim}{Label of the x-axis, i.e., the latent dimension.} \item{pplabel}{Title for the upper panel displaying the person parameter distribution} \item{cex.gen}{\code{cex} as a graphical parameter specifies a numerical value giving the amount by which plotting text and symbols should be magnified relative to the default. Here \code{cex.gen} applies to all text labels. The default is 0.7.} \item{xrange}{Range for the x-axis} \item{warn.ord}{If \code{TRUE} (the default) asterisks are displayed in the right margin of the lower panel to indicate nonordinal threshold locations for polytomous items.} \item{warn.ord.colour}{Nonordinal threshold locations for polytomous items are coloured with this colour to make them more visible. This is especially useful when there are many items so that the plot is quite dense. The default is \code{"black"}, so that there is no distinction made.} \item{irug}{If \code{TRUE} (the default), all thresholds are plotted below the person distribution to indicate where the included items are most informative.} \item{pp}{If non-\code{NULL}, this contains the \code{person.parameter} data of the data object, avoiding the need to recalculate it.} } \details{ Item locations are displayed with bullets, threshold locations with circles. } %\value{} \references{Bond, T.G., and Fox Ch.M. (2007) Applying the Rasch Model. Fundamental Measurement in the Human Sciences. 2nd Edition. Lawrence Erlbaum Associates. 
} \author{Patrick Mair, Reinhold Hatzinger, patches from Julian Gilbey and Marco Maier} %\note{} %\seealso{} \examples{ data(pcmdat) res<-PCM(pcmdat) plotPImap(res, sorted=TRUE) } \keyword{models} eRm/man/plotICC.Rd0000744000176000001440000001435211572663323013402 0ustar ripleyusers\name{plotICC} \alias{plotICC} \alias{plotICC.Rm} \alias{plotjointICC} \alias{plotjointICC.dRm} \title{ICC Plots} \description{Plot functions for visualizing the item characteristic curves} \usage{ \method{plotICC}{Rm}(object, item.subset = "all", empICC = NULL, empCI = NULL, mplot = NULL, xlim = c(-4, 4), ylim = c(0, 1), xlab = "Latent Dimension", ylab = "Probability to Solve", main=NULL, col = NULL, lty = 1, legpos = "left", ask = TRUE, ...) \method{plotjointICC}{dRm}(object, item.subset = "all", legend = TRUE, xlim = c(-4, 4), ylim = c(0, 1), xlab = "Latent Dimension", ylab = "Probability to Solve", lty = 1, legpos = "left", main="ICC plot",col=NULL,...) } \arguments{ \item{object}{object of class \code{Rm} or \code{dRm}} \item{item.subset}{Subset of items to be plotted. Either a numeric vector indicating the column in \code{X} or a character vector indiciating the column name. If \code{"all"} (default), all items are plotted.} \item{empICC}{Plotting the empirical ICCs for objects of class \code{dRm}. If \code{empICC=NULL} (the default) the empirical ICC is not drawn. Otherwise, \code{empICC} must be specified as a list where the first element must be one of \code{"raw"}, \code{"loess"}, \code{"tukey"}, \code{"kernel"}. The other optional elements are \code{smooth} (numeric), \code{type} (line type for empirical ICCs, useful values are \code{"p"} (default), \code{"l"}, and \code{"b"}, see graphics parameter \code{type} in \code{\link{plot.default}}), \code{pch}, \code{col}, and \code{lty}, plotting `character', colour and linetype (see \code{\link{par}}). See details and examples below. } \item{empCI}{Plotting confidence intervals for the the empirical ICCs. 
If \code{empCI=NULL} (the default) no confidence intervals are drawn. Otherwise, by specifying \code{empCI} as a list gives `exact' confidence intervals for each point of the empirical ICC. The optional elements of this list are \code{gamma}, the confidence level, \code{col}, colour, and \code{lty}, line type. If \code{empCI} is specified as an empty list, the default values \code{empCI=list(gamma=0.95,col="red",lty="dotted")} will be used. } \item{mplot}{if \code{NULL} the default setting is in effect. For models of class \code{dRm} this is \code{mplot = TRUE}, i.e., the ICCs for up to 4 items are plotted in one figure. For \code{Rm} models the default is \code{FALSE} (each item in one figure) but may be set to \code{TRUE}. } \item{xlab}{Label of the x-axis.} \item{ylab}{Label of the y-axis.} \item{xlim}{Range of person parameters.} \item{ylim}{Range for probability to solve.} \item{legend}{If \code{TRUE}, legend is provided, otherwise the ICCs are labeled.} \item{col}{If not specified or \code{NULL}, line colors are determined automatically. Otherwise, a scalar or vector with appropriate color specifications may be supplied (see \code{\link{par}}).} \item{lty}{Line type.} \item{main}{Title of the plot.} \item{legpos}{Position of the legend with possible values \code{"bottomright"}, \code{"bottom"}, \code{"bottomleft"}, \code{"left"}, \code{"topleft"}, \code{"top"}, \code{"topright"}, \code{"right"} and \code{"center"}. If \code{FALSE} no legend is displayed.} \item{ask}{If \code{TRUE} (the default) and the \code{R} session is interactive the user is asked for input, before a new figure is drawn. \code{FALSE} is only useful if automated figure export is in effect, e.g., when using \code{\link{Sweave}}.} \item{\ldots}{Additional plot parameters.} } \details{Empirical ICCs for objects of class \code{dRm} can be plotted using the option \code{empICC}, a list where the first element specifies the type of calculation of the empirical values. 
If \code{empICC=list("raw", other specifications)} relative frequencies of the positive responses are calculated for each rawscore group and plotted at the position of the corresponding person parameter. The other options use the default versions of various smoothers: \code{"tukey"} (see \code{\link{smooth}}), \code{"loess"} (see \code{\link{loess}}), and \code{"kernel"} (see \code{\link{ksmooth}}). For \code{"loess"} and \code{"kernel"} a further element, \code{smooth}, may be specified to control the span (default is 0.75) or the bandwidth (default is 0.5), respectively. For example, the specification could be \code{empICC = list("loess", smooth=0.9)} or \code{empICC = list("kernel",smooth=2)}. Higher values result in smoother estimates of the empirical ICCs. The optional confidence intervals are obtained by a procedure first given in Clopper and Pearson (1934) based on the beta distribution (see \code{\link{binom.test}}). } \note{For most of the plot options see \code{\link{plot}} and \code{\link{par}}.} %\value{} %\references{} \author{Patrick Mair, Reinhold Hatzinger} %\note{} \seealso{\code{\link{plotGOF}}} \examples{ # Rating scale model, ICC plot for all items data(rsmdat) rsm.res <- RSM(rsmdat) thresholds(rsm.res) plotICC(rsm.res) # now items 1 to 4 in one figure without legends plotICC(rsm.res, item.subset = 1:4, mplot = TRUE, legpos = FALSE) # Rasch model for items 1 to 8 from raschdat1 # empirical ICCs displaying relative frequencies (default settings) data(raschdat1) rm8.res <- RM(raschdat1[,1:8]) plotICC(rm8.res, empICC=list("raw")) # the same but using different plotting styles plotICC(rm8.res, empICC=list("raw",type="b",col="blue",lty="dotted")) # kernel-smoothed empirical ICCs using bandwidth = 3 plotICC(rm8.res, empICC = list("kernel",smooth=3)) # raw empirical ICCs with confidence intervals # displaying only items 2,3,7,8 plotICC(rm8.res, item.subset=c(2,3,7,8), empICC=list("raw"), empCI=list()) # Joint ICC plot for items 2, 6, 8, and 15 for 
a Rasch model data(raschdat1) res <- RM(raschdat1) plotjointICC(res, item.subset = c(2,6,8,15), legpos = "left") } \keyword{models} eRm/man/plotGR.Rd0000744000176000001440000000301611572663323013307 0ustar ripleyusers\name{plotGR} \alias{plotGR} \title{Plot Treatment or Covariate Effects for LLRA } \description{ Plots treatment or covariate group effects over time. } \usage{ plotGR(object, ...) } \arguments{ \item{object}{an object of class "llra". } \item{\dots}{Additional parameters to be passed to and from other methods. } } \details{ The plot is a lattice plot with each panel corresponding to an item. The effects are plotted for each groups (including baseline) over the different time points. The groups are given the same names as for the parameter estimates (derived from groupvec). Please note that all effects are to be interpreted relative to the baseline. Currently, this function only works for a full item x treatment x timepoints LLRA. Collapsed effects will not be displayed properly. } \author{ Thomas Rusch } \seealso{ The plot method for trend effects \code{\link{plotTR}}. } \section{Warning:}{ Objects of class \code{"llra"} that contain estimates from a collapsed data matrix will not be displayed correctly. } \examples{ ##Example 6 from Hatzinger & Rusch (2009) data("llradat3") groups <- c(rep("TG",30),rep("CG",30)) llra1 <- LLRA(llradat3,mpoints=2,groups=groups) summary(llra1) plotGR(llra1) ##An LLRA with 2 treatment groups and 1 baseline group, 5 items and 4 ##time points. Item 1 is dichotomous, all others have 3, 4, 5, 6 ##categories respectively. \dontrun{ data("llraDat2") ex2 <- LLRA(llraDat2[1:20],mpoints=4,groups=llraDat2[21]) plotGR(ex2) } }eRm/man/plotDIF.Rd0000744000176000001440000000755011572663323013410 0ustar ripleyusers\name{plotDIF} \alias{plotDIF} \title{ Confidence intervals plot of item parameter estimates. } \description{ Performs an plot of item parameter conficence intervals based on \code{LRtest} subgroup splitting. 
} \usage{ plotDIF(object, item.subset = NULL, gamma = 0.95, main = NULL, xlim = NULL, xlab = " ", ylab=" ", col = NULL, distance, splitnames=NULL, leg = FALSE, legpos="bottomleft", ...) } \arguments{ \item{object}{ An object of class \code{LR} (if more objects should be plotted, the argument has to be defined as a \code{list}). } \item{item.subset}{ Subset of items to be plotted. Either a numeric vector indicating the items or a character vector indicating the itemnames. If nothing is defined (default), all items are plotted. } \item{gamma}{ The level for the item parameter's confidence limits (default is gamma = 0.95). } \item{main}{ Main title for the plot. } \item{xlim}{ Numeric vector of length 2, giving the x coordinates ranges of the plot (the y coordinates depend on the number of depicted items). } \item{xlab}{ Label for the x axis. } \item{ylab}{ Label for the y axis. } \item{col}{ By default the color for the drawn confidence lines is determined automatically whereas every group (split criterion) is depicted in the same color. } \item{distance}{ Distance between each item's confidence lines -- if omitted, the distance shrinks with increasing numbers of split criteria. Can be overriden using values in (0, 0.5). %Distance between the drawn confidence lines, default is division by factor 10 . } \item{splitnames}{ For labeling the splitobjects in the legend (returns a nicer output). } \item{leg}{ If \code{TRUE} a legend is provided by default. } \item{legpos}{ Position of the legend with possible values \code{"bottomright"}, \code{"bottom"}, \code{"bottomleft"}, \code{"left"}, \code{"topleft"}, \code{"top"}, \code{"topright"}, \code{"right"} and \code{"center"}. The default value for the legend is \code{"bottomright"}. } \item{...}{ Further options to be passed to \code{plot}. } } \details{ If there are items that cannot be estimated for some reasons, certainly these ones are not plotted. 
For plotting several objects of class \code{LR}, the subgroup splitting by \code{LRtest} has to be carried out for the same data set (or at least item subsets of it). Plotting a certain subset of items could be useful if the objects of class \code{LR} contain a huge number of estimated items. The default level for the conficence limits is gamma = 0.95. (If the conficence limits should be corrected it is useful to use a correction, e.g., Bonferroni: 1 - (1 - gamma) / number of estimated items.) } \value{ \code{plotCI} returns a list containing the confidence limits of each group in each \code{LRtest} object. } \author{ Kathrin Gruber, Reinhold Hatzinger } \seealso{ \code{\link{LRtest}}, \code{\link{confint.threshold}}, \code{\link{thresholds}} } \examples{ splitvec <- sample(1:3, 100, replace = TRUE) data(raschdat1) res <- RM(raschdat1) # LR-test on dichotomous Rasch model with user-defined split lrres <- LRtest(res, splitcr = splitvec, se = TRUE) # LR-test with mean split, standard errors for beta's lrres2 <- LRtest(res, split = "mean", se = TRUE) RMplotCI <- list(lrres, lrres2) # Confidence intervals plot with default assumptions plotDIF(RMplotCI) # Confidence intervals plot with Bonferroni correction plotDIF(RMplotCI, gamma = 1 - (0.05/10)) # Confidence intervals plot for an item subset plotDIF(RMplotCI, item.subset=1:6) # with user defined group color and legend plotDIF(RMplotCI, col=c("red","blue"), leg=TRUE) # with names for the splitobjects plotDIF(RMplotCI, col=c("red","blue"), leg=TRUE, splitnames=c(paste(rep("User",3),1:3,sep=" "), paste(rep("Mean",2),1:2, sep=" "))) } \keyword{models} eRm/man/person.parameter.Rd0000744000176000001440000000737611572663323015402 0ustar ripleyusers\name{person.parameter} \alias{person.parameter} \alias{person.parameter.eRm} \alias{summary.ppar} \alias{print.ppar} \alias{plot.ppar} \alias{coef.ppar} \alias{logLik.ppar} \alias{print.logLik.ppar} \alias{confint.ppar} %- Also NEED an '\alias' for EACH other topic documented 
here. \title{Estimation of Person Parameters} \description{Maximum likelihood estimation of the person parameters with spline interpolation for non-observed and 0/full responses. Extraction of information criteria such as AIC, BIC, and cAIC based on unconditional log-likelihood.} \usage{ \method{person.parameter}{eRm}(object) \method{summary}{ppar}(object, ...) \method{print}{ppar}(x, ...) \method{plot}{ppar}(x, xlab = "Person Raw Scores", ylab = "Person Parameters (Theta)", main = NULL, ...) \method{coef}{ppar}(object, ...) \method{logLik}{ppar}(object, ...) \method{confint}{ppar}(object, parm, level = 0.95, ...) } \arguments{ \item{object}{Object of class \code{eRm} in \code{person.parameter} and object of class \code{ppar} in \code{IC}.} Arguments for \code{print} and \code{plot} methods: \item{x}{Object of class \code{ppar}.} \item{xlab}{Label of the x-axis.} \item{ylab}{Label of the y-axis.} \item{main}{Title of the plot.} \item{...}{Further arguments to be passed to or from other methods. They are ignored in this function.} Arguments for \code{confint}: \item{parm}{Parameter specification (ignored).} \item{level}{Alpha-level.} } \details{If the data set contains missing values, person parameters are estimated for each missing value subgroup. 
} \value{ The function \code{person.parameter} returns an object of class \code{ppar} containing: \item{loglik}{Log-likelihood of the collapsed data (for faster estimation persons with the same raw score are collapsed).} \item{npar}{Number of parameters.} \item{niter}{Number of iterations.} \item{thetapar}{Person parameter estimates.} \item{se.theta}{Standard errors of the person parameters.} \item{hessian}{Hessian matrix.} \item{theta.table}{Matrix with person parameters (ordered according to original data) including NA pattern group.} \item{pers.ex}{Indices with persons excluded due to 0/full raw score} \item{X.ex}{Data matrix with persons excluded} \item{gmemb}{NA group membership vector (0/full persons excluded)} The function \code{coef} returns a vector of the person parameter estimates for each person (i.e., the first column of \code{theta.table}). The function \code{logLik} returns an object of class \code{loglik.ppar} containing: \item{loglik}{Log-likelihood of the collapsed data (see above).} \item{df}{Degrees of freedom.} } \references{ Fischer, G. H., and Molenaar, I. (1995). Rasch Models - Foundations, Recent Developements, and Applications. Springer. Mair, P., and Hatzinger, R. (2007). Extended Rasch modeling: The eRm package for the application of IRT models in R. Journal of Statistical Software, 20(9), 1-20. Mair, P., and Hatzinger, R. (2007). CML based estimation of extended Rasch models with the eRm package in R. Psychology Science, 49, 26-43. 
} \author{Patrick Mair, Reinhold Hatzinger} %\note{} \seealso{ \code{\link{itemfit.ppar}},\code{\link{personfit.ppar}} } \examples{ #Person parameter estimation of a rating scale model data(rsmdat) res <- RSM(rsmdat) pres <- person.parameter(res) print(pres) summary(pres) plot(pres) #Person parameter estimation for a Rasch model with missing values data(raschdat2) res <- RM(raschdat2, se = FALSE) #Rasch model without standard errors pres <- person.parameter(res) print(pres) #person parameters summary(pres) logLik(pres) #log-likelihood of person parameter estimation } \keyword{models} eRm/man/PCM.Rd0000744000176000001440000000554011572663323012523 0ustar ripleyusers\name{PCM} \alias{PCM} %- Also NEED an '\alias' for EACH other topic documented here. \title{Estimation of partial credit models} \description{ This function computes the parameter estimates of a partial credit model for polytomous item responses by using CML estimation. } \usage{ PCM(X, W, se = TRUE, sum0 = TRUE, etaStart) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{X}{Input data matrix or data frame with item responses (starting from 0); rows represent individuals, columns represent items. Missing values are inserted as \code{NA}.} \item{W}{Design matrix for the PCM. If omitted, the function will compute W automatically.} \item{se}{If \code{TRUE}, the standard errors are computed.} \item{sum0}{If \code{TRUE}, the parameters are normed to sum-0 by specifying an appropriate \code{W}. If \code{FALSE}, the first parameter is restricted to 0.} \item{etaStart}{A vector of starting values for the eta parameters can be specified. If missing, the 0-vector is used.} } \details{ Through specification in W, the parameters of the categories with 0 responses are set to 0 as well as the first category of the first item. 
Available methods for PCM-objects are:\cr \code{print}, \code{coef}, \code{model.matrix}, \code{vcov}, \code{plot}, \code{summary}, \code{logLik}, \code{person.parameters}, \code{plotICC}, \code{LRtest}. } \value{ Returns an object of class \code{Rm, eRm} containing. \item{loglik}{Conditional log-likelihood.} \item{iter}{Number of iterations.} \item{npar}{Number of parameters.} \item{convergence}{See \code{code} output in \code{\link{nlm}}.} \item{etapar}{Estimated basic item difficulty parameters.} \item{se.eta}{Standard errors of the estimated basic item parameters.} \item{betapar}{Estimated item-category (easiness) parameters.} \item{se.beta}{Standard errors of item parameters.} \item{hessian}{Hessian matrix if \code{se = TRUE}.} \item{W}{Design matrix.} \item{X}{Data matrix.} \item{X01}{Dichotomized data matrix.} \item{call}{The matched call.} } \references{ Fischer, G. H., and Molenaar, I. (1995). Rasch Models - Foundations, Recent Developements, and Applications. Springer. Mair, P., and Hatzinger, R. (2007). Extended Rasch modeling: The eRm package for the application of IRT models in R. Journal of Statistical Software, 20(9), 1-20. Mair, P., and Hatzinger, R. (2007). CML based estimation of extended Rasch models with the eRm package in R. Psychology Science, 49, 26-43. } \author{Patrick Mair, Reinhold Hatzinger} %\note{} \seealso{\code{\link{RM}},\code{\link{RSM}},\code{\link{LRtest}} } \examples{ ##PCM with 10 subjects, 3 items data(pcmdat) res <- PCM(pcmdat) res summary(res) #eta and beta parameters with CI thresholds(res) #threshold parameters } \keyword{models} eRm/man/NPtest.Rd0000744000176000001440000002006511572663323013320 0ustar ripleyusers\name{NPtest} \Rdversion{1.1} \alias{NPtest} \title{function to perform nonparametric Rasch model tests} \description{A variety of nonparametric tests as proposed by Ponocny(2001) and an 'exact' version of the Martin-Loef test are implemented. 
The function operates on random binary matrices that have been generated using an MCMC algorithm (Verhelst, 2008) from the RaschSampler package (Hatzinger, Mair, and Verhelst, 2009). } \usage{ NPtest(obj, n=NULL, method = "T1", ...) } \arguments{ \item{obj}{ A binary data matrix (or data frame) or an object containing the output from the \code{\link[RaschSampler]{RaschSampler}} package. } \item{n}{ If \code{obj} is a matrix or a data frame, \code{n} n is the number of sampled matrices (default is 500) } \item{method}{ One of the test statistics. See details below. } \item{\dots}{ Further arguments for specifying the statistics functions. See details below. } } \details{ The function uses the \code{\link[RaschSampler]{RaschSampler}} package. On input the user has to supply either a binary data matrix or a RaschSampler output object. If the input is a data matrix, the RaschSampler is called with default values (i.e., \code{rsctrl(burn_in = 256, n_eff = n, step = 32)}, see \code{\link[RaschSampler]{rsctrl}}), where \code{n} may be specified by the user (otherwise it is 500). The starting values for the random number generators are chosen randomly using system time. Methods other than those listed below can easily be implemented using the RaschSampler package directly. The currently implemented methods (following Ponocny's notation of \emph{T}-statistics) and their options are: \describe{ \item{\bold{T1:}}{\code{method = "T1"}, no further option}\cr Checks for local dependence via increased inter-item correlations. For all item pairs cases are counted with equal responses on both items. 
\item{\bold{T2:}}{\code{method = "T2", idx = NULL, stat = "var"}}\cr \code{idx} \ldots vector of indexes specifying items which define a subscale, e.g., \code{idx = c(1, 5, 7)}\cr \code{stat} \ldots one of \code{"var"} (variance), \code{"mad1"} (mean absolute deviation), \code{"mad2"} (median absolute deviation), \code{"range"} (range)\cr Checks for local dependence within model deviating subscales via increased dispersion of subscale person rawscores. \item{\bold{T4:}}{\code{method = "T4", idx = NULL, group = NULL, alternative = "high"}}\cr \code{idx} \ldots vector of indexes specifying items which define a subscale, e.g., \code{idx = c(1, 5, 7)}\cr \code{group} \ldots logical vector defining a subject group, e.g., \code{group = (age >= 15 && age < 30)}\cr \code{alternative} \ldots one of \code{"high"} or \code{"low"}. Specifies the alternative hypothesis.\cr Checks for group anomalies (DIF) via too high (low) raw scores on item(s) for specified group. \item{\bold{T7:}}{\code{method = "T7", idx = NULL}}\cr \code{idx} \ldots vector of indexes specifying items which define a subscale, e.g., \code{idx = c(1, 5, 7)}\cr Checks for lower discrimination (2PL) in item subscale via counting cases with response 1 on more difficult and 0 on easier items. The test is global for the subscale, i.e. all subscale items are evaluated using a single statistic. \item{\bold{T7a:}}{\code{method = "T7a", idx = NULL}}\cr \code{idx} \ldots vector of indexes specifying items to investigate, e.g., \code{idx = c(1, 5, 7)}\cr Checks for lower discrimination (2PL) of an item compared to another item via counting cases with response 1 on more difficult and 0 on easier item. The test is performed pairwise, i.e. a statistic is calculated for each item pair. \item{\bold{T10:}}{\code{method = "T10", splitcr="median"}}\cr \code{splitcr} \ldots split criterion for subject raw score splitting. \code{"median"} uses the median as split criterion, \code{"mean"} performs a mean-split. 
Optionally \code{splitcr} can also be a vector which assigns each person to one of two subgroups (e.g., following an external criterion). This vector can be numeric, character, logical or a factor.\cr Global test for subgroup-invariance. Checks for different item difficulties in two subgroups (for details see Ponocny, 2001). \item{\bold{T11:}}{\code{method = "T11"}, no further option}\cr Global test for local dependence. The statistic calculates the sum of absolute deviations between the observed inter-item correlations and the expected correlations. } The 'exact' version of the \bold{Martin-Loef} statistic is specified via \code{method = "MLoef"} and optionally \code{splitcr} (see \code{\link{MLoef}}). } \value{ Depends on the method used. For each method a list is returned. The returned objects are of class \code{T1obj}, \code{T2obj}, \code{T4obj}, \code{T7obj}, \code{T7aobj}, \code{T10obj}, \code{T11obj} corresponding to the method used. The main output element is \code{prop} giving the one-sided p-value, i.e., the number of statistics from the sampled matrices which equal or exceed the statistic based on the observed data. For \emph{T1} and \emph{T7a} \code{prop} is a vector. For the \emph{Martin-Loef} test the returned object is of class \code{MLobj}. Besides other elements, it contains a \code{prop} vector and \code{MLres}, the output object from the asymptotic Martin-Loef test on the input data. } \references{ Ponocny, I. (2001) Nonparametric goodness-of-fit tests for the Rasch model. Psychometrika, Volume 66, Number 3\cr Verhelst, N. D. (2008) An Efficient MCMC Algorithm to Sample Binary Matrices with Fixed Marginals. Psychometrika, Volume 73, Number 4\cr Verhelst, N. D., Hatzinger, R., and Mair, P. (2007) The Rasch Sampler, Journal of Statistical Software, Vol. 
20, Issue 4, Feb 2007 } \author{ Reinhold Hatzinger } %\note{ %Maybe notes appear here %} \seealso{ \code{\link[RaschSampler]{RaschSampler}} } \examples{ ### Preparation: # data for examples below data(raschdat1) X<-raschdat1 # generate 100 random matrices based on original data matrix rmat<-rsampler(X,rsctrl(burn_in=100, n_eff=100, seed=123)) ## the following examples can also directly be used by setting ## rmat <- raschdat1 ## without calling rsampler() first, e.g., t1<-NPtest(raschdat1, n=100, method="T1") ### Examples: ##---- T1 ------------------------------------------------------ t1<-NPtest(rmat,method="T1") # choose a different alpha for selecting displayed values print(t1,alpha=0.01) ##---- T2 ------------------------------------------------------ t21<-NPtest(rmat,method="T2",idx=1:5) # default is variance t21 t22<-NPtest(rmat,method="T2",idx=c(1,22,5,27,6,9,11),stat="mad1") t22 ##---- T4 ------------------------------------------------------ age<-sample(20:90, 100, replace=TRUE) # group must be a logical vector # (value of TRUE is used for group selection) age<-age<30 t41<-NPtest(rmat,method="T4",idx=1:3,group=age) t41 sex<-gl(2,50) # group can also be a logical expression (generating a vector) t42<-NPtest(rmat,method="T4",idx=c(1,4,5,6),group=sex==1) t42 ##---- T7, T7a -------------------------------------------------- # simultaenous test for all items in subscale t7<-NPtest(rmat,method="T7",idx=1:3) t7 # test for item-pairs t7a<-NPtest(rmat,method="T7a",idx=c(1,3,5)) # test for item-pairs t7a ##---- T10 ------------------------------------------------------ t101<-NPtest(rmat,method="T10") # default split criterion is "median" t101 split<-runif(100) t102<-NPtest(rmat,method="T10",splitcr=split>0.5) t102 t103<-NPtest(rmat,method="T10",splitcr=sex) t103 ##---- T11 ------------------------------------------------------ t11<-NPtest(rmat,method="T11") t11 ##---- Martin-Loef ---------------------------------------------- \dontrun{ # takes a while ... 
data(raschdat1) split<-rep(1:3, each=10) NPtest(raschdat1, n=100, method="MLoef", splitcr=split) } } \keyword{htest} \keyword{nonparametric} eRm/man/MLoef.Rd0000744000176000001440000000602111572663323013101 0ustar ripleyusers\name{MLoef} \alias{MLoef} \alias{print.MLoef} \alias{summary.MLoef} \title{Computation of Martin-Loef's LR-Test} \description{This LR-Test is based on item subgroup splitting.} \usage{ MLoef(robj, splitcr = "median") } \arguments{ \item{robj}{Object of class \code{Rm}.} \item{splitcr}{Split criterion to define the item groups. \code{"median"} and \code{"mean"} split items in two groups based on their items' raw scores. \code{splitcr} can also be a vector of length k (where k denotes the number of items) that takes two or more distinct values to define groups used for the Martin-Loef Test.} } \details{ This function implements a generalization of the Martin-Loef test for polytomous items as proposed by Christensen, Bjorner, Kreiner & Petersen (2002), but currently does not allow for missing values. % The function can handle missing values, as long as every subject has at % least 2 valid responses in each group of items. If the split criterion is \code{"median"} or \code{"mean"} and one or more items' raw scores are equal to the median resp. mean, \code{MLoef} will assign those items to the lower raw score group. \code{summary.MLoef} gives detailed information about the allocation of all items. \code{summary} and \code{print} methods are available for objects of class \code{MLoef}. An 'exact' version of the Martin-Loef test for binary items is implemented in the function \code{\link{NPtest}}. 
} \value{ \code{MLoef} returns an object of class \code{MLoef} containing: \item{LR}{LR-value} \item{df}{degrees of freedom of the test statistic} \item{p.value}{p-value of the test} \item{fullModel}{the overall Rasch model} \item{subModels}{a list containing the submodels} \item{Lf}{log-likelihood of the full model} \item{Ls}{list of the sub models' log-likelihoods} \item{i.groups}{a list of the item groups} \item{splitcr}{submitted split criterion} \item{split.vector}{binary allocation of items to groups} \item{warning}{items equalling median or mean for the respective split criteria} \item{call}{the matched call} } \references{ Christensen, K. B., Bjorner, J. B., Kreiner S. & Petersen J. H. (2002). Testing unidimensionality in polytomous Rasch models. \emph{Psychometrika, (67)}4, 563--574. Fischer, G. H., and Molenaar, I. (1995). \emph{Rasch Models -- Foundations, Recent Developements, and Applications.} Springer. Rost, J. (2004). \emph{Lehrbuch Testtheorie -- Testkonstruktion.} Bern: Huber. } \author{Marco Maier, Reinhold Hatzinger} %\note{} \seealso{\code{\link{LRtest}}, \code{\link{Waldtest}}} \examples{ # Martin-Loef-test on dichotomous Rasch model using "median" and a user-defined # split vector. Note that group indicators can be of character and/or numeric. splitvec <- c(1, 1, 1, "x", "x", "x", 0, 0, 1, 0) res <- RM(raschdat1[,1:10]) MLoef.1 <- MLoef(res, splitcr = "median") MLoef.2 <- MLoef(res, splitcr = splitvec) MLoef.1 summary(MLoef.2) } \keyword{models} eRm/man/LRtest.Rd0000744000176000001440000002042411572663323013317 0ustar ripleyusers\name{LRtest} \alias{LRtest.Rm} \alias{LRtest} \alias{print.LR} \alias{summary.LR} \alias{plotGOF} \alias{plotGOF.LR} %- Also NEED an '\alias' for EACH other topic documented here. \title{Computation of Andersen's LR-test.} \description{This LR-test is based on subject subgroup splitting. 
} \usage{ \method{LRtest}{Rm}(object, splitcr = "median", se = FALSE) \method{plotGOF}{LR}(x, beta.subset = "all", main="Graphical Model Check", xlab = NULL, ylab = NULL, tlab = "item", ylim = c(-3, 3), xlim = c(-3, 3), type = "p", pos = "4", conf = NULL, ctrline = NULL, ...) %\method{print}{LR}(x,...) %\method{summary}{LR}(object,...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{object}{Object of class \code{Rm}.} \item{splitcr}{Split criterion for subject raw score splitting. \code{all.r} corresponds to a full raw score split, \code{median} uses the median as split criterion, \code{mean} performs a mean-split. Optionally \code{splitcr} can also be a vector which assigns each person to a certain subgroup (e.g., following an external criterion). This vector can be numeric, character or a factor.} \item{se}{If \code{TRUE} standard errors for beta's are computed.} %Arguments for \code{plotGOF}: \item{x}{Object of class \code{LR}. Also used for visualizing the fit of single items.} \item{beta.subset}{If \code{"all"}, all items are plotted. Otherwise numeric subset vector can be specified.} \item{main}{Main title of the plot.} \item{xlab}{Label on x-axis, default gives name of \code{splitcr} and level.} \item{ylab}{Label on y-axis, default gives name of \code{splitcr} and level.} \item{tlab}{Specification of item labels: \code{"item"} prints the item names, \code{"number"} gives integers corresponding to order of the beta parameters, if \code{"none"} no labels are printed. \code{"identify"} allows for an interactive labelling. Initially no labels are printed, after clicking close to an item point the corresponding label is added. The identification process is terminated by clicking the second button and selecting 'Stop' from the menu, or from the 'Stop' menu on the graphics window. For more information and basic operation see \code{\link{identify}}. 
} \item{xlim}{Limits on x-axis.} \item{ylim}{Limits on y-axis.} \item{type}{Plotting type.(see \code{\link{plot}})} \item{pos}{Position of the item label (see \code{\link{text}})} \item{conf}{for plotting confidence ellipses for the item parameters. If \code{conf=NULL} (the default) no ellipses are drawn. Otherwise, \code{conf} must be specified as a list with optional elements: \code{gamma}, is the confidence level (numeric), \code{col} and \code{lty}, colour and linetype (see \code{\link{par}}), \code{which} (numeric index vector) specifying for which items ellipses are drawn (must be a subset of \code{beta.subset}), and \code{ia}, logical, if the ellipses are to be drawn interactively (cf. \code{tlab="identify"} above). If \code{conf} is specified as a an empty list, %\code{conf=list()}, the default values \code{conf=list(gamma=0.95, col="red", lty="dashed", ia=FALSE)} will be used. See example below. To use \code{conf}, the LR object \code{x} has to be generated using the option \code{se=TRUE} in \code{LRtest()}. For specification of \code{col} and \code{which} see Details and Examples below.} \item{ctrline}{for plotting confidence bands (control lines, cf.eg.Wright and Stone, 1999). If \code{ctrline=NULL} (the default) no lines are drawn. Otherwise, \code{ctrline} must be specified as a list with optional elements: \code{gamma}, is the confidence level (numeric), \code{col} and \code{lty}, colour and linetype (see \code{\link{par}}). If \code{ctrline} is specified as \code{ctrline=list()}, the default values \code{conf=list(gamma=0.95, col="blue", lty="solid")} will be used. See examples below. To use \code{ctrline}, the LR object \code{x} has to be generated using the option \code{se=TRUE} in \code{LRtest()}. 
} \item{...}{Additional parameters.} } \details{If the data set contains missing values and \code{mean} or \code{median} is specified as split criterion, means or medians are calculated for each missing value subgroup and consequently used for raw score splitting. When using interactive selection for both labelling of single points (\code{tlab = "identify"}) and drawing confidence ellipses at certain points (\code{ia = TRUE}) then first all plotted points are labelled and afterwards all ellipses are generated. Both identification processes can be terminated by clicking the second (right) mouse button and selecting `Stop' from the menu, or from the `Stop' menu on the graphics window. Using the specification \code{which} allows for selectively drawing ellipses for certain items only, e.g., \code{which=1:3} draws ellipses for items 1 to 3 (as long as they are included in \code{beta.subset}). The default is drawing ellipses for all items. The element \code{col} in the \code{conf} list can either be a single colour specification such as \code{"blue"} or a vector with colour specifications for all items. The length must be the same as the number of ellipses to be drawn. For colour specification a palette can be set up using standard palettes (e.g. \code{\link{rainbow}}) or palettes from the \code{colorspace} or \code{RColorBrewer} package. An example is given below. \code{summary} and \code{print} methods are available for objects of class \code{LR}. 
} \value{ \code{LRtest} returns an object of class \code{LR} containing: \item{LR}{LR-value.} \item{df}{Degrees of freedom of the test statistic.} \item{Chisq}{Chi-square value with corresponding df.} \item{pvalue}{P-value of the test.} \item{likgroup}{Log-likelihood values for the subgroups} \item{betalist}{List of beta parameters for the subgroups.} \item{selist}{List of standard errors of beta's.} \item{etalist}{List of eta parameters for the subgroups.} \item{spl.gr}{Names and levels for \code{splitcr}.} \item{call}{The matched call.} \item{fitobj}{List containing model objects from subgroup fit.} } \references{ Fischer, G. H., and Molenaar, I. (1995). Rasch Models - Foundations, Recent Developements, and Applications. Springer. Mair, P., and Hatzinger, R. (2007). Extended Rasch modeling: The eRm package for the application of IRT models in R. Journal of Statistical Software, 20(9), 1-20. Mair, P., and Hatzinger, R. (2007). CML based estimation of extended Rasch models with the eRm package in R. Psychology Science, 49, 26-43. Wright, B.D., and Stone, M.H. (1999). Measurement essentials. Wide Range Inc., Wilmington. (\url{http://www.rasch.org/measess/me-all.pdf} 28Mb). 
} \author{Patrick Mair, Reinhold Hatzinger} %\note{} \seealso{\code{\link{Waldtest}}} \examples{ # LR-test on dichotomous Rasch model with user-defined split splitvec <- sample(1:3, 100, replace = TRUE) data(raschdat1) res <- RM(raschdat1) lrres <- LRtest(res, splitcr = splitvec) lrres summary(lrres) \dontrun{ # goodness-of-fit plot with interactive labelling of items plotGOF(lrres, tlab = "identify") } # LR-test with mean split, standard errors for beta's lrres2 <- LRtest(res, split = "mean", se = TRUE) # goodness-of-fit plot # additional 95 percent control line with user specified style plotGOF(lrres2, ctrline=list(gamma=0.95, col="red", lty="dashed")) # goodness-of-fit plot for items 1, 14, 24, and 25 # additional 95 percent confidence ellipses, default style plotGOF(lrres2, beta.subset=c(14,25,24,1), conf=list()) # goodness-of-fit plot for items 1, 14, 24, and 25 # for items 1 and 24 additional 95 percent confidence ellipses # using colours for these 2 items from the colorspace package \dontrun{ library(colorspace) colors<-rainbow_hcl(2) plotGOF(lrres2, beta.subset=c(14,25,24,1), conf=list(which=c(1,14), col=colors)) } } \keyword{models} eRm/man/LRSM.Rd0000744000176000001440000000714211572663323012661 0ustar ripleyusers\name{LRSM} \alias{LRSM} %- Also NEED an '\alias' for EACH other topic documented here. \title{Estimation of linear rating scale models} \description{ This function computes the parameter estimates of a linear rating scale model (LRSM) for polytomuous item responses by using CML estimation. } \usage{ LRSM(X, W , mpoints = 1, groupvec = 1, se = TRUE, sum0 = TRUE, etaStart) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{X}{Input data matrix or data frame; rows represent individuals (N in total), columns represent items. Missing values are inserted as \code{NA}.} \item{W}{Design matrix for the LRSM. 
If omitted, the function will compute W automatically.} \item{mpoints}{Number of measurement points.} \item{groupvec}{Vector of length N which determines the group membership of each subject, starting from 1.} \item{se}{If \code{TRUE}, the standard errors are computed.} \item{sum0}{If \code{TRUE}, the parameters are normalized to sum-0 by specifying an appropriate \code{W}. If \code{FALSE}, the first parameter is restricted to 0.} \item{etaStart}{A vector of starting values for the eta parameters can be specified. If missing, the 0-vector is used.} } \details{ Through appropriate definition of \code{W} the LRSM can be viewed as a more parsimonious RSM, on the one hand, e.g. by imposing some cognitive base operations to solve the items. On the other hand, linear extensions of the Rasch model such as group comparisons and repeated measurement designs can be computed. If more than one measurement point is examined, the item responses for the 2nd, 3rd, etc. measurement point are added column-wise in X. If \code{W} is user-defined, it is nevertheless necessary to specify \code{mpoints} and \code{groupvec}. It is important that first the time contrasts and then the group contrasts have to be imposed. Available methods for LRSM-objects are: \code{print}, \code{coef}, \code{model.matrix}, \code{vcov},\code{summary}, \code{logLik}, \code{person.parameters}. 
} \value{ Returns an object of class \code{eRm} containing: \item{loglik}{Conditional log-likelihood.} \item{iter}{Number of iterations.} \item{npar}{Number of parameters.} \item{convergence}{See \code{code} output in \code{\link{nlm}}.} \item{etapar}{Estimated basic item parameters (item and category parameters).} \item{se.eta}{Standard errors of the estimated basic item parameters.} \item{betapar}{Estimated item (easiness) parameters.} \item{se.beta}{Standard errors of item parameters.} \item{hessian}{Hessian matrix if \code{se = TRUE}.} \item{W}{Design matrix.} \item{X}{Data matrix.} \item{X01}{Dichotomized data matrix.} \item{groupvec}{Group membership vector.} \item{call}{The matched call.} } \references{ Fischer, G. H., and Molenaar, I. (1995). Rasch Models - Foundations, Recent Developments, and Applications. Springer. Mair, P., and Hatzinger, R. (2007). Extended Rasch modeling: The eRm package for the application of IRT models in R. Journal of Statistical Software, 20(9), 1-20. Mair, P., and Hatzinger, R. (2007). CML based estimation of extended Rasch models with the eRm package in R. Psychology Science, 49, 26-43. } \author{Patrick Mair, Reinhold Hatzinger} %\note{} \seealso{\code{\link{LLTM}},\code{\link{LPCM}}} \examples{ #LRSM for two measurement points #20 subjects, 2*3 items, W generated automatically, #first parameter set to 0, no standard errors computed. data(lrsmdat) res <- LRSM(lrsmdat, mpoints = 2, groupvec = 1, sum0 = FALSE, se = FALSE) print(res) } \keyword{models} eRm/man/LPCM.Rd0000744000176000001440000000707611572663323012645 0ustar ripleyusers\name{LPCM} \alias{LPCM} %- Also NEED an '\alias' for EACH other topic documented here. \title{Estimation of linear partial credit models} \description{ This function computes the parameter estimates of a linear partial credit model (LPCM) for polytomous item responses by using CML estimation. 
} \usage{ LPCM(X, W , mpoints = 1, groupvec = 1, se = TRUE, sum0 = TRUE, etaStart) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{X}{Input data matrix or data frame; rows represent individuals (N in total), columns represent items. Missing values are inserted as \code{NA}.} \item{W}{Design matrix for the LPCM. If omitted, the function will compute W automatically.} \item{mpoints}{Number of measurement points.} \item{groupvec}{Vector of length N which determines the group membership of each subject, starting from 1.} \item{se}{If \code{TRUE}, the standard errors are computed.} \item{sum0}{If \code{TRUE}, the parameters are normalized to sum-0 by specifying an appropriate \code{W}. If \code{FALSE}, the first parameter is restricted to 0.} \item{etaStart}{A vector of starting values for the eta parameters can be specified. If missing, the 0-vector is used.} } \details{ Through appropriate definition of \code{W} the LPCM can be viewed as a more parsimonious PCM, on the one hand, e.g. by imposing some cognitive base operations to solve the items. On the other hand, linear extensions of the Rasch model such as group comparisons and repeated measurement designs can be computed. If more than one measurement point is examined, the item responses for the 2nd, 3rd, etc. measurement point are added column-wise in X. If \code{W} is user-defined, it is nevertheless necessary to specify \code{mpoints} and \code{groupvec}. It is important that first the time contrasts and then the group contrasts have to be imposed. Available methods for LPCM-objects are:\cr \code{print}, \code{coef}, \code{model.matrix}, \code{vcov},\code{summary}, \code{logLik}, \code{person.parameters}. 
} \value{ Returns an object of class \code{eRm} containing: \item{loglik}{Conditional log-likelihood.} \item{iter}{Number of iterations.} \item{npar}{Number of parameters.} \item{convergence}{See \code{code} output in \code{\link{nlm}}.} \item{etapar}{Estimated basic item parameters.} \item{se.eta}{Standard errors of the estimated basic item parameters.} \item{betapar}{Estimated item (easiness) parameters.} \item{se.beta}{Standard errors of item parameters.} \item{hessian}{Hessian matrix if \code{se = TRUE}.} \item{W}{Design matrix.} \item{X}{Data matrix.} \item{X01}{Dichotomized data matrix.} \item{groupvec}{Group membership vector.} \item{call}{The matched call.} } \references{ Fischer, G. H., and Molenaar, I. (1995). Rasch Models - Foundations, Recent Developments, and Applications. Springer. Mair, P., and Hatzinger, R. (2007). Extended Rasch modeling: The eRm package for the application of IRT models in R. Journal of Statistical Software, 20(9), 1-20. Mair, P., and Hatzinger, R. (2007). CML based estimation of extended Rasch models with the eRm package in R. Psychology Science, 49, 26-43. } \author{Patrick Mair, Reinhold Hatzinger} %\note{} \seealso{\code{\link{LRSM}},\code{\link{LLTM}}} \examples{ #LPCM for two measurement points and two subject groups #20 subjects, 2*3 items data(lpcmdat) G <- c(rep(1,10),rep(2,10)) #group vector res <- LPCM(lpcmdat, mpoints = 2, groupvec = G) print(res) summary(res) } \keyword{models} eRm/man/LLTM.Rd0000744000176000001440000000731611572663323012657 0ustar ripleyusers\name{LLTM} \alias{LLTM} \title{Estimation of linear logistic test models} \description{ This function computes the parameter estimates of a linear logistic test model (LLTM) for binary item responses by using CML estimation. } \usage{ LLTM(X, W, mpoints = 1, groupvec = 1, se = TRUE, sum0 = TRUE, etaStart) } %- maybe also 'usage' for other objects documented here. 
\arguments{ \item{X}{Input 0/1 data matrix or data frame; rows represent individuals (N in total), columns represent items. Missing values have to be inserted as \code{NA}.} \item{W}{Design matrix for the LLTM. If omitted, the function will compute W automatically.} \item{mpoints}{Number of measurement points.} \item{groupvec}{Vector of length N which determines the group membership of each subject, starting from 1. If \code{groupvec=1}, no group contrasts are imposed.} \item{se}{If \code{TRUE}, the standard errors are computed.} \item{sum0}{If \code{TRUE}, the parameters are normalized to sum-0 by specifying an appropriate \code{W}. If \code{FALSE}, the first parameter is restricted to 0.} \item{etaStart}{A vector of starting values for the eta parameters can be specified. If missing, the 0-vector is used.} } \details{ Through appropriate definition of \code{W} the LLTM can be viewed as a more parsimonious Rasch model, on the one hand, e.g. by imposing some cognitive base operations to solve the items. On the other hand, linear extensions of the Rasch model such as group comparisons and repeated measurement designs can be computed. If more than one measurement point is examined, the item responses for the 2nd, 3rd, etc. measurement point are added column-wise in X. If \code{W} is user-defined, it is nevertheless necessary to specify \code{mpoints} and \code{groupvec}. It is important that first the time contrasts and then the group contrasts have to be imposed. Available methods for LLTM-objects are:\cr \code{print}, \code{coef}, \code{model.matrix}, \code{vcov},\code{summary}, \code{logLik}, \code{person.parameters}. 
} \value{ Returns an object of class \code{eRm} containing: \item{loglik}{Conditional log-likelihood.} \item{iter}{Number of iterations.} \item{npar}{Number of parameters.} \item{convergence}{See \code{code} output in \code{\link{nlm}}.} \item{etapar}{Estimated basic item parameters.} \item{se.eta}{Standard errors of the estimated basic parameters.} \item{betapar}{Estimated item (easiness) parameters.} \item{se.beta}{Standard errors of item parameters.} \item{hessian}{Hessian matrix if \code{se = TRUE}.} \item{W}{Design matrix.} \item{X}{Data matrix.} \item{X01}{Dichotomized data matrix.} \item{groupvec}{Group membership vector.} \item{call}{The matched call.} } \references{ Fischer, G. H., and Molenaar, I. (1995). Rasch Models - Foundations, Recent Developments, and Applications. Springer. Mair, P., and Hatzinger, R. (2007). Extended Rasch modeling: The eRm package for the application of IRT models in R. Journal of Statistical Software, 20(9), 1-20. Mair, P., and Hatzinger, R. (2007). CML based estimation of extended Rasch models with the eRm package in R. Psychology Science, 49, 26-43. } \author{Patrick Mair, Reinhold Hatzinger} %\note{} \seealso{\code{\link{LRSM}},\code{\link{LPCM}}} \examples{ #LLTM for 2 measurement points #100 persons, 2*15 items, W generated automatically data(lltmdat1) res1 <- LLTM(lltmdat1, mpoints = 2) print(res1) summary(res1) #Reparameterized Rasch model as LLTM (more parsimonious) data(lltmdat2) W <- matrix(c(1,2,1,3,2,2,2,1,1,1),ncol=2) #design matrix res2 <- LLTM(lltmdat2, W = W) print(res2) summary(res2) } \keyword{models} eRm/man/llradat3.Rd0000744000176000001440000000170711572663323013613 0ustar ripleyusers\name{llradat3} \alias{llradat3} \docType{data} \title{An Artificial LLRA Data Set } \description{ Artificial data set of 3 items, 2 time points and 2 groups for LLRA. It is example 6 from Hatzinger and Rusch (2009). } \usage{data(llradat3)} \format{ A data frame with 60 observations of 6 variables. 
\itemize{ \item{V1}{ Answers to item 1 at time point 1} \item{V2}{ Answers to item 2 at time point 1} \item{V3}{ Answers to item 3 at time point 1} \item{V4}{ Answers to item 1 at time point 2} \item{V5}{ Answers to item 2 at time point 2} \item{V6}{ Answers to item 3 at time point 2} } } \details{ This is a data set as described in Hatzinger and Rusch (2009). } \references{ Hatzinger, R. and Rusch, T. (2009) IRT models with relaxed assumptions in eRm: A manual-like instruction. \emph{Psychology Science Quarterly}, \bold{51}, pp. 87--120, \url{http://erm.r-forge.r-project.org/psq_1_2009_06_87-120.pdf} } \examples{ data(llradat3) } \keyword{datasets} eRm/man/llraDat2.Rd0000744000176000001440000000402211572663323013543 0ustar ripleyusers\name{llraDat2} \alias{llraDat2} \docType{data} \title{An Artifical LLRA Data Set } \description{ Artificial data set of 70 subjects with 5 items, 4 time points and 3 groups for LLRA. } \usage{data(llraDat2)} \format{ A data frame with 70 observations of 21 variables. 
\itemize{ \item{t1.I1}{ Answers to item 1 at time point 1} \item{t1.I2}{ Answers to item 2 at time point 1} \item{t1.I3}{ Answers to item 3 at time point 1} \item{t1.I4}{ Answers to item 4 at time point 1} \item{t1.I5}{ Answers to item 5 at time point 1} \item{t2.I1}{ Answers to item 1 at time point 2} \item{t2.I2}{ Answers to item 2 at time point 2} \item{t2.I3}{ Answers to item 3 at time point 2} \item{t2.I4}{ Answers to item 4 at time point 2} \item{t2.I5}{ Answers to item 5 at time point 2} \item{t3.I1}{ Answers to item 1 at time point 3} \item{t3.I2}{ Answers to item 2 at time point 3} \item{t3.I3}{ Answers to item 3 at time point 3} \item{t3.I4}{ Answers to item 4 at time point 3} \item{t3.I5}{ Answers to item 5 at time point 3} \item{t4.I1}{ Answers to item 1 at time point 4} \item{t4.I2}{ Answers to item 2 at time point 4} \item{t4.I3}{ Answers to item 3 at time point 4} \item{t4.I4}{ Answers to item 4 at time point 4} \item{t4.I5}{ Answers to item 5 at time point 4} \item{groups}{ The group membership} } } \details{ This is a data set as described in Hatzinger and Rusch (2009). 5 items were measured at 4 time points (in columns). Each persons answers to the items are recorded in the rows. There are 2 treatment groups and a control group. Treatment group 2 has size, 10, treatment group 1 has size 20 and the control group has size 40. Item 1 is dichotomous, all others are polytomous. Item 2, 3, 4 and 5 have 3, 4, 5, 6 categories respectively. } \references{ Hatzinger, R. and Rusch, T. (2009) IRT models with relaxed assumptions in eRm: A manual-like instruction. \emph{Psychology Science Quarterly}, \bold{51}, pp. 
87--120, \url{http://erm.r-forge.r-project.org/psq_1_2009_06_87-120.pdf} } \examples{ data(llraDat2) } \keyword{datasets} eRm/man/llraDat1.Rd0000744000176000001440000000454411572663323013553 0ustar ripleyusers\name{llraDat1} \alias{llraDat1} \docType{data} \title{An Artifical LLRA Data Set } \description{ Artificial data set of 5 items, 5 time points and 5 groups for LLRA. } \usage{data(llraDat1)} \format{ A data frame with 150 observations of 26 variables. \itemize{ \item{t1.I1}{ Answers to item 1 at time point 1} \item{t1.I2}{ Answers to item 2 at time point 1} \item{t1.I3}{ Answers to item 3 at time point 1} \item{t1.I4}{ Answers to item 4 at time point 1} \item{t1.I5}{ Answers to item 5 at time point 1} \item{t2.I1}{ Answers to item 1 at time point 2} \item{t2.I2}{ Answers to item 2 at time point 2} \item{t2.I3}{ Answers to item 3 at time point 2} \item{t2.I4}{ Answers to item 4 at time point 2} \item{t2.I5}{ Answers to item 5 at time point 2} \item{t3.I1}{ Answers to item 1 at time point 3} \item{t3.I2}{ Answers to item 2 at time point 3} \item{t3.I3}{ Answers to item 3 at time point 3} \item{t3.I4}{ Answers to item 4 at time point 3} \item{t3.I5}{ Answers to item 5 at time point 3} \item{t4.I1}{ Answers to item 1 at time point 4} \item{t4.I2}{ Answers to item 2 at time point 4} \item{t4.I3}{ Answers to item 3 at time point 4} \item{t4.I4}{ Answers to item 4 at time point 4} \item{t4.I5}{ Answers to item 5 at time point 4} \item{t5.I1}{ Answers to item 1 at time point 5} \item{t5.I2}{ Answers to item 2 at time point 5} \item{t5.I3}{ Answers to item 3 at time point 5} \item{t5.I4}{ Answers to item 4 at time point 5} \item{t5.I5}{ Answers to item 5 at time point 5} \item{groups}{ The group membership} } } \details{ This is a data set as described in Hatzinger and Rusch (2009). 5 items were measured at 5 time points (in columns). Each row corresponds to one person (P1 to P150). There are 4 treatment groups and a control group. 
Treatment group G5 has size 10 (the first ten subjects), treatment group G4 has size 20, treatment group G3 has size 30, treatment group G2 has size 40 and the control group CG has size 50 (the last 50 subjects). Item 1 is dichotomous, all others are polytomous. Item 2, 3, 4 and 5 have 3, 4, 5, 6 categories respectively. } \references{ Hatzinger, R. and Rusch, T. (2009) IRT models with relaxed assumptions in eRm: A manual-like instruction. \emph{Psychology Science Quarterly}, \bold{51}, pp. 87--120, \url{http://erm.r-forge.r-project.org/psq_1_2009_06_87-120.pdf} } \examples{ data(llraDat1) } \keyword{datasets} eRm/man/LLRA.Rd0000744000176000001440000001207711572663323012641 0ustar ripleyusers\name{LLRA} \alias{LLRA} \alias{print.llra} \title{Fit Linear Logistic Models with Relaxed Assumptions (LLRA) } \description{ Automatically builds design matrix and fits LLRA. } \usage{ LLRA(X, W, mpoints, groups, baseline, itmgrps = NULL, ...) \method{print}{llra}(x, ...) } \arguments{ \item{X}{Data matrix as described in Hatzinger and Rusch (2009). It must be of wide format, e.g. for each person all item answers are written in columns for t1, t2, t3 etc. Hence each row corresponds to all observations for a single person. See llraDat1 for an example. Missing values are not allowed. } \item{W}{Design Matrix for LLRA to be passed to \code{LPCM}. If missing, it is generated automatically. } \item{mpoints}{The number of time points. } \item{groups}{Vector, matrix or data frame with subject/treatment covariates. } \item{baseline}{An optional vector with the baseline values for the columns in group. } \item{itmgrps}{ Specifies how many groups of items there are. Currently not functional but may be useful in the future. } \item{x}{For the print method, an object of class \code{"llra"}. } \item{\dots}{ Additional arguments to be passed to and from other methods. 
} } \details{The function \code{LLRA} is a wrapper for \code{LPCM} to fit Linear Logistic Models with Relaxed Assumptions (LLRA). LLRA are extensions of the LPCM for the measurement of change over a number of discrete time points for a set of items. It can incorporate categorical covariate information. If no design matrix W is passed as an argument, it is built automatically from scratch. Unless passed by the user, the baseline group is always the one with the lowest (alpha-)numerical value for argument \code{groups}. All other groups are labeled decreasingly according to the (alpha)-numerical value, e.g. with 2 treatment groups (TG1 and TG2) and one control group (CG), CG will be the baseline than TG1 and TG2. Hence the group effects are ordered like \code{rev((unique(names(groupvec)))} for naming. Caution is advised as LLRA will fail if all changes for a group will be into a single direction (e.g. all subjects in the treatment group show improvement). Currently only data matrices are supported as arguments. } \value{ Returns an object of class \code{"llra"} (also inheriting from class \code{"eRm"}) containing \item{loglik}{Conditional log-likelihood.} \item{iter}{Number of iterations.} \item{npar}{Number of parameters.} \item{convergence}{See code output in nlm.} \item{etapar}{Estimated basic item parameters. These are the LLRA effect parameters.} \item{se.eta}{Standard errors of the estimated basic item parameters.} \item{betapar}{Estimated item (easiness) parameters of the virtual items (not useful for interpretation here).} \item{se.beta}{Standard errors of virtual item parameters (not useful for interpretation here).} \item{hessian}{Hessian matrix if \code{se = TRUE}.} \item{W}{Design matrix.} \item{X}{Data matrix in long format. 
The columns correspond to the measurement points and each persons item answers are listed susequently in rows.} \item{X01}{Dichotomized data matrix.} \item{groupvec}{Assignment vector.} \item{call}{The matched call.} \item{itms}{The number of items.} } \references{ Fischer, G.H. (1995) Linear logistic models for change. In G.H. Fischer and I. W. Molenaar (eds.), \emph{Rasch models: Foundations, recent developments and applications} (pp. 157--181), New York: Springer. Glueck, J. and Spiel, C. (1997) Item response models for repeated measures designs: Application and limitations of four different approaches. \emph{Methods of Psychological Research}, \bold{2}. \url{http://www.dgps.de/fachgruppen/methoden/mpr-online/issue2/art6/article.html} Hatzinger, R. and Rusch, T. (2009) IRT models with relaxed assumptions in eRm: A manual-like instruction. \emph{Psychology Science Quarterly}, \bold{51}, pp. 87--120, \url{http://erm.r-forge.r-project.org/psq_1_2009_06_87-120.pdf} } \author{ Thomas Rusch } \section{Warning}{A warning is printed that the first two categories for polytomous items are equated to save parameters. See Hatzinger and Rusch (2009) for a justification why this is valid also from a substantive point of view.} \seealso{ The function to build the design matrix \code{\link{build_W}}, and the S3 methods \code{\link{summary.llra}} and \code{\link{plotTR}} and \code{\link{plotGR}} for plotting. } \examples{ ##Example 6 from Hatzinger & Rusch (2009) data("llradat3") groups <- c(rep("TG",30),rep("CG",30)) llra1 <- LLRA(llradat3,mpoints=2,groups=groups) llra1 ##An LLRA with 2 treatment groups and 1 baseline group, 5 items and 4 ##time points. Item 1 is dichotomous, all others have 3, 4, 5, 6 ##categories respectively. 
\dontrun{ data("llraDat2") dats <- llraDat2[1:20] groups <- llraDat2$group tps <- 4 ex2 <- LLRA(dats,mpoints=tps,groups=groups) #baseline CG #baseline TG1 ex2a <- LLRA(dats,mpoints=tps,groups=groups,baseline="TG1") #baseline TG1 ex2 summary(ex2) summary(ex2a) plotGR(ex2) plotTR(ex2) } } eRm/man/llra.datprep.Rd0000744000176000001440000000452711572663323014500 0ustar ripleyusers\name{llra.datprep} \alias{llra.datprep} \title{Prepare Data Set for LLRA Analysis } \description{ Converts wide data matrix in long format, sorts subjects according to groups and builds assigment vector. } \usage{ llra.datprep(X, mpoints, groups, baseline) } \arguments{ \item{X}{Data matrix as described in Hatzinger and Rusch (2009). It must be of wide format, e.g. for each person all item answers are written in columns for t1, t2, t3 etc. Hence each row corresponds to all observations for a single person. Missing values are not allowed. } \item{mpoints}{The number of time points. } \item{groups}{Vector, matrix or data frame with subject/treatment covariates. } \item{baseline}{An optional vector with the baseline values for the columns in group.} } \details{The function converts a data matrix from wide to long fromat as needed for LLRA. Additionally it sorts the subjects according to the different treatment/covariate groups. The group with the lowest (alpha-)numerical value will be the baseline. Treatment and covariate groups are either defined by a vector, or by a matrix or data frame. The latter will be combined to a vector of groups corresponding to a combination of each factor level per column with the factor levels of the other column. The (constructed or passed) vector will then be used to create the assignment vector. 
} \value{ Returns a list with the components \item{X}{Data matrix in long format with subjects sorted by groups.} \item{assign.vec}{The assignment vector.} \item{grp_n}{A vector of the number of subjects in each group.} } \author{ Reinhold Hatzinger } \seealso{ The function that uses this is \code{\link{LLRA}}. The values from \code{llra.datprep} can be passed to \code{\link{build_W}}. } \examples{ # example 3 items, 3 timepoints, n=10, 2x2 treatments dat<-sim.rasch(10,9) tr1<-sample(c("a","b"),10,r=TRUE) tr2<-sample(c("x","y"),10,r=TRUE) # one treatment res<-llra.datprep(dat,mpoints=3,groups=tr1) res<-llra.datprep(dat,mpoints=3,groups=tr1,baseline="b") # two treatments res<-llra.datprep(dat,mpoints=3,groups=cbind(tr1,tr2)) res<-llra.datprep(dat,mpoints=3,groups=cbind(tr1,tr2),baseline=c("b","x")) # two treatments - data frame tr.dfr<-data.frame(tr1, tr2) res<-llra.datprep(dat,mpoints=3,groups=tr.dfr) } eRm/man/itemfit.ppar.Rd0000744000176000001440000000624211572663323014506 0ustar ripleyusers\name{itemfit.ppar} \alias{itemfit.ppar} \alias{itemfit} \alias{personfit.ppar} \alias{personfit} \alias{residuals.ppar} \alias{pmat.ppar} \alias{pmat} \alias{print.ifit} \alias{print.pfit} \alias{print.resid} %- Also NEED an '\alias' for EACH other topic documented here. \title{Residuals, Personfit and Itemfit Statistics} \description{\code{pmat} computes the theoretical person-item matrix with solving probabilities for each category (except 0th). \code{residuals} computes the squared and standardized residuals based on the observed and the expected person-item matrix. Chi-square based itemfit and personfit statistics can be obtained by using \code{itemfit} and \code{personfit}. } \usage{ \method{pmat}{ppar}(object) \method{residuals}{ppar}(object,...) \method{itemfit}{ppar}(object) \method{personfit}{ppar}(object) \method{print}{ifit}(x, visible = TRUE, ...) \method{print}{pfit}(x, visible = TRUE, ...) \method{print}{resid}(x, ...) 
} %- maybe also 'usage' for other objects documented here. \arguments{ \item{object}{Object of class \code{ppar}, derived from \code{person.parameter}.} \item{x}{Object of class \code{ifit}, \code{pfit}, or \code{resid}.} \item{visible}{if \code{FALSE}, returns the matrix of fit statistics that otherwise would be printed.} \item{...}{Further arguments passed to or from other methods. They are ignored in this function.} } %\details{} \value{ %Function \code{pmat}: \item{pmat}{Matrix of theoretical probabilities for each category except 0th (from function \code{pmat}).} %Function \code{itemfit} returns a list of class \code{ifit} with components: \item{i.fit}{Chi-squared itemfit statistics (from function \code{itemfit}).} \item{i.df}{Degrees of freedom for itemfit statistics (from function \code{itemfit}).} \item{st.res}{Standardized residuals (from function \code{itemfit}).} \item{i.outfitMSQ}{Outfit mean-square statistics (from function \code{itemfit}).} \item{i.infitMSQ}{Infit mean-square statistics (from function \code{itemfit}).} %Function \code{personfit} returns a list of class \code{pfit} with components: \item{p.fit}{Chi-squared personfit statistics (from function \code{personfit}).} \item{p.df}{Degrees of freedom for personfit statistics (from function \code{personfit}).} \item{st.res}{Standardized residuals (from function \code{personfit}).} \item{p.outfitMSQ}{Outfit mean-square statistics (from function \code{personfit}).} \item{p.infitMSQ}{Infit mean-square statistics (from function \code{personfit}).} } \references{ Smith Jr., E. V., and Smith, R. M. (2004). Introduction to Rasch Measurement. JAM press. Wright, B.D., and Masters, G.N. Computation of OUTFIT and INFIT Statistics. 
Rasch Measurement Transactions, 1990, 3:4 p.84-5 } \author{Patrick Mair, Reinhold Hatzinger} %\note{} \seealso{\code{\link{person.parameter}} } \examples{ # Rasch model, estimation of item and person parameters data(raschdat2) res <- RM(raschdat2) p.res <- person.parameter(res) # Matrix with expected probabilities and corresponding residuals pmat(p.res) residuals(p.res) #Itemfit itemfit(p.res) #Personfit personfit(p.res) } \keyword{models} eRm/man/IC.Rd0000744000176000001440000000223311572663323012373 0ustar ripleyusers\name{IC} \alias{IC} \alias{IC.ppar} \title{Information criteria} \description{Computation of information criteria such as AIC, BIC, and cAIC based on unconditional (joint), marginal, and conditional log-likelihood} \usage{ \method{IC}{ppar}(object) } \arguments{ \item{object}{Object of class \code{ppar} (from \code{person.parameter()}.} } \details{ The joint log-likelihood is established by summation of the logarithms of the estimated solving probabilities. The marginal log-likelihood can be computed directly from the conditional log-likelihood (see vignette for details). 
} \value{ The function \code{IC} returns an object of class \code{ICr} containing: \item{ICtable}{Matrix containing log-likelihood values, number of parameters, AIC, BIC, and cAIC for the joint, marginal, and conditional log-likelihood.} } \seealso{ \code{\link{LRtest.Rm}} } \examples{ #IC's for Rasch model data(raschdat2) res <- RM(raschdat2) #Rasch model pres <- person.parameter(res) #Person parameters IC(pres) #IC's for RSM data(rsmdat) res <- RSM(rsmdat) pres <- person.parameter(res) IC(pres) } \keyword{models} eRm/man/gofIRT.Rd0000744000176000001440000000370511572663323013237 0ustar ripleyusers\name{gofIRT} \alias{gofIRT} \alias{gofIRT.ppar} \alias{summary.gof} \alias{print.gof} \title{Various model tests and fit indices} \description{This function computes various model tests and fit indices for objects of class \code{ppar}: Collapsed deviance, Casewise deviance, Rost's LR-test, Hosmer-Lemeshow test, R-Squared measures, confusion matrix, ROC analysis. } \usage{ \method{gofIRT}{ppar}(object, groups.hl = 10, cutpoint = 0.5) } \arguments{ \item{object}{Object of class \code{ppar} (from \code{person.parameter()}).} \item{groups.hl}{Number of groups for Hosmer-Lemeshow test (see details).} \item{cutpoint}{Integer between 0 and 1 for computing the 0-1 model matrix from the estimated probabilities} } \details{So far this test statistics are implemented only for dichotomous models without NA's. The Hosmer-Lemeshow test is computed by splitting the response vector into percentiles, e.g. \code{groups.hl = 10} corresponds to decile splitting. 
} \value{ The function \code{gofIRT} returns an object of class \code{gof} containing: \item{test.table}{Ouput for model tests.} \item{R2}{List with R-squared measures.} \item{classifier}{Confusion matrix, accuracy, sensitivity, specificity.} \item{AUC}{Area under ROC curve.} \item{Gini}{Gini coefficient.} \item{ROC}{FPR and TPR for different cutpoints.} \item{opt.cut}{Optimal cutpoint determined by ROC analysis.} \item{predobj}{Prediction output from ROC analysis (\code{ROCR} package)} } \references{ Mair, P., Reise, S. P., and Bentler, P. M. (2008). IRT goodness-of-fit using approaches from logistic regression. UCLA Statistics Preprint Series. } \seealso{ \code{\link{itemfit.ppar}},\code{\link{personfit.ppar}},\code{\link{LRtest}} } \examples{ #Goodness-of-fit for a Rasch model data(raschdat1) res <- RM(raschdat1) pres <- person.parameter(res) gof.res <- gofIRT(pres) gof.res summary(gof.res) } \keyword{models} eRm/man/eRm-package.Rd0000744000176000001440000000727411572663323014226 0ustar ripleyusers\name{eRm-package} \alias{eRm-package} \alias{eRm} \docType{package} \title{ extended Rasch modeling } \description{ This package estimates extended Rasch models, i.e. the ordinary Rasch model for dichotomous data (RM), the linear logistic test model (LLTM), the rating scale model (RSM) and its linear extension (LRSM), the partial credit model (PCM) and its linear extension (LPCM). The parameters are estimated by conditional maximum likelihood (CML). Missing values are allowed in the data matrix. Additional features are the estimation of the person parameters, LR-Model test, item-spefific Wald test, Martin-Loef test, nonparametric Monte-Carlo tests, itemfit and personfit statistics, various ICC plots. An eRm platform is provided at http://r-forge.r-project.org/projects/erm/. 
} \details{ \tabular{ll}{ Package: \tab eRm\cr Type: \tab Package\cr Version: \tab 0.14-0\cr Date: \tab 2011-06-05\cr License: \tab GPL\cr } The basic input units for the functions are the person-item matrix X and the design matrix W. Missing values in X are coded with \code{NA}. By default, W is generated automatically, but it can be specified by the user as well. The function call of the basic models can be achieved through \code{RM(X, W)}, \code{RSM(X, W)}, and \code{PCM(X, W)}. The linear extensions provide the possibility to fit a more restricted model than its basic complement, such as \code{LLTM(X, W)}, \code{LRSM(X, W)},\code{LPCM(X, W)}, but also a generalization by imposing repeated measurement designs and group contrasts. These models can be estimated by using, e.g., \code{LLTM(X, W, mpoints = 2, groupvec = g)},\cr \code{LRSM(X, W, mpoints = 2, groupvec = g)},\cr \code{LPCM(X, W, mpoints = 2, groupvec = g)},\cr and as very flexible multidimensional model for repeated measurements \code{LLRA(X, W, mpoints = 2, groups = G)},\cr \code{mpoints} specifies the number of measurement or time points, \code{g} is a vector with the group membership for each subject, ordered according to the rows of the data matrix, and \code{G} is a matrix with subject covariates (e.g., treatments), \code{RM} produces an object belonging to the classes \code{dRm}, \code{Rm}, and \code{eRm}. \code{PCM} and \code{RSM} produce objects belonging to the classes \code{Rm} and \code{eRm}, whereas results of \code{LLTM}, \code{LRSM}, \code{LLTM} and \code{LLRA} are objects of class \code{eRm}. For a detailled overview of all classes defined in the package and the functions depending on them see the package's vignette. We acknowledge Julian Gilbey for writing the \code{plotPWmap} function, Kathrin Gruber for the function \code{plotDIF}, and Thomas Rusch for \code{LLRA} and related utilities. 
The \code{eRm} package contains functions from the packages \code{sna}, \code{gtools} and \code{ROCR}. Thanks to Carter T. Butts, Gregory R. Warnes, and Tobias Sing et al. } \note{The fitting engine by default is \code{\link{nlm}} unless changed to \code{\link{optim}}. For specification of the optimizer the global variable \code{fitctrl} has to be used, i.e., \code{fitctrl <- "nlm"} or \code{fitctrl <- "optim"}.} \author{Patrick Mair, Reinhold Hatzinger, Marco Maier, and others Maintainer: Patrick Mair } \references{ Fischer, G. H., and Molenaar, I. (1995). Rasch Models - Foundations, Recent Developements, and Applications. Springer. Mair, P., and Hatzinger, R. (2007). Extended Rasch modeling: The eRm package for the application of IRT models in R. Journal of Statistical Software, 20(9), 1-20. Mair, P., and Hatzinger, R. (2007). CML based estimation of extended Rasch models with the eRm package in R. Psychology Science, 49, 26-43. } \keyword{models} eRm/man/collapse_W.Rd0000744000176000001440000000501411572663323014170 0ustar ripleyusers\name{collapse_W} \alias{collapse_W} \title{ Convenient Collapsing of LLRA Design Matrix } \description{ Collapses columns of a design matrix for LLRA to specify different parameter restrictions in \code{LLRA}. } \usage{ collapse_W(W, listItems, newNames) } \arguments{ \item{W}{A design matrix (for LLRA), typically from a call to \code{\link{build_W}} or component \code{$W} from \code{\link{LLRA}} or \code{\link{LPCM}} } \item{listItems}{A list of numeric vectors. Each component of the list specifies columns to be collapsed together. } \item{newNames}{An (optional) character vector specifying the names of the collapsed effects. } } \details{ This function is a convenience function to collapse a design matrix, i.e. to specify linear trend or treatment effects and so on. Collapsing here means that effects in columns are summed up. 
For this, a list of numeric vectors with the column indices of columns to be collapsed have to be passed to the function. For example, if you want to collapse column 3, 6 and 8 into one new effect and 1, 4 and 9 into another it needs to be passed with \code{list(c(3,6,8),c(1,4,9))}. The new effects can be given names by passing a character vector to the function with equal length as the list. } \value{An LLRA design matrix as described by Hatzinger and Rusch (2009). This can be passed as the \code{W} argument to \code{LLRA} or \code{LPCM}. } \references{ Hatzinger, R. and Rusch, T. (2009) IRT models with relaxed assumptions in eRm: A manual-like instruction. \emph{Psychology Science Quarterly}, \bold{51}, pp. 87--120, \url{http://erm.r-forge.r-project.org/psq_1_2009_06_87-120.pdf} } \author{ Thomas Rusch } \seealso{ The function to build design matrices from scratch, \code{\link{build_W}}. } \examples{ ##An LLRA with 2 treatment groups and 1 baseline group, 5 items and 4 ##time points. Item 1 is dichotomous, all others have 3, 4, 5, 6 ##categories respectively. data("llraDat2") llraDat2a <- matrix(unlist(llraDat2[1:20]),ncol=4) groupvec <-rep(1:3*5,each=20) W <- build_W(llraDat2a, nitems=5, mpoints=4, grp_n=c(10,20,40), groupvec=groupvec, itmgrps=1:5) #There are 55 parameters to be estimated dim(W) #Imposing a linear trend for the second item ,i.e. parameters in #columns 32, 37 and 42 need to be #collapsed into a single column. collItems1 <- list(c(32,37,42)) newNames1 <- c("trend.I2") Wstar1 <- collapse_W(W,collItems1) #53 parameters need to be estimated dim(Wstar1) }eRm/man/build_W.Rd0000744000176000001440000000654111572663323013473 0ustar ripleyusers\name{build_W} \alias{build_W} \alias{build_catdes} \alias{build_trdes} \alias{build_effdes} \alias{get_item_cats} \title{ Automatized Construction of LLRA Design Matrix } \description{ Builds a design matrix for LLRA from scratch. 
} \usage{ build_W(X, nitems, mpoints, grp_n, groupvec, itmgrps) } \arguments{ \item{X}{Data matrix as described in Hatzinger and Rusch (2009). It must be of long format, e.g. for each person all item answers are written in subsequent rows. The columns correspond to time points. Missing values are not allowed. It can easily be constructed from data in wide format with \code{matrix(unlist(data),ncol=mpoints)} or from \code{\link{llra.datprep}}. } \item{nitems}{The number of items. } \item{mpoints}{The number of time points. } \item{grp_n}{A vector of number of subjects per g+1 groups (e.g. g treatment or covariate groups and 1 control or baseline group. The sizes must be ordered like the corresponding groups. } \item{groupvec}{Assignment vector, i.e. which person belongs to which treatment/item group } \item{itmgrps}{Specifies how many groups of items there are. } } \details{ The function is designed to be modular and calls four internal function \code{build_effdes} (for treatment/covariate effects), \code{build_trdes} (for trend effects), \code{build_catdes} (for category parameter design matrix) and \code{get_item_cats} (checks how many categories each item has). Those functions are not intended to be used by the user. Labeling of effects also happens in the internal functions. } \value{An LLRA design matrix as described by Hatzinger and Rusch (2009). This can be passed as the \code{W} argument to \code{LLRA} or \code{LPCM}. The design matrix specifies every item to lie on its own dimension. Hence at every time point > 1, there are effects for each treatment or covariate group as well as trend effects for every item. Therefore overall there are items x (groups-1) x (time points-1) covariate effect parameters and items x (time points-1) trend parameters specified. For polytomous items there also are parameters for each category with the first and second category being equated for each item. They need not be equidistant. 
The number of parameters therefore increase quite rapidly for any additional time point, item or covariate group. } \references{ Hatzinger, R. and Rusch, T. (2009) IRT models with relaxed assumptions in eRm: A manual-like instruction. \emph{Psychology Science Quarterly}, \bold{51}, pp. 87--120, \url{http://erm.r-forge.r-project.org/psq_1_2009_06_87-120.pdf} } \author{ Thomas Rusch } \section{Warning }{A warning is printed that the first two categories for polytomous items are equated.} \seealso{ This function is used for automatic generation of the design matrix in \code{\link{LLRA}}. } \examples{ ##An LLRA with 2 treatment groups and 1 baseline group, 5 items and 4 ##time points. Item 1 is dichotomous, all others have 3, 4, 5, 6 ##categories respectively. data("llraDat2") llraDat2a <- matrix(unlist(llraDat2[1:20]),ncol=4) groupvec <-rep(1:3*5,each=20) W <- build_W(llraDat2a,nitems=5,mpoints=4,grp_n=c(10,20,40),groupvec=groupvec,itmgrps=1:5) #There are 55 parameters dim(W) #Estimating LLRA by specifiying W \dontrun{ ex2W <- LLRA(llraDat2[1:20],W=W,mpoints=4,groups=llraDat2[21]) } }eRm/man/anova.llra.Rd0000744000176000001440000000417411572663323014143 0ustar ripleyusers\name{anova.llra} \alias{anova.llra} \alias{anova.llra.default} \title{Analysis of Deviance for Linear Logistic Models with Relaxed Assumptions } \description{Compute an analysis of deviance table for one or more LLRA. } \usage{ \method{anova}{llra}(object, ...) } \arguments{ \item{object, ... }{objects of class "llra", typically the result of a call to \code{\link{LLRA}}. } } \details{ An analysis of deviance table will be calculated. The models in rows are ordered from the smallest to the largest model. Each row shows the number of parameters (Npar) and the log-likelihood (logLik). For all but the first model, the parameter difference (df) and the difference in deviance or the likelihood ratio (-2LR) is given between two subsequent models (with increasing complexity). 
Please note that interpreting these values only makes sense if the models are nested. The table also contains p-values comparing the reduction in the deviance to the df for each row based on the asymptotic Chi^2-Distribution of the Likelihood ratio test statistic. } \value{ An object of class \code{"anova"} inheriting from class \code{"data.frame"}. } \author{ Thomas Rusch } \section{Warning:}{ The comparison between two or more models by \code{anova} will only be valid if they are fitted to the same dataset and if the models are nested. The function does not check if that is the case. } \seealso{ The model fitting function \code{\link{LLRA}}. } \examples{ \dontrun{ ##An LLRA with 2 treatment groups and 1 baseline group, 5 items and 4 ##time points. Item 1 is dichotomous, all others have 3, 4, 5, 6 ##categories respectively. data("llraDat2") #fit LLRA ex2 <- LLRA(llraDat2[,1:20],mpoints=4,groups=llraDat2[,21]) #Imposing a linear trend for items 2 and 3 using collapse_W collItems2 <- list(c(32,37,42),c(33,38,43)) newNames2 <- c("trend.I2","trend.I3") Wnew <- collapse_W(ex2$W,collItems2,newNames2) #Estimating LLRA with the linear trend for item 2 and 3 ex2new <- LLRA(llraDat2[1:20],W=Wnew,mpoints=4,groups=llraDat2[21]) #comparing models with likelihood ratio test anova(ex2,ex2new) } } eRm/inst/0000755000176000001440000000000011572663323012012 5ustar ripleyuserseRm/inst/doc/0000755000176000001440000000000011572663364012564 5ustar ripleyuserseRm/inst/doc/Z.cls0000744000176000001440000001741511572663323013504 0ustar ripleyusers\def\fileversion{1.1} \def\filename{Z} \def\filedate{2006/10/11} %% %% Package `Z' to use with LaTeX2e for Z reports %% Copyright (C) 2004 Achim Zeileis %% \NeedsTeXFormat{LaTeX2e} \ProvidesClass{Z}[\filedate\space\fileversion\space Z class by Achim Zeileis] %% options \LoadClass[10pt,a4paper,twoside]{article} \newif\if@notitle \@notitlefalse \newif\if@noheadings \@noheadingsfalse \DeclareOption{notitle}{\@notitletrue} 
\DeclareOption{noheadings}{\@noheadingstrue} \ProcessOptions %% required packages \RequirePackage{graphicx,a4wide,color,hyperref,ae,fancyvrb,thumbpdf} \RequirePackage[T1]{fontenc} \usepackage[authoryear,round,longnamesfirst]{natbib} \bibpunct{(}{)}{;}{a}{}{,} \bibliographystyle{jss} %% paragraphs \setlength{\parskip}{0.7ex plus0.1ex minus0.1ex} \setlength{\parindent}{0em} %% for all publications \newcommand{\Plaintitle}[1]{\def\@Plaintitle{#1}} \newcommand{\Shorttitle}[1]{\def\@Shorttitle{#1}} \newcommand{\Plainauthor}[1]{\def\@Plainauthor{#1}} \newcommand{\Keywords}[1]{\def\@Keywords{#1}} \newcommand{\Plainkeywords}[1]{\def\@Plainkeywords{#1}} \newcommand{\Abstract}[1]{\def\@Abstract{#1}} %% defaults \author{Firstname Lastname\\Affiliation} \title{Title} \Abstract{---!!!---an abstract is required---!!!---} \Plainauthor{\@author} \Plaintitle{\@title} \Shorttitle{\@title} \Keywords{---!!!---at least one keyword is required---!!!---} \Plainkeywords{\@Keywords} %% Sweave(-like) %\DefineVerbatimEnvironment{Sinput}{Verbatim}{fontshape=sl} %\DefineVerbatimEnvironment{Soutput}{Verbatim}{} %\DefineVerbatimEnvironment{Scode}{Verbatim}{fontshape=sl} %\newenvironment{Schunk}{}{} %\setkeys{Gin}{width=0.8\textwidth} %% new \maketitle \def\maketitle{ \begingroup \def\thefootnote{\fnsymbol{footnote}} \def\@makefnmark{\hbox to 0pt{$^{\@thefnmark}$\hss}} \long\def\@makefntext##1{\parindent 1em\noindent \hbox to1.8em{\hss $\m@th ^{\@thefnmark}$}##1} \@maketitle \@thanks \endgroup \setcounter{footnote}{0} \if@noheadings %% \thispagestyle{empty} %% \markboth{\centerline{\@Shorttitle}}{\centerline{\@Plainauthor}} %% \pagestyle{myheadings} \else \thispagestyle{empty} \markboth{\centerline{\@Shorttitle}}{\centerline{\@Plainauthor}} \pagestyle{myheadings} \fi \let\maketitle\relax \let\@maketitle\relax \gdef\@thanks{}\gdef\@author{}\gdef\@title{}\let\thanks\relax } % Author information can be set in various styles: % For several authors from the same institution: % \author{Author 1 \and 
... \and Author n \\ % Address line \\ ... \\ Address line} % if the names do not fit well on one line use % Author 1 \\ {\bf Author 2} \\ ... \\ {\bf Author n} \\ % For authors from different institutions: % \author{Author 1 \\ Address line \\ ... \\ Address line % \And ... \And % Author n \\ Address line \\ ... \\ Address line} % To start a seperate ``row'' of authors use \AND, as in % \author{Author 1 \\ Address line \\ ... \\ Address line % \AND % Author 2 \\ Address line \\ ... \\ Address line \And % Author 3 \\ Address line \\ ... \\ Address line} \def\@maketitle{\vbox{\hsize\textwidth \linewidth\hsize {\centering {\LARGE\bf \@title\par} \vskip 0.2in plus 1fil minus 0.1in { \def\and{\unskip\enspace{\rm and}\enspace}% \def\And{\end{tabular}\hss \egroup \hskip 1in plus 2fil \hbox to 0pt\bgroup\hss \begin{tabular}[t]{c}\large\bf\rule{\z@}{24pt}\ignorespaces}% \def\AND{\end{tabular}\hss\egroup \hfil\hfil\egroup \vskip 0.1in plus 1fil minus 0.05in \hbox to \linewidth\bgroup\rule{\z@}{10pt} \hfil\hfil \hbox to 0pt\bgroup\hss \begin{tabular}[t]{c}\large\bf\rule{\z@}{24pt}\ignorespaces} \hbox to \linewidth\bgroup\rule{\z@}{10pt} \hfil\hfil \hbox to 0pt\bgroup\hss \begin{tabular}[t]{c}\large\bf\rule{\z@}{24pt}\@author \end{tabular}\hss\egroup \hfil\hfil\egroup} \vskip 0.3in minus 0.1in \hrule \begin{abstract} \@Abstract \end{abstract}} \textit{Keywords}:~\@Keywords. \vskip 0.1in minus 0.05in \hrule \vskip 0.2in minus 0.1in }} %% \def\@maketitle{\vbox{\hsize\textwidth \linewidth\hsize %% {\centering %% {\LARGE\bf \@title\par} %% \def\And{\end{tabular}\hfil\linebreak[0]\hfil %% \begin{tabular}[t]{c}\large\bf\rule{\z@}{24pt}\ignorespaces}% %% \begin{tabular}[t]{c}\large\bf\rule{\z@}{24pt}\@author\end{tabular}% %% \vskip 0.3in minus 0.1in %% \hrule %% \begin{abstract} %% \@Abstract %% \end{abstract}} %% \textit{Keywords}:~\@Keywords. 
%% \vskip 0.1in minus 0.05in %% \hrule %% \vskip 0.2in minus 0.1in %% }} %% sections, subsections, and subsubsections \newlength{\preXLskip} \newlength{\preLskip} \newlength{\preMskip} \newlength{\preSskip} \newlength{\postMskip} \newlength{\postSskip} \setlength{\preXLskip}{1.8\baselineskip plus 0.5ex minus 0ex} \setlength{\preLskip}{1.5\baselineskip plus 0.3ex minus 0ex} \setlength{\preMskip}{1\baselineskip plus 0.2ex minus 0ex} \setlength{\preSskip}{.8\baselineskip plus 0.2ex minus 0ex} \setlength{\postMskip}{.5\baselineskip plus 0ex minus 0.1ex} \setlength{\postSskip}{.3\baselineskip plus 0ex minus 0.1ex} \newcommand{\jsssec}[2][default]{\vskip \preXLskip% \pdfbookmark[1]{#1}{Section.\thesection.#1}% \refstepcounter{section}% \centerline{\textbf{\Large \thesection. #2}} \nopagebreak \vskip \postMskip \nopagebreak} \newcommand{\jsssecnn}[1]{\vskip \preXLskip% \centerline{\textbf{\Large #1}} \nopagebreak \vskip \postMskip \nopagebreak} \newcommand{\jsssubsec}[2][default]{\vskip \preMskip% \pdfbookmark[2]{#1}{Subsection.\thesubsection.#1}% \refstepcounter{subsection}% \textbf{\large \thesubsection. 
#2} \nopagebreak \vskip \postSskip \nopagebreak} \newcommand{\jsssubsecnn}[1]{\vskip \preMskip% \textbf{\large #1} \nopagebreak \vskip \postSskip \nopagebreak} \newcommand{\jsssubsubsec}[2][default]{\vskip \preSskip% \pdfbookmark[3]{#1}{Subsubsection.\thesubsubsection.#1}% \refstepcounter{subsubsection}% {\large \textit{#2}} \nopagebreak \vskip \postSskip \nopagebreak} \newcommand{\jsssubsubsecnn}[1]{\vskip \preSskip% {\textit{\large #1}} \nopagebreak \vskip \postSskip \nopagebreak} \newcommand{\jsssimplesec}[2][default]{\vskip \preLskip% %% \pdfbookmark[1]{#1}{Section.\thesection.#1}% \refstepcounter{section}% \textbf{\large #1} \nopagebreak \vskip \postSskip \nopagebreak} \newcommand{\jsssimplesecnn}[1]{\vskip \preLskip% \textbf{\large #1} \nopagebreak \vskip \postSskip \nopagebreak} \renewcommand{\section}{\secdef \jsssec \jsssecnn} \renewcommand{\subsection}{\secdef \jsssubsec \jsssubsecnn} \renewcommand{\subsubsection}{\secdef \jsssubsubsec \jsssubsubsecnn} %% colors \definecolor{Red}{rgb}{0.7,0,0} \definecolor{Blue}{rgb}{0,0,0.8} \hypersetup{% hyperindex = {true}, colorlinks = {true}, linktocpage = {true}, plainpages = {false}, linkcolor = {Blue}, citecolor = {Blue}, urlcolor = {Red}, pdfstartview = {Fit}, pdfpagemode = {UseOutlines}, pdfview = {XYZ null null null} } \AtBeginDocument{ \hypersetup{% pdfauthor = {\@Plainauthor}, pdftitle = {\@Plaintitle}, pdfkeywords = {\@Plainkeywords} } } \if@notitle %% \AtBeginDocument{\maketitle} \else \AtBeginDocument{\maketitle} \fi %% commands \makeatletter \newcommand\code{\bgroup\@makeother\_\@codex} \def\@codex#1{{\normalfont\ttfamily\hyphenchar\font=-1 #1}\egroup} \makeatother %%\let\code=\texttt \let\proglang=\textsf \newcommand{\pkg}[1]{{\normalfont\fontseries{b}\selectfont #1}} \newcommand{\email}[1]{\href{mailto:#1}{\normalfont\texttt{#1}}} \newcommand{\doi}[1]{\href{http://dx.doi.org/#1}{\normalfont\texttt{doi:#1}}} \newcommand{\E}{\mathsf{E}} \newcommand{\VAR}{\mathsf{VAR}} \newcommand{\COV}{\mathsf{COV}} 
\newcommand{\Prob}{\mathsf{P}} eRm/inst/doc/UCML.jpg0000744000176000001440000012336611572663323014035 0ustar ripleyusersJFIF``C       C  xZ !V"17u2AT#6QU$3Bv%458aGqsFCERde&Sbcr ?*o^JAap+ Ñң͗"j8*DLDY-m{ջWMfLm.5zFFamR/TBxgi^KpcH~Y\,522UN:(uF%IK"TZt&5z'QXz v\F%x8NA4}t332 'f[maBDf|n<8spKmn79;~I,y//Յ9s_]ȓcmٍOuoϒm? e[%jIo2{@l1,ךN<$9±nDy[9f|BѪ_ʅ*%UTDO+Slyɷ)IjYtm޴0A\橹]}*{IJV\aWjFˍ6Я"z5&”aO9XŃ05M2e8I!=ŊJFB2q׌*]$V;dV^B%lʚFе]7-!?1vkBdX*ӈf f:y6͵\uYi^5+ER!Ń/y)ɍz7 Z?zKf*^'lU3j ,f, 1d:U)BhԴPPa먷Dt&6NI5(]:f\:ĖUsƺLؒKK4y%:٥[6T 2 &;ȉ3&=vP \h:h^&Tn-xE 67R͐QV*4by05èYi <+`fMv[~MO˨NE4XjaPn;mTtt #.1]}ri BH'4Imԑ" @s\tma+pH.34<.|GN9kJZ' [@qXN}rzTףH(omnl;i-'NZ,9'>y7,Y"Lvaas68?آZoT{o٨EcȟXMNpbnZIey8,گ2ŵ$9EAxģOrwcvt̗ޖSG@ܘ$"  Or55(r|Y \ WmKI6l\5AA: íƔgcU*YZUm6P߈Mv;rۖ"Z,1yZRW7\+M6.EwgÀr_MRkGMě|4ՆIL6 ^ùx@?}lJg'r'R)GGi8^E"UX(eٓaRdd钊’ҷ*@sΕcic0yRi*Cn \,Hκdq7(ˆiإZfz]8,=s:˂ mKJҝwiڍKiZh6WqPVA:IY&G-9-}Qىsm X"!V%%rwK(i,lZ^R J$V#0+Tz+qeGT̤Ďl88ښI$ qA?Sr^) ?UUY0ꞷs:=5+3}wNHZKT'aTǠPfס`>+bpe*non̅qڒ}:-n.|Ēa"7~Kn4ΥkQ$FEBwʋJȱ1^T3KlY*Ie֔ۯKՃVF%ىsm X"!V%%rwK(i,lZ^R J$t 5[Tƶq^v_6o'*,Iv^Lʿ]kWW+.[ŌXl[Iě-+Gk32EfWآ~Z2m)g X\0RZm'kV*׈a6gؽ\fmxԥ)ܘh0}ݩ'k.&Z  Yc-lRqb~ ͲXqgqۺn{ }+$q8"0w`Μ[PˌMv U:)hR%nVBHsaa:^GfyS^ ?D;))8#sA:K<}kck?ܱd1نŇpbyiCaS]fO"}a7+9 i&m5?;#PkSj{ԒX>RF9ݎQm2_'zZ=MOa\CYuG 1\d3*xp (ДmkbUjزr֭c<˳o2pR\RT DӮ6Lm9Udp;.8hg'+^('VR԰X A+II5KLrcm=O*Q%hv߿Mz@jFò"ﱯe6dĜ4i$$߻PZzHgiZb$لvhC +~ ף^ 7oICYaL]-3!ߓfq#?-d><*j[Ϛu+IhDDEaZ5Eˆsv"LZ[M#=Un r/?/| MjvX0ܨheVՒTZDzj4\~+̴Ihn6j- h'R{-2d`9VXx1c7P4qZoF.ëJ,׮u }8Fz]a8 V`EK1žo}[;FrZm>X6Z'X1֦]cuRmo)i3Y"I$<8r$T̪9%Up.n> ;hBM.2d{RR i߬v.~B4Vhr;)V1JiЗ7$QU,=g͘ldX"%l[Q IR[ f$A5wgUIXrI2Rp)q;[a ON\"S8YS8q _u#SU:nOv>X6Z'X1֦]cuRmo)i3Y"I$=XWa|9+XN#E?o[[J5 ${u2.OSX <9cBq8֌.,~7SƯyi3۩hz IĘm75\OaET),=fl4MjuSNnq͆I0a#(N$} *jv7Ҏ8HR]-tNPH|.Ĺ\ʜ?8$\SЍ)dsٙo=Z'hUDJ"˭ N4r]㆒ֲjmW^%ϓmUֽ4 켂[jڲJTC-@ioc/# p BL7ÒnfR=8}z4Z5\G1*eiOqAŁ";rL%K\8Be7ÒnfR%vjCu0݄τ'$X@f˨[%+Qh܍UȖPk;"STerC`u*GZ-k=\=6q&^.'qB]r%CqThZtON y~aJWjyfɚ̓rPHk įP9doYTT"I(.35 r/?/| OoI{g]=6UKbm+52V7mפ20V1l\2&ǵ;]sK.Q ࢷk:MDz6I?8]M!?H$%a#([RJ; IqfEf\G"$%Q-YnJФJJ# 
2$됗l#-܎ۆo%K#m*7Dj/@6%řqDciDme+B)*#ԌeqXqy 9)fd-DqB4 +ڕ}gW6=Vc/a|#*JǪe;/pP 9bSj/II{eV[> t 9bSj/II{eV[> t J2RBj9mB\ZS4A(e+ñ=\Z r` Yq讚SkSN-ZZ48t)&F_qy/S3x}O̞m[y)M S l: \Y}[+ò?\g00jcUQ[s%sͻxksDUle)2RY3=)MKu8}z}m6C* BIl>7qm "ڭN2~XoѷV86ƹdZ>l4kmHtiX] i:+&ΑRJK]t+S#L7@8ґx3eX0^A5`lMQFuV2~􎎖T\5,5Kq‘30il LJD&5ĔHe^6+7P)qJaj~,Bz2[-tI۸:FHZE[Xϛ8b2RàU7u OP`Ö1?S L-OEOFKe 4{wBөHvtglGyإ;c䓞yD5#eGCXt{TW)V`a#`&ss_f5U1X<۷4E[&+fR[c%+o>ҔXykTGSivn24$jC|G&Оq+R-9mT'٥q un-slkICi6 3OjV64F+0P=iL,u!,ĺIuI B2?A$̃t)c;&Z٨V+dTg]ejC.G/HeLRTح)1c F ltNc\IMOA?O$FwtC 2fRE[ô崦$CJNq4.rL5tuSc3Xa䩷uSp'-ْm8B:6= .\Í[2i8q,4| .1JRdHp$iqev*Zy;xiHg.Zb55ڦ a c;$ R\iTgZ=4]w] _*.i]L\C7HnC۩|_Et0೵%n$<85ۗWn_@ۗWn_@ۗWn_@ۗWn_@bHOML+s,;0M55"kIaQ5Ȉ.t ϥ660.mg$d,ohس5'C>oWn_@Qܿx4GrmHsO}/ 1>Qܿx4GrmHSíFUe?2SQ1(Tn%4RMczRKVSx4GrmHsO}/ 1>Qܿx4GrmHsO}/ 1>Qܿx4GrmHsO}/ 1>Qܿx4GrmHsO}/ OTN[aDĢz!SWw]ӊoKm5eI-Z3MmHsO}/ 1>Qܿx4GrmHsO}/ 1>Qܿx4GrmHsO}/ 1>Qܿx4GrmHsO}/ &s6~d-ZeS3 C\Juķ\ܤ6Ɖj"$֝鑕!QT!&Ͷq ^q%fDG3IktY[GJjM_qdZcS%9mM^QvSN)J-:HQ$i`,mQgaILU@y3v+D*w9m\gC30Y8jU^J"V۩r'К_[O^Y5³-,vC\%]9f}5 ijN`1=L.U$N~jȾt .F%{ aNEUs)TZ+|s >ʐ(m Y 3XI8qNA-i y2v;p Hޒ[O 5XjReeH6dfA)e$FzY5³-,vC\%]9f}mmHsO}/ 1>Qܿx4GrmHsO}/ &s6~d-ZeS3 C\Juķ\ܤ6Ɖj"$֝%scf28mHsO}/ 5>%>=\+2{wOOx4GrmHsO}/ 51\p^.|[ijoyH#qZGiz$9>ۗWn_@ۗWn_@L;K&^%ϓ/ɀs y~9ÿy`^~_0r/?/|9xw𗟗>L;K&^%ϓ/ɀs y~9ÿy`^~_0r$ym"KeqǦjDwRhq J4&Z P bP'@D\;[i r->\ii[N%DiQ@nÿy`^~_0r/?/|9xw𗟗>L;K&^%ϓ/ɀs y~9ÿy`^~_0r/?/|9xw𗟗>L;K&^%ϓ/ɀ˖Q%Cl)[+.=5{R#CHZUI2R{pf}}T( ʶL;K&^%ϓ/ɀs y~9ÿy`^~_0r/?/|9xw𗟗>L;K&^%ϓ/ɀs y~9ÿy`^~_0r$ym"KeqǦjDwRhq J4&Z P/q]NUVϯ[>!VǙ8ȈIz/ɀs y~9ÿy`^~_0r/?/|9xw𗟗>L;K&^%ϓ/ɀs y~9ÿy`^~_0r/?/|9xw𗟗>LacjljaU6,F|Cr=hm;)-T-LҀs\u8 UXW3>Yl[f:w""z)&Z;K&^%ϓ/ɀs y~9ÿy`^~_0r/?/|9xw𗟗>L;K&^%ϓ/ɀs y~9ÿy`^~_0%z}Tر wu\DR3J5%LlW#+.E5Q㴓[8%(BSJ3Јb/ɀs y~9ÿy`^~_0r/?/|9xw𗟗>L;K&^%ϓ/ɀs y~9ÿy`^~_0r/?/|9xw𗟗>L;K&Iql +k`\Zi]-vɸ_ukuԣ5(:(0T`< ak8غҽ.K٦,~2O6[[#=z1T`<8>6rqud.c8FNvI]%32o.qkqcdUo/mhO˭ףlYb q+ܪ%wtgؑh'E-zw e>؃\bYF 1!R嵶3 G4IeӢ7G0/0=j{Du-Xuk5kθ;akP,#=ذ42Tp&2cBpͳV-}6eflA*:m ,hE'RJ/M?ˊƘ&x*/F<]g)uP#ձgO^kIҀs\Y?[Kz$f`<"ԕO6؜'- F!Kn1o䖘-[h>mܬԋO XF 
jF&r1f-[$K>ђ4LIvVb15grdJmRRT.IIN&ða(,Xfy\KڧF߯zttdѴvbR"Rg;ל &TZX c88r 6Ɩܴb R_4HiHCԥK.|+y2k 9-ג[5 `4C+PRu "F]n\f%&IJRD~= (21.q]:?nm ru?d:GO&9Ǎ0?gox5Anf#u 4&ޅi l[,p ݔ8ʚBdٸ4ԵH kWb%(w nvv|jnK,[OBJICNRV7EEg@TZGHSA%NGSs~ߡ$ruFUV gB-R3q;jI褙jGuƘK3 2jYc.=w'4F qt"$ˣPku08ҁ;g#qm4H׆ɤfi"]@+rJ.L sśmZLȒt]Z ]f%lI ־MI2>OSݨu0u!+U/[`y1n?Ihf\_: 2qE>OcllSJ?ݷQ ̃W86^1:r 1ⱮOA&rHxAH""3 l C0^]Od~s͓j:/\,KM"'VJ-:td#/E^*gK cYJPW!t'/p0G'6nї#e*'jj]%4zl"OΎz8盵 : $%R]D_qcA螥LjuSe~8z >2 S-2AΛ(qT8eSe0ZP^oӣAkغeIz=\RU(1ly!MWhIidupDK2]-EqFda ]/ y)#Xŵ{RJKJHӪƻ~+pfZGĘ"a' |s #Y=v>a*J'lAZ:]. a[Ol^>Fxbup}vg'_i=ݯyOwkFxbup}vg'_i=ݯyOwkFxbup}vg'_i=ݯyOwkFxbup}vg'_i=ݯyOwkFxbup}vg'_i=ݯyOwkFxbup}vg'_i=ݯyOwkFxbup}vg'_i=ݯyOwkFxbup}vg'_i=ݯyOwkFxbup}vg'_i=ݯyOwkFxbup}vg'_i=ݯyOwkFxbup}vg'_i=ݯyOwkFxbup}vg'_i=ݯyOwkFxbup}vg'_i=ݯyOwkFxbup}vg'_i=ݯyOwkFxbup}vg'_i=ݯyOwkFxbup}vi]vqJWDmuI褑3Ӏ<'#<1:I}8 N{_N3Ӏ<'#<1:I}8 N{_N3Ӏ<'#<1:I}8 N{_N3Ӏ<'#<1:I}8 N{_N3ӀyrYc,=6r4qiFf֚OjJ/dg'_i=ݯyOwkFxbup}vg'_i=ݯyOwkFxbup}vg'_i=ݯyOwkFxbup}vg'_i=ݯyOwkFxbup}vg'_i=ݯN˟ )NXzYFyc}շQT]$g'_i=ݯyOwkFxbup}vg'_i=ݯyOwkFxbup}vg'_i=ݯyOwk'q~ǘV5٘fh-&Cy+RKH\P^g *<=D$4';ڴ+\f+x!% ]Uj^ KVnC-o]e ە˪) iٹmFT=~- mDm"6:"' %MDJ"WN`6#5b J{+Xp23x)`XL˦ˬ$!mryF ?)iR NRgL9-_>x2!旵FmZ OE$?HeYKSb"E{#d2AlizOVc[ymLإ|NK)%6Q)oqq]>9.rg}\!,Wt2oEe^[$QA+OeEwPajk&ӹ6'vo;] PPxNv rCҭlc-mNf:QF1[ m22Vgʿ闟OUJ\kتDQqTRY#-ƣO_*,8K#Ŏ;"C$6h-Zԭ )If~ڙY>hwO[۸Z"Eflt:DNr'K$D lp/Y3\ puPeH޴_ۇu5Ys d<5z*qS鋇*b"K͢;N%cDTON4؛x_<.\uvP )#fi|Zrwd^I&%&ˆm7tVne3-et?p i<$0N>XAY`ǎz !JIܙ>AHng`:Jd3ɞ IWU :~ž_ֽyR)i"7*#R/8 3s.r-=e!r$I-E&%LJf?w/c_`,{zŷjQVm3)+C=dde` ؚrwX|⺣$2O%@-2𴸘c ><]4hҎk\y^Ws6D[tA˼͚ ʃ3f3d1)!!$QI Gxzzܿ0.cgg8_tY4 3K1ƤgD-tPs𐠠CW<8f?ˍK-eΡZۣ"QoGrf o60m'T`Kk^}}TQ*1ִ+jI== g[bR"u1ZZ!}32I'iUn:,}AD\;rJ&44 t)kڽ32FL>0z+8Dz7$֪SiOM7uhl!1N1ʪ|>IҸGO ,Q' ~ UwGSΙNq6[m6129>rZ ~ V꺦U^ 2Bf;ɐ{"l6R}Ե3D[ bs$7p~W #߷Wg=b%D;mAѶz$yD[7teuahyqLBi-\Z”7jROMzqxel+baKb-+2Hy5i_h+!p^cHcThi7CdF]/ά-jg5mExVLurDH Iuk=V4[_ĭDYLŽYM&k4QHӡ ά-^+Y|m܅ɸ{]gMxŦg%7X⹸QR\B"$i7OUzwv8(Ԩ)|;SPrT#bj-)K_Ftt2)9C}9Vw)SY~dygڶ~B|řeѷ>GX>)\)ri6RGD0I#5-p,i|(qH ]i y*g_ҒIjsQ¶8o 
RKJǓcdKTf֏5PyZdN_sq _RV)XR䡴8ެp:W$: ~ {CeI-KDKHBnCfҍQWF`>RY:)uM:A2{NA7m>ދKfjCa|&G yQ\' &82do/G]sqK?t0s BŜp0lKmq!ɩ7I*otjROMz]`,s7-Jz#VpcTw8-"lZ܃SK 04(V.v%:aN!~Sj[m%nWA,ie-ŒR_y gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v (q^/ a#NȐ|$6h-ZԦȒjf`3sOM؝4Y>͉݀sOM؝4Y>͉݀sOM؝4Y>͉݀sOM؝4Y>͉݀sOM؝4Y>͉݀sOM؝4Y>͉݀sOM؝4Y>͉݀sOM؝4Y>͉݀sOM؝4Y>͉݀sOM؝4Y>͉݀sOM؝4Y>͉݀sOM؝4Y>͉݀sOM؝4Y>͉݀sOM؝4Y>͉݀sOM؝4Y>͉݀sOM؝4Y>͉݀sOM؝4Y>͉݀sOM؝4Y>͉݀sOM؝4Y>͉݀sOM؝4Y>͉݀sOM؝4Y>͉݀sOM؝4Y>͉݀sOM؝4Y>͉݀sOM؝4Y>͉݀sOM؝4Y>͉݀sOM؝4Y>͉݀sOM؝4Y>͉݀㄰ 4SzQc3\-INM Q8g{ :[jVzU7&{U?Ekqϓ!#f{e;4.Ak\57eG}Q)Y4o=_5Dd`2H5)>i:8Y[/CC%$"0} g`bSGmML:\..qQfFf8{5-V3SԷ+a1ۤm2&9f9"Q-Ԥձv7rjjgMhl3>Žߗm%2j"R$N/@^`KTnBǟlURUVq ˌ)"Tt@L܍j+üz2bS#IIqH5dAb^3k[tQmlb"pӕkvz`Rt7|_Ŀg͗6,g@Fqau_d9m KU Tg~eV-QO ^!ֶӅ`v7nTk}ui J2⇪m۪]Rkٗ+ԥJY&;&Ĕ [ojjWAq[[UWef!ʪ9Ck!؈-/Cڒ01KLEy1C2 (ukI$?x9-*㫽EbZx:MkkQChZ7mC."U,cGj߆3eRQz ImdЯеjNPb;h4Ms,IZr3ߌѾFѨ'IFu#Tc?:+؂Z׽vșoU)꡹&Jҝ47҄Fvxu܄ 'Wϯ߭G[6\RqV 9Y^W4Hx~*DxN&䬣& Ljh48 [_M3eiKq^!;V -mL~,a2^Ę량冤3]-dt;45/v"{FqeCKJhRҺ{{"7+5q&AqWZ%k<%Y9,G[ʼnaOphm&hf\ؼXhdѶK-եJpa|<ͫUw$ VO3D \x::]ssY%;jNݩ1嵴]*]}t6l̈́6UJ9IAsD+טWѦ+ƺ"M' [1fuՕj+4 VwVFE~)F(,v&KCDp.9 Khݴi T,bk~͖mIE )&N嵓BBթ:a̗&&DAa WKgY")e&N +Kݴ׌{j+SN:{+ruZK *QqJ['MQfc,B u2_6g3&.7osetu$JI*H \Jf7 !XULYnzqd+p8{ՅT, =\W+ڣBROE$?HXq- aFq#ӗ|-\5l8mRVɝ4n̘LJ*\V {t㹢v6fZ+v Iw26믰ü()1:d(z 2؄0IS . 
)uc,iQloY%%u㸖q//vi30ag&Avׅ]1B Biֽ`O]Q8S.;7^VM6a7nx~uY \+mȳʍn4ӛ\i-#i8'VHPbgiUb ܈#4N̔G-!ipj4aٓx(4HgBQ(Pi|񯌼O_#|o_-x~p$ljWc^|RKMkbd-+uݢRjSh="O]׉<&r+۶\.4--pDN2.3VQW◠E}3(4ڗ(JB8{KpȌN^͕%L Wb]G&1$+V¯$8Y)ٿ.kkQFEPƒ"#\4/{!ڨ2[&_ĪvnR3VN2-7 Mm$mF-f@=9wjŬRTV);YqdńBMG:=wy'wTDn&%CQAXwBMHݴ)3-RZ&|8óf-&%b[`ڦ%q8j\ZMN={_!0=SOTx-մKYF˫yOq*)Փj2܄ԙ'_!*56>"%GבGufHhkj6m;Pf&di/2icb$$kGj6"A"‹#%&\JĴT*Ŷ5!PkQMǨ.|(2"ޚ-,kjେ"F6q48N ѤK@U`e dƃevVoy5utF]=UNx2~_!*56>"%GבGufHhkj6m;Pf&d~jy2zL.G!ur]SϓeT۫qFͽC;Vb$=q1* 7OĚü5jFiIj5~eL,Jk#+TlX6rT险jxj3Qj > *@ŏL秉:J'>PvF)~Y, |jդZ|D%o3A.irap"5HӖifI[RH'si&m[D$D{Ww|NHz6bT5 Kol5xk4$ԍLғ2%y&xK$5MF|r24ZC'!n:R)K3YZY;溙K+J3mheI~L8(e&jWݳg[eӹr)"PK*]_5;?!Y̲T Y]E:L&q !ڋd[L:z(2"|XQUM} p`*msHmScO[aOGD;LA_7*3Hy[[sS$w+MoV=<[*lU81y ;}hS*[4 RI1Df>_}ܞIiKf-d λ2+LCMYfZ]'TVZCqjS̥+3R3޽Ci/2icb$$kGj6"A"s SSKXoۍuocB,hmdЋjt WgE7k19*) DA8ۚ)d[L-E/vWS$YJa1Jyg!rm~{giCj#$tu߻ i9ٳ)F;K6"JڒGDy;I63j܂$" bu~)D*+CZIb|!&Sڝ~hIWx pmk14RQPa[VYYYȌ%+D`2v)UW+unyۺCK$hGG*/ f"[v(HR[cfAFnF=CɆ&r+?O'xUI<5[ŵ ؔ'pZ(7ڒ2]!6w®}xqm ԩ[i83(H$O0 ֮ Vsx2⸇^Ok5$Qs/pܿʴL)M1GZN-I Cԉ##sC# ʃSWgv ^XD6] 9Te6j"RFf^ND|+n\,wQm|H4my[YhZoN?ĞV'%÷xfCmqC%MYOH ;Y*>҉sr(;Y*>҉sr(;Y*>҉sr(;Y*>҉sr(;Y*>҉sr(;Y*>҉sr(;Y*>҉sr(;Y*>҉sr(;Y*>҉sr(;Y*>҉sr(;Y*>҉sr( X̸̊"DIK4[n6ܕIԔF@2O~LTW)V`a#`*Jnb&+iqO2hj PbN&uп^5n̽ 58s0M0 0q~H{'k\|FW:>sJwauh(]ZY4_n;fᑨJtF$̸̊"DIK4[n6ܕIԔF@5Rq rTr똺Ri5KaǴ65: foasL͜DZ|7VBIK{A@GLAk"V51pHm&ʜSu[IH YrÊo"ڥ62Z#%?OI_)Iy]U-h'Ql#mIdV4_'z#SMu8=_B";RP[oW.ZPv&CDd9!A(d5tjf, YmdWm+%d5Y6(֍2J.^x O.g \-tHbRp]䴡kMɲrC uDQ$j?\Ҫ%EvҐH\$?4DkA֏;OHm#uSb;oǏGPGBzSO]z.nrXq] Kr[nIY|'ITtKI2qVVAl2SDR#kKhliA6ӚĵV׃,<5khZz%&gzPU7u i;/ۘɀ*bLH䴲Ӳ %SmI%n?S+bE,032\M\e$mÀh3*Ḥ_q}5LKƋp뜱~׿Ua2zZ^+Wׂ_">ڤ'Q;FN-/fac_dPYZkEʞtn?۴ush5'yrH_$^j4=L zťv"#:zU0e 3Sr[Џ?)V`a#`6-cUD)oʌb[@u6 jAbnq Q(JIDİظ%T0AzA}IirT#/eW[VysatW`On= 7sʣ%nV%0gcFKޮ!@2ƠiQe3(kqŬkZj7 FԹRO>ʧ_%{snUjiItfkU.QS]z 8jUǔ۰&dk4 ݧF" |U[8~KݛfzI+6j/i B$X+%mKIn=uzrYGz,7':hhKs .4fɷ$dIJ $2%ʁ4jb0! Ƨ5Wؙ cPkJm4&vxqq3b6̈+1YD8m~O%h4fRLsmax;UP"C%08$3i o4VZu1ˎ.EKM\meIjBЖF+RJ+0o~@%qߡ+]O3cf28SíFUO~LTiY dwMʋ!y%-S];TZA1QUPۀք2%IJP%%ǔҔ% 2I}{*. 
mĨr0 fiRfZ 5UIqf>_u"NHS:BR5IKOl +0bŔ[KIn5m%D&q.tϊf^R R ʔ5+%jrM{s7^{@mZ^Q5j5=Cm}}tr*ڶ5G(6㴶8& M6?`RtRRd \IO)RdO26%- *k ГY#MSK@7uf)9mcmg&KM4iJIZzL֖4UE[ZTْC}OHIKˬ+*SϼԮ5Ϝ{wiyFfT֎G +-M6S%@sR#.b8Æ;7@EUŦࣅ#r"G (qO#GGFJ~&1gZeIKgo? }m6". J(TϜuN:L*4pu&-S΋%AXW%%$qB}8ĥFJu$D"ÊHVu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`)bD+1"2#-GI jPBJREeRm/inst/doc/modelhierarchy.pdf0000744000176000001440000001672611572663323016266 0ustar ripleyusers%PDF-1.4 % 6 0 obj <> endobj xref 6 10 0000000016 00000 n 0000000647 00000 n 0000000723 00000 n 0000000847 00000 n 0000000929 00000 n 0000001124 00000 n 0000002119 00000 n 0000003279 00000 n 0000003483 00000 n 0000000496 00000 n trailer <]>> startxref 0 %%EOF 15 0 obj<>stream xb```g``anT,  bP aB7H320H ! endstream endobj 7 0 obj<> endobj 8 0 obj<> endobj 9 0 obj<>/ProcSet[/PDF/Text]/ExtGState<>>> endobj 10 0 obj<> endobj 11 0 obj<>stream HV=o1+\ ǟ-"!"!n;DP"PRQy36.7=oQI8yg?Yg[V_락߾;Ӽ9yx˚L5do= >82e<MD/XVj%f1C8D\}{!jaYo֘;W?b~D;v7ZD:;VN3l'`8r\o_#|XG3q{O W<)6~!EVǔ5pZw:JX'>7V-QHCX;ksē׮{J ml LM$8],n͞t*,x:v8Ej~UWLųL=)nXyS!CNP_5dh=lYMI%tMQJTa ܐKaCԺD5[Xsay*n^%uJ9J0;ɇ߻jmx"^Or\qϘ%'޳WhG5n[ڹب\ǙU\ ʼn6*uK,TS ]7e&*Ϡ/ ^>stream HlkLSgi9E+`ݖ=!\ +8d hpʨV`P.]@ۖJ ,E-x O!#H>23D7e)/va_I?6$5-c{R22n¹׶q$#Zw@/`kB0OO).і>/HU(˓/*KUyE\MboTJ-yZ-ܜ(g*˔xcX,ŰL+„X16gC\bY|f3@8jL&%t9IO; F`<?08jtAoaЛ$I&a?]et6ML69š|E=Ni1YWBPu2pPG8G}ٌ|h+`ֽd.֨>"x?/F@cJ(tDC=d$KZM[ǨQ<Űd1Z&55]hd= ZD pS͋ta5d9p^r|HQ ҬϋUs {=^[ ]j .?{-H6u{H&!^r2B?# Av6_P<+AM\l [cBu\IGhQjtFN@ q߯(թ gTI'Ľ| Q].y: ޫҰaN_"1l%Wia(@sN]?s׋åy!WPH=i[Ο{<5Kqi229]FxaWdiYIqCv[@J}LVjum];׏Q0x?f*n`vwblaj NW٭FZCC8|y. 
D^K=}0̭Lek}X|A(7|5 2l>nuxx[Z}1"hgzro:a@\$ݝ I}qǵfdCD~Nu)> endstream endobj 13 0 obj<> endobj 14 0 obj<> endobj 1 0 obj<> endobj 2 0 obj<> endobj 3 0 obj<> endobj 4 0 obj<>stream Acrobat Distiller 7.0.5 (Windows) 2006-12-07T19:28:41+01:00 2006-12-07T19:29:59+01:00 dvips(k) 5.94b Copyright 2004 Radical Eye Software 2006-12-07T19:29:59+01:00 application/pdf modelhierarchy.dvi uuid:24a41332-332e-48aa-8039-01a80649f082 uuid:04394476-4020-4118-8bcb-0f6f46e95be8 endstream endobj 5 0 obj<> endobj xref 0 6 0000000000 65535 f 0000003559 00000 n 0000003592 00000 n 0000003615 00000 n 0000003665 00000 n 0000007238 00000 n trailer <> startxref 116 %%EOF eRm/inst/doc/jss.bst0000744000176000001440000007765211572663323014112 0ustar ripleyusers%% %% This is file `jss.bst', %% generated with the docstrip utility. %% %% The original source files were: %% %% merlin.mbs (with options: `ay,nat,nm-rvx,keyxyr,dt-beg,yr-par,note-yr,tit-qq,bt-qq,atit-u,trnum-it,vol-bf,volp-com,num-xser,isbn,issn,edpar,pp,ed,xedn,xand,etal-it,revdata,eprint,url,url-blk,doi,nfss') %% ---------------------------------------- %% *** Journal of Statistical Software *** %% %% Copyright 1994-2004 Patrick W Daly % =============================================================== % IMPORTANT NOTICE: % This bibliographic style (bst) file has been generated from one or % more master bibliographic style (mbs) files, listed above. % % This generated file can be redistributed and/or modified under the terms % of the LaTeX Project Public License Distributed from CTAN % archives in directory macros/latex/base/lppl.txt; either % version 1 of the License, or any later version. 
% =============================================================== % Name and version information of the main mbs file: % \ProvidesFile{merlin.mbs}[2004/02/09 4.13 (PWD, AO, DPC)] % For use with BibTeX version 0.99a or later %------------------------------------------------------------------- % This bibliography style file is intended for texts in ENGLISH % This is an author-year citation style bibliography. As such, it is % non-standard LaTeX, and requires a special package file to function properly. % Such a package is natbib.sty by Patrick W. Daly % The form of the \bibitem entries is % \bibitem[Jones et al.(1990)]{key}... % \bibitem[Jones et al.(1990)Jones, Baker, and Smith]{key}... % The essential feature is that the label (the part in brackets) consists % of the author names, as they should appear in the citation, with the year % in parentheses following. There must be no space before the opening % parenthesis! % With natbib v5.3, a full list of authors may also follow the year. % In natbib.sty, it is possible to define the type of enclosures that is % really wanted (brackets or parentheses), but in either case, there must % be parentheses in the label. % The \cite command functions as follows: % \citet{key} ==>> Jones et al. (1990) % \citet*{key} ==>> Jones, Baker, and Smith (1990) % \citep{key} ==>> (Jones et al., 1990) % \citep*{key} ==>> (Jones, Baker, and Smith, 1990) % \citep[chap. 2]{key} ==>> (Jones et al., 1990, chap. 2) % \citep[e.g.][]{key} ==>> (e.g. Jones et al., 1990) % \citep[e.g.][p. 32]{key} ==>> (e.g. Jones et al., p. 32) % \citeauthor{key} ==>> Jones et al. 
% \citeauthor*{key} ==>> Jones, Baker, and Smith % \citeyear{key} ==>> 1990 %--------------------------------------------------------------------- ENTRY { address archive author booktitle chapter collaboration doi edition editor eid eprint howpublished institution isbn issn journal key month note number numpages organization pages publisher school series title type url volume year } {} { label extra.label sort.label short.list } INTEGERS { output.state before.all mid.sentence after.sentence after.block } FUNCTION {init.state.consts} { #0 'before.all := #1 'mid.sentence := #2 'after.sentence := #3 'after.block := } STRINGS { s t} FUNCTION {output.nonnull} { 's := output.state mid.sentence = { ", " * write$ } { output.state after.block = { add.period$ write$ newline$ "\newblock " write$ } { output.state before.all = 'write$ { add.period$ " " * write$ } if$ } if$ mid.sentence 'output.state := } if$ s } FUNCTION {output} { duplicate$ empty$ 'pop$ 'output.nonnull if$ } FUNCTION {output.check} { 't := duplicate$ empty$ { pop$ "empty " t * " in " * cite$ * warning$ } 'output.nonnull if$ } FUNCTION {fin.entry} { add.period$ write$ newline$ } FUNCTION {new.block} { output.state before.all = 'skip$ { after.block 'output.state := } if$ } FUNCTION {new.sentence} { output.state after.block = 'skip$ { output.state before.all = 'skip$ { after.sentence 'output.state := } if$ } if$ } FUNCTION {add.blank} { " " * before.all 'output.state := } FUNCTION {date.block} { new.block } FUNCTION {not} { { #0 } { #1 } if$ } FUNCTION {and} { 'skip$ { pop$ #0 } if$ } FUNCTION {or} { { pop$ #1 } 'skip$ if$ } FUNCTION {non.stop} { duplicate$ "}" * add.period$ #-1 #1 substring$ "." = } STRINGS {z} FUNCTION {remove.dots} { 'z := "" { z empty$ not } { z #1 #1 substring$ z #2 global.max$ substring$ 'z := duplicate$ "." 
= 'pop$ { * } if$ } while$ } FUNCTION {new.block.checkb} { empty$ swap$ empty$ and 'skip$ 'new.block if$ } FUNCTION {field.or.null} { duplicate$ empty$ { pop$ "" } 'skip$ if$ } FUNCTION {emphasize} { duplicate$ empty$ { pop$ "" } { "\emph{" swap$ * "}" * } if$ } FUNCTION {bolden} { duplicate$ empty$ { pop$ "" } { "\textbf{" swap$ * "}" * } if$ } FUNCTION {tie.or.space.prefix} { duplicate$ text.length$ #3 < { "~" } { " " } if$ swap$ } FUNCTION {capitalize} { "u" change.case$ "t" change.case$ } FUNCTION {space.word} { " " swap$ * " " * } % Here are the language-specific definitions for explicit words. % Each function has a name bbl.xxx where xxx is the English word. % The language selected here is ENGLISH FUNCTION {bbl.and} { "and"} FUNCTION {bbl.etal} { "et~al." } FUNCTION {bbl.editors} { "eds." } FUNCTION {bbl.editor} { "ed." } FUNCTION {bbl.edby} { "edited by" } FUNCTION {bbl.edition} { "edition" } FUNCTION {bbl.volume} { "volume" } FUNCTION {bbl.of} { "of" } FUNCTION {bbl.number} { "number" } FUNCTION {bbl.nr} { "no." } FUNCTION {bbl.in} { "in" } FUNCTION {bbl.pages} { "pp." } FUNCTION {bbl.page} { "p." } FUNCTION {bbl.eidpp} { "pages" } FUNCTION {bbl.chapter} { "chapter" } FUNCTION {bbl.techrep} { "Technical Report" } FUNCTION {bbl.mthesis} { "Master's thesis" } FUNCTION {bbl.phdthesis} { "Ph.D. 
thesis" } MACRO {jan} {"January"} MACRO {feb} {"February"} MACRO {mar} {"March"} MACRO {apr} {"April"} MACRO {may} {"May"} MACRO {jun} {"June"} MACRO {jul} {"July"} MACRO {aug} {"August"} MACRO {sep} {"September"} MACRO {oct} {"October"} MACRO {nov} {"November"} MACRO {dec} {"December"} MACRO {acmcs} {"ACM Computing Surveys"} MACRO {acta} {"Acta Informatica"} MACRO {cacm} {"Communications of the ACM"} MACRO {ibmjrd} {"IBM Journal of Research and Development"} MACRO {ibmsj} {"IBM Systems Journal"} MACRO {ieeese} {"IEEE Transactions on Software Engineering"} MACRO {ieeetc} {"IEEE Transactions on Computers"} MACRO {ieeetcad} {"IEEE Transactions on Computer-Aided Design of Integrated Circuits"} MACRO {ipl} {"Information Processing Letters"} MACRO {jacm} {"Journal of the ACM"} MACRO {jcss} {"Journal of Computer and System Sciences"} MACRO {scp} {"Science of Computer Programming"} MACRO {sicomp} {"SIAM Journal on Computing"} MACRO {tocs} {"ACM Transactions on Computer Systems"} MACRO {tods} {"ACM Transactions on Database Systems"} MACRO {tog} {"ACM Transactions on Graphics"} MACRO {toms} {"ACM Transactions on Mathematical Software"} MACRO {toois} {"ACM Transactions on Office Information Systems"} MACRO {toplas} {"ACM Transactions on Programming Languages and Systems"} MACRO {tcs} {"Theoretical Computer Science"} FUNCTION {bibinfo.check} { swap$ duplicate$ missing$ { pop$ pop$ "" } { duplicate$ empty$ { swap$ pop$ } { swap$ pop$ } if$ } if$ } FUNCTION {bibinfo.warn} { swap$ duplicate$ missing$ { swap$ "missing " swap$ * " in " * cite$ * warning$ pop$ "" } { duplicate$ empty$ { swap$ "empty " swap$ * " in " * cite$ * warning$ } { swap$ pop$ } if$ } if$ } FUNCTION {format.eprint} { eprint duplicate$ empty$ 'skip$ { "\eprint" archive empty$ 'skip$ { "[" * archive * "]" * } if$ "{" * swap$ * "}" * } if$ } FUNCTION {format.url} { url empty$ { "" } { "\urlprefix\url{" url * "}" * } if$ } STRINGS { bibinfo} INTEGERS { nameptr namesleft numnames } FUNCTION {format.names} { 
'bibinfo := duplicate$ empty$ 'skip$ { 's := "" 't := #1 'nameptr := s num.names$ 'numnames := numnames 'namesleft := { namesleft #0 > } { s nameptr "{vv~}{ll}{ jj}{ f{}}" format.name$ remove.dots bibinfo bibinfo.check 't := nameptr #1 > { namesleft #1 > { ", " * t * } { "," * s nameptr "{ll}" format.name$ duplicate$ "others" = { 't := } { pop$ } if$ t "others" = { " " * bbl.etal emphasize * } { " " * t * } if$ } if$ } 't if$ nameptr #1 + 'nameptr := namesleft #1 - 'namesleft := } while$ } if$ } FUNCTION {format.names.ed} { 'bibinfo := duplicate$ empty$ 'skip$ { 's := "" 't := #1 'nameptr := s num.names$ 'numnames := numnames 'namesleft := { namesleft #0 > } { s nameptr "{f{}~}{vv~}{ll}{ jj}" format.name$ remove.dots bibinfo bibinfo.check 't := nameptr #1 > { namesleft #1 > { ", " * t * } { "," * s nameptr "{ll}" format.name$ duplicate$ "others" = { 't := } { pop$ } if$ t "others" = { " " * bbl.etal emphasize * } { " " * t * } if$ } if$ } 't if$ nameptr #1 + 'nameptr := namesleft #1 - 'namesleft := } while$ } if$ } FUNCTION {format.key} { empty$ { key field.or.null } { "" } if$ } FUNCTION {format.authors} { author "author" format.names duplicate$ empty$ 'skip$ { collaboration "collaboration" bibinfo.check duplicate$ empty$ 'skip$ { " (" swap$ * ")" * } if$ * } if$ } FUNCTION {get.bbl.editor} { editor num.names$ #1 > 'bbl.editors 'bbl.editor if$ } FUNCTION {format.editors} { editor "editor" format.names duplicate$ empty$ 'skip$ { " " * get.bbl.editor "(" swap$ * ")" * * } if$ } FUNCTION {format.isbn} { isbn "isbn" bibinfo.check duplicate$ empty$ 'skip$ { new.block "ISBN " swap$ * } if$ } FUNCTION {format.issn} { issn "issn" bibinfo.check duplicate$ empty$ 'skip$ { new.block "ISSN " swap$ * } if$ } FUNCTION {format.doi} { doi "doi" bibinfo.check duplicate$ empty$ 'skip$ { new.block "\doi{" swap$ * "}" * } if$ } FUNCTION {format.note} { note empty$ { "" } { note #1 #1 substring$ duplicate$ "{" = 'skip$ { output.state mid.sentence = { "l" } { "u" } if$ change.case$ } 
if$ note #2 global.max$ substring$ * "note" bibinfo.check } if$ } FUNCTION {format.title} { title "title" bibinfo.check duplicate$ empty$ 'skip$ { "\enquote{" swap$ * add.period$ "}" * } if$ } FUNCTION {end.quote.btitle} { booktitle empty$ 'skip$ { before.all 'output.state := } if$ } FUNCTION {format.full.names} {'s := "" 't := #1 'nameptr := s num.names$ 'numnames := numnames 'namesleft := { namesleft #0 > } { s nameptr "{vv~}{ll}" format.name$ 't := nameptr #1 > { namesleft #1 > { ", " * t * } { s nameptr "{ll}" format.name$ duplicate$ "others" = { 't := } { pop$ } if$ t "others" = { " " * bbl.etal emphasize * } { numnames #2 > { "," * } 'skip$ if$ bbl.and space.word * t * } if$ } if$ } 't if$ nameptr #1 + 'nameptr := namesleft #1 - 'namesleft := } while$ } FUNCTION {author.editor.key.full} { author empty$ { editor empty$ { key empty$ { cite$ #1 #3 substring$ } 'key if$ } { editor format.full.names } if$ } { author format.full.names } if$ } FUNCTION {author.key.full} { author empty$ { key empty$ { cite$ #1 #3 substring$ } 'key if$ } { author format.full.names } if$ } FUNCTION {editor.key.full} { editor empty$ { key empty$ { cite$ #1 #3 substring$ } 'key if$ } { editor format.full.names } if$ } FUNCTION {make.full.names} { type$ "book" = type$ "inbook" = or 'author.editor.key.full { type$ "proceedings" = 'editor.key.full 'author.key.full if$ } if$ } FUNCTION {output.bibitem} { newline$ "\bibitem[{" write$ label write$ ")" make.full.names duplicate$ short.list = { pop$ } { * } if$ "}]{" * write$ cite$ write$ "}" write$ newline$ "" before.all 'output.state := } FUNCTION {n.dashify} { 't := "" { t empty$ not } { t #1 #1 substring$ "-" = { t #1 #2 substring$ "--" = not { "--" * t #2 global.max$ substring$ 't := } { { t #1 #1 substring$ "-" = } { "-" * t #2 global.max$ substring$ 't := } while$ } if$ } { t #1 #1 substring$ * t #2 global.max$ substring$ 't := } if$ } while$ } FUNCTION {word.in} { bbl.in capitalize " " * } FUNCTION {format.date} { year "year" 
bibinfo.check duplicate$ empty$ { "empty year in " cite$ * "; set to ????" * warning$ pop$ "????" } 'skip$ if$ extra.label * before.all 'output.state := " (" swap$ * ")" * } FUNCTION {format.btitle} { title "title" bibinfo.check duplicate$ empty$ 'skip$ { emphasize } if$ } FUNCTION {either.or.check} { empty$ 'pop$ { "can't use both " swap$ * " fields in " * cite$ * warning$ } if$ } FUNCTION {format.bvolume} { volume empty$ { "" } { bbl.volume volume tie.or.space.prefix "volume" bibinfo.check * * series "series" bibinfo.check duplicate$ empty$ 'pop$ { swap$ bbl.of space.word * swap$ emphasize * } if$ "volume and number" number either.or.check } if$ } FUNCTION {format.number.series} { volume empty$ { number empty$ { series field.or.null } { series empty$ { number "number" bibinfo.check } { output.state mid.sentence = { bbl.number } { bbl.number capitalize } if$ number tie.or.space.prefix "number" bibinfo.check * * bbl.in space.word * series "series" bibinfo.check * } if$ } if$ } { "" } if$ } FUNCTION {format.edition} { edition duplicate$ empty$ 'skip$ { output.state mid.sentence = { "l" } { "t" } if$ change.case$ "edition" bibinfo.check " " * bbl.edition * } if$ } INTEGERS { multiresult } FUNCTION {multi.page.check} { 't := #0 'multiresult := { multiresult not t empty$ not and } { t #1 #1 substring$ duplicate$ "-" = swap$ duplicate$ "," = swap$ "+" = or or { #1 'multiresult := } { t #2 global.max$ substring$ 't := } if$ } while$ multiresult } FUNCTION {format.pages} { pages duplicate$ empty$ 'skip$ { duplicate$ multi.page.check { bbl.pages swap$ n.dashify } { bbl.page swap$ } if$ tie.or.space.prefix "pages" bibinfo.check * * } if$ } FUNCTION {format.journal.pages} { pages duplicate$ empty$ 'pop$ { swap$ duplicate$ empty$ { pop$ pop$ format.pages } { ", " * swap$ n.dashify "pages" bibinfo.check * } if$ } if$ } FUNCTION {format.journal.eid} { eid "eid" bibinfo.check duplicate$ empty$ 'pop$ { swap$ duplicate$ empty$ 'skip$ { ", " * } if$ swap$ * numpages empty$ 'skip$ { 
bbl.eidpp numpages tie.or.space.prefix "numpages" bibinfo.check * * " (" swap$ * ")" * * } if$ } if$ } FUNCTION {format.vol.num.pages} { volume field.or.null duplicate$ empty$ 'skip$ { "volume" bibinfo.check } if$ bolden number "number" bibinfo.check duplicate$ empty$ 'skip$ { swap$ duplicate$ empty$ { "there's a number but no volume in " cite$ * warning$ } 'skip$ if$ swap$ "(" swap$ * ")" * } if$ * eid empty$ { format.journal.pages } { format.journal.eid } if$ } FUNCTION {format.chapter.pages} { chapter empty$ 'format.pages { type empty$ { bbl.chapter } { type "l" change.case$ "type" bibinfo.check } if$ chapter tie.or.space.prefix "chapter" bibinfo.check * * pages empty$ 'skip$ { ", " * format.pages * } if$ } if$ } FUNCTION {bt.enquote} { duplicate$ empty$ 'skip$ { "\enquote{" swap$ * non.stop { ",} " * } { "}, " * } if$ } if$ } FUNCTION {format.booktitle} { booktitle "booktitle" bibinfo.check bt.enquote } FUNCTION {format.in.ed.booktitle} { format.booktitle duplicate$ empty$ 'skip$ { editor "editor" format.names.ed duplicate$ empty$ 'pop$ { " " * get.bbl.editor "(" swap$ * "), " * * swap$ * } if$ word.in swap$ * } if$ } FUNCTION {format.thesis.type} { type duplicate$ empty$ 'pop$ { swap$ pop$ "t" change.case$ "type" bibinfo.check } if$ } FUNCTION {format.tr.number} { number "number" bibinfo.check type duplicate$ empty$ { pop$ bbl.techrep } 'skip$ if$ "type" bibinfo.check swap$ duplicate$ empty$ { pop$ "t" change.case$ } { tie.or.space.prefix * * } if$ } FUNCTION {format.article.crossref} { word.in " \cite{" * crossref * "}" * } FUNCTION {format.book.crossref} { volume duplicate$ empty$ { "empty volume in " cite$ * "'s crossref of " * crossref * warning$ pop$ word.in } { bbl.volume capitalize swap$ tie.or.space.prefix "volume" bibinfo.check * * bbl.of space.word * } if$ " \cite{" * crossref * "}" * } FUNCTION {format.incoll.inproc.crossref} { word.in " \cite{" * crossref * "}" * } FUNCTION {format.org.or.pub} { 't := "" address empty$ t empty$ and 'skip$ { t 
empty$ { address "address" bibinfo.check * } { t * address empty$ 'skip$ { ", " * address "address" bibinfo.check * } if$ } if$ } if$ } FUNCTION {format.publisher.address} { publisher "publisher" bibinfo.warn format.org.or.pub } FUNCTION {format.organization.address} { organization "organization" bibinfo.check format.org.or.pub } FUNCTION {article} { output.bibitem format.authors "author" output.check author format.key output format.date "year" output.check date.block format.title "title" output.check new.block crossref missing$ { journal "journal" bibinfo.check emphasize "journal" output.check format.vol.num.pages output } { format.article.crossref output.nonnull format.pages output } if$ format.issn output format.doi output new.block format.note output format.eprint output format.url output fin.entry } FUNCTION {book} { output.bibitem author empty$ { format.editors "author and editor" output.check editor format.key output } { format.authors output.nonnull crossref missing$ { "author and editor" editor either.or.check } 'skip$ if$ } if$ format.date "year" output.check date.block format.btitle "title" output.check crossref missing$ { format.bvolume output new.block format.number.series output new.sentence format.publisher.address output } { new.block format.book.crossref output.nonnull } if$ format.edition output format.isbn output format.doi output new.block format.note output format.eprint output format.url output fin.entry } FUNCTION {booklet} { output.bibitem format.authors output author format.key output format.date "year" output.check date.block format.title "title" output.check new.block howpublished "howpublished" bibinfo.check output address "address" bibinfo.check output format.isbn output format.doi output new.block format.note output format.eprint output format.url output fin.entry } FUNCTION {inbook} { output.bibitem author empty$ { format.editors "author and editor" output.check editor format.key output } { format.authors output.nonnull crossref 
missing$ { "author and editor" editor either.or.check } 'skip$ if$ } if$ format.date "year" output.check date.block format.btitle "title" output.check crossref missing$ { format.bvolume output format.chapter.pages "chapter and pages" output.check new.block format.number.series output new.sentence format.publisher.address output } { format.chapter.pages "chapter and pages" output.check new.block format.book.crossref output.nonnull } if$ format.edition output crossref missing$ { format.isbn output } 'skip$ if$ format.doi output new.block format.note output format.eprint output format.url output fin.entry } FUNCTION {incollection} { output.bibitem format.authors "author" output.check author format.key output format.date "year" output.check date.block format.title "title" output.check new.block crossref missing$ { format.in.ed.booktitle "booktitle" output.check end.quote.btitle format.bvolume output format.number.series output format.chapter.pages output new.sentence format.publisher.address output format.edition output format.isbn output } { format.incoll.inproc.crossref output.nonnull format.chapter.pages output } if$ format.doi output new.block format.note output format.eprint output format.url output fin.entry } FUNCTION {inproceedings} { output.bibitem format.authors "author" output.check author format.key output format.date "year" output.check date.block format.title "title" output.check new.block crossref missing$ { format.in.ed.booktitle "booktitle" output.check end.quote.btitle format.bvolume output format.number.series output format.pages output new.sentence publisher empty$ { format.organization.address output } { organization "organization" bibinfo.check output format.publisher.address output } if$ format.isbn output format.issn output } { format.incoll.inproc.crossref output.nonnull format.pages output } if$ format.doi output new.block format.note output format.eprint output format.url output fin.entry } FUNCTION {conference} { inproceedings } FUNCTION 
{manual} { output.bibitem format.authors output author format.key output format.date "year" output.check date.block format.btitle "title" output.check organization address new.block.checkb organization "organization" bibinfo.check output address "address" bibinfo.check output format.edition output format.doi output new.block format.note output format.eprint output format.url output fin.entry } FUNCTION {mastersthesis} { output.bibitem format.authors "author" output.check author format.key output format.date "year" output.check date.block format.btitle "title" output.check new.block bbl.mthesis format.thesis.type output.nonnull school "school" bibinfo.warn output address "address" bibinfo.check output format.doi output new.block format.note output format.eprint output format.url output fin.entry } FUNCTION {misc} { output.bibitem format.authors output author format.key output format.date "year" output.check date.block format.title output new.block howpublished "howpublished" bibinfo.check output format.doi output new.block format.note output format.eprint output format.url output fin.entry } FUNCTION {phdthesis} { output.bibitem format.authors "author" output.check author format.key output format.date "year" output.check date.block format.btitle "title" output.check new.block bbl.phdthesis format.thesis.type output.nonnull school "school" bibinfo.warn output address "address" bibinfo.check output format.doi output new.block format.note output format.eprint output format.url output fin.entry } FUNCTION {proceedings} { output.bibitem format.editors output editor format.key output format.date "year" output.check date.block format.btitle "title" output.check format.bvolume output format.number.series output new.sentence publisher empty$ { format.organization.address output } { organization "organization" bibinfo.check output format.publisher.address output } if$ format.isbn output format.issn output format.doi output new.block format.note output format.eprint output 
format.url output fin.entry } FUNCTION {techreport} { output.bibitem format.authors "author" output.check author format.key output format.date "year" output.check date.block format.title "title" output.check new.block format.tr.number emphasize output.nonnull institution "institution" bibinfo.warn output address "address" bibinfo.check output format.doi output new.block format.note output format.eprint output format.url output fin.entry } FUNCTION {unpublished} { output.bibitem format.authors "author" output.check author format.key output format.date "year" output.check date.block format.title "title" output.check format.doi output new.block format.note "note" output.check format.eprint output format.url output fin.entry } FUNCTION {default.type} { misc } READ FUNCTION {sortify} { purify$ "l" change.case$ } INTEGERS { len } FUNCTION {chop.word} { 's := 'len := s #1 len substring$ = { s len #1 + global.max$ substring$ } 's if$ } FUNCTION {format.lab.names} { 's := "" 't := s #1 "{vv~}{ll}" format.name$ s num.names$ duplicate$ #2 > { pop$ " " * bbl.etal emphasize * } { #2 < 'skip$ { s #2 "{ff }{vv }{ll}{ jj}" format.name$ "others" = { " " * bbl.etal emphasize * } { bbl.and space.word * s #2 "{vv~}{ll}" format.name$ * } if$ } if$ } if$ } FUNCTION {author.key.label} { author empty$ { key empty$ { cite$ #1 #3 substring$ } 'key if$ } { author format.lab.names } if$ } FUNCTION {author.editor.key.label} { author empty$ { editor empty$ { key empty$ { cite$ #1 #3 substring$ } 'key if$ } { editor format.lab.names } if$ } { author format.lab.names } if$ } FUNCTION {editor.key.label} { editor empty$ { key empty$ { cite$ #1 #3 substring$ } 'key if$ } { editor format.lab.names } if$ } FUNCTION {calc.short.authors} { type$ "book" = type$ "inbook" = or 'author.editor.key.label { type$ "proceedings" = 'editor.key.label 'author.key.label if$ } if$ 'short.list := } FUNCTION {calc.label} { calc.short.authors short.list "(" * year duplicate$ empty$ short.list key field.or.null = or { 
pop$ "" } 'skip$ if$ * 'label := } FUNCTION {sort.format.names} { 's := #1 'nameptr := "" s num.names$ 'numnames := numnames 'namesleft := { namesleft #0 > } { s nameptr "{vv{ } }{ll{ }}{ f{ }}{ jj{ }}" format.name$ 't := nameptr #1 > { " " * namesleft #1 = t "others" = and { "zzzzz" * } { t sortify * } if$ } { t sortify * } if$ nameptr #1 + 'nameptr := namesleft #1 - 'namesleft := } while$ } FUNCTION {sort.format.title} { 't := "A " #2 "An " #3 "The " #4 t chop.word chop.word chop.word sortify #1 global.max$ substring$ } FUNCTION {author.sort} { author empty$ { key empty$ { "to sort, need author or key in " cite$ * warning$ "" } { key sortify } if$ } { author sort.format.names } if$ } FUNCTION {author.editor.sort} { author empty$ { editor empty$ { key empty$ { "to sort, need author, editor, or key in " cite$ * warning$ "" } { key sortify } if$ } { editor sort.format.names } if$ } { author sort.format.names } if$ } FUNCTION {editor.sort} { editor empty$ { key empty$ { "to sort, need editor or key in " cite$ * warning$ "" } { key sortify } if$ } { editor sort.format.names } if$ } FUNCTION {presort} { calc.label label sortify " " * type$ "book" = type$ "inbook" = or 'author.editor.sort { type$ "proceedings" = 'editor.sort 'author.sort if$ } if$ #1 entry.max$ substring$ 'sort.label := sort.label * " " * title field.or.null sort.format.title * #1 entry.max$ substring$ 'sort.key$ := } ITERATE {presort} SORT STRINGS { last.label next.extra } INTEGERS { last.extra.num number.label } FUNCTION {initialize.extra.label.stuff} { #0 int.to.chr$ 'last.label := "" 'next.extra := #0 'last.extra.num := #0 'number.label := } FUNCTION {forward.pass} { last.label label = { last.extra.num #1 + 'last.extra.num := last.extra.num int.to.chr$ 'extra.label := } { "a" chr.to.int$ 'last.extra.num := "" 'extra.label := label 'last.label := } if$ number.label #1 + 'number.label := } FUNCTION {reverse.pass} { next.extra "b" = { "a" 'extra.label := } 'skip$ if$ extra.label 'next.extra := 
extra.label duplicate$ empty$ 'skip$ { "{\natexlab{" swap$ * "}}" * } if$ 'extra.label := label extra.label * 'label := } EXECUTE {initialize.extra.label.stuff} ITERATE {forward.pass} REVERSE {reverse.pass} FUNCTION {bib.sort.order} { sort.label " " * year field.or.null sortify * " " * title field.or.null sort.format.title * #1 entry.max$ substring$ 'sort.key$ := } ITERATE {bib.sort.order} SORT FUNCTION {begin.bib} { preamble$ empty$ 'skip$ { preamble$ write$ newline$ } if$ "\begin{thebibliography}{" number.label int.to.str$ * "}" * write$ newline$ "\newcommand{\enquote}[1]{``#1''}" write$ newline$ "\providecommand{\natexlab}[1]{#1}" write$ newline$ "\providecommand{\url}[1]{\texttt{#1}}" write$ newline$ "\providecommand{\urlprefix}{URL }" write$ newline$ "\expandafter\ifx\csname urlstyle\endcsname\relax" write$ newline$ " \providecommand{\doi}[1]{doi:\discretionary{}{}{}#1}\else" write$ newline$ " \providecommand{\doi}{doi:\discretionary{}{}{}\begingroup \urlstyle{rm}\Url}\fi" write$ newline$ "\providecommand{\eprint}[2][]{\url{#2}}" write$ newline$ } EXECUTE {begin.bib} EXECUTE {init.state.consts} ITERATE {call.type$} FUNCTION {end.bib} { newline$ "\end{thebibliography}" write$ newline$ } EXECUTE {end.bib} %% End of customized bst file %% %% End of file `jss.bst'. eRm/inst/doc/index.html.old0000744000176000001440000000046511572663323015337 0ustar ripleyusers R: eRm vignettes

Vignettes of package eRm

eRmvig.pdf:
eRm Basics
eRm/inst/doc/eRm_object_tree.pdf0000744000176000001440000006570211572663323016355 0ustar ripleyusers%PDF-1.4 % 1 0 obj << /Title (eRm.dvi) /Creator (dvips\(k\) 5.99 Copyright 2010 Radical Eye Software) /ModDate (D:20110414120412+02'00') /Producer (PDF-XChange Viewer [Version: 2.0 \(Build 54.0\) \(Jul 9 2010; 16:47:37\)]) /CreationDate (D:20110405114134+02'00') >> endobj 2 0 obj << /Type /Catalog /Pages 3 0 R /Metadata 4 0 R >> endobj 3 0 obj << /Kids [5 0 R] /Type /Pages /Count 1 >> endobj 4 0 obj << /Type /Metadata /Length 3299 /Subtype /XML >> stream 2011-04-05T11:41:34+02:00 dvips(k) 5.99 Copyright 2010 Radical Eye Software 2011-04-14T12:04:12+02:00 6491a0bc-61c4-11e0-0000-bd0b9bb047cc 6491a0bc-61c4-11e0-0000-bd0b9bb047cc application/pdf eRm.dvi PDF-XChange Viewer [Version: 2.0 (Build 54.0) (Jul 9 2010; 16:47:37)] endstream endobj 5 0 obj << /Type /Page /Parent 3 0 R /Rotate 90 /Contents 6 0 R /MediaBox [0 0 595 842] /Resources << /Font 7 0 R /ProcSet [/PDF /Text] /ExtGState 8 0 R >> >> endobj 6 0 obj << /Filter [/FlateDecode] /Length 10699 >> stream x][,q~_G~y $ CGvk)O}E3g䙆%ebXZ9)+?>?Wb'>9)Nx!%GS8AiXOE}m\thkƢQJ}i>=%8z>tg_=>i c4!:RE_Jb}%Zriwj[MA}:({o Ƹ=`0j?;刺P:%O>})@X9CUEp?yY}Eӌ!>ӯ ?>O&BMEkm1)k: [u6mɤb@(nAD=^D8Ps 71UDe!=76 ^n 0)^"Fkץ}zĞzܾ Ou(/_>| FEMsTۓ7&O*&+Q2 Hbivm)&JcQEw>LC},!&˺Q%81тi!J.ï~vCi٬)&ZO?R.m|qAJ-u! #,RezDZ=NJDtB#ڵޣ\XvuX7io@-_49t !h(>Gh+yă\!MyD!a Ʋ`7(Ezk8&"h῾|^矾|o (>B0w(;P<0-t>nK%P(Gk٣\X6`10w Ͱ jh2~&W29T4b&bX7Vi#d[=vs15P:BEz8`i!zFhJ/kQq,Gs2nOR^{ tXFʦPh -l#WeA͆G)XC$6>F+/k[v|(`]-R6;(j AYA۶qS k2X7JXR8I3ghwȧyl@'ц1+nc+`.u~h ;}1)~-ޔZHyJ%Z M'UX v4o=`-bBaF@RGuNJ6_ϿnVKl_KS_f9 SG9'!tRDޣla~ bX 1ZnFi qe!|õeo7 K4X0Eb4VR)t2#D .d7uP\u>+$"Nv(;=QJxmTaPБ ;/ 6ZM(`jbX *.ɌXJGN]9ݖX\ժ8ϪP`rMmrX=Tz[@ [P܂%5 tL`i&1 ao4lT'Yӡ)c(4U9_a#+v^0CJwֽ!UX" 㟌{B7}^,,BC0f`. 
[~_c D~5o,F@˙%d+ZՃ|ؽu0:a(L(/!gx!E,[`3K{h,sl&\&5ga u/wtٿ; G-|͙$]>Ց|zuq ؄"Tlm6Jͭ:&$[+QJmTxދxuu>g$P8ޙ<Sz{NVs*άZ p iƯc=Hq[w]9A9ec@m#W :&PNOJ(o|:-f乳JmT \uaYػ&iA4\|QR P ¢OBB+۠乱A*6z׃o7]%MLBT||cì4Ui_W|~oOߩvS`K3 C>sSO"9j3n+9>>/UsNNe;rvr~dNm)bh`?{EH1@3q)9d#y!"3.rÆ^ٜ޵+Vmƍy\9u5,0xS1l/ޛXo:T%Ňfչ,G&w6.)GS??j?+\=}vV!Ra*\Pv 0&;-˽`qJh–p`v~o;a\vK`>vuQkAW: hBǘl)[TT~r@W(m|~C8GE/{3\ S=<K_ `//߿@ 6DC֤`.@€"LEA_@SkRӭF(m - {õ1@@1Є6})%I@yPM0ʑLacz?8UEvGk |õw1e6a7u+Б3y0,ݱ pDcsBq*&!\&dCe$`wz08+! T))chK䢁e="@EsFR'Ap8- &lX2=B9_ A# /x춥XrꄸNyS8$Shye2kMj6- ߲-5~r82r­x(_?|Jmhz6>%Tzd)%V7=Zφޱ͏keމ۔ViuViuViuViuViuViuViuViuq[$H.3 nMs~v 6Pp (_(u.!_Fq;/_l;a R{h7]jo擉ۯc j/na{n{l;a,ޡ`0\CfCjo46Jj9 4r(7!:.8Ik{7mFu#7M\pH1qRHBI9Vg5eK{u ІU(}v'=X=:@Qn*u<0k+Vs-YC.CW6ő=p٧[ \6bEQ .Fv `K b ne<DzE?-xvTxp uh4L'@ܑێץxP]NJSX*3{t!:J> [akp-Ocqiי <'[hE0p?qk>xvx,f9tT*.GjwHz3)ޮ"l;@Oگb5fkgl|;xeJ6bXn؆H"hbɻGeù+SE:<P[GqlxRxdTաf)f Wwv(#0 sZ ?2oX`C5A?*A~]@Um5MD QV@Rv$q,@ys]` 5ڈi r>Gq pW"h-|¢, ڷ.ϗsxM;b~=_QVw(]awCQp=`&D+5Ja,תQjGF ;R&3-8KG [X I %!QIY[bqz}2q4" ڵi8fqs[< t8)%, %q=iJ1KB)CfO#PY^, -}PMP2Lgr5"xpS1"]  $)] ~rDt9c;RuGए wv]Ӹt}{Xh2&^lx1AgGAc e{1*b\ϴҞ%K#\-˥h^ᬄAZyn, m ^HG]?B~ݒǓ3sX7}V.Mo֎ &Ь[8npRcY6E$3oP)FsVqlpB5s!@ڸdzC`rtNcbZ U/q<Ӳ tR4!S+P6J8(jC( 6M=dߺSmH3!! 
dC_[S~^ג0҇#BG[-6Ą)^8Mpr]¢T1 – N 0JP8p@;H i {5~$OKF D85bEUQV¢>Is44)_i.5sc!iT m\]Ty -miG;qLouYڜgJ-g5 mVZM"#Os(h}0Cyzqe5طFHC1_>x:Q Ͼ$^A @z&,!^WEm_@[htPbkFPUDֹfP);=+s cu`O>ƕ҇Fdmk@{udc-$N8#Miy)Ǎvs2:&jJ[sTEaja$Ȅ H&\cdSMC^E0rݢR~W/`ЬxݎB NPvyLWDc }\҇Fy"kyuc1W 2O,.Nr Vʄakw:M)q1WҰcgŦ\WVaЩ%(AGۦBPrJ SX4f,IH J-UPp(i66i;tg ӝa3Lw0;tg ӝa3Lw0Nӝa3Lw0;AOw0;tgxtw2|JzzXc y&XԠm H )sg!iD@ڸ-A;bF`C xO;)˞s1XxXT[7%Y] 36G}FpVMn}CIÔrMm0'+Ef ZxiaG.!5s!i \cl&ۀ$ƙss.!Z3gDp mA;H+iM=l{xl3q ԸMT\ %59Y\Qs\6OOd .pnr/HjRjXPN\B0)BTK GN VBk*5ƣQj+Z ~T{ucٖpOEt3h$@+B:@Io$W_ιc8RgsFm2u]xgzDPKH yEZ!5H+WvI9DIVbQBqŅ͜l/1E#2!2' yF^B/ǽVߚX\<8ҵR!g;k}˵QB Pxo]/R%o-eڇp@}K^; *ed}Y;֔k}˵Qns4,mw7xMu1P(P.;*cΖ8\8@ q>Na w(W fk}E8M@{?~.bŧ bZii`(kqT~ ّv:rh>~BIx]XSQep#m,mQ ~Fuv`0X)o햲X\;@ШuK퇱\{~p*^FFb#A fږnQQWQ n4); ȿg]$?┏?^<M ^^rfpd7:HBBVCN EE~k~{HvpEgȆ+l`ָ~AvRqGE_X |Pv~/:%!C($١\8dQ nE wɼ(;ogX y+v{tl԰;8 Sa= ǡQXP;+y˵(@6u5|=n]窏mAWbjG3MBlH|;;6x\=Q;UnE:,2%;֗'ޝ&AnXZb>@/ OȫzBHcQBPϮ] @B^7[;3to9vWQ 07~n] $4kvIц˸TҢn N4{O4{_afifififififififߞ|9p endstream endobj 7 0 obj << /R8 9 0 R /R10 10 0 R /R12 11 0 R >> endobj 8 0 obj << /R7 12 0 R >> endobj 9 0 obj << /Type /Font /Widths [555 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 277 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 722 0 0 0 0 0 361 0 0 625 916 0 0 680 0 736 555 722 0 0 0 0 0 0 0 0 0 0 0 0 500 0 0 555 444 305 500 555 277 0 527 277 833 0 500 555 0 391 394 388 0 0 722] /Subtype /Type1 /BaseFont /QAHJTD+CMR10 /Encoding 13 0 R /LastChar 119 /FirstChar 12 /FontDescriptor 14 0 R >> endobj 10 0 obj << /Type /Font /Widths [570 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 399 399 0 0 0 0 285 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 742 0 0 670 806 0 370 0 0 642 941 0 799 0 0 756 0 742 0 0 1056 0 0 0 0 0 0 0 0 0 513 0 456 570 457 314 513 570 285 314 542 285 856 570 513 570 0 402 405 399 570 0 742 0 542] /Subtype /Type1 /BaseFont /EOSVXO+CMR9 /Encoding 15 0 R /LastChar 121 /FirstChar 12 /FontDescriptor 16 0 R >> endobj 11 0 obj << /Type /Font /Widths [590 0 0 0 0 0 0 0 
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 413 413 0 0 0 0 295 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 781 0 0 0 0 0 0 0 0 0 0 0 0 0 0 531 0 472 590 472 324 0 0 295 0 0 295 885 590 531 590 0 414 419 413 590 560 0 560 560] /Subtype /Type1 /BaseFont /YYTCCC+CMR8 /Encoding 17 0 R /LastChar 121 /FirstChar 12 /FontDescriptor 18 0 R >> endobj 12 0 obj << /OPM 1 /Type /ExtGState >> endobj 13 0 obj << /Type /Encoding /Differences [12 /fi] /BaseEncoding /WinAnsiEncoding >> endobj 14 0 obj << /Type /FontDescriptor /Flags 6 /StemV 106 /Ascent 705 /CharSet (/C/I/L/M/P/R/S/T/a/d/e/f/fi/g/h/i/k/l/m/o/p/period/r/s/t/w) /Descent -206 /XHeight 453 /FontBBox [0 -206 879 705] /FontName /QAHJTD+CMR10 /CapHeight 705 /FontFile3 19 0 R /ItalicAngle 0 /MissingWidth 500 >> endobj 15 0 obj << /Type /Encoding /Differences [12 /fi] /BaseEncoding /WinAnsiEncoding >> endobj 16 0 obj << /Type /FontDescriptor /Flags 6 /StemV 108 /Ascent 750 /CharSet (/C/F/G/I/L/M/O/R/T/W/a/c/d/e/f/fi/g/h/i/j/k/l/m/n/o/p/parenleft/parenright/period/r/s/t/u/w/y) /Descent -249 /XHeight 453 /FontBBox [-39 -249 1036 750] /FontName /EOSVXO+CMR9 /CapHeight 705 /FontFile3 20 0 R /ItalicAngle 0 /MissingWidth 500 >> endobj 17 0 obj << /Type /Encoding /Differences [12 /fi] /BaseEncoding /WinAnsiEncoding >> endobj 18 0 obj << /Type /FontDescriptor /Flags 4 /StemV 128 /Ascent 750 /CharSet (/R/a/c/d/e/f/fi/i/l/m/n/o/p/parenleft/parenright/period/r/s/t/u/v/x/y) /Descent -250 /XHeight 446 /FontBBox [0 -250 857 750] /FontName /YYTCCC+CMR8 /CapHeight 683 /FontFile3 21 0 R /ItalicAngle 0 /MissingWidth 500 >> endobj 19 0 obj << /Filter [/FlateDecode] /Length 3141 /Subtype /Type1C >> stream xW{TSg?!sTLdsSuZJ".Uux @!Iv$<@xGP[.t,:o3j[-5s&YY9η=/"ۋD,|z#%xb׻ 9?l}Տa/(HUNIJ._$$$xQشd*V#(S˓5{.;;;(VNzuMr&EBS:=M!V,.h=,](ԁ4fe4kc׭OP$)%|7ޢQpj3zVRUjwj ZJA@-Q z#t򦒩lzx]/6yI[+i.O!6oș;yb_;0_'3yi8(H&@=\7MZNc996`6ZD5߹ns9 KE7`m#S:(kS!DمrwxigA`jb^*tPPm MtG~d7ɛdYJbR p"Чv`Y[dGI_(ˮOtS]3fhCS.zq9}.ݷ3`pƽȢ;[.bb1no 
K*84^a͐6Fӧu_%WKh"TIe68?餹I8T5Xr }Va Iq;;:m1lt%$ɋvtN85f0#XG!L_8}l/0P#'t.[Mls}ñFp8j/v$TV2p뚚󳻛{W3305P n}({ K8 *ք'צh岱֒? 8߆.|&7b <c*pa)䱛՛iAȉk?8) KOQϺ-+\K_.04CE觲b%7Izr.S)\@d3zuv~KT?* Ӯn㢵4 ʼ&6:r0_L/jZsiHC ͤ2OZCU~yUUkm=\24P +NCzYj~nIOU@ XU1;r]|c7'~}|# rT زؓ/*?7Kt9NCII8`BVo_6ۚu$ov~DZ`~~ͼ|K'l2 '6d%v>(,%j[\a~N:.$`S_A+~'¶톊N9Nwp.I~1 @5Oiнe)zS4l囮db![ЃF e= b6;Jln%?yVA,eV4N7ݿ(Mc@#1L&~#Tv\dsHk9}|a,kioqѫ*~ޔQMZQi-Bӟ\HX"%rٞ!8`Mbi.WN6 9pf}mÝg` o SqO g0wȜ9)ba-lAQPUdWh()2p^ )7#t _T:D=Wb XdO:f9ŇҜ|P3얶CΈU/F\yox %.Z:\fH 7'1GX\!A_~{{Wr23~`L0\2)4Xb#i8ɍIp)ݶE FOq/Zd_•D%|b8WV7+0أhLʠv$W͑ў=F]^/B9X}:(`rk j* :KbXի_v(;{rR!MPQL؝(.w Dra!M;v@ <2>z`c ˶ug NW^#g$ݦs 4Cףs}⹡[6q$W-alg0RRm!U0xQ%H'4ZϸFݿ6ϒgFvXm`z_z,j|.3%/||OH\Ep?J܏4-VI5*z+ir*b5lֹAPAd~W WX?P"HM&A۴lڞfdf82]N{'#ŭ~3CBC>yҵPY8 y)j?g*ԤObKޤj}8xܗW^ΐ[K >ed7[ۋ[Uʼ5,8Ƴ!XU9bd0,AWPҘ#6ze·k?yВ;NJx"h"]De:Y8Se4X =+% O. 8X_f$D7=0>xgw pxwߥ9rKw 絚>0ۡ ڌmK-( o4(aUG}f'+!]HyBJWPTL?{2!&㪉7G~_G\ Nar<:ğB^d腷ؿGbԝU5 V4MY.LZK.cnOvu߼!4^ቊ89iiW(> stream xX XT^"$۵%R|TJye``DP`7 : 0xB$D#NISY۶f <93352e#\׬s+I&=3LzVJL?82]Mn.x=h`2YL_L 3oT3g, SG Dk?J0MϤM3$LW#MYLy-8&yUL\&LFef\?N,!@")xurȚа#"EKʌp wJbJ`n;hM(q,S)@.4JEWo+8sֈfgQS؈USYhQ T4$רp&kZYk/4wb4AU* h{w/ .K[G{3{3jS{! ??. BVHdS h:( J,]'tVlubԡ%eϵu|L]8%_]X_ZS):~3𳷬yZX>WA7[$SXX '"ڳ)k:y5SW;Y#"frI'9Hw6SujW ]oe#āZ6B@P+^}`jv#E[#=mU;ސ;F|_5FΆ Bk8H }.{E06M ;t`(!V@$H[:jGbYNyT95W,D ՗F!O~gT@K9ì_l.ViM:֦[=;m \nM/ঈN+6Z'+g;VR֤4ت:wZ dQpasݬWQ8wydf]3\Sܾu om,U o*$Y]_*JsWB] !@ uM>  V~&ih>J) Vhn') t.җ[6qSlJ2-c[5Ԩuk{g\!-(i8i)! 
T4LO!@K2ݥFlQzwޒK,'/)b꤆_=:<Mq-8T TAl}| pN$y\iK~Eq]jDr:pw˿@ RW 5"=_=b**BȆdhsYS8m1{ܨ0MZX 04sf)-K{Iozi!i, {A;PawwB t5miAc +-{d“ȿuubG uj ;(h0/.ʥtZiXp6PW.&lufdMq4}'}=ִrx[7B.+9m޺⚪T{Hl6"R -i^j% +߭'Ǿ{~Rό,Sl^mφokmbE0E)d߂MLe7ġx:4Ј*J1& Tq?.4˺;tV{k>+,4Eze۶kg H|Nѝ8D_4^覸 -aps<~J(153 V&()sϼ@9w<_!12Kloj5wAQMx52߆Jמ+,DQ9>FQ֩6uhP[" -#:thY"71@ BK14Bu䤸?>^S][$$'4mH'GdGXr*Hkp TA3(Qy!Thai Q(AO#4Gqׇ$Xм%s1)u ] &>qYlwA.'aSȾDF+5uofX+ؒZUG\)p[6Zvm7' endstream endobj 21 0 obj << /Filter [/FlateDecode] /Length 2454 /Subtype /Type1C >> stream xV TTuÅ$[Xvgt- ւ%ȀRTaanD\++)-,-konoirw߱-&5}' #C; epf\j kFIqaL#Tkc"%y5xXzQy[{ړEa-A'i:E t9TUMYbuhs?oxV,d-pvyZ•ej&>GDa1w([Ƞ 'B2)tm>ѷ>fd5$:9Xt[aj"6},KNaSr, ! rRkC"Ǎp8 <>SkT;ra/EF-A%xNja;xflN!*uPZ]vf_@ɉn C#áaϤ='oQM͝d[ގQK ɨ uՊG{7@*'me-R.Dڗ'jؕ#LpxFB໧ϢP O8LSN! cwu8Ny"<M -h@j# |򪖇keC" 8 fӫ`Pq2iJTNC3@SV`ptغ.6xXeE7MpEA%gps8-H>s1;@)lL4]ӨAGR1'.ްWx@[ WW "kY[ t\F/itj PYa7:|[BWIΓ[(JUӛrkcØijpK\|jk5`MV|r1Qщ@^dG~ ]G漣ooKo<9dw9EZعS+o*ݗLqО՛~D?'፪cm `#`Caz{Xk%Mm/s9AbMmjMQWb^aPBkwAmL_l xYEsJDF~L\BwWuKxyhT{ACg4ulI<|Şɛ9v I\x @_04_KW8<b񧩭seI$cSlvby1l)cM0 ?g+/ ;'%黀@g#JNQэ=zhL5iCBk˫̇+(bLuB%nHfT UZrWecq0NU /?2 Pok֗Z81733oZX~nh|iPV~8<"yÍ!h?P|:~w#g9[n4 9CK_ &NGL  14 7rl m)潴b XuC Ө/0 l<BKX!1w?8QјLᑪ,h _zfI_p/|npk&|FW_v8t{:Xcb4[^k}Oձb woBEZZz:nJDOri$:ӓުLNWtf{Z;_N р[XRC9}&g>7z-w^ endstream endobj xref 0 22 0000000000 65535 f 0000000015 00000 n 0000000278 00000 n 0000000343 00000 n 0000000400 00000 n 0000003781 00000 n 0000003947 00000 n 0000014722 00000 n 0000014777 00000 n 0000014809 00000 n 0000015228 00000 n 0000015670 00000 n 0000016087 00000 n 0000016133 00000 n 0000016224 00000 n 0000016514 00000 n 0000016605 00000 n 0000016932 00000 n 0000017023 00000 n 0000017323 00000 n 0000020557 00000 n 0000024445 00000 n trailer << /ID [<85128482BB9A8BB909F565CBB49F5E9C> <85128482BB9A8BB909F565CBB49F5E9C>] /Info 1 0 R /Root 2 0 R /Size 22 >> startxref 26992 %%EOFeRm/inst/doc/eRmvig.bib0000744000176000001440000005070011572663323014471 0ustar ripleyusers@article{Ro:99, author = {J. 
Rost}, year = {1999}, TITLE = {Was ist aus dem Rasch-Modell geworden? [What Happened with the Rasch Model?]}, JOURNAL = {Psychologische Rundschau}, VOLUME = {50}, PAGES = {140--156} } @article{Scheib:72, author = {H. Scheiblechner}, year = {1972}, TITLE = {{Das Lernen und L\"osen komplexer Denkaufgaben. [The learning and solving of complex reasoning items.]}}, JOURNAL = {Zeitschrift f\"ur Experimentelle und Angewandte Psychologie}, VOLUME = {3}, PAGES = {456--506} } @article{And:78, author = {D. Andrich}, year = {1978}, TITLE = {A rating formulation for ordered response categories}, JOURNAL = {Psychometrika}, VOLUME = {43}, PAGES = {561--573} } @article{FiPa:91, author = {G. H. Fischer and P. Parzer}, year = {1991}, TITLE = {An extension of the rating scale model with an application to the measurement of change}, JOURNAL = {Psychometrika}, VOLUME = {56}, PAGES = {637--651} } @article{Mast:82, author = {G. N. Masters}, year = {1982}, TITLE = {A Rasch model for partial credit scoring}, JOURNAL = {Psychometrika}, VOLUME = {47}, PAGES = {149--174} } @article{FiPo:94, author = {G. H. Fischer and I. Ponocny}, year = {1994}, TITLE = {An extension of the partial credit model with an application to the measurement of change}, JOURNAL = {Psychometrika}, VOLUME = {59}, PAGES = {177--192} } @article{LeVe:86, author = {J. de Leeuw and N. Verhelst}, year = {1986}, TITLE = {Maximum likelihood estimation in generalized Rasch models}, JOURNAL = {Journal of educational statistics}, VOLUME = {11}, PAGES = {183--196} } @article{Ra:77, author = {G. Rasch}, year = {1977}, TITLE = {On specific objectivity: An attempt at formalising the request for generality and validity of scientific statements}, JOURNAL = {Danish Yearbook of Philosophy}, VOLUME = {14}, PAGES = {58--94} } @article{GlVe:89, author = {C. A. W. Glas and N. Verhelst}, year = {1989}, TITLE = {Extensions of the partial credit model}, JOURNAL = {Psychometrika}, VOLUME = {54}, PAGES = {635--659} } @article{Mi:85, author = {R. 
J. Mislevy}, year = {1985}, TITLE = {Estimation of latent group effects}, JOURNAL = {Journal of the American Statistical Association}, VOLUME = {80}, PAGES = {993--997} } @article{Li:94, author = {M. Liou}, year = {1994}, TITLE = {More on the computation of higher-order derivatives of the elementary symmetric functions in the Rasch model}, JOURNAL = {Applied Psychological Measurement}, VOLUME = {18}, PAGES = {53--62} } @article{And:72, author = {E. B. Andersen}, year = {1972}, TITLE = {The numerical solution of a set of conditional estimation equations}, JOURNAL = {Journal of the Royal Statistical Society, Series B}, VOLUME = {34}, PAGES = {42--54} } @article{And:73, author = {E. B. Andersen}, year = {1973}, TITLE = {A goodness of fit test for the Rasch model}, JOURNAL = {Psychometrika}, VOLUME = {38}, PAGES = {123--140} } @article{Fisch:73, author = {G. H. Fischer}, year = {1973}, TITLE = {The linear logistic test model as an instrument in educational research}, JOURNAL = {Acta Psychologica}, VOLUME = {37}, PAGES = {359--374} } @article{Riz:06, author = {D. Rizopoulos}, year = {2006}, TITLE = {\pkg{ltm}: An \proglang{R} package for latent variable modeling and item response theory analyses}, JOURNAL = {Journal of Statistical Software}, VOLUME = {17}, number = {5}, pages = {1-25}, url = {http://www.jstatsoft.org/v17/i05/} } @article{Bor:06, author = {D. Borsboom}, year = {2006}, TITLE = {The attack of the psychometricians}, JOURNAL = {Psychometrika}, VOLUME = {71}, PAGES = {425--440} } @article{Kub:05, author = {K. D. Kubinger}, year = {2005}, TITLE = {Psychological test calibration using the Rasch model: Some critical suggestions on traditional approaches}, JOURNAL = {International Journal of Testing}, VOLUME = {5}, PAGES = {377--394} } @article{CAnd:07, author = {C. Anderson and Z. Li and J. 
Vermunt}, year = {2007}, TITLE = {Estimation of models in the Rasch family for polytomous items and multiple latent variables}, JOURNAL = {Journal of Statistical Software}, VOLUME = {20}, number = {6}, PAGES = {}, url = {http://www.jstatsoft.org/v20/i06/} } @BOOK{Ra:60, AUTHOR = {Rasch, G.}, YEAR = {1960}, TITLE = {Probabilistic Models for some Intelligence and Attainment Tests}, PUBLISHER = {Danish Institute for Educational Research}, EDITION = {}, ADDRESS = {Copenhagen} } @BOOK{Fisch:74, AUTHOR = {Fischer, G. H.}, YEAR = {1974}, TITLE = {Einf\"uhrung in die Theorie psychologischer Tests [Introduction to Mental Test Theory]}, PUBLISHER = {Huber}, EDITION = {}, ADDRESS = {Bern} } @BOOK{BaKi:04, AUTHOR = {Baker, F. B. and Kim, S.}, YEAR = {2004}, TITLE = {Item Response Theory: Parameter Estimation Techniques}, PUBLISHER = {Dekker}, EDITION = {2nd}, ADDRESS = {New York} } @BOOK{FiPS:98, AUTHOR = {Fischer, G. H. and Ponocny-Seliger, E.}, YEAR = {1998}, TITLE = {Structural Rasch Modeling: Handbook of the Usage of LPCM-WIN 1.0}, PUBLISHER = {ProGAMMA}, EDITION = {}, ADDRESS = {Groningen} } @INCOLLECTION{Ra:61, AUTHOR = {Rasch, G.}, YEAR = {1961}, TITLE = {On General Laws and the Meaning of Measurement in Psychology.}, BOOKTITLE = {Proceedings of the IV. Berkeley Symposium on Mathematical Statistics and Probability, Vol. IV}, PAGES = {321--333}, EDITOR = {}, PUBLISHER = {University of California Press}, ADDRESS = {Berkeley} } @INCOLLECTION{Fisch:95a, AUTHOR = {Fischer, G. H.}, YEAR = {1995}, TITLE = {Derivations of the Rasch Model}, BOOKTITLE = {Rasch Models: Foundations, Recent Developments, and Applications}, PAGES = {15--38}, EDITOR = {G.H. Fischer and I.W. Molenaar}, PUBLISHER = {Springer}, ADDRESS = {New York} } @INCOLLECTION{Fisch:95b, AUTHOR = {Fischer, G. H.}, YEAR = {1995}, TITLE = {Linear Logistic Models for Change}, BOOKTITLE = {Rasch Models: Foundations, Recent Developments, and Applications}, PAGES = {157--180}, EDITOR = {G.H. Fischer and I.W. 
Molenaar}, PUBLISHER = {Springer}, ADDRESS = {New York} } @INCOLLECTION{Linacre:2004, AUTHOR = {Linacre, J. M.}, YEAR = {2004}, TITLE = {Estimation Methods for \uppercase{R}asch Measures}, BOOKTITLE = {Introduction to \uppercase{R}asch Measurement}, PAGES = {25--48}, EDITOR = {E. V. {Smith Jr.} and R. M. Smith}, PUBLISHER = {JAM Press}, ADDRESS = {Maple Grove, MN} } @INCOLLECTION{And:95, AUTHOR = {Andersen, E. B.}, YEAR = {1995}, TITLE = {Polytomous Rasch Models and their Estimation}, BOOKTITLE = {Rasch models: Foundations, recent developments, and applications}, PAGES = {271--292}, EDITOR = {G.H. Fischer and I.W. Molenaar}, PUBLISHER = {Springer}, ADDRESS = {New York} } @INCOLLECTION{Molenaar:1995, AUTHOR = {Molenaar, I.}, YEAR = {1995}, TITLE = {Estimation of Item Parameters}, BOOKTITLE = {Rasch models: Foundations, recent developments, and applications}, PAGES = {39--51}, EDITOR = {G.H. Fischer and I.W. Molenaar}, PUBLISHER = {Springer}, ADDRESS = {New York} } @article{Bock+Aitkin:1981, author = {R. D. Bock and M. Aitkin}, year = {1981}, TITLE = {Marginal maximum likelihood estimation of item parameters: an application of an \uppercase{EM} algorithm}, JOURNAL = {Psychometrika}, VOLUME = {46}, PAGES = {443--459} } @article{Haberman:77, author = {S. J. Haberman}, year = {1977}, TITLE = {Maximum likelihood estimates in exponential response models}, JOURNAL = {The Annals of Statistics}, VOLUME = {5}, PAGES = {815--841} } @article{Wright+Panchapakesan:1969, author = {B. D. Wright and N. Panchapakesan}, year = {1969}, TITLE = {A procedure for sample-free item analysis}, JOURNAL = {Educational and Psychological measurement}, VOLUME = {29}, PAGES = {23--48} } @BOOK{Wright+Masters:1982, AUTHOR = {Wright, B. D. and Masters, G. 
N.}, YEAR = {1982}, TITLE = {Rating scale analysis: \uppercase{R}asch measurement}, PUBLISHER = {Mesa Press}, EDITION = {}, ADDRESS = {Chicago} } @BOOK{Andrich:88, AUTHOR = {Andrich, D.}, YEAR = {1988}, TITLE = {Rasch Models for Measurement (Sage University paper series on quantitative applications in the social sciences)}, PUBLISHER = {Sage}, EDITION = {}, ADDRESS = {Newbury Park, CA} } @INCOLLECTION{FiJr:92, AUTHOR = {Fisher Jr., W. P.}, YEAR = {1992}, TITLE = {Objectivity in Measurement: A Philosophical History of \uppercase{R}asch's Separability Theorem}, BOOKTITLE = {Objective Measurement: Theory into Practice, Volume 1}, PAGES = {29--60}, EDITOR = {M. Wilson}, PUBLISHER = {Ablex}, ADDRESS = {Norwood, NJ} } @INCOLLECTION{Rost:2000, AUTHOR = {Rost, J.}, YEAR = {2000}, TITLE = {The Growing Family of \uppercase{R}asch Models}, BOOKTITLE = {Essays on item response theory}, PAGES = {25--42}, EDITOR = {A. Boomsma and M.A.J. van Duijn and T.A.B. Snijders}, PUBLISHER = {Springer}, ADDRESS = {New York} } @article{Fischer:1987, author = {G. H. Fischer}, title = {Applying the principles of specific objectivity and of generalizability to the measurement of change}, year = {1987}, journal = {Psychometrika}, volume = {52}, pages = {565-587}, } @BOOK{Davier:1998, AUTHOR = {{von Davier}, M.}, YEAR = {1998}, TITLE = {\uppercase{WINMIRA}: A \uppercase{W}indows program for mixed \uppercase{R}asch models}, PUBLISHER = {IPN}, EDITION = {}, ADDRESS = {Kiel} } @INCOLLECTION{Kubinger:1989, AUTHOR = {Kubinger, K. D.}, YEAR = {1989}, TITLE = {Aktueller \uppercase{S}tand und kritische \uppercase{W}\"urdigung der \uppercase{P}robabilistischen \uppercase{T}esttheorie. [\uppercase{C}urrent status and critical appreciation of probabilistic test theory]}, BOOKTITLE = {Moderne \uppercase{T}esttheorie: Ein Abriss samt neuesten Beitr\"agen}, PAGES = {19--83}, EDITOR = {K.D. Kubinger}, PUBLISHER = {Beltz}, ADDRESS = {Weinheim} } @INCOLLECTION{Glas:1992, AUTHOR = {Glas, C. A. 
W.}, YEAR = {1992}, TITLE = {A Rasch Model with a Multivariate Distribution of Ability}, BOOKTITLE = {Objective Measurement: Theory into Practice, Volume 1}, PAGES = {236--258}, EDITOR = {M. Wilson}, PUBLISHER = {Ablex}, ADDRESS = {Norwood, NJ} } @INCOLLECTION{Ho:95, AUTHOR = {Hoijtink, H.}, YEAR = {1995}, TITLE = {Linear and Repeated Measures Models for the Person Parameter}, BOOKTITLE = {Rasch Models: Foundations, Recent Developments, and Applications}, PAGES = {203--214}, EDITOR = {G.H. Fischer and I.W. Molenaar}, PUBLISHER = {Springer}, ADDRESS = {New York} } @article{Fischer:1981, author = {G. H. Fischer}, year = {1981}, TITLE = {On the existence and uniqueness of maximum-likelihood estimates in the \uppercase{R}asch model}, JOURNAL = {Psychometrika}, VOLUME = {46}, PAGES = {59--77} } @INCOLLECTION{Fischer:1988, AUTHOR = {Fischer, G. H.}, YEAR = {1988}, TITLE = {Spezifische \uppercase{O}bjektvit\"at: \uppercase{E}ine wissenschaftstheoretische \uppercase{G}rundlage des \uppercase{R}asch-\uppercase{M}odells. [\uppercase{S}pecific objectivity: \uppercase{A}n epistemological foundation of the \uppercase{R}asch model.]}, BOOKTITLE = {Moderne Testtheorie}, PAGES = {87--111}, EDITOR = {K.D. Kubinger}, PUBLISHER = {Beltz}, ADDRESS = {Weinheim} } @INCOLLECTION{And:83, AUTHOR = {Andersen, E. B.}, YEAR = {1983}, TITLE = {A General Latent Structure Model for Contingency Table Data}, BOOKTITLE = {Principals of Modern Psychological Measurement}, PAGES = {117--138}, EDITOR = {H. Wainer and S. Messik}, PUBLISHER = {Erlbaum}, ADDRESS = {Hillsdale, NJ} } @article{Andersen:1970, author = {E. B. Andersen}, year = {1970}, TITLE = {Asymptotic properties of conditional maximum likelihood estimators}, JOURNAL = {Journal of the Royal Statistical Society, Series B}, VOLUME = {32}, PAGES = {283--301} } @INCOLLECTION{Glas+Verhelst:1995b, AUTHOR = {Glas, C. A. W. 
and Verhelst, N.}, YEAR = {1995}, TITLE = {Tests of Fit for Polytomous \uppercase{R}asch Models}, BOOKTITLE = {Rasch Models: Foundations, Recent Developments, and Applications}, PAGES = {325--352}, EDITOR = {G.H. Fischer and I.W. Molenaar}, PUBLISHER = {Springer}, ADDRESS = {New York} } @BOOK{deBoeck+Wilson:2004, AUTHOR = {{de Boeck}, P. and Wilson, M.}, YEAR = {2004}, TITLE = {Explanatory item response models: A generalized linear and nonlinear approach}, PUBLISHER = {Springer}, EDITION = {}, ADDRESS = {New York} } @article{VedB:01, author = {T. Verguts and P. {De Boeck}}, year = {2001}, TITLE = {Some \uppercase{M}antel-\uppercase{H}aenszel tests of \uppercase{R}asch model assumptions}, JOURNAL = {British Journal of Mathematical and Statistical Psychology}, VOLUME = {54}, PAGES = {21--37} } @INCOLLECTION{Glas+Verhelst:1995a, AUTHOR = {Glas, C. A. W. and Verhelst, N.}, YEAR = {1995}, TITLE = {Testing the \uppercase{R}asch model}, BOOKTITLE = {Rasch Models: Foundations, Recent Developments, and Applications}, PAGES = {69--96}, EDITOR = {G.H. Fischer and I.W. Molenaar}, PUBLISHER = {Springer}, ADDRESS = {New York} } @INCOLLECTION{Sm:04, AUTHOR = {Smith, R. M.}, YEAR = {2004}, TITLE = {Fit Analysis in Latent Trait Measurement Models.}, BOOKTITLE = {Introduction to Rasch Measurement}, PAGES = {73--92}, EDITOR = {E. S. Smith and R. M. Smith}, PUBLISHER = {JAM Press}, ADDRESS = {Maple Grove, MN} } @INCOLLECTION{Fisch:77, AUTHOR = {Fischer, G. H:}, YEAR = {1977}, TITLE = {Linear Logistic Trait Models: Theory and Application}, BOOKTITLE = {Structural Models of Thinking and Learning}, PAGES = {203--225}, EDITOR = {H. Spada and W. F. Kempf}, PUBLISHER = {Huber}, ADDRESS = {Bern} } @INCOLLECTION{RoDa:95, AUTHOR = {Rost, J. and von Davier, M.}, YEAR = {1995}, TITLE = {Polytomous Mixed Rasch Models}, BOOKTITLE = {Rasch Models: Foundations, Recent Developments, and Applications}, PAGES = {371--382}, EDITOR = {G.H. Fischer and I.W. 
Molenaar}, PUBLISHER = {Springer}, ADDRESS = {New York} } @INCOLLECTION{Verhelst+Glas:1995, AUTHOR = {N. Verhelst and C. A. W. Glas}, YEAR = {1995}, TITLE = {The one parameter logistic test model}, BOOKTITLE = {Rasch Models: Foundations, Recent Developments, and Applications}, PAGES = {215--238}, EDITOR = {G.H. Fischer and I.W. Molenaar}, PUBLISHER = {Springer}, ADDRESS = {New York} } @INCOLLECTION{Pf:94, AUTHOR = {Pfanzagl, J.}, YEAR = {1994}, TITLE = {On Item Parameter Estimation in Certain Latent Trait Models}, BOOKTITLE = {Contributions to Mathematical Psychology, Psychometrics, and Methodology}, PAGES = {249--263}, EDITOR = {G.H. Fischer and D. Laming}, PUBLISHER = {Springer}, ADDRESS = {New York} } @article{Gustafsson:1980, author = {J. Gustafsson}, year = {1980}, TITLE = {Testing and obtaining fit of data to the \uppercase{R}asch model}, JOURNAL = {British Journal of Mathematical and Statistical Psychology}, VOLUME = {33}, PAGES = {205--233} } @Manual{R:06, title = {R: A Language and Environment for Statistical Computing}, author = {{R Development Core Team}}, organization = {R Foundation for Statistical Computing}, address = {Vienna, Austria}, year = 2007, note = {{ISBN} 3-900051-07-0}, url = {http://www.R-project.org} } @article{Mair+Hatzinger:2007, author = {P. Mair and R. Hatzinger}, year = {2007}, TITLE = {Extended \uppercase{R}asch Modeling: The e\uppercase{R}m package for the application of \uppercase{IRT} models in \uppercase{R}}, JOURNAL = {Journal of Statistical Software}, VOLUME = {20(9)}, PAGES = {1--20} } @article{Warm:1989, author = {T. A. Warm}, year = {1989}, TITLE = {Weighted likelihood estimation of ability in item response theory}, JOURNAL = {Psychometrika}, VOLUME = {54}, PAGES = {427--450} } @article{Ponocny:2001, author = {I. 
Ponocny}, year = {2001}, TITLE = {Nonparametric goodness-of-fit tests for the \uppercase{R}asch model.}, JOURNAL = {Psychometrika}, VOLUME = {66}, PAGES = {437--460} } @INCOLLECTION{Birnbaum:1968, AUTHOR = {Birnbaum, A.}, YEAR = {1968}, TITLE = {Some latent trait models and their use in inferring an examinee's ability}, BOOKTITLE = {Statistical theories of mental test scores}, PAGES = {395--479}, EDITOR = {F. M. Lord and M. R. Novick}, PUBLISHER = {Addison-Wesley}, ADDRESS = {Reading, MA} } @article{Verhelst+Hatzinger+Mair:2007, author = {N. Verhelst and R. Hatzinger and P. Mair}, year = {2007}, TITLE = {The \uppercase{R}asch sampler}, JOURNAL = {Journal of Statistical Software}, VOLUME = {20(4)}, PAGES = {1--14} } @article{FiSch:70, author = {G. H. Fischer and H. H. Scheiblechner}, year = {1970}, TITLE = {Algorithmen und \uppercase{P}rogramme f\"ur das probabilistische \uppercase{T}estmodell von \uppercase{R}asch. [\uppercase{A}lgorithms and programs for \uppercase{R}asch's probabilistic test model.]}, JOURNAL = {Psychologische Beitr\"age}, VOLUME = {12}, PAGES = {23--51} } @article{Suarez+Glas:2003, author = {J. C. Su\'arez-Falc\'on and C. A. W. Glas}, year = {2003}, TITLE = {Evaluation of global testing procedures for item fit to the \uppercase{R}asch model.}, JOURNAL = {British Journal of Mathematical and Statistical Society}, VOLUME = {56}, PAGES = {127--143} } @article{Adams+Wilson+Wang:1997, author = {R. J. Adams and M. Wilson and W. C. Wang}, year = {1997}, TITLE = {The multidimensional random coefficients multinomial logit model}, JOURNAL = {Applied Psychological Measurement}, VOLUME = {21}, PAGES = {1--23} } @article{Jannarone:1986, author = {R. J. Jannarone}, year = {1986}, TITLE = {Conjunctive item response theory model kernels}, JOURNAL = {Psychometrika}, VOLUME = {51}, PAGES = {357--373} } @article{, author = {}, year = {}, TITLE = {}, JOURNAL = {}, VOLUME = {}, PAGES = {} } @article{Mair+Hatzinger:2007b, author = {P. Mair and R. 
Hatzinger}, year = {2007}, TITLE = {\uppercase{CML} based estimation of extended \uppercase{R}asch models with the e\uppercase{R}m package in \uppercase{R}}, JOURNAL = {Psychology Science}, VOLUME = {49}, PAGES = {26--43} } @INCOLLECTION{Hoijtink+Boomsma:1995, AUTHOR = {H. Hoijtink and A. Boomsma}, YEAR = {1995}, TITLE = {On person parameter estimation in the dichotomous \uppercase{R}asch model}, BOOKTITLE = {Rasch Models: Foundations, Recent Developments, and Applications}, PAGES = {53--68}, EDITOR = {G.H. Fischer and I.W. Molenaar}, PUBLISHER = {Springer}, ADDRESS = {New York} } @BOOK{Poinstingl+Mair+Hatzinger:07, AUTHOR = {Poinstingl, H. and Mair, P. and Hatzinger, R.}, YEAR = {2007}, TITLE = {Manual zum \uppercase{S}oftwarepackage e\uppercase{R}m: Anwendung des \uppercase{R}asch-\uppercase{M}odells}, PUBLISHER = {Pabst Science Publishers}, EDITION = {}, ADDRESS = {Lengerich} } @MastersThesis{Mair:2006, Author = {P. Mair}, School = {Department of Psychology, University of Vienna}, Title = {Simulation Studies for Goodness-of-Fit Statistics in Item Response Theory}, Year = {2006} } @INCOLLECTION{, AUTHOR = {}, YEAR = {}, TITLE = {}, BOOKTITLE = {}, PAGES = {}, EDITOR = {}, PUBLISHER = {}, ADDRESS = {} } @BOOK{, AUTHOR = {}, YEAR = {}, TITLE = {}, PUBLISHER = {}, EDITION = {}, ADDRESS = {} }eRm/inst/doc/eRm.Rnw0000744000176000001440000017356111572663323014010 0ustar ripleyusers%\VignetteIndexEntry{eRm Basics} \documentclass[article]{Z} \usepackage{amsmath, thumbpdf} \usepackage{Sweave} \usepackage{graphicx} \author{Patrick Mair\\Wirtschaftsuniversit\"at Wien \And Reinhold Hatzinger\\Wirtschaftsuniversit\"at Wien} \Plainauthor{Patrick Mair, Reinhold Hatzinger} \title{Extended Rasch Modeling: The R Package \pkg{eRm}} \Plaintitle{Extended Rasch Modeling: The R Package eRm} \Shorttitle{The R Package \pkg{eRm}} \Abstract{ This package vignette is an update of the \pkg{eRm} papers by published in a special issue on Psychometrics in the Journal of Statistical Software and 
in Psychology Science \citep{Mair+Hatzinger:2007, Mair+Hatzinger:2007b}. Since the publication of these papers various extensions and additional features have been incorporated into the package. We start with a methodological introduction to extended Rasch models followed by a general program description and application topics. The package allows for the computation of simple Rasch models, rating scale models, partial credit models and linear extensions of these. The incorporation of such linear structures allows for modeling the effects of covariates and enables the analysis of repeated categorical measurements. The item parameter estimation is performed by means of CML, for the person parameters we use ordinary ML. The estimation routines work for incomplete data matrices as well. Based on these estimators, item-wise and global goodness-of-fit statistics are described and various plots are presented. } \Keywords{eRm package, Rasch model, LLTM, RSM, LRSM, PCM, LPCM, CML estimation} %\Volume{20} %\Issue{9} %\Month{April} %\Year{2007} %FIXME% %% \Submitdate{2004-06-21} %% \Acceptdate{2004-12-04} %\Address{ % Patrick Mair\\ % Department f\"ur Statistik und Mathematik\\ % Wirtschaftsuniversit\"at Wien\\ % A-1090 Wien, Austria\\ % E-mail: \email{patrick.mair@wu-wien.ac.at}\\ % URL: \url{http://statmath.wu-wien.ac.at/~mair/} %} \begin{document} \section{Introduction} \citet{Ro:99} claimed in his article that ``even though the Rasch model has been existing for such a long time, 95\% of the current tests in psychology are still constructed by using methods from classical test theory" (p. 140). Basically, he quotes the following reasons why the Rasch model (RM) is being rarely used: The Rasch model in its original form \citep{Ra:60}, which was limited to dichotomous items, is arguably too restrictive for practical testing purposes. Thus, researchers should focus on extended Rasch models. 
In addition, Rost argues that there is a lack of user-friendly software for the computation of such models. Hence, there is a need for a comprehensive, user-friendly software routine. Corresponding recent discussions can be found in \citet{Kub:05} and \citet{Bor:06}. In addition to the RM, the models that can be computed by means of the \pkg {eRm} package are: the linear logistic test model \citep{Scheib:72}, the rating scale model \citep{And:78}, the linear rating scale model \citep{FiPa:91}, the partial credit model \citep{Mast:82}, and the linear partial credit model \citep{GlVe:89,FiPo:94}. These models and their main characteristics are presented in Section \ref{sec:erm}. Concerning parameter estimation, these models have an important feature in common: Separability of item and person parameters. This implies that the item parameters $\mathbf{\beta}$ can be estimated without estimating the person parameters achieved by conditioning the likelihood on the sufficient person raw score. This conditional maximum likelihood (CML) approach is described in Section \ref{sec:cml}. Several diagnostic tools and tests to evaluate model fit are presented in Section \ref{Gof}. In Section \ref{sec:pack}, the corresponding implementation in \proglang{R} \citep{R:06} is described by means of several examples. The \pkg{eRm} package uses a design matrix approach which allows to reparameterize the item parameters to model common characteristics of the items or to enable the user to impose repeated measurement designs as well as group contrasts. By combining these types of contrasts one allows that the item parameter may differ over time with respect to certain subgroups. To illustrate the flexibility of the \pkg{eRm} package some examples are given to show how suitable design matrices can be constructed. 
%----------------- end introduction ---------------- \section{Extended Rasch models} \label{sec:erm} \subsection{General expressions} Briefly after the first publication of the basic Rasch Model \citep{Ra:60}, the author worked on polytomous generalizations which can be found in \citet{Ra:61}. \citet{And:95} derived the representations below which are based on Rasch's general expression for polytomous data. The data matrix is denoted as $\mathbf{X}$ with the persons in the rows and the items in the columns. In total there are $v=1,...,n$ persons and $i=1,...,k$ items. A single element in the data matrix $\mathbf{X}$ is indexed by $x_{vi}$. Furthermore, each item $I_i$ has a certain number of response categories, denoted by $h=0,...,m_i$. The corresponding probability of response $h$ on item $i$ can be derived in terms of the following two expressions \citep{And:95}: \begin{equation} \label{eq1} P(X_{vi}=h)=\frac{\exp[\phi_h(\theta_v+\beta_i)+\omega_h]}{\sum_{l=0}^{m_i} \exp[\phi_l (\theta_v+\beta_i)+\omega_l]} \end{equation} or \begin{equation} \label{eq2} P(X_{vi}=h)=\frac{\exp[\phi_h \theta_v+\beta_{ih}]}{\sum_{l=0}^{m_i} \exp[\phi_l \theta_v+\beta_{il}]}. \end{equation} Here, $\phi_h$ are scoring functions for the item parameters, $\theta_v$ are the uni-dimensional person parameters, and $\beta_i$ are the item parameters. In Equation \ref{eq1}, $\omega_h$ corresponds to category parameters, whereas in Equation \ref{eq2} $\beta_{ih}$ are the item-category parameters. The meaning of these parameters will be discussed in detail below. Within the framework of these two equations, numerous models have been suggested that retain the basic properties of the Rasch model so that CML estimation can be applied. \subsection{Representation of extended Rasch models} \label{Rep} For the ordinary Rasch model for dichotomous items, Equation \ref{eq1} reduces to \begin{equation} \label{eq:rasch} P(X_{vi}=1)=\frac{\exp(\theta_v - \beta_i)}{1+\exp(\theta_v-\beta_i)}. 
\end{equation} The main assumptions, which hold as well for the generalizations presented in this paper, are: uni-dimensionality of the latent trait, sufficiency of the raw score, local independence, and parallel item characteristic curves (ICCs). Corresponding explanations can be found, e.g., in \citet{Fisch:74} and mathematical derivations and proofs in \citet{Fisch:95a}. \begin{figure}[hbt] \centering \includegraphics[height=60mm, width=40mm]{modelhierarchy.pdf} \caption{\label{fig1} Model hierarchy} \end{figure} For dichotomous items, \citet{Scheib:72} proposed the (even more restricted) linear logistic test model (LLTM), later formalized by \citet{Fisch:73}, by splitting up the item parameters into the linear combination \begin{equation} \label{eq4} \beta_i=\sum_{j=1}^p w_{ij} \eta_j. \end{equation} \citet{Scheib:72} explained the dissolving process of items in a test for logics (``Mengenrechentest") by so-called ``cognitive operations" $\eta_j$ such as negation, disjunction, conjunction, sequence, intermediate result, permutation, and material. Note that the weights $w_{ij}$ for item $i$ and operation $j$ have to be fixed a priori. Further elaborations about the cognitive operations can be found in \citet[p.~361ff.]{Fisch:74}. Thus, from this perspective the LLTM is more parsimonious than the Rasch model. Though, there exists another way to look at the LLTM: A generalization of the basic Rasch model in terms of repeated measures and group contrasts. It should be noted that both types of reparameterization also apply to the linear rating scale model (LRSM) and the linear partial credit model (LPCM) with respect to the basic rating scale model (RSM) and the partial credit model (PCM) presented below. Concerning the LLTM, the possibility to use it as a generalization of the Rasch model for repeated measurements was already introduced by \citet{Fisch:74}. Over the intervening years this suggestion has been further elaborated. 
\citet{Fisch:95b} discussed certain design matrices which will be presented in Section \ref{sec:design} and on the basis of examples in Section \ref{sec:pack}. At this point we will focus on a simple polytomous generalization of the Rasch model, the RSM \citep{And:78}, where each item $I_i$ must have the same number of categories. Pertaining to Equation \ref{eq1}, $\phi_h$ may be set to $h$ with $h=0,...,m$. Since in the RSM the number of item categories is constant, $m$ is used instead of $m_i$. Hence, it follows that \begin{equation} \label{eq5} P(X_{vi}=h)=\frac{\exp[h(\theta_v+\beta_i)+\omega_h]}{\sum_{l=0}^m \exp[l(\theta_v+ \beta_i)+\omega_l]}, \end{equation} with $k$ item parameters $\beta_1,...,\beta_k$ and $m+1$ category parameters $\omega_0,...,\omega_m$. This parameterization causes a scoring of the response categories $C_h$ which is constant over the single items. Again, the item parameters can be split up in a linear combination as in Equation \ref{eq4}. This leads to the LRSM proposed by \citet{FiPa:91}. Finally, the PCM developed by \citet{Mast:82} and its linear extension, the LPCM \citep{FiPo:94}, are presented. The PCM assigns one parameter $\beta_{ih}$ to each $I_i \times C_h$ combination for $h=0,...,m_i$. Thus, the constant scoring property must not hold over the items and in addition, the items can have different numbers of response categories denoted by $m_i$. Therefore, the PCM can be regarded as a generalization of the RSM and the probability for a response of person $v$ on category $h$ (item $i$) is defined as \begin{equation} \label{eq6} P(X_{vih}=1)=\frac{\exp[h\theta_v + \beta_{ih}]}{\sum_{l=0}^{m_i}\exp[l\theta_v + \beta_{il}]}. \end{equation} It is obvious that (\ref{eq6}) is a simplification of (\ref{eq2}) in terms of $\phi_h = h$. As for the LLTM and the LRSM, the LPCM is defined by reparameterizing the item parameters of the basic model, i.e., \begin{equation} \label{eq:lpcmeta} \beta_{ih}=\sum_{j=1}^p w_{ihj}\eta_j. 
\end{equation} These six models constitute a hierarchical order as displayed in Figure \ref{fig1}. This hierarchy is the base for a unified CML approach presented in the next section. It is outlined again that the linear extension models can be regarded either as generalizations or as more restrictive formulations pertaining to the underlying base model. The hierarchy for the basic model is straightforward: The RM allows only items with two categories, thus each item is represented by one parameter $\beta_i$. The RSM allows for more than two (ordinal) categories each represented by a category parameter $\omega_h$. Due to identifiability issues, $\omega_0$ and $\omega_1$ are restricted to 0. Hence, the RM can be seen as a special case of the RSM whereas, the RSM in turn, is a special case of the PCM. The latter model assigns the parameter $\beta_{ih}$ to each $I_i \times C_h$ combination. To conclude, the most general model is the LPCM. All other models can be considered as simplifications of Equation \ref{eq6} combined with Equation \ref{eq:lpcmeta}. As a consequence, once an estimation procedure is established for the LPCM, this approach can be used for any of the remaining models. This is what we quote as \textit{unified CML approach}. The corresponding likelihood equations follow in Section \ref{sec:cml}. \subsection{The concept of virtual items} \label{sec:design} When operating with longitudinal models, the main research question is whether an individual's test performance changes over time. The most intuitive way would be to look at the shift in ability $\theta_v$ across time points. Such models are presented e.g. in \citet{Mi:85}, \citet{Glas:1992}, and discussed by \citet{Ho:95}. Yet there exists another look onto time dependent changes, as presented in \citet[p~158ff.]{Fisch:95b}: The person parameters are fixed over time and instead of them the item parameters change. 
The basic idea is that one item $I_i$ that is presented at two different times to the same person $S_v$ is regarded as a pair of \textit{virtual items}. Within the framework of extended Rasch models, any change in $\theta_v$ occurring between the testing occasions can be described without loss of generality as a change of the item parameters, instead of describing change in terms of the person parameter. Thus, with only two measurement points, $I_i$ with the corresponding parameter $\beta_i$ generates two virtual items $I_r$ and $I_s$ with associated item parameters $\beta^{\ast}_r$ and $\beta^{\ast}_s$. For the first measurement point $\beta^{\ast}_r=\beta_i$, whereas for the second $\beta^{\ast}_s=\beta_i+\tau$. In this linear combination the $\beta^{\ast}$-parameters are composed additively by means of the real item parameters $\beta$ and the treatment effects $\tau$. This concept extends to an arbitrary number of time points or testing occasions. Correspondingly, for each measurement point $t$ we have a vector of \textit{virtual item parameters} $\boldsymbol{\beta}^{\ast(t)}$ of length $k$. These are linear reparameterizations of the original $\boldsymbol{\beta}^{(t)}$, and thus the CML approach can be used for estimation. In general, for a simple LLTM with two measurement points the design matrix $\boldsymbol{W}$ is of the form as given in Table \ref{tab1}. 
\begin{table} \centering \[ \begin{array}{c|c|rrrr|r} & & \eta_1 & \eta_2 & \hdots & \eta_k & \eta_{k+1}\\ \hline \textrm{Time 1} & \beta_1^{\ast(1)} & 1 & 0 & 0 & 0 & 0\\ & \beta_2^{\ast(1)} & 0 & 1 & 0 & 0 & 0\\ & \vdots & & & \ddots& & \vdots\\ & \beta_{k}^{\ast(1)} & 0 & 0 & 0 & 1 & 0\\ \hline \textrm{Time 2} & \beta_{k+1}^{\ast(2)} & 1 & 0 & 0 & 0 & 1\\ & \beta_{k+2}^{\ast(2)} & 0 & 1 & 0 & 0 & 1\\ & \vdots & & & \ddots& & \vdots\\ & \beta_{2k}^{\ast(2)} & 0 & 0 & 0 & 1 & 1\\ \end{array} \] \caption{\label{tab1}A design matrix for an LLTM with two timepoints.} \end{table} The parameter vector $\boldsymbol{\beta}^{\ast(1)}$ represents the item parameters for the first test occasion, $\boldsymbol{\beta}^{\ast(2)}$ the parameters for the second occasion. It might be of interest whether these vectors differ. The corresponding trend contrast is $\eta_{k+1}$. Due to this contrast, the number of original $\beta$-parameters is doubled by introducing the $2k$ virtual item parameters. If we assume a constant shift for all item parameters, it is only necessary to estimate $\hat{\boldsymbol{\eta}}'=(\hat{\eta}_1,...,\hat{\eta}_{k+1})$ where $\hat{\eta}_{k+1}$ gives the amount of shift. This holds because, according to (\ref{eq4}), the vector $\hat{\boldsymbol{\beta}}^\ast$ is just a linear combination of $\hat{\boldsymbol{\eta}}$. As mentioned in the former section, when using models with linear extensions it is possible to impose group contrasts. By doing this, one allows the item difficulties to differ across subgroups. However, this is possible only for models with repeated measurements and virtual items since otherwise the introduction of a group contrast leads to overparameterization and the group effect cannot be estimated by using CML. Table \ref{tab2} gives an example for a repeated measurement design where the effect of a treatment is to be evaluated by comparing item difficulties regarding a control and a treatment group. 
The number of virtual parameters is doubled compared to the model matrix given in Table \ref{tab1}. \begin{table}[h] \centering \[ \begin{array}{c|c|c|rrrr|rrr} & & & \eta_1 & \eta_2 & \hdots & \eta_k & \eta_{k+1} & \eta_{k+2} \\ \hline \textrm{Time 1} & \textrm{Group 1} & \beta_1^{\ast(1)} & 1 & 0 & 0 & 0 & 0 & 0\\ & & \beta_2^{\ast(1)} & 0 & 1 & 0 & 0 & 0& 0\\ & & \vdots & & & \ddots& &\vdots &\vdots\\ & & \beta_{k}^{\ast(1)} & 1 & 0 & 0 & 1 & 0 & 0\\ \cline{2-9} & \textrm{Group 2} & \beta_{k+1}^{\ast(1)} & 1 & 0 & 0 & 0 & 0 & 0\\ & & \beta_{k+2}^{\ast(1)} & 0 & 1 & 0 & 0 & 0 & 0\\ & & \vdots & & & \ddots& &\vdots & \vdots\\ & & \beta_{2k}^{\ast(1)} & 1 & 0 & 0 & 1 & 0& 0\\ \hline \textrm{Time 2} & \textrm{Group 1} & \beta_1^{\ast(2)} & 1 & 0 & 0 & 0 & 1 & 0\\ & & \beta_2^{\ast(2)} & 0 & 1 & 0 & 0 & 1 & 0\\ & & \vdots & & & \ddots& &\vdots &\vdots\\ & & \beta_{k}^{\ast(2)} & 1 & 0 & 0 & 1 & 1 & 0\\ \cline{2-9} & \textrm{Group 2} & \beta_{k+1}^{\ast(2)} & 1 & 0 & 0 & 0 & 1 & 1\\ & & \beta_{k+2}^{\ast(2)} & 0 & 1 & 0 & 0 & 1 & 1\\ & & \vdots & & & \ddots& &\vdots & \vdots\\ & & \beta_{2k}^{\ast(2)} & 1 & 0 & 0 & 1 & 1 & 1\\ \end{array} \] \caption{\label{tab2} Design matrix for a repeated measurements design with treatment and control group.} \end{table} Again, $\eta_{k+1}$ is the parameter that refers to the time contrast, and $\eta_{k+2}$ is a group effect within measurement point 2. More examples are given in Section \ref{sec:pack} and further explanations can be found in \citet{Fisch:95b}, \citet{FiPo:94}, and in the software manual for the LPCM-Win program by \citet{FiPS:98}. By introducing the concept of virtual persons, \pkg{eRm} allows for the computation of the linear logistic test model with relaxed assumptions \citep[LLRA][]{Fisch:77}. Corresponding explanations will be given in a subsequent version of this vignette. 
%------------------------ end extended Rasch models -------------------------- \section{Estimation of item and person parameters} \label{sec:cml} \subsection{CML for item parameter estimation} The main idea behind the CML estimation is that the person's raw score $r_v=\sum_{i=1}^k x_{vi}$ is a sufficient statistic. Thus, by conditioning the likelihood onto $\boldsymbol{r}'=(r_1,...,r_n)$, the person parameters $\boldsymbol{\theta}$, which in this context are nuisance parameters, vanish from the likelihood equation, thus, leading to consistently estimated item parameters $\hat{\boldsymbol{\beta}}$. Some restrictions have to be imposed on the parameters to ensure identifiability. This can be achieved, e.g., by setting certain parameters to zero depending on the model. In the Rasch model one item parameter has to be fixed to 0. This parameter may be considered as baseline difficulty. In addition, in the RSM the category parameters $\omega_0$ and $\omega_1$ are also constrained to 0. In the PCM all parameters representing the first category, i.e. $\beta_{i0}$ with $i=1,\ldots,k$, and one additional item-category parameter, e.g., $\beta_{11}$ have to be fixed. For the linear extensions it holds that the $\beta$-parameters that are fixed within a certain condition (e.g. first measurement point, control group etc.) are also constrained in the other conditions (e.g. second measurement point, treatment group etc.). At this point, for the LPCM the likelihood equations with corresponding first and second order derivatives are presented (i.e. \textit{unified CML equations}). In the first version of the \pkg {eRm} package numerical approximations of the Hessian matrix are used. However, to ensure numerical accuracy and to speed up the estimation process, it is planned to implement the analytical solution as given below. 
The conditional log-likelihood equation for the LPCM is \begin{equation} \label{eq:cmll} \log L_c = \sum_{i=1}^k \sum_{h=1}^{m_i} x_{+ih} \sum_{j=1}^p w_{ihj} \eta_j - \sum_{r=1}^{r_{max}} n_r \log \gamma_r. \end{equation} The maximal raw score is denoted by $r_{max}$ whereas the number of subjects with the same raw score is denoted by $n_r$. Alternatively, by going down to an individual level, the last sum over $r$ can be replaced by $\sum_{v=1}^n \log \gamma_{r_v}$. It is straightforward to show that the LPCM, as well as the other extended Rasch models, defines an exponential family \citep{And:83}. Thus, the raw score $r_v$ is minimally sufficient for $\theta_v$ and the item totals $x_{.ih}$ are minimally sufficient for $\beta_{ih}$. Crucial expressions are the $\gamma$-terms which are known as \textit{elementary symmetric functions}. More details about these terms are given in the next section. However, in the \pkg{eRm} package the numerically stable \textit{summation algorithm} as suggested by \citet{And:72} is implemented. \citet{FiPo:94} adopted this algorithm for the LPCM and also devised the first order derivative of $\log L_c$: \begin{equation} \label{eq:dcml} \frac{\partial\log L_c}{\partial\eta_a} = \sum_{i=1}^k \sum_{h=1}^{m_i} w_{iha}\left(x_{+ih} - \epsilon_{ih} \sum_{r=1}^{r_{max}} n_r \frac{ \gamma_{r}^{(i)}}{\gamma_r}\right). \end{equation} It is important to mention that for the CML-representation, the multiplicative Rasch expression is used throughout equations \ref{eq1} to \ref{eq:lpcmeta}, i.e., $\xi_v=\exp(\theta_v)$ for the person parameters and $\epsilon_i=\exp(-\beta_i)$ for the item parameters. Therefore, $\epsilon_{ih}$ corresponds to the reparameterized item $\times$ category parameter with $\epsilon_{ih} > 0$. Furthermore, $\gamma_{r}^{(i)}$ are the first order derivatives of the $\gamma$-functions with respect to item $i$. The index $a$ in $\eta_a$ denotes the first derivative with respect to the $a^{th}$ parameter. 
For the second order derivative of $\log L_c$, two cases have to be distinguished: the derivatives for the off-diagonal elements and the derivatives for the main diagonal elements. The item categories with respect to the item index $i$ are coded with $h_i$, and those referring to item $l$ with $h_l$. The second order derivatives of the $\gamma$-functions with respect to items $i$ and $l$ are denoted by $\gamma_r^{(i,l)}$. The corresponding likelihood expressions are \begin{align} \label{eq:2dcml} \frac{\partial^2\log L_c}{\partial\eta_a \partial\eta_b} = & -\sum_{i=1}^k \sum_{h_i=1}^{m_i} w_{ih_ia}w_{ih_ib}\epsilon_{ih_i} \sum_{r=1}^{r_{max}} n_r \frac{\log \gamma_{r-h_i}}{\gamma_r}\\ & -\sum_{i=1}^k \sum_{h_i=1}^{m_i} \sum_{l=1}^k \sum_{h_l=1}^{m_l} w_{ih_ia}w_{lh_lb} \left[\epsilon_{ih_i} \epsilon_{lh_l} \left( \sum_{r=1}^{r_{max}} n_r \frac{\gamma_{r}^{(i)}\gamma_{r}^{(l)}}{\gamma_r^2} - \sum_{r=1}^{r_{max}} n_r \frac{\gamma_{r}^{(i,l)}}{\gamma_r}\right)\right] \notag \end{align} for $a\neq b$, and \begin{align} \label{eq:2dcmlab} \frac{\partial^2\log L_c}{\partial\eta_a^2} = & -\sum_{i=1}^k \sum_{h_i=1}^{m_i} w_{ih_ia}^2 \epsilon_{ih_i} \sum_{r=1}^{r_{max}} n_r \frac{\log \gamma_{r-h_i}}{\gamma_r}\\ & -\sum_{i=1}^k \sum_{h_i=1}^{m_i} \sum_{l=1}^k \sum_{h_l=1}^{m_l} w_{ih_ia}w_{lh_la}\epsilon_{ih_i} \epsilon_{lh_l}\sum_{r=1}^{r_{max}} n_r \frac{\gamma_{r-h_i}^{(i)}\gamma_{r-h_l}^{(l)}}{\gamma_r^2} \notag \end{align} for $a=b$. To solve the likelihood equations with respect to $\mathbf{\hat{\eta}}$, a Newton-Raphson algorithm is applied. The update within each iteration step $s$ is performed by \begin{equation} \label{eq:iter} \boldsymbol{\hat{\eta}}_s=\boldsymbol{\hat{\eta}}_{s-1}-\mathbf{H}_{s-1}^{-1} \boldsymbol{\delta}_{s-1}. \end{equation} The starting values are $\boldsymbol{\hat{\eta}}_0=\mathbf{0}$. 
$\mathbf{H}_{s-1}^{-1}$ is the inverse of the Hessian matrix composed by the elements given in Equation \ref{eq:2dcml} and \ref{eq:2dcmlab} and $\boldsymbol{\delta}_{s-1}$ is the gradient at iteration $s-1$ as specified in Equation \ref{eq:dcml}. The iteration stops if the likelihood difference $\left|\log L_c^{(s)} - \log L_c^{(s-1)} \right|\leq \varphi$ where $\varphi$ is a predefined (small) iteration limit. Note that in the current version (\Sexpr{packageDescription("eRm", fields = "Version")}) $\mathbf{H}$ is approximated numerically by using the \pkg{nlm} Newton-type algorithm provided in the \pkg{stats} package. The analytical solution as given in Equation \ref{eq:2dcml} and \ref{eq:2dcmlab} will be implemented in the subsequent version of \pkg{eRm}. \subsection{Mathematical properties of the CML estimates} \label{sec:mpcml} A variety of estimation approaches for IRT models in general and for the Rasch model in particular are available: The \emph{joint maximum likelihood} (JML) estimation as proposed by \citet{Wright+Panchapakesan:1969} which is not recommended since the estimates are not consistent \citep[see e.g.][]{Haberman:77}. The basic reason for that is that the person parameters $\boldsymbol{\theta}$ are nuisance parameters; the larger the sample size, the larger the number of parameters. A well-known alternative is the \emph{marginal maximum likelihood} (MML) estimation \citep{Bock+Aitkin:1981}: A distribution $g(\theta)$ for the person parameters is assumed and the resulting situation corresponds to a mixed-effects ANOVA: Item difficulties can be regarded as fixed effects and person abilities as random effects. Thus, IRT models fit into the framework of \emph{generalized linear mixed models} (GLMM) as elaborated in \citet{deBoeck+Wilson:2004}. By integrating over the ability distribution the random nuisance parameters can be removed from the likelihood equations. This leads to consistent estimates of the item parameters. 
Further discussions of the MML approach with respect to the CML method will follow. For the sake of completeness, some other methods for the estimation of the item parameters are the following: \citet{CAnd:07} propose a Pseudo-ML approach, \citet{Molenaar:1995} and \citet{Linacre:2004} give an overview of various (heuristic) non-ML methods, Bayesian techniques can be found in \citet[Chapter 7]{BaKi:04}, and for nonparametric approaches we refer to \citet{LeVe:86}. However, back to CML, the main idea behind this approach is the assumption that the raw score $r_v$ is a minimal sufficient statistic for $\theta_v$. Starting from the equivalent multiplicative expression of Equation \ref{eq1} with $\xi_v=\exp(\theta_v)$ and $\epsilon_i=\exp(-\beta_i)$, i.e., \begin{equation} \label{eq7} P(X_{vi}=1)=\frac{\xi_v \epsilon_i}{1+\xi_v \epsilon_i}, \end{equation} the following likelihood for the response pattern $\boldsymbol{x}_v$ for a certain subject $v$ results: \begin{equation} \label{eq8} P(\boldsymbol{x}_v|\xi_v,\boldsymbol{\epsilon})=\prod_{i=1}^k \frac{(\xi_v \epsilon_i)^{x_{vi}}}{1+\xi_v \epsilon_i}= \frac{{\xi_v}^{r_v} \prod_{i=1}^k {\epsilon_i}^{x_{vi}}}{\prod_{i=1}^k (1+\xi_v \epsilon_i)}. \end{equation} Using the notation $\boldsymbol{y}=(y_1,\ldots ,y_k)$ for all possible response patterns with $\sum_{i=1}^k y_i=r_v$, the probability for a fixed raw score $r_v$ is \begin{equation} \label{eq9} P(r_v|\xi_v,\boldsymbol{\epsilon})=\sum_{\boldsymbol{y}|r_v} \prod_{i=1}^k \frac{(\xi_v \epsilon_i)^{y_{i}}}{1+\xi_v \epsilon_i}=\frac{{\xi_v}^{r_v} \sum_{\boldsymbol{y}|r_v} \prod_{i=1}^k {\epsilon_i}^{y_{i}}}{\prod_{i=1}^k (1+\xi_v \epsilon_i)}. 
\end{equation} The crucial term with respect to numerical solutions of the likelihood equations is the second term in the numerator: \begin{equation} \label{eq:gamma} \gamma_r(\boldsymbol{\epsilon}) \equiv \sum_{\boldsymbol{y}|r} \prod_{i=1}^k {\epsilon_i}^{y_{i}} \end{equation} These are the \emph{elementary symmetric functions} (of order $r$). An overview of efficient computational algorithms and corresponding simulation studies can be found in \citet{Li:94}. The \pkg{eRm} package uses the summation algorithm as proposed by \citet{And:72}. Finally, by collecting the different raw scores into the vector $\boldsymbol{r}$ the conditional probability of observing response pattern $\boldsymbol{x}_v$ with given raw score $r_v$ is \begin{equation} \label{eq:xraw} P(\boldsymbol{x}_v|r_v,\boldsymbol{\epsilon})=\frac{P(\boldsymbol{x}_v|\xi_v,\boldsymbol{\epsilon})}{P(r_v|\xi_v,\boldsymbol{\epsilon})} \,. \end{equation} By taking the product over the persons (independence assumption), the (conditional) likelihood expression for the whole sample becomes \begin{equation} \label{eq:likall} L(\boldsymbol{\epsilon}|\boldsymbol{r})=P(\boldsymbol{x}|\boldsymbol{r},\boldsymbol{\epsilon})=\prod_{v=1}^n \frac{\prod_{i=1}^k {\epsilon_i}^{x_{vi}}}{\gamma_{r_v}}. \end{equation} With respect to raw score frequencies $n_r$ and by reintroducing the $\beta$-parameters, (\ref{eq:likall}) can be reformulated as \begin{equation} \label{eq12a} L(\boldsymbol{\beta}|\boldsymbol{r})= \frac{\exp \left(\sum_{i=1}^k x_{+i}\beta_i \right)}{\prod_{r=0}^k \gamma_r^{n_r}} \,, \end{equation} where $x_{+i}$ are the item raw scores. It is obvious that by conditioning the likelihood on the raw scores $\boldsymbol{r}$, the person parameters vanish completely from the expression. As a consequence, the parameters $\boldsymbol{\hat{\beta}}$ can be estimated without knowledge of the subjects' abilities. 
This issue is referred to as \emph{person-free item assessment} and we will discuss this topic within the context of specific objectivity in the next section. Pertaining to asymptotical issues, it can be shown that under mild regularity conditions \citep{Pf:94} the CML estimates are consistent for $n\rightarrow \infty$ and $k$ fixed, unbiased, asymptotically efficient, and normally distributed \citep{Andersen:1970}. For the computation of a Rasch model, comparatively small samples are sufficient to get reliable estimates \citep{Fischer:1988}. Whether the MML estimates are unbiased depends on the correct specification of the ability distribution $g(\theta)$. In case of an incorrect assumption, the estimates are biased, which is surely a drawback of this method. If $g(\theta)$ is specified appropriately, the CML and MML estimates are asymptotically equivalent \citep{Pf:94}. \citet{Fischer:1981} elaborates on the conditions for the existence and the uniqueness of the CML estimates. The crucial condition for the data matrix is that $\boldsymbol{X}$ has to be \emph{well-conditioned}. 
To introduce this issue it is convenient to look at a matrix which is \emph{ill-conditioned}: A matrix is ill-conditioned if there exists a partition of the items into two nonempty subsets such that all of a group of subjects responded correctly to items $i+1,\ldots,k$ ($\boldsymbol{X}_2$) and all other subjects failed for items $1,\ldots,i$ ($\boldsymbol{X}_3$), i.e., \begin{table}[h] \centering \[ \boldsymbol{X}= \left( \begin{array}{c|c} \boldsymbol{X}_1 & \boldsymbol{X}_2\\ \hline \boldsymbol{X}_3 & \boldsymbol{X}_4\\ \end{array} \right) = \left( \begin{array}{ccc|ccc} & & & 1 & \ldots & 1 \\ & \boldsymbol{X}_1 & & \vdots & \ddots & \vdots \\ & & & 1 & \ldots & 1 \\ \hline 0 & \ldots & 0 & & & \\ \vdots & \ddots & \vdots & & \boldsymbol{X}_4 & \\ 0 & \ldots & 0 & & & \\ \end{array} \right) \] \end{table} Thus, following the definition in \citet{Fischer:1981}: $\boldsymbol{X}$ will be called \emph{well-conditioned} iff in every possible partition of the items into two nonempty subsets some subjects have given response 1 on some item in the first set and response 0 on some item in the second set. In this case a unique solution for the CML estimates $\boldsymbol{\hat{\beta}}$ exists. This issue is important for structurally incomplete designs which often occur in practice; different subsets of items are presented to different groups of persons $g=1,\ldots,G$ where $G\leq n$. As a consequence, the likelihood values have to be computed for each group separately and the joint likelihood is the product over the single group likelihoods. Hence, the likelihood in Equation \ref{eq12a} becomes \begin{equation} \label{eq:glik} L(\boldsymbol{\beta}|\boldsymbol{r})=\prod_{g=1}^G \frac{\exp \left(\sum_{i=1}^k x_{+i}\beta_i \right)}{\prod_{r=0}^k {\gamma_{g,r}}^{n_{g,r}}} \end{equation} This also implies the necessity to compute the elementary symmetric functions separately for each group. The \pkg{eRm} package can handle such structurally incomplete designs. 
From the elaborations above it is obvious that from an asymptotical point of view the CML estimates are at least as good as the MML estimates. In the past, computational problems (speed, numerical accuracy) involved in calculating the elementary symmetric functions limited the practical usage of the CML approach \citep[see e.g.][]{Gustafsson:1980}. Nowadays, these issues are less crucial due to increased computer power. In some cases MML estimation has advantages not shared by CML: MML leads to finite person parameters even for persons with zero and perfect raw score, and such persons are not removed from the estimation process \citep{Molenaar:1995}. On the other hand the consideration of such persons does not seem meaningful from a substantial point of view since the person parameters are not reliable anymore -- for such subjects the test is too difficult or too easy, respectively. Thus, due to these covering effects, a corresponding ability estimation is not feasible. However, if the research goal is to find ability distributions, such persons should be considered and MML can handle this. When estimates for the person parameters are of interest some care has to be taken if the CML method is used since person parameters cancel from the estimation equations. Usually, they are estimated (once having obtained values for the item parameters) by inserting $\boldsymbol{\hat{\beta}}$ (or equivalently $\boldsymbol{\hat{\epsilon}}$) into Equation \ref{eq8} and solving with respect to $\boldsymbol{\theta}$. Alternatively, Bayesian procedures are applicable \citep{Hoijtink+Boomsma:1995}. It is again pointed out that each person in the sample gets their own parameter even though the number of distinct estimates is limited by the number of different raw scores. \subsection{CML and specific objectivity} In general, the Rasch model can be regarded as a measurement model: Starting from the (nominally scaled) 0/1-data matrix $\boldsymbol{X}$, the person raw scores $r_v$ are on an ordinal level. 
They, in turn, are used to estimate the item parameters $\boldsymbol{\beta}$ which are on an interval scale provided that the Rasch model holds. Thus, Rasch models allow for comparisons between objects on an interval level. Rasch reasoned on requirements to be fulfilled such that a specific proposition within this context can be regarded as ``scientific''. His conclusions were that a basic requirement is the ``objectivity'' of comparisons \citep{Ra:61}. This claim contrasts assumptions met in \emph{classical test theory} (CTT). A major advantage of the Rasch model over CTT models is the \emph{sample independence} of the results. The relevant concepts in CTT are based on a linear model for the ``true score" leading to some indices, often correlation coefficients, which in turn depend on the observed data. This is a major drawback in CTT. According to \citet{Fisch:74}, sample independence in IRT models has the following implications: \begin{itemize} \item The person-specific results (i.e., essentially $\boldsymbol{\theta}$) do not depend on the assignment of a person to a certain subject group nor on the selected test items from an item pool $\Psi$. \item Changes in the skills of a person on the latent trait can be determined independently from its base level and independently from the selected item subset $\psi \subset \Psi$. \item From both theoretical and practical perspective the requirement for representativeness of the sample is obsolete in terms of a true random selection process. \end{itemize} Based on these requirements for parameter comparisons, \citet{Ra:77} introduced the term \emph{specific objectivity}: \emph{objective} because any comparison of a pair of parameters is independent of any other parameters or comparisons; \emph{specifically objective} because the comparison made was relative to some specified frame of reference \citep{Andrich:88}. 
In other words, if specific objectivity holds, two persons $v$ and $w$ with corresponding parameters $\theta_v$ and $\theta_w$, are comparable independently from the remaining persons in the sample and independently from the presented item subset $\psi$. In turn, for two items $i$ and $j$ with parameters $\beta_i$ and $\beta_j$, the comparison of these items can be accomplished independently from the remaining items in $\Psi$ and independently from the persons in the sample. The latter is crucial since it reflects completely what is called sample independence. If we think not only of comparing $\beta_i$ and $\beta_j$ but rather to estimate these parameters, we achieve a point where specific objectivity requires a procedure which is able to provide estimates $\boldsymbol{\hat{\beta}}$ that do not depend on the sample. This implies that $\boldsymbol{\hat{\beta}}$ should be computable without the involvement of $\boldsymbol{\theta}$. CML estimation fulfills this requirement: By conditioning on the sufficient raw score vector $\boldsymbol{r}$, $\boldsymbol{\theta}$ disappears from the likelihood equation and $L(\boldsymbol{\beta}|\boldsymbol{r})$ can be solved without knowledge of $\boldsymbol{\theta}$. This issue is referred to as \emph{separability of item and person parameters} \citep[see e.g.][]{Wright+Masters:1982}. Furthermore, separability implies that no specific distribution should be assumed neither for the person nor for the item parameters \citep{Rost:2000}. MML estimation requires such assumptions. At this point it is clear that CML estimation is the only estimation method within the Rasch measurement context fulfilling the requirement of \emph{person-free item calibration} and, thus, it maps the epistemological theory of specific objectivity to a statistical maximum likelihood framework. 
Note that strictly speaking any statistical result based on sample observations is sample-dependent because any result depends at least on the sample size \citep{Fischer:1987}. The estimation of the item parameters is ``sample-independent'', a term indicating the fact that the actually obtained sample of a certain population is not of relevance for the statistical inference on these parameters \citep[][p. 23]{Kubinger:1989}. \subsection{Estimation of person parameters} CML estimation for person parameters is not recommended due to computational issues. The \pkg{eRm} package provides two methods for this estimation. The first is ordinary ML where the CML-based item parameters are plugged into the joint ML equation. The likelihood is optimized with respect to $\boldsymbol{\theta}$. \citet{And:95} gives a general formulation of this ML estimate with $r_v=r$ and $\theta_v=\theta$: \begin{equation} \label{eq17} r - \sum_{i=1}^k \sum_{h=1}^{m_i} \frac{h \exp(h \theta+\hat{\beta}_{ih})}{\sum_{l=0}^{m_i}\exp(l \theta+\hat{\beta}_{il})}=0 \end{equation} \citet{Warm:1989} proposed a weighted likelihood estimation (WLE) which is more accurate compared to ML. For the dichotomous Rasch model the expression to be solved with respect to $\boldsymbol{\theta}$ is \begin{equation} P(\theta_v|\boldsymbol{x}_v, \hat{\boldsymbol{\beta}}) \propto \frac{\exp(r_v\theta_v)}{\prod_i (1+\exp(\theta_v-\hat{\beta}_i))}\sum_i p_{vi}(1-p_{vi}) \end{equation} Again, the item parameter vector $\hat{\boldsymbol{\beta}}$ is used from CML. This approach will be implemented in a subsequent \pkg{eRm} version. Additional explanations and simulation studies regarding person parameter estimation can be found in \citet{Hoijtink+Boomsma:1995}. %----------------- end parameter estimation ----------------- \section{Testing extended Rasch models} \label{Gof} Testing IRT models involves two parts: First, item- and person-wise statistics can be examined; in particular item-fit and person-fit statistics. 
Secondly, based on CML properties, various model tests can be derived \citep[see][]{Glas+Verhelst:1995a, Glas+Verhelst:1995b}. \subsection{Item-fit and person-fit statistics} Commonly in IRT, items and persons are excluded due to item-fit and person-fit statistics. Both are residual based measures: The observed data matrix $\mathbf{X}$ is compared with the model probability matrix $\mathbf{P}$. Computing standardized residuals for all observations gives the $n \times k$ residual matrix $\mathbf{R}$. The squared column sums correspond to item-fit statistics and the squared row sums to person-fit statistics both of which are $\chi^2$-distributed with the corresponding degrees of freedom. Based on these quantities, unweighted (\textsl{outfit}) and weighted (\textsl{infit}) mean-square statistics can also be used to evaluate item and person fit \citep[see e.g.][]{Wright+Masters:1982}. \subsection{A Wald test for item elimination} A helpful implication of CML estimates is that subsequent test statistics are readily obtained and model tests are easy to carry out. Basically, we have to distinguish between tests on item level and global model tests. On item level, sample independence reflects the property that by splitting up the sample in, e.g., two parts, the corresponding parameter vectors $\boldsymbol{\hat{\beta}}^{(1)}$ and $\boldsymbol{\hat{\beta}}^{(2)}$ should be the same. Thus, when we want to achieve Rasch model fit, those items which differ between the subsamples have to be eliminated from the test. This important issue in test calibration can be examined, e.g., by using a graphical model test. \citet{FiSch:70} propose a $N(0,1)$-distributed test statistic which compares the item parameters for two subgroups: \begin{equation} \label{eq:wald} z=\frac{\beta_i^{(1)}-\beta_i^{(2)}}{\sqrt{Var_i^{(1)}+Var_i^{(2)}}} \end{equation} The variance term in the denominator is based on Fisher's function of ``information in the sample''. 
However, as \citet{Glas+Verhelst:1995a} point out in the discussion of their Wald-type test, this term can be extracted directly from the variance-covariance matrix of the CML estimates. This Wald approach is provided in \pkg{eRm} by means of the function \code{Waldtest()}. \subsection{Andersen's likelihood-ratio test} In the \pkg{eRm} package the likelihood ratio test statistic $LR$, initially proposed by \citet{And:73}, is computed for the RM, the RSM, and the PCM. For the models with linear extensions, $LR$ has to be computed separately for each measurement point and subgroup. \begin{equation} \label{eq15} LR = 2\left(\sum_{g=1}^G \log L_c(\boldsymbol{\hat{\eta}}_g;\boldsymbol{X}_g)-\log L_c(\boldsymbol{\hat{\eta}};\boldsymbol{X})\right) \end{equation} The underlying principle of this test statistic is that of \textit{subgroup homogeneity} in Rasch models: for arbitrary disjoint subgroups $g=1,...,G$ the parameter estimates $\boldsymbol{\hat{\eta}}_g$ have to be the same. $LR$ is asymptotically $\chi^2$-distributed with $df$ equal to the number of parameters estimated in the subgroups minus the number of parameters in the total data set. For the sake of computational efficiency, the \pkg{eRm} package performs a person raw score median split into two subgroups. In addition, a graphical model test \citep{Ra:60} based on these estimates is produced by plotting $\boldsymbol{\hat{\beta}}_1$ against $\boldsymbol{\hat{\beta}}_2$. Thus, critical items (i.e. those fairly apart from the diagonal) can be identified and eliminated. Further elaborations and additional test statistics for polytomous Rasch models can be found, e.g., in \citet{Glas+Verhelst:1995a}. \subsection{Nonparametric (``exact'') Tests} Based on the package \pkg{RaschSampler} by \citet{Verhelst+Hatzinger+Mair:2007} several Rasch model tests as proposed by \citet{Ponocny:2001} are provided. 
\subsection{Martin-L\"of Test} Applying the LR principle to subsets of items, Martin-L\"of \citep[1973, see][]{Glas+Verhelst:1995a} suggested a statistic to evaluate if two groups of items are homogeneous, i.e., to test the unidimensionality axiom. %-------------------------- end goodness-of-fit ------------------ %---------------------------- APPLIED SECTION ---------------------------- \section{The eRm package and application examples} \label{sec:pack} The underlying idea of the \pkg{eRm} package is to provide a user-friendly flexible tool to compute extended Rasch models. This implies, amongst others, an automatic generation of the design matrix $\mathbf{W}$. However, in order to test specific hypotheses the user may specify $\mathbf{W}$ allowing the package to be flexible enough for computing IRT-models beyond their regular applications. In the following subsections, various examples are provided pertaining to different model and design matrix scenarios. For the sake of intelligibility, the artificial data sets are kept rather small. A detailed description in German of applications of various extended Rasch models using the \pkg{eRm} package can be found in \citet{Poinstingl+Mair+Hatzinger:07}. \subsection{Structure of the eRm package} Embedding \pkg{eRm} into the flexible framework of \proglang{R} is a crucial benefit over existing stand-alone programs like WINMIRA \citep{Davier:1998}, LPCM-WIN \citep{FiPS:98}, and others. Another important issue in the development phase was that the package should be flexible enough to allow for CML compatible polytomous generalizations of the basic Rasch model such as the RSM and the PCM. In addition, by introducing a design matrix concept linear extensions of these basic models should be applicable. This approach resulted in including the LLTM, the LRSM and the LPCM as the most general model into the \pkg{eRm} package. 
For the latter model the CML estimation was implemented which can be used for the remaining models as well. A corresponding graphical representation is given in Figure \ref{fig:body}. \begin{figure}[hbt] \begin{center} \includegraphics[width=13.7cm, height=6.5cm]{UCML.jpg} \caption{\label{fig:body}Bodywork of the \pkg{eRm} routine} \end{center} \end{figure} An important benefit of the package with respect to linearly extended models is that for certain models the design matrix $\boldsymbol{W}$ can be generated automatically (LPCM-WIN, \citealp{FiPS:98}, also allows for specifying design matrices but in case of more complex models this can become a tedious task and the user must have a thorough understanding of establishing proper design structures). For repeated measurement models time contrasts in the \pkg{eRm} can be simply specified by defining the number of measurement points, i.e., {\tt mpoints}. To regard group contrasts like, e.g., treatment and control groups, a corresponding vector ({\tt groupvec}) can be specified that denotes which person belongs to which group. However, $\boldsymbol{W}$ can also be defined by the user. A recently added feature of the routine is the option to allow for structurally missing values. This is required, e.g., in situations when different subsets of items are presented to different groups of subjects as described in Section \ref{sec:mpcml}. These person groups are identified automatically: In the data matrix $\boldsymbol{X}$, those items which are not presented to a certain subject are declared as \code{NA}s, as usual in \proglang{R}. After solving the CML equations by the Newton-Raphson method, the output of the routine consists of the ``basic'' parameter estimates $\boldsymbol{\hat{\eta}}$, the corresponding variance-covariance matrix, and consequently the vector with the standard errors. 
Furthermore, the ordinary item parameter estimates $\boldsymbol{\hat{\beta}}$ are computed by using the linear transformation $\boldsymbol{\hat{\beta}}=\boldsymbol{W}\boldsymbol{\hat{\eta}}$. For ordinary Rasch models these basic parameters correspond to the item easiness. For the RM, the RSM, and the PCM, however, we display $\boldsymbol{\hat{\eta}}$ as $\boldsymbol{-\hat{\eta}}$, i.e., as difficulty. It has to be mentioned that the CML equation is solved with the restriction that one item parameter has to be fixed to zero (we use $\beta_1=0$). For the sake of interpretability, the resulting estimates $\boldsymbol{\hat{\beta}}$ can easily be transformed into ``sum-zero" restricted $\boldsymbol{\hat{\beta}^*}$ by applying $\hat{\beta}_i^*=\hat{\beta}_i-\sum_i{\hat{\beta}_i}/k$. This transformation is also used for the graphical model test. \subsection{Example 1: Rasch model} We start the example section with a simple Rasch model based on a $100 \times 30$ data matrix. First, we estimate the item parameters using the function \code{RM()} and then the person parameters with \code{person.parameters()}. <<>>= library(eRm) data(raschdat1) res.rasch <- RM(raschdat1) pres.rasch <- person.parameter(res.rasch) @ Then we use Andersen's LR-test for goodness-of-fit with mean split criterion: <<>>= lrres.rasch <- LRtest(res.rasch, splitcr = "mean", se = TRUE) lrres.rasch @ We see that the model fits and a graphical representation of this result (subset of items only) is given in Figure \ref{fig:GOF} by means of a goodness-of-fit plot with confidence ellipses. \begin{figure}[hbt] \begin{center} <>= plotGOF(lrres.rasch, beta.subset=c(14,5,18,7,1), tlab="item", conf=list(ia=FALSE,col="blue",lty="dotted")) @ \caption{\label{fig:GOF} Goodness-of-fit plot for some items with confidence ellipses.} \end{center} \end{figure} To be able to draw confidence ellipses it is needed to set \code{se = TRUE} when computing the LR-test. 
\subsection{Example 2: LLTM as a restricted Rasch model} As mentioned in Section \ref{Rep}, also the models with the linear extensions on the item parameters can be seen as special cases of their underlying basic model. In fact, the LLTM as presented below and following the original idea by \citet{Scheib:72}, is a restricted RM, i.e. the number of estimated parameters is smaller compared to a Rasch model. The data matrix $\mathbf{X}$ consists of $n=15$ persons and $k=5$ items. Furthermore, we specify a design matrix $\mathbf{W}$ (following Equation \ref{eq4}) with specific weight elements $w_{ij}$. <<>>= data(lltmdat2) W <- matrix(c(1,2,1,3,2,2,2,1,1,1),ncol=2) res.lltm <- LLTM(lltmdat2, W) summary(res.lltm) @ The \code{summary()} method provides point estimates and standard errors for the basic parameters and for the resulting item parameters. Note that item parameters in \pkg{eRm} are always estimated as easiness parameters according to equations \ref{eq1} and \ref{eq2} but not \ref{eq:rasch}. If the sign is switched, the user gets difficulty parameters (the standard errors remain the same, of course). However, all plotting functions \code{plotGOF}, \code{plotICC}, \code{plotjointICC}, and \code{plotPImap}, as well as the function \code{thresholds} display the difficulty parameters. The same applies for the basic parameters $\eta$ in the output of the RM, RSM, and PCM. \subsection{Example 3: RSM and PCM} Again, we provide an artificial data set now with $n=300$ persons and $k=4$ items; each of them with $m+1=3$ categories. We start with the estimation of an RSM and, subsequently, we calculate the corresponding category-intersection parameters using the function \code{thresholds()}. 
<<>>= data(pcmdat2) res.rsm <- RSM(pcmdat2) thresholds(res.rsm) @ The location parameter is basically the item difficulty and the thesholds are the points in the ICC plot given in Figure \ref{fig:ICC} where the category curves intersect: <>= plotICC(res.rsm, mplot=TRUE, legpos=FALSE,ask=FALSE) @ \begin{figure}[hbt] \begin{center} <>= plotICC(res.rsm, mplot=TRUE, legpos=FALSE,ask=FALSE) @ \caption{\label{fig:ICC} ICC plot for an RSM.} \end{center} \end{figure} The RSM restricts the threshold distances to be the same across all items. This strong assumption can be relaxed using a PCM. The results are represented in a person-item map (see Figure \ref{fig:PImap}). <>= res.pcm <- PCM(pcmdat2) plotPImap(res.pcm, sorted = TRUE) @ \begin{figure}[hbt] \begin{center} <>= res.pcm <- PCM(pcmdat2) plotPImap(res.pcm, sorted = TRUE) @ \caption{\label{fig:PImap} Person-Item map for a PCM.} \end{center} \end{figure} After estimating the person parameters we can check the item-fit statistics. <<>>= pres.pcm <- person.parameter(res.pcm) itemfit(pres.pcm) @ A likelihood ratio test comparing the RSM and the PCM indicates that the PCM provides a better fit. %Since none of the items is significant we can conclude that the data fit the PCM. <<>>= lr<- 2*(res.pcm$loglik-res.rsm$loglik) df<- res.pcm$npar-res.rsm$npar pvalue<-1-pchisq(lr,df) cat("LR statistic: ", lr, " df =",df, " p =",pvalue, "\n") @ \subsection{An LPCM for repeated measurements in different groups} The most complex example refers to an LPCM with two measurement points. In addition, the hypothesis is of interest whether the treatment has an effect. The corresponding contrast is the last column in $\mathbf{W}$ below. First, the data matrix $\mathbf{X}$ is specified. We assume an artificial test consisting of $k=3$ items which was presented twice to the subjects. The first 3 columns in $\mathbf{X}$ correspond to the first test occasion, whereas the last 3 to the second occasion. 
Generally, the first $k$ columns correspond to the first test occasion, the next $k$ columns for the second, etc. In total, there are $n=20$ subjects. Among these, the first 10 persons belong to the first group (e.g., control), and the next 10 persons to the second group (e.g., treatment). This is specified by a group vector: <<>>= data(lpcmdat) grouplpcm <- rep(1:2, each = 10) @ Again, $\boldsymbol{W}$ is generated automatically. In general, for such designs the generation of $\boldsymbol{W}$ consists first of the item contrasts, followed by the time contrasts and finally by the group main effects except for the first measurement point (due to identifiability issues, as already described). <<>>= reslpcm <- LPCM(lpcmdat, mpoints = 2, groupvec = grouplpcm, sum0 = FALSE) model.matrix(reslpcm) @ The parameter estimates are the following: <>= reslpcm @ Testing whether the $\eta$-parameters equal 0 is mostly not of relevance for those parameters referring to the items (in this example $\eta_1,...,\eta_8$). But for the remaining contrasts, $H_0: \eta_9=0$ (implying no general time effect) can not be rejected ($p=.44$), whereas hypothesis $H_0: \eta_{10}=0$ has to be rejected ($p=.004$) when applying a $z$-test. This suggests that there is a significant treatment effect over the measurement points. If a user wants to perform additional tests such as a Wald test for the equivalence of two $\eta$-parameters, the \code{vcov} method can be applied to get the variance-covariance matrix. \section{Additional topics} This section will be extended successively with new developments and components which do not directly relate to the modeling core of \pkg{eRm} but may prove to be useful add-ons. \subsection{The eRm simulation module} A recent \pkg{eRm} development is the implementation of a simulation module to generate 0-1 matrices for different Rasch scenarios. 
In this article we give a brief overview about the functionality and for more detailed descriptions (within the context of model testing) it is referred to \citet{Mair:2006} and \citet{Suarez+Glas:2003}. For each scenario the user has the option either to assign $\boldsymbol{\theta}$ and $\boldsymbol{\beta}$ as vectors to the simulation function (e.g. by drawing parameters from a uniform distribution) or to let the function draw the parameters from a $N(0,1)$ distribution. The first scenario is the simulation of Rasch homogenous data by means of the function \code{sim.rasch()}. The parameter values are plugged into equation \ref{eq:rasch} and it results the matrix $\mathbf{P}$ of model probabilites which is of dimension $n \times k$. An element $p_{vi}$ indicates the probability that subject $v$ solves item $i$. In a second step the matrix $\mathbf{P}$ has to be transformed into the 0-1 data matrix $\mathbf{X}$. The recommended way to achieve this is to draw another random number $p^{\star}_{vi}$ from a uniform distribution in $[0;1]$ and perform the transformation according to the following rule: \begin{equation*} x_{vi} = \left\{ \begin{array}{rl} 1 & \text{if } p^{\star}_{vi} \leq p_{vi}\\ 0 & \text{if } p^{\star}_{vi} > p_{vi}\\ \end{array} \right. \end{equation*} Alternatively, the user can specify a fixed cutpoint $p^{\star}:=p^{\star}_{vi}$ (e.g. $p^{\star} = 0.5$) and make the decision according to the same rule. This option is provided by means of the \code{cutpoint} argument. Caution is advised when using this deterministic option since this leads likely to ill-conditioned data matrices. The second scenario in this module regards the violation of the parallel ICC assumption which leads to the two-parameter logistic model (2-PL) proposed by \citet{Birnbaum:1968}: \begin{equation} \label{eq:2pl} P(X_{vi}=1)=\frac{\exp(\alpha_i(\theta_v - \beta_i))}{1+\exp(\alpha_i(\theta_v-\beta_i))}. 
\end{equation} The parameter $\alpha_i$ denotes the item discrimination which for the Rasch model is 1 across all items. Thus, each item score gets a weight and the raw scores are not sufficient anymore. The function for simulating 2-PL data is \code{sim.2pl()} and if $\boldsymbol{\alpha}$ is not specified by the user by means of the argument \code{discrim}, the discrimination parameters are drawn from a log-normal distribution. The reasons for using this particular kind of distribution are the following: In the case of $\alpha_i = 1$ the ICC are Rasch consistent. Concerning the violations, it should be possible to achieve deviations in both directions (for $\alpha_i > 0$). If $\alpha_i > 0$ the ICC is steeper than in the Rasch case and, consequently, if $\alpha_i < 1$ the ICC is flatter. This bidirectional deviation around 1 is warranted by the lognormal distribution $LN(\mu,\sigma^2)$ with $\mu = 0$. Since it is a logarithmic distribution, $\alpha_i$ cannot be negative. The degrees of model violation can be steered by mea ns of the dispersion parameter $\sigma^2$. A value of $\sigma^2 = .50$ already denotes a strong violation. The lower $\sigma^2$, the closer the values lie around 1. In this case the $\alpha_i$ are close to the Rasch slopes. Using the function \code{sim.xdim()} the unidimensionality assumptions is violated. This function allows for the simulation of multidimensional Rasch models as for instance given \citet{Glas:1992} and \citet{Adams+Wilson+Wang:1997}. Multidimensionality implies that one single item measures more than one latent construct. Let us denote the number of these latent traits by $D$. Consequently, each person has a vector of ability parameters $\boldsymbol{\theta}_v$ of length $D$. These vectors are drawn from a multivariate normal distribution with mean $\boldsymbol{\mu} = \mathbf{0}$ and VC-matrix $\boldsymbol{\Sigma}$ of dimension $D \times D$. This matrix has to be specified by the user with the argument \code{Sigma}. 
In order to achieve strong model violations, very low correlations such as .01 should be provided. To specify to which extend item $i$ is measuring each of the $D$ dimensions, a corresponding vector of weights $\mathbf{z}_i$ of length $D$ is defined. If the resulting $k \times D$ matrix $\mathbf{Z}$ is not provided by the user, \code{sim.xdim()} generates $\mathbf{Z}$ such that each $\mathbf{z}_i$ contains only nonzero element which indicates the assigned dimension. This corresponds to the \emph{between-item multidimensional model} \citep{Adams+Wilson+Wang:1997}. However, in any case the person part of the model is $\mathbf{z}_i^T \boldsymbol{\theta}_v$ which replaces $\theta_v$ in Equation \ref{eq:rasch}. Finally, locally dependent item responses can be produced by means of the function \code{sim.locdep()}. Local dependence implies the introduction of pair-wise item correlations $\delta_{ij}$. If these correlations are constant across items, the argument \code{it.cor} can be a single value $\delta$. A value $\delta = 0$ corresponds to the Rasch model whereas $\delta = 1$ leads to the strongest violation. Alternatively, for different pair-wise item correlations, the user can specify a VC-matrix $\Delta$ of dimension $k \times k$. The formal representation of the corresponding IRT model is \begin{equation} P(X_{vi}=1|X_{vj}=x_{vj})=\frac{\exp(\theta_v - \beta_i + x_{vj}\delta_{ij})}{1+\exp(\theta_v-\beta_i + x_{vj}\delta_{ij})}. \end{equation} This model was proposed by \citet{Jannarone:1986} and is suited to model locally dependent item responses. \section{Discussion and outlook} \label{sec:disc} Here we give a brief outline of future \pkg{eRm} developments. The CML estimation approach, in combination with the EM-algorithm, can also be used to estimate \textit{mixed Rasch models} (MIRA). The basic idea behind such models is that the extended Rasch model holds within subpopulations of individuals, but with different parameter values for each subgroup. 
Corresponding elaborations are given in \citet{RoDa:95}. In Rasch models the item discrimination parameter $\alpha_i$ is always fixed to 1 and thus it does not appear in the basic equation. Allowing for different discrimination parameters across items leads to the two-parameter logistic model as given in Equation \ref{eq:2pl}. In this model the raw scores are not sufficient statistics anymore and hence CML can not be applied. 2-PL models can be estimated by means of the \pkg{ltm} package \citep{Riz:06}. However, \citet{Verhelst+Glas:1995} formulated the one parameter logistic model (OPLM) where the $\alpha_i$ do not vary across the items but are unequal to one. The basic strategy to estimate OPLM is a three-step approach: First, the item parameters of the Rasch model are computed. Then, discrimination parameters are computed under certain restrictions. Finally, using these discrimination weights, the item parameters for the OPLM are estimated using CML. This is a more flexible version of the Rasch model in terms of different slopes. To conclude, the \pkg{eRm} package is a tool to estimate extended Rasch models for unidimensional traits. The generalizations towards different numbers of item categories, linear extensions to allow for introducing item covariates and/or trend and optionally group contrasts are important issues when examining item behavior and person performances in tests. This improves the feasibility of IRT models with respect to a wide variety of application areas. 
\bibliography{eRmvig} \newpage \rotatebox[origin=c]{90}{\includegraphics[width=1.1\textheight]{eRm_object_tree.pdf}} \end{document} eRm/inst/doc/eRm.pdf0000644000176000001440000162501011706463107013777 0ustar ripleyusers%PDF-1.5 % 1 0 obj << /Type /ObjStm /Length 5824 /Filter /FlateDecode /N 100 /First 818 >> stream x\[s~ϯ[r]>"HS"hiЩ !8 Vh#QP=E>huD:խH@9X!܀T 'KcD( K'@bP%$gq"JPVZ/#IɋHcPl%Γ>9MA^ T(x {efRC=0)rx=r MSr"N@)I@!NZt0,]AWLO&\#)X.N2r?:d2*P$T2LEg*:SљTt3LEg*&S1{Q?NS2vdCƐME~r8 2ņ7fNBϷN>]7'|&:o_i'lL񊨟Lgͬ@ +*W^|]}%^6v'uhz|s`8 r"ࢪ%]UỤ? ]F]I(atnGӽvD2bΩyGUdhս+"|Sh46T>Ҍh* 6.U49|Hp 9ݓn' Q1ZHPaB5Щ+L)Y)x0=wQp+[1:[9L1 *7ďg^f7InFߍ7zț<@_^}VKїdȿD,Ze(RNZVݽ!EhBb!-.;X'xP>2sͧ{v$Zm $Z ]0&bz&LVwvbΘ,&l|jRIፒzGpEל;,py&#%{{Aj~𸺛 $*QAASv?(h7Ass/S>VOH?ڷ e+ |5? ;hg *hN'=30c2S(Sr]l*ey~Ӷ;B62G;X ʪ+ȏ2AxF W$0b|\BVQ-oO`[E[X.cMHi~S=0;Bl@$NB]aHV5 |-]wB`wI(:4TB#({}4ס%voe ,BuPT..-*+g{2?=8_ˈl&f%&[o&}^?~©0[ީ}t2=1%ީώO}chVnvmֺS~{{|Kn}nذ MIL_%'ABY2%Db9بF]6rQÍnx.u9਻SIZև>zTnM}ZFg?7qa]Ϩ>kgg7Wl:NP^]mfͼM͸fr>_ִ(߯M0/?]_6]\zROISO)x`*Cnf mf^Rr)9?k]%ɵ|9jPͯ1o?1A_ԋYԋߦMk[TO3"h 'y|I/ƣYdJB)IPt78jַ^-Fy,7" h`{P{NPWw)Y+_rnΠe)6մ-j 1VlUA[Hrc ~"sxp[HT~Sǥt<nBS-:X FT zI2<cXsW+wO* o-,i (Yb]-t_AVӑu~KYhJ18g*1>$0 >ТO"SkOQS:80 KD"*t/n>}@h4d ˔UUDm|ϖ-+28{!˺#zAJ41'm3%/NY12~n}]i +RKqxs bg  '38ynM 'N,ӗ[u=k`/hƒli$Jųrj0Z39bњ)*fG80B;cufV4["cZ.N@5xbGn& NKˋB%+4D'wl)07>PL, ;%W,+l:vLWL^ AS"v/}є\+,)ZX{$Ox#9bnW%7$}ahJԕL4t&rXQ" 9QBj6.Y-`.%: :Fkx6М~^os ~v9lR֔mKsUz̭Uq-GxP '%/Xy>(5T#̙ZaoԀa;2~#q:,KC"2>.eE 1G,'PtwEpl ׷-} e+~9KIZBmbx?ü|0OQ}V7eɋ_sz1>+"5 NR[q=z~kN{nO?o`k?ȗ! 
?ow=w h Ю=eL0 ˴Mumn瘲ͱǼlMòixGce[&n޲}o^(k)rG ̖7,m[)#F;e 㴽=ԙ/|.j+AtX^6@@=zVJ"9ߜ-x::oFcj~suŇnv n+fr>oCQ#ȾE/y*=ZmzHVYYQ9#4Ci vؙQ=__6 8vE[M\[XXň\̚ 6㸙aW7dW([s>j&δ𨯼ݙ~ quY}%4w4 ƨ0#'6#r!PFh6bGz&y+>Sb4Yw&%RPv 'naBT̈́r (|8gd[gٜ'|~B^ĴO^#44g ͠4އJ-##_= irY7l[)T:7n'I3U;"\݌S=# *GkYpe:U%a4YyOBwa'<Sb:K~{iGϦg>xt}2^\69@4hr.F kp>z¹NdGի"Tŋ|e7Qssq4])ynV|AL?L`$܂MzZ}T'tgr![Tk-Jw_/RSSأ*GT96ahݓ^n,GlvrPV鸡z(4K+SY ,Dy |7KG#b~QN.sBY3=-^:e[(vDstҼKi35:Dͧߦ3£ih'ubçtӃ}A;H;3x)_s>[endstream endobj 102 0 obj << /Filter /FlateDecode /Length 3988 >> stream xr`*Ud ieU9*9"!e`Z_O?=r ̣ dz,po&nfȚͮogerk"z?f~z׋e {[-3~v],8_]-[=nEn_ !fIGeQT Y$LS ̣,fKge á_ێa2*31$ʝ-:.F0G[ˢO8vYG# ?4ς!@wt$BOkbŦH^xś+QD ?S'F0tffe<HgfgP?Ъflهxq+fКlDvd ocb*m&Q#(3Br~X1pxcGUA U=<K@xˏǻ F1H6eJ ]w=cs="v7Gơ[9X+XmExe *P8vv0خD)V\I88Wǘ=4s+u{rN| &W~CAZPaa Z)\nJq={T9!vBjxi,2V̺L~j旂9\X=GQ,Uh՘>nfna},9 "NtiR&Ď^xR*#+X>ƊOGEw E*Y}vvc^VhxLY ?uLV۩bT›>]8M6zjXDƒ4 M&|AK( *A]/HB *4|+IaCEĹD3J(wZQ~"υޔ01"} r2T- zpgbq1IUTێh4aV^b[ܣ[Rg}BGKQE|P;xw9ŗ[ <* PYd՜Üq3[u6B0i P)4/]L̷o3 8M%hՑ \yDQ̝Vzili3TV=J6^H"dTPl{^a]W#'m̸% F>yתBAhV1ٯy`YO=XVi]k8S/?گ{AkPL^ ?]YX$)īˡ:wFtP:d% ,誑Z׀\>G~HHQ]|ڂˊ\Rtԁ; Fk$lj%[%y2u 9Ƙ yrN}~`㏛|ci-i3Csêqs $ii Fq/#Nb"fVnJz%Np6q.> . >3^ DW4'ɽ^9. 3R'ւZq>OP4)mvDcO/  TڑN34n{=XѠ$tω{U-T+R Fƌ_3^<446ffI"2ZC$_/p' #P ;Pk*g<ߴ>-[VZ XV F&q:TF\N<,wqy)%$L؂ \,5ňQ= ~Wn`{zY{%&tn۝oI: QrI$Hy!!~K-2S[On%i2Ԯ&p52}߼5&`2M 2Jp=%虨ӒB;6Ec;bI[DPQ2R&һzjZ_yC e YV$Ax}$v7nmp_l?,|ʚ?!D'iA⃶Fq 0=I'RzwUGY x'2P4l+OYagЫ#ZW0y0Iķj,lGu2*}LZak%/{VM8>{0.JֹY[2n_O4tZNh݌RX>ChMY8Qr `T>hBkEXAe=n!!p;6OiݩTXd0~&yUi0V|GZdDm,Mo(gm@XՉ!npPCMؤ=#~1v/kMa<_=p11v5'j{|rs/Z^s+;. 6h:=)1脫Oaܰ,L0}p{|g E}=tdjqUhaAw G0@nkq|j}:쥱1X Gdd$HCI`C^ n,q>.C6ϣ8'3o4wiLN.A$W[f̂*y;o__pendstream endobj 103 0 obj << /Type /ObjStm /Length 4168 /Filter /FlateDecode /N 100 /First 891 >> stream x[[sF~ϯڗD2.}j*U-K%U$8Xi4IlɜdU*O7N@H4QLI3dXlY9i¸8[ T0aK%)MN30ES˔%\4'L$sfrq,TJbVNPްL޲ї@h!(X KG'e % *NAbOΒs$8[ .K=8g JP6hd; dS^1\J)ERRg$ YgPJ! 
Ln&+A ̭!08g Ι!a"I lѠ<.)0,HԤh]dBC#xMC̈́gɵ(p&cJPjل)Ca#$B<,7~INRKF%b (.EI3<2 S 8w~IhF!tKmBBcD2dXRE:e)(4 F,~,'_tc_E)p6, 1`'&|) h{|'d /SsR7阓%=<ſ9pvT^+`1t Fjҕb.5KjKPrɖrіPa__Kҫ>.&yCq^ky~Gjphgob7oP|]ф`34*R_6b>+7&v%^!L'bVB&=]kx{&C&^q/7x텟{sOt>$:_6ijeՕ p׈{ '?n(GftYa(_- Y^|*ݔvSI1dRlK+= 7ͽ՞+ІxZ~-]w/=rHwzj}<@߿~#dzw0 ^;~z*HO 7fo,EtظdP-vcO% kIqZ=zOǨhi?)QOuyhn_tn{$0qs-~xtP`nVkF-Ho@X'3rf.zPfpUyRS3G(U/rpƵS,D@ZbUjrpTX{)w^w4$S}[e)ӵ}QNZAlՖO{=PU +HK|&uy2z}+VweGؕ]b_Y^)Wjz{5~1/7=rVvttӒ؆tQz奯"%ӳSv%2*fHTrd 0]tDIֶTeR]/`8z^̋lo >%5Di Bf;(ߢazZT ѻMjlV-oOUs ZCvitw7f}K C% 0YN!GnWWlYU~)򦮧҄ wp| ]*'mOt":^D҆WHC*㫲sr{JwRRSuy5u.ڄ~e4.x]Ͻ OW-t?{n;abg ,;cyr)A<,'OOzB"}.ǧ_߻f7CiUE,'ݲ)VX6_BXW~I]]'UQj{\,}sl,:4"9)$S@\J7łDM٬[reM]7rG@oKl$ _,+MYcYa&>ϯ,3ӫ&Zŷ{Łnpv'0cpyX%)SMou^7aIlA͡%K-Ruf~/go.30h Nߵ ֓/KIZ*9o}:>% v?˿bKsr`h}@xqY1 sИ]kd*XR}e2 F3w(R;>|~zTy8YД5^ܖuր󵋙Q#5Z?>˞WyCܠP(_nBX$/4!dζ 4BY&Ѝ/Km4Tl5E];ЗGOzr%܎wǫ ia 9-})^zHrZP+Ig)IS򄖢<&Y^6^ݟBb C#1r4$bhW`ʳ*yތ6?I-~\9LzSsԑؤ=2 F~SLilķn|hRdmr~<_q?[ϓeci0qsHV!m;|}ǰ^տendstream endobj 204 0 obj << /Type /ObjStm /Length 5059 /Filter /FlateDecode /N 100 /First 935 >> stream x\[s6~_MSql񶩳qm@KF]JJJcw ܰ2!swϴ4f]I`"LbBẌ́BDЖNR8& xLT-^rzdNh*W%t@vOHEeJQlGFQuTOJ0cB%bW\'ìbU8f|HC`9dF6:-i LSD3 Ԇ 4|Nys!Pgh2P49!2SqP;=F[˂"!xbQSH0C%xs J"p .d;"T )<)b#p a?HGC+JyNOhQ֡ )"'@*zB2 HthCF@+TI І".I0I(E gԚ'atD 0Uޣ y٣ H=К@~6Ё |hÐLJ- }6%! hxMeц $wmXQ# W9ptd3QOh+@C7׿R˧lݧ|!Z5yрLxCr>jErU&dqyQ2 Wڲ/%c󻌮ѨZj>: 1bUY7a<^|3YHUf W2"̀Z ^}0fsՂpd[/O樵g*^RJL@TȳBdf\d3a%Lf"`:\!2y0V;8?sAf+CqH<"P=HFg v?E%1sҙ!)F}fd%5(=D%ބ\̒1&sx[/nឦL4JxA|PY2qelF hP ;^ʘhl|և dur-ꝰߢ]F@\B fJ0ufa8 y}0Gz.L 6_pޕL=D ͈`Ab8iLQwAͫ BfSXp03l>=ÅHMz pE (!|wt; NHՕDˇ,I\;J^SF=߁aK8`r<bۣ=f>a{eS/h^jTLeɎ)Qy=`º vvGM}R,~5_T \gm5ן=FWb WeS4 > ߇}N\|ğ=CΏ W-/ Q=|K~Og_^3^G>S>5lR.ڧH~Q6U= y |K_%Y65Ei26fՓzFUzsH:uF9rN4@όA/n{a)FEK<~MFU5d_0[GuSƂ? 
>У1^1; %Ht1~{'?3*]meI}$c4s8Exнy|`&uSҦ QGqj{z:)D~/e+?ǣ!>-IlEytn{S:T/xKlRN{)h@u`G?ScL\eI_1Cc`zlh 3$'ME$[7Z:J::٤>Tj\5YE E,&F#|`-12yq XV[L,N![U]gf4&8|^LNS)y-wMv2n I[!(IݘǕ6@JBڻLw:MtwӽD@{Ot[1T#9Rÿ(608h"1BK m I-w^gז7^=~|~öy͎ϛ2n|~ڶqCzRlؾ R4`lxwyc.3+L9:=-Yqt\%Bu`Ӿ OPbY{IdÂf~`:Ub5T6hPk(pPa[Q ^TpNs[?hotBH")<{W-o35& vv{W{QFEz[Vĥ%rLbcOwve٢\E|Va}FBBt> 急7> \ f0ɫ=Gۭ+b\C'):gBdʲ3]44Xq.m53ڡ9( IYڨ┢|OQ~uʼ_Pdm푇jsIG(8ۻ>5Jz^OYQ4 q>Ix3s:^ޓ:'89.9{Ok.eui^^> m}WTok^)W^a`w6O3.n^D,?VܤIaq&  Q:)ʯQYhoq;M/_fsZi{kw4D7$D滼ը}X:(UԊbpp㜡N8*KVr[tTK`ENj_:հ~@wWIE=)H=ނrpڐt~v.mlܧWQ{|.,H[|EJN9aL\"D)]SF;}?ˆK=1t6B!$AGt( %k7NCZ(HKOgIH?hX(EҳM+wZx/~Rj~$]+ eno4YMy9(r]{hL8S4Exr- t1:HCnмɘB:BB,Ni;aڢ>qt{õGAk5յ>Fc ӢHTNKk\2 ۮ|)ĉ)A/Y|k[0"%%VϱvtmkUw} &TgڶӦ8"q3,:)!+iWS-Zr(R-ڧJOm ,mJtEˡ$9=Xq,IJ(tGK:7 ۇP Kw0bo;^L. ~R. >.'jR7|qN_e즠o< =Ianvk:7~o~/tw]Ꞇ̓wrZ,ǾESMLp"h״e׫;nQ8W;tt7s?Ɠ?N8sWfƃqP{nYG<0|;#T't)q~Nۜ)ѰCɻr"kFvendstream endobj 305 0 obj << /Filter /FlateDecode /Length 3937 >> stream x[Y~_%8>gFArX#/F,SGwO7,0C9j:XMj߯.zLڲN'm5M]Lׅ_"(RT)U??\ʦf._[tQm4mKa(= jT-el*uU6b2Uljhw_aoW^N6e-%<_w; ~d/uA>%NGXqiJ E[J5KlT,]yκ5Үvo,_~"N(-p790d*D {׽]yibNņ\# ΐqםz{9֋_l7aspo(]*m%IWԥl[U`fa:pډ&W>F٦ttb[dw ٸHF;'B\T0GSZyGmuݵ_րK (>lϯo鐬`$=IxTW?{ 92Go*S d̗A$>ijuPl`& ݝ։ޢDy3!,uJTrFٮɰj88 8L$w W~S%G'3TyxKB7r{CrsV|DcHB QqByjýײ~hM ` Y͆_p"7:hT+Z+M+~D@jwtoP8k ,8Iu+~fљN?5-\VE|;t`vTg2 ;]k6˸hx[Z/iW7`via6aJYcs&dY{qÃ3 ׼Rv^{GC.6, 1_Ag[o(⻈H.RHF;a;oPQ Aߚ^EiUVyr,TᅥdU@– aӧteMp,ƱotƖheyaj)Ʃǭ%lۦ'߅+Xƭב]7W J|x:YC@k*=41;% P Kd?D0ZƈRڏx\(Jۢʖ^`&"(hQ@XdH}MG\ˢ3dSt j]k@|upM{D4jMV^nlGFBzMk}K|Tz~7F_lj9aFuG[>o9ȈHC MPTu89**[.FyKS[Yxk˺gE brŶ Uu)juVA "cS09z4 ℶp4*#`‡0)j a10 #`] 'mۓPOs QjՀTUs?v[peSӝzUuw0c#uĴ@ ox\&?ܾVl%& 2#lLjM%e֚[0V$s\'寙JAx͗yь`L4,ԟH_]|!tj?RkW`Jl}j2fi_M~7gDXfqI'݄-~-^fct G&cM\fduelHB'-СB:)E?_Z' ttWJFI[r(*%Tz7U0ir s~18HmqgPǤ!:} C&B!m ng p d֪ }f+ϙ_H?ZrG#:Z NY9+ܾ*12gҺK>avxViAMgG4.5UUXB(LZW?%,eY9<.M?Pj9] Q` V\*bŲYm>3s(j4N)?an:rW::k.f_&QoK_ve`J1?x8 U *"l0W=&|{V.B-%3A!fbMPhJy7 Uu\~XD-|qk@5ܴp.)P)޼n7C"M×G)嵍>dqՃ]B-{ ز1\Ї̣,E[XılAT2PSo}y"aBE(A6j\%^cگ`s GGMKttIB^6#tXj@E8. 
`>3ˡRƝJ wiشx]m/qWpC5*+od)M4';))Us =8rRDQ?v}P/JjoehnQWBQqO^ l@U4gY{&bM0Ste|[YO*'xTYVtm2LȀe!ú.6mg@m|nS ŋT=;H1Ky(+yGI&Ndq{.OV!-a f uv3/9%?jE}I6fqp3**-zXFQ;˨uȠ|@FE*gCvX1z䆩3ͿPΛ&R}9܇n0qFtyУ|Ra$r{jgC~nwp2Qj3Oϭ~~e՘\BR0 /Vj/dO3 &X+iOl1&;.FZpoPж6lu .»`uYH)() ΍K&(86-ds6uN"Zuf2~ɞ?6g0qJ8|o!k$v;¸ĸmbG8 Gei"7eǤP+5m}RF(DFgC nşa(oV3ٟҁudepb EwE)AEN#w,BıCSq#Ele.._q43yk&qG@>' O|,ʹ͆p^=*d;xrG $EOnQ@oO7 ſ\?=MҀf|Ǒꆢ].dTi&,I-_ZȰٖ]eK3mXi &< 1UKM}/CFdۆUzْ[}onr3j2DOx/ML6Ǘ p|`6ӣcïx,|tendstream endobj 306 0 obj << /Filter /FlateDecode /Length 3413 >> stream x[s6_{CHӇ\&k?LZeOd_ɌE.b؏b~qS&ovunIe)j>\Y9Wu}<{3& V7[Ÿ+l)ؼ?ſkDqKpV3DM1y]T4H[Nfwg\ي;,~&NStg?wzӈ9k$9hlb %z%Xl+:ay;ʧJ<x|ַf޲'88ތLҊB Gk|-A=.$;l{$ִwe$fs2N4ҡXy ~_H qUo,Abd" VB|z@VIG\oF@ϜگùDt(DIZ_'epe+˭OcW2l#LaF|Z2rڙ$A)XΏ2bQ.7{磅>EwaarG,Υ/3]d U3E9uK ,NڮCP=xFu{+0Y&ц#zJtZtD vm]I{ypB#JX!!!H2o%Nr5hUecYpW(RUz2\ݲȶa=DzaP^6 kFy2Ъz؜6ee^WAI]r4OMRPE^]PV HhXM4lʁt0}jpu}duj"INWU[ e| "OLғ~N\~%Fu.1 qg?P"uUIYa4BFaJO50K]`uН'wN>`U3S1e^΄m@r&wuذߝO)(w!ԷΒq Xn-,RY|S֮af.qBI9]iYݬ if)x&?PBZU vנǫ~heB[dM~L(:h%;ӱx4aQ*:klͺ'ˁm9[1Pf/{=ɵ!5\;# k?|=v0$-S~2|4\ k w`jh0],&C=^FƁeM qrr<'ťh1ĸ_-XNc剧ҍ &'\=j(6Eurτ.`WzVߛp8xє;mʵNN Cr{Vñ7`Cx+oee\ f*@}: p+MdM(ns[ }=s{wԭ"[& ՐA7ˋT1PT vhGêI]ʌJ *t.KG츪 ]pRL2Bc8A 4U8ķlsù-ACGi R鴷nCVlH+q8q؅Cn̫蜻[=Evg([+`Dh olt=>{&$p,;H.#``p Q^wR;ϣ$Ƒ2Z,(萕3Ήxro%U@fN77,] O8Jk]t%i݇o2*L^Pa}J&& ?ީP)9Xԡq Vy'S1lU'$5lS5y)UC~Do*ޫMk$&(V a<5VjԨow4A10yÝPHJc`UIB],f Jpi8ۉyMj >k2Uc{IQ[[ʥɦUFAgq zh uKaJfc/J܊˛/iFraJҬOqWN8ˮJ -_g ˨O<La7\`fK7Mendstream endobj 307 0 obj << /Filter /FlateDecode /Length1 2798 /Length2 23803 /Length3 0 /Length 25366 >> stream xڌP\ "!8i܂Xpw  =CۙoU[T5m49 1P֙ "`ddgdd''Wt#'W::Yra 4rDAvrvik ?C;Gn)@ mg t'p4pW 5_!-@hhbd P3:{'=79?{@tt~ 7]=<9@o#X[m@.@G(9@EJ`Xoz`g7?޿Yldbbgcodaik0eݝlMY;ف\,A17 )@S~uYTh#v'`fikjS{5[K?&  `cddb@w U=)~Ax@E},̀^NF@ O<` 4Ơ;ZtA`7=zZ{5_yeuY! ڿ+W',lce11~/ۀTrD5p]yd oe uuM@L/{;_B.l,=13,@aM5j@!dkno--݁& omi TsЁt3z8f 53}xl#GG#xA PS_ `s@O [7b0"+0@hl08 d-  X үDNɾ"9W"'@\E ."JU "@dT^+Q{E 2DFrh&~E צ1,L@b@3v42̜_,&T5h 'f0$ A?v2xZN+w2CDFNta@C0{ ? 
ohGL~l,+ ߂l\20⿲cMhHG~FPV@Pg>A4@l( l W[ЙnJl5W5(=n`eG`:%{4+9.w?^zbg̿v@Scpaa}U:\h+f=?fj+6@A,_TK*5-lcAuvyA=vyE:?vGz_X@;~a΄'kp](F 5ׂc[Dꪌ5-1kE'u0a- J 'vZDZFO*SG,^̟g4U:~]Ÿ )'[W;}dO̞tU/Kٶ"E<7/ F'+ jЏ8,D3d5lwckLҹ2"X.q0w *%[i*z"ң`Υ`^б5]?gf%g.#Jފϙ}q;W 'z SMF@0m$db,M}m=9kX8[ ^8N1H`0q4CplUԜivV"o=He/\k}2Ln螙g;WPl}e ӭ=a,T^W)`tQfvLPٝhӁXtr 6:CԀ0`11c|V=m?y_E ]iI@~͉8;zI%췁]q`lY[6 7SUYw\|TOmRu^ h)L&36PE]$rG6}36 )P4i?ar̮{3Řs ՅUevyKNKEn3y*~t!PkBيCǓFsz)ţv680zC0@J@]{ƏkGؖa/˩o5r"T`ː &~NŃ4uYNnn\ӊMKX4u}I4%ͼp0Ϟ:ue%K_NѰ [ţ!K3eL߽)6TG}ySrszogДueHk { T54{΂+h:ባ6iP=]t?*F?kt.Bk_]g>m$|-ZBֺ6NβY l>]uA;\_)q+3'+WKgf~n<*1qm:6}.;8s2n3"{_6GY#BDa^ihG3Eм9ކ5x4\B OőOBBC ~y{wZ;su `;A]y2g4K8˅I8,x>O!™lpx g&!LD{1z|yfcϴ&璂y .o$Ąe<n8hZ+lb# hIgTB֍'L˜..'[ $rLQB; ?v [E9#h'0pJí B#E{nυGNS͂3 4VB(YJ=vɦ5Nѿ|?R2~!Q(FG[a(2(R ؃'NSlu/15ȗ3 *U!WoሩUN=q,Lк~iG, S|W;|: AZ$8,LѥSLGc1jXsE9dӠ{N7`k`)-K$u/x|Ei) mcGNKCE<$"Nx<()ӕ~1Bs IؚNxٝ;dH!cϒ,: ݆.TSh W=?3teID9U;:# Nߚɸ1(l<*-q"\zn4#e ;iereRfmd 4Skr=_/o >O~Rhn*i6AK}Cv*h_?,sLB1:]EPxrEML2X7Y2LjmHX=~\,#zk^&koݩd[7Eڐy__xC|ͽFJý6HQbgsISIgY(qO&.|E^_B KtPp5T=,a pPi^M\ Mݻ4j!Thryͦ>J͟m6wXtS,[s˱fcAO_?RУQl/X{<as$`!j]ϊV|9opa3H_`*5=y6(=4Y9C7p=S{yf 2oNȲ״P-]{V{/'N&ѴQDocM a !W BΨ?JccPo XlqLqyU撉 FS)1$v3LhPلpsx#Ef&Tˤۂ,St]m'֭U+\{to[SE NMr~]wEv,_FghEH:U(bY=nPGFxF|G|sG=hIݱ shKmvedZ$1=@/SC~a]1yaRJS@i1g6Bh:VHyN\muϙHE|Xi*-xi0yade-LKHw}HQ;E-?^p*iR\!Ar%$i.)ͫx ͗_ EbG_P7{IhT8v-jVCZ-!PL.K#Җw6¢-]'~UUBH]):dO lO{=,'ru}77Qߐ =EKYnhirVԼrm/sy/cuQMd/"ᆱ #a?AV၊ j"Si|+~t?I }  DtfQ4B2`NHexY:(W$Q}5]z4^:ͮEg/w\f&&76 q3 LWywwBp`/t˴m"1aD%1VzU@RΗ[' Rz{sBd*EyȝgCZvUh4tsH??J O1J#|*%H}#<)ކumjwwoMt\QEvs~[ , $o`ϗ:絠tUᛥ'ҭ "IQdVyg'37SIx[VMtJQBQ)ڵ4E'D\o&2[4)PږJ7^{tٲ̡X d#Gl ۟9El"E:9T2;1a܇4)-N&Г_yIf:2D yVk攪uc_u>6egͯ,/Z[b\&k@lOaõiv7/3mK|0}m0kn 77+;/:>?U=ߚ] P\)C e`Ml*&WԊx>+#Bg~Bx9* !W!vP}B{F3WwfPDe6}l"JYkvG[+)<<]V P7ቊ:0>Sā|Py"! 
gG>p Cs:Vކf'Y"cbPo&d,jNY&n>BeP#Av&W ]R*RYBX8D"zZP{*Nv-cJQ̀Bৄš>S@z unK9ƐһԹECo.ҿ'np>a;qٮyUX.{(G߳eLCcSa9j+խǒ@1&m_fvٙ :ܽηNe$%yOfѶ+?k|"KgL2o%Y.qF}c 赮]n1$ FŔɬ+pG Vxg/+ !B)*f`zhne8yD5WмGƎUj qS[IX˜vXmC%l- Qw[}cPM^0sRcEW }<9gE4nwפP/\|kc3'MT_[ KH[>PӑB\4F~X A7DiTCTڸm-Tv mȫ`䌊om{ny}J&zH2wblgZTҗK\mKch%^Z\:&i[Q:%EKGLCTU^-K*0e>A RLitz'4e,JUFwpJ VO$5"..c4f|h=< W|4\4OԺW2q| -Y0RAohല*S?}LK/`am.$NCo-[`_zq?5R^0!ד}̵Pv=ٜ6-#xj6 9|Ha$WEnJ42#MOc" N<|-|>v%Q{gL,{u-Əkja t'xK'ndTBjό\GiE;V?s+G\iH(F.;%-rC[zԠN Ƒ_eN}QmI\ݥ',΋`r32 PΨ@] ߗQƒ2u0O  _Vakso8O_ZFX\%!ÜI}=zw+bm)H&m#.[א| GCN-ȑ)q<((PFJ0!t;8,Iy3{G[/]\mVH M<^ߚ9kX[40LJmz9cpVOGțD +$]4FuBEz6V?o:f#o6hB[]>s_Jt>t'?o8̆V&25Cc{[Z=&5we\mtsXe8LFC%L)A( y~ÐMb}C(`G좬]΢suu{7  }ٽ i._{W-y7mBp99*13 uf#Þni}IUO߾E$gu^И_@,{Jy3lmT=X϶KML眵{oʝk~|l^LK +ݳ ?j̘{;fɝA2%5)lcVA>xʉs [L>Kg<_r=1 cH?5ڬ? v .<5sw{ddR+.J.kՏ6,^!EpL$l{e  i)d}{8 Ke 6G )kG ,}sKdˇp۸XFFbufW{XE\djcQշK.#6Ҟߤ-[3"gq !ۓ7t7ڔ">{R#zɯפ8#dfD~N%WcNJz.@{/YypϿm`A_v')XR6]pd/w{&\ەBSk!E?ȂtKA##Im yxQr xٙ# ^B.D,sU_c c>**gUXM0\vz(~צi//ӌPj퍊R #23Yl.{ XPYނtd46i=' -bC&w@U1n\wXxd&6|ciZ>IϺÄT`?꼏/JT9e4]o#W}$7b>2 0E)$"}e= '!wZM__;PJn(VFwXhƠ^-l5a 6NfLfף4TZc)'?iDJK%///+힖%ybL:zHj'Ьo՝J˦_C`x Zyhw=瑱Cwڞ8Y'%7߭o?1Tʢj! C{:A{H}Nd"?O/xI\HKr1TJcċPGKKsYUq"7pkxd !x"ųbCa+Jm!B#DcfqL/̾>ʙ8L{kVSSr"w&*zpᜓά HܡT er My_6s_$(ff"r/1xn?`"Aʦ$LQQ-Rǵ6gp&T)"r W=*WLN0msz,<Ǜ;Kp6NͣU3^0Jr륳C/%:zMUkN|sI9 Y)E~`g;8B|;!?38fQn4<*3~<|ΜXwWNebۖ.730l ]g&OC d }?n-(j~.g:mowSo(0rӐ5 >uD3'oG1z<1y\&i &Th =%OJکMe %gF8wכZKuuVb M^$^m{;]URD"Ʊe= kfL5%}"]MK,xb` u\/rn 9zZB+EV}(# Su)It#KW5hn~\s+b,&|+k򠩈*i? ^><':y6yٳfRoRآ =R6 FJ26֙;JW30oo_#C)40"eN-;H=1JS$2QEqlws+e 0áFPA%&|&Ҡz>VY2YV'KJnFe uQ mo.X;*Xl9增u4yU*&вUiЛ}d]e Ӵ6$dLJb+#ڶVWٛ-l -4(JmVekF=`  =li-\Af|&~}D7,@ pù{30^|0!1Ce~'(dӿBZQFqW< ~zl  n6eX^S/NVxtxrx_@B$]w#ՀO'.hi~f:ӊC30pgͼJq?T2mϢ"BN8BVfG+A2-P!b/)ܲN@X˳4'+7y\.;sh$S۸H"U|ɅdEfo\a^[)g̕5=-Oaj7ƒ:!Ǒҵ)ZNQO_|ٱ Fk)Z!I,ꚔCF5[ʉDftRm- (ޘ^۷i0vbIx]ƼSOk]ΰf'nZ'E"u^Eh)3 !R4L<ƎR,|Sh=!&~*uxsq TʺR7e9  px? 
]4Q$!qZiuY{~t[u.9 ac* EfXxzz:^ p\Q5UàCJC3L=b՛&;)YF'J'lbcĶnSH''esQFwHSd,C9[HkK!i g;ю{!p;eLT)css'oRȻ:QU|f#(`(!Q:k-š @eVry\;%4/^U}2Tl1~D~it6{J'xܢF9 Vr<#%m*QXOGsr!,C[g ;b^|B,>l ,TBz75w%*Mql ;%zoҕ0w=67RX͌ޯyzb; qʯ,SkkU"63eGB1B68)T !J ;` t؂";55|#- s)D^; e\42 kQ ɕ2y\ =EHmI2ϛ v, F1s8\JݖM&Kz;hu*jvNT}U7*L庌0O%!#BN{D~KKp">q+;m,%%2A[.|5Y5S3k0iSDZcFr:U=';ZJAc0!r\7͕r0M ;G+0Ծ|{Ӟy)iEx(\ Y;. txP#Kr"SFk\:<4҃Z@]M֎m*'u7ŕ?8zO"ll*y!T,3/DlQ9vExQB}k;n6{I=]_A{)q9o\wBh2>m</-+y/Wi}۹,Z?IqA98ڭpk` Σ^(V?Vcjmp0>5 9$+memZð̳bϥ>Um(n\?*Sd|Km\fxv77XwGc`!W6Q 9n~7:SWQM,_@-Jؕ2r]BCB*=i>)/ޕQy'cdA̓1b6k/{$BL7WxJ]3BZs ♛1Atճsmºbho>/N.*<`1Fzut)Q!t _Ϳ=Y3Ys.?LݠRl)Jơ6d5$*;4L "K=G׾Nz S4NS~[R^'C.Qj!lj~882f(Y jќߡ_`=Nr YoX8_xʢ8:TXI8yp!`ނ?!i<+oYV!&f:1 ssm'/ +tcCMͭY$m#duHZbNv>Fȷ1¶i>fȎ2gxܵF_-]> :d00 F"* #J[H |q] VM5*3O8E%ًT,AhwwoZ ϥ }Aqb;S~>swAJ&; n-ʲm gYj͎62 Y_dKLEGMmS3oG{rpGU׹&e֑F& 婽R1i!]:2Q~7ѥPQ1D;'FuQr:F`VxNjabǠ-e ]VG6̝eOht_K nʝr3W'Y28xQLKUw^;Ћxab;@JO5c-Qw)rz2M-kܶz/s _%ab9_ jp#),_ gEJO\α;~sm yHM1,Y|g$gK3[ һd^'ra' ׹:ڎ)9 *e$5`(;&GzC/8i,ƈFϋ(b ;.{ILvۚ?,)ZKX7w%u CMs.~:Z)?:sG䤸v{Z6r( RoϡI Zu`? /BZM 9@_?BsQWXWܶnmόܥx$*SlPamژR>7ոaSnH%z:lZuo98tNTN_<¨o e ).Vf8 H _p7o\6LAxYhզ/p{>-lT$̽] ;ؿ?R}؈f$pJ*#_qdp\.HRMsw6c&ڗX)+CK?23ۑJ1YͲ̊vS.M6ƶ*S[ ֕U d]Aj8ƁҼ$|^>Ӌ 4x2!Gq˖x践h0#8dآrpYMzC? O+^ qc"dOR{dl72due8j8߱N"h"IQ=lk;]\IHP1.RuJAT<%iDxxԪNXW1hHwmtˀg() =+1hUU%ap(A\hr,ϊ䙞)hb)b_nKP!S܂Jx23ר?63t6'cZ Y[iXI ոߑ !x¾HtuQkV< M},*lZT[Av$IvLH`49bԠ3JXIH.7TuEDcwZWn ?kڠR_a_|_.*%4}ǑoǸ mo҄I.ᓛ*0m}[-)($ gxMb{p`}-ԿGsbqSjYHn4k ^lSU~/ ArF`@zvw:=`A^~r|440>pSTƛMA!q;Ύ1lup-Mu"E.WÀ;ІmB=csFz31[LZ>9 7]ʤ"5aJ'C(,tl>\ +xtcY *dmweAP DMD"A1U\ԫ;fm8Eմ_lV*[[p]GVJYL #Ԯ|OC(5W$F pg.7-V~tVMpկm)鵀N?YNN,ȢiHC ^uTYޒDVڛ?⿙3|5F8|W0K']IJ18^++ICE0ZmyQ\y}nw ,ϣ_eΞLFÎ`y 77Flgz|Isͷ2`Ͽ'K&zSZǷdp o1djkl6*3f}qu_t@ @DE;x+wRvJ[Yٜ`GT.)*̘MɧڜIzCW]H󸌜<Ns 8a?mb l%O >X iSb?Cn@\fٚLG5/RV+?esf]y. 
oI=4\l=>;aC`oN'q RrqG7'urfC+gPllB#ZR3ꍇP9aXe.(ˌ+C_^\hf,|> >X st|_0 ePSCr'"d"&X~i#D:}䢆t0t# +è;TՓP5XE+K>lX 2Ko<'y}9KG1^آ1kh7N6{_ /yccZY&YNo\Aw~RS.B H1薪[iҮ)h;+S󹐋Q=y2i3~ls]4a5|ZѺLZsh"?$H$ˑw P '=>8}d \Vaޱ2| Z2` ΀0b [*b.UN64=]1Zix:">B[]vsgE.%xWu0@~Y`I֑z:j^ս0 EQFL!VF=2kyh^uۮ >5b> 1ڿ(,o ^q0a6ԼiQMhZA8[>|kgXBibR-](Y jcEM4\2m?=gXUwfL'YqJx?ם3k0$ B{˗cNqm(#)8GN~m+)ޢ02*6T;=r]Z{ʚ.)4w3Z݁QHS}IemߨJ .sY"#6ƅ8ů3?rO|E)o zXBct;*~e*(nv E_wr?)AR2,ԺO?(ep"[UCYеW- ,AM;񎦂Z]t#W:S7[ʏƪ%? \ _J2$&=((h ߰5FR[m47)_Uv~Mxu83Zk?NT%RUkXge!WSKGFL?8^g+X mgxl R5Thh%alYn[`BIvNNlNfHukio܃y~Qu  TO޿s< x*M?[S, ip`F/>PG'xZ:$t5R0Dcfշ1,;C^y*bX 7P)bt>W* cbnmަ+F ̋FqKr{Nݳ:z3NCP?ͧ-Aa7*~yo^ja"!);=]q9Ѻc!")HD?隒SZpy\ՔbMl:+g5ʹ/SP2{B6*Cǂ͋k4ӯ N51}DI}|{R~҇l3{$aRwd/(Cvj<4ySP]6E5 6RaPr[tbs648QdscCl*bːLp+EcI >i+3S LZؕ)뼺 rc%^SϖF(^,""xsxm98d/:$K;\3¿˯ Y5_4L7j&b+Fu@p1>' PΜiL Ce;swWlkRWCz*J$Kb# 7,cG&,%ٌT0r5~nUӯ ƁWf$WqdWvWo`9ٿ{V̖ݚ&i~~iS#O\uEX`צ{EMfwkL3 1&ե%N@Y3V `H :;yxK] H(SeQ%QZ0&$]<71]W|[>)aP8_;3ÓTW2 o`$MQ'ڸy|">-[Q<.0cp_Zzy}/;^ʺK IF ˝lܻF7f'G4\ηe?ɩ|)W=^v |˙l3'68aҵ+Ot-`tz!1=DzT|0NUg+A+;%&:6[~4]A@/O~ca=K|_<'!_HWRcWL\aIY DA"lL(:X2JBk+{IK|guM̞co~̊8\' _bE g3BBoqJ2 1?&J,Y?ކs'~=8g5 MA\ `u+CT3zC.oS>=RQ'^D/vM+V%o (YP"J8| #IƏ?8{pD}_V;hi/E[3B5gXE ©GX#GwS1}5TWBH=9ḹr[nLJġI'>E3*I=¤ tCɓw"bk=q?w,jUShRp >d~gEݗ9ڜt5ʧY̘҅pPsge b:rG&-21ˁ F Aw`CZC/8B)aS0/:Z$ \bjL&gz-o *9C_Z4Y 6foendstream endobj 308 0 obj << /Filter /FlateDecode /Length1 1560 /Length2 8235 /Length3 0 /Length 9257 >> stream xڍT6,ҍ%CI!] !!( ) tw7{ϽYk}v{gk4$,`Y8̅(R@nv ^ -Ơ;9C0H9]e.vp@ p r . P_p'APRpO'c &w= 2]lAP&xWFaAwwwvs{gv(+b;TUƎAв8%ׄ[;(9?z,NM%_{dw8@p{s'f @UY%vV99of[<9@VB`X9 .9~y R no8c4 zl'_a^+w0+XAoGd`` Z?? |">+ ''rX!0Dwx }2~%rhjHʾc$%/6^. |Lx0n#U3}DTYؼ7-^&gP?r `<@q(7d]?j?ʮ.k \ZeI8u .!gYR Acp(V;C~?56N t{|N'G~ܦN)-/q`?p..|Vp'pho$0pp8>1? W@NN>6_[{A3SpPmUPU;n ׌S j"ӧE UsY{݆x[;X&D=#ݔlZ:v 98jyW]rC7>Ƽ0i_3X =ZFsh@8ࡩ! ?Ruh7jUs |ğ?op>5jUbd2}aaΏ 4`~E=Ф|u{], C?b=i ܉{Ļn y)_|oTH:vbǮ9[{MSqAQ/ᩊ12gٮS_C['nKNP:ߧ浐Gr#r$L]ISFުUtUȝڕʏ뜼Qv u -~nX_݁LKAS17g-DQ)LtV2'o. 
ߵs"čHW7SnHa/.(8ؐ pvS~ c6?g]1M8ys]ÙU|8!k^MHOk䑬^K֘lW[A^o!cŰ]hKGV}5'Ԕ=b">v;uΔ-h%,3ȓx=)WlJF|-Fz-˵0DV<%uQݑ늗@R](S\8YN*4:OҶp5SiCJZCEyьNcNa_QQrXx+ UX}峏RgwE1d鹺$Ʋ<[ns; U$+Wf<׃if:,FLh A8j OZ|O)E%Di:{9vvAxf?zY 1.tbUs` KRg8N<|7Kv4qyؚD ;*SI4I7.޳Mkٷ!)]1ZC 󉅘o$ > 0[G@̓z*Ǣf󄟋 O9O8x؂ҕ:V-nB%)Yg_V&d8C:6}^Kowjם=YkFk/QdzOA`ua Gy)9-bHI ji -(aНa#(MÃ!фZ\2mA|Ujd'Ɋ|.><˜ǏEB>o",!5l]-[WX_1稞TDo<Ocv6S%;˙qNYv3qȟЦV43k?ΠxJShi7^&H.N,9ḡ~UOA6Z怃<*o1$OTfPԅbLʝX]>@.-9* \9(Ϊnyv?xVG;=|΍p 8B^Ԕif:x'l\[E#祶ΪP j*/,eӓDzG"0Z6^*`{7M@cnP +9/o~+gL7@Zc+Iim : .{=L.9l`촗;2f2_uKۊ.~O@$I||H-K¦ _ҳ~;S[OĀQ=uO>]aT0uOH'W- 7΁䄍6] 7?sٷHATp/#TDၕl'Tz*DN[8y3vV;oc@.%81U/#)F4% 9bw:vbO4LT> 9{iw]92kcA8*,Z`"gbUscĄs~rPyb]4wG/\|iC+ya F g~Z3Pa#7["b*gs{YU\Ą3VreKbU&0.v0Yq[45V~Mn ?_ly 4(,E+t]W`@/$+Fwp{e6%aHP#d!Ğh"))srpP_A|,tU 7fݪUx㜿sYCJt֩:C2+龏 qڥIzdoCH]^{=hi\$-oi}Z2 hQtGִ̬|^a#W:mvomo; Hm&%#'ĕʞ>x}[P?);{9$Z&Mm+Hg%Hu8Dzr:ZmQ W -RFzscJ>-B[J?D#Z(-ǿW37eZ+z+TLQX}c(PTTk2p"'.rLagODq F=wN/ݤN`#'LӰG;m!{|i-ߔVb |#/Uq|yu~n^s@S %][_I/ gQhݦ{44)L3 A?d0g暯j{Ds}6 s[tbPˊ~^}0)VVvmrvx|WqDSggt_vetk=g?r4&U _$q@Xmq;F07 _(-ogi!j6wzK|>eW*f1Њu7M]hx@xaW^욡 2旹ˉ?ƞ7׏씶x]|5K!xev?ZnO>mG] ȪE\h0,=-w|{rk9x%M:~XY¸aSvrB}$NpvCafo45HE3~T?|!_NJ5]dh'D_?Đ|!<|z 0Cyx0Ry˂,Dž|[N:!R2JO͆󲤏us2h4w 3EYk[6oKu:Ck{'`8Y{czHn7T0$ v,{4i$qMLsP)u =RowA6rjn, kګ'rz&6Øppy 34f|$/3 '\ݟon$i1̱2Zeh3 (P^"8~ UlYٮ5y%'ܼ-ߐ2,3%/R52kv\7 ߚiri@t]ij:Gyދ_X~X#+1!edق|=/uH8ޗ̧owg=aΕdQz]W)VvFT-xNfw?gh(5)%'Il>\%|ȷ_. }Wԝέ;DDl-Y67kKT2M_9 r\9{we m_NYE[d5bҕ6 h[&j'\GcZ%pl[ga]TNbR 7$,I3yGŘ ~ $U'F=lEq$;WoD_==3$t m˧vb!&>U*gbH|{8NN,vpNoNQЍ DD?ɭcVn$Sz76ܨ^ob-:~ yNoyC@ah@q.jxLB1 =e^C0 .2oM^ΛKMPoӖB#DWh(C,y%| r҃D[?|,rU'/о'qhM90julU9@& );],TFpn: \IMe2;n9%{TZЖ7RAMY<}C=}rV .ajJj uPES0|vk=lX V.:eQTwlG \WilhRQ|۱QlrGwULV_][xq=Ž/qGGKYDG_t i #O_W! 
o鷒آiw.+n|́2ӈE [UDePP04o^7Mġ.TbSي6.^ZKmq 5߆NOWm\jFb+"+s=]V݂|=̓' Jb7$3 S;ly 4ȰokWE.^{j(_,/?LWC1drr8֮\k)2ZoJj,w];rDү"R$|@2Ny(Ky,wU~ ( eݴ4XgQDn)g}ՀBnC#i)Q o%9zĕrYau*p'Kb4Y CT{x:=TbvE(9 ѣ/䩏ski3pMaӴ_^UǹhS0jYIT¡n#co~JDRr8]^I%s\p> ȺĖ:4rDQNm=rIߝ< ^o xJ'!}KY|9`j^] #KI}~JN9z@ {F<>ސ!-OGEȬDB.¦VjgT*9(lF5{ qvn(K ͬEXwȟKWưRXriL!{_n{dǸ_m3z9hLL"Ae~bOxթFp;J,<_{my IJiZT-fk$o%;ZM]i\1DvGهP4Hv|7WR\w{DUu0W S}n%O v6ns{-!jIԸ,q1݉û 8TKA |Yc(}vq=^jSpo|8J®|TpmkC[FΌԾ5"Ws.1HI\oZ+e6$m/GT5V^)DV֒ o#MQk4]vI 2h4g(->y*uUުV-~T.f[a81uVԦΐO k]T! >7HB-1봗6F/K^eþļK]4ap8qlɕRȬ'xɃ:,`VF~ 4+l|$v7\x<зymP!/^l,XGmi8 yi !:۹f/6I2ss+N~կ/M٬٬t薸:mfN&{*weK ڻb/Cv8D{\2^ h(œ I&G}'ƣ86YaDLlJiTY.J{12qʹy{uxy@.u) \UxK sqBT6@9/*a Lc;FDg}T? ,~l'6z %-StMyޗ ɜtm-ܼb1+MG(PSa)Qp5q=<#3]f8cE ܦgmx?-*BSIN {M%Q:Ғ*;N<ֶZli(˖%?LVl8xhx.S+J{.[jݽGBleO6Kݥj,IB4OLQG͝]QV}[DKp<_9-Vq u[hfNWw)G—nh<`3]hR''3Ԝ.a endstream endobj 309 0 obj << /Filter /FlateDecode /Length1 1731 /Length2 10552 /Length3 0 /Length 11663 >> stream xڍT.Lwݝ =CЍ JtҠtIwwH<|Z.hK[C-A P;7@V]Ɛ ŃAO vsc``(D?,da &t{2TB*n^KoC(L [9*P^ ڹ=d d : ЁZAnIYӓJ090 dwoοzB|lk4ݝ9 `w_6Oƿ- %( @^Vvz;Pr8:C6O4@`? _Ws% nn5 ` C0l{֏/ӧ B5cĜ*F:QG)# y|Of UDT@xD< Ά4O 0&\\VO?G]H=ӟ= >=݆:B kjeSv>]46]^ kMݟK;! M+`Y9<=*OCz:N)Z=a07$~/ӑZm'x"0~OTCpj+=:`['jAI)ѿ rwq|*o`? ??S 6PwؿSRSg2;?uGͿBa xzAJuGOnv0TϿQYÞ p㘞Ǜ y0fV!5!-Ui쾳8)̕,.S:.|Pþ$i5ݛ'hm6č>)ؗD`ו{pߦB.Gx٭US2?vjSkR@d=F/u\IRZ7vJ4/#UX1bx?L,򸶓 3"D cyB2[)zY|:K0"\Hw⤪>m&),]}ZlO0G} #u-o(}MR]_y&8恎R'ޚkU^_bwե8Zkf g)pLQ 9nnN!\y>ט__>9,tcz hYj$>|l3" 3o_uyMj¦C"sX)]%mkɕB`TbGg!Dr^i:ɑh>:D!QtWQ{\Sw`V_*Z9gnjb ej@a[MUgc̡Ǧ.ڴBAG픍)7FJ' 0bW*y5w+cAU.hj*chukn5ݕ10AL,jLBEvp: \oL[N~s}4yB1k=H/Gj8W*JHM.XV}x ?1| 9I]@lGfeJ9p![~DЗ理\fqi+uF6Ym:`e@b#1};nǴ+7a)ϭîI! Z]o#"j߹-!(S ?־aim<NekV;gMN8o6ݗIyeGXn,5翺3K;g ςo2pjB#.F?NtxW:]B6>58uz9l{H27a9ւ\5CKnxׅ8sЫ*z߲ɽS۱c&LMi+flE ^1+*YFkj"wn=& 9m uH4}<@b<""l0[7U1(v*F|l*o6pM0콱WSօN|m0,j1חۍ7@Vt-,IkĔRePYqDx;=j3玸/ k^G}{S۪I֭6 XlctG¤C54y5 B:1 J[/es-Ba>_|WNq!fJ̽eKoYĎl7/fn ,^F5=X"sc\WT_+GeaREk l߆?khej%g0ry-f]ЖMB N+xcũc[f vNkgVKV! 
)Ʉ5^`yUyӬ;| SƼֵM Z e2vS?6HS@HV6yUSg̈́NٗI${>F55axɲu*87yiȲ m3ĞJW/Z]N0SFp]Ib;. X>윎@ f4iɦ<\wRТ-EJP ~ϣj#ɴ4Qk]bSGeNZU]th53~M%{"Bo:K MDmM].-`S='joXܼ#s4]VWX[Ix42U iQ[gbmGiXpAGM3+c-wÓ QݩZa}ُo+2鳬 cFBF!G$1gu3NG@aNLwlW30-^wg<[r}pĀ5D6{`e1Z$Ke>­͡y[+QﹻqWo/EjUr|> WgٳfCƑ.Z1`-GThNf-B#zGq%;>ۛM(Dyy8:DŖ+}Udk^Nyq? sw1X_|Q-*Vqֺ O>r$zVt6SEDհdd, ;7qL•{h G6J`W8:?]( `ř3RJ;H/.8,K'-oP*Ѷ-/{EXjxSGx䍺X hŹ 5-x?gN"_ge[oF1qQA܍&P P/Żz:.=Rj0PKV ˑ5y!y9QݚB]2-I)FD T㨰J~6fnƢYG3o ꙥ ͇qA׾QH7x c%<|Kxӡ֜Gr̕7|; p)-nI["Ѥ;{X{D7pGʂ8,SicĢ \>xO9`@[?P%Yn_sY!3eo$W__̇(P@;Oz6vpuO]" dn^o9o$~}]&lkl|l!GJlzd./Dl pGuJpɣKtʇl[M?wE/! qI&Gf;M;ܧRn*1K$-=7I"iy Rz7+gP5qΩQb6¥zzaBeWݖvOVRYWzA6l,I札p%[n:NwM$ԆVW3܀]4 =|}/?@l3CYN80rjR611ϵ"qaG{2M? g| o]Y fT?Y9 Kw67mdO>yo ֥.|_A fFֻgnɬӾ)93ljF5!G3tgvY]^\ڭ,$^IBaB8FKJ܌2 y/k#Jwi|뤪y߸UӦX!xRQ&l^gBU.:5SY?[魖R3e1S7˫Y׵Ӟ>Dqd3 yN*>ͼz1:4.c6p%5PAm#^CpGE*`j(M)ޕO[wIxAll @mN w1:﬿$ܵÏ~ۯXOKUbnx7A==^`Cm Qso^2s* aϯpjZWWinàdK^vBlF8^u>BŚEPAy+1*wڝ7-gqwќ`ٞK Ldl=٪C#:r/C^:eh`fl*f՚HU[p3  uXD{Ք!r~aNڱ@t:ti XAcr}I58JįMKb̀5]FunՃC|**޺fyWzqBH[Kg߽8y1iwk<JS5hU;+#6o5x{^dԝŌbF$@DtXCm{F c~$ChK\p̪-NZg=fU4#k˼[P<*V)TxRWb _Ѣ|L*;ȡ[%$ᘂ&ȘdPY- ?oS`;u,4FE22wGFL?H5 y%l"6,JΝ<gjVLո\*Ӈ+d|22ն&]͋ȘLx׍6Z8&0%_gF׹74b8—saKme^5U:_uEv$)%}x׀++Jl*'Wε5Yuo,Q3EUV%!.94/LSǫHgWi?{U@ Q Ҽ :Q 6P8!3A6(OoҼ;2s~2{o,h>L-v1S 6I9KYB$EW\NDBpt)Ǎ)R~D J\,t0-ͯ̕pbO*RwsAڱ__hvq;^ܶ6/̨ Ad 9 SLb0ξV}Ʃ8rɅ-hwЗQB_nG[{*C-w$^bm` ~Vqʺ7M)搜z2̖&ԌJ ϰ-qt )ApSR%U~B[~{QJ]&6tV~~DTz/Gr Q;A혭ޣz+O!Ty/ը)<\EZ_AH Alѓrbg\M @S:tԿLʺKV`t̂U1/J)+ NOP)^H~+OQˇB27>KdH{WV W+`SY(Lq%4g 8+ mVrN*a_ cw)9%~5{rKUƛ…}9 x.Hلil_o]ǣETH>WKB옂rj v" x󐭺GZ>MlؠĺVNzRɳ|;M}[Deh\aoƵJ Ks+|h2ž=SO_ T_V%b\uo|_{IYI O:I^ ػn'{{qW`r> sJ`u 0ov!`MRTԞ,m'/8o,|G}L>}lG:11eQEHwNl0#!LG\u)쳕rMrM-$f(Zd9X+p9;0t)>c*j%qPmLr b#$J`_zXعyW" jndoZ5ew?ߟ#DBVAa\gmjt=%@=v tȼQ5&cgX inԛ D WH'nVCcïYICxԪtk}xZ?ӹ>j9{5vI ziB([iR{# w)̭(kc+⪻Pc#^t{W3eM&rɐXd7&dѪWg?@}^..WHN.;33$F"l% 9t%Unᴩ$n4zG>cNC5Z _2@+ʚn 4Mt-e^G_"ˊ.Hi源sKzaUЇ}]XS0)uohMafO+l@C # U Qm'yK(5*bcH N1$j~1)t*AY-W(DdQo[Rrh|ք/MG!ī llS566ѥ l܆҇#3夦"9nq\R1E-At q-Oe|2yٴ( ~ߩ]H'C)1Z.7>-<[:ψ;SGR(7^dȜ !-]ݑǬ&JPM?SmL޽ѪX^dL<(AеZNsc 
e͉ʘNq\7WP}]P {y$X􄮚(I:>JơtDב!=²W{# PdBKO5>2=rdz>^!DH8̜άgޅh!yҕXg038f8wI7#)(LVZQPbGR/sr`-deˀ4_BE*p.EA;RܰX>'Qt&uu7ppZ}&vA#GzH,|&VCJ;L;Xo_•&v3K_LPJ'7yleXght|hC|@ɣ,_@R0q:]®>  ^-x NgD9(F̞|=̕ϿR yqs%ሥ.x%:F(}OPh!L"ȲBS=Zc%fbJ@˳;_*^T5gWjl?b8M^>_&1G9˝Lbi>he[ti@5tM+=jjD."8 G-#F5/W،_&jnMWx!& 1eJu 8Ӟ?Oa5_T5L%迻 dM~fF*.]'9- G-v0@8j6ﳌ~/S & HPȿ3xzbtt.$t>)pqz==ZE4q^氂2%,;ˠ?O'Aȸ+RņwREω 7] JM4ͻZ4n[j^;"ȝWYᦆm^${u~ȝo49&32z9`-GuAdͺ`bt_ Ue~~\ްwMq #~&OLq:ĉ7痽Zt={utGJ,Ή{WDljZOOM2QQS$T׸u*X3qI52 k߫Qo"OK\N =A&ҁ,nGn!*fZK|Y)UM<]LGdUqX' +ZL&^uw.щ1 ׾ȭo>qCxW4DfI6vpb";!Tв}ԯW{dͮ(5n [_-7s|iYPnҜƒtvŸ~37dw$u!cy`fOAoQUШWf`YxOr7ԣE|3g\'/vsM<3cE `$wk^~4BU!8evqtzY8UG"+Kݾ5xwl,MZ$3{@B%${?LFD76!#2-\p,8*THfkNaGhum:J9C/#Nwߤ] )gD&'>_Ԣ_c>W犄#Sz2@phTw,0E"0%;Ź30dܩ#㤈H@M$mu;RzL xCјOi%$% ݢy"s; 6Du}6^Mjrf5ϒ.oX^2:`/yK~fpJa%M/= x)4T$&S ­7xbotхendstream endobj 310 0 obj << /Filter /FlateDecode /Length1 2289 /Length2 15630 /Length3 0 /Length 16980 >> stream xڌP #]!4{ +9*ղz{ %W45821pe t LNVaIU6\v8}D >elmVFf##;?\sc @K*lk`njQ?FFNNv@s#@?dM`= \'g?`FNC 3_Z `翟t>&OWLUSZ\ߒuxҲ2hX̜vVQUߧc(acb G˿'kC  PmV_9[YW2bCՀZh![+p2ASQ h,odd!-|[mzp sF]~15kX  dXRc߳ uH|:ulzLBz?@/qE G?裂^T>8 _ Ї"Gc@.csqP?iOI濠˟lֿ1j_q]fvffo ?b_Zlvv_26V@?V[qHl6{g['P0q//?2QƏ>ֿdwx$|ǣEdǥ}HurGG?;?n,Gל~z??oX h4okd#FǕvwwtW-sɡ *:3Ý`P(ŭ2qK=TDgxũVIc><\Ze=W{/UKNI{gD<_nu}e+! 
{lRp/eӴ*QųYsXDNxTnHw3԰'̅L1skL]_HhbߢNy Hb.xJD7#سv.Ll` !mO)l Z)ew`ԱO2:*tbxDVf:[:3&XRCv,-~&Ox_d2u4pC8cuqʧ< ;Dl0hF ]m ݒ4z Lbd{c@e]Gx{]E2-TLdͧb&,!CT ۠%)y͡7x\SapB ^Seg_u]d'""Yj]tםtl Ux<_dN(T |Hפi}g7@Z~ RO;&K'\H:}.̌1+e]*"t_n^9i2֫g'5W#0O)lV30=(2[~6RkvdCjlbnk+fǮT+]g#e.Xʰ)Py4y"YVf@g=^Ps@)|4*#ч@1Js%T_!ws[2u29AhjfY{aHh[Ȃؤ%|aj" iqSrܯw嵱Ә.$Ppn a=o&/FjER^p?)Sh#Gki3-po.Jˮy]C} RJnݵRN*I[]6,bP ܘ|_x1/w\9 p"pu'Hssȗ>8=SH6q wdf;.rHƲ&͍>ϧY ?0菲:TI^sc >mTvl.ÔlpU{CUA,%7BB$qxT%N!X|S[>W탂C3E<]/zb6C[ux#CmrMf:Dǖ&A$5PDf*$'N|#g h:E{\C*'v>1\!f}no+"P[ Ƹ4ѹ P̪dhc-vBn0Pw{ͽKid_cH={ zɹ@Mp}YZmwEoMU8@NñYZ?EfѯT ~N *+EN$9wi/Y$ zLZGzcofƌ{xba2BߨQ-T9I| L= 趹,bmVd%Z1v uq}lZ*e՟i8 I޾%:ѫ/Otv*`iV\ Vi3k-kf-%c c* : G~{"ekM{RbJ/J1!Uom(dgUiQW-QʖV|?7~|=_ M||vrF7ai8l(5v MxP₨p~ ދW }LFLDl.d4y|T`0gaKI#@3b=I>́C$c$cAwJs[%?o7XP?(g44.HN ;a,rYܻP>yrnM.Xt䩓=ɋ:gwin=WZP/#Y|;A"̓ZFdѪ*〽x$Z]5'Bt@xBeEA"?1 OόڵƞrFn@xM< 7<\iL[nU^T[ps{΁r?3'MHx.tKq[QVWi@JMhW|\aj̛rE dl n[u^#fSE`:uڝ8g<qO681aF"|kʀggpxwh\VO}{K?U#LZo$QrW; G%@y[bhnui1 ŽH 7m-]3_3᣷$ZsOhcµ\k(8zhHH*e+`lgh he~*,tZۺ-Pi;ysm˨н#5xoS^c~Yx4N%栵So-Wb_^L #dWJE,~_rߛuӛJ3͘| {rYlPP'͌UYpeD6JS+RZ0I-euYz:`PHhOa4f2y,G0\ۣuo.71| 9І~nWzE#ҽͿCkn&X=M/ˍ| YZ_n[LmN:efuiRW<dž·çҏF-딼N%C=C"w"6-0aJ|<2=AH(#R-?Vˋ ɻLa_+7C 魄r|=ir:d!2>:NcFjpU8*+vpt6OJg;jx=xj[p7"gI§%߭ `2#" "uh UxC.Lgh ',v7JF؊\,|W FNhuio/P.YsFeJ-Rχ=:܈jꢸM]$"cp:(C6fOIqUɄB|mXwb"nlc[]`XS8kp#"Sh}3{R^mHӣV9,Д+ED<¥_(.NڡH,#I'Bdfo7ĊǍfM@j qiوF/ W*dfcalqrOjLXqէ/sٜWwv=w%g:S;l!6}cdAcۘuQo=jAB)+mIO]6lڋ&ږ,)ƺ8Z`dgheW%/)2WI«06gn9Dҭ{m a~Z>U̧-mvhZ{옲Y[QFI[3(+D2aߡ}@PN6Ůl9-i4Dbw?vuxHPCcT5rVIg[Is8a-mO2RupM\w9_:mGr׹"0͔0]Z^U@9ʱtt.$F{*y_m5sс=@U>,5($Hr։R?-Rþvϼ;p5O m۴OI𬎸n9_*arLZm WyCQ$5*8%_ ZVTϟ7!qڙXK琽VԟN^8.\/ZMI٠ևbww2 6M'I5C??BW}d` }P#uX;LG=thYoqU0tOmBQQ|J@S}X,W9`ҝpXT|w$oew~%F pַF.ʃ;rzAؤҴ1TW}Xyw:Gf {h3>S*.Oa@x&U24c8GNҁ92hU*+~b;/#;!V롕9y.0^OA  $;O2A{I D_d=n7+`;ʲZh~JJ}$c[;7&hZES .ܘ)5 &W)i*tAkt [h*ZC~d;[exb_]A`vd6KJH o ʳdTNmԹ)y"&ֺkK}cJPZy>yctqT}Lb^__F'ncX+WBAQuA טAOiJJqefBWn/Zmw+y "_vBhl&Z0W iݷ|֟ž){k9OŢ_t" [~'_aȌ:Pf"aC4+;-wd)q oi jF*{@8_9CSځʀ/jG6p#b^D?ӠH;J7C`jـk℀[VC ~ేUy , UCm:ɸZa+ܢ 
ĠRvOހC5p`SƄC SN4oLp[^@;`U\E@=F4ls8.se"a.'߻#اU)BKUW [-rU! 2, G7 >t,~eP8i,I`z0=7HڸG)F;)%4w3,=`J%8gO\Cu7 v-FcNO qYjoexL>W'ICv_Bq'(]ك1ވ @,lw"X\w. yӠ)H$wY0]5VuhK |x nGF|ɮc8JVpY#}.6/H=@6TZ9&۱ E6ll T$ D[(܆{#ad G/pIm&ڕqy䕯uc-NF,f#Y93Up3V.  yl^!Tx3/EI6Nk#hx=%"ALZ!=Q26NKV |ķ-itO4wE}C2 `/_.Еuc6*3g&XH p!B%3#.#C8Bկh'[xUjq#~s^~fS,n 4 v|yxJEht %g޳Pz`[S,'oV۬Y`bs5b4s0c ѻB #gpҭԑNE*|~-Ä5r/Տ7g;ҙM%$ 1+~LycϦ˜o[RIO?7˔dq'+DO-E ZLj oוF e$cGvc;Nػׂ{ F5$zS-O,JD6Hh˜)[uV|'uՔm!q-ps%?cGBMz8?+jf[3<ެq9m Ft1t8L܅|'fv"UƣUalXI4PSrr7]l'wMBJXE%oEUngᗌjn^>#>۸ᷔ-ySeHG>VҌ_64'mD~Y'!,{X58=烶mQ#*zj rB}a=s_>:G,_fs[bHispDýns\7;r1Ǣ;+܀HCŠ!"e Tc5*Q̓{ď;>Pq~a$йK ,KL(ۋ6MLw0x )Q2-utGλyqm+Kjb׊tZk} vʸ\#Mu4MkóS>fou6ڇb楉ʻŤ,j4L(b#~p@`m]-|Hb]^^\ǻgNZIo[3BM2#rYgg8aLQU~U571t\L0 ,65p"rg1st5I2K1E"p<-;p9 8Xb8J-u^m&E}7;Ͱ#%,vD}64Ԟ͢cA)I<5 >"/SQ=]О;GԬa>[UM2 &|d}mփdDqIf/qmS, hj'撟Uil]8*poˈ*̺oLfv]&d_Mj.w(d\tyxOE;N˨״WRk9}r÷ 3,C&֣8}F~BEq8·+bn .VKmuY,HٹN8ˍ2Dxx|_dP"Р`˷+)H^7;+e 9 pYzĵ< 8KT^j(L]q&I(ʏ= K_lIP51lyS{՘OzS)DFO!r ?حS6E%AL ɏ_?aSSG *4GC@&n@EUw΃Pқ ;65RA=7z#9}~ٞ;Ѓ,[1 ֌鼈=O5ղ밼1pŐK3Za)j 79_ Wa2>n|S{Pe$*KQwVd Z%{;r;tF OI&z*E*<h2p> /O0YAUTbx|ha d/ls)_N $ʨ^.J#-єp}P !56_sF;K95xb!5cp4/VS^d/ %=h+6Wύ!3'ܳieKZox FW".O>Â:l9M-uFԱz6MyB;6aiLj6cGߒ2Խgކ>.%?O!C`*зKbO78{2^ b6mRgg\c_CB%b|[;u:8]r!ATtxڕ{/qYxv57o0&ӹ[Xb:IJ YrVru|׵btrVԂK^(G߂*)$$:w[$ }U"̠LsH^Ho2SFt"I_4}?G~NЙwYo~<u ~v5u"K[6u[攝 j2U@xc6WN׏8[exì{?~Cʰ֓Rv ꙸtH\y{`6| B[qwMr+aҍB?7`&yT#sm3D YsX%4\O=& Ԉ)Pb̖f 6; 3ԧ(3WxmIM[G@!zY/86 'rĆb/oCuȊfQ:~Fsή,@awVzEϸ%33B]lӢ[KR`e`BzE?x5k֜#ϕ\_yoGi#7l3OӵYHz\jQ X\J&[)F-Exo ŖN(dP`&e"pE9;Cu2s[?Ϛu ͐X{Jb^~MKDw,2;Yu9D 8џxTc Rƍ[@OQ2Ȗ$m)4yi@H_.,=AnmP4o(94H~FCZgla(״,;ѭ@ F2OKaK/|.j4t7O[8X#ثXYb^pF=94#ʮSloj(εDk4fyQCM)֮x,؏{`ϯJGh3A?[f[Lf6S]m16|c\K}g"jR +Y9TjzH+p8x/k+g@QQ.`-\:BkO?Π~[w>֨NJ)&N׷Viq+'X%$:Ky(qU~I~D|_;>rMn^ۃMR>{Rjxc'A.ڢFن%MoPuT~ j%R2A@EV#p3 ivkL5[,%=]K.k1tł1K/ h\ze猂 M'P_'O!vXA4 @jd`0V/_-xRS]),8$ 1Hd5K<_{J6 s~x&+\`#^Iԭ.OJ3&54\  x'T_iiY8eɓrW~.՘2'XnT%Bj ZOFz%ZE'$Sq{E :PU[ (_ o/54|Znh;˨3QLT%UP'[xmS=R@4`ڳJWT5!M>g܄@ãἎE*H+zfѭN k @W ї¶"y!)<~!f1s-TDd7 K~q}5(2j5;LDn{YH0$Alhew9ny)i/C(&(yQ 
OF^,1/"rJxg< )anT{L7u+gC(UfyHBj _b W@)o8e1_"uy<HF[:keFQE7NX˳UsoIDģC.|iT `'o+Ws?5Sk5ځ 剡FFtE-D)]䤚(cDe]!qI#ñ=RܮKi0 ]ӄvifor +ܦCXID暸ZqL۵B/>+g;5QawvЀF>Sس|57$TSav=~k<~͛~ovi >\ɭKBneEME*Kl6g]xmFL۰MzR BqMR;JCïJ1Sok˵g"ui pה+uD[}V@͖6~l~i-0乀e}HYhr6Xfj}XT ۸OYe@d\n5cZyo ][~NZW(uKZIۢ-AVuZ-56yS:U) |ɿPM67+~;r|0g0/e繆\鱼' يU$Gg%!&wn;"G:M |2m(I80a f=˱6qf:Rjt/ }9`71$^kyIuwsדrklDl&lHimV%(-58sn5fzD/͞_y|Wpy旌ƙ[)\+7slI})då^B(O݄lFi?vu`b6Cw2Z^VL{kR0#b!a{3K(=\:+y FjӐ[ʵ^$3ŴWCk5{%L\&D刡rqA;8w[Wm'^M ,kTLs4ޕ b0G3IA3Ekm_\^FgH'`^h%mڨ'LڽeHWs443%\Gb0 .G)Ai.I:ji[Q_=p1KZ(aI4o+֡_ &#lF?!q{+:Rи'm8$[8W4=oT(Qs3FzΝ%+Z @)W;/8B!"JT $B!@   6 IMEw&> kxdJsLGGxɥ]iFa6硣uS[n' `-_[  SlfA$ƐzilcQR+^uޑO WKOP[ShiAHעKj%SsF'cnP$a}-7X}`~ L-hAR+myUdӨF7%' tw/X M^s]ў,޳ML<4.^d\ :>mZu*O}5\.j;܅;_\aMzM %#ȕ]yCtۭsx { #T L׎- _Wd=B+6)uwSDӉ,jЌ%%J Zb8Tsĭmg~*mTS'[h֤3XS >yCNm:@$t.xPd[7i3@tnXDn P Ԙm5=o}ݢUֶ, ,$h ;@a㦮@@h9) c_`̝"3_Sڽ/*u|Xu^>DBXHdĢ8͖  ka`8nNqmdwk'9pi jN*"Rw |,03|n6E.>%,u&6o3ZЫ󸈣q> stream xڍT6Lw7R;FJtw0b#Fni ECAE=yپ+y|y Ec@@@O@@Uw#a5{xB`P4= LSԀA^.(*  H(!v >* ay@wqyprb<`- wmA.=- }G8MLJpx`O72@ +@<;  3ځ=wz*-70/exo럎 _ [[:!.`:A~*\SGsZ.aIfn`.w`(5)-q uD u?d,VzA"Bŏl)9l.c/o/g;Mŕ\q+Tɖt,OmNj?zjXx6H3r%`3RU8bO|ᝦq{LASw}rz'~|SABsU5v팖H6-$WOY14[r|" [ote@]Jb)^yj]| Кn%k/IJ飘}VShf35vU E;VTPCsRV' GuN6%CZ3ASSې<ޖ3mlbΐЬp$뎶m08>$0W˜:T?ڏ%:^=,ӐP.pZ %5Gyy?_E׬XƝztwA`kr%ێIQ۷ѻŊO* /̒d-3>ݜen*I}IJ#`~UF#-8PmO[a~ QSRs1 4Q=dlj3]mUklwm*Uxcg"? 
Jlb*YxkEK]3%x@Y尒\b+R-zn+bcCpP߾ٷxٖ3q1yn^)gkE˅T 3eX=}RvAD`eɱ|wq/zIw9}ʅ BA_aҾ)Bب7MHmsh{ !مfcdtw =gI-}ﯞx;r/A !D`B6(]MMBsU8ܫu wP{WGj!VgWVA6Cr"|V AճʍW1qjbhYIX3DR}fr\HXARbuwP#QΛ٦KGo\>;dG_ks~* Z_%L Ȍ-A/Dq N'xC .øY88q"}DӮw,JRVbv?)8zކ&C.RKe@QϒCaP4$7qe斳%C>&q( =*=w$3 -X_di+\H'Xr苄uZŭ!1 (KtjĤb5$N lGחf)Hp&_Ʒ<[8U|7KS eϒ"R)(Ga|a qC)h{uNd>l6-~\pik=-.~dMƔ=ʸ尞JBD%=܈τu^{4Tѩ(MDpdkSEUC=o 3]Y0^X*/IP_#sP:E>eVsgcT9jZ u]a bXHhvM-^'Z2B_׵5P€cC=*х5^+K:+ѱN`LSa2\Oseٝ,\] Ǡ*v=(КqYsr5RJdV[ 7ĄTY|&{U~33!3B~wU˻FS/U\yyz7( {}SSX"ܦWξޑΛϪYuc j\GA{tVcl6i7~nU\QLꔋ R#Crm_ʽjte2ouHFHz@oJ#.%05æq_Po$S#,OMmׂD)4S'q ݲ]Ox7W ӣh6('+$')jtIy1-شR?6&02T uF:=qi8{{vC^T~T .1I`S;OR{NC0e-5tFlgGic< o~(7ϭ- W3y2bUw|Sbɮ-§#}3ćtzMzSli70!].Z. z7DOWC~X^>E[iT!d.ۜBm{׳ [+{3CQi.'k:߿!٭y`a"F•8h0}Zv怼b!A1լ] oMD[Yx#F&;:j90?!xJ3 74?N!N7H "sFK񂜯9WI劭fʍyݦ =v9rב"aO=DR+>/ $){2 ix x&y*G$ݽcamUC@)Ml3|ⓕJ.*QHLyk'QȢcz$vO^a`Y Ζ V 2ŕp5_7>X ֢~Mm* c,2^ ~xXǙ"Li]ݤZ*h'DN8b% &#t;ߧ TFg /|-j O.o}W{ʀOջM6 _QֵOw)t}o]%%"bFJ#Mm wĪ#c;+LQ4b.~zz2  3;#ISy+AD4lPn+eCNQ=}Wa]]+?@{>Ll@|7sItiR2߾_c"Q G/[&kq$8'V3m{^n^`R{&ݣFߌ[DĜxM$ K;zYNq[fbsIie/ۄ<@eDh/syWF aZ:q5g2Yxf |Gu<.QpߪQ{ʖ%s6;p_rPGn'68v:뜯RhP([7Aot M$'\R,C/D/P&_ sv;jM԰0DԸc o(|Zo|'JM^yC)#{MK"&alGjA~qqһ 8d&h#Z/mIN D 23\fp4M2R$ݼ\CP("4;F;#62v+FlT y3.VJ$Hy9G3 ZQ'm~O2DFK2?KWxll 㺼2ZnaqhàmHM{d15L7:qQqt-q\4#+/Ng Tu(7|sI 4INQˈbg>3>HW b1Fe1;܃k(-8? =d=B3KZV[UY<ҫ87}C?p.R#jV5,KHdD| J"7SwlW|ZV_tZh·{ ǛLzPod2 r (o5_+:sQ0 6&{*Ů|$־߸gHÞJ9׼؁iPVIWgs66%:X!BΡH,S*v\]8Fx*+:{T!'7'V!xT6]!W9m|ֵFCQ[S HW>.|8쑰EnJL)pegݡ7Û<9ΫqY*^6kݹh/*;%t!I}zWEF.Z;IRdniLr{9pX=isCizma8|F<1ldOW1q4}GJ$.2e{x2nhDR2҆m{6lFuBЂ:mh"Hjnj%A6+)u]2YQoޗnޛ@)l{<}GA uߕπT}Ɩ8#AklI&.{i_3il=1amP7'J'$^Y6Dg’Qt{ģK~AD-/+>u.M]%E>,A:/P|~bwopC(8%h!6vX揗 b'g K١/Ƃ AOr,֭J:=nKnGQ4A Vl|ɭGޟU|caT,)vuqHNl4Ϣ#QR58!eF a$QWj(`YJ1Ucvt}\A \t2dl{{(iKLB-,npIĚhzfPOOghRҭj%ۜQkTߓF,V˞B07ϗy7Z1 D/ B z x] OE7hv7<9*f0-8kjRsv(דp[6'gbqϦc:W\4g ,3$+ѭr-<ۑ*mGSxiU4f9g_F^AOtF#%KEvO=U? W>Ѿ~5١ud%َC#I/R9sf%m\Xu]yx4W+;=aS-?ΤV80U*h^i6MQ1? 
I#waw=i(ӋlmM^E$xm-z(>nT)1Zxë%c43Ӽ-?nD{RWX!*J0Z.,HURLH 7Ё*3 .f*"`c4vU6l6A8Fw>cq iӡ+ڬ\oERKnuqpL`@eKVl%g@ox+b~σ֒ðt.pq4 Ux6x*} ` 6XA$G%vaJ2WC )rHnV#u^L%W?kHx"I=EBT@yoFd׀Z-x3vM~Jw>K~(V[vqKrmP&*Y,buHBg1^L~?͛Y}u*Nl5oM$DYJIPwf: O2XEb!lr cg h#n_ly fUr#O;\]-\=Ԟ0=2R8M9Kmf[#RXbԒyoR]1w2;}ttD@W0c?hzS-{e;|uޜmL1BWY5L7h|:t"lB򝗜ay]YQAΝ=qDbk8-_|J)g- %=U2bHW(V"}x2Շ!7JIV(Q%,@ȉ#BY;%$e#2\ 3-HǨoifIGV)3PG\6) :Tp%<`)+C$YR)QصX^sYwdD Ah/ל[_hidvidq+6K uhdG=S58 22kWޥ*fc V)K}u$8~J F"@o8+kI|Y}LVԿ{<k"y7KGieMӻ\tI+Qd\@:GE\K%'>*`uw+p3.#=K>D۵V%I4eׇNr鼅,g ;ԼkWJwh| IUR!E."~IVgb]oR3˄}l쑟rcץ-dNc\-m+OHgǚj/ HG*jLdk?XTƥ#PK*?Q\l266cZZlFxF65aśV?oF9@@ }(fzcUn.iw0d2lmAIDfC*h?(]f%53؅5-Rx:`:9^\Z3'WP`\~ fb- ]Ҏz 8>iͮrO.,T= ̄5b@HJ؇塁IAJ);n iP /%jKA!RŇTYYv\C^t7"2?(KV{{'q).PQ}+ӗ;E!#zD4μZ97 dq&^=o+E`ֳamOܒSLekN)ݺy Jzxb>}19Q[Onpf"?nD"L/]Ìk=W N};!>G ghmsH\=E<.,k5˺>,,`%:pb1'ΈP9CyɣzNq,ECڿTFzͳgB")Gb*ŏ }d@WSJ@ lv2M(9"v9^iUhe/R m~[ G⏈j{59oc)(an9ިP/ʆ15/Tu6aF^3议H3ܨ~|- R=&aUv^g 2f7tu"rwdvkeA65myp]I݁CvsCHf;Q )ST<9LxA7ȶ-ۨ_Rj,KoG|!!G/wv\t_ګX,e#6l|#s6qC01Onl2\(V5⌭vT 7?5sla9hͤ/ެtg0j4Ͽ 0%xbΒeyRr3 wysꡤJ< GΙbYaFg2ϾThDVm:N08ߎvsknBZ)ޠZvKznHyQ jH ڜUo%&G ̂ pFMdSff8b_-*zѩ)ɔmav\Fh/4P ~O F} MGʰE߃zhW:b[C6!6)eitUY> stream xڌto I6m4m;ضFmIc;iƶ䤿Ysmڸ!!S71X;0sL0$$JfaHTf6YeB6 'K ==?6!}g3#4-@C"hcfofb~v/fi}GSEC3 2ut堣sqqշr7ᡠ9@{g7ehaHJfR(;K3Cû~:@Q\ k Կ .;?6Vnf&c3K @VDՑomP_YR"w`hof@`f#0e6Z;:Oh^w77?7 #'[:ek3;'пmE0d&@G =&6t54}-%o;/[[; 1Apwzy_023tM̬aDoo Ф?'?-SRP7*l\4LFz=3oٿ+ Y?L= !c>@עg7|bQ_f$di_?z}+3K[rHۼ5UkFfNVW+$&NLK/+H_naif q0}{Z_.=J}io |_CN2`G<ޗhm]60@'[//tbNbIAQdtr{?=Ez_[ښ[ѿbtfzw045/fa_!ߛAg2#1 ;}l߳{"w&ow&7C=ӿf3 OwV[9Uw{?5|O hwlOVLm$lo?ezlW?*;_<7䯢.L{,*{'[|{'{z~_?}vW?+ 4Y1 4 lqx|H3'<(Y* c!#`w:]ݍ_d fL.B#Hjg">9Nh^#>EOڞ_."_$Ze;ӥqU7 vX1ɲ6"’dR %`\z"$wΉ9q!Ts៳R= DQ(ke.M/ ȿu~1fpd*lnc'A'L{bB"$HmqgrI2I\s?Z:|ث-8:i[ =8 2cYgȗ^sѷ7`alD$ Nr75pK>&h]%ѽ"FLaߢ З^~HVcCF:)a6j y]&5uD 35\xwZ뇲])נͩKL4` %tX>n&I#W`vZn@3K;uGNol j n\duZ7I:?E+g4{DeWZ٬Џ{.ay3#C寍{͌'u:yl?1q 5>\|$4<ՅyzVΧEiyt] E NQ/`g`3ȥ.:pf"U Jk/MxdR*s̨mhq|}>mTKLrSz J=Q5e}o3mi{!Fu!~w$xAW k dJ5XK$(]kH@\h)Kܪ!צzTu?3r qa*y#gy~co 
-s4CZ9?eT|iM-4y_<2~O>Av&yLsCC!f=HIrkue$Ҕ,VqV]l]\%iͧ_~54kR*jN<3j}R1 9Iշi[G%1AW^r۳C?rzM6>J;e{ǽoE42_}0-;(qoąʿ$A,Y]ֳ`j]j(I ,⫞kl\'+O\jے)1x3U%>?@PXek/'"'ShcqJ#3MS^T"ەV6ȒТ y$#Fk/S'r磄i E3b|y-+9#a:WY]J7~`CFrWQvln'.t9jO!~e&WExN]#D qA_V v(wyqs`ԉeu y.ؖ\Eꏼ ѼPWs3}kn[R[ΔcO;ʬ(џbPc@ TeMh"lw1 >Bit#MM6Wv0T"2*;uto.`t]ة4'ݺY^:ˤjkmIZE4vEq?%=c"6$KՑ޲wE u7$D}O"C#jF( WlrSp/ߦqUJgrxKL!ބ@*$ݸuQ<:vq.}l9t|$g5*`~f Xˏb?ĝhʏSRt1{7yR_Q̩1'Vzm=?xt欵"s$֖DϕV+|dӝP @h!M([0Xx6+~tpvNoLɸv۵$F,ABxx꯽kISVN=vs:XP h߷WBGV_W \Ç;rĶ`.=ATfūf3?gm}#A^0ZݎbgS`6v-fޭO)`\9U8/ݴI!_Bɋ}N6.h4H/ ƉEYPa3},w\S1K$'q xLe?i;))3uHaQdf0{砃q*F p){qj^%:GMT`{^/-+_C ]5p.jJ5#dp6n'sF-LeUDM\[+ ܀Al|Dmmy$hDYل˕ 7nbwH?,wWÓU p6sP"{ؠP?_-"ZFEMgHyy(8z [|6Z|RJI Udؔ]wʜ4“;A6-J+,U?:K1:ULxNΓr U5=cY+")/ʍpV RM?sws0Ɍ\xqhCՍwhM D.KŎ6V}@3ɗ⦽6hguʛI@F13;H&lqhhݵKT{ (@;N|*tƸUIWzا ]3g֤g~8 _GpgLJ[䡕sGʬыTCDQ8X]NGUCf<"g)XWNj?fW; א/?َ -2~L <3غf"v?E~$Yg^;qmA|/aIhOŁp~ƙy{d/_u7"QWc&/Q-*W:Rd8yl.`u/Qѐ%Wxv%qH먮쵈5#\lA6Օs>SeJa+lb(m>> ZZYS]r*FZgƅ1W.4x}nٗ<%@TƆ`+29.l S-~sV$1w>MBl5 evѴYH 0k~԰޻ ]Vp_8zWN.g˲k3H(Jt/Ugyߟ-I,ݯSe%V.wy.7,V=%`)ޮŴq PM9+ qQ$i秽&ǕoN uD/.}ʆL'Y([#XG816f\ YM`0C>srå.!~Q% %ɷU_/ڝJ4\ |J6Q*m>i8s|]3ŚH#tlhkmAsLjE_}o/TIUS;^9Zn:ܮ]e[;rXeBxxv>Dj~viZKgB$PcFC+dK4Gv9OڙhCw8 puC5NLl\^Aַ)@Cݒ,Kcך# U/;vn S\7\aiҝCEQ՛N Sfi(@/T˥RTM"J>ڏq t2(y k_2)J7E 6xj,wÃkVĦC nYڇpA0Kbk+x%;W=ʻMdo|M6Bu=#eBJs$K f-㐻tOKOH QzeB7>Z~m_[ӏ®l\r7JtnOHjéxz>zg:p40L}ď L)Y{@qa ,X.3*C&A_#DK}fvkQQO;Rj>zAcթjG%UY`ܩsfZYWb8=jX#&lu~s6#fTIusJ.{%v!JRὭ)-z)#ȰPI48'YSE(dP=BO25{Ӳ8uS]$4?C1dz`&JUلART.[}5zhij π`T{"{)h] +t@hf,Q_I\8$u-m!4XYi6%ZH GS7l1`8G1HH ($>VkT#/n2#GWff[J9VJ\ZpgLuRg ' T?x46XX9ÈgZX Th=6B)&kPn~(KY< 9՟K uB귍l#ix ӻŽoU2Ms쟿KĺN8t6Û2d/J:(S^ʹƴ0+i#X Ȯv(lݮ\ R/n5x[qhٹ (vBnLc Z޾H$"ǐܘ+sbPX(f&3qc HJE9K͒s?sVHuQ:3I/YJۀ 1ZD?зuB5&IP%**C22₩C%ւ;{򸍰M35opAsF<|k:XU橼UR`]0kCOgN6*+4Aȃcf% ]WfB7#:a"腴\LVb5oTŏl+ vk%u4$Z &PXaQIQ OCذox:S}X 5(kvmi7^h<8 ImhNlP3xlM('>&E]0'9սA Ƌl瀋ΉP#\~߯C S|7֍J_>=K𩖙^*c RqT_o gK$]E՗<87uܑ {M7e8-c n/y ڐiaL=;tLP{W4-U,B}7JBYA5+W'? 
& Br ?HUT:"z{~B]#h 梻(DaP, :쇋Mg_{π0c.)EbQk5ĤX$pX (KEsU?y6R;a!!J؂tHiD~U.P#'.@Xlbϳ&bRU2rGn{Y`O(=ǙG-㾪"}0 :zҢKkcM[Tʃߙc',yL /SB0[8V:Sŭ{v$b*2ϥJ =ZA(PVuYkc2O#.jk?ȧ |ҘMqSUCr/G'BC8jf#KslSUeLE<0YZ0)L(u`v&C)!1_`p$t*h70#|GK3aIBWf{"a~ zb6 'x7p ģ|]rKz4]ECfi0¢ CEpr҄5jd'p%Ltyש&CDnq1DnnTq5t19M#>klϒ\ɑ;$ )5+Oe,Z~ۣ6ɜԉ'!~w-%Wڿt%%;,gS6XBz8ǩ+DYKǘ@| SQMYݚt`&n}d0D.)ĢV@rO/HMR[Fr>q-BШMgK50n!lAe{x]@W?C6+Ն$9*ۚܕ,qӺ:xiv~0' ;G|sz1fpq9 b{\ɉn6X-R*RE$ЂvqN~Acjg%z*DQr C*4ZV,6 s|mj@0*6.xPytc,W|z—sCrðyf)VKi&6jOoDw77%@3L1==sS0t3<:7õGno䅐mD.6V=@cTXyqqJn-xx+hb]#c茟=C)=+=Nԩ?^qDd.f<K.mBM"ebiԊi?K5aݥ .m vD1N㱵Ƿ;د H)bZ+k@Dbdl-e!|V^ZNkJCz"έvn Qz^/]_άCsYΕ 3{i /g6~\P58uZϑ y9`+1US.|MUǤt5SfLv*&٬![b΅J9R'˜XlB";(Z[g#4~+J7S2b˪!ä\R ^}Ki扁;\)ۅW(5=aJ$OLhP69 =!u9 f|8 ~ cdÛo/CIC}Cexal~j)2y; t3`SJ@@ \$( $ CF8\V@26e_`=9zR7EtS@W2iN :9 ;Ȭ7ғܜn)'hZ_ 7+P:"7B9{q-35TM;]Ȫ]KVeO|<ڎ 8~xw|{]&=q37Fm!"A,GPY0oZ~~[(\g4.Xrm/mo Ӯ¼~n-QGBc*B/b:6X/oNWG& ;JDi%I).Wv.™}?f95s?$Pܘ_oD8s}Vu0Q~Nߣz=pQHssE.C>*@mX32LE pR6 ԵY}`ogK[2=F&A"侷]NJYqk0 ktx$RX mY2&Ð.!踅 𭍤`[P}xTft;273$M%UAA#Mp 8Kѐ}IJS)W5Y H R6Os;v.}y_pd"S7w&轏E3|\yTTP{BB+;S!c\Bڶj0eOIӮ쵂n&oj+94l iyglf\_5>rqd3ψy,)tkFcm Smkt0}7Z*B1~`@ʂFnjȝ YV]fi+qp.E3[GJM++kT%bu3w{H"@G yh~6W[|;w޹%n@#X Nn>e q=XЍt8 8͠B  A;g\ $b@vCJgmT3嚇vm>l'.SBVM'jdT#:H qv$GODv]vc,oW\h:H@B$z%.Sdd&DXnTֿi/`ګ>hvɤJ[|Pf~Af.z;]֗JY2=;@1_h{hx`L$^_oiU/:" Ì=![mg1veqDT\_O.3x8g&V7{y{<1':5g>~BAHN֫?[n onhJX M]ĆΪzc(Ƣz_Rcw{rK>,6tKpL~(;='W6߲J)~OP]{kz)T B#n3Xj~jӧ8WSi(\F6E*xћ. v& Ϸ9E-;pR5U3',cfA/59o'ci۔a EHҕh< p稃lr-~o@:C/Hiyٗ'{L߄lكJݱ? 
R#$DUXkOIf!fdFPuEC.N$tSy *(f5W9LO:˜io >}xݬeAח (8puVMO" lc#ET9$S\[rQaI9q/p![~DO-msf0i( рLy['^0p3 J2b&L 7/4gzW"-xIyBNBԢrl%PW0Bz#e ̼5b+@C?@PN9!,Uҩx=SM.ӊ*Y/gD:Nk+{=q|spfp$EZߋӦhgmĕɘ&Iai*sE3/&;^x&Ҍ ~,ח Uy@ FmQj~/4!!d՚[S}yx i:W?8S Q ŞX`:I)_h*!E۟W,Ɩt*Q2B'4D斻sXofGbDBK`~I &[[pL1ǜ7zKRZ,T!k$8xj" a-$MlLث&4m`uUX-0mWmY52n6AcxIB6l2vg./!'**" L CHMҶ*Fa8{4Fx5,- _ X&Ji&BJPo"H.T^~y(2oe:q@\#Kka#h7ER"+ ZK (pnC n&:)vD/%#4G^Ν%c0PA׹@6"g3DK\ч@v[VtVܟ![&Cri4OM.ÝF M!AofP}*Z?hRmfg/Hz!騀2a߬Ai_&=>3(^800Ќ:鮇,qE@'9AM8xv ^@C zcn)2˞9q4&Q ez9o?P pW$E @yKRa+~0 6e]0~dFqxk$Ύeh-ۓ4J0&œ%E0_F_@v7ߴ:jE9Q;F&L1#B$pr!O9zhЂ6ѓW]]Q_CT_Gtr_(ļ ^܊f vk3 6тi(ld9Kc<ukRUJbVOp H3nwpt}1ׂ~FӇ1BFYaG>Sm6ܬjLkUQh9cJkXYVJ|&D{4Mvf:~PGs~oX Ir}{#(}1ah(` bQsГ*{V6mJj4wݙdvx>soBW % Ryv#)ȱإ,Z! tQ^YեV#P]g8iaԽ`)*]8hJ.P+}t-vQ^#yy6l9KV@Of!CU F^u 1,!VWpgt، q$`uR PNq_(ߧTdwC!᏾, tt@D~5-fv>f3:}_./R]mo{\—uo/_x_H$>S֑OՊG /ϛz d'V$,nsʑ "6bƚM9*}Q;PzWtsیn j N~}YI.}yc&_Jnjcaԭ#ZNp_N8A*uB˸zce^nxa` PD<;Xq239_ɞ7QNc!UaY ť+1I%7mE](:wUH޻!̩tf{˔&ET.eA _|`Fw<7p%ARS/y Bk"2L27܂0/LdTc=okPD)I5{&.F8oFe0*|fbm4↯xFXvF8;ˊny8FПQJ #Z<|Jx<`>*$kayDKkF*sءюu=kANQ0HHWZdձQ1 jD ԃyA^ S=Up)P[CQ; v_rI"dC* lC1aUn䊓dWb&/6VBeNq~jMf H1`ꚲ+F4/V0$dV-+RUݺjCYԇxW?kȱ_3 ")5x .|_>^JE/S*rqkůϸcqH7a6={?.&d ~Oz;?WWs/Rfx#~sҚCJ6?KCbvzN;+>o})x?]~3F{vfT ; 6gpd MdF[ǝ$_ eL7U% s6`}3&[˻NNf[SwG? 0/ʂ Z8Һ|-=KMXWxeUӼGz^DO/6vgê@L7O+$*Us `H߱*1Eevh&^ǝ2lX]67C>5;4i'vma`TXtr^Y (4hB t34L!R|z+endstream endobj 313 0 obj << /Filter /FlateDecode /Length1 1529 /Length2 7722 /Length3 0 /Length 8737 >> stream xڍT[6LJHH ) 0H#)!HtH Jtw)]99k}ߚ]{_MO.m )@!pvn.2?^ weƤȺTܼnnA..߁Pl P@! WLzY(lk%Ɋ-,,; r[!u UIprzxxp\9.l0 rqY~h@q`t:Px08@ׇ 759@GY A `u7n+W!0w2 B<[ TP〿_@GWC>vZ>>9 > wp;-CeNN . kYCm~vqAn eBLlAp?  U^e~` 6$@`+|o [ [0f|K ׃\~Y= q'w9 tt Y0OF+`z+0@aUoZ@_g2 ,fo}2M>;OߤRpstfN`GϿ up@7gA`7*Á! 
}8;7;Ud[߽x iA]^,.=Lëб?.7= Cb5< A0֠@)ξ 6 8L0aps8mA!C?/p B|7؀@/AV3P+` ira1ô C0SQYͥR.9*m*篙 $y*VeN8ZFMSoI_"ӝ 7]cp0`wTHӺ:3׮H'=Q xCVsIސdNgfpoW1-)kay ~G&:U;n]"eRI _^|:$oɈ%*H62U[7Ƈif{&zey“:_B'}W s}3q#UόEri<[ə梽-׼([x2ʇm/fcj7՗"Qjj'Ӻ/SʣjQ;&fZt &>!Xɹ(ސIKm"[ESh`~bkvvh9ӱ1gHx8]; ίZl(E ˛H瓵{\lOIزʈ8+C#@(lu eg:r ̜9Tm Mhh{+3ul̅wX)@9 1'_8ɑzJf.UђybNYE-aC* ̠}A|ٜf mz-5+ġ=w5;]\u&ƢG;aBދ92z$O#8엽,9-a@< fY~⫹Riπ`,M/k魇Oѹ0 LP߉ltgw7ÌK󮓟\ڃ/-ɂ}+I}ȉn;2s#XcF`&ҺW \B̔fxu"1۩vcVrHvt/3I)sQcà,ui4RA}&F\+P3:7Ÿe ҹ|}`gHoU pCK\ƈ6怷qM%yvK:Ϻ!JL#C08!h f$jX45yzwh(Y{}O,"mI <[ө FN9MYdԳׄP@u]3{0 |5`ڞCJmMvu)4U-Oح0vyKi[,o8t77N{G8v۵"w`g^5TXKm]Pk79^S'Qū5PP{ KfJ4Ud?WQ7D'MbE9CU2oC ey3ǚ:tΒ׉% d i U4+W5囗MB-rQ?kdoNfxwșa_VϨ tU%jպPhw9p,+3y^B=Y&VB3R`e]`SAd6 JUi,FuВnĴrXUPN۩uf.J[NJ߬Ex5a5@YVIJ|[W O@:Ki*]%o<\_Ik_taTRp=^ZEy0rbb < ¼ !p+z`8>#?uGB7L$>mxU!nv^cYJ0aTo6z# فaH2T4)$$>vLj<ߐG~@Ghٸm"%y/:hv n@a }WÚZU=0Wl~ݗxأQ3(Xn} Jߘ~"6428p@qز!ϟWXꥳhKºD?UmXVd/ar~/|p ѢL*7)2V&yrڍZݐKs(@q`1c9>΄̑ %k$V(4~`$U i1G@7Fo kG]KBsGFnӤ9]PQoxTiXpsk%vܻ3,?AibBYrulO_>3.Qs8p#[հ=DW3 N-j *,t5S姯7K.FS5k|A³e3gGV[EmYr%No>P;#a%5Œ/ djUUqo˽ҺLg6Bp>>keb}I|]o$0Rl=kIj-t::MJ+px$(厀W0ܧ(4)YH kZl NuU(B@r]%KSC)n.'.;D+M_Xt'R8kr:̿yFLǪ8ӅDtJ6 UU7A~:~`L]z9q6"! 
9M4wKhX)VWZV7Q _2&d oL7Hy`'#oW" 1Ω0]{ Ԧ7 >TNbd8/\-85|:ȉJ`>)-I\y%iԐE͏g  7NGm.6kq 5,#sOU#LNOMOsIdy9ף׷)@?y]8jbqHl~dϚac?V뮻.X].q\2J{I*z@ă/˻Wo%Y" cUD30BWyCRu}/iQ32|0nk1xKFzTd>o-:Bis揇kx*iS!;o⼞KY|[gPK}K3/w !O\`޵3iC\۝ CA& ca, Fϥ&UHs8e2?OٽCZ*|"D]Y^o#?zfz ]utt(Eɢq/?{޲푆bF7VƎY;~R'T>od?+gkJ}NR۷Oam<6+ l6^wcUB]an򶴡o5f2CMc/)?K*~3&ëu zK2 .[JZ"FVuUt(TsGb Rre#ƣECS: 2ʻpC3x:nAehCKif< CO̷2›pXOz Fd;u'&mWȫqR?@~~X#b{HKN;BR@ n\z8esb%-5~ӭ;1=xNM3]c'ԡNGa YXF x͋@۟)43JߣM_})+{U-k$ށ}83-yHm [HSEt|>[$F'CejwԭݧngFrw=5rL7S[MtTU6J0_d?f5:ԹúG"0ae?8ls q':,K<#n1 ' Vf\F;w1>kB%]vca bgp>4Ruk;!:b@Kk6a̙6vNKs ZOU,ѻvja0I6gܣVΊgCBNBp9Թy6!F$w]c4-xH$ZĘc)uSR*ȶ9.޿9oҊ"\.K2Ix"sCԯFyd,.{:xΊ"iOw( 4714Nu3:w7eFطmGÅDToCN2`N_OMn^-ItB\{CTGή) 4MGNK _*L[CM$5g&D4J.թHduiY  CQi'ExBY'V8K[uC^:&cSMεy0]4Q>z\&]Yw<]m`1&< ~Xkc';yޛ NaYD6}L޾}_+/ª,s&A$D}_ ؟& х T#09y,$&/37kKʭj O[_tM1?,IDt`hLCPyU8G2r;Z4)J: *Î; pCj_bS7 B߫mCKE~mJL_"*(KqO_apg) =a[,~*=#USKOGWv$Q | ܪ[q %=>:FDsoԟ 9?M,.#>D[#x(ò:mʡTݬL,6LzNMǍZ(NܽD0q-"]Ez PJ?2rfgL㈠"c 1'a&6wfŸif~smk?"U+7Vݞ>6# 0'm&>D  x95-:,}HF,;~.bfhHaI#r^\P`Q{%w2S~@5h:vFkz3zIfyz$&[s-\QUUs&ѫ5ͪgጬc48`0ZP`ؑ9KDkޥRq(P~R%U7)Hv]=^ӃW-_';0F.@x\0Gv'h;Gh 4e2CJ+.,bP?7@_ G;זO5lXRa8u1Yoz,s.fgM#g"MmSį o־Exyna,u\%si ï݅KDZOI-^}̈zg(qoY" *:@dh9"OѮQR?+17ĨǍ8JADCٲ@{T6h;1G*<8Nr0;jx~X]d`9UDZx"GvnK۹x 0ЂnWeg[uR>Q4 3˶a瘼nbML.qD]|`=|v&}gEWUɜ^qFrM-jA)U85KYysk{8 u|aۉyʣ4.QT:–+Ld2D1~`)f1 1(M18O |Ls[Y|&ܯbxSgVdd.iq(4oʘD%5 gzR'ξfV4Teqb7^Uf{oY4X\kYoBa=kG!Cv+xw4Y8ϋ',~ 1RWQ㟹 U͝PdN`f,P" 2$/Z*c ա,EQN%(T]$*չN XN|K2N[CU )61s5@^vd]c9؞6Iʲ xVy:dxUX;{.gRhzvIBEtSY+L,|-_=brd#Ӎ endstream endobj 314 0 obj << /Filter /FlateDecode /Length1 1741 /Length2 10473 /Length3 0 /Length 11586 >> stream xڍP-Cܡ!wwIh&$H!x;3_^uUYkTd77C,Ieey>;;+;;'* j KJrrCe BeR@賝=b p r 8ch$Y 3* ?GC@w lP+sF3-@ Wza+(AՕhjd) pC g{9wXQiZV`?PW, .s99@S^ i3௻pr/߁?ffv@;b ۂ2JP7(31mumTȈ ՞ "0Ϸ, 1AΨ;̞ݝ@]! 0w.l H^/g?2K 9@nfVlk;Pr?w`xn =z:߃P'P98`3(d Y ? x='gzCl1clzLvNB `appqR5 (pтaw g.P߀yp(7oA2.@;_Tv>r@T*+.vCbLqnVv?`g\ 5Hs[0f <{yl_:? 
Ơ%:!f濷tr><9l{ go1?M ` ؀ ~?Hf7kv.sdA_f/_9s23i 6 {vux,Kq|.o\3osmqr>)89=z(2C]^7 츩'qeٝF{R=jq%_!C|=!D}K5+_HU\gbYrնm7wڜQըdCXk'z |"-kfn Ó->i6x3I<̖ ӈ}3 g SV^j+G2& L"0EF gu#;럸9a>&m6bQb嚾6vy8-d 9N@W`r1{1K ŜXP-i%5cp,vp`^=> 5Ac<፾7I2[(*C0fw5 и #"jq7-e7rOӆu"c6Q /=&w\twQ–Jxx?>zDI;SP`ego;\Q ;! eK`)Uj5_u )3J\r)AaBK2jrudPg'{8#Kא*4#FMY۴ `J~Z9+nt;BRðDk$V+rR~qFvlAYTXi&RyYɁrEl->-yH0E&]N,-,t9*\Y6m2*n?k[ݝI@_ (>8Z's #4WR ͘#&$O=Ö^A3:V7&aEr&{K|H#v/n7Msu" &WI&J\'Y C*b,''o&?Vd?o[G,4r 펩a>gBdA[&[e ƈA6Ae)T 1'/'|3RZ9bz?-;U,u{:Iʜ[@/= EÖߣW2۶LS-JXK^, Kn'3}E#,=mJCnsvI&_7IT&e<d)Zr^h9!4$]ZTʙ|%XBv05.Q뼌f6_^KfO 4v#-KZ'gʆI{t/#o#6 kX\ }P6w" 3CXG] >=LoxK'ރ9U 9 kq* khc)gW_?jfLVvG4cyr #'`7QV`#XJ*E gC~Vכ(\G-' &% vP[WwjW/:JP~ zJ0hV5kt 0" >o]A$u+f @x$9y[1UnM0fiWD޾BQ6ཉ&(KhaEQHe2gl"(B9T৐b`١a)Xk<1>n4̠tD3UW{msW(#f QoZMIJ/U!jDJ|VK6]ˎި4L52Y}I3j8zX"MȮǪz/lHlHf[Ϻ#)mAAީӊi vZ >=dRi#No-TV2 !xEׯoqB1dj5;F߾.mb=&2X 3ȧ[I|vDC~ME~$H9OEj]݊~t_PRIJk9}wfya0w;,kL`Z[$(9MT709{Nx;PJ)S}#ǵϋʰhz|LQ+6h |kʏ[T9.OSzީPe3I;TNy7,yCFe&E$is`v׮^, 8js^ܲp$uI5, Ѹy'}ByײP+䘈GyPL>;նw=ُ%Mj8ӝv)F)rR TSe U }EJؠJ’JXۦcyI/ 4t)Sygb"xlo3Zb=I`A 9l8|bޓzG$;z"th ]̋)̈gLaKfm[ml[֝x6޳n8 {8<2jnOOj碟C`6',<[ n04*MQ-LQzqάweO>d %f R8"Wx1;Q=qr j՜X&iA=c{;TY3=ȸ@|)<-sO)kav/! 
flD|[*u{ʊ0/ҢZ/GpѾ/|o±(8j)0P+ZK,`*ۢba$C/]fЀYۋ <6'-*zθ^YGwQzPqkZB+`Rpd 5Ip%fUb2+ *ل/QnR 363UǛH~bZ8BT;7%︹}ߒ.Pķ5uE6'Ó¨LyU~yЂIiixqYw}#Jt-X z$޲ET; ږZ}BE狲Ԫakm%-wIzRP3QŪ1ϧŶ/Ox!T#4Gpej!l\e*ca8V9mЁ5vT%&6VcǕ]zuW't\ԥ }}e05\y8Ne~a-y6CT) Y]$Mn[AA*TKIh٥{h)[y~֝z=\3+5Gi6_#wM8+Ӥ(u* mVoc./9U'DWf)KnPy5" 26u{O9:Az*2d~l$-xlG:M[EWhsg!&춓Xn#×Ur7T,g[܌ NDZ]w$PkԔvWRہwUi!@s[NL`!`o5Y!Td M.-P*&tq.Jc !qsz62{.YSM/ hS $':ODCA/}5s5 !17 1v躩 ڢ8gVz螳0j,'6mXYYO䐐_jdHsk%[5!`1OXNH!B*ymyxeq:hWǯ}hlfK4VDDȆ#^Ϸ1XDJ|p$3MlvsB$yKrgNC~c]UUA(.ƃ-M_h 25ezϗ8Û'}Ws1NߊW (]AT^rϩNC:r6 ~cf9mւ2lSklcf]itUv;@ R`.RP]Zˉ?k1Q EgIWo rEѩJi׸HN6zPTdEOi/̶?|] қ/mo 9Q_1냮.Z,_vdu_5 ̴$p5KwD!%L#Q~[A!xۤQ)4Kc\_˳++BQD&BrCXwr.)8u+kG2UCPaZ~O4`$BAҪY3ElT*gʂHyVЊ`([s)vU@ N>[FvLyJqWQ'2=:` 2T*I571vx@:FJ!&##04 S!Ҧɵn"v䠾3*;`+7Ն[X=]SP{Y O&|Zx=ȫ47Za_k"Ri"Iv~``4MQZ,.oYHy^ݬ~V6Sd6d,=_,L>z { O\,s(YP>?S|;bi{zqUkH̭/97Eiw#D/B6Ar18_}84BbbGǃ aJ}MFk8N81@[Iw>yVX]z# B"g8Jɳu(񫌩X&%, ag^6C-#j{AT9̋'Pw^?[@jxv/V+n!(#iLi_Qr|=2z|JNp>|vTD.̼a$>g3- {I!R!u`O)+n⃘9cc_U/:\bh g"!1#-B 'Cޢ) s LJi6&)|ߌd;>D~6_5J˚; X&+Ɍsbla)c| ՒWP3)lx(<*Td|M"a G rW@Qߤs?QK~ϸcG}y"FĹ~.zV.Bhit~}xE 4ʽdzEm.X!V2%fO@[Ӿ[BA}zħw|VDn|;cwh,z3~%/Im5OJW+RlGXl ^mpP"|\?EK!@R:5[] b`!cpVqϻcvl.cYfS^VژzAXk QBn_ c p&s]ZA%a~#G*J& aMfVqa ?ݜm6 T@r~/J缩!DJrK1%SU~)SKu7Zyl&E]iZ7Y7SR6JK̫wLuah Lr;Nwوv9M2RJq%1\﫛Ci_Nr~3)r~3Xd+$ζ,D,ٙ2É ǩH޴2*clM<ڑ^(G)N'8l񭨓=mb{cЍ,%Cs]iAgtGٮY*6رqb4.*FMw:%Ji^#dy䆊Sg'r䄚E"mDcV, T8艳vKƇ7J6@&]f̈́1T͌,fQR?eY| s%W< @^M-:@|SC{8 ujʆ|~a|,CsS-tBA윐[kٌ*6q %tb9^ qhiάFs xZVWjU8"2=e#Nhs_m_E- qȔѡMφe[B1E還fRpNjԑ0'ob*պsPhԖm4;Q`W׃]YC܏,n"F-ܺ2IIs' 2hlETKsJ' "ڈM Wy /Mv4VG|Z咩qhMcWo (1k`ʶRʸն#PPIfVæ}&Y?O8w'’Y=^`zo7{rK1!$@J]F;endstream endobj 315 0 obj << /Filter /FlateDecode /Length1 1562 /Length2 7387 /Length3 0 /Length 8415 >> stream xڍvT>%1Z'!6nC:0b RH(HJ+ " RҝH|ӷ~?ٞ9fd*rkQXo"%`q ݆}(p(C185}B$iy< RDP?3P_ B}|j(@4 (BdD~Uh C1np/\Dh!x˃@bP/_1UQPMp.ML 4sC\0P4<08gE:@\lCYB #1CyyC+ ja0"@("AP'ġ@Mc Ov04+ Hg5OpUn @:#>X8 ``n_῅_0.`o7Gp~P?8CS:#`]:Fm>pBz 35 SK߄2@Qq)07F?sQAWh9  P9{[oy_zz ?R3O9nFp{oU KwF`[6Cwscν' 7B"~=3@Q_2ܮHA.(,@BPyA޸ wdz$O8{ =D U =bu 
p`jqϽ^q ڠ(ߚEh{JE":3b}I3!pDMECxۻEc|NǙ1BWFf]r& BwJ[/?:{i&bAxx`T]OcXv<;e2$J^kK?v"Z1ө/E{Hofޡ (Z[ ұQkp[(fˬفuu.b`Hqd\td=RRwZ{`zIP-W}XugIGƤGi,֦.XIͬƥI˹K)2L: |l?7CG&g̞^ 1 ^,>6տW%@AeAI2a ?fJB+ԏ}gײN'@ś$jmR#DO_J8|[0Dh~KhS>$ve'Mzܧ#2vjܜb= lC^h#ՄNOMu}qJS~h@X*w^ۧOl Pk{>v|SF&պ|[WM#v"RBJ9/<^i^z+BY?٭$N\¶$LU'_^H:;/L&<2EHRicTtRgL;JL< .A~xa{T$L{&A&oc)(Ǚ !99H^Bq']\kMwȈDj>x _3`lO1̚7/zCQMcaK}WOA!{C7Ξ%25"X2NHtOd04^5QQ;OIj8I3/@=0*qH(5ӓSBWhs 6(#irP[{9zsYu!t m{-z,YQ'9@;IOd^S!Waئ8Jw!fA-{ K|mڛ;L;jxbe[#+(ZEA\\u\Ŧ՞-vF58iFEGYmuLH3,Xăo-l ̣"e# }p:)v2i+)mT 4ұG۰0NWD(}zZjq,E&.o( J)[$VHԬ>8c>RېI=3%#噺g=]|Tt5؍wzfi912М)KE2xc}1af,6tacȞ0fK?&^L*Y|?q^/N<|sKd޳64vq{spt})K#AP323b*5Ӄf'c@Qag[Ԫ.@fQaFQ {K% Gbݝ&K19~E\mSi;6SEq;laOc?`K_f@K(`È@K`3E$S_M~rўM H,y Lxo`͋a.QPw/9H`lr} ܅ad:RcOb^j0P&Cz Tj,!rzW6ҔLN܋Rh~,~8U9؀CrvROBXF x0N*zt34L5>$ .Nw*~VJС2\%tk 3q'+[DG/x\7;3+5BJ!G"RyfXg+I!kM=?m_Y4BVh[G d-ނ!{B2cv^,| %8]ar!f'(t.M1ev%L.圷I UEwOpXx&g}î6U5/x ['wWB~\rKShZ1r{$]#q'A/~3mV_ۓo oe.//sl}$\=aF7 mw_u#]I^8=H8(/-C3jd#,@U5ߗam2n{ i[j@΄&]D8h%95Qwkk;KN|ʼε;۾[ .SF %L?L߶|]Kd `B +%H*.\+/Q X=Y -gfaP6Dѻr$9n KxfLvz؅L†km"G[\{).`ÜJ`KYr1F ';Q z zlTj:7z(ؿ{3BŏLtjpo:DL%3VL޺*/V]><_N!Sc+FU#m "իo]]ցalŏvl>&WI~9zŌ W'{ְo[`oJ8ш\ -~y ){FxA΢+U3wwMUh5IV_tKh9} C3K^α|lkw__Aa+7^kx26C 9ULB:AV*p*D85ʰxkrRSn5ŷ㑷 Bbu-( XX OM٬NggZ[Ĩl,ŀGXގes)3?({3r!+R$ֲ2O%HhJ/|f;p+Th\ॶ H11OLz@^UiۗCWr$BNk>Y#sS/ޝ/He|j'/F 6 ߳ٴ^=*R Mzv<PIu.f&p#Qm% Xz_W|<7R~|9U8}eeIhI޼0t,Bɮ/?x8DD;ڵ>p6}߲zÅ-`' ҥ -qz68xF<0he*d'BL|^i7P\d]vq(M(ҳM1 6,}>g|!螄; lXnwrяI&Z0WI$L» ?'9Փ. 
M+7O7{7&uU <4 ΂߭"cd: @Pisom&Yp=98- 鼷240rgSs5,ſ-e*hhU!emb6+HO"&~g o(QYO0lwp>"D ẑs9ui:y,B*Hy9DwF*½cXhr6˚lq}| gN็d .c-(XNʏ 9WkAlmY_҅VUcInxP; dp I%Z$V>DZVϜyd)'r%w~tl6gɈϭ2l|~בٮ[VA(Kq,qs*kw و]E?iC/+솙u#_?SQ;y;K)G͠@qsI2`YWIC~KHeLgy6mww?7Vibq^yx1ndNj.L1EPݚAq)VYYƏ{2Dd \DHT$P1gQ&cxEo1vҫ:_̅<^o +6L"?vtj=CdIũbn9x=E)Zʤ&Q5f%y[)J{>A0q҆K&?]?@Q9}qogSDp²chf=BߵJ':L'Ҋ vds3ԁ!r5@i}B$R 'f#8'zn`{FuBD#whLj/ܿ>Puf':IC*72ӦjՂ!V>k%d<~kl`oS3A軍Ϧ}5vOGQ "hA%jEH2=wRΚ/4<o*9d7g]n˖}|onXDendstream endobj 316 0 obj << /Filter /FlateDecode /Length1 1709 /Length2 10376 /Length3 0 /Length 11464 >> stream xڍP.Z\B(B$-Rܝ"ŵw)Nq"=rϹgޛ$[[Z{&TZ@)Hu`dHh)sr88880hir Z]=Cd(1qx$EG'/Ș86" Ǡ]q>,$m@v`3(@dЂAA.fbccd8, {]2@WilmK -(@PGG(dxRPA?X]? f7s0PSfspq`@&{أ bbH#u¿75vxͲP4u dx5 sP2pv( Ql 3K]?ŏ5xapc O9 psy[9LA`(? ?c. q€0(-fWx##WQJI\|\V.^''7~sWTWv5,.`kmA8 ?y 0^9B $&6`_yvtx @sU@@jLwDj8笂l<|r`P`f(݋0~v{<3ǧc@aef0 ٙb<#s>*DŽ٠0Gcss_*Av#S`|3 x?u9csvGwC+| |l/`w|$tW;?W?^Gd17 3o*|̺1ŃsMo]iKWZ;;dVmr3z+Zh#)Dx (={eGM7PQf# Tꎼ]]e*}>pX*7djsWɉSიr;I\k= YWbÕSw;ȕG7 M> Zi 룧Op"Yp1鍠ӍjJm*Q*!GDym4.t B%v k=a5t+/VT)'|bf'G9j R^=Yz hJ#pˇvH6K<՚l6H1%{H~l*P$ T%d] 1"m6p3MWU2 #)(fE+d8K='t1#K%kzgc_Vi#5z Qb^Y \&77 aF}TB%W-޷&i̜d76 MK;TG/7I'5 k7xӴ6{ j7(d_5'?7`֮x]@ѪƳ%sɏi|sd\MHoW)R,(Epl>kmr|CofoRӼ`jmpIJ¼<~ur$Em~vfEIH q²,v3%gR<uv@/6d@鸵{t^*}QDKgS4}(3D}Hy{r=%G~Mw hU\A2fY6[6y0]<sIPoDzVr0tĠá{(8eqʻv^f7$81+%h-3j(}nf*b \CvmP4jԛŏv~å9Wy=F7=seTI 汊|  Ґ]9݌YRM;!F5Advrrgdn򌑏cz|D厕~̫"MpԚ'fAA&("޳;-'ץy&Wז$btLjo%nj5Dž6yR-OJYSgXޗ5$T]y@(tpN'bW #gߋMқ]^:KD_Ltd(/R[|5 Tc %VCC~f>5K2MI| 7Iux.ܼźjB) XTJ$A u.苪wz{w֛Du]]VlBz+7Jo)U!$>%|>+ҠV ÞJ6wc?x?ω*쁡p e{&i`;-SD9,V#Kqh q1<08ɣervX4TMfR.\!41STǡen*wՉtT\QBEX kx:0bE}b/}kV+Nڽ/ʏgD0TS6"ۉ.pѴ/]8÷{>"uĤr(I4ywi.CK֕4dѽupENb+FKh3YA~Hչ(Yo6]BBg 圿͘󽼸SKFd侊"%fauG__,`\n1><՟8XO@-X+ D[yG(Cm?ܭ=̃ߌ݂@>UzrD$͍U>ol&/Y?eJ2]EoiìжmBN#?}# ֤"LAOĖ#ZV(Y5\0Py x;*6¾E*D̬-gM/' 3W,6="c*2vDmC A*إ}`Ea.sb=%4Y(k< WOvpn*)@돿 bW_S oqSI9&>Ո+$rn:8E`vw-dS3+ku9m@! `]N>%nѯI(/W0 9dXm{ì ;8㢊%A}KH"RSrs˗C9O.N_^/PSpW%D~. 
C%u=IXPNq҈+ [Jk'\35YXџ}bIvE ǎw.H(rjfxSU#KӔXSN#e+3UE88P&A?]= +5*jIACUc͍ *[@li #w{ѥgR00!5r`K[Dⷲ8 "*#w =XJfu/+l!uo ӣJGHIװ/uyR>uxRT_se\kkJϕdpH?[G ~P5v{p,ˣFܙ<bÿ́^:ggJ!MǁNbWcA߽p%AϡqC*+{1Y"[KTDordhr 8l G3h_뫭7@kTvέ| @麛MU} ޢ"IG.a zt(1&̜ʢy,X+l Ygk@ 5棤jiO:TVs"+t^v=$`$\8GyhZqwJ_K=tC8d4F=_mlC묮Fw1ڙMR1ۙP+AFPMn~ވ W*<sș'ȸv* 8oZ#WCq)ϴ^9 ?/Ei9U8Mrznl؞~*It;Ͱ1k8~igwM +N~9אrVކf=a~0ը(9G͗alTRRʖsOsQAAQ1_ڄ:!WdfcPB! 3gjXp8` ~kZ:9Fu|~Bw<|"ȉ"BFi\71dT4F#tGɪN?@lV ev(JcƖT`̳'`LA>.tbtBoƾ҇ ]L4Qح''5csƁKli5"N؏غM0()kZSνci@q_Hk4l{iBжlM.mi&MeRY=[]iضFc) [FI\})ϑXl}WCorP𒌛n3lq8qB1ʭ~9;]W#+n"rw67i+dY<>|a3rm3\ O4lrѴ^.#7AmfyޘP0p%Z(VS[TȑL 'O[+ѱ@jS$x3$bp,yq5``cx9` Vs.[Y"AuN kU\(ĵw3wlO$M/9 r`)9QhiL(BfP|#he u^ pQ-_MW8^g^AYCU_ng.Ve}FwBQrB\H8*v_KN 4Wæ)eq$ޝ4֯'*xV}%*O_"շG7 q}4bܵA\jMGe%]c݉}=ߡ$Fxz+V"p[ 2qR9HOhΑu.x"yzqU魫kvfs_b (5暿7M}E'.*O2(|RNXr@jף+ Νbjm=GEgZdo_ۦ s*9k+k.@PZ;ᘶ'=jubt1s&ouڞ+98GP{&^n*&l擬PWֹ';i~,,AO+땲 F%2?R.4] +< a8bo Uz6`p%@Eˉjn5og*]r[4)IN+n~BPK K! f!d8s|+:FC-=j7j}'P@zl99nu*ZuLa=2f݆܉ <^uTͲWzWNI h|?7?p,,ɼ4QE [mTc!h Cg/#ZvkqQƯMM_y/HAuN>GeqpO"gwkJͼ uY;v-r)ҨBÏ؄,p?v{ʽ0+vDu`aao;SӤڭ|$<(@EZn8 T(ۡH`ߕ]+;-V#YjyB5ߪ+8ٍdD?J"ُ3O.h`zLl9_umWlSdy@Hj֑Dhm_pK$94#9:Bni$,IJv Uۙ=Zf>? 
9 pu|J3xm1rB /ԭ 65Hr_0J'ygCXb*=?o~_9y4D¸pNmrMγP#~׷3XldU8Iv2ӽ~NH˼7S6B^ ;&IREx'GU?DDt3.TR*E!my0Hŀ4>70뭭$':<P?kDIك#v ] s64ss/pLui!l-WM|1r^fY՝Y0tdŰ~ݷ?DE< ;o2G!*.ky>v SV]M*0m<16 t?NժN}t.@ܠR~XA׺,ZۮTsuZ^: ݶ Z[qj{hooCk۔pP"qw;J\(YSϯ:*?/祿`2LQlYHHE&pV |ʑ\Ln{<` .}F{EgU)'AaD5jbVxlhMɽ[g.sJ=Qw6 MrҪ_v keR[ee.*^!=!IGSekREhƾnc`NkdecvUC`DDkU tsny8=$%6$gʬ t1Zd^iƑVH9(u/zŽS0CR՟+| t@2eR/A#LchM6\P$c"p] 植L{fu&ijK6N*, 8v Xm~}& {OV|EhӻAz_UCIH>07 οȋt&1'4^Ig@)tf)n~ң〫3`Y)ΙHvYbɳ}.(,4I:[y*oxE ?====-KQR^FI3Yɐz Oho}b6 k`|UJBDW[ +sμ;NCVfL5Dgš /@>|X7Th*J~P"b%5C4HV_a8l"`2LRu@ Y[j!t;m<IxdH+&{3dc?W񞰲 gOVrh֜^[Vpr 2ŷ mp$u29$郞  hD;Ln:q"~) y0S5s:3JA$`<WFk%$- hP4M#ʓ'{rm|hz(31t !0+H ܀̬4X`s>m!򢄈S%$,'Ȃ^dgj}V3lK!k:aZ, Iv^הPyQ:S.˹4Nk;#}z5PyY1m=)K=bgn(%I̿~endstream endobj 317 0 obj << /Filter /FlateDecode /Length1 1604 /Length2 7328 /Length3 0 /Length 8401 >> stream xڍx4U{3EHJlj֪ڛRjowKQfV]mڧ?=}OIr}]愛TXriaPoj@q P AI-`h8 )4T=`` Sc($@@2 )  JP@IUQp'g 6_|~HZZJ9@ }0憍#( >9g ]FT[Ay8) g AJ`J!9 LQo p ƚx"06:T[`COs m @Pn`/p#`C =FFB)h#Xߩ0?!pw Z GQlՑPU AO `+gH7# uU]  VH`PZRR=| ΢~ QGl@# A{OXDps#IalX_|2 B"|Q=bQc} ?%G I%$ :wQ#0Ovˣ6l.3 ? PX>|P?ix"|)rGgO v7Q A%쯅ևAn-ƀ;t\t[x/րFp /.= l  3B_2A\w ;"vW AA$%2{ 8@T`M(_bP + Q?@Fߖ‘c'7("Yc słnp'5,J"aNob4vGKAO4\oK%@y2_Mxzx`lvK AHfQp,W$0<;eiNHczww&.shqMN_QnodU Gp]bN3GET,5-g ҏɂqF'Ր6y֣$s{X׳;1$E; զsp@qN2jy3+=ܹXav?J٦# ||"i`wvK0sipx^qf2dMPO!kn"$ngfmT꒘:n{ \|.MYk%1x q|3fz8׭d)ORӸUUJ}&"z'p~nu r}5":{"h[N8֌c>ZOi4e8ËMmt6`bM'HA6gC ?PQPS<_'R#z <*6qvl"YG(p=n~x˺HV*39+ ոH|]C7ʨ;XB[ n/GPXO-k~w)f/z Ob`d hG~4frT?{'e-qS' =*\gP5F×@H-h8q &8ʘw> Xܯ 2d[N^[n88ǡ}#_M1od x**-a,?`W@F)/~% Q&IRjUcbiLm6U>ՖSrS6%N~xAA(gŷ,q=/ԙ"c5g&I%{NWj'Mes#;L,b+L "~g3A{ydfk=W3O-Djz3'l*3nY(;@A4ǬYpke;rp~a9AY(΁37[\NBJnpYɖ)ޑ&L)0ʫǾzSW ,ggdӶ׽fإ]I}|e?]@9c2&ᶕ~ܡ"owOL>5|?~kf[ٻ"DF G%.0r%vfmQ=;/Of@GQ1zZEtNr3=0M{^}^m}az" % IhiƴtXw/*3WF{3r[*i]{MFf)n8#na7e7}$yݤ>|P:E,^~ѪS6# -XVĬjg/MNM@]}\FP2CZV/1P]GKZ%<=巇#{S"Df&zZծTsoa6r<YIUfiEy=U'U7uoc/`{d- +/v,vdd?K9rfB*z-~Ǜw?]|Ͳ6Z |J_azPIu)8`e[5i? 
(ե טm}%(6t~)(=S6B@/jg#CΚN^1eB.H<6O*$V@]V:4K!*.V\ O㺟XsT$K{a!{Uz;->qѐLD=٩#ճ~xŹ-4ݽFCkRׁ$t)QH5(&Pò{Wkצ@G%=6GwOzxI.ltcsIN!wu{p-xf*_Gި2HLY&98l/)>A=]hBd= '7 qF!g?cgz$ z Y_J7Hܕj^TIPJe"M:HiLa ,Q2: ,HØ䬴pzHɍ֧܏u"Q Jui!?'KV>w[<:V/hOp'ylzMPcO!!~V4]JzriJ7_.n$a,LU92^y,4mڗ'yẠ&qu?0tirc / o6aaK]f .+1T#õÝ"ބM)>#t*h.>W-eHiAOnsBUk Sr9d>\AC( WemH~s&ӂO?f̡O 3xJy/ J8<̓r#}g]{?gS UMr}yvrC9pK;澮dk6d({b@<8,opZ+>;[߹VcqZk=djޫ綅, t;gItCECfV2D,V619~\\wj!-k(U-Ӕ?=Mw0yl)3aNZ3V]w;4~Muގ7<+@n5Fe*HXL̍?wxf/ueZ}9rcQ z٧qSBr f|Ps\/i?~Q܆*C)m˙ȷXpX{3?ʪBd zw2Ղh].4) l=M޳U{imUW97 Zg5o#i'{T3t!nӤg63OGw֞>~ZUo8NgHD`EDŽULFXzJ d5:INEܯ/{nLC+-6Prjրw؀x3.iZjG̊GD魴FK=*3F lZ췣U&o^LKv6\Yhr"h.^i0ժc#%4rke$Hw?GrI\C2#o2LT^7Qf]%Wt=.MъbYa@F3~v:,I~nՍ$1_Uvd>e|oy_q{'g3yz!;aF:nb02s7tuflMGBdf`u"h3oF9EmmR(Uxu?Y;TI&< |}GZr}ӁK"-H:(QŃ>j0t:)؃II 457}0RqZTP։;|7>[1}]yɋwPPyrqH[,_1wSt҂Ruę;}GnOE#nmGWebzlAhvzuNs9#SS) Ek# գ*WN*1OcGfNdU ˒ſu Dq5/ko̳7$]Or3@ai:cE\řW)wlc_~n߮?/Oa)ͩbɿ*3T*O]L)n" *6{oA79"誽!f)X ;t"lٖRQYd?a$o`ІnR/97Ί%!J||;H+PH W+K9CnϹF#7qgSaKkxrFڳ!o`DL4׹IJzwX:0CwIwRpB}cW1#l "dSz>9Wcf&=pj3ZB[MݐUc6cEpvÝXƷv^V &jXĮy}0kY> ]uu}&w4i wH({ Ok}=fWSVĿRՒ8H!8x| " C;=׷ęt?@ǡ0/Tu #Ta|kw1JҞ 7[;7OUW;.M/s9?#Od[2} x5QOڬDyL<5;1=-dKd|ie9'X'I:,I\l-kM('ФkOqj/Vru=|vN@ ՙQe!u-={Ql]ȟHQ$^}oJBwKmΞJcņk_J >}vxZr/q 9sVfD0ʹ{It%_Tqng),u6Y/gZ?~#G{{BMwV.qP)RidJAfVe5' lP_(zxVѥ|3Gd~Ҋ/sAN@!foO5YQ d>; wb\ҭ/}ZN a^'-0))O Ug^ofx}m`g~5eJGm2o>⩕5Z ީN@@͗*XSWKSޮAym)Yy߉0SC5:9/?**ng9-69ӷnj<*itHG&E&U,%GJ_H%n5f\-#ri\%ћJM956S VgMj\k{ugsd:$䔰DF7# f xxr&w̙cg`97Bȫlͱǂ "Y͸c!1䁑Lh#٩[{3EJWmN?Q='*@~_Ǒ(m})~к7)rzbw4E6?w&_Q'O?0}1ve{h_Ⱥ+aS9'|%H-^ZdmЌyLW+:[ /P@,G2Eũ6JJ.T49,86]TiZ/}k)-sЖy$P~68֓t߭O OXxPB섦tQ>kMsI3b90*UQQ1[:YfWBf  )&{/ߨܻ5wwgo6>b[2=рЁ?w`55oYRi xFcoØfA39oG.,> stream xYn[7+p9EE耢BqDcI&%Jz񐼏(o`UuW6YMr!c΂LFȲ`,J<Ѫ`9:xBދs|P|T|R)r*yEe2U:h$ gDMCJZ܄A9*xHqLf  V lh 6@s@s͑&4'Мh"BsF}a fl1>BsVm)G ML&&#Ȗiu>9<<"_\b` JWDE,%L .>Csӂ'XМMEv"kI242Csa1ԐȎA|agZSaVC8t1#tԥr.3|֌ttt׀ZQWZ0n&SJ,ⰏUM)w1u nD`kÊE3Se?@^a҅ e&; = G :?ЊfO<8S )&"98`6;q|iĄzTW `Z-iBF4QG$vXe`KW}rAXs>O"`%fX4$ؙitmF(zFjFyc{3ڮb*Z`0IGx%FObaEZ8Z> 'N׹Am~Kw_<[ zElBC/h 
נ:>F]twV̓.8~1!~n \q(3wLgx%: Yԡ/M}Wy3O8Ks ״E,Nwn22O8u’§MxKZ}pWs@msiݶi[k/gNfo /nەqybf{1 _}:i)-D#\1qQM)ze _/oJXV|w|qn?ooqwvhy4xkl˯zϫve|!^n?߯ژf.21ɵ/FQR.I?,o?,^,15B?O^T?m;՗ۿ^Ym,X4V*WN û1 6DGl/'bVL~2sZ=@G3젼o<1V+N =S9c`|rF0D[[11 gq gө,U W#Ǖ>ǭʧc@ΊcQy{ y='u=NTc缟@tUf{zm> stream xYߏ6~o}VME vsAbs]lː $)Sۇ]Qpf8CR"g9S3sd\x*ƝS3 i,Hm$[@NL[GO,t*)u9Kr9I k+ PPrƥG@FRRQi8h4H2[kЀfS4\@9Q-@ AFbB@52L0d95 W%lN hv0Eh.875X n7IA&L FRTLjWҐfIC`t9l5ґ-49 c8p0q  V< RNөaȏw|D bB(rB"¿aȳEx N IآA3*eD05k !p8a2i}/DWcYhG`iC5⡍#hY+P&3R|-ݼwYs5п[vT?+2{v!۲. UT-){x~ޕki/%j.ԁ0p_қIo%SI4=БC;ppO~r}m&ҜL6=KxΓ'LJ`niomJ=k$EɳU[}GsTMw{oaX2fತ'(1MvȕX[oZt˦/3!u8 l>'|13w-!YNO"^?ܳgř? bDPMB:E#=bI<{(ODV|K#?(4ab'f$98tZiK9QE1'Y$9al*r8SӥCQҡi0>e1ɳETC\II NJKxmiӀ\tt@ГHppι,Wf͐Usj[j#Yq1dgtQ*Gd{mϘ/F.,|.I%MA?rսx-;̌%B&>_\43,5^)ЋdT[J9-Y*;XWˏ>dOo BU/fE{Kk|>]%FM{5*?'fd3ħ%=E$jVsZF.~K"Gvʖ 7J.hhIE:I 1k&$ #a n.;%vb$bmc8l=n}bmneՎ'`}l~;Dn7 [ߙ::{^WAtI?gǏO_ųKoR0ΐ DϺQ4(hcAla@fdyՓj|..S]o%a%ևnSןg]%M@5gxΏ89~M_Wqcwl0DP7AyYTx I$-Ϝ0ĹEJ> stream x[]s7}ϯnQi}R!B $da0m<=Cf{Z3k5[ [:}?sn{h]Fk ^yNoz3CV$q,MQxb1fGV0uhǛ0& 6 9M$NVw:nOp]>g 'lyasy*8sJ09+a 5^{˷?L= J"(͛:ƠD-0+BJ![)+ǣNXH"8FtQDRXH&7G'RSыDʄjMLUȁOZxň9Eлh>9 'MУ}} , 寰܈u BNxY}Fl3Q>̡-m2c3{SmsS(*ĦaAa BsX p=a#&(3N%9BA(1| 4`<ٳt^*Q0L&7B {Od5: DOo4p|kY߄@>|>\@:QSQt&a gQK-OzJ@;[WxB)OW}pݵd/Vqt>t E bd[HNι HX ;^4 L6]Y n{Smv:58 ؈(-2$uMXNG'];|k@ڜS]+;.p'ҪˠD=i1e?9+Cc"E R.AFmrp<>gWQݰ.G.ۑ="2:drF1yU[ĄU n@iz?KڃxbABa7BY cdG22?z3e[I~B-<!#elF3To7%Ğ|#7c_h6g0U*p\>UNQiX? +xuWtOBVX98E4pEA}c˭M[PYҰ[Bؒw WL󓲏P T̳QADL5blىI;-Q(g Ɗ44CA7RNZ ^-^S`q)pMeJyVqY Ynl1}}QJXfúl"x b5O6)v&J-`0Pfjp(zx\R|y)5Šۂgװ,k#dVAD3( Yέil >߾^Ю^]9 K%9ȄB .Eu,7aϘXM lDc ̆naJc6\ȥRp((((((((bQEQE:^:^}_qڣxel!,5fWpR0"bRz0 }1\4Y-cDض/y?(m 1]Vփs>(꺔۴W62l8Z+>5k2ְ@٤[`:/a$gY%Y.Ԏ*lրdy~qڟ,ݤ) [!3,cY=G)x yYv ez< ڗcjyKD_+ v6n=,ϛYV?  
)7aV+鎕tJc}.R%TI|:^:^$>HEßeOB[ݩOT TMW3k&L sf3 ~T Z3p v36gՂ0K,5BAwCi ݹ?YQC6Ӑ-95fff#KU|y/EHl|<ңl~<@dMw0/AXBJaď1Jw_9[MZ*#,?EL\SGlY 'k .dyu|,^<ò~\Aj#w5RD~~܀ݴ$s5M,Εr ;5:n@^([>EW+[lh ʃX8P'o}wk@u8qA)ƁQƾ)iaV32 NxXEE~91ݍqSw.(,(,(,(,(,(PthTthumMmmm]m}mCmcme 8=~Mz¯v[##OijKoW\:V"5ߛ0Zl/l]_?:-` /歇gK>::WNZpzc+ޥ/&g}`''>:Rȟ+Xnko%lmʟٗ'{@>HS>OS|&R|'eRLȓ^5ky*O?9gr*3y.gr6r.|C>OVC~1Ʌ\e, Ttr\ mcPÏ,K9\HKIm>?o!-endstream endobj 620 0 obj << /Type /ObjStm /Length 3226 /Filter /FlateDecode /N 100 /First 916 >> stream x[Ys~&NMU…J`fADcKf_#K02p jI|g>m$)Ťrj0$(w,HYpȤthɤo5tͤdF[SPeϔvT'0eptd*|nSN$ ( K BAcVVϴ.L`:0`:ཱིQ_jfd@;0<HO@*xf34nAzN0&DrfaeV+ϬN:ajhOm!"+44kQK7ʑz!' zL u$44RHY"(G #s,Ԝ /B$(X%v@fRѰ詋hY# N@HRAb[z &/@>T'eEI}H=CGh𐠔yEmγ[YYP4/mO~ɣCⷻld~bEiN:?<{}zt:Of+m׿>{ALY p\ HZ'o?ֹF 9'ӐfV9ʯ,[iC"9(XR$z2Ѱ+EImgH430iJZ\a}HzZy%,<59X "AYw5x* 6} &1WԔ{YSu96?6jߨU5W'}3cjUN@愕Z)%4` qh/iZd*:x*:=yO{7yd/UoP=7Ϳ{}bmV߲~-|ʪ"붊6#&i-or贿=qΎ6:qovB2<8ɯ2ȖD2AI20 Xݠ.DA-ǐ)h)k &-CBkm!m3*H@˄U R·10u0/7ddB σeTyX0@M+U}6nLyc]c]д1o&e&|2%"͌V -FL]O- i&1H:V#Vޝin DnX%6&&Zw2lĻa6c A~ hIzWl[K`&W뫡s8'r(`^~&ƔwfSRk-:B VD8:j`IM"AԡeLwLXYk&^nuֿ:epܷֆr#xFīl b 4$P:>v-(Gn߄}u&M;4 yCDh M{i/45H|nޣZz~6g}bh寧z9Ҫ,_OJ\8ox&l@ 0R+pܣ0T\ќMs-۳]ƛGK@^QH= :NէYݑ*Z>ZnB~_І? a]"! U/˨8,:F`%Lp|x$&-2aSV5I4$B)pkM[.vyҭcswxבׯ0iHm ?M'@۟^nû 8_T 9pvأ?‚)~:/դI&b3Q} L%;6C$AX#7=x?ɽf 3/^>g/㋊ J=Y.Of jeV+$)FNI% `ȘS}Px'.Ys:hoʱ}$cTm ivx7x2LdϞ?N:^>mn_3w'=um cfr1emZݬ84)Ϥ_i?345;KF#sXNG?g__WWK~>XoooSI> stream xڍTk6Lw4Cw "  ]t7!HtKK z{k}ߚ׽4%,@@.a ^ %E9K9L]dҦOf7 \\B1tpH-{ 3)&sfPHH;@ 67(Z2B``zn(ajl vh\@ /S;obh-k_bMKWSgI]-@΀My%#/c _ '޿;;9{`* ue[2487_u@~k~?ZS;0o>m^.%VYW+jV." , z9=3v v)qyoi;ů:;z=5 Oik' W?D_$4pO,-O( OOOOOO=dG$~8?hyB]Oq>pS-?Su0O&:^UKyoNo0{;`!'1We[uH!t.@u99A$Ncb8QXw^ Trv-m{'o[&'7A,<+9hmopv"]${v4}Y 1 +; 1{b /{J! 
Kw̛ -nN:}b s /dgs^%gF]7[*W{?DŽH9 #"W_S#f/)B 4ƃU!5X&=V)m."IekqŃֱuʻ+:RnVtmY=U*NugKTa&hmR*(+yFɐ:TU ?TJaֲ+3ņ%3ilx&u0\ٰLlˌ6azAF9$(~oX0)"%ge-@"CXųI~_B/r>mKxDXUnX0 H^f_OҸ|-Nfi)*hO-pʴ|Uɯƃ-jәՕ}8Conqޕ]CUsNTA-W\7nj iyά\n!Ddx'9<Gh9JCi`9[=LV4k=^ZE ˞aXs|3ҫ.!9XAUً>xmL"wep’;1kD"z fvڦ|жMI`>(,݌2;u~ǔ`X2!C-q[hU6oH;LݕG ԰MnM"cR` :BH(ޝ9NVhf6G{_9 n${%9=ff-adT^u1:.dǔҥ$>diZ5@; [5&qLXb= }et+p08:v!RC46EF@ZwM(Ht ~"%ԿϺTu68mu?2I4Fo߄%H lKg[lxѺ7*~6)Dʹћ鉠& Oվ01_P(i.Mx*_;pDdH¹rtRC9U6T="l Q@&,8`TeYD?l_<A+x /15~xQXM4.!YBV'3!XO܍dϲ%R2[0Z퇁TE94QG;*c3,N]5#d\<ld|qAJMeB2>R`ϳ<6ϱ&b<"i b7M|nM.P9˻h냂z# 6$ƭZQAc zz"1Ϡt)Y-!g8m*u& cge7o\FGRVPI|M\ܤ+o6VQm@NDW]7ňխLPڮe,顟whP`a%љ̜^9!.pIȫ2.Ahz<3*-7gK_|΁KAzFwmhꊃb[ k>u2$+\#b\!o6F _N,,zi,12HOMKD^NU,!^4=gyiZb =Z1 ]&≸=휅,0]"*S^uHTw|JU]:|>ΰkDN65SY5nD]eaM|Uu؉HqdDO֣)DX[Wrq"Ah2sltӔCtcN @Zʩ k fGT^ :yr:>Mt`⪐YNԟڅph6 YCZV] |ydR4 q+,5$|]׌ڈY= w9fg˵2Ud0v)BPx+F^[&@ttR PJ--l h^xžQ.mw-p ӺNVQ巘Aa.3ړnOqn{*DŽUz*K^Siz}\~6[DK:x: 0[<9ЬAlc0:k7yj a{Ԉ"osuMH~ r|fv kf#&I Z^D=|qiUH#dj8^dž-V^a! WT.;js%w4}CS8cbRϞŀ`'eFqUW_j2K"&2s WBׯVZH<8Ut|Q 0VaW!S/"iDe06Y(1!|6G h6UCjWXM씴x ҁ 4e^^.=4R8P,-ћj(\k֑%[!푌°%'w1MJ?Vݴ8,t<wsӃff<جR” X=OJ|$<0¯9 P4@#6ӗq%:'FG'C5} Ep3dvmz8ˡ *'?n/<;۲(13F9L3x|hh>L̙@^b~Xǵ已*Ve@m x9Oc+@g+[HmRYN@.u ·ޖ$!ä[wzC#e(k?2aX* 0DOcLA;nqÚ o=' F=k2#Zf 3I ޾[bp…U^; >gcz{vՉÜ)C2@Uo!b!ш x΋ SרV-il9}Vl]אPbF;v5AH5rC 챋^ 18Zem=+9Jh l31@'5塀)؀<2vsp FeVXj٘+Cҭ7^ZDO[, wGLyRA86 WѕwfM ۂI,4 0~xWQ t'^rGE1b!m8e" XWYYX(2XJǶ=*YdQ aq&5"ACu'cD3}mYgmG4,%oTFoS_1"M$ dL+YˌE!0su3~'f zd6'n*^ڧhҞն `J1sΥمV2ip~ѡ2%<\O]˄a6BC4l{icd#y*7 YڂŌJG=R%esjk紖-PVU%>ޜ$۽ (.Ag6em}ksN[ ՙ5缾7\l!_Pۑ*Fg͋kI! ~ >òJ|@1Uj,2&S#aV`lsQCxgH4ț+:ҧaMԡ"\D69Jj,PDbb9 `{"N_:9[DeD}\X^oAqUd CQA=ِy9nH5?Dдvct+v_T{{$Aݾ!波pX*mdn1o?{gr{NO8ckg ߦ8c$aSk4o(oIGT\E"sDGяcwub-S[L u zF [[3Cq4F9AԣmCc!ZnH;k>~w7)x9XHoHR]Dh[/rh4;5а"=`ܻv#(HmvJ3] #ݛhY7FaKvZ^cS=q)L}Ux¹y$鼾$]jo0w?fCD#.kk<£-ϊep.oxdq꩜*ujɋ qsfUlJ$z,Z vZFv69`(&L^ ~^O./F3oMWw%kPV ʡmaO8Z0

oCOfNPicl$ؚNY[35Hyŵ8^0YVZ}-ET߷E*o5)FU{1S|GuP\|BA=QaϾhs&FeM'L5ni\ R{{QAqvc ؑ{J7„u]|>zNy)yab- A-/;reB-{r 0 \H` s3}O7 k{$b^kǓ>jhPUF>^-'-H+0P6_%YmzZ5\ ߌ/58h/Ml 5J2\>)Pgyv4XKB.ڡ!ǘƺ;\76b>=u ;چM;ϣ1 f3"kg&ӷ7&F{<:t{FEkK5gnPajn7YM}=l)|уt0GEY U/"ӱR16dQk!'_]בn ;>q0 CRD#o"q]K7;|N-Sy3k\=z":g@12XLw&1#]!dSO:&Es7jt 9#,yHC~\Q)~PS&nZ{\?⥚qGa PBn[𗋦tD|X#^yY!vDwRYa(LP1jpޠ)m$1ω{b) +M g$#H7b{_aɣ IXg&BCf{719Ϫ'5&/KձHO +^jܯuOO%020@`7cc5?k+A£ VHprN+͍Sd+Wq_ 5 ŶM#-xU )K@ WާBdEhe' *^$4^vj5*X^$ ,L_; (=yF /9JjMSE"} q٢;x*m\>J~*Qb)Z`/r,zuHgLW0v~&~yGU^']cm3'Ԥ* LW2Z:ཡ5z4}Mb%6%C?I{B<QZ_Re :Z%@`BEzhVW:c{C^;Lvց\Y9 T#1iK}zH!R+},h9Y:W;" xXL sY]7Nfu'_.ymeugޠ>pdLN8ׅgJUʵ0 %[頿hzS9DO`;b kɢ4wCs\ՖQ_S1D f dਥ[ݼRH]sAփrxy4(xm1=F[_1$b*68pSv塣G|)q[OP 0x쬪OWAOl3a-,.LGEde@VAE!zhyۂ儸-2@N!=zW zT@k@@Un޷ ^b2_7paccbTy_! vqQ\1?YgAղ-%=(\{9\8 P| [Z"CU16t3 r T?rWirY%ׅ'F8&3RYuRHxDiA'XYvShϒ,`lyMuվ#SZ9 39j|3N *Ҿw"}47c8NK:9f4֖j rI_d$i':yB@t 㟎yOE4j{\&'/:O^V} QbQiާϼ7,L#T ]5w@ݳX#}%iᄻpC}v 7 sgb-pX}SŭnHݴwva;[ Mhw x}laJgus^i"cB&xNYA*c>`T-m2 F {&݉culk;əϖt&nHŰgWW\%߿7Y'hǗtbM(k-i'qaZ#Q# zD 3UM.J×s\tTJ&H\}T(/|~>l` MȓĤyղ<um9pnm: !e4X̼*{D0eTLжME)J+G_G^ >5:/U$jFO/n{?-U8yV3A K$(`A[]!}krK`Y؏s/:^Tc>޺Ɲ7%s/LWl5x|cY ZiJ f_酔X8ek˽x%;h+ D̑i&?$B'=|/wY;1mOTkYrό d$ir]l~)ѯ`$ go1/C{mOV6_ɰni$ tw5^f|BxR9 N=wįZ?e:X5Pj%:WfVɟ}6EݑVdEU|N:~'XJzs8W}^Sm2{ E^Oˌ/.cۋ&`wC)&Ih*Jߖ]_]"6U1fk_wӣ$id!9 m8څ{݄TNaD8zs Oev,y+ΊY}rtyoU֎=Lz6xAf6l2\okbb$p_ՙκXo2,^fq9}oUy!7/Rg_b,KxEFaf%Ujt.^Xk. 
b.8>,6ԛб#<_wXr~oZpesmâ7ՠ_Fcno.{hm41ټ*V=#\6 yz߭J1T")KbO?mTmi~x!)4ZQ%\~7zҔؓendstream endobj 722 0 obj << /Filter /FlateDecode /Length1 1470 /Length2 6897 /Length3 0 /Length 7877 >> stream xڍw4\]6тDFeFE] 3 w;E'z zD I>}5g_w^gؘx H+2 (h@ ^M )&`{E0P@Ah"mD>882D`w PC".l H'/_K5'OLL;@YM5CZà^ !i$zxx]x([iNn uܡ/-#fl};r=p5pC@(zs܀?s;ܟ޿HG'0 Pce ^WOWne `w0 B>9, IsruuQ βtt"\]~OZO`C@l~9 0g7&h?2[+@Wx}/'o%/1 `&@/;r߈Y0?b]|` B{en/w}J54`N^  ||bBaN_K?'* :yp-8NwB{ @B ky~W@npo5o;^[=Hp (#֩!D2 цZAC@._W /zܬ׉ RUP4{K%5k` E.< | znltE6Hz rDV 1o'] BnBS 1PO5 Z"M۳j9:/Rl_ S9y|PmnDx/92-N^&YXS8g/%Q /cT jye7|:> rjPcqv%#?U+Q%NxU:kcT<Ŗk9MsוC}OI7Lj/vb }LO{/tҲҚ0` MVSR]d`1(F va,}a=ͷA kaq:lirjE'~='piI]'$]U(Cj^t@"NT_+N/z&"UҽqK03g`ey錙ZWo:-$?aΖg.'e4c#f݀AmC9aV'lՃdK Utuv璱B9>|+E110F2OjH$+ƒqaI=;PB2 !fjS=*NiB8zMĢ_J>fa30'q<>w[ChTRWHv{7U1:/Nxy;waBca"sG("Uj RSE^:R,OHMz$RyE/o ]z"'aeE VRT^I'i`}} -vep>P#ElDX ~I %n"S(]u:FfDr=P"աUm˙#ҍi:wo RR&r4"YgM&DnIf :hYW+9)5>ɪ L )S[qU=E bw"eH( (=/';ɏ2@Y=0\d[P+zN#BvBS#7l?[$]V(8LeT>O%HN j0yAA>Ƙ-j}aK1' KuOB"~eV`Yt㰨q>.II=!K,wbgt4rWX810& E*%,IL>q2%nL|dBhHءzgGB4Ҳ~ȘiVUVfHLSud:}lNM($J5mC8@1]mGĽhШc6@i_SR~̆}8܂caN^_DsAS_8uI| TmR_r$HTj8= Crqd |SFhTh80dyeHħHJ@i\9-.dqizOHFS`Ӗ:'S1'f-7r{gͲ::_vܰBDnE9y` súz~\#]F4=* O)(z{iGal#ܹp 惡إ"(Gl2AoMz8CdWx ҂շ8v&q"7p,G--wF!c6b, Iݺeΐ0(Y702t_E [Q}7NQ?,,'UC W2I9I=/LcOCukTLT{!P>[y-~D9|u"-njb&Ż58!_ں6hjȱӞ*|aAqL<7C9ֿCܢ1JDs.YW2%V;IbuԹvDh6n1W6#۞4Y4|}]USJ{Ev& ݚ(SMn|֡伆8 d?&BHʶaT~Ɠ\)ұpq02])kMۛ, 3 (B+eR3R'~}н~i;g;UՎP>7`ر l5* fR\SGO;{BklFӖ=@h(@qՄؽC_M] ̲Yh Q/]O@~ʙ%So6b LBqⲧግyB2#Wx a@;lȚJR Kϰר 귳$5"H Y)8fGA936 ۫ƃ\E !Tx4Ni(?^yVk2t`#MF;%'շli>oϞW'R+Ut\MjTxy˷6vKסoP%&sA7]}2:ZI$ -es ^Fɼv?WVs2W:HGHfu^d.@f[=*_R.Q#S[`=\B$y|SȕYde4pz}PPaUb !Vw"y+J B*TְXз+ś*;,ᾤvNV q߬oJ\ ק\I2#}zT{QOh{F|նɠ/t5^L z$*{\+1(Lkzڴq\2ə3-k0g|hﱴMG| 8:ޛZv({Y/((ĸaeR5\{|;Lm:>(]ץ֖A?ͷJ&v$,;GzOɄ//B{=&؏ K"_XH>,JrV8_BrixF&%Ȟ_c04D_.!io6eYvRYiN^Z ZA] dĻL.ϒ;q| 7omg0!=$ڂ{΋Z8S{C_ӻM%TieϾϽ,= &Ukir[wfgUR#w,CI@f2VCU,2UppN.$?o8<#8'h{^p"90zISZ :إsja?4rUk4.笺vnmt1W/LSdز֧aã` z[QE~ - oi3}vA&t*7 g-\K7JKxphN^ymZR[wZ(M߇l(w&שLx@̀Iյ\䢄&Saњ۬e]enP`bR= 
ZݼJoFʆYzW)Z4d(z*ѐ"xb!M̩ 8Z$ՓWܪo\JS"}87˪`vO^ܣ$RAv% !"y}뜖=(zzbReR{V5[xdoV-)X9+. 6Zh AwŜRǒ!=fRb[r /t ǺT`S{ߚjt7c&rR7:Q9ԋ&`|?V'cWy^m`jHާEP_㋴Qֿ/V[x!, &uf ]lTm"=c>edHohT?/r(.{y(?S:lWrOe j>n"ۣt~N:Zf\ΆB3:!nx˘N^瘮FNQv+6伐EӞmjA7e3]9$q{3٢&VB ]&TH!vf.iW$X|Ft eNݬ&֟Tiud%P x~ݸug.y n%^i/YF˾LNĢ+|D; 0|C'jR$VNy .rx.; éQEY%j._(T[* G+^60B&Q uT s0efT:l W1ܾ~U_YK'xdq<h g2[S=)3^Sdc0wn ¹7m#ҁnRQ${)YEYb,`yn76$te *(F"'^ت> /Font << /F1 227 0 R >> /ProcSet [ /PDF /Text ] >> /Subtype /Form /Type /XObject /Length 926 >> stream HV=o1+\ ǟ-"!"!n;DP"PRQy36.7=oQI8yg?Yg[V_락߾;Ӽ9yx˚L5do= >82e<MD/XVj%f1C8D\}{!jaYo֘;W?b~D;v7ZD:;VN3l'`8r\o_#|XG3q{O W<)6~!EVǔ5pZw:JX'>7V-QHCX;ksē׮{J ml LM$8],n͞t*,x:v8Ej~UWLųL=)nXyS!CNP_5dh=lYMI%tMQJTa ܐKaCԺD5[Xsay*n^%uJ9J0;ɇ߻jmx"^Or\qϘ%'޳WhG5n[ڹب\ǙU\ ʼn6*uK,TS ]7e&*Ϡ/ ^> stream xڌP[ӆ  0w<8w %85psjfWwk~DYA(` SP03123SP\mSh]@9M\m&a Y7[ @g^;uS98z9,\߫@mF`{9@ 23(Z+@@WIAohbl)HCZT.@gw9/E;)V ,\=Lw- h x P(9,zt^W"ߋMM@ -$)J07+} =M"*w}Vb rtuat4,ao.`gwu?q3}׽>V{{=[-`Ȥarrʈ;f tp033sN_ս;Y2xY}\L܁Wg7?K,,s+h 3~X}d~m}LJJJjt KT```0q7GVeп{cQ/ {VPtx@klߝ+˵H5z~\'B}.o_C4_d[wA. O2_?gdTvp003ټJ\Oo}_3 0qv6?w9{ `bwp}_xpp<99LE\&3I+Id;IWCI^OWPC{?SN&d߳DuLw{Ki G{ǖbVJsOĻ..G:uwX?{[+˿ߛr|Yl˻2 "_tN2_l&Wv...{޵?]3yU]/ /zBC:jE='f)2h|;ݞaRij6DRGPv%oW6„%?%N/M}>Yt"0HG.|W3["ɍYc@ʳabu,la_堆Sb!V#F?t4g+!,-/OۻY7bD:xXb-ָyJuV\r\]B[iJ4YEKyHk Y5 h:.T@wfL#gR%vh :6 azǝfu: L`$)7c|] Hb,1CF oSFw1-uRU-t;59RigSٹ!ΓGlH0AȢKQ/T]{IjU?@ 2ltt)WZ_&n顯a9$!]b-cޑ>LM-B1Ep+X|{>:JfJOJ関v޾>xqz7*gNc߂D7g2Tih|V[oQ ]ɠrc/iKs LzwY7h[309*xʆT# %a/BY1#v2a)e_-hq*r*dFqw%+Q;пzʬf*1 4û Q   D W>??`Q}/QU3/8<&õ紎^-lU}YwIUǻ}j /BADʱ)~wc|; qKb  \\/#ROә{GEWZU/^SagrOP>e˛\ճpۂ4Kc8*p=RM4b8m#{,[n,nr$Ð=kjOs^S;չ `)(}8HeH5lgJ.rvv+,ZCyHf%}"ڐ{E2j՚[s:[*?eAmt|%d:1ߝ^Xa;3dK3UnVh2kIO1Iʁ/Z,9ɚ~)Zm#</7va\ uk 1? 
H\F4} ʿegG\ARA nSEQF("b#/p@: !]i9ctt!(/ .OKmՈ9/]w]Y(fATɭf`xMS͍jͦě%+!$ts %7g>RkEqV6cylQlx/+Q?k̖x9F;}c:Lطݚt\"A~0M7Zj:Z1Vg)iv9H43}G d?1SI[513{-f{{o }QSQJTL̲3eG HFpDmf -Wn-M<~vi ~$&q[_ tYczz=o>= $iFeDyz15?S% c&ϋr?wP 6KU!ބݕ7:?3׳S̬HX|Զrڙ7ɮ<[=2}; ~ģv_1V+#it I(/z_TK}]*36K*>"lAl+"ec1CqаMW![6nbDs/s '-[PkTG/"j߶aWCĚQ{˶_pXaB렮mJ-B\Ԟ1db=) *?1$Z|)C\hˇHGؾ#JYH (#30I3ycT*N64%edL2hD&_F{z`p +~X_/> K˥mu֑-UZ*}^Fݜr,ߍ}={^Y[4FgEGp P }zAP{gN~?ҊoK_^s dDRHTqbe1'1B/1}vzR6}$R׮K԰?X}h@,Li::,")cVG~7-M 6tQ` u<&ꝰ 4E_IP̩xpLx%"/IZ\FsU]QH+)G;y\H df)QƭԻH Y2O=x& 2Ț;O+=qa[ӓeでlW,j ^ǘ%WN $ 9 EseQN"9S_;Ki@f6\Z’jS>=r3b11@.u.p°3RxMZkQ(S~#>D=Ʃf\Fzb2ʔ*9aХE&';5NnK] aa;6}ݬ>问(X 3ɔ AK돂$4=lsP #Z&@sL[O-},d&P7󦶑zD\:KcN:scl"h$Ƿ3eKBӁs75wp3D>s6H< y:lZDSR#qݾe)SQ|d$Uv5X1OC+cV'fSDfRvQ1/fQ+ aE|1Ȓ57PyeD1}AY!GiOjwxP:2n^8.UBDc,=SER5?ne-+]|Dt~X~x]q>: u^s;|+P  4`:-V&Ԧx@Nf"KD[V<ȋP!eJ*mÚ1$I%TwR6 |'+kM.}XRͩs3;U(q@| ͛vOz0B 1ǵ OP '&Ι;~:VN̶aܧ$6_?U@̢XG SsOL軯>5nsEkđڹcJ}"['~=Qr:T|m(LMN~bڡSܐ4ƙڇlOw<8F(&OMH13nƶ@~{=\ć:9]ʍm${ ]Ϲ;NFQ4I_#`ܸUF9"vh" :]g=`=-7 1/~E wFŽ +i3쟖 |KH%T \0GJzJ;1GD+zͩqmh&6r0WWж_dYix1DҾ412FCI(%nuG\=ܻ@z?l6B26Z¥VR]Uz^\5+e"gԓx8INߔ+8P" ͎+Ug9"q \] 2N'[+Ir2HV0@ ߄lI>#E")^!&@LlR:b#险`ƵddFd昖M鬮K9׬y=ԑ+X*F_ҵ½-7yX-n+OǖF ,v_&6i^c @>>-f 12%U#PAb?CLeÞ1)xOp苕ΝK{;@Õ/J~1DWjѴu&u4)}NF\xFbU~(+91k:Td6UomhDZlImJ7#aDQYs{mG>}%3̺/81n3U%@BVz$5ZQ@"B.jfV.m̅oFP:-k$dgWejn!¡xa\ 繭E\ӊQp]kMQGO |cv. ܥB4!b|bH6cЭt'VN5V:n96M[{pU >M'y߿x)~F{vpjݓ<9J$a³(Ɓm_5 *f/:C\Ɲup\ul_nU{Kw9 eC Sj[{ԽhxS9}[c3S HP4_ 5 o>e+O$s2~5o>ՇmT a5vȊ+`UU7H.OcR 5iQs>8P@d 5<UDziΈ!iu*[7rg=͵E-7T|qJ}QV<_={sj_O1a~8L׿"t'~pE<L'MqbV_SB6 %|>, v<Uo H}CDO<7PH |+$1$=lZ8Ż>z.t!#pgm) [P͞vbI'λS_ieJ4x+KmɃџ6>܁8^p|]){i魏~n0;} "tn[)->iCB3];6"! 
7m=*vtS/0ҺvdNC\WKp79oF%"J$2#Od٧{]ݗ4ꜤV[\sWL5>%||UJFa ,4Jpֳ,H)wkbB!qmUj"D'v%3yBWݖZ&UjENmn/a&L"ϯM)%!+)aJCB}UY AZPMоMYbM~hphpA&>Y5XqἿCoO!֥n7:W, '}\0cuw!>zV߁7fSHFϸap-׶cK6jS"ۧ[+3 R:`e m]pfvlK?MZv.hQ,ODGpv韛v<źsmy&aKԵI&h)h"bU6viJg.Z 9&*(g &5{O{"ϕd;3O9Y3>U&OZaǰ~_M]S hDv$R/c+5Z:Ը熋*#<&kW(I!a6+Pz.GV=_b 25Ŋ#NZwlbǸy7ߵl؆zC/$E7_L$P֙1˱|~KP!Hlbgۊ" h 0Ha,b0I]K/V>#F.[Y 73 +::ʈۨKmŴkUe0bZc̐jHy풾]3nɖXӾEuN[)g5 R"6<_"O0hj58\kAD$TqveܬdBi.+``HӡK‚Y2G7Y p#lM#wY . j/û9hen{'CC=[}9!Glth7=>v8>Ͻ޳T܂hBp{a_Emc5A*ZSFisP3H_RseGV}mfR2\%RF' =)SWJ=Ē"MjB:"[ɼ>XI!ZcPpͭR&S1UnoHyPw@>W6&ek⻅(&olH8FrBJ9F4Q'7{*[CO">P` U7WvCʻ䝀C33S 4g `QeWbn̉}C@0 $X8ʛr49̵ FM*Œ0kۢwGz8?~B=w7yr!{s!(15vkFpBF|(yqك<epb(a+_AʡyaHbLh{)+WQ)d#ccE#tI+[]h\O{Yôg)0GI H]reEB 5=(I`c?WZ<}# V(ƼunZQ"a*V9JP2ZWlq33(;C(>=iZq~Y#qE~S8F;Mm/4E@4,|hTjfr_& SrAhbр>B.=fvCv"Pnke@UW=[=O1<xSjĆk1AT ?A_Rk]J ,C84@+Žm[߶v0o7ۀ^OVe1yVoܘHsP\Qf1PR5NC.Q5%9s|DmBs1N\Za01mXM"o+S8ݑ.A3} έMrmw61ۤ"ZyjYE6UIsgiQVo<y;ƹqhi#QZXeKT *fk.~Dm Y6L'yG{&*LPds%܃zekLlii{iX=[x@hi9R= eHOϸ6*x%"FL糓jpJG=+`srIQ&r#ÎWk& YEM{N/s90]h&5l2w{9{*Eͷ2R`RmJEɉq[zalz&VUWӲ}JGH`ڊ!w[,׸,0n 2fs 'Ho#t;W8cuh\~+qڌ˕kMTpW Hp|`,' fn9/@cLd5닓el"S.k!QXYY(]lt(s†y3SWax3 K<~Tem½ֵl@=U~9"F FɱYn\elO%_ȩs/nBuj'EKwVEEi dNRһQ}cԼց-/^L )tU,pc@r^-:knBV*'C bxOsuO \[\?ͭ LÇd`1 DmC݅V7 cqA30I/oޡC-lJr I^.ru♖ksA"Xgq)ɂ ..|t(]qe6rZ3E_}!1NUiloΏo&o>)qǴǓ+2!WOhĆR^:_b+9IKhhl.rHDt (;/V<=\-B6I0/D4 ?yF*nF Qd yBl ORk Qfo09!q(T>n =iv]%yLs"QD]7v^Ewn˯Aɶ`E" U 0`lh@sCh,*h2|J MM" T,WvޖȰع-Z84V]P/ߋ*`Wˋue 8 >,.i^9Pأ`& Oֻ ॵdNcc:*͝2tMFƿƪa?8]%̴|Oj];YhF[gXO\샘>bEe|<ΝC]3Qgg##3Z3;6IH;W>";Aڷ[g{j؏#B"57<3hdlKxJҤq*-w8_0*/%W 19Ί[QL%~R1J|QW\zF)GII)⣙2D}ݽ5VCTw7C+H'T6v dǻL.&ˤȍ#hSy09XJ8xrdC>}5]Ɠ0΋gW$ĚeIðE*D<1SNȡt5V̢bc{ _'DCՈod;d~cJ~ z[}_|s//tq !OF z'աuy2^)!dG@_j" Z91 y~Otuܫf>A?ttSkgƦ>mƧ1_!0("th]OJ0!|?0'hf v|(O;oTqu%6Mj~+B<L?XWUzvxF6p2S!s}t$&͆> l'|sFE0XKf478QnH# Q=r>hd`q3<ȿ]T]=ǑϬ#Kwt0e)ܑ7|K~ݶ c7\#]]/+ `#.ca:X#IohQp'ls25%sw:T 1P@C5=׾ᮡ d I$!-(YY,pzftf6f0N zCFc9 Py&43Җ;dLʕ[XK= ngD+PhrxY[ g6F20`N =$ۉ1a(E(0 NZ׆;/g^X/N1,MhcbS_ oM0h1a0ٶ:qJgH0|\UйF: 
I&ʙ^H;FwQڷ]oyx~,NxczOa~CCBj;J>^tVP$BksOhRyXfgMA0i_E\"Gj--B?*?rd>?e+ȥX(iٛsp ʅ+R0`VYR,Xy_lW~eԾ|Ig$$E-pP f~A*ޚVHw207v׻O qq= Sq\xi' b=*Y]FwTEIڶΨ704X_^83Ff(m1ϸq9W;C9A'P1T{)ڦwsyyi+?_wrvT1G#^9+)^~a l0"Y!P ^>.FˊSYALl,X:@bܞf@ wBN#:o98'JY *$ EwZb~1 S6sεB_83l%3&>[kVsh=}5yNv|s Y%$u6R.)_ OijCҠ퀋\efj~"L28%FN`5ʳE7)݁(k .$X x˘s DpIw)ͥlZFH"zƐUÝGy_chR[ήh>rz 4sct6ۃXu{0ZL}4yĸKVo6. Gj;~^r}{jF}aǾT)Dk/o_@kf&5$4~g`:^ l 㻕> &ǡ]wX-0R =a)Վs !7CD,0QwQԨLDd rJh50,.a B1<_ASXEdӤZLU̵V%,i &jjy?@dʂMXK=wx\؃[SIRܬzy|imWag94QtPpÐu74Vg%dz%Mˏ`+|*|tI&t](ێL`O-6DUqVR˩_"~t`ZhԹ۲m]4іfS: >exN6-tﯣL3;N2 4/3(ҽ J{BHAA8ɾVG&eae) 0l~.W iyץֈk ̥ h@\NHk#V4j'=k'AJzӯ\%6k' //KtT9Q D7 A^_׹bO.k-qf,TV\L h+jziuC‚5W7G]/S}]1Fl5R#uЂOR°Wqb<_ulZXyHG[]:ۮ4OH\W`\$Õ%{k ?P(^;Rrn+}.k/_t\ @cه2!ZBq j`ѱFxeǎu:C(~ @p!D#= K91/u8DҌhD(caYÃ8ux8TL[ Qfy^S@ݭvTdN^ #J+d-x5cLa8 exՅrlqWo3GVAO>Eg80j!Gɮ`ϒ7j @~F})M<m ɞ->]1I0E ?W2f!v\"|*idu"F[\1:}ЕHTDp60tΑEBv//QD -QkZJDg}Ap\ }b A0ܯ i` =s׭{EZ%jast6Q o\j*(O5MFxSS Q^ Vx*,60qrˆ(G )d X.,!-q> stream xڽ\YG~<-#dVef`PMRlZMEZ͸(&ǻ0Ԭ#*Ȉ/mfzg}ٟfꛪy;uk7{}nuc8=Ry3ݢkj\36kjf^FZYF^3cV-j>z٫k5?d,/_wۑ~WO=aĿ? 
Gw54g.BD{xzM,nw[xG (hQ3ɗǪSU0gճFmS_Q>Y{UkYBE-sTj"{bt2 iUkOUU&ڄVMߋ}aJ̨v3cTx Մ ?Ze0IWРUmKYe6mZ$Nuo/;[X:UyVMȂB [b<$-z9hO##<+íE_ mY+T7;r|6;RLqx1b+4l`ҸگQ= f)@?= ێ <_2r<0doNQY .E{TÉn㥬abk362+HM\f 2pf(zz-lA СΖ$:WǷ{Oр, lZT-=pc^ 3HvM/iUNj۔21[<>h0shzG'jlAQ[ܠ_e7AV QCn#lgGZɰ>{wG [l]aX#Oz%>/1M\u+ŵ}7>ץBڠD!;-*r*^hQ=3Dp\f2KytcTTD@=ֽ]0!zwïdx'oKK4?֧Q6> ̓%4~_'qicEVg+׊$̟ gq1VrJrJ}Ni MWxL6GB`o,׌[z;utpb7;z=*sْ,y@/#`qz:^bp+Y]=yu}->C,4UDdC@gOF]%>lH\mҺ*/.UQ)ma&68sJ;0ع}sR#ߊw_wQ]m\(2.=ͼKB0GD/ އDOx'`) {/+裣?'l81M n&n>5l `E0uhmӳ^dh>KecklVў~שK*.i1X}D%mI|kX "yIct#t_Dςm { {nbaҳIIVn=Tpx8 GcHI+.h%>:Ij} #:9zmUĵc@dUe٬eD_-Wb~֎ZRmkP_0U \V9g wV=c![XBqT\(Dy.FHq =̪oIն daI&zsDkQF$x:$)@L\%j">>_GD)Z986둾BXF0(L J/ZebjFY#x*rYz\ѓ{=Hlw;z3"8<(m66D+6 KUU5m_NHa}DdR^Gmlju^2"fMY<tk9l*rFlַJt?q Fa&FoVcfnMz=o=/nw_]la5rY Y|hb+DP?2j-."{4(9"/U o^-Ct AOݭ@ ãhѓCx̙*AҨЦT"AH@2& d_u(ؖ2ɷ,ĸΣZI\ BF2@c\M=Zd5|>0_SQ k]EmCCj+(_T*uɝ,<"M˸fTK eg#]PW_X)ڨVqzHyɨ*a@'y+HC*2`6|v|&)'UPfgteir {3`9/((k:bzLۉTWin%-ehq<+"DVFq8(+n=B"2D׵cx2[Fe2k(RN<ŜDpg1K8M6^`DPaCS;AFzG wեl Q!JN,!,{NMB)}*ԓSNt^7)[q'~CR|8+ yC㪴3Lm[bEp+xG[Y=Is u%z@Rv9F{\}ڲQk>+3Ui[i&0azۆ\jw.*c\Aܤ7JLua owmT+cͬR?bU7||kGna & Ƴ]EN5MFeS{1X%$D+sc"laTk lRP=\$Nu=y>&SzSyc݅Rj3T#ND:֮P񴰦79G7|Wگ.ۭu:Κ!Aɻ@]uTM=e`i Wa3]]{{mt{q"U~b_/(oQy Ȫ^2/kBME L[=zC廿b.i,@cT4 3J p/_B"+j)&ih?g>`\_T; Af_ʪ`g1?9rs%yf>)n݉vBpS-Ǭ\BY Ov{THendstream endobj 726 0 obj << /Filter /FlateDecode /Length 3074 >> stream x\Ks8WHՎ 䖙TR5j[]cIcvAIgRX _wh4 *+_/~xZ.o2 :Revy}1/g49, xr?n _l5^]Zf\IG7Hܸ\k_fLR",4-"Z_|ȮoYA5t)Rf} >hE}Fʼn.cUgX 6Ă?94tHeIT7vVo/gsP(d Z$0Qt6؄+F$rh}rdMt IO`H.Pb,&+l2e-%΍OD3~ByX(eVcAEOԢ7 % ޤ°,(qRdsATa&vsM0WѮ" !YOa |t$Whh7i!-0mW4nJu'TiZW6V yaD Qdsks-voLY2@bsGhPgb:E 4sV2a~9[[ 6֕d0igp*l{)}W,.e:!f' zi3QXd e41Ő8a4U$D`aN&\,3+F*URr!ŅOUdRNUFVF2;}T1a*]4UD:4kXto@s%ae9Prfx˖>_F_Bz=!>% vxy_g`xۭ/xn(fsa+')9x2DጄE2,VY<|sCPTv'UBS̝t+6;}8-; \A^h z[`fs;08^rv7 HwtA wNG\z,$ rY/3 RDQ@C?X]ТL;wC_cDa)hQ#9V2/-`[gb5FV+wڙJ6rTSiNOk; vvF Sڽo#8e..׾cqE<gmxXo#ь_=qk.0Ι`Rj,Ng pauiq}؂phX+2)©=3;/΋~z럝gQ%NO!EGyu sa݄.Ltǻ>f-%$DVg^[Yf|FFJゕ*gwaPh]1>\k}Dt1Zx4ٸO!P8H%yM045 
DEO8ߟ''kI*]LDh$%KjHNJ $ñ5'L0M8ʣy`օ󖼅9#b,,ҎP83# rqw+)gOL&m'J?tE)s\ -ӱ* ͎FdCx-%\W14ronWMa@6Dc@@9Ж0z @}t bh~`buoHT ĪA9I, A Uc"]p\)4,,p2u˧43' (n% QQ\>:?x~wLM޿4~X`J{#, SiR۲jon5zG75 ytVؐ5ʯn .p.۠v0{_wFf QOݺ8)+ۄt}e{*nK"8Xu Hhүdv}74?R0?[]=X9pqNU" ܵsa_aƛas^ /:͛wNPnnӺ{sim3. YԶn!kPؤF Kv}V v/m*1 _߻nܸ';V71!m?%Iendstream endobj 727 0 obj << /Filter /FlateDecode /Length 5202 >> stream x\[o#7~NBOI<\,0-9Gϻ= ?GzZ7cR33XNI mY/p HjI:3knThz!l75=٪H|_ۑ*v"c$D:h\ Z״/izAI4Sld6ښԞi7&-#^Ǡ1rw qBCWQ4$F> 69Մ2vu3DasdMiC#c~ńۨrұg<%q$E?& - Li[ʠ' j ԕ 0H0x0 ,试&s&Uv,Th7ې@oSxO4a8֤= Hdr xT␶5n$ +t4N;\~?lƲ-J;q( 5ËPsC\$#|idL, & ? JlK1 XEKuْWo7 шT$W6 %MןM,<&>?Frh-m1GcvKȟqL/Q92ђ7Yx0Px. ѐЯV#3oiF펭a7(߅yj8[N[Q vG=ٯnӹdX=dD՞J{Rvz"a]2 KE1hgd Sٹ lF"Uiosy6Qm6( 2)5݄qȂY5xbp U Ԓ"-ڲhJ/pt+%lEȜМ懑1WDMOB>{)KSv6Qs ’zIʰR9O cΧ;~XLDeXZ4ᒪ&S㒪FMp N35'3wi%YՇxǨq5Sh\H#˪:IN@w|#"6~ܥ#Dj1p U/d[^UĀ[N 1HqKj' uC>h8ہ1_$"D7V(aĐ8F1moaqT [r&΄)A\s&(KZX:V74xk Jk:פUaH|zH8S6Jql㉧& Y"=VLSG by]407,xR9]uw;֑Z3췕HD#}s*`zKh8!;ƴ+@|aXȕ8 u_C?;ߨAR0Yٿ(S  gN=$DJ~%0 k~$~1c!O/,'̎CXghݎYs ¬ fK-w齜GL1XJOTuExW*>,Tf5ł UwxtgkEw@"q~w[|ȑ& v':D"?z7Xq ?XRB> My=⇼!LT1fX*-15ʧSe)2o!hއv޲"B< L6HW-{۸_f?YR% p .KW~` ~=[Ru <י_]4R`{-8飓E(\8'}3r|A`ʺ.+|jcIBai0p 20%Hk9}j eȏ('h )&r9TGbV,b%3׍Ȧݐ9tx Lg@0U jܝBh֓C͐߯CR3\ ب/u;{]_q1Bu)LYѐ՝ b,vTvTKFg\ȴvlwmf@lrN;vҺP]P^qS٦G(1,Yx\z{*Z,dX3D0z?2WNV-( *ɜ ˋϿiB*}(@%rI}e&6e̮a~uϏq>;x;~0) ;è+RBa+e cicB|8<Ð2_,2eS~xIGFExQqE )ж3HсZV 堓ZcV/ + e>PC$n7)noKX.xMtR_=CΙJ'ej*Q]}r.d MP}^n<+MHͮ5 *Wʨedv9wD_h}aU tZPZl_(6;1KY:dKw_DU,6hPlv` kn0ףnZ͙ 8SB¹UҺY|q2L]:e8#5VaU)Xg {!6C> stream x\[s9v~`665ULe'$vS5jĘ)Y= 4Amˎ @br3|夭ZՓWʸjO~Džj݅B닙jwxsx/xcs +7=Z_,~M{c*A4a':'3mqv=b\@-6>' Xax+,% ?gﰥR{n"\ӪNNfVNyp"p)$A ubRW ֨+Lf2 M}oO$<(&r" TM;Z?b2Ÿ&m3ykqj'FKJfR7r zP  b2]a0j9ҚZɴCJ$J_LRnBl WKZA RU-jW1PQPKVJjKZ02NEh9Qҡ5WkQ?l^ ee(S%Jv!эvi6.i豞$u3;M-{(*Ufے*LY:W( V0S$eQ(!2~0`|f"m%6mݻʪY+EF(63VٖaѶjeGZRQ18cI[ϜARЯ8N ^C cFyƞTyjG.p! 
p?5/cthto%үJ9E¥~BDK{h]5v1ZesbpM sٺQVfg >R.f(R8IbIRp"k= (+ $v/gP[Y\{vZ*CVplDq 8(="m06 lczg貶<WS/`n0DN @pYC7ђٶ%G3|@gD6OTh  RF~]6pj#sݧ n֝/-TgݗEfe s C3ik5Da TߜVcس=f0R`9L;0-,(hEdTIt iPv(eH I#S)} ={|=߫"m[{u[  1KmYwFr>&6\u9FFhzD͝Ii4i4  ?*ԠuFk4`mr?2|$Zsn vYNݱҏ9x5;V#AZv#G'TQa䘥<=Lg,3_d֫? ?r*|M7w.}_XzP ӀP`Fެ|PanNFتyn[,]欩֞+/=Or!sDCC(SMLgьyt#U'M|O,92 |o475uG2tink>?xO_bVW&ޕ&ݭ$w,#@Zvhv\w_|^\`b˗?*aG xKxK?#wW*{|Ž; 7 ǑRIqխ(?2!'9٥kmxSoӧ+|n>o񴫆G {^ U` B_h=VR~h)󂯸/- A]pӤ pÙ-l3M,*Srj)DMfx)E0ʅH6*Ũc.ryy=9yv$s)!yG߾xEa00& kkqb9Ͼ}B4{#Wĭ aşQ.}+sz7DW+˴e)!|C*1 Xt@kx7 ^7]q{S5oIu.}ӋڃNG)#+6isv@l_"!uicASwn. Jq4f8I8l`eliud2¬kcd<xQTAI1̊ݷ|3bm2׋M⏰=G4c3)wśdPŢd`3F_i@'sE4:QLi3EY8mqt{2D귅xG?ϴ6ߐz"BN}FOcrÇ06^8lW}GMbu2vݚ9'vj 9M񀾂wnM>=1F&-bcRdO BF_b03 O?UlnxkwGaX@cfy-C xTkˈZA qrȈ/<0Xy$D=k5xTC{Z*oR!FYOW 1M {sx$ꦔ/qȏ`*sy&KԞdE8lǎ@͝ʂ=K__{ZSw ?¥/]L|fϤu.0t(u8:eӮBBb}g5% udSVVF@p t z{yݜKC`5`ӠGnl,% =d Nh l{o~^ =9_z&t?4"~#2 j 2i2s[}:8תim`'3CхJ2ÔSlr9?!܋.=b:<oY>7ODX#c BQ? V6#<=MpJkӮ. 'Stagg8E%:982<ܛwoZ^_!d#8 6򍇨.-p&nNL7jy+uPB ڐ⋉'+k&ܢ]r($K6?ݞ?'%[tm$ոG`!Ox"9R`+]MaWF~loCl:ϛ'zHR,4n_r^]x ,?z[:W:<$P4 :(g7!һ=5Bc0ڹs3:^; rp~2H#-r _T;}8e 8zUr<~uF$M*qWO]Шendstream endobj 729 0 obj << /Filter /FlateDecode /Length1 1547 /Length2 8467 /Length3 0 /Length 9483 >> stream xڍwT\.]" J -8 1Cww%4"-HwtH7(?ZYk<3.pxD2@/3> DEEA.偩T\@>PP($G@nP+:@;0=PkGli0j A.6` {. 
v/lb6..\ g.8Zu耝7w ܸpz6P?otw x졖`+ <*4?j8Uc/߆?ApG @` M;An =G 6_ :[".\PIr6Pgy, sq|('a( ;+WG0+XY/`K.<\Yz;DP \o XA-]`k( ?`0OaP1A6 n ˿~߈p?>qp3Q<4Fyy"CbX"c*ę[>|Qʝssqi9 y m Yg|ؙۧ-G4C,9|9nYY5KMq8lNg}Wci*9dE$E ijjUfr>9HQ#gW`wNQ=]XYr7Mu6+6SH?՛4@Q2IJ5e^`_^f!/m9e0sj~țQDS]ЮC&O?ubn[>8J,٭u⋘BE9K(>_:I4ޢZӕP9fk7fNޖF@Rܔm733_OQc-4˓/@A#A`\.:$b-m!},3gN}obԅ Wen;<ǤeCE `]oбgՒKv< G  O!p^\.xb?mSʤz>w=jW;"J0)bRg #n!d6R;T;)k[tx% 2#4)=iSfzvbh0oq~-_,cuG{)m)f }̉}t~k^o{-@8!T -d\la**T5ュF3Ո F j#'̸Q0Z++~MLTO=>3^Ij.6LMzJ$؞DR8A6K'BBam<ӄ4AllYJ{tti^ݕxPXiyW`׏7sG;5;#iታ۝va ݙ-tP^3IbsԟAfR@Z*h0`;=K ݋aދ?ZM ճ\K2QX2ż6u?W¬a }J@iK y`B uP5K3*e$V'Yt6gpȨ-NrÓS}L.Uܯ숻 ttf2(1yhSXbtϯc`|9=>OQ[+#$'Kf(5P7RH]#d*'!&Ţ rڋәD:v|:EmM"CwB'dz۾&qW{X[vP $R`WDlGѾpGT vZd\zZbn9or.)ZYbm¾6ʅ$H`$D("U L~{jɻckKaUI*]SAgeK $ FϦy-N}qǧttErcUD>&IrMn䅙Y>x@s+%-CZb(4f:!"/_=g}Q, HNkpk|%iƕEb/03$EDP)3C r\XGy2 I$S0Xi$t6.+J/9qȣFJ|[>3{j8*XP蜛Sn3g*ȞZdk'ss=[gTfRsdQ-`VsJWˀoZTI*^?|{dE'7]N Y/P9䫁ˀ(^B(h*G*Ɓu3R~1M 9:V& %oۉ–: (+gD`y)BkP!7h~upj#vG߇4;OoFkLjɰ'iKiTwdzl͹8c-fQ}7J-YVǻT"re#aUz/wƅ%{i, W`VqB5-Q6+mx({4 θqz5ie(|%6n>b 3h~2#\ D>#2R *>دgY$\d>a%o5maE[V,mӎ~[=WK;؋j \ȁTl2HB1,9mQ/T2v&X1Z CnsaV=v|]B`"RIVfqܛ`Ӯv֑pPJȼO%coɧe Uj6 cr$Ob$<"Ӆ?~Vxyv2>!л_=1@|Av1.xk/BY-[hSꖋ1SBJm6nu27÷YO{xX~#З?} mh=^|*ڟ#{y>(!v9xGEe%$#PR%öw9i(9La⑸yݍv>!Kd"ao&Q9凇=%x#OǣDъ/o(G̻ l_y-?%IOcuyBfGޠ-N \s8:WbF ?tVZvlK(Z7F2K5ǬhbѪ3ipHv5HRs.AliTe(c~xm*guadOx pI~MJ-wܢZـ)cՎ H.,cnvE/jx?W72¾ jRsw+M\ߠL^UUEt Zx,ȍ’͖:EtyCCfC. 6qRW(V88L笼b:O/^)i~5CS"}opp~JU#eQL^u$Ag0"]p,+2= :QwkE3sʡh5TDTJAӦLGffC۴7)yDSӡ)SzA0V=ۥ]pv$_O")+#˻FwQ4}NJظhlkfMY[DK% |!w$cLcVv)L!&s&Ɨ\Ni"zh))UNͲ./>]ޕQ>_zKY` u\/ֳ7RO3u?P>q@ӿ Ny)i:J#=/:a;4'C[H uG:dIzxɩ!C^M[` czhk 5tcW&bLF \Wa~_G1'?acfɀ4?ca^Ƒƨ)d5P(2n(/TfDyxR?*6?^cĿϿdD`uŋ~tFk5)[쳖]RfY Oh=37Jn~ OnK簈I oPNTt0VC&zU:%xӿKecQy~&xKG8JOGա}I;wAhCѓ^̡e0OO{1;6Jj{FTMz~eVg=|W==O˯gٸc>.3d>l϶fLX'$ukwsZB߿c@d(??nK"(6T j Vb&. 
`ܿCRLJy0u5uw!Һz#477C3[p38r{s!oygz~c堷Bhzi2Z)$oЭtL;ؒGnױ }>f $ަ;Y1XŜw'"Ի/YbzIR7̌dxOGzq _Ix^_J19 w6Cqޏ>Wrv`,gT(Dr?yl_ܣU+S1ɛE)RmF'6k1JPd:b,42t<䏢w6K˙'/cF>m xʲoTWǥQfbٙBؚ캛M42H %~4`䲐ЂQx\a&SH ̀طc*VM"GBOEV_#t"̝j=e :5; be66ux7 -\,*b102*GJVShH`=ȽyrC%wiܔ/DY 1꺱R/cC]Zrl3|ʍa#')0=|qgo&Fz'Zܲ}A 阹pe. .PVAX6{$gr*XEKkyb3RيCj} j%ume/kyfdϻե(Ae0+`m,^~oSͯnD0h~߫lQ]*ÙF -K^7͸cscLZ|V)ddkK0XG`Եwj4fМr{nB*smkrN]#"?9]}Av@f8 B̗^ Tܨ]NSļ7v,?ç֗6/Psh9V'\η6B=!Ilz&0 UťJ QVZ(M5F1vƌMT^w]m A |n ~eG!ߟLS1B%,RUp })r5 0UP9 70F8TeJ @0g'{-/{3*Q|!IVU4!QTIGs&A$޽ z"lod^`O=IlLn7,YHp^# f,k"FoӤ`z^`پ!KӌmB*x$Dq+?OKaݳ\f ЊZ#zE8F΀Fg_N/TMgO(񢴷w(ќ-度-sXyR>]=2UB)#aMBlp2uK>iuw@)6IH7"ّyZ.\T@xbQ%joZ0P`!\^-_V >lB)Q WUfE+c^H8EH=&QL@|Tw1&8}jϚםdmy= ^b12Elk37Ǒj V '6ҕٚI2ꦚJ4H>+HAiz,Qbih\=]s`bVZ<Gv/w|@٘ 9RYu,dpAuY_aTA;/ü! ǹd/Llb 3vzųtscGZ'SN9:6zRNX]6T\Dy!u% 4>XtR[bMS1_ϑ=F҆,^[Kq}=_.9E[B6i`SiyHiay$^y׸d+g{endstream endobj 730 0 obj << /Filter /FlateDecode /Length1 1452 /Length2 6273 /Length3 0 /Length 7264 >> stream xڍtTk.(1( ! 5tKJ C0tw#!]"J "HJHww *(9kz};MGOa UFQ|-}10  &0"`rP@B(4Fp@P (*!(&! x#)P{lZuFġpFQ2 9C0@AQJ%eBH`g7~N>AݠH a#33~"=EyPpAp7t;4.Pg? @_tEJC g0œmeM~> vrC`s0@YNF sA~QJe% GOB 3YG8Esu)傆AQQQq7v6 }].[4 ?#u{@(;}" (5'Ov4 sF  ' Z^6hhhl// $B?ąNM7՜2m8/oɂ믕#ZPo. 
;_Y_ݝ~~3Z(Zh!oW#Uւܝ۪Cn8 P?B{N08TAG  uGW =&(z]W A>!(D"5z7G!4G-Ik 戞/o\P@#@g _ȿ#tP/(hf sk^)ɷ1hBqN=RsHי~1:s}˼='x1M5Խ&WK`7"F~| ^p7EW-n[΀dlix=X6T9Y,@V,,|SYBfB_NOw ηT>lyɽI9n R;Gp.b;WO21't 8J`QH#MB}Rl?}sHjBj}b .ȎwiRfIJTp؄=7FnࠃK!8X#˘DFiV7 QY9j?]h\g/ O)(8oNemaO{ȟ3e㾽eeyu5)Bb=뒷dj]50h1/^3$NRY`8ө2# ͜?BRA&J@޺E/wquf5>UZ@`oV 13~4_՛7;0۲8 ?uq g_y=y$&gl>/IZ>_j&xMTF$>!@op.b?eJos*HL5CerQƸ>;"8="h{Cin:<=eO$a}҆i%C~f QŒw毣"R 3jTA"]"ۼxət&89Sy8S[+wgĆ<\5]%qȹ"ą%0Ea]&yWմ85{se] m =I bzp_]+4VǞ[dmGw8U_2wXDÚD/;zN)ǍnL:W|vY%duȅj&(8gHV->㜻ƿh/mMt1^GsQ,-?QPc|[گӊ?E}cY}7G:6i@TKn3'az ِ'pA弚atÆ] +L;̲?7jh] /(_Gzv߯R`e(iPFXvT|OqA܏;^V& >y4=o?aU}0|\i{ V~($5h,Z4Fz"ᔇ~grn-_{ouhhr/F^e 1 ٷ5fJ>^`7S,L U6bg־O0` HuT,Ѣ-#*oB[cKod fd4g= Ǒ}'r-Md͎9}AkˡD$cH!V~۴Ta[IПZz|;^)_s0|Us?ij䉿^g1}Y!-g*se.)_8=HVH3%m=1q빱U)NOz1sSuo"N:hf;dXF۞1?~ނBҷ:\B>1=<۔GDH+59Z;?:!3xJ"$Ҩə_ 2\c dobPYN_-Nt֔k񄂲bFF_#<}XIV-m;\HS7x=/$dF8~u4 =H HMN aZݰz4F-ir:13[*OXk⑂](ƪ TiJ5M=r^ 0^ yaJԻ,Wz )MOBo"j.9L*Klq$f특Pp6I:\[)/ț6nGUT;ϋKcW*RQ[#ں>(/\&Ա)(i]TtE4 RIPcwJR/H.&5n\ZB#'j4+!se[cIsxu[-Ep`>}\o)M.Dѻ - *jVM NDLA!gbnx%Ԙՠɨd$|S'L/TPv:N(d,0Ulk1-?Ҿ5iʥ'\cJ]s+!\aejxfݳY4$<.!`'kI-~ڋcL$i/E~a3 oE~CWܬ2nH.nw B59Ey*]&LxJ_mKlL$%" 5đq:s)e!֒@̟~< ui]\>}JFaur9#ѕO>dZ(Q3[}gƖWdj.YŠK=/>5e[x\>#ǩ ֈqƷD1WLVڠ;@2(GoKs|dGKWFC%71ڃ*B4Gq2o\˹p01s9G}=X4T1) -Tncp8jՅ.&ԹNc1=z1^07}˞{_-ܒ<+xpԾ!d3@nÂ97ߢ˩3Ky[6&8|;#l}a2|4Ҍ>Ro;yM)|wu2<5bdyudw%n MiwCEMsԪ; =9>eb#qkz fV ,q?vs6J]>}g NCT3%x` Dvf|ʝ*JFłf&h |q2ۍ!"[oxv%3HUJ-f^uyE^N$Gqi-c'EldlvTa.aViMĞZ9cV^Ӝ=9L'Q} }=qEijKR 2f'cƩuBMEOC_ķv2;Gɚ];/u0G/O!ϳNj(5XN_>F2oo}?z"ʃp< 6֓ѥm|Ku!R<2NZ3a%۾:3ۄ1;u[\)o6S-ק>ݡ< S';U 晹(XK o{;% 2Txv\tVc7vԉXoа |U*C'$ml< /yTfZ)tVk8 .ֳ]^P/Br9DSY D:*>Ն 踌:W̿r}@O`)SwjTl] N8=)o*[o^C*8ꙔaG(=yQJCm_pXYǩ0.?1vMo5ыŇ8%h})i7Hq~X2 ?v? 
15%Np#s,d֐]xƛ` }vEMD{0BPCtS**\Idwr.JfTٴ(<_LXg:9ن7귾TM[xU*,ʴr9"frjAӰJYe-9u45'PcGWay:9 Sq>dy P]XY>73d^pHLoa 'a~P%)>~ް̛g?z`W,CD9=8Qv[po=5x-.H] m Jל[B|޷D=qdvC;:L$,9in]^Xă]sR޵;2CC-_55:-V ĭ gg.PUZܰǻ??&~mU;tnO85fi&Z ?W»h y<J*=y"&ީcw71ڰcet}&Q7Qnvgzlgi<.zye}ei Fg6ҡ'Xb!>2& `j=$)Ei e.RVj_1 IM!}e6ȑiﵾ}]ĂPA^N34SX?H֏7"YTRX{|*]}ZOʏ-!ryoN0tfz#w)P^yn\.Cї>ElZ]UWTECkQU:sL)OzM0/H},Ni_0nd 2"[`3Uen!SlJ? (ڹ>9؉;`O@H^JMdlӎ{ɒO^S}`uxE,Q͉aPIMw5T,j؂'g>%F?yDxu0wl!ܛv>i} v(hrx-iY̤5wCe1]ፋBOZ}qZz 139m9Ƹu4o8ɕ&|ʉ$hWcO}+MNCn(d.EH{4ҏuϝHJoRJXrJ"裷-, Lne9*27ߪ΍gģic8q<{/1×miUÛ/DP/VG[X3y=\5,Ws.+lJuacd>ob}lvШN*PJ_;5zDȰ<rħH΀R{ħER M˱YQFBfhr5zzрٵ kֱ]6=I bɄϡ>n|A3~{Wtɭ*ۦk=^m7|Lyz³sNJWW0+%lwJƙ{0:;~JICC`%# Oe/?#bUPtIg M ի?]R0o^'s4oNZaR58^3ijv6,).>sٸv5F+̃WXq7n8/8DEHn`pC+ /4q)]8ZjY"ݹN⿷VLȦj55]V9N]&YHh-x@ tykŪ'H&/%Yf<`sCX'x{4/?4 a~jZ/#Pʪ!%ɢ`zhendstream endobj 731 0 obj << /Filter /FlateDecode /Length1 2317 /Length2 19400 /Length3 0 /Length 20760 >> stream xڌP\݂;kpHpw www|rNr>smXI^(aoBWff01201Y##trB h.3ry7ȸY<̜@ebg2yy v7E[z F6Zϳnۿo5@SKWv1za;9gfc`bGn,a4Ut1gs9l,JΖ;^LLGz&wo37kY9FNNFpj { v..w3{'(`8Q70q2%~#fo`XҿQ7zE7zE7zE7zEῈ=oO7zϧSF4~w~ZEwK蝻5}c[_?_{""`&6 _[ FߐSKm^?|ޑ9-~W& O ?^o~1drw__vH޽ĝ,h){?;K?;K?{4R?{wV^ }t &N\߯߾@naބ7ت&Jϝ~g 2Fc[mO9P5/r^kf7a툥ZZ'%k xgYΕeWSېI4:NjI>r3hʆcHhl..Vv ji2K8aR:PˍCDCn\Y 4v87*cUAR K(i&u$po$YƔ4b- j^씥ee5 Y>rՃj )ti JW޿'M`#-D T@2~T K#G\<~?LybldlxR}3cm?|߫1i w;<M\1;|!uC"dBk&0\.g)uly 8L-3s[ZMƹF[zYL|xc ]ϵݟ#dT;kJ{knk-kI.je}\P<Yهa,QZ<?޾J ߗŵO Lgab;aюv1 - ڦUg#SvIhɨg|UN %u0=幝)8ah5\ǽd!xɰJ }Ύ"Z2e+4 d(} '_נ `wZZ5͓7Өjs)KzЃ8de!3<(ɄV?_񦤢q"b]YRv|94dⴕ>#''Ziv)^ xD ((yL kaebLK@dI.ŤޜG-7)h+ߘ>^xā[Ӓz %yZ4E_b- yS]r4\ mzzAQ=Ie( _3^LZf%4ppD 84ހF^t a}v4#:O#cQ{0H=^+f^-Wf Ƚ"jX}>tì}s<RˏTDVd bE> Lݛ:mvixuƏQ=zƖV.=::cTC<;},Hn<]UZ|UXn"o#v8lb(E+x S(8nR'{BiNu"Eiys|:0ږ%K]@if_3;i\1̣h/CY^;%IӦE`tQej♾flcWO6.a24f*=ѭ @Ҋwl'NhpW:Xd՛{V' } ~t]waQ 2?')=T+7C"*^+^pPTJp¦: ~Mk':ɭv$6'ɨ3HG ]S||.nB~!A(1 m~.IpϰhHSWtd,$' Wf~EY9D*C+\ V%AбDgi0DΉ3obUX? 
b> k-EqVK`8kZQ M* YC$}q/}!|SnA\ӀGŻ5@ίBU.`؉r.GVbf^$w4qm-x_7\M=zz~g>=C" _L:izf z)vgCM/T]j18A) Fr u #F!RewQ+Vj%C>Ӥ]1%S~8s|C"r $e]mwcsYYX¦qY3ܟڜTv# 2/RP(nMW>%l|Iv cn QGkX s{rZJ@hYF>w: R6N~^a?c`~~d->Dow`޵߿/Akx:USB).G 9r8\\'2=dk~)7jZ}PP)q n!g]?`j>6UMSt=ᆇ 'OcFoAUG^j.Z6{بq,It`2.|3!5v 63x /z7CA֡z9 hkLwl+eZfĂbu\0u-C^1&?*fUJ[LLUU(׽:N4!Ik P KLq#X!a 0r;m\.u#^ZUS7erG{gv \FmqNye.Ӯ1V+7!&uЂR&`_eSM^昡Sȁݳ1#bl#S ccB?YZ̴4233T`$pvrȿ#B)ݣahox @JG9e?bׅu}y > 1C4tuo w.=rwC9 MG BL9 [XIL2PUA: 6Rqb.^rz~B hDtixSg* uA;D6}"]+;3'^_-Q|G X5ٍ/e(κL j:avÏC83:%ܨVKk_P7;\gG*NR$Mn7N3`}TZ^@Q?a [#Sxr p sq"3#U QR &(: ڙxu<kЧgnTq44fMvGKuᨳy+ ӱd?|cn$o3fr>u{ }qƃeiXmS33TٶZZ1l>׆C6~;<v~js Mo3+9KjtBjA!ew%38YU^?ۦֱw0+ֱ&^7FRdozQ#Ѥ$)]`44^&\i6(f*}I3h`8C}E 4Yg5݅R NGU[+q5 _v@Xe7:e;uȒf#~͟T˩* wKR Ώ͘W'@(T)5,gA- ?wFC |[8+: &o{Xo.|ޑᢜ BlkX)rx3Aeӡ9c>Ʋ98YxI7Zn)4^葦uъ:S a|B,rlEv`Q\!]eɛ|G;ja9T/NӍ{_>aȚRF?~&v Jls©Z|>m{*+q/^@r& cb:z\ܨr?N鱡:R'z|F|`^X $aG %Qfrq/-/goFGKes\S{H3ʟ`]sM jK+C{mnl(I_V/ŘTqU7џZQEBTA-B;MH=y[ƈkE9o8~T FS-]t)#֪Ïcҁ'XXi"`]`\Hk};weWc`]-5sx Bmc`w2QxH+eז 򄗞E$щ #36:6+{h*j]=sJkI/״ $]Q |v:B{ﭝq՞V|tIZ6[\ULY%Q)!ikyl)|d1aAW T|!Pez Bs 4Vm9{iHV3֤ܒEaw8j.%Pp2>́6 JQXE* SZ,a;P|st];G£$ep"gV_k*#^o*־E9%LpZ7a@=*jJ7?׊@tP8ib U)|r6Iz_Xfb2a*7oԟ8܉jѧF%Ch @3#Y /ug˛Wh#mi @KȝKMÁbPz֔ h&UCL*==?`F ˏ ߤVD"m"v2,}LM^ qky[Ό7KӴOdF4YfOSvG8;b AKU]G#βF_I%?IՅ>EV׻(X`Nbt+9Q :m`5F_N'ZWU _Md!p9Q`չ*?݄N`R;K 0-6+ο:½f8\z+bҩcPu#Yr5!]tX"@WP[FGŸA~EFAU![y4ǠS!Q|dC\QUXvBl0>oa()ʡDIs@?rYPCL3C:+8fZx#fm鸚:r2$ pΔ|̤鐧嫢ׂpo"`BJۗ@|} (#(w8ڥM *NœҲcMoAWgپF::TU;Wb\lzMi?zۥ^V-v 8^41'KuZB2-UAq٦sE쏉6M$ V`Wu~,:&% $flXgLokBG7e,Z-_]#CÖt( 9$F#rȸ9gݰV&yqIAh]@ \*DͬƽI}?f7sUIqDC_9 0u<d$q̓ irXq.2 'ƭX*LqSVdqOQAѮ)M\m֣-NߍRqI/ykyc;{-wB*Vd62Ijy6/?k{[\x! 
&;B,2lU4Iʴ`b>jk\^Bj+޵z}}rm93ʚ4G w`Y"ξN1u#&sC(ĄёZ]ɷ pw̄A3Ek+KbQI{a78S͚BHe950B/&[ךьd|زV8sB [q-< D9;"fH?K_K~~/\[ED|U,z~XZ`^"Z{qyE1uJYI+73A]{Odj*Dѷ= 5բV$r}=Lzj-S`/h~_6g\``ORѱ!~}(d;bW4em&ꍄnjED 6qNo`31&fzv|,rͭx=:`'iZ&ds""; ۝4[w3Fz'US1|v$n`t#>9*N'b7GvrqL:MV@Яx6Ca!P7`3_C76ܲr* aUO9K45r$ 9EAR+jF+%RWB ȫ+M\[ь㘯 '7{p˱%٩lS'mjÅ~O眍O/j,#iZ+ e??*S}^,w6kAÅm ,I4'd2qПzYn(l"n콷#{KًoqYj㌺"I 'ӭ:B&6g+_T1gaXI h>i"|[G۪ݹx(lM'"Ũ1q7*ݓ7@|WWǭ!)(-H̨^.2'i \Vm B!M;uw^wYպ~ O78bʰp~[亠 gUT/3{(N ȔTcʒuU<-DS諲NSڢjXșjT]!T: #QD4|nN IKUncec) X!οT4ݚ4\K AsR  ' >R~SO\hOqS#<ލ0)zO@[n >`,Ci\Bv[?dG# }q΂pQ4M`#<_f Qa;r<eKqJGzhPlLHp[/Ձz7uKCA7D\EDL$+0FI#lDVOA@ 5z{QAeDU}T|mZI;=n>` ߳vBj-G`2H""1'd.ILp9dL~#נԩ8 RCi qI[s Z\vh baPR8B"Cpv(9ᤡXvhp;gWEW}B~؈r}|ށtzCsG,()9adRhr&7<ʼJqmѲEBe'2{:||ϐBR~oUM%C+ƞ s7#\LW 8=(GhuOi豱ʼYj1oC..a'uv#A$t0k$bu}*6CuYQ!SQoUahZ!(l 6lP5_@`.?*6i?>JomcTb9zˠ`Gƞm`˅eN~"<u":҆ &|f~d =nᔴif 4H+tIx/a.2PrO҅;}L' qrgE %u.=j٭|Yѫ@:xEµخ^Lq5 k[B Ъ j{JHn:O4nF+A<_#%O" Zmv䦼yѐ}jL-uU_}}QPr$j!b_S@]o,:㺠U5Xy3]6C"52Ͳ|H'S<J'UZ@,vOcP^ 2zػR܈M% /*Y=,8%&5k{mDGa$*zZeC`+B7kZq2դNh䎭?(D9$crT wVpg!w :W?+$jѐ nh7rj{,8Qq͓_@a7~d}-zI8 7G`O9#5IU_4GiNj",7OI'Զ=d0Jwk4I#m[װ qN;Ŀ,2oO9琏YDѩ,Yu~'jӳEc6tԭ=88U]4*ͣk}7mzU"c^kIFE6,BA|!S*J-AЁAuo~JId^ x}MmPJOfRB6I '(~ oӜsFOD1bKVDaA0K9EAsLpϬ(>#qq؁ !X9r`*F\5:n,mҰ^QZ|y//$9~sVm&!. %M&fF@'.~}6.KGK%=7i:.xsjsVYh ݯ $FbF0D<ҕPqwκW-b1:!fr*cJf"ʩWJf3C :e(Fdp9b["4>"aX3Ĉ[8:9zrCl_3j|\\Jt?~mpq(K*uYt(~ϧa+9B"e* 4q~F麌sAF)֒A w>RҜ>tŗT8ˆ"2- g|LtfyzdT ^&"xB[Lu1emg0N(ƹF=s!Y77;F/r֖YdJR+.8GȲ%FZYhOm]oN?g_+6? 
'Bhtsz5Bs GJ@,:?Dj #vX,Sf j=fL~]rΫ,bu9}5gUшC񭥮p RfMOd HEQDb>ꁽZBv:(1W.,TY H<^hZa4 gjdLZgQ+T] 44rF_ '”͊U_Z҃Zٲ6eNnV<[oXt‚$H#uqu+Au}n٩~מxOՉG[a i j'QggGA#sC!3(0QդhԶ0Z:ѳiwn[a тEo!BY5Vi`ۿ~^h(S*9/IDNj ~=zR!!V6 o?#:gV}ٯbN)l3d~Dq\@|**5$g{)3}FRR'vfd:_0x~*]u~GY++lM}SrpPOYӳ N [5KWE:"R2%xR\ɸ2΅R̠Pz쮢K'$m'p&._VkX5fOa:eBgX^I!>$i9Zg>+ܸEG4#WtրD3hQvN,?ZrHŎxv/oB+0P^|c9pшdbE9dִ0yZB)T~sM0PN/Ъrׂ_k:Z_ZfuCe?F-Wo~޼\ d{55 vȞ9?{{!E|ġ%VPE^+Hq=].speZ!<=s/fgÙ?Q=ƫl/ZjEDIrtqɆJz8gi.e,# ػ~~yP ސ0vJM`^sXHUїS.@ŧ.n=sW wg~LDtD"' μK zQn{t.fh bMγ((ĶkǜǸN #XEJgOZēT/$b$WIdu7șf F+a}Tt!:>?3Z->(QE2n鴻|Eҡ{B,Six<-awl#:a,'3*f?b~˴Df j(ɋJc-hO9سA eip܈}ܒ@r|dD&d%g-.U|?m귐1؟ŷw'V5]>-/Q.;o$ &[Xٷ/2MII,&}esqsCq]1;wR ɤhbxxaR➩g-op++m^JњGxAI*PL S#9h 9={ftyۏ7  yB&3 7R Цk ]ae8Ցdm!#<[QV;[p{(:M.U ˾z}GpStPZ9-L ZR`qc/'Z(OLLxn5Nyr-a#75.E7K]^N!g!iUQwP&Gp6"Kym!JȖZSR@ 4KrC|tƢԎ{k@-N܅.Fkt7οLouS!s ѮtfjI£Rxb? b_{rܑn&<1}SPƂkLNkEsm.R@=Pbڮ6݃Y6~HxVmz)0kvqne۟*jnR;g=Mӗ͡Y{]8%gD)!@"ul .\G}sturf+[8t1c e frETx͆bxjMW<%ms-1exs ۠tC>seI߾4khf]"nEMxsK'Qõ=SK~ Ŀ D[%E\ovagk4>*Ƹ: N QiRRF2OiuVؘ!V{A肘 q:Youw9NlX3R\^SH߄!Y"Ο$v3C8xtH9k˷B QItmXc)Sa +1;ݤoPIS:-T q.w`Ğ~4'FX$[wc BO|=!|'&S&ˡT5z +t H '1ݹ5?<1rȪoY4w;0Q&lw9dqfvΠgaWEfC5 ܰ$Fp<Tg& Qx7#tԥxS9 /qU1Y}6qQẁ+'GE)Y-q,B8W#t@: m3pGG^t9GfY^R^\-Vh-"KK[o/@Bo;\-:7)G\qًސ@_m)r25<+6Nr;EU%ފ@ PO6yχDzW}D_`$Y$DZËJz6V lfJ{pb-Y!u~C1R*9Ȅϥۊ|04ҍ+%U7nH`%Ÿ͎]\0Z .x5`Яol(~6X68/ңh0]i.йUwGjҭaD*SDT϶ZcAQVF#U U [WWTYT*4hyZh Q7hNt@G+k/zl3[58$f˫bhꟅF?,O̯R'U 2&@KZϘ{qNCA;Kpy=nmS&mE%٦n[ z.WtRm_Y*ZۿjM8;P>%eOgFelOދVCrϭ.b,?HF 0N[b!)AYUC$IrQRz~ɐb|$oĻt/vz 'UbN.WBZGc jMn6T㱌%awz?y9+z1&~'UߵTgv2uTW?R^ozQ@$rm-n˞VC#̘Sdz,ɿ6Ѐ&V)h%_'ۈֻxgt[s ku?C8tix񼥭1b8pbw,!@ *pnFs*;* m3)JsTwBӈϱ+j湅8;AV(35t.#ey4sM.hȆ<.E^A10xaUI5GZenkWӮu#C_#Kj/0T89J}hvl'"sjvO|qn}Vw; )a\).22϶%|VS\4"$F q=B%SX; %WMc*cOZ>b:lx+|`HOM˨p=t yv.V2$V%'CZz̩1`M >\y-g]XQ6_1} d TNƀ]󽟗(ׂcUND=߂Od%7Ƞ܉c̅RkFnYCvFReQ_܍o.Hk!Y^>!ؠ\$RoP@ϒ"_7|j t$~gѼpnBvvAoZl.x˭U 7Stoow '> hW3&JfD$h9oTo/sqqlc(Z/(bJRu: ǯ/e#9bkc6;NqOêzYby)μ.~FsEϾ3^{6j0}s_~Vg[TT`z{2V3bQ !SXz9N2Zj9ҶÚ)>v+?6 >M8t+K<,|H 
Ε٫vU00pT)qʂxF馆x~VHθ~j22"8`ob]'!%!Hπ׺85]ڞilxL>/xiĮP7|(`Hs5>!'{/Wz81Oq#tc.8Tt]^G 'X!cQ;!dm4>:[o6>l {,Z*!w<>\pĔ||b[vV Q2i`((F:Nh9t ά\bc_n=c K`SljG| g6R$hH;B^8I~XTV8]Ӵ,5͝4$n@&M65O|u81Y3/$/Z W|Mj[=,w61x2q9T^&`^0[2FKĀF<8cpn*Kd؎" AowG/‹mbfݯ6-_&\WB۲L~9 1kZokh3ڹ%LsN[45 IƝa *`88';"̿$Yc<\/;E:u{j.l"놪ᲗIwĄ>9c!Wժc9ݨ0IQGY:٪C$L!f(sŀP{sKբMS'+)WfB2~e<:?l x.Bf8b}Eo G"ꉼ endstream endobj 732 0 obj << /Filter /FlateDecode /Length 4417 >> stream x\oBTY1s !) lh$dYzݿy!9HG"ù{&6W'~&Fɫgm奞:4uo WF).OgRT|q*62 #}g+n(K{|a3Xd&Ulxkx[3?+!M8R|'Zfw8rVʻZ$TJT%x ߄^]J3>t&.^4+-|e[ڇV;7[yivSiٹ-Δf_ g7s#;g㱕֧ ӕv~`}O ΪC+W@}N~u '9a8E}w969߃K&wu32zדʊQT~Rk7B |#…N8A\PKb t%%Na/PU*^GQY;d3>ͱ {|4y|t<[i`Ep`܆Bo  i& ::i 2,:pAp}{v:j+fqS@Yc`q%֟t;`3* qJ(;L :HSsuZ215È煭)mnhrCZOI_ny#Bw>gaFWLmwNX^#_< eAcmqa|&cUxeҎ{;0qĠq1"#ęCٖ1-%ƥ]Ru~HE&{1wϏExXEedG`gx8PfRruQv8R9?gz" %x~cnj':LeR>iȔzPnIuhDp(@>SP't<4@w7K+, K@+8zp:xL7l3{E)XcfgX851ǦC}ɠۊKn|@ xDֈVgޜxw "[Cj@@`I%}FӞ870+s8z,3pGW'$J8SUHI:?u DݠtܚD|vIAjBɩj݈ŎXgZ[O!_ɷ0b868r=ʵ׵Pc\ ka9$4BA5: Hd.>Gm77*iL*WOp+' ! )z_C =Bj:t|!++ {+FBv+< DQai;wS~[Ͽx{{-jᭇΛempsÇk{w9n⏥s^0:ǽ7 u{Vt npP+ K7ᾓڄ{Eٗ ª{pE(oh }љDmtMB޵vaǶCn ]򷸡Dpʔr"ST1'C.խ YGuF|k I{vW |j(ERTQp\T12X.)sR%-J"s`}3}Cz5.> Y3헤nREaHkBb\R꒶JtـJ1'h,пd=݅056ܦu|1XQ/.`G{` iAF=8{zov)fMb&i4ޠ ˦"|2)6VEٜRY&?k6#]#icnewl1Iyx uP91q:堼J<|P?Yr F}MhE4z)IWSceP_H &X݄`7\/4Eвj Tr܂ArK ԡG ƪfX2q>ol9<҂A{(fI |֙^MRd k J9.S;w8Z.WoR@,v`)l>2u :@dLGGwT&ml2%"/`l*O ϑʘ;[E"7~J@EWW/]ցmC +{e "ayE?YDUק:R+2n;S-aI}DRޔ;a9A[\i]AMR"$4câ3u$yKRT)CRC5,VƄT|L& b[A%UJt̏K!/S1II\W8V =En'$tnfHGM&u\ /bP)Wvҫ>&UaRY>96++f6q޿9]YgZ]VdG˺Ɔ~^2S8E'j'C(Ʃ,K~T6sJ*n+2ug3MpXexFKh=I g6A%TY*#JO[v7=h0Khtm;_A¹]MGH=ςoǝbKӮ7U*u#QAmxFFael/;u~nZ&Bv/ -PFNa;FDG+t7b%(j=*jSU@!bGX:EFNz"k,/Q]%+(X[ъY8>w=@L_Y^P<}?JOB-A (]<żnO?[RjE٧ӕBz +:SKs 2ϋu8UNQpIIQx锜KЕsuxFF>^æ ]e@ xQʔW8icć)T(0Cv "Hlt~ݖp%-e$Ƅ?$`AvhYq-+\ ]g:A[|1pg$endstream endobj 733 0 obj << /Filter /FlateDecode /Length 4622 >> stream x[[su~ׯG yUڋ]^O*I*j)9Wd53[yHrwjV]}7ߖfդMw\͓\cXp=bM' #u7K Sqlp\3jO~v.+L`y,x$OhUY׬Ȥ|O&1Mmf]&.^e:\#⻎7=|{YǑn!@NQף7=2}M߯öMVFz} ? 
^6HixX^˺H:Mp=˫>{ H-ȅ{YQlM,\U ]y.~ƥn.~vEZv­Yr\9Ѐ=aL&uyX\[ {vYfѹCW.k Bc$\{Sh[uڌdiBpϯ~Sjb OwBSy]UٳN4Q@f+I.r:O)oLn//1bcҦXפMG"P/L_E[F@\ Axh$' k{%NkJ b} 7G |(7 ]!9A;.&9G:vIHO$˸)԰ԟM벜)jUMekH0I]#χrDM"!F{ka.yDV{>2tʤe䙨 A_L̡lL71䳤uݼŧ3QIUd,键\,5ʾ%s~'diD8s(oP†UiU _߼!̬rgS h9\lJPm5=`s%\Y݉Oj05k޲=LVܽ410y'rK"^Ub,{Ħ,s\/qii/@ LˌD@j+S˼U |̷~eY -S[ɤ| ma@shW4oÉ(@\|涩io4QЂ㵩YP  w[dǼ J Ѐ.dP[dy ..gMZ7jWcC\XQ2"3\ME[ jB";US7}>"u^؇JN@> -I-/Ij%tM,O}p&RdrEU^CsW)ֽUh&^nVR{3gmn^O,~ᾑ]rNv 6-"4 4`Kn{ {p85 p8dkfq!% "9`ct7DҨͼH~*C@ ŽM{?fLiʕ@ *w`ltQX@;#0ֳ!qS)k _@QB)&*XW0rrC@ F~Rvi wȤv{3eGi?.-a:hM1l|jQ`u^c t RxLt{?n㉤&K~bmh&-?z̮{A'8"S0vzܛt l69UJ/9&-Yqߌvg)<mn,^u{x53 : %GBH3 5Ga@ ռŇ{04l:=Tf4>"6H4DpMbX2!_\lNxDzڝ8 cH¥:yM1VD 3iJHp789|d=vVyIUX٥1 Ni>,%DWV|!QPXλЋZO= ^M_PBaZBILAYB0]ΒqljM5͜xʦb2B,Oz.bWoѷNWf\X٦L].eݪ'"fOﲗwK)YhzBm|10lPxLieZ>M^ii<- j1IіEj34~lfEq5^wJm5k5q|jJU' D L3Nήâ~C32۱{ :s=є8ȱ-g9xeΉec[(jxXq=x/,; 6-;-m-)T!yR !c lI2FBFՎ^^fS,%>;8D/8Ai)ȭU'Gzp&DhAG>n2&F힢X|wpsl r呟(_4\Ծγ߻yX-Wck}Z&cm !tj"5!8o2pD% esA~3I%}Ym,!~V?q]'˪D(>(M73 jj#/m&XX^zNayi}*l@v}# ?N xЍ߹g=L]rHyG@,'NZ۩|quhF8TaIİ;^/9HزG#a[ot\:py1 >\iY ˒uqL>ዸir^->ȠK>(FlR[2RߜmvSRa!'<£dw:m I0z1=ѣOgDwzV܇ňbU9S)TvAc2HGR`;>s _܏N)o?xAF=_#waT;=izcߣLR \*4o\*ذb'aw_pn G];)^J6i,m4%)aY * +GhDNG@U|o떹+HxASϳi5-KݤU^Lay2;y Cܘz-d6 1mQ-/KM0UL3 3&5xgY=o4D b?&~AœFwl/]G9I/Aõ61_RvpP$_AxVHNrKߞgnZaa9`;Zvoa3=J]jSqoo SkvO'e؎>CduN<-W_ǒ?Ƈ ËkW_4.fp& +;rt~=;wb+endstream endobj 734 0 obj << /Filter /FlateDecode /Length 4792 >> stream xڭ;kF+IX |>%z/3XgE쯿zd Wu=wyû>6wmV{xk󻺬(w\D@00<ܛE,b6nuoZt=S"1PVXIStPj%t{Tq\Q&ކѪ\1 ]HH~CEk6,%tQE;ߎu^<I3#~4mDT)}dĜǒl]d.q yT&)l䫌ķhaVTKgk Z*A}#-|DL:J)F>5D^EYLX!x84!cB]^ECf4bL<7Ҩ1Gb1V%7G@d7>jo C kQ"~Ƶ$QEdQ/F "[|!\FnYlb7`ճ&MD:߲s(C>w3\Z Gi8|XaJxJDR:,ڹpE~fguf¾0@>/A$N& DQ>'|uDwχƘ̌?d{v mz{b>ކ+EQFKQ`x_-ܸ9,.ط#W)G@`۝Ir19@\>dx"ӹUKOz#Y!M7+P4͜$?O{٪"D1BⱻcO3h#Wdi!}3\άe.S^1r0W1LV6y{N">ͯfd Hq&Da:Ys_U\D G<ʄ(`oZ[U2MٺNYk2N: 6>~c1 ` ֠d35E-O|.% fګZ8HA]nM)-}A6T2?%b)i⣝e&oțQ|蹌x!9 &{α mP7l m YU)cIJn^s,f²qrL"rz yLÊS%Bx)_wVC/2]~8jcKcYu DX_fE?DI|gQJPG"1z+QIǻ94iQÈ#?8|~o. JhUj=6! 
)IpTM4 Y0uz)#(δ+u]j #JU҉p#RGqvE"5=+BM6/sWo;NkiKlTÑiI͡fosRɯGv.4Fe YLe}R8)oxl 6@jX=L 5+BU20b`97r K '{_ȘZ2-jp1I}&$$LOAK_Dc+32Nn8 \XﴓE5D:,YN(*OqEˆyQiᙃYNV.`y.X|E3,۳"A :bߓfcj1Iޯm}EvOR./&bR)rU>JQ|?7HCp.lR0زB-<zͪju_ĀG#_,ڣdQ6 1vX䮱a\gMpfsnͯ .5]2M\_g&Y]Փދ7"&utxJ_'HFEl|dWޗaJA!=eSSξ%\^ ȂԂZ F)u&jo84.slN1LFY>>B爺6wer;'RhLTT< XKy=)##\@ [9jߚ#< f5LoXmO\.p ?g gN':iHuc x_*T /{!@MI+tiaԨ+|yO;nfUx I*31Be~GL* !ps^8rn\vC6j񍴏#p=J35TVx4Y×s\5Lq^lzcs)u;ɹoyA}}T Utͷi4XFEPsIf GCT.o $1b|ay|'!~S1حyx|~*СU_D,v߀q3 Mq\ȐK vq+:C\{Uዦ7Y|ƻ_UGRG|/DmRcR5 WɌc]}70N2oo^ʺ+.w3Xԗ06<_pNByՌԍg$Efb5cs*BMc£W+; V*_1k5ϑeC|]>5hں-հ)Ij~k׫YDѻM"5'5{գzgeulcx>({-%5B>4s߱J&O/M`=[3\1A%W<|,B]Ix?ތ2%-FGIv>//R g*x'i|V6 #(c.х`rS ";^#;h'JRЛdr1f*@^Zendstream endobj 735 0 obj << /Filter /FlateDecode /Length 4471 >> stream xڵ[Y#5~_ؗuǎ :a v8v"`7ڮ6h\mfo^RIeBTIYRJTZfjo^ҳh+Sňiծ.Zf7/L3?+SZXk_{p 6~Vca|&) g5Wkx٢4veLyѦUAs:+ZDA"XIq-%Ëi9f5αWr4ɽnEx }0caK (8PWqI{4;y70|zG|:i&c22]ُoH?&:|q\]l&nad#` ^f< /ROD9Z>,XfL#ƭrQ FKr`. ]YOVR|T/_q$ yf I<1B[]A>܇[ bL*LJZb-HoI6Eh7PjX hk;xuVo'c0-U1$)w٠čq3{g8X%v6w =%~ea)[Y Yu6y<;۪;"c;/,Ytʥ8 Gjʜ!*Ej =?#lS.'څC,@LPߩ &'j'rF]^`c#LHv՞zͭ9e`ۓ-&?% 3ch-{)-X:vg K(7&IV&# aGz/p.ݛIc]*C!sѮ4{+b"#!w9x^ H:w籫Njֻ\C9J2IUQ2[(Iߏ|sW} 5{䗐& >Ʉq)FG̣t֏0.{Y8lm{q=,DtNF֒VUKYNFƻ L"OE(.^#qL=Z\X4ݿ [=ɛ CC)ˇWnb]ɚX.s?pb{HE' fQ>!De.HD}EQܙ0H;"w1/}$Ťc0g>`*5ⴏPfqIyb}CYve'ߡ=Jc.. T[oLQB.ñED6S`jM7OUt'2$5/"ogQS4fsURu1fTT,8= 9{9r.aA X@F: rx޼®n3*~`Z c$āR8'|bmBx5^̈́+B´P)Hw+T+#<=򂑝􊃣HNyL1WpJJذğ\B<kc8!Յ#~l}$,; t(;tr3qmꌟUS_㰑F"_]3N(c.\7$ѳnHj&YS.H⟹zv_esX~64!OBQx"rQoClljWSǒRXy? cѹ ;Bxwf]a\Q~8*\머$`8e}ٴBVΤUeLJu]?<S%){[ЛnK8u[Tt~e`̔ ʑnKSV0%SNӐ8n[*-9ᒝp)lO:a'?si.qYL+WRE꛰2Ď!~ͪ6'\aa 7<^9^%6$Uaz8VRF,#-7j0DD=f; KvjjM-$!> ةs]\xC(V;Ή[VE)Wi KO=b`~gLzu4K,x-qQٹ1߾ o䱞&|-ʋlwfw >LʞX. 
+्ע.fRO.(5&^aNnd"y@ʤMu'I V`|''nn& )\2:NqJ"=e0L5nJ šO!ٞܤ0`rBWg> stream x[[ܶ~I dn.4)PvF;32y_Revi ;ytHwjy\y틯_WU\6"+*ͮnW_~y8D,M~eTϯmrQlOZ|*6ri^$feqQYN֞缻\1oap?H,F-r yHk_qǶ[;k mv`"گ?w:'.n/yY'RYYLۊ7)˞cSV*op2C2ğ$͙c!L*Ғ`)Srp@,$NJ2@-dK&+*7fb&.A'WMSrU[/w+jbW x:[V]4LD&=\zĕX3(HZn Y;~q'X?9"k Չ|vNo Sr X-A,yωya8YZV!U%h HAꈉ7o޼ow+q6/A|,Z$Z< ]ۊpǔE<(cS\Y:r-Za4 r`2H6nec-c[iƋL^-bX h@yso@ %aUw6ۖ;*gAT']:4zAcٍ@M{a[ezQMX6ā.šQP![!vP546k5&{6g*bx'Ajc ?}F_7,6cZ#sG@vNaٻ[6.+8Tu'[ed@|(eLuټ''C(>C!la|?:ubw߂g2VUߎM]~G{16#cYo!pϽ|(LN,ov"l؛$גn:V'!1(%hUqVپ5_T}Cf{|}Amw7d2e 5)%>:p8$xJh 3>6En <}t'oIk%Mi^tx'#Džց1H !EFsd۴MD6VgJ4u%SI|L$9] L\ *Ϥ+=l9ۜLwNe#O\q\ 'KfC-eYcEF: ƷGfy,.V;VSbѨFѴS"Fnhnm>XL>HA!>`ڬIA9)+h:J4ߴNlE<%HfTٽH>;ԋtAn6[蘆 m"+@ܫCTV)A%F/@*| nxLt'Bs]×=pTˆ_Mmպ1:\S*Cp$Za:ӭ5o}G z:|ቘjaSb~d>%X+3)FXu<$9$eUm 7ɳHLY DyI&嗦K2? s> N# )ܬrSF͓eKv}X34Γ"4ؖs%/X9_xL?#(ziv%6\:j!{?)Ƅ}düL{ A>*Ȑg(T \6scp Dj?ѻ7 3<U+&{Z?qtgﺛpg2[@|ѻg\E֘ ت{)(zĵ7ۚ(Iz[:$N/VBV.~=P`Km+1\;vr*Fi|@[tuc}a#Ӗ 獖FrĖ7ѣxr&}{mx`D 7c{) ~ɚ^ Lsr&J#8~8!ZBG)ܠr4;j'UAɗ t9cKAޑ >t.#w3QYe0$fq%òku⎕~L"1x|UceR3Ez;Uv>Yiendstream endobj 737 0 obj << /Filter /FlateDecode /Length 149 >> stream x3135R0P0Bc3csCB.c46K$r9yr+p{E=}JJS ]  b<]00 @0?`d=0s@f d'n.WO@.sudendstream endobj 738 0 obj << /Filter /FlateDecode /Length1 1358 /Length2 7249 /Length3 0 /Length 8181 >> stream xڍwTk6!!(%1H 5tHtI 1C !! -H7{Zfg}^FZMNik%DCpreԞyxxx@8P#o? /+Ɂw@58  A<<"ᮢ@9T n8pg/Wߏ@+V tj;]G+#Pn UEpF"\`'7.+ E!nW5e: #PW@n@]!;# sKqYC\w݁:OU_`տ?O?ٿ AaVVp'g0 @!@ U.'Yw`0ly}t0PAZ c+uőWkY 0ίA]!Vwg08eYa uqw+h \ 7 Kޝhw50^/_u]]]ӿ xBpfgVb!!m?)O&X9}f]/Y2]O|Yg9mzђz{e=ފyw`Wm}JN] k_@fewa<ͼG?=ͬkmT ^}~eX<Řk5 DpRazONQgۋ+1ZŜO{/u3O'|dR>E4q HlGR@J Iڒr)'9=T0Λ om{d0ȆOB y||+gnXkuEp8#bRdT/iD=]Y~XT*ls~ry\͔h ӎWKm)l=*]WFӽfJn NK:?yX?Ơ@{ H,7Iix57+مX$`͍lZPh!Wn\@7#U=YlZU dyU{Fz?KgXRtM~:Vo8ffZrj'(lrknSpwZQ-p q1(.}Sp p@iэ5d.;4ٽ"8>Y{Dž<8lT!c댠J;~ 0gr4eSʩVfxGErw`c!Y~unB;rAI_~ ojJ?%!%␪ۗL:b-l\Ǯ"%ςψfht]L [|Ya9* ԥ. 
B`'>-k[ɳwϰF70 0\MI}5!ξ)y:U2z޲M~a#I`V'4ݍ^鄈M=]28L'[ `0ʊC)_L5O"YW[KR!<Ӌ=:9/x*^4B1a=JҳdQ,0mst(-ʏ$ɘ /FVo|ͪOƼyAC/0z &UQGGi8dO̕& /=c!*?jC/[ҧml[2Pk ZyXra3x@BBeF\0(ݙ3`d}⻔{ y-7 _d}&c d+Tİ֔T:jKiO8ېMGuRI~P.[{%y1)tWLq(%խC||n|Zzв _DMwrjz%l3)8x4vV[ `w? wqEXb5VlP)`wOt4=t16ܳvlS=i10\5`}SIO٭yx?プ|A q{:ǨZv^)RØD&I)@ /T^bB0?'q~!Rez9BʻD*i.Ϙ6u|Z &KF:S#Eҹ PkчnGecT!({\SOÎz&Qa D%&65e|+g!Ȳ%cɹa[# 6uIlswd:aT?.+L*bDzu/*|Kgn4Ѹ`~۩HhU["}Fﰮᦠ'<;MϮ!pE9ųz]&6o134o)61m*>sqR)Y ΀$Q ${6O[8i5չnG9qNUmƷuw'Ig+uF]l;۫t=}ur")ѩii@>~WÀ45ő T("@Y}> Je+AjlT褅^zm6 X*>c,*9L1=] )?nOQ\'g m8/Gs892t8&QT;\}u᪯:h`|l(VƺbrF+VaK AH %yӎy%{)|qJlWp+nE>HaŵLBWțILqׁf/XJ$ 3Eɔ:ʣWtȣfy<Y>aPВe{]wž^TL(ZDH|Çٰ6.?WAg 5|UR IeB{MSG2*=(9"$?{{-ə3C#`Y[ө7=>BSFݺ ńsB[SU  Vb14'^j}=Tt:}W'4700bʙ#qTFE"{8׭^L:G]&dwf{F3c؜S]&Z.~䬕GvW|=_ʙm4R?qZt\( ʖcx~K̑n,bnb9P/v{7ǷvYqܿ"' gz p w6 $x*Pp|x*3rgz(+XTf ƿEj$aNBFm(ڤƥ׉$g1'v'%2UXw~xP(hP}C I[ν<0 oÑo!k%Ymӷ5Vb~UOq9$Z!j"4pCo䧳l[(VX"kN@iRa\*m1IvoEAԻt·0$2eLi"Y'+Q7ֳFb,ޫ z>)0DV*T8;UޡǬ%_%X\4>骩A+ԤlA;*b)ms{uMc=u"&T&?@_q70A;tw]r1ԯS츗T5z,ѣll lDŽl6DaU=3~]vF+R0@,Y s*Η~:\0l?g-@Ȫɠq9cw)e/K6 u`恕UH^ 阡.m~ӂ}S$IP$vKit2dU!3EX~Xa29cK#t=ǡ *Zl{;,]?ay|,3q檖*QTB8e$If/ XZz1s+^|Z WGFz/xsnu{[J苯*-LQE-\]X1QPXS`0<53Tl/}ɸ ])9=csV毻ዺҸTk'D i){MdֺdAw/}Օ7;D\\H.J_Dţ:02K }XW.ׯ j4wbKoԾ:LZwޤf*K]'oQ7 ,(A8e%+6h"$E{: 74cb߫-&G}tJ{b9Z!#Դ!sD\1Z_E-~ nJ {3_| 45.z[@g6 C|WasBV͆K+Q<&Td$IK}v3̶z(zV YM%^눖 .<>o^8B5+̵.wKm ( ޏK-8bQ_5.@3Rƾ|$zz-EݔХ䱸$rKB}e.%VUPgD%% vNFp K/mV_D[i/,"AiqҜ9]OQnuxyEH(6Tª2\pQ SB앾jKY_yxt/V.{912;  ,BW{!qhtEb1ndmd f9M7= s2O1%Acs.5|,L)(]:V-؇sk:\Qc!UBH;'!>WZ/&6X3,Up/;GsZis XkpxQ3s3ޡӪtg7Jv7D]$_89A(BS*֕nl-%|jon P/>erT]aMF;D EhKC4`a V:s ;'~Ne/Ԋ PHl8t)^!XM{D |7Co5ۼW W׫^rE|XZUW2d՞XpY3rJ_:f1s*eGǖnzx?|gGu!rh`5"3b $3qs)]nDُ韭@WkםgHj- t,e`&ί7fQXj=:N*}Z3RAGj:o\bwM+:1Zso֓q CH.J"U0lߡTt(FǕ!fq0΋T⥇ 2frY1'69=+9Tzt y_=\Ko:h"{)a| c:y2^|-`r_GejV uD4/kCYqWM \*֎m>` |m4"S#,N=n|Ƒ}zggm1)~U`t{doV;h3i #V"7[̞&KqPmr~Xŕ j< ,Hf*V4Jx@ism3> xxZZ>P*ٺ]Pƻ^< *KN j/ota6Z7@\Icp.lN vnװfw S.m[- ,dBw $R#LT,.[뙳Ru z /Qa!j;_62Sph.Ϫ+hC"gMu a.f^?_ѢMw_wʩi`v `-N.kyiDY)h%zo:dv`Эf4_Rj4ƮmCԶ޵[l5Y wE6t 
)vjnDJl֯fRߒFm!;x<\ewNʥ,2R\Y]n۱%!F'ϬPv2=aG)iUO( < ۍFI^^l\RjgoVLPʗ,PlZC2M> stream xڌtk Ƕhb۶1'm5m۶jF$oyo]\׾3J&vF@ ;[gzf&3\9g-?,D21CCy;[5`ab#@ :{8Z;W15ow  olhlh P3:{O*>sgg{FF777C';G3j:9@tt P09@_ U;Sg7CG C`ma upq5:>T@ˀ `ja (J18; mM24v7t5640tC29;Z;;18YXő0m5:;U#>\+[;7[ S [ӿh3Z8m!#3:ؙXY@؜j?8xL?h},Lp^N@ 럊Ep cgO1_-L `ofbgk#f"-.JoUع,vVN7-U2wm'mkjKC v P~&v&7++ߊ$\SGohcaoivq y2(A>2(A Ob0AlG/E:?Cg}Tf_38HdM?ѓMc|4A1R۹8(?%~4h?GU `>Zb~\J"~L?l?2?^@?-bc:Qǽbn<?:>1vS_;}!>)ct6wp>8#?G3]?|x~S͇'_f0vqߗǵtí[6w= /jQ{9v A'Sf8>'lS z7Au$*w2 mkd\q^Msx;h y Rې{pTʡQ-,h(%|e(gzkwEw"xZ8hb/]eϭ*5>\2\-{y /U&ja"^_`U iϑVqhM$$ J`iM]c< 5e4j{Nhcnl9`CqyYf{ Z^/vT_ A%#epףrwSkKo08ȟJa4["nJdiC )=.3heBTHfäFhStZ>_" *m9yĒVv_[k~=F&VHIQ~p"(t E^4bwds{+p,r,f*卺_Tޑc+tH}ӦyLF5ՏhPi/dR5v,pw(<ޯX2DOJ5oE=Ŏ/ʷ.Ҹ۴-ci֤bL \l"28Wٛz1ysUf1{1<Cl.dY?mRhlcGnz\H }ں$nJ[_Ng -Rޔ%5Ak>ק!)rX˽K%QW!hFtW^~*wyrw41T2۵f`JFa6KDzaTS]zDQ?HA@)!$+O I`M9uLHy)6s%XsʒI@iUf.?ZKK~s T}1Tj\I5F9OsqIⳁ [B̸ z*)ٷ%XzYĿhYKrX{*u8=7 =O2_ wԤx"EB*JI$7#U`&%<~#LߑDrbtGt=K\(wP$PEy{cfyٷlh>NxnM&dc!*Q|;?}TQɬ-Ыe&G+8*rNv9J}&SJAyD&2,YK0i,Bg ^vL7S)NrӹRnp%:g@D\H eB/K,H)%@#*ɢ$ġ+H%Opq b]?|Dr(]s=5XJ[@- 1yh[+i[2B$*~JMʯyUe/jp^T8< ӲR⿗ c:~Gd 唚[SMm9ХH=0N`l$o!\ [GWz~.01@NAAb?M61_6!˂\P\y s? 0"4Oxf-RZgu͕egAVg#kn?C'pC]Ek#ѧA2Њ}o# \Y3vgkgI05ՈAGo~Ն45&q o4Ly ì#DcC܊-ڬL?G &7à#q- {5QRݓ5DG,H7"ݘ-">Z9mh{'VAjvEEm}Z:յ޹+rW$k\ٸv pz|OSbUCLV0JfpnG],"ge%m cfԻ I~̐p+A+f` t%e+Fm\ sTqHtM4(Zud6NHIdIJājω}$;$*c7|O V-a]!B{!`ܱ f28f_e S3&khnW3ZA4m{u oy3} e7`\ˉ=r/`ؿye#yk_5ɧ$C} 6?h3!\QЦWF= b@)J_ ScBRIt4_?B"AgH|^E,tFМb Q4!%)Uo8 g7fϯ fC}NWz h]S#\u&_EĮgƇYi@ubr!6yVd iiN%]lv,޷'tI$cىF"3cW77$gstx;lGYwj݉@a1 b`9b ?&G=3r !>s pK `{O8! aX &- 4 Ǿc|D$3(Xv}TDf3U;CMꢁ!UdT]'49 5+LK3D*,|9T"{ފ;,*SOwu/ic]}*gDMU@"~.3G[H#i_K%@i؇>& ĩ+STӸ3W ߷ %`ɤ 4@ɓwpPC"] e_P:Q*Ls˶ }nw=}r L$!̂RdձE=l wmp<;v!:ˎ 2DgoyVU =ݟjv֏h5?FRW<߅|3t eB*#nj%.4&N2iT:܏nj:] gf}ϮY+;zx{n'^;,~sv#r3ˑwȕKJ6乤C^}EvNYF8=OziwDS)4Nž&Ox^FŋVJF0# J=Ղ"U* .ND@.(ӠGú?f]~[ڵ( 4i~դv/iP? 
M, ܱqx+ΠfԬ7 M$az .T&5F[C9lNJv\(lDqh"A#20v; ڕDo 0tuߒ[AZ2m=|y ^'1f?]-%ﱊbOKo!XЬ ?JܟZ#cŪg#e$7Le/̏esMz%|TW?^%\(q3zxD&֕j$^u7 Ba@LRs+̃v;,ޚ^b(TbhJCj>,> ](MA+YY=j&!;('(dXO>y0O)!4B2%xnJT,du(`Ŷ,lSڟtZˎv֑3Um5W{wE4gc韱+^]+w6%]8PW -~$h*=<A>Fo{N?=r a=G].{YAaD4P ֡} fs+gL13rK:ޮ:'1P y=]Wy*(O[+kn9V?\ ϴZk3x|sbz_wC) Qй/8`YqN 7s@Ӓ/6:O0XYA!5$ʽb=؀S'VoLRo`ѷ]J_v{ WE3Dl2- Cд0u=c=wcYj!@o{\'nD@2 UZ93*.ABw`w78c-~ H21yBUu#ȯݞO{lT&dJsN}1ȳJ{Qoy1sUvGū Ir4yk9.QD>9Dz) S}9si oM{wH'$ 7Sܗ%E_(`g%Y ?S29dቹD&cA6 g]]d[U*nrVe|Gw[iazvl$]=̘JHOK~Tg[TPEZ\oր'D*VdLp vR/鮞7ǙAX)`cZ`gZCkLBi,|NHCQѻ5IL $xN35G` ~-ӗ0ձ%mˣJ a-'I_0fp_RKǂfbvZi:EPAi˪17%4lMB}fU4]ҍ[]3ص06-慂bX|:.%*j\L U<-a=T)) xً$,Ё7h#{`6:.E_5 ʎ dEjw KnNѹyngGzizF3zJeqS d3lՃܖnYUzQ+)_ʑ@U_VW> AoZEFkh5_X%ՖQ*p!ˇCӊFEL-V\%cj"4 <όoIv->iN e D禨F*]<6mV@R[V)$4󰊊*La5 r.&,yaOvFch_]7]/qmks.~&elp {j9$(й%}"tb%@W#g`eaIp <ޏHH$U@Kt|$3B1L50#pfP5}&އk }& vhMX{pDڎpw\Gx*IOihD];Fb Dnrth $ɧ 3FEtˡR5A(-]ځ!?0)˕ۚ(p2ϋb-'SNEd#ݷ^啃& \ȸJ1 $lrLl9 HY,d |mcn $K{iIMҋ !.}] AM/{x{tHWY0 ) y9d .CAZi c~sQQ9љ Inusx!z}\@̄5*7L>~f˼hG;spƉnX-u/ģqBZdШB"tz*hsYQ< d`d=arN֧2f0|E*/"7$~e9 Ur54-oOrYJ%F$tiAjEI 䰿\1`ſ OLCnW*)XoߪA@,cut_廩 EO|>tȿۋ F0o1EZ`ʶ dL|'#)>ttuq]ʱKqym9{i@CSwY)! 
62a`_&>׻#1WM/o~vJ-/:ԋa]*g#-U8HAG6Hj~ HԦ&-Ve')5:ugɭbƼ'y4qo>d$Meu ;E205 ?%Zj港v6&N//^=2b~kD @XLqB0˷'kܭ`@Ty9_od/eD|utVk6N5zd64~Yf%d֜uqsrtњ;'vz[sf^gÙUcʢ Iz _elW$߳PҁIS"sRW +I7 J#m5D}@x쟪0wD[k= ?}e.Pۃ[8+Bh WDBRaMcB:٦C,y$}_Ϳ0^L ^ (d?PCQLF}nŶEZygDv?~U'|~r~LdYc 'E-8~<5ѡ1CsB^ ]։i-- PY9u<]le* SHD,OҲdѝPGkGӗ.8o>BAk5bR6DSt%_R*ߖ~$w0ݝޱ 3Y;2Sn=]zsN=.gxΖUIUXH,QQxm~OB]lhqGt9Ev^EV޺q|gtEH"7FjbTo 7OIvW@!,?:qtQZ2":1e ذdׁܭQ:Ltd} SjVS,>Z*gMs' r\c^|8ecZn{S`9E cdXU7_xL P,:G> F'E̱,I>=OOu#[ S /4wdDF?z=939z{Dg7ȫiD6 =զ2% nd';C<( B v8 ^%c1-X/ L)#3n/[.þȜ.Nk~q͖52eb>N;UN9=[eX۞OhA|^()ݰ[̡`jP=Ètp1E~r _a vew#/QgRaJބ>/4$nܠ+qtN0ATإ;ZFOa_{e {u[$MۿOn%x9|ʡ u !)F[ h$N|iʇ:IdkGOxĽ5(֕2A$-2m4m9&NyJZHlN\~6^æ?3g@)wzh?&s>9ڵw.pkh(xjmMi g0Re6eKag:?<7oT|#v3d_q-W4俯)fY7+nđERU|R;5}z =uv!.}t d;0b 8D%?7@U,=-3UWOݔ}mfJ,ІO(sE UH#^:R?0"7ncNƧzD_Tߟ1 zRf8ܝsL5l~ /9ZqHK_ZfةԪ_|0=\{QŜ'Jk:l$.Tpƃ˓ qV dP%Ȓsȩ'9ٚ7Ll:wQ`pk\)Fyˢv2oͣJ~"{!0fND*8c_Q03OG?b&xQhQ;'4mЃ( aqr 3I]ݍEfK:S>_ԡʗմlb6;IIz~W-rBg6S jnEtD-,M-k ̞jyrUU&sHh2tC۹vX)DijO@bFΊ4Щ\(=3H?MlР*mPa| M5t^OGmwB?]&D%}OukR[z3+`)ݳbroDwl&*z+!IM-2h*⻘?HL-0ەz޼j/z M'?fd1&KN\(i֩CI9$ͻl5)Bl͸/E `6|",^i&=c5Wl'-s2]U}n]0؆BB/(J>Uv%t~eFv@m1j?HHHv-1tu63J`4EA'^G rhF2+Ap|8Dj* #0 ,13ti`( D0j QQ&)q#j4=W@wAPwZϏ.aսgѽ-ld_SiMqr>U]$䘿\ۥ-v[߱{5q[Z!Oԁ&GvYi_ר6X%V{pu}"m UzH.5):>P,%АF|B%WjQ-%3$65xUɾc,;ΗiQ!mQƗ>Wß(ۗqpA 3s!Q0d pp켬V&w; %nŎ 0ǝ,ܠr-#j¦`upbXbߩ(w20I.Q1LJڙwߐ9t5^W<~PWa9ri dDZVͬKۨʽ8P8&gw7˱};pL3X29몌j({?EF0zFhAKBaw8OFKi`f\kdL[QJċ`6v0bO0UByKl Y4o aWuNAsڟ.3X辦$.1zm6%yY~Zq^Z)\T4Oc!f6d$ lkMRs.ً-6M0!i@"PoyaJ~?ԼLr9ʪn*gFON9@4 }KK\rk`2yxWq4,y\:Ȥ(yY.ТƉm 3jo xgZȋj~/0ðzl>wG j0MkP2dggʏMsbRGL9vq|&jD:\E $y)_ѪNy? qۉ @\''Y {˷= {s|ᄁTQ+c/@J3p,y}ͧ^ *N׫ǽ#Vpw r}K,0sBe3~Da#wހ MZSkSsX DiQ36h\Sُ.m }d8Poz33Y@x <"xO'InSigk& 1ئI{ P|O'-$OVQzf.l`QeI]B=xzEm btUI0@WH8y IһXcj{^MJ)6ۻ{NDәYQVq>oH d̫l ' pn4` Ciŏfv ru(]/{J.㑧j}ڼ46~`F XxLͽr/:xs* #suv1|s*JU'{(wdf3* ?a]Jȇ_7dp4m6Qץj#tnZ {Ώkړ:4h!1cVv4 ϤR;վ2:.rԜs'q+nTzm alEé( T "(PvgEAs9*ж- b^IpId]ROYXA5T_\c. 
Rs*nW9RBC`b/B?C YEOqV7v%NCgM%p&0FvWcp엁RL]{W_dn)&/|ܙC^hj7&d Nƶf0( = YwaiLϚIw_Dn2H(K%>f ̌?ޣ M`[f"P#@8|ho`i =mE5g2'dd~ VB:_ yKVyV X&yEݥ]#w;i)V.a7@Њp[,g%J%Zz<x(u'o(YٞPwe:_Hж\S]HoDr6L]1: =#$M\O'Tn)̆B+?$bALntY.@!R4qs˟%S\Lw7;;`_(Oc t'fk6vCYMokО 6x42ɽx t0ӭqKa^*!3IfU)?<7>sy7}@{e 6sCgOԍEK0k8Ix & >\?Kz)鲳}|#i#Vxw@B᪾I7jpKO*iC+Kk>~!QK0 cC\ L ;bflqC7elAæw~Լ7x KɓV?was$K}R)2SLW vH'Tdx7sA:d]ȻM\b UFUJơlfIHe N9V |˘aJ^?\늣5h m*"*^@3Jbgh^sO sv1ɚ^uOcu rA`i[f>xmI`ꢼ%£+YApe4, K̸HAhM $AmKM[iAy hCd˯e 2gGםxM mMwRSZ̐4oւ|jNk,q"&Y:PdDw7򂚸ݼ}x9Y)_o7QF"Fn{Xt>ҍɭ,DgWR1Nhi i*=(5h{mlFnm~^%=d `d\z$pEywZvW81*5{rGF`[LSVs5oW* g/(>/s3BDWlt|1`|_Q>U_Z/z=6f>2䀽)i/KwֺY61!P, ;dSVbS1yй1o*}85Z&Z-fEQ@s3 l"Z_DV"FgU#^I'|0T.{{Q=:6>8%<ȔmvvMxgtו<4G\$_6uƮyN!rf+=)X7A?|S&0* Y$ry[0U zk}  ld9phY~ Dd0V ѠZ}?|d&s}q->@cάQdۮca^Ǒ`P>s-ɽ%}k$4Oe`^[Y C\îԑ(W7&%@ 3 SG98apf1j[-"Xw †/hHIF"bjN+@mf/> stream JFIF``C       C  xZ !V"17u2AT#6QU$3Bv%458aGqsFCERde&Sbcr ?*o^JAap+ Ñң͗"j8*DLDY-m{ջWMfLm.5zFFamR/TBxgi^KpcH~Y\,522UN:(uF%IK"TZt&5z'QXz v\F%x8NA4}t332 'f[maBDf|n<8spKmn79;~I,y//Յ9s_]ȓcmٍOuoϒm? 
e[%jIo2{@l1,ךN<$9±nDy[9f|BѪ_ʅ*%UTDO+Slyɷ)IjYtm޴0A\橹]}*{IJV\aWjFˍ6Я"z5&”aO9XŃ05M2e8I!=ŊJFB2q׌*]$V;dV^B%lʚFе]7-!?1vkBdX*ӈf f:y6͵\uYi^5+ER!Ń/y)ɍz7 Z?zKf*^'lU3j ,f, 1d:U)BhԴPPa먷Dt&6NI5(]:f\:ĖUsƺLؒKK4y%:٥[6T 2 &;ȉ3&=vP \h:h^&Tn-xE 67R͐QV*4by05èYi <+`fMv[~MO˨NE4XjaPn;mTtt #.1]}ri BH'4Imԑ" @s\tma+pH.34<.|GN9kJZ' [@qXN}rzTףH(omnl;i-'NZ,9'>y7,Y"Lvaas68?آZoT{o٨EcȟXMNpbnZIey8,گ2ŵ$9EAxģOrwcvt̗ޖSG@ܘ$"  Or55(r|Y \ WmKI6l\5AA: íƔgcU*YZUm6P߈Mv;rۖ"Z,1yZRW7\+M6.EwgÀr_MRkGMě|4ՆIL6 ^ùx@?}lJg'r'R)GGi8^E"UX(eٓaRdd钊’ҷ*@sΕcic0yRi*Cn \,Hκdq7(ˆiإZfz]8,=s:˂ mKJҝwiڍKiZh6WqPVA:IY&G-9-}Qىsm X"!V%%rwK(i,lZ^R J$V#0+Tz+qeGT̤Ďl88ښI$ qA?Sr^) ?UUY0ꞷs:=5+3}wNHZKT'aTǠPfס`>+bpe*non̅qڒ}:-n.|Ēa"7~Kn4ΥkQ$FEBwʋJȱ1^T3KlY*Ie֔ۯKՃVF%ىsm X"!V%%rwK(i,lZ^R J$t 5[Tƶq^v_6o'*,Iv^Lʿ]kWW+.[ŌXl[Iě-+Gk32EfWآ~Z2m)g X\0RZm'kV*׈a6gؽ\fmxԥ)ܘh0}ݩ'k.&Z  Yc-lRqb~ ͲXqgqۺn{ }+$q8"0w`Μ[PˌMv U:)hR%nVBHsaa:^GfyS^ ?D;))8#sA:K<}kck?ܱd1نŇpbyiCaS]fO"}a7+9 i&m5?;#PkSj{ԒX>RF9ݎQm2_'zZ=MOa\CYuG 1\d3*xp (ДmkbUjزr֭c<˳o2pR\RT DӮ6Lm9Udp;.8hg'+^('VR԰X A+II5KLrcm=O*Q%hv߿Mz@jFò"ﱯe6dĜ4i$$߻PZzHgiZb$لvhC +~ ף^ 7oICYaL]-3!ߓfq#?-d><*j[Ϛu+IhDDEaZ5Eˆsv"LZ[M#=Un r/?/| MjvX0ܨheVՒTZDzj4\~+̴Ihn6j- h'R{-2d`9VXx1c7P4qZoF.ëJ,׮u }8Fz]a8 V`EK1žo}[;FrZm>X6Z'X1֦]cuRmo)i3Y"I$<8r$T̪9%Up.n> ;hBM.2d{RR i߬v.~B4Vhr;)V1JiЗ7$QU,=g͘ldX"%l[Q IR[ f$A5wgUIXrI2Rp)q;[a ON\"S8YS8q _u#SU:nOv>X6Z'X1֦]cuRmo)i3Y"I$=XWa|9+XN#E?o[[J5 ${u2.OSX <9cBq8֌.,~7SƯyi3۩hz IĘm75\OaET),=fl4MjuSNnq͆I0a#(N$} *jv7Ҏ8HR]-tNPH|.Ĺ\ʜ?8$\SЍ)dsٙo=Z'hUDJ"˭ N4r]㆒ֲjmW^%ϓmUֽ4 켂[jڲJTC-@ioc/# p BL7ÒnfR=8}z4Z5\G1*eiOqAŁ";rL%K\8Be7ÒnfR%vjCu0݄τ'$X@f˨[%+Qh܍UȖPk;"STerC`u*GZ-k=\=6q&^.'qB]r%CqThZtON y~aJWjyfɚ̓rPHk įP9doYTT"I(.35 r/?/| OoI{g]=6UKbm+52V7mפ20V1l\2&ǵ;]sK.Q ࢷk:MDz6I?8]M!?H$%a#([RJ; IqfEf\G"$%Q-YnJФJJ# 2$됗l#-܎ۆo%K#m*7Dj/@6%řqDciDme+B)*#ԌeqXqy 9)fd-DqB4 +ڕ}gW6=Vc/a|#*JǪe;/pP 9bSj/II{eV[> t 9bSj/II{eV[> t J2RBj9mB\ZS4A(e+ñ=\Z r` Yq讚SkSN-ZZ48t)&F_qy/S3x}O̞m[y)M S l: \Y}[+ò?\g00jcUQ[s%sͻxksDUle)2RY3=)MKu8}z}m6C* BIl>7qm "ڭN2~XoѷV86ƹdZ>l4kmHtiX] i:+&ΑRJK]t+S#L7@8ґx3eX0^A5`lMQFuV2~􎎖T\5,5Kq‘30il 
LJD&5ĔHe^6+7P)qJaj~,Bz2[-tI۸:FHZE[Xϛ8b2RàU7u OP`Ö1?S L-OEOFKe 4{wBөHvtglGyإ;c䓞yD5#eGCXt{TW)V`a#`&ss_f5U1X<۷4E[&+fR[c%+o>ҔXykTGSivn24$jC|G&Оq+R-9mT'٥q un-slkICi6 3OjV64F+0P=iL,u!,ĺIuI B2?A$̃t)c;&Z٨V+dTg]ejC.G/HeLRTح)1c F ltNc\IMOA?O$FwtC 2fRE[ô崦$CJNq4.rL5tuSc3Xa䩷uSp'-ْm8B:6= .\Í[2i8q,4| .1JRdHp$iqev*Zy;xiHg.Zb55ڦ a c;$ R\iTgZ=4]w] _*.i]L\C7HnC۩|_Et0೵%n$<85ۗWn_@ۗWn_@ۗWn_@ۗWn_@bHOML+s,;0M55"kIaQ5Ȉ.t ϥ660.mg$d,ohس5'C>oWn_@Qܿx4GrmHsO}/ 1>Qܿx4GrmHSíFUe?2SQ1(Tn%4RMczRKVSx4GrmHsO}/ 1>Qܿx4GrmHsO}/ 1>Qܿx4GrmHsO}/ 1>Qܿx4GrmHsO}/ OTN[aDĢz!SWw]ӊoKm5eI-Z3MmHsO}/ 1>Qܿx4GrmHsO}/ 1>Qܿx4GrmHsO}/ 1>Qܿx4GrmHsO}/ &s6~d-ZeS3 C\Juķ\ܤ6Ɖj"$֝鑕!QT!&Ͷq ^q%fDG3IktY[GJjM_qdZcS%9mM^QvSN)J-:HQ$i`,mQgaILU@y3v+D*w9m\gC30Y8jU^J"V۩r'К_[O^Y5³-,vC\%]9f}5 ijN`1=L.U$N~jȾt .F%{ aNEUs)TZ+|s >ʐ(m Y 3XI8qNA-i y2v;p Hޒ[O 5XjReeH6dfA)e$FzY5³-,vC\%]9f}mmHsO}/ 1>Qܿx4GrmHsO}/ &s6~d-ZeS3 C\Juķ\ܤ6Ɖj"$֝%scf28mHsO}/ 5>%>=\+2{wOOx4GrmHsO}/ 51\p^.|[ijoyH#qZGiz$9>ۗWn_@ۗWn_@L;K&^%ϓ/ɀs y~9ÿy`^~_0r/?/|9xw𗟗>L;K&^%ϓ/ɀs y~9ÿy`^~_0r$ym"KeqǦjDwRhq J4&Z P bP'@D\;[i r->\ii[N%DiQ@nÿy`^~_0r/?/|9xw𗟗>L;K&^%ϓ/ɀs y~9ÿy`^~_0r/?/|9xw𗟗>L;K&^%ϓ/ɀ˖Q%Cl)[+.=5{R#CHZUI2R{pf}}T( ʶL;K&^%ϓ/ɀs y~9ÿy`^~_0r/?/|9xw𗟗>L;K&^%ϓ/ɀs y~9ÿy`^~_0r$ym"KeqǦjDwRhq J4&Z P/q]NUVϯ[>!VǙ8ȈIz/ɀs y~9ÿy`^~_0r/?/|9xw𗟗>L;K&^%ϓ/ɀs y~9ÿy`^~_0r/?/|9xw𗟗>LacjljaU6,F|Cr=hm;)-T-LҀs\u8 UXW3>Yl[f:w""z)&Z;K&^%ϓ/ɀs y~9ÿy`^~_0r/?/|9xw𗟗>L;K&^%ϓ/ɀs y~9ÿy`^~_0%z}Tر wu\DR3J5%LlW#+.E5Q㴓[8%(BSJ3Јb/ɀs y~9ÿy`^~_0r/?/|9xw𗟗>L;K&^%ϓ/ɀs y~9ÿy`^~_0r/?/|9xw𗟗>L;K&Iql +k`\Zi]-vɸ_ukuԣ5(:(0T`< ak8غҽ.K٦,~2O6[[#=z1T`<8>6rqud.c8FNvI]%32o.qkqcdUo/mhO˭ףlYb q+ܪ%wtgؑh'E-zw e>؃\bYF 1!R嵶3 G4IeӢ7G0/0=j{Du-Xuk5kθ;akP,#=ذ42Tp&2cBpͳV-}6eflA*:m ,hE'RJ/M?ˊƘ&x*/F<]g)uP#ձgO^kIҀs\Y?[Kz$f`<"ԕO6؜'- F!Kn1o䖘-[h>mܬԋO XF jF&r1f-[$K>ђ4LIvVb15grdJmRRT.IIN&ða(,Xfy\KڧF߯zttdѴvbR"Rg;ל &TZX c88r 6Ɩܴb R_4HiHCԥK.|+y2k 9-ג[5 `4C+PRu "F]n\f%&IJRD~= (21.q]:?nm ru?d:GO&9Ǎ0?gox5Anf#u 4&ޅi l[,p ݔ8ʚBdٸ4ԵH kWb%(w nvv|jnK,[OBJICNRV7EEg@TZGHSA%NGSs~ߡ$ruFUV gB-R3q;jI褙jGuƘK3 2jYc.=w'4F qt"$ˣPku08ҁ;g#qm4H׆ɤfi"]@+rJ.L sśmZLȒt]Z ]f%lI 
־MI2>OSݨu0u!+U/[`y1n?Ihf\_: 2qE>OcllSJ?ݷQ ̃W86^1:r 1ⱮOA&rHxAH""3 l C0^]Od~s͓j:/\,KM"'VJ-:td#/E^*gK cYJPW!t'/p0G'6nї#e*'jj]%4zl"OΎz8盵 : $%R]D_qcA螥LjuSe~8z >2 S-2AΛ(qT8eSe0ZP^oӣAkغeIz=\RU(1ly!MWhIidupDK2]-EqFda ]/ y)#Xŵ{RJKJHӪƻ~+pfZGĘ"a' |s #Y=v>a*J'lAZ:]. a[Ol^>Fxbup}vg'_i=ݯyOwkFxbup}vg'_i=ݯyOwkFxbup}vg'_i=ݯyOwkFxbup}vg'_i=ݯyOwkFxbup}vg'_i=ݯyOwkFxbup}vg'_i=ݯyOwkFxbup}vg'_i=ݯyOwkFxbup}vg'_i=ݯyOwkFxbup}vg'_i=ݯyOwkFxbup}vg'_i=ݯyOwkFxbup}vg'_i=ݯyOwkFxbup}vg'_i=ݯyOwkFxbup}vg'_i=ݯyOwkFxbup}vg'_i=ݯyOwkFxbup}vg'_i=ݯyOwkFxbup}vi]vqJWDmuI褑3Ӏ<'#<1:I}8 N{_N3Ӏ<'#<1:I}8 N{_N3Ӏ<'#<1:I}8 N{_N3Ӏ<'#<1:I}8 N{_N3ӀyrYc,=6r4qiFf֚OjJ/dg'_i=ݯyOwkFxbup}vg'_i=ݯyOwkFxbup}vg'_i=ݯyOwkFxbup}vg'_i=ݯyOwkFxbup}vg'_i=ݯN˟ )NXzYFyc}շQT]$g'_i=ݯyOwkFxbup}vg'_i=ݯyOwkFxbup}vg'_i=ݯyOwk'q~ǘV5٘fh-&Cy+RKH\P^g *<=D$4';ڴ+\f+x!% ]Uj^ KVnC-o]e ە˪) iٹmFT=~- mDm"6:"' %MDJ"WN`6#5b J{+Xp23x)`XL˦ˬ$!mryF ?)iR NRgL9-_>x2!旵FmZ OE$?HeYKSb"E{#d2AlizOVc[ymLإ|NK)%6Q)oqq]>9.rg}\!,Wt2oEe^[$QA+OeEwPajk&ӹ6'vo;] PPxNv rCҭlc-mNf:QF1[ m22Vgʿ闟OUJ\kتDQqTRY#-ƣO_*,8K#Ŏ;"C$6h-Zԭ )If~ڙY>hwO[۸Z"Eflt:DNr'K$D lp/Y3\ puPeH޴_ۇu5Ys d<5z*qS鋇*b"K͢;N%cDTON4؛x_<.\uvP )#fi|Zrwd^I&%&ˆm7tVne3-et?p i<$0N>XAY`ǎz !JIܙ>AHng`:Jd3ɞ IWU :~ž_ֽyR)i"7*#R/8 3s.r-=e!r$I-E&%LJf?w/c_`,{zŷjQVm3)+C=dde` ؚrwX|⺣$2O%@-2𴸘c ><]4hҎk\y^Ws6D[tA˼͚ ʃ3f3d1)!!$QI Gxzzܿ0.cgg8_tY4 3K1ƤgD-tPs𐠠CW<8f?ˍK-eΡZۣ"QoGrf o60m'T`Kk^}}TQ*1ִ+jI== g[bR"u1ZZ!}32I'iUn:,}AD\;rJ&44 t)kڽ32FL>0z+8Dz7$֪SiOM7uhl!1N1ʪ|>IҸGO ,Q' ~ UwGSΙNq6[m6129>rZ ~ V꺦U^ 2Bf;ɐ{"l6R}Ե3D[ bs$7p~W #߷Wg=b%D;mAѶz$yD[7teuahyqLBi-\Z”7jROMzqxel+baKb-+2Hy5i_h+!p^cHcThi7CdF]/ά-jg5mExVLurDH Iuk=V4[_ĭDYLŽYM&k4QHӡ ά-^+Y|m܅ɸ{]gMxŦg%7X⹸QR\B"$i7OUzwv8(Ԩ)|;SPrT#bj-)K_Ftt2)9C}9Vw)SY~dygڶ~B|řeѷ>GX>)\)ri6RGD0I#5-p,i|(qH ]i y*g_ҒIjsQ¶8o RKJǓcdKTf֏5PyZdN_sq _RV)XR䡴8ެp:W$: ~ {CeI-KDKHBnCfҍQWF`>RY:)uM:A2{NA7m>ދKfjCa|&G yQ\' &82do/G]sqK?t0s BŜp0lKmq!ɩ7I*otjROMz]`,s7-Jz#VpcTw8-"lZ܃SK 04(V.v%:aN!~Sj[m%nWA,ie-ŒR_y 
gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v (q^/ a#NȐ|$6h-ZԦȒjf`3sOM؝4Y>͉݀sOM؝4Y>͉݀sOM؝4Y>͉݀sOM؝4Y>͉݀sOM؝4Y>͉݀sOM؝4Y>͉݀sOM؝4Y>͉݀sOM؝4Y>͉݀sOM؝4Y>͉݀sOM؝4Y>͉݀sOM؝4Y>͉݀sOM؝4Y>͉݀sOM؝4Y>͉݀sOM؝4Y>͉݀sOM؝4Y>͉݀sOM؝4Y>͉݀sOM؝4Y>͉݀sOM؝4Y>͉݀sOM؝4Y>͉݀sOM؝4Y>͉݀sOM؝4Y>͉݀sOM؝4Y>͉݀sOM؝4Y>͉݀sOM؝4Y>͉݀sOM؝4Y>͉݀sOM؝4Y>͉݀sOM؝4Y>͉݀㄰ 4SzQc3\-INM Q8g{ :[jVzU7&{U?Ekqϓ!#f{e;4.Ak\57eG}Q)Y4o=_5Dd`2H5)>i:8Y[/CC%$"0} g`bSGmML:\..qQfFf8{5-V3SԷ+a1ۤm2&9f9"Q-Ԥձv7rjjgMhl3>Žߗm%2j"R$N/@^`KTnBǟlURUVq ˌ)"Tt@L܍j+üz2bS#IIqH5dAb^3k[tQmlb"pӕkvz`Rt7|_Ŀg͗6,g@Fqau_d9m KU Tg~eV-QO ^!ֶӅ`v7nTk}ui J2⇪m۪]Rkٗ+ԥJY&;&Ĕ [ojjWAq[[UWef!ʪ9Ck!؈-/Cڒ01KLEy1C2 (ukI$?x9-*㫽EbZx:MkkQChZ7mC."U,cGj߆3eRQz ImdЯеjNPb;h4Ms,IZr3ߌѾFѨ'IFu#Tc?:+؂Z׽vșoU)꡹&Jҝ47҄Fvxu܄ 'Wϯ߭G[6\RqV 9Y^W4Hx~*DxN&䬣& Ljh48 [_M3eiKq^!;V -mL~,a2^Ę량冤3]-dt;45/v"{FqeCKJhRҺ{{"7+5q&AqWZ%k<%Y9,G[ʼnaOphm&hf\ؼXhdѶK-եJpa|<ͫUw$ VO3D \x::]ssY%;jNݩ1嵴]*]}t6l̈́6UJ9IAsD+טWѦ+ƺ"M' [1fuՕj+4 VwVFE~)F(,v&KCDp.9 Khݴi T,bk~͖mIE )&N嵓BBթ:a̗&&DAa WKgY")e&N +Kݴ׌{j+SN:{+ruZK *QqJ['MQfc,B u2_6g3&.7osetu$JI*H \Jf7 !XULYnzqd+p8{ՅT, =\W+ڣBROE$?HXq- aFq#ӗ|-\5l8mRVɝ4n̘LJ*\V {t㹢v6fZ+v Iw26믰ü()1:d(z 2؄0IS . 
)uc,iQloY%%u㸖q//vi30ag&Avׅ]1B Biֽ`O]Q8S.;7^VM6a7nx~uY \+mȳʍn4ӛ\i-#i8'VHPbgiUb ܈#4N̔G-!ipj4aٓx(4HgBQ(Pi|񯌼O_#|o_-x~p$ljWc^|RKMkbd-+uݢRjSh="O]׉<&r+۶\.4--pDN2.3VQW◠E}3(4ڗ(JB8{KpȌN^͕%L Wb]G&1$+V¯$8Y)ٿ.kkQFEPƒ"#\4/{!ڨ2[&_ĪvnR3VN2-7 Mm$mF-f@=9wjŬRTV);YqdńBMG:=wy'wTDn&%CQAXwBMHݴ)3-RZ&|8óf-&%b[`ڦ%q8j\ZMN={_!0=SOTx-մKYF˫yOq*)Փj2܄ԙ'_!*56>"%GבGufHhkj6m;Pf&di/2icb$$kGj6"A"‹#%&\JĴT*Ŷ5!PkQMǨ.|(2"ޚ-,kjେ"F6q48N ѤK@U`e dƃevVoy5utF]=UNx2~_!*56>"%GבGufHhkj6m;Pf&d~jy2zL.G!ur]SϓeT۫qFͽC;Vb$=q1* 7OĚü5jFiIj5~eL,Jk#+TlX6rT险jxj3Qj > *@ŏL秉:J'>PvF)~Y, |jդZ|D%o3A.irap"5HӖifI[RH'si&m[D$D{Ww|NHz6bT5 Kol5xk4$ԍLғ2%y&xK$5MF|r24ZC'!n:R)K3YZY;溙K+J3mheI~L8(e&jWݳg[eӹr)"PK*]_5;?!Y̲T Y]E:L&q !ڋd[L:z(2"|XQUM} p`*msHmScO[aOGD;LA_7*3Hy[[sS$w+MoV=<[*lU81y ;}hS*[4 RI1Df>_}ܞIiKf-d λ2+LCMYfZ]'TVZCqjS̥+3R3޽Ci/2icb$$kGj6"A"s SSKXoۍuocB,hmdЋjt WgE7k19*) DA8ۚ)d[L-E/vWS$YJa1Jyg!rm~{giCj#$tu߻ i9ٳ)F;K6"JڒGDy;I63j܂$" bu~)D*+CZIb|!&Sڝ~hIWx pmk14RQPa[VYYYȌ%+D`2v)UW+unyۺCK$hGG*/ f"[v(HR[cfAFnF=CɆ&r+?O'xUI<5[ŵ ؔ'pZ(7ڒ2]!6w®}xqm ԩ[i83(H$O0 ֮ Vsx2⸇^Ok5$Qs/pܿʴL)M1GZN-I Cԉ##sC# ʃSWgv ^XD6] 9Te6j"RFf^ND|+n\,wQm|H4my[YhZoN?ĞV'%÷xfCmqC%MYOH ;Y*>҉sr(;Y*>҉sr(;Y*>҉sr(;Y*>҉sr(;Y*>҉sr(;Y*>҉sr(;Y*>҉sr(;Y*>҉sr(;Y*>҉sr(;Y*>҉sr(;Y*>҉sr( X̸̊"DIK4[n6ܕIԔF@2O~LTW)V`a#`*Jnb&+iqO2hj PbN&uп^5n̽ 58s0M0 0q~H{'k\|FW:>sJwauh(]ZY4_n;fᑨJtF$̸̊"DIK4[n6ܕIԔF@5Rq rTr똺Ri5KaǴ65: foasL͜DZ|7VBIK{A@GLAk"V51pHm&ʜSu[IH YrÊo"ڥ62Z#%?OI_)Iy]U-h'Ql#mIdV4_'z#SMu8=_B";RP[oW.ZPv&CDd9!A(d5tjf, YmdWm+%d5Y6(֍2J.^x O.g \-tHbRp]䴡kMɲrC uDQ$j?\Ҫ%EvҐH\$?4DkA֏;OHm#uSb;oǏGPGBzSO]z.nrXq] Kr[nIY|'ITtKI2qVVAl2SDR#kKhliA6ӚĵV׃,<5khZz%&gzPU7u i;/ۘɀ*bLH䴲Ӳ %SmI%n?S+bE,032\M\e$mÀh3*Ḥ_q}5LKƋp뜱~׿Ua2zZ^+Wׂ_">ڤ'Q;FN-/fac_dPYZkEʞtn?۴ush5'yrH_$^j4=L zťv"#:zU0e 3Sr[Џ?)V`a#`6-cUD)oʌb[@u6 jAbnq Q(JIDİظ%T0AzA}IirT#/eW[VysatW`On= 7sʣ%nV%0gcFKޮ!@2ƠiQe3(kqŬkZj7 FԹRO>ʧ_%{snUjiItfkU.QS]z 8jUǔ۰&dk4 ݧF" |U[8~KݛfzI+6j/i B$X+%mKIn=uzrYGz,7':hhKs .4fɷ$dIJ $2%ʁ4jb0! Ƨ5Wؙ cPkJm4&vxqq3b6̈+1YD8m~O%h4fRLsmax;UP"C%08$3i o4VZu1ˎ.EKM\meIjBЖF+RJ+0o~@%qߡ+]O3cf28SíFUO~LTiY dwMʋ!y%-S];TZA1QUPۀք2%IJP%%ǔҔ% 2I}{*. 
mĨr0 fiRfZ 5UIqf>_u"NHS:BR5IKOl +0bŔ[KIn5m%D&q.tϊf^R R ʔ5+%jrM{s7^{@mZ^Q5j5=Cm}}tr*ڶ5G(6㴶8& M6?`RtRRd \IO)RdO26%- *k ГY#MSK@7uf)9mcmg&KM4iJIZzL֖4UE[ZTْC}OHIKˬ+*SϼԮ5Ϝ{wiyFfT֎G +-M6S%@sR#.b8Æ;7@EUŦࣅ#r"G (qO#GGFJ~&1gZeIKgo? }m6". J(TϜuN:L*4pu&-S΋%AXW%%$qB}8ĥFJu$D"ÊHVu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`gSh6'v>Vu6bw`)bD+1"2#-GI jPBJREendstream endobj 741 0 obj << /Filter /FlateDecode /Length 3082 >> stream xڵZmȗE@^ nڗI%JQv$W,֖p8| lq/]Ԧ_\,2EX2ڕZ6Ǖ]Yafv._~ >_n>b[~E~=|ܶ_W|e^t`kJ\dbS/.,\d Y0.wL~]0e,K՞{qYnR._+:ZGX'C'Ӱq02*+͗>[vJj]nyZWCw('~nX]xGwNF6Eu͸ؙ**CtG]3|:x_,֮2m rkB#"GkYi۬e]?H%_V2;mP1{#wލkp?YZt i΁nO9XEHjӈk8L;-:(zk|fԥlAl%hIrJ^WI-9q;sA tY3=lVw?>w" ذ7B<4[##\SE ̹Λ`*\X^O@rn4=@Wv&eȜqAV@bٛ gZf<[#zyeq//R,2O:C4ܸ!B OIbOTReQ7H -C3u^yȾ%Q9_šnͳ#ňx9!B?/B^; ['wd/'Î101aX8??v(%90q!Wa3Qt˞G R܃4L6DcbD!甮:70'ΣG [sLi܅g+f¢ @YEL+&}2h 97I&J ʗ#{S V&s ; !]"SxVum$4;Zp)>c+aŸ29+Y7̐OT2daVeKRJr[9T+Vdaٵʌ=w&G#i Yµ5Ygv:3uQUnp;h7wsIoӰbYtm⫴If@^ `ꦄ$arP0 lO)I7 T+li5櫐q)rӎMVruU5p!cedH &d"R> ElosaH5ޕ/QQ)_i׏i}&>f܉o Ηefΰ 9:U67-Ov) h,x0ޚY:5KY^NA5w2j+k9\ӃbvԡdA{>͡D<~<RB6p{-K"5CI{*| ^`5W`RQt",6ReAo34"@ #Z5_ '`tc4yeٌ5J#:^yZ3: ,z4zE$/$'*LԐ1&~_ N^I\ΟI%}5w֖=a<#s5 i+b^Lf O5^ؐ'kqGscGLz:+ULO 6ROjjB\\ȩ9q/Ol<1.Rbt.$7o< h9K+51>)$0pJɕK'](py梎*$sc,B藚C@Ͱc1P&.nxWhR 1y'z'):Iz3x+B/)jqˤ |/?w {>Be/9.iUuB l= (Alz)Jb F@xG1:8umU0HCܻœf/KUr洘?UYA[rqn:dؓÇ^l>pqؤϳkcjr.5LD `S{9w1úN%ʰ]SN6Ѷ G x;y%5[A\wb|y`!G5v^$??Sɦ T|x[`Um˵7×̠#[}nn娃'dMfP-G>ދqme|%> /ExtGState << >> /Font << /F1 637 0 R /F2 638 0 R /F3 639 0 R >> /ProcSet [ /PDF /Text ] >> /Subtype /Form /Type /XObject /Length 6174 /Filter /FlateDecode >> stream x\M&qbE@v#|P@ 2!-vﻲߗ 33]3׷_Gj׬֮c7׿_~WWz}Jٯ=W/o2N|ӯv?\і?jcׯz~_^ˋ8Nrw?.;Z}L#&Q=zXSGh͂9Ⰷ9-K|8q|܏Vq c8qܦb˘0pA{/2}H}Ղރ^](x7RP>F{/75￸/`-HuQzj$aq 
CJXuVҿȽ>r-5G~ٛ/7]雯?jv??+R>`tO?~tU7go~ۯl]7?^x}W~ޑ)F 3\/.U Lnz>C^J@PuI>ǝCL婆MCJceN) p6h?l %ն SĖ}ϸKޒZIy̛1xk\$.X|K%+8w<0Q䉥{KTV zzuj<͏Z=.]˷0:p;>t>=и ii,0k|[\i[v?`fR x*1mByMx>MǪq{ԮlEh[o_kߩy OT|kh Ӳ|eF\vx_ɞW8?Lm: q8xSoXA\T&$mQO:[ OB / [ک^Hl]!@l@k]oLle^5|X Ba}k1Վq=|e9 _q~ QR9_6 1{?/#?s?'|f;7wO2?—>~(,O>̼UawWg%1eہoHZG#OY> %zFA~e%T{L_`և2Q}> 8oEP`~B}E]PS[c=Y^֗8x>ߌmv [>//]oml2 :SZtßdc ώQZ> ЮvCixUL{45<?iH?b_vkRP'Ħ/_xQ!.k_Bq[&xO/^4N~C[[}[~~{~V?Xk{^{5Ƈ'__o?gGG~7#OI}~ПR__?'_1GM}~ҟS_ߩg7K"89zs`bUBo?׷lUnk3.ںzwQ&䌪vV/8̑xҭcm;w^ou* Z\T)'V!=e23O e)i/ W:bH7 %ǩ+ݿ[s$R3u9eJwKʯ׳s"?uz*ɭIɜ?;T&2V"3qW'3qz(St~X:'P>mhӲʘTngbBMcV-^iљHqkhotuf1?3ݟoSJ[~Ryo7'/\'>nHu|kѶS9 Lw !8UvGJ`m~Tu4Opo'T7e?֡ԩEZvK^]Oԧ"-:k/4oEZ.u*Y>qzxU][Wb8CG}xY_xЯsy_hn?&[緔{0\"<bFUY(;=ݜn _?ȄuBh|<]ȆuֆY~ #O16 +7xw3Bf4|Cf񏸬4> dIwfs75׾282woAl6}T q)[p'b8v )zb8q uT_o+m}`AcĜq\Ͼ׾շ|VQ~[}gyο>V?{;/??O__o?ς_߂^/C~? }q6o7g5IN}S_x}x?;nmYAYoH蝭SֽWS}h0Sxmg?m^_lwݧKpRjΔfH.|`Sɰf$FO~ZE*[i&^ݬVq16zG`(~a)&^ j `7/馞TοwZ%Uc[/FW_,ع)- bRYLӶ*ɥVRLj]nm}jR"ȭK)7IˍFleX>ʄ'R_Ios+[t7)&:RYʬiME+f@Uq=e26OnBVo8̷~h(J;m4G^ɢbE|g^6)?|C^fUA>o#C^޼_QOK_>"&.1b~YK91S:Pߵ~]^q>Ưu t<^/qlO%=_th֏^brE=V_ep= |g~iKzCdt?8_"s+2~d3+ UgB}q}Kd{ק<ݪ~1C?8ucKW<9ԭRlk[Go43=2r!% [[}Gq_<ǘϘȏȟȯȿX~\ ?!_??"韈+?#?;?$K'BJ>R菩o?k#7JN>SP?P_>5_*HE bTFAb@ͥVC*x=4!b0l~[AxFlBx P`$qkTt^Ty~ v#=my)Ҟ񙇕Jg:c{ˁDyaKfgyj@j `঱`T1 F9m''ۇ&~ ^[{}ALX?;|kl _Xֽu >!,(M/GǫlelPΏ>YxY_=;$?7wtUh̿#&?[cȼs$͝q/{Kݿq lM_Acz;ϯv}5lmjo1]rVUqGEV Woz0}hyl}Ezf Ge|1F㩾#6iؾ[YCro-υ~ܟܿ>~?_?ܟܿO?$/ck'B:sK}~S蟩Go'9KO}~?`}xI: >]܊?tVi(|/q/ϗ|`CcI4V%aW1M{Aᖞ۫^ᮦSϭTZ~K,570TUZVJh {]@=J|_ AԦB;JR.=VJߤ5i֤[bZtiVxTkEo~x,ov\t:>8iVv51ȴä}jh/ul| .mtfĢI=5zxĶ Kc+j[nR.0b{+ 껬46k)@U2}8ċqz f˷}[|M?({~`'oI͙!_YaOׯv\_He9_$Q7[?uૺ&T@?6%$~~`]Lؾ[?Hi_E?(a;R{T;-V$_6Cv?G/0G8 ۩/lC߰mcց~~k֟mY>ۗ[ƭy>/q<9[[9_}WWI>mg_pZ*G[[gn}}t_iX·r삷?ʷV<_g˷ 7?%]P=r?q;?w>/o?آ~#8Eb˿Oj'ק$W[D7{ٴ/_oD<~Jl+BMwʎwtL+th/qO\)/cgkosw{X?b_>~ܟܿ>~?_?ܟܿ??_> p5m/ǡoC_>t=}oB߽gnz߷oHendstream endobj 743 0 obj << /Filter /FlateDecode /Length 1719 >> stream xڵn6_!*V%Q*RlK=t@FVJ΍k^/~`Dŋ<(TY (UI&/.2=6Sр`ַ+V1fyş_Wqt*/4[mxL1JgKHZD6%Il$*| 
\kal`2oWa%b8Q/HZ"``嗌4`0r3~Q-8Wq7# D[̡<<͞;&&Zٌ}\ 0˾ct뎸x! 8~[X=v#lQRw(ݎqwyT-qGݷa3a τp?Gi)cG4fߊ.`u=sfdBM{!o5kSyС{"D!cN\_6cl*׊jaGO۲I0<߄$N| e}uqJRܖcw"l֓Mex4> #5{/0. wCp8)Y;Y뵀D y$cʑ8TKҞ ;G)gK0ž,m$7Os"EMVL>x;2~pvBfbT/F.v":&VqQҠl_XLߓ?5&q z'*Nq+<$<= D(Ha7$&h ”A9VK_1E3 G@ Xy: V;(bg}(_|L< oNhK7}l~kS 4RQb{zZf7 XvV4ty [$)`ڬT#feNF7#)dR.)zNHS$C)K. \԰fCZf k)zΥ2.' 7bN&f"K`dg,w=sr}Hٱè{'1:dF9`;F7hP$1~J]X:]\^b56Yxt>[ bTA ^DyG–XIœȰ`A%xxye){P1$]zϜэd$$hVL W D-UsˠS[rNpԥ?3Is½) ;YdnS~4'=0:'96>|hfG1*0mbN@wBtsλ7% Lg8+] ?W/ф 0nd$oʣI>4Yc&~ڸ6OÐRMsѰ> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~endstream endobj 745 0 obj << /Filter /FlateDecode /Length 2344 >> stream xr_GBw;UCJK"\@קj9{z(9{uw\u>i]&YT&F?ߝr\x?s]?O8:8Y pƌ# 7eIf4«2\q k'Vؤ,x'65psDG'~4Km+w@y }46EBYM\jT_ZGD8v.ٌ7#;[Z$Iu[{q-˄rMP0r&@"> H6-%ELiHe-pl=S^Yiu[P*<4KQ ˁ/u@KwFiZh ]ay첒&2{'`8); GwT^Pe>SiLhѽJ>ۣ 2BG)` Ĵ m %RBbyc1XQ;%Ъ͟\cDv@;F$Gĥrj(u0aq-ab&=}iLӣxCzW=z2ƴ=&BkVW;rG|z~OhͬzBwu#]'e4{(M׿ynK^%0IøPBb*kK휍E(ޑ]a_TO~A(bdҮE=fε?4dho\\(sxbʤµ4u>4uWD+}3yemWWu/vOt ΄: r>)RJsȆL겂ͷDޠcἴ1gxcz` kR: h) }Y*Bs54p8l(NДbѩ+ T?i7G>v] Àw5Β5=O;Oư`;a:dQјT/4Od],<*m7+g]v+ĕqo d6E y>|=Zisԫ R?Fq1OeΩ6 " Vsh#ItDҠu\U p%Roywu#=-bC\u>"_{ )|E'M+wJټmbd#[y3~}>=  xendstream endobj 746 0 obj << /BBox [ 0 0 432 432 ] /FormType 1 /PTEX.FileName (C:/Users/hatz/AppData/Local/Temp/Rtmp8l9X64/Rbuild3fcc65a6/eRm/inst/doc/eRm-007.pdf) /PTEX.InfoDict 651 0 R /PTEX.PageNumber 1 /Resources << /ColorSpace << /sRGB 654 0 R >> /ExtGState << >> /Font << /F2 652 0 R /F3 653 0 R >> /ProcSet [ /PDF /Text ] >> /Subtype /Form /Type 
/XObject /Length 6516 /Filter /FlateDecode >> stream x\K&qϯ䁟@WяvލA$Sdr-q{WVfV+rw8:p95@"+z[}ӿt;qroҙqKGׯny<}:7zJoNS=J㺽}/߯{==uOv7OaK yOÌ[&?ƒ[^†qoD%{Ќ^Tc x>ŠV6qX8j{ƒ maaGEW=~kag=Ƀ=JZ,{l+똏U{pz`.% ׸7o㞭6q> ^-?棈݇5^qy<_߁6=^~cݞW FJ>&qz|~j޹Kx܋= H%e2kyp0R.޿by|͖r/onj+6lf|ƗNt?y\K[?vnedJY7o?7{ baDbÜxk<ٞg{O_O[Gݰx5cEk"j'nilRZlyYkk:GV g՗쟭U[3#ReͶl_vچbh=y/oFxw}{:}o>}aVox~Ï߽3~w߽޿zŷIqC76).cL9).rN5).V'E4wrsa=&EŎzIq}"礸؞Sw:).6s'ck/ͻ}̈DIq#U`~-).HJR\pzMqM[i(S(.(R9v~n)mvk9Fqe Aq΅n9FPaQ\[Iq)#k];ŵHYDqEq4VQEqSܡ▃)(Qf^/kc8-mTvAq2R>Q\ilnhkQ\Ĺ.3E!y9ޤhgh?SR\O Ƣ/"EEIq1O\kHq1-6m<شSAq1b~z,˔Yv, P$ H2A1B|#@1Bb #(* PP@QiXbg&@`x`/P<(TOPL"²@a[pE Ӗ{|mhq~%oHV=A@s"~-P[@@ѷc}"brݲorEc\TEc(bYWZy)Au"vEqأ 8(bS9(f{]"6%"z+?(kcE{MExcE{-D| Ţ<;@iS P_bI6] jly?t6Pa" xE@цiOb-E/N*-@nʤd'dR^[8Qz\ H\LAFufA,Wg)q۹j\($" 8%j~?C>a8'JIts)ܰ\U<+ƹXUfqė3:vl?vdDJP;e/NfP;e TEl#C*tQr'(9;kj%gkEJJ0֦- EًEŏU FWŠ@?[,X`5E\)^/jX*j-)p`kunx+좬R9C ?튁%Zݜz@<` Y܏@;88^M^|#7`\0Q\1\d-ί(eyp6+Lge/2Ă]͍ O+E7U.tQV"}%&+Q-LU(*( L`9W(5m \uv@yf]rlڃZxʳ1"j_ZUim 4l,y稶ɏ=;m[?d.~M[8S إ޼_mcOvǮŏ]M޻cm[ؓK?vͷ,~ŏY?rl\;8?FLE~Pŏ_{2]?F7ƏWj]&"?FrckcGycOŏ1ӹ1n۰=\۰_g۰ԝ#/?NY?aď?FAqbغΏO*8/~\O?}P?.ǂɇK({P#΋#fD~!~|!&?F5-~_cc?4aɏ&~ qdǘ̯ȏ1m9>ǘuc{I^H{q|ŏ%tqg t'`Jĝ9:Uݱu3Nmwǃ;3xS=vf$3.S;`Jy L0cB`J I^9d `8 5lxl uxLiLPs`Z`Z`fr0)tu\7ļci&h 0S!`b#01 ;`y-4dN /Ĵdذi AA LX@eC՗6`LMkLۀ`;sh>0! 
0I mY.Zl&(s 0L<kfz $^mIgOzZT%jVYm}jߗՆ1d%K Zm n!@ 'Zmn9`4iyMgS26uYm7x[mp?8h% މnZRQ 1dPRW|b`8šWVL)ycEGiGK%yc'68f}?vlV~M+V[:T jmf٬{i]GTӺ3d1ltefa뗪8V xez]ck6QuoMkMg nfwmZVUVTk]=5 \Zj&fj.C9Z{t4[fp5A&ntњ:haCc=-4{@ h釹֢;u*^gM35DYpUaF|Q-yoq8p;7fm:Y/.~FEgȮ?(m^!Zy}#alc8X\Xձ l&4AIm-LcHHz.ttdhe!ŧ_eQT-, Y' ]XUCEB\tbؐ~^y/2,j{^*4<PَrݠGW'JϝzFg*L-z=A#YI=UHd¶UyX:l"fo, חA[+n36Լ,x(KQUhd6/ړb|YjI2bN巯RxMi#ץ,R4[US.'W\1$CPŎ))k߄9EHǡ9FBG|4JDH{hR0cRiWRD_~9!ISHv "R鯟XJ̗h}lT<$h'En@k*"EVA|$Okk*EB\ӼVKC&5e⚪q(o⚾X$!c;&5U ߰l};GpQJlQPJhÌ ٨{M`ôݒ.JfplIuTkT3TR+jlij]]C4DR`vd#Aj+0e쵃YNf9㧪Kvfpf-F P4 A9<$[ɪ73/Tfny6I1fۧ] l}z؆fdn.m.m%PXBKs,s=-soK] JV],UQgDuUj7O olWRZ4EKUT3O_Tg6~/ qK6)EA%ĉK.`wg8 Rwp_rt+Xut3]%eK.`w#% UKxqg/ V4X/a_R/a` V_c~%ayf@ P5xK)/_K؍l4/,K)K5d.]zKi-'R|M._ Y}u0yYRW SWZzq:J]#C)ؐ$[uCh'aѥd&!^RJC٬Yw#EuX:$tʂ&R+zHB TRM`? Tn$(4SyGAdRk:G7(X|L_5xT:?8pB8p;Ε03܎qCf^RT3N>STN>OTGؚχM5r.SCzC gg,33Xc~.?.>0Yn)yx5PIKT&(5}T/~u͵j 2Y%JR}uj\6mؚ"/ⳝUAgn_kl UT깘l]V9kX|VAIb`q;>JJE`Tљ}[(zb('FcqW|+Z\/ GrQI(zoq=g 6/D\예+םbY&ڟ]pMZu JM ͡"yu"^{D_=ǯFFu/D+B?2y,>D/d1e+q.nVIzyY,yY#icʪ-6 /6kslv|,>͢.6;fy XljfCp.6c1Y/2E ۧW6Lj29fXC)>A ! Bs* U//W_}_^GB}*kU%U/X_zb_hi+EV!JC[$'5U mumDVCmQ}[IhOܱ|[ޣ~grGcE-&uCFȃ{,ěkGzTNl]!'3[m#aO9 u5m!c(' D@ ̴F]Z KI. 
)4S]O tPh8tc9/uPxd Qح JP xcdžxPέ41yPʖC!SرUBQ\- x*SYmOgendstream endobj 747 0 obj << /Filter /FlateDecode /Length 1104 >> stream xڍWn6}WQ*F$%JZZwS@}Au 6/Bl3gg$.H7EP(AyRg1JHOIF-- (Z"6QfjW.=S iJmAZ$zΓ@R+2e*Y|Y:G O)g7& ~kCw*Î=)U\eΕ0%G֮`4&}ETx|mO-'Ҭiމ٘=Z$*BΜdٟ޶pp#hhdطye[$ Gb]ՌjnDfYE'8Uy  "i r66~8[ol\pGRys ^;#HoI8Վp4Jt0y+%Xv{([3Rj#d GJfDvm}BlR6zT ܅6 E䟣8SR JGĆbL"F X:⣲B8T\rв bhr  N:)G0}pm=z,FlމVceC~I\>2Ή)t-V+# nnY^[P&}NGx ^Oi|B-W78?җUQ _ $ǧM< ϼ('̳fNM:ؽ)L5%xҿ~qF_{ <󭏷I^naSnfz`> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~endstream endobj 749 0 obj << /BBox [ 0 0 432 432 ] /FormType 1 /PTEX.FileName (C:/Users/hatz/AppData/Local/Temp/Rtmp8l9X64/Rbuild3fcc65a6/eRm/inst/doc/eRm-009.pdf) /PTEX.InfoDict 658 0 R /PTEX.PageNumber 1 /Resources << /ColorSpace << /sRGB 662 0 R >> /ExtGState << >> /Font << /F1 659 0 R /F2 660 0 R /F3 661 0 R >> /ProcSet [ /PDF /Text ] >> /Subtype /Form /Type /XObject /Length 1037 /Filter /FlateDecode >> stream xXKoFWQ:x3&iMzrpa)]+YJA>̓&n~ϫ?7wzzNGCc;`UgO$[;+[.֐|U%e/ WP ]+;2f8I{@_/>moiݽ8x/WkMg +9,YQ| eIRʺfdf;+rv DQBEn W8҉H9?+l #n8. 
GكGR*|BGʦVvz2ES C*уE'[ł"vۇryUIisƔʡz7U2`0)+(tQ*n8 [TJ G-G囻TrD"67;eQqM(x.4Kc9X gY]~Hܯl쓄!5KUNJK/I"u+HtHByT;4dc$B;9I|Wxs3'}%m:FvW)qQrߍcG۹}/i+3bR*3SU>^V QW#;٭ȣ͊y9vNK ĵSjl46 !qNh2Zi/ u{+QV:Pj73!:T697w-\0|]߼.WGXNtTIf8`"4Orl4v/OI`lC% TC9zmÓAFBܫ(5:  p=Q9,w- #GmraitI.gn&g 3a;gOZn9+Ҡ$.o6[Vv#MAՋw=g߃=wyYMV[Ym%xYڻsendstream endobj 750 0 obj << /Filter /FlateDecode /Length 1593 >> stream xڕX[o6~=C$J:.C d Xڊն\i%JV,<Ms3)w,qQ'vmE/h.3' Yjw00*o2Q&\xfE$iHm%SxJ3C7=<9aVʒdRxWvT 7Pݴp~N +[MLݖx5ݞ{8SY s籪h>*p=m,`$Ϳq%I:X@B,<52aoGia?G( 8ng|Nņ]ܖE>߹47.qUny"WcVr; C;B‘x2tZʱآFmȈ(7 5{ˤ:I J~ pm }8d h\0hx})9:eɌ:iCw"i[PP *!X^DOQ)5OIqp&t*m27i2⥢{LP BOy^YR;º-^Tl0%z>ZxCiQD#d)\ߐ+1Fž`V` AO{_#lt:Elѹtq9zvt󌏌(Y޽t5QѠވb<A{We_`F%w_Fg$q#'C6Acm2,A:f'|×;JjF$^Y6~衇+USdb~b[XG}U;JQvup eOeKn yrS{e>.RFTB>tQ0jap0BJ"o]H|jet3ǀbJs J3r9"eǻNeܮW%mgqS{_K6ϠNendstream endobj 751 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~endstream endobj 752 0 obj << /Filter /FlateDecode /Length 1210 >> stream xY_o6ϧУĪx()X tE3tX"3[VFV`9[#*-Ӆ>ȤNǻ9F^E/?.^\^f7[zY%qf/ ܿÇR?XyYh9<,dH) ."Bׇ6i&&|^Wr`9%2H b&mOB+C%NIgg.%9eI{>iy\ QUM0bގkm\ f7a,bʏ#6Ec0F {u8kr. 
A2ْJ)%( CZnJSWUi5sH_u@[=i2DQ5ý&X5g!eqdk] &kqZp6&z)s<SF Ak7$*raiQY@`2PYh%;2_MH Ňop[,!,VJeP|%wp҂ߩphVOr:|}H;'d++b5}nznAIVqt[4Ĺ6Ă'ZR^-C>o=UsAϵpc9ioweB1{+HA0=PaAjāƗ8p\`Qiˎ wthOq PFo;Rs?WVȴ7V/ks3/L縼P>㌝PWmL6`]Q\0YxCU0a4yyׇZ;2\"}kWArn${V~Ewf՝'(QB5x(ny%N WslC•{`4Ui#mZ،ogva]cendstream endobj 753 0 obj << /Filter /FlateDecode /Length 4491 >> stream xڽ;ks6+Qq ArrWTU.ݪ$8l"^L믟 HL6wEF'dVw+O7W*U[ʹ(U]Enk׬k럮7޻_nn/pǯL~3_?|դ7eQy -B\!@SJ7/:gכy /}R(Bڌ`oqU ӄڢd3m5ovl[y7$v\Ѷ0Qpji | JAj#62,@i䃄8,&v f0'NFf)&,/E.>S%/k:h<}$Kj U2gɾ)Yf.,%;d% eVX8K^DUnTE.m"+ /L< 咏tJHKgHpE|<R Â\Yu nxݣFӮn=Rw@BnSݴ 0. oJpf'BLɢf3jWυ6H0JHϏ/{7WTCd&{)ؿg4eR`j4RI1,DRg~{ L"_sQmy}Xj}s1QRv?dә,s-z#ÒuoOJ:E"z'w8ђpdN48[! -q {~?c{&hn;* ?P٫IT9>&TbNI>c!"MLisfj  O" /x8,#mjBS C$ߝ2M<ԕ;\/ Bjΐ>#h4y:r k ia*<غt@"z8si:c@ZwODL^ /9&do0F/f#ȩlqhzIb.FEQ-K̞.QJGw! Vs#8>B )VAYsf J<M0WQ;Q=6tUѡUokXH- ?Κ$o$nڎ{ѩy䐁QD0$ߗ=d;RYN㎭.a &QarItDϢ NkD_U>> =L ǁe_Ѭ.X(ᄒZ}O0Ĥ ]><1a6̎{E)1C>Fm֩MuUt󶱛T %[.I2-D"H@vXxʥVDBY??3,OeEն 5Iη*'oyp ˴Ai,1k dCq.2zS+Ah[yI"sf8J6 遧 oĪ. c >(@)Fi)=w2 >sc#: yiRE~Y$ F-[yM̓8QI&ppYw<0Ycv"xBp KXM2%t.R^qSHQu}%pҼ;2`XEKaLR,! eo&,gv qjzKvw"@ kXzQ:qk cBtT폌B͒gD8.B|Ҏe76dm/>fB3bSZ:Ʌ#<;t1*4#`(q=I* _s7P4 cgȓ97lٹ /òꅋdCW* |jtu mCLY zhGۡۗZ=s]M=&Ƨ)\ nACWG2]ూZ9?<<{0m}Pz0f+r,ɄezYMfiY֘X}aDIҫ/oyeI5uQ <"p/]mOWhV;#\x(i4tUج]7pi!AUweBAss&-m~~3ڲ _>YvaDGb<^% jSc2L{l2flʁZ^ӫ~~ XJzoekj`p`Z`j7kdz+\6$̖LJ.}n!DQz/ODt]bN-@Z-S/o~ +&_ls잻CipۿK{r[|Z2eNs+'U U̷z35UdEB4q~-9VgfvxjiFtt&Y2S-CR8/?7賦I}[9 ibO1,#"}1E$6T'UʹBxj_B7X0>endstream endobj 754 0 obj << /Filter /FlateDecode /Length 5472 >> stream x\YGr~#&LvUW_^GԱܠZjvXއ4FC;:pp]U]GVVGM9[/f}7]嬭ۢjܖWjLȴMTz6׷Wf>^-ol7WxU7a=r?Ui:yV/J]:uuf\ѵ{jAr~:f[c?#m\tA tGEŏR[= /7q3) .4Q 04W 7/gCD4fTUWtMdTҪ5EVj07pШ3aܲh^27_iggآh,ay7̰[hU7H >Wo毩 \Bx-_B}_7<&v㛫m{z]6* yCwFZ|kAҕ)VAXΰ Kx\ M]4Gy!-J/ vƴ79n]aY??$`z9R17O]ժQJ>P7pA]YS n, ʓ<|OqU])8|}a r}تE9CJ/ rzhf:fCS1A#_8A*WnGxӊ9^IihU9߀{/נ #_9%m6ZJx4t1 u_TMɪ67pgr3 %j[Ͽ#G ]moq6KnLEH[ݜ)o*LvKun_v [;Y^4PTtp骶A15tfEIG."=%xEAG < +t=ttkqdxpzNiJq2{a;^9mXU)MQsX]r3l'Cq]? 
ߠAӶ[u2?G+;OߟwnljԘ %hi5 7iOI!;(Zs Rj<ծ< >&tWkomB2čm ђtd.?DG%J#QA;Ѕ3k&ڧ<k`V$c h:X1Y@q\a ڀV{Lv=*[wj md]T$~VǜZ^Ueo?m 'J [KDaNҁC Bac/0!f%g/03B e|ܱZ~>B{HW>Z` qɉWiA r(vCAf2# 6N`OXWѸI~4\GNiÃ:M>ڢhu6Ε=P߲,aw*_~YSx5/gl(1y*eM!S”끦]rj h`39*Ƒn4ZyzsDHm zgQG "4kc! pZ㊦? =xAj'+0Qi< đ`>ዀmS- j %:";SmY`% 5EI x#uM)!&a*bIxV*;\ЀHug?}L۵:6–}rr fb=׮k1"y)5Kឰ2lg,d(Fs|UͿTŐzp)J7=M#^jDJwĉ Q:MgSp$~.xω:Ҡs֙ECiЩČ'MYq gʖ=e[-[}I2Q8Ab%SpD"vY'm7f0Q(j&I @E8̍0C8٧e5, =FQ3ɨ6l{(b̭@S-3Tht=Cv~!FcBv Xʖ@&bi2D_S瑆@*iR؃^05 _5VJ:]:@&ƟK9U@ٗ'0/~bUw#pUQ0VG}i+cg8D /7ӽ1mmrM){q ¹<r&3uU-H޵O/ `m$g($9$X&' J嗜']HJ`^khf jTQ@ U ٰt 򺦪ߟrP$j&ऺ P%1܇ځ)z:H ?^pI)۷-bt{ٽO:IL"qnI^"[86uX$1ӪJcelDQqlzH ɷqeB~m<3/'3Rx?,HNQ.Cf[>gf?d@$JgP9dvG2҃FsjV/lVBp2f ryjI*?] 'e(+U{hf|y3[5<2/g+SnH-e0Agn݋]j4"X ģˬU4s9Ї.fnpE݀@ɐ9)EuNJ_օ)ļoż|&>p`Jc|" %Ӑ~}MIzY,vs'V_{vjpy# :$\6 q`(@apZ颥~1ɤEKyO1xUTleBǂnօkJ*Z/ sp$He{Ml^In&plbd%F&${ٚ3dFG/C9垲n6 56zy0GL/!X{?欳*zQ=r: KYN<&hH5\wX% I&[8 7/NԲlIxe+R_'a14څ5iR$ s&VӫW2cIF)qوEWl/'}<|&3dzWFIؔ$Gm=ׇJ Μm,5Yj+5[ @/{~;9-]DdYāy8 k~E ^#Zʇ8c2QWϕsl'|Xo>E)tQnkś5$j$!R&y %=حw!wX[Ƨ=/.Hʔ=(T6]ߎ /P#RD|#}zq8*QiӴJ ]I֢yAS+f4migd~l[ w\#Q{aUk+rsVjן z}}dLj`YvzӜ}цDN*g`s5 434;\/jvϧț +@=NOy릙 o1Ox.D\Mnp~Inlv&9_r9zauܜD>luc!1||AY_Lb|Vz\=/(%砮s, {ٕZ]rmaϒԐ6 Ʉ?FD5695:@݄'}3:eDž6Ru=<|=tBbbbPW6,aWendstream endobj 755 0 obj << /Filter /FlateDecode /Length 3356 >> stream xڽZYs~`%P3k$ߎrIl%q -+kбU1G7_ ݌чW'^QWG#"+Jgѿo )"=8ZEp]o5p7>Gxn_/KgF%^%2ёQztM\,@S`kŤ98ReX \8>M.V,7Z\+ѕ~ORkz~wY%)"k{A9=PUy4#ͺc}ox~<}j*g~9@PQU&Ƨб6-ŋaU\zL%4q報kBzc!h W^:j(d,\= oiG&;`PBf6fo. 
)o1TpƆx1#mF:qIWy\[,9Qߡo;}}_P1)x.j&~& 7Oa Aϵ^ }OUU@3e5jC戀n+B 8lF~R[Tx*t.ek,mZE;" M9 cL5߀S aIꬊKrfr)r=Y}[aHLYnPp-Z4ԾFba,ciDZCAV8ߣKh+|<_Hࣀ'`':y]ЂW{ &<ѕY\E2DQًD^~ [b =4b+pS3t|0[>{>diId7هUةw# ٨ғ%渥Ջ T1P]~H!1l>`DMgR{P e>48B:KT ;Jl[oݰ&餻ւ~""74  nu)C}k ^D[E6GFipr]i663޿%"e `cY (ȧD%n1bkK!!fqKiZ˩_rQA :*1!iݷUSb8ڹvdA(/(Ӆu AlXx$xwձk)tA]LcnZf%aYgP"O!&:PYucy4$PomEYDR,mae#l7< ?lf+=n8F]~?\,y0)S dF3vv;6֠0"IjP#VF8:YYSŦ*ߦPY.-_/Xu^^H:`'U9lW RL%8:!M9TK,ݓkqĩ>W9i+F.QJFpLmÕ7jO`-󒲣֕KYp)iT9rT@1Im^N1]Dh;07{DdX\HȤgrZ6k0;ġK$wI;pY)/;f}mX?jT>.?48Bt& ~lq Orָ!M"Y,\ UW]e4Ҭ!/3JK`̀"6-kǴ?n]qn4.WN)S+ MRsm={^ (T>8+KK=5q> clrZb0 mTfLk& ='*v3KF?a쳼oO ,+lib`k)g!at\fYbȩa#S@xCKjijPxGt'1 ۬~(rt &q3J{-W Raֻeuo!ۦLEr3A{M]F[endstream endobj 756 0 obj << /Filter /FlateDecode /Length 3258 >> stream xZIsWĠW3b[d*e IY#󐿞5628sIׯmwKzG/^]0q]򤗺4΍]z#xUH>FWѸ?0DsYT :G\̐E"k6:izc,e.e+%i1>C~ƲŤ+6۸,4m_3˭<Y2U4u 6u1yFR:Y'b/ּ۸l':M*JiWZVAM劭=/F2-*besnr`OR|Ȫ糉^\$#VdjVtp֩)qt8" P&e熵!J8Ӷ Ķݎv{x|i9aNNXZ'dcm4o Db G풤' aAgDOY:k9my lT$`.xI LEAлqDDa #6$["d)Y)1Y /)#v'tu]ig-t -G Yw{n2sg$LRPljӂ~R({ToCODTUƱ_I.Qe!G uH2.)e`G,.%+pBqcQf`%?c'%e_V꬐XKe3Zʚ Bd謪\=45=NJoA93/%.7&xC\ĭ!ҝy\L=Xe*vT/I~e!bc`!2hQi4pK5akGـ,.p(Vj7d_JESM2Xs:H^Qm SѮmB~v@"Q-b iKz~V˒ᲊ7&"1KzoUM*s*5pm_X`"\L}uЍqx΍Հ|5WWQ0u VK xTQCT.|toř|^8o1`{0퐑+e. eR4ևUA Dzu$Aq&7q74-(ŀ0]7/&$vTX dpU۠ǖᴱ6~o󫾡fsWpVj*G?!AorF5Ҥ21 ߉dSӪL3KLst ~ɅddCzR gFZɣ"hЯ+ó׳V\y]]Ŝt ]TҺQ1*2e"cUN1K )>!CjRaXiyvFLJGl|$RqxٕB dx%%CR J>Pk&>c*24GJ)/]` %UKx'uT5pWUjM&}_I6#k!bfM%z OȿU nim!Tޢ#JԦ 6(]XrGBrrdU.%i؀8;ܣl>̞5\>3ZE>b( \̠YQ=!ַ]ؿWc+'G{AU~ՎO:w- rXe"|e*Ll u;R#Mꎦ|; ' `X@K`N#դ]E~Tq}Ul"Tp:nc{6p@T=6z?Jp;J|er%:|2]x­/+xNzڷ>kq u e>zC^;"NʄvT5Y!S2uAb;3_b=uJ`c.Щ'-PdfX^ίu''4l*_dUi o'd"kHƑ!RQn gu|xo/`(}<V+lt.`YVz3i 8Z@Ȕdvg28BW[8ֵK&.tu XW73y:# b>˙$`D_rIf>Z_T!s/="}􋯯^ mSNendstream endobj 757 0 obj << /Filter /FlateDecode /Length 3826 >> stream xڵZs6e! ssujg=N-ѲjITAJ:;Ӄ-F8Z٫w7|LQJG7w# 6# f1e|5Qٸh&x2Tk5_^Y b{5[=(oY&|z1g Ѱ4Wi*ң*F8 ?$&SX}O'k:44H2G!jcؔ8=wOجeIBZ2JMtOq= ?Ѣ} 5=X OmxH\fpl6> g'di<<OIq6Kc-J),`JSbMo'DXK&Q90|A;46҃F".mG3T9? 
D?m_?ZDq.KO">_IGS9FUv -":`'Y6͈wx53r8E8۾/(=}u#yOCOF=_DsDCE:#pW(ڟI,!FM( 1]Ga$3kGV}H4ʟ2k?Bv'~mLHjTdZg tl$x| Z-8u慈EOC2dd ~M k^L-އUz˲FUۚyOKl4VZŪMv] G Y8fI$<~lhO 'IJV !YƯ7>rfĠsIИH-b6``xZEAD]8^*8&/:(Yaz *V^*,Ey;]ܭuJys,WP" 77,OvKO0trƊ D^ Xmw.(d+nz@`/8e0DL+|5aw^\ ݔд:.$^L@[j 1JgpIz@VKt؍(FmMi8ԇ˒)v#cAƀϐ)7>!M']u8dia%F˧$dWN;T8: tj^ 9w:LN˜TX!ckbzF^sۂΡ@)|@wۂ.NtH{ޒx ~z&0Kyف 7ydM1(} 4Ru7ı;Lz1~b )ESSV>3đ4QĠ8qNS; 8h<$0)g˅ e=ڤ, "yGOI +i5'9jN? D=d\d3DI@`]/ v.w8okiJLYzJ ȇ5pe#Msf(?<^$,- %Y+ܥ╶U]Aw5% x3xWs'F._=W*<ἰd:q.x*yU`R'P"zNȜsV20ZOC?j:GDo5.Ѥ;R͜1!fvLw`ܪ%y*!GlE'ձ#%wة_kU=u{;b|$|S?Ĕ5R sJJfPI y6NK@ h2q80!.c!FriP-v+I\Y/eVS̴jMz4Ʃ3q-Ӌb9[>bI (nRHqВC?*BM{>N]IÕ]mpQY8&u [F\7:!&ީF3,-Z֮/'()x7%} O؟.vu0?@`Sss4oeF-a]-D !++jKɗK` ڽVGz;W8=b@y[,ߓq-Y~wƃpiS2tfIw<_rΦYwZn=J3#WXgb2侖d~Q 5@ϥȭ!Wd{Jڅ̕K[GS.L@^îܹv;io#> ;=8RJ6/+P%Š{[BL 0GKdIX3 w_WG.x & 9k<_^п藆ŁDڂ.oC˩!rGTߕ;}.P6"pn ѹb B7mΟ@MsAtm]pQ'^8EAVJ9EW.lKؗRW;tV 28B֢[ݒ<0Rs7e809@GMΎe4AT|:k[E۱zL:r:r4tYͫ1endstream endobj 758 0 obj << /Filter /FlateDecode /Length 2526 >> stream xڕY[SJ~ϯp\'. 
C@QIؖ!۷%[&hufx՛WϲA$ARdWdgn 4Q0 u45?&ФסOb6ylޚ'ɕN,SzRbՑq^SYʳ\az82ITˢ`OD'>8N#MOںMX+ ,ލv*w.H6F+L3Ny$3 (a[^nH& h5F:),J0xAEi4Q3_w؞pKu*ΉB|c Fd 4ʽ`/8,n)m57W&*o/jnxe)}(Q_N{gc4u$@hƫ$o0I{f5/8;.O#O•p.k{7I|lN?o{-uhT$ϊ Gȕܙ ~|P|'DAQt wcM T7xH@#+\hWyu4ȑ?%.&/W GCՎ.v\dCfS+|&S(I?Ś2EO'k؆1zޑQy}3 9"sflyC'` .^2Z?:Ʒ<'syD?OAEg.$;X:GX~ :#b_ N\DV39f.*sQ#O'!vF`UX옧~â7,F$acȤ@`O1eoxu.O+oC0$eo(s6!y9}՜kF^0Oxx+O>SyvÙM e%PM'O>>Sl1?xqM%q@FZGo[a[gyJ1L@Jx [UQMN!2ӉdoM N2jGsf{ p6 &#,whL/LxYVI!+༨z/HgweSġ^ VHAwe<@QWIkA3&u?;վ P'#Ys=)ZFj4}QAkPlׇZI.Vl -b{BLN(!3y9uIL\dP b̼O`|n9)Ȑ\]%jb(JRrG}Yw 1Ic.`r~-sA_÷hEM `u`HCy`ewG|dbq>swDrw2ν=G~y*4ru*qa Pbv  .MUV525TuZ $Yc}-癊>/9a>1eR>Tܳ f[[LȵHHaEô2mݣP4KFΙ /gP>o]vƆN:ǻ`n7Jqm&WV ]"xaMG{"9m_OR- aG`MBFSv+|-bJtC^ tTY\G-eaXML_M p'UF%tw tK~Bɤ`Rv9 U9EH⯘/ `3Qvu^lI=V!?:L NHL \=* lj)0IL*'ʔ.Yy SYaW6͈IŃ2tSj낝61ȷHZ7[rcԽa3eSaІLԣ#"q>xFC]d/Y-N'k&6+NfTH*.N A9* Qvt~Ĝ޼?:Nendstream endobj 759 0 obj << /BBox [ 0 0 595 842 ] /FormType 1 /Matrix [ 0 -1 1 0 0 595 ] /PTEX.FileName (C:/Users/hatz/AppData/Local/Temp/Rtmp8l9X64/Rbuild3fcc65a6/eRm/inst/doc/eRm_object_tree.pdf) /PTEX.InfoDict 711 0 R /PTEX.PageNumber 1 /Resources << /ExtGState << /R7 712 0 R >> /Font << /R10 181 0 R /R12 184 0 R /R8 179 0 R >> /ProcSet [ /PDF /Text ] >> /Subtype /Form /Type /XObject /Length 10699 /Filter /FlateDecode >> stream x][,q~_G~y $ CGvk)O}E3g䙆%ebXZ9)+?>?Wb'>9)Nx!%GS8AiXOE}m\thkƢQJ}i>=%8z>tg_=>i c4!:RE_Jb}%Zriwj[MA}:({o Ƹ=`0j?;刺P:%O>})@X9CUEp?yY}Eӌ!>ӯ ?>O&BMEkm1)k: [u6mɤb@(nAD=^D8Ps 71UDe!=76 ^n 0)^"Fkץ}zĞzܾ Ou(/_>| FEMsTۓ7&O*&+Q2 Hbivm)&JcQEw>LC},!&˺Q%81тi!J.ï~vCi٬)&ZO?R.m|qAJ-u! 
#,RezDZ=NJDtB#ڵޣ\XvuX7io@-_49t !h(>Gh+yă\!MyD!a Ʋ`7(Ezk8&"h῾|^矾|o (>B0w(;P<0-t>nK%P(Gk٣\X6`10w Ͱ jh2~&W29T4b&bX7Vi#d[=vs15P:BEz8`i!zFhJ/kQq,Gs2nOR^{ tXFʦPh -l#WeA͆G)XC$6>F+/k[v|(`]-R6;(j AYA۶qS k2X7JXR8I3ghwȧyl@'ц1+nc+`.u~h ;}1)~-ޔZHyJ%Z M'UX v4o=`-bBaF@RGuNJ6_ϿnVKl_KS_f9 SG9'!tRDޣla~ bX 1ZnFi qe!|õeo7 K4X0Eb4VR)t2#D .d7uP\u>+$"Nv(;=QJxmTaPБ ;/ 6ZM(`jbX *.ɌXJGN]9ݖX\ժ8ϪP`rMmrX=Tz[@ [P܂%5 tL`i&1 ao4lT'Yӡ)c(4U9_a#+v^0CJwֽ!UX" 㟌{B7}^,,BC0f`. [~_c D~5o,F@˙%d+ZՃ|ؽu0:a(L(/!gx!E,[`3K{h,sl&\&5ga u/wtٿ; G-|͙$]>Ց|zuq ؄"Tlm6Jͭ:&$[+QJmTxދxuu>g$P8ޙ<Sz{NVs*άZ p iƯc=Hq[w]9A9ec@m#W :&PNOJ(o|:-f乳JmT \uaYػ&iA4\|QR P ¢OBB+۠乱A*6z׃o7]%MLBT||cì4Ui_W|~oOߩvS`K3 C>sSO"9j3n+9>>/UsNNe;rvr~dNm)bh`?{EH1@3q)9d#y!"3.rÆ^ٜ޵+Vmƍy\9u5,0xS1l/ޛXo:T%Ňfչ,G&w6.)GS??j?+\=}vV!Ra*\Pv 0&;-˽`qJh–p`v~o;a\vK`>vuQkAW: hBǘl)[TT~r@W(m|~C8GE/{3\ S=<K_ `//߿@ 6DC֤`.@€"LEA_@SkRӭF(m - {õ1@@1Є6})%I@yPM0ʑLacz?8UEvGk |õw1e6a7u+Б3y0,ݱ pDcsBq*&!\&dCe$`wz08+! T))chK䢁e="@EsFR'Ap8- &lX2=B9_ A# /x춥XrꄸNyS8$Shye2kMj6- ߲-5~r82r­x(_?|Jmhz6>%Tzd)%V7=Zφޱ͏keމ۔ViuViuViuViuViuViuViuViuq[$H.3 nMs~v 6Pp (_(u.!_Fq;/_l;a R{h7]jo擉ۯc j/na{n{l;a,ޡ`0\CfCjo46Jj9 4r(7!:.8Ik{7mFu#7M\pH1qRHBI9Vg5eK{u ІU(}v'=X=:@Qn*u<0k+Vs-YC.CW6ő=p٧[ \6bEQ .Fv `K b ne<DzE?-xvTxp uh4L'@ܑێץxP]NJSX*3{t!:J> [akp-Ocqiי <'[hE0p?qk>xvx,f9tT*.GjwHz3)ޮ"l;@Oگb5fkgl|;xeJ6bXn؆H"hbɻGeù+SE:<P[GqlxRxdTաf)f Wwv(#0 sZ ?2oX`C5A?*A~]@Um5MD QV@Rv$q,@ys]` 5ڈi r>Gq pW"h-|¢, ڷ.ϗsxM;b~=_QVw(]awCQp=`&D+5Ja,תQjGF ;R&3-8KG [X I %!QIY[bqz}2q4" ڵi8fqs[< t8)%, %q=iJ1KB)CfO#PY^, -}PMP2Lgr5"xpS1"]  $)] ~rDt9c;RuGए wv]Ӹt}{Xh2&^lx1AgGAc e{1*b\ϴҞ%K#\-˥h^ᬄAZyn, m ^HG]?B~ݒǓ3sX7}V.Mo֎ &Ь[8npRcY6E$3oP)FsVqlpB5s!@ڸdzC`rtNcbZ U/q<Ӳ tR4!S+P6J8(jC( 6M=dߺSmH3!! 
dC_[S~^ג0҇#BG[-6Ą)^8Mpr]¢T1 – N 0JP8p@;H i {5~$OKF D85bEUQV¢>Is44)_i.5sc!iT m\]Ty -miG;qLouYڜgJ-g5 mVZM"#Os(h}0Cyzqe5طFHC1_>x:Q Ͼ$^A @z&,!^WEm_@[htPbkFPUDֹfP);=+s cu`O>ƕ҇Fdmk@{udc-$N8#Miy)Ǎvs2:&jJ[sTEaja$Ȅ H&\cdSMC^E0rݢR~W/`ЬxݎB NPvyLWDc }\҇Fy"kyuc1W 2O,.Nr Vʄakw:M)q1WҰcgŦ\WVaЩ%(AGۦBPrJ SX4f,IH J-UPp(i66i;tg ӝa3Lw0;tg ӝa3Lw0Nӝa3Lw0;AOw0;tgxtw2|JzzXc y&XԠm H )sg!iD@ڸ-A;bF`C xO;)˞s1XxXT[7%Y] 36G}FpVMn}CIÔrMm0'+Ef ZxiaG.!5s!i \cl&ۀ$ƙss.!Z3gDp mA;H+iM=l{xl3q ԸMT\ %59Y\Qs\6OOd .pnr/HjRjXPN\B0)BTK GN VBk*5ƣQj+Z ~T{ucٖpOEt3h$@+B:@Io$W_ιc8RgsFm2u]xgzDPKH yEZ!5H+WvI9DIVbQBqŅ͜l/1E#2!2' yF^B/ǽVߚX\<8ҵR!g;k}˵QB Pxo]/R%o-eڇp@}K^; *ed}Y;֔k}˵Qns4,mw7xMu1P(P.;*cΖ8\8@ q>Na w(W fk}E8M@{?~.bŧ bZii`(kqT~ ّv:rh>~BIx]XSQep#m,mQ ~Fuv`0X)o햲X\;@ШuK퇱\{~p*^FFb#A fږnQQWQ n4); ȿg]$?┏?^<M ^^rfpd7:HBBVCN EE~k~{HvpEgȆ+l`ָ~AvRqGE_X |Pv~/:%!C($١\8dQ nE wɼ(;ogX y+v{tl԰;8 Sa= ǡQXP;+y˵(@6u5|=n]窏mAWbjG3MBlH|;;6x\=Q;UnE:,2%;֗'ޝ&AnXZb>@/ OȫzBHcQBPϮ] @B^7[;3to9vWQ 07~n] $4kvIц˸TҢn N4{O4{_afifififififififߞ|9pendstream endobj 760 0 obj << /Filter /FlateDecode /Length 198 >> stream xM 0EY&i:id+Z d'.J[(W~IjA7s sG@ dHR)XnsLV[p;obMD x⥻{CGU m+Ocр mz |: p 5.daޑ}|c@endstream endobj 761 0 obj << /Filter /FlateDecode /Length1 1408 /Length2 5856 /Length3 0 /Length 6811 >> stream xڍw4۾QT-LlԞ5KIHBRF]1ZF{ݪE'$y>|x8DPv0u+!:*F`<I0;H ;?? 
Ģ=a~ X ӝ9&H Z A>&t45ԔwVE IʊK#>&b8tQMÀX $'|F].{Knn"wB'`&ރak4PMN%EAR$u7AwCMa Ga>2rٻ0N`5= ťP4 H ~uy9PLJ@GfA@1'M0o vfD|R2Ndu84itwςч)mVnpmWSI2h8}pf`8x?5\y5۟{<0 r%'n˓)KK]Չ\5RZ ?&cmT'39V:]owWrMoHY|=/5ǴpX0_'=`ln>W~78P(yREIvk1Ƽa7xb(-:ZaיU.^rd{ld㋾#}+=->"ɫJB~A96FJsm@ $F8ilxNE@OtZ%9nxҶ I–/rsy"IPx7Nsb("6RB7 Ж_F)h\cѤeer,ݤu P[6xOt/@{%>-0UI$A~fwLq'mYDeo5zJO]تgalOЬߵˣ!⬯+Jҽ˅:N^1&ޏtKm=f⿿gzc> y,6|l2JʡHv,xK^WV})3,f/Uw6|S3o<gq9!M4,yϋVF۟:9{>f0U(T8j{Cdvh\XƲԮoo>jg55V&yY~~FG2#R aW[{xǐ١`J>n5黼JCWV⏱Ɛk5I1]^LC!%7)~޼zSccV셫ZV$Sb\tJoxBO*g~URlogBd?S†7KO5$rw&ö;,bE0\RtT4fZÊS^K9#S^/džq4,W~cuQfH~puWvN`Z僧MitIq?xsK]Z-'w/ͧG?d\v\*|pݎYF2XZFE6"VkhA)4`RQrYC^2{[VrJI1Oliط0hR6QnCQd?PϴSyaJPg^O, +eOSg@oa4n?u[HZ,z#_(Y-D)poes!/]ŵBZ!I##a.#;B7:1 bWIF|j{bcsbzyn$I! kerL9{<޴&__6ktC0a}e&nzG`$G@a} ႅwŌf~o`Q3K JMr<rc)# \cb񒙕Y1wym=Ko n}[S,X6vئ` .q9PޞCAVȨDbeByk:XSX_2qJ3`mLE_{ENU yLf\͎fIh0Rq'WiS(r h% l$#"ں 1P帚ٕܠNV\3 cc$x<{EqBoWOp2x3.NWVJ&y;:n>-Hq{\Tv gwe$6G-;_SBcUqm U57qkwUD~hmS{NQyb48=zA-'F4ERe+]s>W15=H? !dVk}3;(.Wjg5X TϞe|Y/iídfAA͉و8n%h-CYm%D-?WWxȤ(»fYHwu+#)uа\mpGI\Oi&F*ZiirFB1]WX+겫p\[05~Ps1gN96?I=9IQ BGMe/_q+N|ęT|GRųxҞIR83--%3'qJ80l6C {^nmd[ihQGÀ]wd.:ˁIf F P; zwݡoG-C]Eoa:l_JEn_YL$UAc sm,Aa0Al@)/u;p2_ĉK=-qW(?bZ֝}Ikhd@џvbWT(1溼gzCRJ##r7,gQqSbw~pϫJ4f*ԩX >g6'?s{}PߚH>-a5),m 8-s$l~ED&fh j YY M8s5AqwHɶ" 3;, QR$%M˲בN젽e}67=:~BV@I@QS.uKH^9-{3ҤZT-":ZjOK:ۣ#U?[I \8M\P< fb5bX~wG2w޹YNF$$͖sj}C|9dHo/ 6[ʻ>zTXyv:1A=׆RSn{)IeӅ-3%02oF5raDWWW+5\"˦ 8}._>; Su'IEw2\IaKgpd?_['I!BRi{8wcRś;<`7ǚ9j89:hīzїSM1Y^aM-~#Yo]30Xiw .ĵꍓӥ*|ɹ+a?ʊ\{bCZxptKRj뭇:ٞNEtLyo뤩42s78^쉰telϴ !l)&W*s ӤϝohR05NF}OvZ}{Ljvʕl"~SX}wZ s]y6OܶV8p|4tfٙĕC!dD1X+KEGIKjm'+Dk5[{$KHVˣm)2 %pׇ ukyf9!  
K)6ʍ~[5?ea%aLѠ0F=3/%zr=K guH/jt0> stream xڍT TV]S .< !5ᑊKTD!d&d&kʺUY*ZJy" ""Xd=VKX; Asړs&{ҐpwEJ3i >  `(!'HX# WCi`Zl3| SH@0 P8bJ dW@&u3V!b+ȊbH11I)q)pvqa0a 0R_A LFqq>ǤR41d*@VA`FA7`n`Ҙҙ ŘB Z)"zQH"J@Hcd<!r(0@$C3?\BN#ݘl?* %p~~ }ÍG1 t`!1>f @ 7A$Ѭ ym\ _aa@(h!&{ L!M,SHM~8?kRj܍`SFx|}0S_"Deݙ43@pKBB7.Y% `8ba '>r''frbM.쏊1qYOT ep@2\U dR8P CIQb+!NʤbJEIdia8 SZ+lboBGCm4;$v~~_SSGsν/4Qʉt Kil[JUzYUUQ1lT"WvS3$л,pKxڜeeEo f./yNh8>ˋme0JE2K|P{t[,ΓMEN8X+{VU00 #Qw UIý'Go{؜{Br S /YT&&&P3/qܝ}F>.xLaP9ZrW@qKi}JؗucK豊kKsx>{Fݜ,}_aʚqK^td8 y5QCQVE69R;5GYT3ںhtUêC(gKf>6{uAQ_Ќ IY?-Gjc%-#-~^W?R'-[VTRnHi]FE<}gV TkU-|ُZpBB N!N8kZ_/FoY;\TɭIC?Oa/93AW֔g@mo.Kr (5 y37ȝxJ#-|}s}z񣼗;ݶi[Yz8SuU=_N5,n f8){Ãw"Wg7輷P Di oEk;d/ߴn7.3Jm wtY7~)zPϰVZښ`(51_-<_(]_`?y0}[,U*b/noM6Z(!=){c(b__ayrq_=x$ȯ6<NhW 󔈴%wݺlQ,ߏ5mW3Vxu/Z<*KshhI廋rt;bu6c:[c]RSWnK`_Avfn˖vAShtHpf~#8ۏS]G.#v3WL>eua=CVR ٚqN_Jzdz}F<Ώ[> stream xڍeT\-]ݥwww !, w \99}ߏ7zfU)TEf )G7FV&>+ RGHrqvt ,0u*::V.>Vn>  ]@"@H)bmi߿sZ+//7ÿ ksShn r4VnnN|̞LL.B Ok7+*dj8&DJUn. X`gmrp;;A.p~@ oc0Cp+ÿML,v # lÛDUS+_U2LP$]@`潙nla;HV?V`% n,2b+_Jֿ:}R@ "++hm0YZ; ,Sbg!+ sttcF3ih]j11G//#  o)oZx(`w`[ƃ?C Jhl '9yE؂LRvv?6vw/#xUoSmпw[vneu<#/׿֮R^ տgv8ZuY۔u 4_2K/ft0wl\SSoDqr|Y+ ykLn`B __#n `q V` 87Tj8`F`d74u ۺڙZj7{;ځw`$R>fߐ`@-=Q3\&1R;3OD'&L`? 
S@0.sg*ߏ̎m'v:dv ?V P>'b$iZNV]/ giDs3B*n~G^y> e1W ecw P(]UqcRSl]7ʆNB3m,<ˬd~'sWqМ`[M|؅.P(_Ҏ< l?}KڕJ22nG}^Hq@E ҐӪ81 @)K ̛țdOo.bHJivznS/,y{GL+β'U̶xLvS&,A/&yXDm}`=>=CxV-:I-TdQH/0 IMem@}cmEje6>kyuO2iZx6&<9,# ?Z/tedmD?ZwyPQT f=q\XbrĞsI3fDA˘g{E+b[1-#}30Z׹*Gi5o 翏2S3۹(PdesqLFݔG0bnXbs[r8'k&| 2m ͨCo?y~,=ƧGj(ȆZ Qd]u ֶCV ~EMhfݫ0黒vQ}1׊_FIſ2p25ҸX'\eY,=UDU; D-zFR>KqWRk{ F |KE5V`վ&7*epd&>DBwr3~u3Sy; ?+d~bVG`2%sL-̈́i& {g*4%*+/[8N[3To0r~p*;a nLMlrpyS6 ֬>A7"_ s#JV 0탑!:an#EJ</Bm|l# +&Y˔Dzӑ8*Mqj&, Ux̮g:9j Cw=$ 8HbYYh>J6&^6WҞPWqZޔcӊ楢jI1I${*=%=V\[,S6mBϐ~Nl1YR9U̩q<*DO2l{W[q;Ns;d,x5TI2xϜ>CI\|ֳbv{L }STXo]lۈ~~0x6=_U>lGQSzo(zT:闙-ܩ2aĶH5sb7^,DhV:mEX}A%0jA,{ik@p{@p,aIjGQ$)tUTS8\CUdv=co ]#GpwXgơ,`!*E2 hr6xKOT~^Wu1qZ*oxkYJVچ\.T t|f}r g2IrSe$x5$6?+]$c,Ў^vI鳬?ӧ3G 6E/In~B+B )Nsa)_8N+}ި2TqZ-<O vZp vLį d} A|YzL# N#յd"EV+硝u#j[%va٭%׉#Cu7`Um9rVrĥ_>#k vp¾'8LS.HOhpeV5>s0qRX 1 ʓYOSC}@:? a̻A9BO1昪ifZ#4}շ226p6)OJEqgl#4ʊKNJYMq^@G=irQ/} f<4ǹ%UsE7B($KB6nvnDm$Lh༠qMGt7`a;B]wiW3UH]~0qItrj2C1c(ѕ(i`|~Ŋ2b 6{5p#ABj5 _W+utJ k,'`kM ն^15@` B"Ϟ>ysG! \xGdGon2h8p? لt(HڐQ]fDF$2 ;m!my}w9S'gbZ>#kd3 z&Ĭ~d֝3>^8Ex28v܃5j߯įJӹO]τpsD%f L~N -cocqKP4o]VEi^/)SgB)?aRQԲssf(BB<Tb1;|ZǑ^ݒ~=g x"%! ̂qi:.^ݑ]/=i~xœ*%2 [c f=UK#5^9tϺQ,rrd^vP|q?<T:``u; y:.SΙ6uev!Հb-},R;_sDVCR~9QDžн{]R{J9(OX!qq¦ BH jY͖F8&ee)Ev`14r=tZ̩K%;T`_DqEۨVoexu@] a+HFЄQ O|6/к6I%1B-`%thtTx&ex0W=x8Z?e>D[#t\`Nj/Ijww^>hhfYY b7D3ARLa_6WZ7 H]8F6Vg-"{J(3'n}-,lX5p>-EQ#"PC>^J=z=-řFQ!`G0zmB$ln%br-tm _Ɨ#_:~f2J,ͮ+KO fi~^u hIb"⦷ޯw߼ʝPn44|T,&=΀P̗'W2/n攱amjM1'~-ދ6i4"qW?2GC./ y/eT8O_Ytg?]e}$(G9s3J4_K5a{u w%QlM:/rJJAH~/jN?VZaM[oY!azL%S^O=xlѳ|7fhJ eZsc!2Unqd. 
үɵ9F.3p {Z 6L+ʜS2 ]{˴4 3ۃV^g7|ԈEs^ƛQ T`^}3FZr H%g!^\ .p-zsxPTHzn8eqv\rwYP $xё c{; Giud/ xF'Žzk"ܼq{#| Z筁ͮ<\94n۾s&ws^j@nOz73 -\HI(Bo#F&{KY=)RQ+Ѧ{BO>i]U99i7`o'wM֐lT7#d&$eQh4M;4q2x2ͻ>k<$ u_ ]M+Mt^nf]yeN⃿gLq#|;NV:~H0`D4|zayH\ J Q?iD-aa}ubi;ukP< E^kD88jr"Dt#`\3$z^sz)R]9z@vB41'RMB,dཱྀ贽H^_B_ܔȬ!Fc_L5'o5{SyDAXxp}n&m^hq읺g?Oѳ\8RMĺGYӽr0lF[V5Bܤݿ5Tv[>͌!(khEVq ï qH YTJZe+)*o6dyU˘gaBZJJWJ^Lyv fVNW#{,)_#ج | zlE"i(xv6iq: f5/4D] 1}OZb]|0hg)Jhx&>) >aNr+)4EW4Eìr^q5ߖ0b"k6kR$@3KyS#`J5J-܆HLvd%\èKSQAt8Dag|d"U ^h;Bd%)îcWҧDKK<S90#Km4tpJ1ycwǺp'ϻZ3n|=^OO/(e%ۇ@\U3+?0$#P[T>R">JPmg *%'C^T?iZ:$@fkiǷeLg _DC}]Ug9 C{#k\KAﱩ^ 僇zVX_=o~qg[2Q $Qft6ClFOY(F{s/_.s hBQFU eםHˌcɖN 9&?/5Do#{D2m<(ēKѲЍO~6A e#܊V<#9v$b}o@Aflxw@< jQO\|h**auᆷ=#˿w7ȫ6ziq>i?4cxA{Zˍ@۝v޸K>g4C!4r񲯙`'x\&q[ ' [ة':&Ưcu*eF>娖#Bj bRv)-Y=QB@7$%MxNH {݋48!;6d_aM2xl% T"Xo'qpƆS`U bn}Ĕ"ZÄxaEe \zJg%V3|,+칚Od hNwtb1a:fB16m ,5T(5Q@}HbG2}Aw YR D7 c}/t~oY4(*n7x-# .~V 9,Ac+s+^r@P-ΪA]|$qlJ^L'a3E[\vn.fćP]6O~i 4W;_ aQ>,WI ~hH+~-V5;!lڍa;pK5%5UChVe68}ޥT |Vg3bfŦo 32ȅՠB/?i}Yk~ζ")~p) >3_H{,}zJQh2?})J@gβ7D A9ww L OQ(/=ff.OV:F+&a(>s#&DqI3Dªx֎b2}2RgOK$t1Ɵ\aڅَbD XmP߮<%4+(HL_=]q12w W5X림'^G51J ^eX)WU.9iP0"Zw ~o4~g7S-z ]/F. no&x)fY~ " <}?XϧqҝA܌ :mllbU㕦7IҪT%ƾd0l9[Cc%)(\M`&{;}m:Y5Qg!vVs1Ͷ*2ҷ+Ga/aa-(p}dAyӸ*4)X5Je˞dG{z[OUOJ1_ , 4{,4 <4-=]J?xx L k/.{^>B3G}ۗ&7x0E1p" VcmwUӂi( eHS@' u۳Z“es<`{I8|%.UER@\=64jipf~GSCGO,]YM͵}f(_0K%e-<'hnԷ|"Zn5c ~iEÐ/@QhXntP}}ssi[Tm`.{Y  -@qծ r]Zs!Hwю!s.~{Oo]}0B^4Oztm; Rʎ2VZ<:7T Y÷7NoAĮdC Ls ջYjCh0l4꣯Jy3jByPT"^ixTtMy1JCZɫ}:"[gC)F2T%ɂr5P>]tJ~Ԙ(^M_v2`n}+P r~z-ۗ#hĔ%6v':+*C1#r~aàL_x!jzNs9uLēUL?%'n_-ufPm~:aa.r)!ȴa#z~([*<6\tⴊG^IN6blWBg d,g96ܰpYV-+UUb`?5J⑼__|'}n|%BBL',qJUhjO!Ȏb1,4?vݓ'LA(1Rusk<ډ߲"tXKYehx>X/{fk<15oъvⓊ;! <)$ hD?(W02cfT®΂`iu}[&_QXm`iHZ<´*/ַߧjfֺ>&/v/ Ư5(Ϟ e]CU1[Ǘ!nS&; x!z:G/*fv1U]6ו~Mqw!yx v|MiªVy/<ٯJmFp+VWyHp&&J9F:]}Sh *FD_k@ʱq?סNx"(pCnv%c I5NR4ݢpa-cNɜHW"|@&EA<|©-5LHnW{}ڇ±ܪ7X[Gz9q8]|R]E$r.ڥ!V/>.FI:CU=?gHKFnJAo] f"1yxE]H!ϤιZr) h"palۑPOvgޙɺ ߩNBпY[mW]0a:[t3 \㏋!q ;bF$9K;"? 
$:}lG2ýzLKYӼjfθH!n6(ؠIttiIE>qe/%qZ(L[f1Pvo?'>+(L:1@ ::+)wv|2O^l"~#@1OJ7|H5ɵ UN;4.kSx`9~S+wh`ـ3s7=Rev+Lʸ܆  %D8!5-= KAtӤͷuPBurzP,򤥐kWc"vڃ*KYZ7{XLvϕEI6d~dCϟl!5R5 `3ڦ=л(enF[^$㳌EG g8^ܺljD8 QBj)өb}Od ?Ym j~,~aHC|$R yz~֝st^SZ7FTVœ)qpZ;ܯwJ.sb[vfٍl0VK@3ǦMyvBS-j90"⢼ 2OߐXVaM7| u$ː|`>ԇ΋"x]7ǜIb#hB1knG[$~Pb:Z.L5‡ښ>!{rg|<;\.Iд%7~;J:y wTO(6['~۰n"cXx ةۉWBWU*چ?C%1]j"TӅ2l@Y]o/'bGEi<h_%-$Aqڈ&^h\:̳pN6M>AGZNa gxwH,_iO:ɞR;2vmU0"V'%HJwS12Xqnp޺LΑԹVFY9xu1˦9 cg^X`-x>6r) :l"BqZLJ*n=)Dbȹp0۩\ߣwTO(0\EM b^*ߙOo USQ^)E ݀K,3WlݍJʠ4Em\8*r!\Ȭi6zji̞5tiq3A]UȓLXM {'i3K#B{8U6OB9ًq Űjʽ>W ]"cBցtnEpcH*d IzMdm] nȂǯ]B1sаdN5g _|٤И4hwH508iuI2HcW`G}$1g|d ܾ6lJ{%wdžQg+ᖂ=:jQ=-Dx(]s2o.>787ɇ( m'_C) 6*糈^Sfvz#͢@|θ[փL}ratpۅt$6-\YTL^Y?*"ق䊘9٩7SyC0/ӉC\ӟ!`'~[^H6VOcn)if#ǾY [@|e߳טJz* {AmLu2ofWK ~!7 oRfs'EqKW|CD#EWu? J"~ٕYnk:xR|ܑ!Lv:hY&ztR>Vw,cߚi Y<Pv;>/ < (8 esNtJSs6_֜Շe ͘g%Q_YVѥ9ByRjROJ绫UE\$ebP!tQGr ^뼽'%kk`VO N o!Tkj22Ө+r<^3V !3nϺ׊`}viɼ9G3,|2ꢇ$ްjLHڗ`X#"G{T0)Eü2Ʈ tզ4(;Eҙ6yU QTÇޯH=}[PJW&%ɼ>P`wW<`+͵_ʗ ^:CN7pDK9eI%̡Cv=Fx"m!}* \,,vQp-+v<"@.7״8Zf &gʼnڅ\HT ')hi#nqP2n܎kܲsinAT˂5e ̍F<uE?yk[_ “qZ|˷m&P+Rڨk9\h%*:yv^N+^/+me-FTWv׼kdjcCiUrxYt!9ۋp:8~;TL㌆lra,'АQ* gDbw O-5,5sVV=0=o/~OX>R4pѶZjxܗg9|KN\e^9 xc_I G|_XxKX> E CBtLG@!4,ڎ~KZ@w9m26ODkƬ|\(*)]n%rNj{vD |wߟ{31a{ZFEr5ah5fjbw}8Z}$Et_}lF`~~M__#g6T+^7%h#.AfL[lׂ^O6sbΌo $ӎpsf)ؾșċ]( G|iٖW2Ey͞K"47뿯Gѯ/Bt< !2!>5\Eg.&:ݪۙ)N86Ja_u / tt"VD3XY=&椞=y.7*ԹюquS]䌘TO:TF>}>E)b ;rlPUU%w46AOR>{֧0ȧ-&s X9N!p ] F? 
b$IٗM|u=ȇ)r~:ڄ^*}X @\0@Z5/*#o}CpA,5%L%"/k^Qwwϗendstream endobj 764 0 obj << /Filter /FlateDecode /Length1 1392 /Length2 5960 /Length3 0 /Length 6901 >> stream xڍwT6R HC7 tK 1%  ݒJI H R"ݝ7sk}ߚf{k}_{YkXxdm%8 5A$  ౰Cΐx,w;DHPT=~A_D_T@ pw @ 4xU8 cC:?6(p@ 0₪hv6P)إHW >>///^ n/ xA.q(`jx,CnCj!P!0[;詨Z_`J'W"(w0 @ah)"f vFQ`O0l:P(!lܡH/#߯46+l..~ Pw >|{>Aavhz=A< * 0(޿m$ @ x8* eFqsv({B$?<P$` euF P~}d0[8#S5R5C_N997#qqq@TXg5o`GByT&o٠[_Y)y8;`j=(ihQ75 P (H(B Ն"mڢJ A7%:'[oTmO@X}@ Q*x^nGBkb/?x4{ MOm$kZϫeixV>b/,E'F #Y9x4&ge[A9=U12Où{>m֙˦~M<,P "Ue|,1K*'|*_$7ލ7{tNNNv'F46F[ ɻM8bp%ּ7Ҿ{CBStlmzOc汔L_#09W+ ȯưSX}DI7|XYáƖ/L񒎹8,$2t=뺸{PyQ`!!-~,:p( ݾwQ,(F[} beAH -<ۮ6 OO,. D[{Wl|h0>|>d| }g\nc{viQZ]rFF i}|`*+ZHdU=.͍8э\]V{GʂN! 'QX,V\I~/B#5-w-6s ;Z486;6W}Z3yZ?{8V?:vnuVJZ-MƤ'p4 !)νNwz 6[w??]j6=QKkk8HJNoD+Gk }ZZ6XBHɹ1$[qK 9BaVu`X1K&(`+hI2|Ò!*#k&z*BZkG5#uZY%c7LG22"mPQeܮޠEiN8M:^M<QGi/̴OAsxG#?^adE#Ь>G xޯ19ݧ~5Ժ0G@ofqID@x_~['z|GYKr p3ruTOzKMX]sEkmh#H¥M|lvHa2C1@vY@LL2_A%B눂4 )덆e_ ឯHRԐejaPH@F9q1)nDzpίjzIEwCNĴ^Hsn="H7h %h5';I%?_sQ,%P"=!{;!{МT6m(F^- &U+lYBIB@þh𲍆 \5TQXE*穑l2'WDĒ\<57cqݟ•82=c)!H 7weaR/qͽxU) ls PwIRs#Yh>=lmZzQlv8eZm{(:AU/? 
MeIa;'3>Ʒ~mJ1V'؎ޭHSբOjT)#7Cy$2]kIڻ}na9׉f ZjO_E|<p.HQPgx冷=mYz\B%)ܛ+eså/QӘ Da1{8TIu/I(0#1p: l>O|TL*dD#l\!FwbZ(Tj ^txOӧáP&HW$mh3ģ3 >BXK =9z8`g(38mv>X.O0BOV_P zhݭo;]2FqX{kqQ Wwd7{k>NLopv/DH\߿樘nG^lV-)@p[3+tvʣܽ0ûK3Ǣ!>hzJ]hEǧ肫\D{HAx'; ۩Ө"M6B׻$gDhnxà/ت×aLznqr'ȌiIg7AX}N=[m(7a-~ΩʞxMFv+UÊ woZ, `Xo3kkX^OA4DhǒӪ`CI,_ ءUU+ ;9CQtZ)R_w( n}^r (ptnd~R(JL]6"xE.xqX"@ϪWN>ONZu„%[ OI"N7m-ʰSd1~w[ ы:/ Tf( sk}}F'm*_?r ]8e˕JtK@@B_jU]t~kwӾ|2p-.Ȯ J8@d(ui<Ůbe9/^9ǨH8<Թjnyqv~]ꡒ k{?OJ\4eBk>Dfz=> M$?=soPgeICk44P)ew+i) oe}mݘrq{̏ڂ꧿)mfƩr.ιqK_*[װ&GXY011c Va^!H}>O5B6;еتXx<Uf,]ږE8k7?x_te}4YzbGA 䩺F,7}ZRWM?7Hʨt͓UG'kX#44qLuܐ1> Vk ^OW\ 4Z|ƂB> N?p DŽQran cC硧BL@FC%XP< ]8׍͔@﹛=e;dk!w 9{ b`)$Pѽ!s^/W,&<ڡTy}u<< +iNxn+'Kol7"IwޙU,wUɟRt|ʚ?OmIzIE'#"DyɸfG-%v`S7ITSAдںlr^W7y+{QdA*Z9_'0yS/6ux9K$ҏҰ o8tGf龉Jڀ„xf}me<1%1*i8]c@)ĈmM ~ݰzI{ !CK=\_9}`]+Md R r Jv>a4@xk =KNto2{rBӭ7&_oIO9NGiMUbRZMwe"m?BT\y\}R%ILl:UjTtmqxF<.d^|lh/N`/zIG,R*S78)@GS.s'lU6ۻ=[9+|y ҉} Vy{3y=`T9,_li0Y8V! cƜ>C竄90`fJ; Le;>7be*3zJB#"er,zynU|ӈ ȮwD1^4<π69BC6^ٙ9M>bO}mrzީNve^ 3 3-_+DI=_K$] M_ :PvW͎ Y$"!c 4ho̚TQSmAG3å= P`jAA nrY];P*z8M"h"h>^hTo:Fʓf+;wZ8L N,F<^d)-havʣ1q50U.},^ʟJtz~9˳NUw6q'݄21y@6{U`vA?W`Ͱ(7F5-+ީ5n3qbVe7q'@-g#uG0Hd3B6 VšO?~Jy|X^NA9D~Cȣ IHe>kεOς̖X?<9d纎^UѢS|wEt3 4Td>{A1zc3hv?IrogXab#5-Yҿ) @%JA*wL\TM0:.?v=V :yI/d͒ &/ Aw5ghЦF'|*zHmixɂGLI^cʝU-Î)%m/̚vۦRk;@6f.,¬6&ꚩD$UV)J}htʋwLG&D~XXml0: b8 VOK^?YxG]lǕp^v.1y mҴollʹu}٦q04m&>3[[ecM$ۛ}Ev| 2˸}BȻKGD=3,u?MnxԂP9%'g'%(&؞pЍhm6 b}(W$KW9ItSFw֝%a3?+}vp2-Hq:0,7w2`79ޗx,CE\.-r%}s3kwoA-S?Wu.CG U)Ptycد>  {#n/YY^ugUtv _Zoѻ1o9>~i1JhV #ڈrrj:zlD Rq2&\3ABLa؏h'~/Eo{AM-Ķh@:bˆ}Ws8mLXx80Y:tvN/6Ԝt"^pTږ6?v,dg,C_#)"%}*&sė)OQ'.){NLn%$QۺUP[suäOM*~ͻ!R];f CPʕFhL%—I9u[mN0C~GGz۩ircK\}x|7y3& K36H B"6^-Q[>o7V FŹ|dH L鷝NsvvX%+*pM޶Oqr TY [[bendstream endobj 765 0 obj << /Type /XRef /Length 330 /Filter /FlateDecode /DecodeParms << /Columns 5 /Predictor 12 >> /W [ 1 3 1 ] /Info 101 0 R /Root 100 0 R /Size 766 /ID [<121c659c1ca6911c59e365f9667f6972>] >> stream x?KBQs uAHtI(l+pnĭMQ-ŭ"""3D:W8W1  2XhQtIF>s1}ƖuO%OF7-wKpjIy1 pO1Omd ;ǯUyVMeބìKtF*$ݧKt[b}l"y1 
j|ݝ7|+W/@?n\C?̳Gfap"0&ᾫI w8|Sz28?? {3ug:E endstream endobj startxref 468903 %%EOF eRm/DESCRIPTION0000744000176000001440000000232711706463106012544 0ustar ripleyusersPackage: eRm Type: Package Title: Extended Rasch Modeling. Version: 0.14-0 Date: 2011-06-05 Author: Patrick Mair, Reinhold Hatzinger, Marco Maier Maintainer: Patrick Mair Description: eRm fits Rasch models (RM), linear logistic test models (LLTM), rating scale model (RSM), linear rating scale models (LRSM), partial credit models (PCM), and linear partial credit models (LPCM). Missing values are allowed in the data matrix. Additional features are the ML estimation of the person parameters, Andersen's LR-test, item-specific Wald test, Martin-Loef-Test, nonparametric Monte-Carlo Tests, itemfit and personfit statistics including infit and outfit measures, various ICC and related plots, automated stepwise item elimination, simulation module for various binary data matrices. An eRm platform is provided at R-forge (see URL). License: GPL URL: http://r-forge.r-project.org/projects/erm/ Imports: graphics, stats, MASS, methods, Matrix Depends: R (>= 2.12.0), splines, methods, RaschSampler Suggests: lattice LazyData: yes LazyLoad: yes Packaged: 2011-06-05 11:22:28 UTC; hatz Repository: CRAN Date/Publication: 2011-06-06 07:03:22 eRm/data/0000755000176000001440000000000011572663323011746 5ustar ripleyuserseRm/data/rsmdat.rda0000744000176000001440000000030411572663323013726 0ustar ripleyusersSA0\!DO/Մ.g)L 4\69s9wK#~#痃#`܉>+='sbY;lu|y] |ܗ ]0>XOR][ͷ1}Boj1eRm/data/raschdat2.rda0000744000176000001440000000022711572663323014313 0ustar ripleyusers r0b```b`@& `bN.J,NHI,11@4zb%VX>!uĪ'~r%d }i\& +\%Nm>t@<:9%3H ;$? 
,xeRm/data/raschdat1.rda0000744000176000001440000000120111572663323014303 0ustar ripleyusersBZh91AY&SYYy?~;0P@eEAyNXHS *搈 a H@ %kM I@<:,F TTd)vF+(*bȠp [HqDiI0H0 jTt±AWXTNME&Z(rH8PJ*k%ԕN]rEܳ*TX`(CZ1ST+JM`[TY7^KLԻ8E6EF hZFIB4F<1A0&i KI*1 T prea\VL%;m֠ͼmT,KɣDAF^\A08n߹\Yx90NQF[W3(:w9E%M"0-&dh)EUfGQ߶E ᷱo`nCEQ@4 a=J{nt֩$RF7"fߦO 40pGI`G#bv1>@r<jbY!H  /'eRm/data/pcmdat2.rda0000744000176000001440000000073611572663323013777 0ustar ripleyusersBZh91AY&SYʑ  @.#L@@@8lD j4ѦLL`I$C@RPz=(0J ,~] ݴ>]rvEEtePrCUuĪm.x8a*xȀrd=K (c!#1pF@y*o)Gg˓vujq\EZˊvs%`sgN:j uX8gECWV۫n}hhHk$1,jM?tSqۼ;?eRm/data/lpcmdat.rda0000744000176000001440000000032311572663323014061 0ustar ripleyusersS0\JO\x0D|_` 8v3tΖi37Ś`;tX b|8mc_P ޞX]_*_q : V亃xFIw)@ &@@ @7keRm/data/lltmdat1.rda0000744000176000001440000000120211572663323014154 0ustar ripleyusersBZh91AY&SY0b~~{0P46P@P~p(04H&C0<hфdh@`HQhhфdh@`JhijV  TJ YQPjRXS 0I*!@ *Jؚ2@xXa<,R,"1d{,+ 1V)Qk*muUEPeEAd=l0Ao≕ؒ`LaR ԩ:'Lbh!i6LQ*qpUZKK*䋹gUҩ>iabQE' Vc+X(WJp9G{sn3Rd-i' j&t(P3L8exLzZMfQTֲh#Ƴ-p 岲g7P1-ܦh&-mnZ$PdXFM 8tD`$e(I [+;yyLiQ^ J:ζ6\يe"æUH `a2E TҔ"#fwC8m[TP#+ X{ORe5d$JH6]l{vq7Cs~9Cш Fǿ)tHkhw$S /eRm/data/llradat3.rda0000744000176000001440000000050411572663323014144 0ustar ripleyusersUKN@ }3D#!!qJKNu; r8EpT<2NDv?o͛_[?'M +Γ7k<#syzذr~p-ڪjUڲlR4Y:^7^ ckeRm/data/llraDat2.rda0000744000176000001440000000224311572663323014105 0ustar ripleyusers͘ODP*L[8: .& |\?#qRoWwWW]n^+7n̬o3zSMT<OWb|3OerOe˧Zf%,/{.>\ږXznQ_P .G<5mlhN[椑~3>V@IJ_ǭB_[XAvOqw{AM[I%_9}~--I='gPzh~8x'<]U.}މU0}[sgI-cScPS+mYgc>{_^13k|Ռk|878} C[y[4,N|_{zR9`BG|ӻi%c{#=5F%gZn}kd8@Uu/0'so"7 Nj.p,cNm̳mP#ކ5EZִRqq>g㩷j~|'ϐNI=rϽqFF07     :t#@O=zog?ovgV7[l 5loݿsf ss .̗a[ l%J`+V[ l]u[l]u[l]u[l}[l}[lC 6lC 6lC 6lc6lc6lw/2l47?N7b}H8eRm/data/llraDat1.rda0000744000176000001440000000501111572663323014100 0ustar ripleyusers՛MY# @n|cMׅ#D$?l=̳՜|HBbjm~?MD1܎}rO2>qo=J_~(kvNTW=z{bQ[ÿW/&ӻŚ1v Ȏ|h(AbwM3Jvcxc`_jlr}q1%M+uD=ծ5X}=d^_5nĜ(#6^q;s͆ݿ6&&{5;=Vq|Tokmk=.m}0?>2Z´v'?;:o iql<<Üؼqh=} |+_(osz;&>ȱeNz弧G9_VK%uO>aN7W3<%撿2ب8п 
ͅ99ݶ~^ճEnH>MyA/}5F{OoǜGn|2^+ލ66[Gy^sҵE?ǩ:ls`{N"n=K5)o\mng~ϣ.-{sgZ_ѳv{gQٺfCbXԛۻaMߏ\Wkz#x{q[} ʚ&+T\ֶDhz)kۈ]q*;[k֑mf`?r9<>w+}|FcV<6J'ufCz- xTulhYW81;rMG;ּlekiW XSm?rG1vǙWG'&S}o6a1vS5{7r9JAW֣~M^ױ&%jExil<]u~}o&̼YxLT0QDLT0SL3T0SB ,TPB{*S TO_ㇿ1eںf5o:][l9es}6O}hD;$!vHChD;&1юvLchD;&1NvJShD;%)NvJshD;'9ΉvNsA{GOhwpwwpwwp'N;w p'N;w 3pg;w p.]wܧkϸo?^^coJ? mFeRm/COPYRIGHTS0000744000176000001440000000041211572663323012451 0ustar ripleyusersCOPYRIGHT STATUS ---------------- This code is Copyright (C) 2009 Patrick Mair and Reinhold Hatzinger All code is subject to the GNU General Public License, Version 2. See the file COPYING for the exact conditions under which you may redistribute it. eRm/COPYING0000744000176000001440000004311011572663323012070 0ustar ripleyusers GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc. 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Library General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. 
Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. 
This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. 
b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. 
You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. 
If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. 
If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. 
The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA Also add information on how to contact you by electronic and paper mail. 
If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. , 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Library General Public License instead of this License.