gdata/inst/xls/ -- example spreadsheets bundled with the gdata package (binary payloads omitted; only the information recoverable from the tar headers and the embedded workbook strings is kept):

  * gdata/inst/xls/ExampleExcelFile_1904.xls   Excel 97-2004 workbook saved with the 1904 date system. Worksheets: "Sheet First", "Sheet Second", "Sheet with a very long name", "Sheet with initial text". Author: Gregory Warnes, University of Rochester.
  * gdata/inst/xls/latin-1.xlsx                Office Open XML workbook; judging by its name, it holds Latin-1 encoded test content.
  * gdata/inst/xls/ExampleExcelFile_1900.xls   The same example workbook saved with the 1900 date system (identical worksheets and author).
  * gdata/inst/xls/ExampleExcelFile_1900.xlsx  Office Open XML version of the 1900 date-system example workbook.

  Recoverable cell content includes column labels A through G (plus NA entries), row labels FirstRow, SecondRow, ThirdRow and FourthRow, a Factor column with levels Red, Black and Green, and, in "Sheet with initial text", two leading lines ("This line contains text that would need to be skipped to get to the data" and "This line too!") placed ahead of the data.
m D_~#޷h<>*-< x.E|aՇo [:-Nw-l='ſ6Y8?q@Olpgs-Oq_Aq=- Tt딗RCkh[ǬMtWP;C3̏fO O>_W Cψtv\+[Zg+ :I~¦<|"2o/>"JD7u%mltF;Sm )u[+x% @-k<'_>i~j>Ƌšw~|doi; O=WkM_|E_ZsgyoXFż{GVׂύ|NޫE扨qK&k۫(9 Fct/#K'CݟSzk$gExhtxIO_薲^֛t񵥼(iSOjZ?~;>[Y}|9GtoxT'C|5'#D^+g{_HѾx⿀}xDýgᆋt*<[.uc=-5[xi^ڒ[Z `O[~C.|@GY{x{Ğz%5I|CexEƧmYx/>>^Zhzi&ѧcچOTTqbHg?~$~ CxWu_>>^~:x#E|CwxG.ڕ֧h|kkiu.sN/|GyM?XxcET>MB;+ŵo>\ f̑0 x^_ ^s>)F;mkZw|GekBHku=R/-TiZur>!j: >_hVVox~t&>Oi@ωOoPo? _->5GIUxv>7_x}4o4E;Ck.o FOҼ"p |wxᦻDHе;;]6 ;<=qg5L&/[[K*L=g'7(7 M ?|MB|<€?8tkmxOeiM!7qV-KM𗄠O P:߬xAľ^9GK$|@-'㻽g+x}kX;W퍗kM2Ho[:_Iˣk>Ft~%H~x>:ֆ{|.>08!|AxoE/m56Nykv3X(-[u>$Zx~&|q__ 5x~.? ^ >'ly3O'm1xCz>M֗oYߦoOOk^!aai CŚmΙ [V_?`:w|]IgďigSNN'VS&]cQ2@ut]R1jV0ռ 8_@G"? h^2|sޭ⯂>n< ;zφ7Ŀ.uK;|lm.J4G|9"/OZ4?i8?|>ߋuRMOVk{_h:,|Ck/ckyr}u}[i $^y]N 0ִ(rnGC|o_x] s^#, ? o}OQ|=`,6_|ASYkLx+JdӼ1ٽRN{{#io"cլL,>./|<,|hӼE2F ~~$þ&KxYAntYx[Wsn5lr{N/ Xs쮄ρ2фz0ìf@ʋIe +,zL%P ?/?2O=@&PпǨ?6_oO!͟o<_jzυ5-K<5x5 4_[fM VuU,5OJuKk9w_˜G<7|a-} i}2/[iX5NH!] U5gY-,-cTnd(__ de +,zL%Pċ}fO |ikN:wk3x]id?-_|/oŻKO xk?O_&G95 Cx7CԴ5۽6Qga&fmY&DP{W&PпǨWY?CBK'1٦'_~G3'9z#'~<9e-i}7_g$,?9 hwCཷ-'Ž7Z~:2'µkG._x^|Cڒxfalk?'mx⧈3/e{ni}~ Ǣ={τor&>-ꟲ\}jPJWQ~/t2bKU]X~7~ϋZ4YMÞ(/9ĐO xL/Ƌf$ODOk>-ծ5$⏅>.MW֑O~^x?ᖷS5yN֡ZBg<+œh2Xu_P"[[ÿxw<)ouW x>&Sh|M/,o`<Yj u+mL5wtL3i~E>9>oL~M6ڴڦ'o,|XVn O_O7_5|G~xZj2@\w in _?l|<zj tƪb|q1_/E3'lnfSSӭteylڅӋKKhd||2l}<^3-ҧ*ʌVƗ: F5$7hŵKf~[4sPKtO8ԫR(*A(U;hR_Me +,zr&YK;;  ׂl,?x/ ׂl,?yȼ[ŏx4"7𧋴m Z#\xZU/麅͝؊{yI"`߇?_~:ߵׇ |P||E)k~{ x]|]|'ÿx <'erǂ4l-xZMw 4q6]Cj5[`ޯ 0y'b|[Y' BRT8qa&?3G&m9@s^)1- Jo7٧YC5Ƨ1eM\hמ*Y`` 'o?6i~:uG!Լcq/Z~^t߈? FMύE~+ ~~*|v1xm[Winn|3[?_ "[G665&×]|R߰wƿ hV6ſ >,Ii~E|UXtq_~d٬x:߀SV|=?4>'[{?b7]~؟/tw? m5oOĖϏ vFK(f/ Q_E&-:?i{1]`|k௏._ZWuΞU4;[uxGAwwFO:~Iwvxp{zw?!7_'ԭ/x]gz{uu-ԋ\Jqi1[X[eN_,Rai71x^YɹR*oQE%6YZpf8ǚU18eYV^INrrPN\ҦNNCgqa=CÆM.ƒY\M K<_J>_J>_J>_Jj^׾?gGz^v` /_uWR?cx@|Josi/ŕחℿq<~n;GGX mqE=|[],^_$Wǿ_įx @|M/~ϟ~ JѾx{h^-Ņψ<5>>^cGcjz_]^(k?|]{%_~!~|կ-?=/u;yr #⵵߀:$mw=mK>5Iy>~ɚW>o]cRifo Y[b.%j75kVo>o a,SZ&#}?,/ىe{@@@ߵSc_=~K_ /Woh?~(?[%m?}Kη6}FOZz~`H5Դߴ?l[+c/|k63@ž2<_|HҼ%k|=)C[mW|mk޻~aeOKxÚ-?4OZHҬ_xfu5).4^|HŹ _ {~>!x/Q>xRY?Ġ/o_<]~2/-1>?/~>izقW~!O];GvoPu@7Dj^9Ӿ0xFX =_UXk>)h C_ u]|CikVwwkڇ  }=߶o Czj+,z^4{#෈//>i? ho1kk`:xK?m+¹tθ6~_J?XQ{I/~"#ÞԬ/~ et-At1tuF-_Zм*;׆ꮻ1?L_LoZX] 5kZ,x'o:W ?|Oqig_ M;e:𾫥\Z*\9-j=& C,ڥ.2Xmc|!~!| 3x +( |- :FR[,höo$%Bj;mG?nOe)?Cmɔc &P6@?nOe)?CmɔWlIyxg7q6jxy_(Tw?矅R 8~J7xy_(TOm,xɏg*oR]Cl,muuq3g˿{ou[qP+g >X, !\=csğ'\N ?VL2>m?3y#PQO?s۞$::wPO?s۞$::wPO?s۞$::wP+>!03 N>PliRlr78 |O nx@?NΝnx@?NΝnx@?NΝxSX/ .o'Mi"8lPWO?s۞$::wPO?s۞$::wPO?s۞$::wP-c?)5ВcOQ"<8R2;O#8<:Нq;(;l!E)7_M/|)7_M/|)7_O~.b#eF|Y̾WM6y!O1eCê?4?FiχW*(KTP >)7_M/|4тRhEGS >)7_'4^G-[I><noݕi&eR" @τ4}Ak8=>x} ?g x[Z^w5kFm.lnnlw @P@_'¿?:PK!no`2xl/sharedStrings.xmlĕN0 #;xPg(bF\ Әӈ. :# |؎ӜN#r gC=^s^֭\=op[,6E菂895)5]dFw-L(r7bx):tW.x`c-,}dj {_t / 12C<ϻ&Rhڧe5%_PK!{֩xl/theme/theme1.xmlYMoE#F{oc'NGuرhF[x=N3' G$$DA\q@@VR~MԿ;3x8!zgkf㘡C"$I#^$!d:W$N4 w߹7TDb>DJKK2e,$nE<@#Jem)4 Pc`{w8!A=2̙<&JꅐfM T5BNd tY#9~#*@ K/AKחFF]et`~!کկm Y\n?aZ]'2Gc"ry Ǹ8E/ԡnޙ`5xsѵɷrΚ\xpK*y7NF~b\c|‰8'VD5N(B<=. 
|z]ң}'D;4L|6CGM|VoC Gao±eǬXE>%qm #8j>-~ ..R(zys^FnVԇ$*cߓqrB3's}'g7{t4Mf,t,Q;7ɛ1Ѝmmƍ` F$vNyad@}m }w^-/m z<}%s7CXWM->Y°t`Qә")Mi?F$@3ɌHK8ۙe/o}'U}f@bvyE/G9#sh [1/"ZXZըfZ#0b8k,ބ] xy Z4M0#w;(5!ʬDx@l7&vy ;H)Okള0i1?tr`d]v%b :j8mC88IolW;6kϬESSEÜq8RmcWYX%YWk:.beRB톖 $T`Vc XэGbCڞp` Z?My֬Ӕ ήcF8&%8 7V`Jm^ݍq7EWeRN)zie#EHk#BEPѰ#`A,SphStq"A%}g0fDzd#Q%uejCzRt s \oN)F{b:P3,!gdbKU z=uAYk;֌˫rYaϤpg?0jCoEAV_] 2H7HhIv>kj!f.6Q8/ٙ_۵Ȟ.QXQ݈B~$“tO$ɤ6F#YOG [BkFE6hM \\ I,&.(dh&1oYYG;&kŕ{%e (w䳨A񍁺]y 7x5R`8tMc3ݤPK!+?0 xl/styles.xmlWo0~G-`U*Bb Nc?"ZzM[JZ4 ;w;wvҷ ȣpfBJ-f>_go(=3)kĄ.EMf/__*0W$RhVJxSX'u5:iD3ihDCSVW˙T/%:΍ulbx; ̝m.E!s:N)KM$on9L^]P3gZߟq~Ordiš k>)ˡU?#S00 " Ǚa8Z3XT UiXˑ\1jy'l˱|`}l܃6X*+Nd) g0 e b'Ѯzr8<ޡJrd1ިAf k$ӡB38t~Kc Ox6ȎNׇ zv 7u5߭_Nѐݕgh}sp ߎOPK!5XdocProps/app.xml (o0ߑWgBr=iLTχsi%vdB_%Q&v?|Z_J ~)/gmȝ/n{{QDsǥ0k^`$I$jJ%[b imϝ"ӸW(ś`j1hq⢥m0џr4ƐBAR)>,VZMe97hdZMSPa ju*;b2EB$1ISHI+!j \AquTaZ!?Saq6=]i ; _tT v^xk8A%]4՞-zP7;n=u"[`&¨Q8v6ϋzSBĜq 8VU ~Qw7 |qVf~PK! "MmdocProps/core.xml (|QO0MP D|qߚi!m-!f&룬/ЦUH"E3gLg߱7vMFj[|'fk rUQٮRm= >jy| ER  !"#$%&'()*+,-./0123456789:;<=>?@ABCDHIJKLMNOPQR FnoFWorkbookSummaryInformation(GDocumentSummaryInformation8 T\pWarnes, Gregory Ba==d 08@"1Calibri1Calibri1Calibri1Calibri1Calibri1 Calibri1Calibri14Calibri1 Calibri1Calibri1Calibri1,8Calibri18Calibri18Calibri1>Calibri14Calibri1<Calibri1?Calibri1h8Cambria1Calibri1 Calibri1 Calibri1Calibri1& Courier New1& Courier New1& Courier New1Times"$"#,##0_);\("$"#,##0\)!"$"#,##0_);[Red]\("$"#,##0\)""$"#,##0.00_);\("$"#,##0.00\)'""$"#,##0.00_);[Red]\("$"#,##0.00\)7*2_("$"* #,##0_);_("$"* \(#,##0\);_("$"* "-"_);_(@_).))_(* #,##0_);_(* \(#,##0\);_(* "-"_);_(@_)?,:_("$"* #,##0.00_);_("$"* \(#,##0.00\);_("$"* "-"??_);_(@_)6+1_(* #,##0.00_);_(* \(#,##0.00\);_(* "-"??_);_(@_)"kr"\ #,##0;\-"kr"\ #,##0#"kr"\ #,##0;[Red]\-"kr"\ #,##0$"kr"\ #,##0.00;\-"kr"\ #,##0.00)$"kr"\ #,##0.00;[Red]\-"kr"\ #,##0.00>9_-"kr"\ * #,##0_-;\-"kr"\ * #,##0_-;_-"kr"\ * "-"_-;_-@_-,'_-* #,##0_-;\-* #,##0_-;_-* "-"_-;_-@_-FA_-"kr"\ * #,##0.00_-;\-"kr"\ * #,##0.00_-;_-"kr"\ * "-"??_-;_-@_-4/_-* #,##0.00_-;\-* #,##0.00_-;_-* "-"??_-;_-@_-                                                                       ff         P  P         `            a>  Q               ||F}-} _-;_-* "}-} _-;_-* "}-} _-;_-* "}-} _-;_-* "}-} _-;_-* "}-} _-;_-* "}-} _-;_-* "}-} _-;_-* "}-} _-;_-* "}-}  _-;_-* "}-}  _-;_-* "}-}  _-;_-* "}-}  _-;_-* "}-}  _-;_-* "}-} _-;_-* "}-} _-;_-* "}A} _-;_-* "ef"-"??_-;}A} _-;_-* "ef"-"??_-;}A} _-;_-* "ef"-"??_-;}A} _-;_-* "ef"-"??_-;}A} _-;_-* "ef"-"??_-;}A} _-;_-* "ef "-"??_-;}A} _-;_-* "L"-"??_-;}A} _-;_-* "L"-"??_-;}A} _-;_-* "L"-"??_-;}A} _-;_-* "L"-"??_-;}A} _-;_-* "L"-"??_-;}A} _-;_-* "L "-"??_-;}A} _-;_-* "23"-"??_-;}A} _-;_-* "23"-"??_-;}A} _-;_-* "23"-"??_-;}A} _-;_-* "23"-"??_-;}A}  _-;_-* "23"-"??_-;}A}! _-;_-* "23 "-"??_-;}A}" _-;_-* ""-"??_-;}A}# _-;_-* ""-"??_-;}A}$ _-;_-* ""-"??_-;}A}% _-;_-* ""-"??_-;}A}& _-;_-* ""-"??_-;}A}' _-;_-* " "-"??_-;}A}( _-;_-* ""-"??_-;}}) }_-;_-* ""-"??_-;   }}* _-;_-* ""-"??_-;??? ??? ??? ???}-}+ _-;_-* "}-}, _-;_-* "}-}- _-;_-* "}-}. _-;_-* "}-}/ _-;_-* "}A}0 a_-;_-* ""-"??_-;}A}1 _-;_-* ""-"??_-;}A}2 _-;_-* "?"-"??_-;}A}3 _-;_-* "23"-"??_-;}-}4 _-;_-* "}}6 ??v_-;_-* "̙"-"??_-;   }A}7 }_-;_-* ""-"??_-;}A}8 e_-;_-* ""-"??_-;}}9 _-;_-* ""-"??_-;   }}: ???_-;_-* ""-"??_-;??? ??? ??? ???}-}; _-;_-* "}-}< _-;_-* "}U}= _-;_-* ""-"??_-; }-}> _-;_-* "}-}5  _-;_-* "}-}?  
_-;_-* "}(}@ _-;_-* "}(}A _-;_-* "}(}B _-;_-* "}(}C _-;_-* "}(}D _-;_-* "}(}E _-;_-* " 20% - Accent1M 20% - Accent1 ef % 20% - Accent2M" 20% - Accent2 ef % 20% - Accent3M& 20% - Accent3 ef % 20% - Accent4M* 20% - Accent4 ef % 20% - Accent5M. 20% - Accent5 ef % 20% - Accent6M2 20% - Accent6  ef % 40% - Accent1M 40% - Accent1 L % 40% - Accent2M# 40% - Accent2 L渷 % 40% - Accent3M' 40% - Accent3 L % 40% - Accent4M+ 40% - Accent4 L % 40% - Accent5M/ 40% - Accent5 L % 40% - Accent6M3 40% - Accent6  Lմ % 60% - Accent1M 60% - Accent1 23 % 60% - Accent2M$ 60% - Accent2 23ږ % 60% - Accent3M( 60% - Accent3 23כ % 60% - Accent4M, 60% - Accent4 23 % 60% - Accent5M0 60% - Accent5 23 %! 60% - Accent6M4 60% - Accent6  23 % "Accent1AAccent1 O % #Accent2A!Accent2 PM % $Accent3A%Accent3 Y % %Accent4A)Accent4 d % &Accent5A-Accent5 K % 'Accent6A1Accent6  F %(Bad9Bad  %) Calculation Calculation  }% * Check Cell Check Cell  %????????? ???+ Comma,( Comma [0]-&Currency.. Currency [0]/Explanatory TextG5Explanatory Text % 0Good;Good  a%1 Heading 1G Heading 1 I}%O2 Heading 2G Heading 2 I}%?3 Heading 3G Heading 3 I}%234 Heading 49 Heading 4 I}%59 Hyperlink  % 6InputuInput ̙ ??v% 7 Linked CellK Linked Cell }% 8NeutralANeutral  e%3Normal % 9Noteb Note   :OutputwOutput  ???%????????? ???;$Percent <Title1Title I}% =TotalMTotal %OO> Warning Text? Warning Text %XTableStyleMedium9PivotStyleMedium48dq:Fc-2NWgFSWc-2NWgFSW̙̙3f3fff3f3f33333f33333\`KSheet1Sheet2 correlation"n aacute   small a, acute accentAacute   capital A, acute accentacirc   small a, circumflex accentAcirc   capital A, circumflex accentagrave   small a, grave accentAgrave   capital A, grave accentaring   small a, ringAring   capital A, ringatilde   small a, tildeAtilde   capital A, tildeauml  small a, dieresis or umlautmarkAuml  " capital A, dieresis or umlautmarkaelig   small ae diphthong (ligature)AElig   capital AE diphthong(ligature)ccedil   small c, cedillaCcedil   capital C, cedillaeth   small eth, IcelandicETH   capital Eth, Icelandiceacute   small e, acute accentEacute   capital E, acute accentecirc   small e, circumflex accentEcirc   capital E, circumflex accentegrave   small e, grave accentEgrave   capital E, grave accenteuml  small e, dieresis or umlautmarkEuml  " capital E, dieresis or umlautmarkiacute   small i, acute accentIacute   capital I, acute accenticirc   small i, circumflex accentIcirc   capital I, circumflex accentigrave   small i, grave accentIgrave   capital I, grave accentiuml  small i, dieresis or umlautmarkIuml  " capital I, dieresis or umlautmarkntilde   small n, tildeNtilde   capital N, tildeoacute   small o, acute accentOacute   capital O, acute accentocirc   small o, circumflex accentOcirc   capital O, circumflex accentograve   small o, grave accentOgrave   capital O, grave accentoslash   small o, slashOslash   capital O, slashotilde   small o, tildeOtilde   capital O, tildeouml  small o, dieresis or umlautmarkOuml  " capital O, dieresis or umlautmarkszlig  " small sharp s, German (szligaturethorn   small thorn, IcelandicTHORN   capital THORN, Icelandicuacute   small u, acute accentUacute   capital U, acute accentucirc   small u, circumflex accentUcirc   capital U, circumflex accentugrave   small u, grave accentUgrave   capital U, grave accentuuml  small u, dieresis or umlautmarkUuml  " capital U, dieresis or umlautmarkyacute   small y, acute accentYacute   capital Y, acute accentyuml  small y, dieresis or umlautmark3 !4o45RX555Y66/7}78k89_99<k::1;;<i<=^ z= =I  PK!pO[Content_Types].xmlj0Eжr(΢]yl#!MB;.n̨̽\A1&ҫ 
QWKvUbOX#&1`RT9<l#$>r `С-;c=1gMԯNDJ++2a,/$nECA6٥D-ʵ? dXiJF8,nx (MKoP(\HbWϿ})zg'8yV#x'˯?oOz3?^?O?~B,z_=yǿ~xPiL$M>7Ck9I#L nꎊ)f>\<|HL|3.ŅzI2O.&e>Ƈ8qBۙ5toG1sD1IB? }J^wi(#SKID ݠ1eBp{8yC]$f94^c>Y[XE>#{Sq c8 >;-&~ ..R(zy s^Fvԇ$*cߓqrB3' }'g7t4Kf"߇ފAV_] 2H7Hk;hIf;ZX_Fڲe}NM;SIvưõ[H5Dt(?]oQ|fNL{d׀O&kNa4%d8?L_H-Ak1h fx-jWBxlB -6j>},khxd׺rXg([x?eޓϲكkS1'|^=aѱnRvPK! ѐ'theme/theme/_rels/themeManager.xml.relsM 0wooӺ&݈Э5 6?$Q ,.aic21h:qm@RN;d`o7gK(M&$R(.1r'JЊT8V"AȻHu}|$b{P8g/]QAsم(#L[PK-!pO[Content_Types].xmlPK-!֧6 -_rels/.relsPK-!kytheme/theme/themeManager.xmlPK-!0ktheme/theme/theme1.xmlPK-! ѐ' theme/theme/_rels/themeManager.xml.relsPK] T ?9hz˃  dMbP?_*+%,Mo com.apple.print.PageFormat.FormattingPrinter com.apple.print.ticket.creator com.apple.jobticket com.apple.print.ticket.itemArray com.apple.print.PageFormat.FormattingPrinter com.apple.print.ticket.stateFlag 0 com.apple.print.PageFormat.PMHorizontalRes com.apple.print.ticket.creator com.apple.jobticket com.apple.print.ticket.itemArray com.apple.print.PageFormat.PMHorizontalRes 1200 com.apple.print.ticket.stateFlag 0 com.apple.print.PageFormat.PMOrientation com.apple.print.ticket.creator com.apple.jobticket com.apple.print.ticket.itemArray com.apple.print.PageFormat.PMOrientation 1 com.apple.print.ticket.stateFlag 0 com.apple.print.PageFormat.PMScaling com.apple.print.ticket.creator com.apple.jobticket com.apple.print.ticket.itemArray com.apple.print.PageFormat.PMScaling 1 com.apple.print.ticket.stateFlag 0 com.apple.print.PageFormat.PMVerticalRes com.apple.print.ticket.creator com.apple.jobticket com.apple.print.ticket.itemArray com.apple.print.PageFormat.PMVerticalRes 1200 com.apple.print.ticket.stateFlag 0 com.apple.print.PageFormat.PMVerticalScaling com.apple.print.ticket.creator com.apple.jobticket com.apple.print.ticket.itemArray com.apple.print.PageFormat.PMVerticalScaling 1 com.apple.print.ticket.stateFlag 0 com.apple.print.subTicket.paper_info_ticket PMPPDPaperCodeName com.apple.print.ticket.creator com.apple.jobticket com.apple.print.ticket.itemArray PMPPDPaperCodeName A4 com.apple.print.ticket.stateFlag 0 PMTiogaPaperName com.apple.print.ticket.creator com.apple.jobticket com.apple.print.ticket.itemArray PMTiogaPaperName iso-a4 com.apple.print.ticket.stateFlag 0 com.apple.print.PageFormat.PMAdjustedPageRect com.apple.print.ticket.creator com.apple.jobticket com.apple.print.ticket.itemArray com.apple.print.PageFormat.PMAdjustedPageRect 0 0 13050.000000000002 9316.6666666666679 com.apple.print.ticket.stateFlag 0 com.apple.print.PageFormat.PMAdjustedPaperRect com.apple.print.ticket.creator com.apple.jobticket com.apple.print.ticket.itemArray com.apple.print.PageFormat.PMAdjustedPaperRect -300 -300 13733.333333333334 9616.6666666666679 com.apple.print.ticket.stateFlag 0 com.apple.print.PaperInfo.PMCustomPaper com.apple.print.ticket.creator com.apple.jobticket com.apple.print.ticket.itemArray com.apple.print.PaperInfo.PMCustomPaper com.apple.print.ticket.stateFlag 0 com.apple.print.PaperInfo.PMPaperName com.apple.print.ticket.creator com.apple.jobticket com.apple.print.ticket.itemArray com.apple.print.PaperInfo.PMPaperName iso-a4 com.apple.print.ticket.stateFlag 0 com.apple.print.PaperInfo.PMUnadjustedPageRect com.apple.print.ticket.creator com.apple.jobticket com.apple.print.ticket.itemArray com.apple.print.PaperInfo.PMUnadjustedPageRect 0 0 783 559 com.apple.print.ticket.stateFlag 0 com.apple.print.PaperInfo.PMUnadjustedPaperRect com.apple.print.ticket.creator com.apple.jobticket com.apple.print.ticket.itemArray 
com.apple.print.PaperInfo.PMUnadjustedPaperRect -18 -18 824 577 com.apple.print.ticket.stateFlag 0 com.apple.print.PaperInfo.ppd.PMPaperName com.apple.print.ticket.creator com.apple.jobticket com.apple.print.ticket.itemArray com.apple.print.PaperInfo.ppd.PMPaperName A4 com.apple.print.ticket.stateFlag 0 com.apple.print.ticket.APIVersion 00.20 com.apple.print.ticket.type com.apple.print.PaperInfoTicket com.apple.print.ticket.APIVersion 00.20 com.apple.print.ticket.type com.apple.print.PageFormatTicket Mz2$e5%g{HH(dh " d??U } } *} #} *} *} *} U}  *} *} *} } *} *} *} *} *} } *} } !*} ""*} #$*} %%} &(*} ))} ***} ++} ,,*} --} ..*} //} 03*} 44U} 55*} 66U} 77*} 88U} 99*} ::U} ;;} <<*} ==?>>,>,>,>,>,>,>,>,>, >, >, >, >, >,>,>,,,,,,,,,,,,,,,,, B ? A B ? A B ? A B ? A" B% ?( A+ B. A1 A4 A7 A: A= A@ ?C AF BI ?L AO BR ?U AX B[ ?^ Aa !Bd "?g #Cj $Bm %?p &As 'Bv (@y )| * + , - . / 0 1 2 3 4 5 6 7 8 9 : ; < = E B B"BBBBBBBBBBBBBB E ? ?"?????????????? E A A"AAAAAAAAAAAAAA E B B "BBBBBBBBBBBBBB E ? ?"?????????????? E A A"AAAAAAAAAAAAAA E B B"BBBBBBBBBBBBBB E ? ?"?????????????? E A A" AAAAAAAAAAAAAA E B B" BBBBBBBBBBBBBB E ? ? " ?????????????? E! A" A#" AAAAAAAAAAAAAA E$ B% B&" BBBBBBBBBBBBBB E' ?( ?)"?????????????D E* A+ A,"AAAAAAAAAAAAAD E- B. B/"BBBBBBBBBBBBBD E0 A1 A2"AAAAAAAAAAAAAA E3 A4 A5"AAAAAAAAAAAAAA E6 A7 A8"AAAAAAAAAAAAAA E9 A: A;"AAAAAAAAAAAAAA E< A= A>"AAAAAAAAAAAAAA E? A@ AA"AAAAAAAAAAAAAA EB ?C ?D"?????????????? EE AF AG"AAAAAAAAAAAAAA EH BI BJ"BBBBBBBBBBBBBB EK ?L ?M"?????????????? EN AO AP"AAAAAAAAAAAAAA EQ BR BS"BBBBBBBBBBBBBB ET ?U ?V"?????????????? EW AX AY"AAAAAAAAAAAAAA EZ B[ B\"BBBBBBBBBBBBBBDldPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP ,!,",#,$,%,&,',(,),*,+,,,-,.,/,0,1,2,3,4,5,6,7,8,9,:,;,<,=,>, E] ?^ ?_" ?????????????? !E` !Aa !Ab"!AAAAAAAAAAAAAA "Ec "Bd "Be""BBBBBBBBBBBBBB #Ef #?g #?h"#?????????????? $Ei $Cj $Ck"$CCCCCCCCAAAAAA %El %Bm %Bn"%BBBBBBBBBBBBBB &Eo &?p &?q"&?????????????? 'Er 'As 'At"'AAAAAAAAAAAAAA (Eu (Bv (Bw"(BBBBBBBBBBBBBB )Ex )@y )@z")@@@@@@@@@@@@@@ *E{ *| *} +E~ + + ,E , , -E - - .E . . 
/E / / 0E 0 0 1E 1 1 2E 2 2 3E 3 3 4E 4 4 5E 5 5 6E 6 6 7E 7 7 8E 8 8 9E 9 9 :E : : ;E ; ; <E < < =E = = >E > >BXPPPPPPPPPP********************>@@ ggD T @  dMbP?_*+%,",??@U ,)!#$$BA>@@ggD  ՜.+,0 PX  ' Grendel evidensbasert psykologi Sheet1Sheet2  Worksheets F$Microsoft Excel 97 - 2004 Worksheet8FIBExcel.Sheet.8 Oh+'0PHPt 'Rolf Marvin Be LindgrenWarnes, GregoryMicrosoft Macintosh Excel@K}E@,z{GvPICTn7 HH7 7HH 777kZo{kZwsswkZwskZwso{wg9o{wkZ?o{swg9kZVVkZVVg9{sNso{skZAkZo{wckZsRF1{kZV^c{sF1kZGkZo{{cg9kZskZkZcVo{{so{RckZ&kZg9VZZVg9kZ{skZ"ckZwo{wswso{wo{+wwwwo?w{ްw9wNswwkZwEs{kZw?wZwwJR%)wk?V_R?{{Zsw?wco{wRNswk?k?{wo{sw's{wwbR?k?{wwkZo{wswsws/wwZo{ws{w{ްw_w{Rwo{Z^VVkZVg9RNsBkZ^Nswg9{Nso{o{NsccwwZ{wo{g9F1^BF1{F1JRJRsBg9wB-k{1F11!R!kZR{V{1={1=RNsJRZJR9Z=%)o{=-kVRRNsNs)J-kF1BkZ5cwws^kZwo{^ZVZkZV{VZkZkZRcwNsNs4{sR^VRVNsg9Nso{RZsRZg9kZsRZ^VkZNsg9Z^ZV{NsNscVkZRg9R^R^wVg9wkZo{wswsws+s{wo{wo_w{ްwkwwZ^ww)Jo{w{NsswR?{k?o_bo?fso_wwVg9w^Vg9ccR^VJRkZV=VF1ZwR?b{^^kR?fV_k^Ak?V_s_bZZZfV_kbbV_fZZo_R?o_V_gV_gZk?MZIo_ww{NswR^=c^9^{JRo{VF1JR{wfR?R+{V_bI^{V_ZZk?V_bbEE{ZVV_ZZbEZfRR?{Z{V_bk?^ZZwwwco{wso{Rcc^NskZg9^g9Zg9Vwwk?k?{kfk^$wo?fk?bss_{sw^s{bk?bo_f^wk?fkk?bsbs_bs_bwo_o?o_sw*kZo{wsbV}k=wsZV}Zws-s{wwo{ww{ްwgwZ^ wscg9g9ZkZkZRcg9^wsw{o{kZ{{sswswwwg9VR wo{g9BNs{cg9F1Vo{{w^g9{ Zo{NsckZVo{kZ{Zo{so{#w{ZskZo{kZg9c^o{^wkZkZZsZkZ{Zsg9o{kZkZg9Zo{ZkZco{wwo{ wo{^ZVg9kZkZZ{NsswkZkZ?{wo{sg9o{o{g9{g9{kZwwo{o{V{{o{wso{{g9kZso{g9o{ssg9wwg9wo{wo{kZwo{so{sso{wo{wo{o{wo{ws{ww{ww kZo{wo{wswsws;w{Vg9w5g9ww^{ްww{^ wcBso{cZNsRJRcNs{wZNs{o{wssRc{5{{kZ^JRg9Rso{wwo{R wRcF1F1F1RwBw=5{wJRNsF1-k=RNs=BVg9%)cVwkZ%)c'g9RVJRkZZF1RRVB=B1Ns5g9o{cB%)c)JJR^!ZNsNsJRJRNs)JNs5BF1Vww{co{ wso{Ro{RsZcsZ^ws^R{{{Rg9NsNs9Ro{Nso{cV^NscNsg9RkZcR'o{Rg9NscV^o{Ro{RNsZVZNssNs^ZVg9VZZNsg9Rg9VZg9VccZg9Vw kZo{wswsw^wss{ww{ްwWwwRg9wo{{ww{o{{sw{swf{Z^o_ww^VkZwkZg9NsR9c9Vc^=c^NsNswsV_w){Zo?V_MR?o_ZfZ^gVs_V?bIfR?bw^V_bgR?kZfZfV_fV_bEsww^o{NswkZcF1R^wRc^VJR^JR{{wkV_k({fb^ZwZZVZ^^MfV_sZkRk?EfNb^IZV_N^^sZZww{ckZwo{^ZJRRg9cg9ccwswRswo_bw){kk?o_o?{k?bk?o?o_fb^^^bo?o_b{bbo_f^sbo_bo_fs{o?k?ww/kZo{wR^wsbV}wsZV}^ws-s{ssw^w{ްwiwkZRwkZ9wwcZV^c^co{kZkZVw^{wwkZwkZwwwZ{wRcF1kZZcF1^B{=s=kZ{wcg9{so{s^g9ZkZsg9^wo{RkZkZZkZg9o{Zsg9^g9g9o{VcwV{g9o{g9g9o{Zo{ZkZcswwg9wso{R^RZZ{VZg9Z^w{o{kZ {{o{wkZkZckZ{g9{s g9{g9{kZ{sRwcsg9{o{g9kZkZsskZwkZso{o{{o{wo{o{wo{w9s{w^RkZw{wkZ{{o{w kZo{wo{wswsws+wZkZwg9w{ްwWwwVZwo{ZZF1ZNsg9ZF1ww{Z{Nso{o{Nscwwo{ZR wo{g9F1JRF1JRsRNsZwB-k{1F11!R!kZR{V{1= 1Vo{F1g9BF1JR-k{ww{ckZ wo{^ZZZ^scJRRswNsNs!{sR^VRVNsg9Nso{RZsRZg9kZwNss^R^g9^=Bw(kZo{wR^wswsw^ws1s{wo{cwV_w{ްwewsVZ ws-k{kZsso{kZswM{{o?o?bk?fo?wwg9cF1 w^V^VRB{5F1cg9=wV_b{Z^kNbV_bZAk?R?o_bZZEZbV_gRR?wCompObjbgdata/inst/xls/ExampleExcelFile_1904.xlsx0000644000175100001440000012427513003720417017714 0ustar hornikusersPK![2Ј[Content_Types].xml (̕N0M|c\^*=*]۴=+B #%rfk&J%+p^A' hnEFO;`h-xm'3g4vp:.6E!pA>8BhQ~A B}W 4M/PK!}T  _rels/.rels (MN0H} PnRwLibv!=ECU=͛f0={E tJFkZ$H6zLQl,(M?peKc<\ٻ`0chGaC|Uw<ԀjɶJ@ت` %TKhC& Ig/P|^{-Ƀ!x4$<z?GO)8.t;9,WjfgQ#)Sx|'KY}PK!8$xl/_rels/workbook.xml.rels (j0 }qvuzu`%M`{3ٖP !vյmb%)TBoXU[v[%+,\@;nݣJ;\y4O 5K (N4ɾd` a}uU5xP3-6G+F`JY:VV'zf}au6&/TƐgbdnhC`bMT[(yqLDbڍbJKaГߩPK!V,xl/workbook.xmlSn0?ےkX \?PE4ir)"̇@RU*COr3}'zerr<1Eu)ߟv9FUY|_l9>#P6Ǖs",$vk sF96bIDap }0F+8ȱnL;'^)$ C hc ,2_+E9qF6ē(œU ',&YdEInnVYrjG7ZӀu0Cbehߠ '؟afye\_ PK!sUxl/worksheets/sheet4.xmlVێ8}i;4iaGٝy6CNmhS9lX}9:S!/CM]h~TLHK+2T!PgJUKǑqF "%xR. 
`+%)+ͰӔtSAKee*Yg++x=U8IQ/K.!oF:ٌ,\TM!c5/֫]v$hGo;oFg0"GL0pU~IBBNIsR _g<y ,N~ً@"i,Q  M)Wϔ3VmʱL[*c8Mֱ%W!8]hTDoX  ڿ[9g@ ҿ܀En#g> ~ 8"G-yc?hIq׆*pK4Dԅ߈8⡟i7!=y" MV{z]Gx9 iH-lp]=d#ZbmHO-r@;ҐZR{Yu}ܮw}H;dڟaLUSM]K* Z~#^#+%i jiڍ\)^eCBsrͯߨ:U 1\(A(_ۊx/fy/%Pc,F'3;o8߸{; ?|+.nzvWe} J[I_tz,߀YKG4d/_PK! Wxl/worksheets/sheet2.xmlV]o0}`I MRhZZfi}8>/ {9uw,EegC@$YDg|HHG8e93W"03)i0!CV21w(8QYȲ3474Ì1 I}FrI8I"ز=tb-M<]8ަdOpXq/= !ЙZhԜG#NbXڳcyٟgJ=>4ڀ-c 9 8IIZ0|Ȋ&,3V*jMim +H&p:pD$T~eO Q /2!lS9 ċ9gGkbe D gn@XxAq#&mĦ,>FoXBv`[mr_(ʞέtn(>ͱ#ҲKoXUIeoTj:u.74]G7櫨GV Ԉu7сZ._TU3ש N:"eM;~wdtIf7s]7s0H6D}z. j0bK5Gg"X~Ϋ$>YC/(2)YV>&ps1czp>/&gyg\rLe8 t2uT4PO-9{!oP˙Xۃ[1r\k#wWԱwe84)$e~/<;Њ/9y֩K<TB-hTXKPK! JЧYxl/worksheets/sheet3.xmlXYo6~/j_ڇH$EQxdnx+t,mr )e+$8Y;O(|{Rڦ">\_LU]$|WUo΋J]UծeRB2/6I_*YN&ɶ+~G\fFm+P*ە{ogm*7;pqU;uMڻyEr_(Oҽo~E^w9c< 2@ڝB-w7}MзL=G*SkVj:\A&c:}7`D H`$'5Ro7Q=VS'٢Z pPq]}Ο*{XUk|}o ak!vB3P#d# Be絵՜JᔐX̀Z^[:<1+3D@h$fȴG=mocR SЕF .#q =p$<"D2քfz0kR *@EἶbB㼏\k۬ io41?~,PD BiS1+DLx 9Na!r׆?3GcL "b2g}8%[Cͱ5Jhp,LcYi<8 ,APZL::= {}-j7G?j)o@G^L'<}'Z!Ԃ52fˈ5;h{`SkmM~s p[ ó돀F|`}PK!; xl/worksheets/sheet1.xml]o8W`vюjJ`0c;496$)I&96>,vUZ*$uSz8H*Rg5 J|YP82ąRM`Y2-hE7'9Qp+6l%UڶgUոs{L7`kd4'R}S)DgP] S¢hCDh)+ { f@JGC 6 m#{i#pDr9=`#{AGCϬӳD wkAfrYsI2fzፉ#0] RJ]v)\bfr\<,E?&]~m4?(imt{T _BGd\3# h1T$#Ufg>+Z<HyDC{YUh#=$#yPWutn񈢧IaY~.8>=Q]="gӻQwm;ڻb)9n(ph} o.HĆ4ԇ*(ޘ͕╹,S8) 9W+Uq %Sdo8P_>[SRmgdFoO|ub?m:4j._Ԣ&ZVzZ>At@_\%hQXmPK ! IssdocProps/thumbnail.jpegJFIFHHICC_PROFILEappl mntrRGB XYZ   acspAPPLappl-appl descodscmxlcprt8wtptrXYZ0gXYZDbXYZXrTRClchad|,bTRClgTRCldescGeneric RGB ProfileGeneric RGB Profilemluc skSK(xhrHR(caES$ptBR&ukUA*frFU(Vaeobecn RGB profilGeneri ki RGB profilPerfil RGB genricPerfil RGB Genrico030;L=89 ?@>D09; RGBProfil gnrique RVBu( RGB r_icϏProfilo RGB genericoGenerisk RGB-profil| RGB \ |Obecn RGB profil RGB Allgemeines RGB-Profilltalnos RGB profilfn RGB cϏeNN, RGB 000000Profil RGB generic  RGBPerfil RGB genricoAlgemeen RGB-profielB#D%L RGB 1H'DGenel RGB ProfiliYleinen RGB-profiiliUniwersalny profil RGB1I89 ?@>D8;L RGBEDA *91JA RGB 'D9'EGeneric RGB ProfileGenerel RGB-beskrivelsetextCopyright 2007 Apple Inc., all rights reserved.XYZ RXYZ tM=XYZ Zus4XYZ (6curvsf32 B&ltExifMM*>F(iNHHCC }!1AQa"q2#BR$3br %&'()*456789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz w!1AQaq"2B #3Rbr $4%&'()*56789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz ?|<:n{O>iWi|$w,sU۷{ΓO5^gtvMJ粢)8YrJϪk tk/9WR$P<-oCk%v#R2Xy/zžSDOHŸ'^ Lz~*x{Ÿ94ޟ k#úkjRgi Vi=fm6ER^Q>'W7g闺? j$i+j66Xе(_j1}\ k7nz}žSDO@)?N0aw Gj}}`wf:Yb5(eug/2y7z)?NžSDO@(~ܚncQ;ῃzyGڌZ|~&˛״ &m.,-[WQ

/¿<-e+ރm?-/KH~#? tK=(u:og# {O?=y7o_H#K>:şv&Fiw6icnd[ev@0"hO|)u'GŸ'^ Lz~ _~)o)w&xW_7 ~xw}O ?fV׎u{o x_i~ִ[JV:J?V9Qx7F֯m.W?K'|߳7rKklžSDO@)?N>:־|1x_R5/jP/t[ ˝*[偤yfI;$žSDO@x/KO7D-)5N՗L>Oֱ 8n'y}#gƯzoƕ~%Ү}=YK>;]2!k+SFsK6}}p.>kpѨhb?x |]h>#|Qwwu[+rͭIK/=aQ@n`x{?ny]'_þ#JK%_CIV z@4xHҏ܉Y6G"3[+;8R@?;ItM!˫pº<7 2x[tNNѓUnat[FWι2 -`|IE㿃w1hK<eĭ0_,GLXg8gm)`x{?ny]'ro3M+mxдy#utx jkߌp>em3m;S|egš gu]7扬ޤw[Qle/ڡ|+g]eZW,u|EÛ?xz[]OQZ?q/<7adr€F5>5sό'|1Cmmt:__'?6wZ"GxPu-QhLe֠,I/P5xz[$0kȚ{x]Pꖀ;y]'o?Ky=kQPcZMMkyP|'}NC@4'jI!A4g;KTu$0+AC:.X|_~h/>)cÒ=O GU zjGŋlWX_>4GcË/H?ſ+>*x' CRY&% {ߌ? ~JW= (icB#n~iƝ3ş㏅ n;𯈾x-KN#qIZiBS 3k-I_ğ¿7l? CxoĞ4]|Xе{Xi ƿi=_@Žӿ^tV<;[Y_ þ)_tu]|^#MJ+߆"m)|l|A`gM.چU.mN +^t}2{MToZ+{Y&/6 ~S߁gJ/%S\ѴoT.-4sFG :VYJYix{^oS:9u~3 ӧ PTONҌ]$UuR.GWkŘnwW>`2cdC0ʴ."8ѫBtJRB*Fᚾ W|A?^.5|1Zk+^ >Oivyω=6kǶ-/SIb ;'.|1H_ hLR/-K-3s7-4pMHYVG$Oj^ )Yi/>+ex,oÿx9jz-vEѵr9cYծn-^H>Ԯ5ɩxQ|G|7().]}YZ+ZF oJS"c|zϿa ; UUw>ʪ7x}+ WTnW*2Zjh2P𶹠]Y?o4tqm&Ke3CK6H?fSEΟ|)~¾]Y>k7:4}Roh_mOlF"y$SW* 5_UP_j˭_>"|GV/|%.whE @|.]éiohVx Ꮙ?K/> 4%khmU:^aZ5 x.çjY_EҴZ~j>7|g{P~kj=X__|A_Z[]CM$S鰐oz֯/+w?5 UuNZ5֑XO]ŤVD"W* 5_UP_j @.ﭴf;.fyn %ډ\YEk2\I4T~h?F|csekᖗQx_~%xŶi>c_#HO5M2TJ5mjwZ"[ʠ?O .֑>#XY3n5I`Mcf>*(fwqOjqԥOcC|_5kV4 W_kMv?V)-`+/|2&uo?| h|'# r|o_߆F|O^xsQԼ3ZZMǢji)-etdY/:b,Qb*Sbg,=$|j)sw78<2v3/b*a<=7SڵɪnWӿ~CÑxGgxPσsj}4X,A}.jXB^ I4d G0Vv Ov Ş5%:}N/h Vˡ! n6Hn4CO//ËCNźgW'K_!_&M6W;4}F jo6#V@^,>/(au^,>/(au^,>/(au~C|,s-;~#jsHy'[Ix?>4G[-{7_֦}a}O_iPSIk2x I|yJ?Ə-yt_|Ho!OFkxwz޵o'(*[.i%d/~Z?kKj6xsS?| /(au^,>/(au^,>/(auqO\xyrxMw7EOKmVL{f´[5G͟xM ˧K(# jsď i>|m *[HweQ񞯬PmrN O /~ xl5Kh?|YTծ~xI״hl<19Ӽ7?O; oZԢ}vJpM_")WסO.V-ǃnف>?ߵ473]+֛}Rkok _x*Rm|a#;ZGj]/t c ^՛vxr:3\sie5xƚpxz\G~x7wۼ!g8 |7-nXj_yw: ;{^mFSM3Xe(xvc̞=?~OGuÖ/?|1?CŶqaamY|j}%e.Oj~"nmʥ?eH:7oZ΄߈_cxIax|BfwLO xq`h  ~|2_,o} Z­;Ptwc7c}Wm쌺{.Seܧ G4> oUCVH 9.u{_ItwB4 3TZ5Xg60u? GSO"_x x>|XoQ>,z.^#1{H<% ?0[H}I{JѬ8΁w7ωέ7ӬgkS˧κ'{'ƖZǂ4K{o2n/ iWC_k:u.MxZm^%-MIkalA*r @U?^<;?5oOmc燼A`CwklP_a9aph_onmss>2Iҿno+=,?MDРӾ&|>𽯂MK]G#e4%/-'~4sy=F[kV_?>-ڷ!qúƓφMx#s⛭K_d/&u>)|)no3]g/0' |%૯||оg]HOn!C3P kkx_8g +[ǚ/ Kx~wM%xv>Ѽs_Z>x RxZMT=ψ4pݿk 6jocCg]VOTKmLºR^ --S zoK@Gc4? 2N_\7:ONp<<|S>k{G.mU:mLe*@O+ uyy3 WGPj:y[[u~ h4 Ygim+I< {ҼqsH߶xc0 >\~$OsX3et55ZxC?-{O{o#!<{PBE~|p> /v / U5}xig,(K;[kwQ4"G4 ?j߅>-s05oٖmx[Go=xƱH{tSğO^4IjzoU􋛍Z+WZUͺ~Om+;N5Gß~x|4Ykgem2<f/whO~-ּyƛVZԹ.t|@:!2_ x-ehxX־o5o OizAk'G+^~7xM /2<-㯉74ּ=>j' ?:gV\Ǥx@]G 6Wržyľ "x ~!&ҵͧh? 9^hPx~oh_h>7+\~?á~UeAW=;^7{ƞ= ƺO4HutɴeP5?7OQ>xG5ޟ"v ^'u/xkjZ4Kkm=𧄼xOWNmS? V*&+j / C!AB?C-x+ KZC?íோι#?"~+2>k?x#uϏeҼY8"&R[P?ŷ<3_2/tO/-|k/ Ϣ1vk O ƾ%񇋿j|-|j?j'5KO93>-|M |[Iik9u ^ּo'tuOLLӮ~V_3EV7}ljfM/.cW?ǟ| R|3,<7xL曩y~g&$+|phkqG$;"Mdi 84_R_oZ60 |-B<9Zze0[h<}JO<+&j,_eNx#wǟW.v_ |Qυ ѣ>| %⿊QK[<9xׄo h|Q G~&xK_cJ[\' ?O3g<h5? 5~gƾx3Z.|UȦ- #O5_ u?|J? ? g}4K+?j4i:Ok-R x%|W׏>htմpfG}Bw+/#ͧ߇?&ѿfKg[_t{N ZK/}? |VYimxo[q'IMK_t ᷃g_<%5O:+Of|U׼Ch=@_6Koj@^g<&]NYe4$2ŝbI<@w!O |j-׆^xFod-/U_FmkGm:yuK,tPX}U%|_PƋvlt~}}O[7[[83۫0%T qCT~'>1x7FއHYP&-jw_ imgc\֤|h:oG/{+Sk ΰ}"." 2 $(? /I}4 ^0’h.a%\1x K?b@:~xޕx𧅼e}bx㗃~]\M[S~)h>-ӮiG>ޟ_a4Yi(e4-gDm;Tio442\}P /I}4 ^0’h.a%OۢZ4ڧ>cfo{?~$xS|F|M]=t|;|Uk>{xֺvy<|E;o_t~=/x[cm|6-⦓No ~&_Yj}.PMPßt#=g~kZXcjAՅݽ՝mo{oKxn  ^0’h.a%\1x K-h ^լ_ZҾ|1KFExڞc? j)4a y Bz헮[~3x^;1?<{~> |Yπ|Q{A]|'_ ,τugy.2i(1DU࿉o?IZ߉~OO|(|+ <)&/ǭ1/kk7v[JRԷ?_>EsxEWWK_v~ KAawxǞǗ3Aѣi  9B1'y ߆ׂ>![j=f߇<[tA|LŒE.gzDey?W? /G}4oC$WH/!(h? CzE{8dJ? ?'?䁜zd3{n<ᙎ̺&8|AyIX\* 誣 8R>0x>$Ѯ<)OmU#k_A]CxwωzvY)ux@Ѯ qjp}/5>[\i??cFO_ehUk!oqx|RZghoĭ?߃4|?𷊯1t/"GtJmIok}+wj1\esz+^^M(gMn^Pn>|Oy"!C7vթjVbt~ʺWo1==o¿f|N-- ˊ?|[Ce9|7/Zu=~<~*ψ 3OoRBGHӯ|#௉׌.,l}xxෂ#|K4Gď~hQ_ŷ17t #I^>Դygoŗkaq_|Ty/¿|c7]k~?,.u/@Ӽ953? 
m D_~#޷h<>*-< x.E|aՇo [:-Nw-l='ſ6Y8?q@Olpgs-Oq_Aq=- Tt딗RCkh[ǬMtWP;C3̏fO O>_W Cψtv\+[Zg+ :I~¦<|"2o/>"JD7u%mltF;Sm )u[+x% @-k<'_>i~j>Ƌšw~|doi; O=WkM_|E_ZsgyoXFż{GVׂύ|NޫE扨qK&k۫(9 Fct/#K'CݟSzk$gExhtxIO_薲^֛t񵥼(iSOjZ?~;>[Y}|9GtoxT'C|5'#D^+g{_HѾx⿀}xDýgᆋt*<[.uc=-5[xi^ڒ[Z `O[~C.|@GY{x{Ğz%5I|CexEƧmYx/>>^Zhzi&ѧcچOTTqbHg?~$~ CxWu_>>^~:x#E|CwxG.ڕ֧h|kkiu.sN/|GyM?XxcET>MB;+ŵo>\ f̑0 x^_ ^s>)F;mkZw|GekBHku=R/-TiZur>!j: >_hVVox~t&>Oi@ωOoPo? _->5GIUxv>7_x}4o4E;Ck.o FOҼ"p |wxᦻDHе;;]6 ;<=qg5L&/[[K*L=g'7(7 M ?|MB|<€?8tkmxOeiM!7qV-KM𗄠O P:߬xAľ^9GK$|@-'㻽g+x}kX;W퍗kM2Ho[:_Iˣk>Ft~%H~x>:ֆ{|.>08!|AxoE/m56Nykv3X(-[u>$Zx~&|q__ 5x~.? ^ >'ly3O'm1xCz>M֗oYߦoOOk^!aai CŚmΙ [V_?`:w|]IgďigSNN'VS&]cQ2@ut]R1jV0ռ 8_@G"? h^2|sޭ⯂>n< ;zφ7Ŀ.uK;|lm.J4G|9"/OZ4?i8?|>ߋuRMOVk{_h:,|Ck/ckyr}u}[i $^y]N 0ִ(rnGC|o_x] s^#, ? o}OQ|=`,6_|ASYkLx+JdӼ1ٽRN{{#io"cլL,>./|<,|hӼE2F ~~$þ&KxYAntYx[Wsn5lr{N/ Xs쮄ρ2фz0ìf@ʋIe +,zL%P ?/?2O=@&PпǨ?6_oO!͟o<_jzυ5-K<5x5 4_[fM VuU,5OJuKk9w_˜G<7|a-} i}2/[iX5NH!] U5gY-,-cTnd(__ de +,zL%Pċ}fO |ikN:wk3x]id?-_|/oŻKO xk?O_&G95 Cx7CԴ5۽6Qga&fmY&DP{W&PпǨWY?CBK'1٦'_~G3'9z#'~<9e-i}7_g$,?9 hwCཷ-'Ž7Z~:2'µkG._x^|Cڒxfalk?'mx⧈3/e{ni}~ Ǣ={τor&>-ꟲ\}jPJWQ~/t2bKU]X~7~ϋZ4YMÞ(/9ĐO xL/Ƌf$ODOk>-ծ5$⏅>.MW֑O~^x?ᖷS5yN֡ZBg<+œh2Xu_P"[[ÿxw<)ouW x>&Sh|M/,o`<Yj u+mL5wtL3i~E>9>oL~M6ڴڦ'o,|XVn O_O7_5|G~xZj2@\w in _?l|<zj tƪb|q1_/E3'lnfSSӭteylڅӋKKhd||2l}<^3-ҧ*ʌVƗ: F5$7hŵKf~[4sPKtO8ԫR(*A(U;hR_Me +,zr&YK;;  ׂl,?x/ ׂl,?yȼ[ŏx4"7𧋴m Z#\xZU/麅͝؊{yI"`߇?_~:ߵׇ |P||E)k~{ x]|]|'ÿx <'erǂ4l-xZMw 4q6]Cj5[`ޯ 0y'b|[Y' BRT8qa&?3G&m9@s^)1- Jo7٧YC5Ƨ1eM\hמ*Y`` 'o?6i~:uG!Լcq/Z~^t߈? FMύE~+ ~~*|v1xm[Winn|3[?_ "[G665&×]|R߰wƿ hV6ſ >,Ii~E|UXtq_~d٬x:߀SV|=?4>'[{?b7]~؟/tw? m5oOĖϏ vFK(f/ Q_E&-:?i{1]`|k௏._ZWuΞU4;[uxGAwwFO:~Iwvxp{zw?!7_'ԭ/x]gz{uu-ԋ\Jqi1[X[eN_,Rai71x^YɹR*oQE%6YZpf8ǚU18eYV^INrrPN\ҦNNCgqa=CÆM.ƒY\M K<_J>_J>_J>_Jj^׾?gGz^v` /_uWR?cx@|Josi/ŕחℿq<~n;GGX mqE=|[],^_$Wǿ_įx @|M/~ϟ~ JѾx{h^-Ņψ<5>>^cGcjz_]^(k?|]{%_~!~|կ-?=/u;yr #⵵߀:$mw=mK>5Iy>~ɚW>o]cRifo Y[b.%j75kVo>o a,SZ&#}?,/ىe{@@@ߵSc_=~K_ /Woh?~(?[%m?}Kη6}FOZz~`H5Դߴ?l[+c/|k63@ž2<_|HҼ%k|=)C[mW|mk޻~aeOKxÚ-?4OZHҬ_xfu5).4^|HŹ _ {~>!x/Q>xRY?Ġ/o_<]~2/-1>?/~>izقW~!O];GvoPu@7Dj^9Ӿ0xFX =_UXk>)h C_ u]|CikVwwkڇ  }=߶o Czj+,z^4{#෈//>i? ho1kk`:xK?m+¹tθ6~_J?XQ{I/~"#ÞԬ/~ et-At1tuF-_Zм*;׆ꮻ1?L_LoZX] 5kZ,x'o:W ?|Oqig_ M;e:𾫥\Z*\9-j=& C,ڥ.2Xmc|!~!| 3x +( |- :FR[,höo$%Bj;mG?nOe)?Cmɔc &P6@?nOe)?CmɔWlIyxg7q6jxy_(Tw?矅R 8~J7xy_(TOm,xɏg*oR]Cl,muuq3g˿{ou[qP+g >X, !\=csğ'\N ?VL2>m?3y#PQO?s۞$::wPO?s۞$::wPO?s۞$::wP+>!03 N>PliRlr78 |O nx@?NΝnx@?NΝnx@?NΝxSX/ .o'Mi"8lPWO?s۞$::wPO?s۞$::wPO?s۞$::wP-c?)5ВcOQ"<8R2;O#8<:Нq;(;l!E)7_M/|)7_M/|)7_O~.b#eF|Y̾WM6y!O1eCê?4?FiχW*(KTP >)7_M/|4тRhEGS >)7_'4^G-[I><noݕi&eR" @τ4}Ak8=>x} ?g x[Z^w5kFm.lnnlw @P@_'¿?:PK!no`2xl/sharedStrings.xmlĕN0 #;xPg(bF\ Әӈ. :# |؎ӜN#r gC=^s^֭\=op[,6E菂895)5]dFw-L(r7bx):tW.x`c-,}dj {_t / 12C<ϻ&Rhڧe5%_PK!{֩xl/theme/theme1.xmlYMoE#F{oc'NGuرhF[x=N3' G$$DA\q@@VR~MԿ;3x8!zgkf㘡C"$I#^$!d:W$N4 w߹7TDb>DJKK2e,$nE<@#Jem)4 Pc`{w8!A=2̙<&JꅐfM T5BNd tY#9~#*@ K/AKחFF]et`~!کկm Y\n?aZ]'2Gc"ry Ǹ8E/ԡnޙ`5xsѵɷrΚ\xpK*y7NF~b\c|‰8'VD5N(B<=. 
|z]ң}'D;4L|6CGM|VoC Gao±eǬXE>%qm #8j>-~ ..R(zys^FnVԇ$*cߓqrB3's}'g7{t4Mf,t,Q;7ɛ1Ѝmmƍ` F$vNyad@}m }w^-/m z<}%s7CXWM->Y°t`Qә")Mi?F$@3ɌHK8ۙe/o}'U}f@bvyE/G9#sh [1/"ZXZըfZ#0b8k,ބ] xy Z4M0#w;(5!ʬDx@l7&vy ;H)Okള0i1?tr`d]v%b :j8mC88IolW;6kϬESSEÜq8RmcWYX%YWk:.beRB톖 $T`Vc XэGbCڞp` Z?My֬Ӕ ήcF8&%8 7V`Jm^ݍq7EWeRN)zie#EHk#BEPѰ#`A,SphStq"A%}g0fDzd#Q%uejCzRt s \oN)F{b:P3,!gdbKU z=uAYk;֌˫rYaϤpg?0jCoEAV_] 2H7HhIv>kj!f.6Q8/ٙ_۵Ȟ.QXQ݈B~$“tO$ɤ6F#YOG [BkFE6hM \\ I,&.(dh&1oYYG;&kŕ{%e (w䳨A񍁺]y 7x5R`8tMc3ݤPK!H} xl/styles.xmlWKo1W|Oh&h٨CJIr1k/kŏכ@~}gl(]"1xHoZGjḯ[.|BMϮ(=3)kĄ.EMoo/- L=8Iv 40u󤮜`F%`ph& c{hy9JeDsc)gy&{Zֶآgy\'Skxzĝ/Sl9z >gZߟq~Ordiš >)nP*Kg ô_qfX{'_![e@Uqpk9O̰MwPK֡B g0!ݲ (#=wl9]P[%9޾AfHCwM-0 gq$mz=.eu'%z[{H,ͭa I&T (6K(Y5bR {e@Wh,{e1ee(Jݢ10E@ ƘjC0$NCJ*pPa#p3r3he8zp>$]oGw>,VZMe97hdZMSPa ju*;b2EB$1ISHI+!j \AquTaZ!?Saq6=]i ; _tT v^xk8A%]4՞-zP7;n=u"[`&¨Q8v6ϋzSBĜq 8VU ~Qw7 |qVf~PK!?5NmdocProps/core.xml (|_K0C{t@2-$w]ICMۭV&>&_ι$]E}6U#3!@WkW3JNFB:0h__Lhx֍m+0#ICZE069Ժ.샖0,R}5 لTaAGA0 )*)wl'Tm۠M.߶O/CU<0 6:P6vTK0)Ikjm|_.ܗ\82V9+ؠ<#?N0"˄{|v~U-bI3PK!#` )'xl/calcChain.xmldN0M|fªj( Bѷ.K>3gLg߱7vMFj[|'fk rUQٮRm= >jy| W_  !"#$%&'()*+,./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVYZ[\]^`RF?\X@WorkbookSTBook -%SSummaryInformation( =\pGregory Warmes Ba==]>8@"1Verdana1Verdana1Verdana1Verdana1Verdana1 Verdana1=Verdana"$"#,##0_);\("$"#,##0\)!"$"#,##0_);[Red]\("$"#,##0\)""$"#,##0.00_);\("$"#,##0.00\)'""$"#,##0.00_);[Red]\("$"#,##0.00\)7*2_("$"* #,##0_);_("$"* \(#,##0\);_("$"* "-"_);_(@_).))_(* #,##0_);_(* \(#,##0\);_(* "-"_);_(@_)?,:_("$"* #,##0.00_);_("$"* \(#,##0.00\);_("$"* "-"??_);_(@_)6+1_(* #,##0.00_);_(* \(#,##0.00\);_(* "-"??_);_(@_)                + ) , *     8dq:Fc-2NWgFSWc-2NWgFSW̙̙3f3fff3f3f33333f33333\`O iris.xls߮n Sepal.Length Sepal.Width Petal.Length Petal.WidthSpeciessetosa versicolor virginica N#O JPPas##   $ Be edwy[##JPPas*< *<*<*<@asp  3O0bUr4cN#O p  fOe3O~ȝHp  T1^ cc UOJȿJؿU0Hcc UOJȿ@OJdPSpP|\п>ǰ`JdHOOH4HOS$6JPPcȵN"0H̿ YԿ0,RZ c[_&N#JPPas   p$ Be J$ D   p'^ U   ۸@ȗH   = $q*3;DwMS  dMbP?_*+%M com.apple.print.PageFormat.PMHorizontalRes com.apple.print.ticket.creator com.apple.printingmanager com.apple.print.ticket.itemArray com.apple.print.PageFormat.PMHorizontalRes 300 com.apple.print.ticket.client com.apple.printingmanager com.apple.print.ticket.modDate 2008-03-25T00:44:05Z com.apple.print.ticket.stateFlag 0 com.apple.print.PageFormat.PMOrientation com.apple.print.ticket.creator com.apple.printingmanager com.apple.print.ticket.itemArray com.apple.print.PageFormat.PMOrientation 1 com.apple.print.ticket.client com.apple.printingmanager com.apple.print.ticket.modDate 2008-03-25T00:44:05Z com.apple.print.ticket.stateFlag 0 com.apple.print.PageFormat.PMScaling com.apple.print.ticket.creator com.apple.printingmanager com.apple.print.ticket.itemArray com.apple.print.PageFormat.PMScaling 1 com.apple.print.ticket.client com.apple.printingmanager com.apple.print.ticket.modDate 2008-03-25T00:43:56Z com.apple.print.ticket.stateFlag 0 com.apple.print.PageFormat.PMVerticalRes com.apple.print.ticket.creator com.apple.printingmanager com.apple.print.ticket.itemArray com.apple.print.PageFormat.PMVerticalRes 300 com.apple.print.ticket.client com.apple.printingmanager com.apple.print.ticket.modDate 2008-03-25T00:44:05Z com.apple.print.ticket.stateFlag 0 com.apple.print.PageFormat.PMVerticalScaling com.apple.print.ticket.creator com.apple.printingmanager com.apple.print.ticket.itemArray com.apple.print.PageFormat.PMVerticalScaling 1 com.apple.print.ticket.client com.apple.printingmanager com.apple.print.ticket.modDate 2008-03-25T00:43:56Z 
com.apple.print.ticket.stateFlag 0 com.apple.print.subTicket.paper_info_ticket com.apple.print.PageFormat.PMAdjustedPageRect com.apple.print.ticket.creator com.apple.printingmanager com.apple.print.ticket.itemArray com.apple.print.PageFormat.PMAdjustedPageRect 0.0 0.0 3058.3333333333335 2400 com.apple.print.ticket.client com.apple.printingmanager com.apple.print.ticket.modDate 2008-03-25T00:44:05Z com.apple.print.ticket.stateFlag 0 com.apple.print.PageFormat.PMAdjustedPaperRect com.apple.print.ticket.creator com.apple.printingmanager com.apple.print.ticket.itemArray com.apple.print.PageFormat.PMAdjustedPaperRect -75 -75 3225.0000000000005 2475 com.apple.print.ticket.client com.apple.printingmanager com.apple.print.ticket.modDate 2008-03-25T00:44:05Z com.apple.print.ticket.stateFlag 0 com.apple.print.PaperInfo.PMPaperName com.apple.print.ticket.creator com.apple.print.pm.PostScript com.apple.print.ticket.itemArray com.apple.print.PaperInfo.PMPaperName na-letter com.apple.print.ticket.client com.apple.print.pm.PostScript com.apple.print.ticket.modDate 2003-07-01T17:49:36Z com.apple.print.ticket.stateFlag 1 com.apple.print.PaperInfo.PMUnadjustedPageRect com.apple.print.ticket.creator com.apple.print.pm.PostScript com.apple.print.ticket.itemArray com.apple.print.PaperInfo.PMUnadjustedPageRect 0.0 0.0 734 576 com.apple.print.ticket.client com.apple.printingmanager com.apple.print.ticket.modDate 2008-03-25T00:43:56Z com.apple.print.ticket.stateFlag 0 com.apple.print.PaperInfo.PMUnadjustedPaperRect com.apple.print.ticket.creator com.apple.print.pm.PostScript com.apple.print.ticket.itemArray com.apple.print.PaperInfo.PMUnadjustedPaperRect -18 -18 774 594 com.apple.print.ticket.client com.apple.printingmanager com.apple.print.ticket.modDate 2008-03-25T00:43:56Z com.apple.print.ticket.stateFlag 0 com.apple.print.PaperInfo.ppd.PMPaperName com.apple.print.ticket.creator com.apple.print.pm.PostScript com.apple.print.ticket.itemArray com.apple.print.PaperInfo.ppd.PMPaperName US Letter com.apple.print.ticket.client com.apple.print.pm.PostScript com.apple.print.ticket.modDate 2003-07-01T17:49:36Z com.apple.print.ticket.stateFlag 1 com.apple.print.ticket.APIVersion 00.20 com.apple.print.ticket.privateLock com.apple.print.ticket.type com.apple.print.PaperInfoTicket com.apple.print.ticket.APIVersion 00.20 com.apple.print.ticket.privateLock com.apple.print.ticket.type com.apple.print.PageFormatTicket Mz,, ` g(HH(dh "d??U y|A rp.DiTtEpxpa+ x0   z @|A yyo..raePyPyz|A @{oLyc|AtzD @{     @ @a@4@ ~@@a@4@ `}@t@@`@4@ |@`s@?4@ @v@a@4@ @`x@@e@D@ |@@u@a@>@ @@u@?4@  {@ r@a@4@  ~@`s@?$@  @ w@?4@  ~@@u@d@4@  ~@@a@$@ z@@[@$@  @@^@4@ Ё@{@?D@ @`x@@`@D@ @ @a@>@ Ё@w@@e@>@ @w@?>@ @@u@@e@4@ @ w@?D@ |@v@?4@ @t@@e@? ~@@u@g@4@ @@d@4@ @@u@d@D@ @@ @?4@ @@@u@a@4@ `}@t@d@4@ ~@`s@d@4@ DlF000000000000000000000000000000 y!|A"# $rp.D%iTtE&pxp'a(+)x0 *+z,@|A-y.y/o.0.r1ae2Py3Py4z5|A67 89@{oL:yc;|A<t=zD> ?@{ @@u@?D@ !@@y@?$@ !"@@z@a@4@ "#~@`s@?4@ #$@t@^@4@ $%@ @@`@4@ %&~@v@a@$@ &'{@@@`@4@ '(@@u@?4@ ()@ @@`@>@ )*@l@@`@>@ *+{@t@@`@4@ +,@ @d@N@ ,-@w@g@D@ -.~@@a@>@ ./@w@d@4@ /0|@t@a@4@ 01@ w@?4@ 12@t@a@4@ 23@t@`}@a@ 34@t@@? 45@`s@~@? 56@l@@@`@ 67@q@|@? 78Ё@q@@@`@ 89@t@`}@d@ 9:~@n@t@? :;@ r@|@@`@ ;<@@p@`x@a@ <=@@ @? =>p@@@z@? >?@k@@? ?Dl0000000000000000000000000000000@yA|ABC Drp.DEiTtEFpxpGaH+Ix0 JKzL@|AMyNyOo.P.rQaeRPySPyTzU|AVW XY@{oLZyc[|A\t]zD^ _@{@@ r@`}@a@ @A@ r@v@@`@ AB@`s@{@a@ BC@@@? CD @p@y@? DE`@k@@? EF@@`x@[@ FGp@t@~@f@ GH@q@@@`@ HI@@~@? IJ@q@`}@^@ JK@ r@z@@`@ KL@@{@a@ LM@@q@~@a@ MN@@@@e@ NO@ r@@? OPЁ@@p@ @? PQ@n@w@[@ QR@n@ w@? 
RS @p@`x@^@ ST@p@@d@ TU@@@? UV@@u@@d@ VW@`s@`}@? WX@l@{@@`@ XY@@y@@`@ YZ@@@@`@ Z[@@p@{@^@ [\@@|@a@ \] @@p@@^@ ]^@l@t@? ^_@p@@z@@`@ _Dl0000000000000000000000000000000`ya|Abc drp.DeiTtEfpxpgah+ix0 jkzl@|Amynyoo.p.rqaerPysPytzu|Avw xy@{oLzyc{|A|t}zD~ @{`Ё@@@z@^@ `aЁ@ r@@z@@`@ ab`@ r@z@@`@ bc@@@[@ cdЁ@q@y@@`@ de@t@@@ ef @p@@g@ fg0@@p@@j@ gh@ r@@f@ hi@@ @k@ ij@@@@j@ jk~@@@@e@ klІ@ r@@f@ lm@@ @f@ mn@v@@@ no@t@@@ op@p@@g@ pq@@@@@j@ qrЁ@@@@ rs @q@@n@ st@t@@l@ tu@@@f@ uv@w@@k@ vw@@p@@l@ wx@k@@? xy@t@Ё@l@ yz@q@~@@ z{@q@@@ {|@p@~@f@ |}@t@Ё@@j@ }~@t@@f@ ~`@q@~@f@ Dl0000000000000000000000000000000y|A rp.DiTtEpxpa+x0 z@|Ayyo..raePyPyz|A@@~@f@ @q@@@j@ @@ @d@  @q@@g@ @w@@@ @q@@k@ @q@@? @@p@@a@ @@@l@ @@u@@n@ @`s@@f@ @@~@f@ @`s@@@j@ @`s@@n@ @`s@@l@  @p@@g@ @@t@p@l@ @t@Ё@@ @@@@l@ @@@g@ @@@@@ `@@u@@l@ p@@@f@ 20000000000000000000000>@@? uB\pGregory Warmes [B'b=]>8@"1Verdana1Verdana1Verdana1Verdana1Verdana1 Verdana1=Verdana"$"#,##0_);\("$"#,##0\)"$"#,##0_);[Red]\("$"#,##0\) "$"#,##0.00_);\("$"#,##0.00\)%""$"#,##0.00_);[Red]\("$"#,##0.00\)5*2_("$"* #,##0_);_("$"* \(#,##0\);_("$"* "-"_);_(@_),))_(* #,##0_);_(* \(#,##0\);_(* "-"_);_(@_)=,:_("$"* #,##0.00_);_("$"* \(#,##0.00\);_("$"* "-"??_);_(@_)4+1_(* #,##0.00_);_(* \(#,##0.00\);_(* "-"??_);_(@_)                + ) , *    Followed Hyperlink  Hyperlink8dq:Fc-2NWgFSWc-2NWgFSW̙3f3fff3f3f33333f33333Ciris.xls uB  m%.:8BKR  dMbP?_*+%M  com.apple.print.PageFormat.PMHorizontalRes com.apple.print.ticket.creator com.apple.printingmanager com.apple.print.ticket.itemArray com.apple.print.PageFormat.PMHorizontalRes 300 com.apple.print.ticket.client com.apple.printingmanager com.apple.print.ticket.modDate 2008-03-25T00:44:05Z com.apple.print.ticket.stateFlag 0 com.apple.print.PageFormat.PMOrientation com.apple.print.ticket.creator com.apple.printingmanager com.apple.print.ticket.itemArray com.apple.print.PageFormat.PMOrientation 1 com.apple.print.ticket.client com.apple.printingmanager com.apple.print.ticket.modDate 2008-03-25T00:44:05Z com.apple.print.ticket.stateFlag 0 com.apple.print.PageFormat.PMScaling com.apple.print.ticket.creator com.apple.printingmanager com.apple.print.ticket.itemArray com.apple.print.PageFormat.PMScaling 1 com.apple.print.ticket.client com.apple.printingmanager com.apple.print.ticket.modDate 2008-03-25T00:43:56Z com.apple.print.ticket.stateFlag 0 com.apple.print.PageFormat.PMVerticalRes com.apple.print.ticket.creator com.apple.printingmanager com.apple.print.ticket.i< temArray com.apple.print.PageFormat.PMVerticalRes 300 com.apple.print.ticket.client com.apple.printingmanager com.apple.print.ticket.modDate 2008-03-25T00:44:05Z com.apple.print.ticket.stateFlag 0 com.apple.print.PageFormat.PMVerticalScaling com.apple.print.ticket.creator com.apple.printingmanager com.apple.print.ticket.itemArray com.apple.print.PageFormat.PMVerticalScaling 1 com.apple.print.ticket.client com.apple.printingmanager com.apple.print.ticket.modDate 2008-03-25T00:43:56Z com.apple.print.ticket.stateFlag 0 com.apple.print.subTicket.paper_info_ticket com.apple.print.PageFormat.PMAdjustedPageRect com.apple.print.ticket.creator com.apple.printingmanager com.apple.print.ticket.itemArray com.apple.print.PageFormat.PMAdjustedPageRect 0.0 0.0 3058.3333333333335 2400 com.apple.print.ticket.client com.apple.printingmanager com.apple.print.ticket.modDate 2008-03-25T00:44:05Z com.apple.print.ticket.stateFlag 0 com.apple.print.PageFormat.PMAdjustedPaperRect com.apple.print.ticket.creator com.apple.printingmanager com.apple.print.ticket.itemArray 
com.apple.print.PageFormat.PMAdjustedPaperRect -75 <  -75 3225.0000000000005 2475 com.apple.print.ticket.client com.apple.printingmanager com.apple.print.ticket.modDate 2008-03-25T00:44:05Z com.apple.print.ticket.stateFlag 0 com.apple.print.PaperInfo.PMPaperName com.apple.print.ticket.creator com.apple.print.pm.PostScript com.apple.print.ticket.itemArray com.apple.print.PaperInfo.PMPaperName na-letter com.apple.print.ticket.client com.apple.print.pm.PostScript com.apple.print.ticket.modDate 2003-07-01T17:49:36Z com.apple.print.ticket.stateFlag 1 com.apple.print.PaperInfo.PMUnadjustedPageRect com.apple.print.ticket.creator com.apple.print.pm.PostScript com.apple.print.ticket.itemArray com.apple.print.PaperInfo.PMUnadjustedPageRect 0.0 0.0 734 576 com.apple.print.ticket.client com.apple.printingmanager com.apple.print.ticket.modDate 2008-03-25T00:43:56Z com.apple.print.ticket.stateFlag 0 com.apple.print.PaperInfo.PMUnadjustedPaperRect com.apple.print.ticket.creator com.apple.print.pm.PostScript com.apple.print.ticket.itemArray com.apple.print.PaperInfo.PMUnadjustedPaperRect -18 -18 774 594 com.apple.print.ticket.client com.apple.printingmanager com.apple.print.ticket.modDate 2008-03-25T00:43:56Z com.apple.print.ticket.stateFlag 0 com.apple.print.PaperInfo.ppd.PMPaperName com.apple.print.ticket.creator com.apple.print.pm.PostScript com.apple.print.ticket.itemArray com.apple.print.PaperInfo.ppd.PMPaperName US Letter com.apple.print.ticket.client com.apple.print.pm.PostScript com.apple.print.ticket.modDate 2003-07-01T17:49:36Z com.apple.print.ticket.stateFlag 1 com.apple.print.ticket.APIVersion 00.20 com.apple.print.ticket.privateLock com.apple.print.ticket.type com.apple.print.PaperInfoTicket com.apple.print.ticket.APIVersion 00.20 com.apple.print.ticket.privateLock com.apple.print.ticket.type com.apple.print.PageFormatTicket Mz,, ` g(HH(dh "d??U  |A rp.DiTtEPpa+  D   @|A o..rae00Ё|A  oLc|A (    Sepal.Length Sepal.Width Petal.Length Petal.WidthSpecies@ @a@4@setosa~@@a@4@setosa`}@t@@`@4@setosa|@`s@?4@setosa@v@a@4@setosa@`x@@e@D@setosa|@@u@a@>@setosa@@u@?4@setosa {@ r@a@4@ setosa ~@`s@?$@ setosa @ w@?4@ setosa ~@@u@d@4@ setosa ~@@a@$@ setosaz@@[@$@setosa @@^@4@setosaЁ@{@?D@setosa@`x@@`@D@setosa@ @a@>@setosaЁ@w@@e@>@setosa@w@?>@setosa@@u@@e@4@setosa@ w@?D@setosa|@v@?4@setosa@t@@e@?setosa~@@u@g@4@setosa@@d@4@setosa@@u@d@D@setosa@@ @?4@setosa@@@u@a@4@setosa`}@t@d@4@setosa~@`s@d@4@setosaD= lq444444444444444444444444444444 !|A"# $rp.D%iTtE&Pp'a(+) D*+,@|A-./o.0.r1ae20304Ё5|A67 89 oL:c;|A< =( > ?  @@u@?D@ setosa!@@y@?$@!setosa"@@z@a@4@"setosa#~@`s@?4@#setosa$@t@^@4@$setosa%@ @@`@4@%setosa&~@v@a@$@&setosa'{@@@`@4@'setosa(@@u@?4@(setosa)@ @@`@>@)setosa*@l@@`@>@*setosa+{@t@@`@4@+setosa,@ @d@N@,setosa-@w@g@D@-setosa.~@@a@>@.setosa/@w@d@4@/setosa0|@t@a@4@0setosa1@ w@?4@1setosa2@t@a@4@2setosa3@t@`}@a@3 versicolor4@t@@?4 versicolor5@`s@~@?5 versicolor6@l@@@`@6 versicolor7@q@|@?7 versicolor8Ё@q@@@`@8 versicolor9@t@`}@d@9 versicolor:~@n@t@?: versicolor;@ r@|@@`@; versicolor<@@p@`x@a@< versicolor=@@ @?= versicolor>p@@@z@?> versicolor?@k@@?? 
versicolorD4 l4444444444444444444888888888888@A|ABC Drp.DEiTtEFPpGaH+I DJKL@|AMNOo.P.rQaeR0S0TЁU|AVW XY oLZc[|A\ ]( ^ _ @@ r@`}@a@@ versicolorA@ r@v@@`@A versicolorB@`s@{@a@B versicolorC@@@?C versicolorD @p@y@?D versicolorE`@k@@?E versicolorF@@`x@[@F versicolorGp@t@~@f@G versicolorH@q@@@`@H versicolorI@@~@?I versicolorJ@q@`}@^@J versicolorK@ r@z@@`@K versicolorL@@{@a@L versicolorM@@q@~@a@M versicolorN@@@@e@N versicolorO@ r@@?O versicolorPЁ@@p@ @?P versicolorQ@n@w@[@Q versicolorR@n@ w@?R versicolorS @p@`x@^@S versicolorT@p@@d@T versicolorU@@@?U versicolorV@@u@@d@V versicolorW@`s@`}@?W versicolorX@l@{@@`@X versicolorY@@y@@`@Y versicolorZ@@@@`@Z versicolor[@@p@{@^@[ versicolor\@@|@a@\ versicolor] @@p@@^@] versicolor^@l@t@?^ versicolor_@p@@z@@`@_ versicolorD l8888888888888888888888888888888`a|Abc drp.DeiTtEfPpgah+i Djkl@|Amnoo.p.rqaer0s0tЁu|Avw xy oLzc{|A| }( ~  `Ё@@@z@^@` versicoloraЁ@ r@@z@@`@a versicolorb`@ r@z@@`@b versicolorc@@@[@c versicolordЁ@q@y@@`@d versicolore@t@@@e virginicaf @p@@g@f virginicag0@@p@@j@g virginicah@ r@@f@h virginicai@@ @k@i virginicaj@@@@j@j virginicak~@@@@e@k virginicalІ@ r@@f@l virginicam@@ @f@m virginican@v@@@n virginicao@t@@@o virginicap@p@@g@p virginicaq@@@@@j@q virginicarЁ@@@@r virginicas @q@@n@s virginicat@t@@l@t virginicau@@@f@u virginicav@w@@k@v virginicaw@@p@@l@w virginicax@k@@?x virginicay@t@Ё@l@y virginicaz@q@~@@z virginica{@q@@@{ virginica|@p@~@f@| virginica}@t@Ё@@j@} virginica~@t@@f@~ virginica`@q@~@f@ virginicaDe l8888877777777777777777777777777|A rp.DiTtEPpa+ D@|Ao..rae00Ё|A@@~@f@ virginica@q@@@j@ virginica@@ @d@ virginica @q@@g@ virginica@w@@@ virginica@q@@k@ virginica@q@@? virginica@@p@@a@ virginica@@@l@ virginica@@u@@n@ virginica@`s@@f@ virginica@@~@f@ virginica@`s@@@j@ virginica@`s@@n@ virginica@`s@@l@ virginica @p@@g@ virginica@@t@p@l@ virginica@t@Ё@@ virginica@@@@l@ virginica@@@g@ virginica@@@@@ virginica`@@u@@l@ virginicap@@@f@ virginica27777777777777777777777=]>8> ?   
!"#$%&'()*+,-./0124568 Oh+'0h @H`x 'Gregory WarmesGregory WarmesMicrosoft Excel@jG PICT G HHG G    ""("5"B"O"\"i"v"""""""""'"4"A"N"["h"u"""""""""")"6"C"P"]"j"w"""""""""*"7"D"Q"^"k"x"""""" E E  E F"e " " "F "d " " "E " "f " " " "g " " 1"" "" " " "" DDQQ^^ , Lucida Grande .+ 1 (* 2 *5* 3 7B* 4 DO* 5 Q\* 6 ^i* 7 kv* 8 x* 9 (10 * 11 * 12 * 13 * 14 * 15 G( <A)LB)JC)KD e, #Verdana# ( Sepal.Length G)K Sepal.Width)K Petal.Length)K Petal.Width(&Q5.1)K3.5)K1.4)K0.2(3Q4.9)V3)@1.4)K0.2(@Q4.7)K3.2)K1.3)K0.2(MQ4.6)K3.1)K1.5)K0.2(Z\5)@3.6)K1.4)K0.2(gQ5.4)K3.9)K1.7)K0.4(tQ4.6)K3.4)K1.4)K0.3(\5)@3.4)K1.5)K0.2(Q4.4)K2.9)K1.4)K0.2(Q4.9)K3.1)K1.5)K0.1(Q5.4)K3.7)K1.5)K0.2(Q4.8)K3.4)K1.6)K0.2(Q4.8)V3)@1.4)K0.1(Q4.3)V3)@1.1)K0.1 G!E E ((E 55E BBE OOE \\E iiE vvE E E E E E E E  ee   FF F  ՜.+,08@ `h p 'University of Rochester  iris.xls  WorksheetsFMicrosoft Excel Worksheet8FDocumentSummaryInformation83CompObj7XIBExcel.Sheet.5gdata/inst/xls/ExampleExcelFile.xls0000644000175100001440000017100013003720417017033 0ustar hornikusersࡱ> fw  !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdeijklmnopqrstuvR F~gWorkbook`SummaryInformation(hxDocumentSummaryInformation8\ `\pGregory Warnes Ba==d +8@"1kVerdana1kCalibri1kCalibri1kCalibri1kVerdana1kVerdana1 kVerdana19kVerdana1kVerdana1h8kCambria1,8kCalibri18kCalibri18kCalibri1Calibri1Calibri1<Calibri1>Calibri1?Calibri14Calibri14Calibri1 Calibri1 Calibri1Calibri1Calibri1 Calibri1 Verdana1Verdana"$"#,##0_);\("$"#,##0\)!"$"#,##0_);[Red]\("$"#,##0\)""$"#,##0.00_);\("$"#,##0.00\)'""$"#,##0.00_);[Red]\("$"#,##0.00\)7*2_("$"* #,##0_);_("$"* \(#,##0\);_("$"* "-"_);_(@_).))_(* #,##0_);_(* \(#,##0\);_(* "-"_);_(@_)?,:_("$"* #,##0.00_);_("$"* \(#,##0.00\);_("$"* "-"??_);_(@_)6+1_(* #,##0.00_);_(* \(#,##0.00\);_(* "-"??_);_(@_)yyyy\-mm\-dd\ hh:mm:ss.00                                                                      ff  +  )  ,  *     P  P        `            a>  "  @ @ "x@ @  "               ||Ib}-} 00\);_(*}-} 00\);_(*}-} 00\);_(*}-} 00\);_(*}-}; 00\);_(*}A}1 00\);_(*;_(@_) }A}2 00\);_(*?;_(@_) }A}3 00\);_(*23;_(@_) }-}4 00\);_(*}A}0 a00\);_(*;_(@_) }A}( 00\);_(*;_(@_) }A}7 e00\);_(*;_(@_) }}5 ??v00\);_(*̙;_(@_)   Mdz -!}4}}9 ???00\);_(*;_(@_) ??? ??? ???Mdz ???-!}4}}) }00\);_(*;_(@_)   Mdz -!}4}A}6 }00\);_(*;_(@_) }}* 00\);_(*;_(@_) ??? ??? ???Mdz ???-!}4}-}= 00\);_(*}x}800\);_(*;_(??? ??? ???Mdz}-}/ 00\);_(*}U}< 00\);_(*;_( }A}" 00\);_(*;_(}A} 00\);_(*ef;_(}A} 00\);_(*L;_(}A} 00\);_(*23;_(}A}# 00\);_(*;_(}A} 00\);_(*ef;_(}A} 00\);_(*L;_(}A} 00\);_(*23;_(}A}$ 00\);_(*;_(}A} 00\);_(*ef;_(}A} 00\);_(*L;_(}A} 00\);_(*23;_(}A}% 00\);_(*;_(}A} 00\);_(*ef;_(}A} 00\);_(*L;_(}A} 00\);_(*23;_(}A}& 00\);_(*;_(}A} 00\);_(*ef;_(}A} 00\);_(*L;_(}A}  00\);_(*23;_(}A}' 00\);_(* ;_(}A} 00\);_(*ef ;_(}A} 00\);_(*L ;_(}A}! 00\);_(*23 ;_( 20% - Accent1M 20% - Accent1 ef % 20% - Accent2M" 20% - Accent2 ef % 20% - Accent3M& 20% - Accent3 ef % 20% - Accent4M* 20% - Accent4 ef % 20% - Accent5M. 20% - Accent5 ef % 20% - Accent6M2 20% - Accent6  ef % 40% - Accent1M 40% - Accent1 L % 40% - Accent2M# 40% - Accent2 L渷 % 40% - Accent3M' 40% - Accent3 L % 40% - Accent4M+ 40% - Accent4 L % 40% - Accent5M/ 40% - Accent5 L % 40% - Accent6M3 40% - Accent6  Lմ % 60% - Accent1M 60% - Accent1 23 % 60% - Accent2M$ 60% - Accent2 23ږ % 60% - Accent3M( 60% - Accent3 23כ % 60% - Accent4M, 60% - Accent4 23 % 60% - Accent5M0 60% - Accent5 23 %! 
60% - Accent6M4 60% - Accent6  23 % "Accent1AAccent1 O % #Accent2A!Accent2 PM % $Accent3A%Accent3 Y % %Accent4A)Accent4 d % &Accent5A-Accent5 K % 'Accent6A1Accent6  F %(Bad9Bad  %) Calculation Calculation  }% * Check Cell Check Cell  %????????? ???+ Comma,( Comma [0]-&Currency.. Currency [0]/Explanatory TextG5Explanatory Text % 0Good;Good  a%1 Heading 1G Heading 1 I}%O2 Heading 2G Heading 2 I}%?3 Heading 3G Heading 3 I}%234 Heading 49 Heading 4 I}% 5InputuInput ̙ ??v% 6 Linked CellK Linked Cell }% 7NeutralANeutral  e%"Normal 8Noteb Note   9OutputwOutput  ???%????????? ???:$Percent ;Title1Title I}% <TotalMTotal %OO= Warning Text? Warning Text %XTableStyleMedium9PivotStyleMedium48dq:Fc-2NWgFSWc-2NWgFSW̙̙3f3fff3f3f33333f33333\`1= Sheet First_ Sheet Second$HSheet with a very long name!Sheet with initial text"5A 7B 7C 7{F 7?G 7NA 7D 7E  7NA 7NA 7FirstRow 7 SecondRow 7ThirdRow 7 FourthRow 7Factor 7Red 7Black 7Green 7A 7B 7C 7A 7NA 7HThis line contains text that would need to be skipped to get to the data 7This line too! 7"- f.M/]0 PK!pO[Content_Types].xmlj0Eжr(΢]yl#!MB;.n̨̽\A1&ҫ QWKvUbOX#&1`RT9<l#$>r `С-;c=1gDJKK2e,$nE<@#Jem)4 Pc`{w8!A=2̙<&JꅐfM T5BNd tY#9~#*@ K/AKחFF]et`~!کկm Y\n?aZ]'2Gc"ry Ǹ8E/ԡnޙ`5xsѵɷrΚ\xpK*y7NF~b\c|‰8'VD5N(B<=. |z]ң}'D;4L|6CGM|VoC Gao±eǬXE>%qm #8j>-~ ..R(zys^FnVԇ$*cߓqrB3's}'g7{t4Mf,t,Q;7ɛ1Ѝmmƍ` F$vNyad@}m }w^-/m z<}%s7CXWM->Y°t`Qә")Mi?F$@3ɌHK8ۙe/o}'U}f@bvyE/G9#sh [1/"ZXZըfZ#0b8k,ބ] xy Z4M0#w;(5!ʬDx@l7&vy ;H)Okള0i1?tr`d]v%b :j8mC88IolW;6kϬESSEÜq8RmcWYX%YWk:.beRB톖 $T`Vc XэGbCڞp` Z?My֬Ӕ ήcF8&%8 7V`Jm^ݍq7EWeRN)zie#EHk#BEPѰ#`A,SphStq"A%}g0fDzd#Q%uejCzRt s \oN)F{b:P3,!gdbKU z=uAYk;֌˫rYaϤpg?0jCoEAV_] 2H7HhIv>kj!f.6Q8/ٙ_۵Ȟ.QXQ݈B~$“tO$ɤ6F#YOG [BkFE6hM \\ I,&.(dh&1oYYG;&kŕ{%e (w䳨A񍁺]y 7x5R`8tMc3ݤPK! ѐ'theme/theme/_rels/themeManager.xml.relsM 0wooӺ&݈Э5 6?$Q ,.aic21h:qm@RN;d`o7gK(M&$R(.1r'JЊT8V"AȻHu}|$b{P8g/]QAsم(#L[PK-!pO[Content_Types].xmlPK-!֧6 -_rels/.relsPK-!kytheme/theme/themeManager.xmlPK-!{֩theme/theme/theme1.xmlPK-! 
[gdata/inst/xls/ExampleExcelFile_1904.xls, continued -- binary worksheet streams for the remaining sheets, each preceded by the same embedded Apple print ticket (PageFormat/PaperInfo plist, US Letter, 300 dpi). The document-summary stream names the four sheets "Sheet First", "Sheet Second", "Sheet with a very long name" and "Sheet with initial text", lists the author as Gregory Warnes, University of Rochester, and identifies the file as a "Microsoft Excel 97 - 2004 Worksheet"; a PICT preview and CompObj trailer follow. Binary payload omitted.]
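The workbook above (like the other spreadsheets under gdata/inst/xls/) is shipped purely as example data for the package's spreadsheet readers. A minimal illustrative sketch -- not part of the package sources -- of querying and importing it from R, assuming gdata is installed and a perl interpreter is on the PATH (which read.xls() and the sheet helpers require):

    library(gdata)

    ## installed location of the example workbook described above
    xls <- file.path(path.package("gdata"), "xls", "ExampleExcelFile_1904.xls")

    sheetCount(xls)                               # number of worksheets
    sheetNames(xls)                               # their names
    dat <- read.xls(xls, sheet = "Sheet Second")  # import one sheet, by name, as a data.frame
    head(dat)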
gdata/inst/xls/wide.xls
[Binary Excel 97-2004 workbook by Gregory Warnes: workbook globals (fonts, number formats, cell styles, an embedded Office theme) and a single worksheet "Sheet1" filled with a wide block of numeric data, followed by a document summary ("Gregory R. Warnes Consulting"), a PICT preview and a CompObj trailer. Binary payload omitted.]
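wide.xls evidently holds a single, very wide sheet of numbers (presumably example input for reading spreadsheets with many columns). A quick illustrative check from R, under the same assumptions as the sketch above:

    wide <- read.xls(file.path(path.package("gdata"), "xls", "wide.xls"))
    dim(wide)   # how many rows and columns came back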
gdata/inst/xls/wide.xlsx
[Binary Excel 2007+ workbook (zip archive) with a single worksheet: [Content_Types].xml, xl/workbook.xml, docProps/thumbnail.jpeg, xl/theme/theme1.xml, xl/worksheets/sheet1.xml, xl/styles.xml, xl/calcChain.xml, docProps/app.xml, docProps/core.xml. Binary payload omitted.]

gdata/inst/xls/ExampleExcelFile.xlsx
[Binary Excel 2007+ workbook (zip archive): [Content_Types].xml, xl/workbook.xml, several xl/worksheets/sheetN.xml members, xl/sharedStrings.xml, docProps/thumbnail.jpeg, xl/theme/theme1.xml, xl/styles.xml, docProps/app.xml, docProps/core.xml. Binary payload omitted.]
[binary data omitted: compressed spreadsheet archive members xl/styles.xml, docProps/app.xml and docProps/core.xml]

($volume,$directories,$whoami) = File::Spec->splitpath( $0 );

if($whoami eq "sheetCount.pl")
  { $text="number"; }
elsif ($whoami eq "sheetNames.pl")
  { $text="names"; }
else
  { die("This script is named '$whoami', but must be named either 'sheetCount.pl' or 'sheetNames.pl' to function properly.\n"); }

##
## Usage information
##
$usage = <<EOF;
Output is the $text of sheets in the excel file.
EOF

##
## parse arguments
##
if(!defined($ARGV[0]))
  {
    print $usage;
    exit 1;
  }

my $fileName=$ARGV[0];

##
## open spreadsheet
##
open(FH, "<$fileName") or die "Unable to open file '$fileName'.\n";
close(FH);

my $oBook;

## First try as an Excel 2007+ 'xml' file
eval
  {
    local $SIG{__WARN__} = sub {};
    $parser = Spreadsheet::ParseXLSX -> new();
    $oBook = $parser->parse ($ARGV[0]);
  };

## Then Excel 97-2004 Format
if ( !defined $oBook )
  {
    $parser = Spreadsheet::ParseExcel -> new();
    $oBook = $parser->parse($ARGV[0]) or die "Error parsing file '$ARGV[0]'.\n";
  }

if($whoami eq "sheetCount.pl")
  {
    print $oBook->{SheetCount} , "\n";
  }
elsif ($whoami eq "sheetNames.pl")
  {
    ## Get list of all worksheets in the file
    my @sheetlist = (@{$oBook->{Worksheet}});
    foreach my $sheet (@sheetlist)
      {
        print "\"$sheet->{Name}\" ";
      }
    print "\n";
  }
else
  {
    die("This script is named '$whoami', but must be named either 'sheetCount.pl' or 'sheetNames.pl' to function properly.\n");
  }
gdata/inst/perl/sheetCount.pl0000644000175100001440000000415313003720416015744 0ustar hornikusers#!/usr/bin/perl
BEGIN {
    use File::Basename;
    # Add current path to perl library search path
    use lib dirname($0);
}

use strict;

##
# Try to load the modules we need
##
require 'module_tools.pl';
my( $HAS_Spreadsheet_ParseExcel, $HAS_Compress_Raw_Zlib, $HAS_Spreadsheet_ParseXLSX ) = check_modules_and_notify();

use File::Spec::Functions;

# declare some local variables
my($row, $col, $sheet, $cell, $usage, $filename, $volume, $directories, $whoami, $basename, $sheetnumber, $text, $parser);

##
## Figure out whether I'm called as sheetCount.pl or sheetNames.pl
##
($volume,$directories,$whoami) = File::Spec->splitpath( $0 );

if($whoami eq "sheetCount.pl")
  { $text="number"; }
elsif ($whoami eq "sheetNames.pl")
  { $text="names"; }
else
  { die("This script is named '$whoami', but must be named either 'sheetCount.pl' or 'sheetNames.pl' to function properly.\n"); }

##
## Usage information
##
$usage = <<EOF;
Output is the $text of sheets in the excel file.
EOF ## ## parse arguments ## if(!defined($ARGV[0])) { print $usage; exit 1; } my $fileName=$ARGV[0]; ## ## open spreadsheet ## open(FH, "<$fileName") or die "Unable to open file '$fileName'.\n"; close(FH); my $oBook; ## First try as a Excel 2007+ 'xml' file ## First try as a Excel 2007+ 'xml' file eval { local $SIG{__WARN__} = sub {}; $parser = Spreadsheet::ParseXLSX -> new(); $oBook = $parser->parse ($ARGV[0]); }; ## Then Excel 97-2004 Format if ( !defined $oBook ) { $parser = Spreadsheet::ParseExcel -> new(); $oBook = $parser->parse($ARGV[0]) or \ die "Error parsing file '$ARGV[0]'.\n"; } if($whoami eq "sheetCount.pl") { print $oBook->{SheetCount} , "\n"; } elsif ($whoami eq "sheetNames.pl") { ## Get list all worksheets in the file my @sheetlist = (@{$oBook->{Worksheet}}); foreach my $sheet (@sheetlist) { print "\"$sheet->{Name}\" "; } print "\n"; } else { die("This script is named '$whoami', but must be named either 'sheetCount.pl' or 'sheetNames.pl' to function properly.\n"); } gdata/inst/perl/XML/0000755000175100001440000000000013003720416013723 5ustar hornikusersgdata/inst/perl/XML/Twig.pm0000644000175100001440000164254513003720416015214 0ustar hornikusersuse strict; use warnings; # > perl 5.5 # This is created in the caller's space # I realize (now!) that it's not clean, but it's been there for 10+ years... BEGIN { sub ::PCDATA { '#PCDATA' } ## no critic (Subroutines::ProhibitNestedSubs); sub ::CDATA { '#CDATA' } ## no critic (Subroutines::ProhibitNestedSubs); } use UNIVERSAL(); ## if a sub returns a scalar, it better not bloody disappear in list context ## no critic (Subroutines::ProhibitExplicitReturnUndef); my $perl_version; my $parser_version; ###################################################################### package XML::Twig; ###################################################################### require 5.004; use utf8; # > perl 5.5 use vars qw($VERSION @ISA %valid_option); use Carp; use File::Spec; use File::Basename; *isa= *UNIVERSAL::isa; # flag, set to true if the weaken sub is available use vars qw( $weakrefs); # flag set to true if the version of expat seems to be 1.95.2, which has annoying bugs # wrt doctype handling. This is global for performance reasons. my $expat_1_95_2=0; # a slight non-xml mod: # is allowed as a first character my $REG_TAG_FIRST_LETTER; #$REG_TAG_FIRST_LETTER= q{(?:[^\W\d]|[:#_])}; # < perl 5.6 - does not work for leading non-ascii letters $REG_TAG_FIRST_LETTER= q{(?:[[:alpha:]:#_])}; # >= perl 5.6 my $REG_TAG_LETTER= q{(?:[\w_.-]*)}; # a simple name (no colon) my $REG_NAME_TOKEN= qq{(?:$REG_TAG_FIRST_LETTER$REG_TAG_LETTER*)}; # a tag name, possibly including namespace my $REG_NAME= qq{(?:(?:$REG_NAME_TOKEN:)?$REG_NAME_TOKEN)}; # tag name (leading # allowed) # first line is for perl 5.005, second line for modern perl, that accept character classes my $REG_TAG_NAME=$REG_NAME; # name or wildcard (* or '') (leading # allowed) my $REG_NAME_W = qq{(?:$REG_NAME|[*])}; # class and ids are deliberatly permissive my $REG_NTOKEN_FIRST_LETTER; #$REG_NTOKEN_FIRST_LETTER= q{(?:[^\W\d]|[:_])}; # < perl 5.6 - does not work for leading non-ascii letters $REG_NTOKEN_FIRST_LETTER= q{(?:[[:alpha:]:_])}; # >= perl 5.6 my $REG_NTOKEN_LETTER= q{(?:[\w_:.-]*)}; my $REG_NTOKEN= qq{(?:$REG_NTOKEN_FIRST_LETTER$REG_NTOKEN_LETTER*)}; my $REG_CLASS = $REG_NTOKEN; my $REG_ID = $REG_NTOKEN; # allow # (private elt) * . *. 
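# Illustrative sketch, not used anywhere in the module: a few sample strings
# that the name patterns defined above are meant to accept; the tag names
# themselves are made up.
sub _sketch_name_patterns
{ # plain, namespaced, dotted/dashed and '#'-prefixed (private) names all match $REG_NAME
  my @names    = grep { m{^$REG_NAME$} }   'para', 'svg:rect', 'foo.bar-1', '#PCDATA';
  # '*' (wildcard) is only accepted by the wider $REG_NAME_W pattern
  my @wildcard = grep { m{^$REG_NAME_W$} } '*', 'para';
  return( \@names, \@wildcard);
}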
# *# my $REG_TAG_PART= qq{(?:$REG_NAME_W(?:[.]$REG_CLASS|[#]$REG_ID)?|[.]$REG_CLASS)}; my $REG_REGEXP = q{(?:/(?:[^\\/]|\\.)*/[eimsox]*)}; # regexp my $REG_MATCH = q{[!=]~}; # match (or not) my $REG_STRING = q{(?:"(?:[^\\"]|\\.)*"|'(?:[^\\']|\\.)*')}; # string (simple or double quoted) my $REG_NUMBER = q{(?:\d+(?:\.\d*)?|\.\d+)}; # number my $REG_VALUE = qq{(?:$REG_STRING|$REG_NUMBER)}; # value my $REG_OP = q{==|!=|>|<|>=|<=|eq|ne|lt|gt|le|ge|=}; # op my $REG_FUNCTION = q{(?:string|text)\(\s*\)}; my $REG_STRING_ARG = qq{(?:string|text)\\(\\s*$REG_NAME_W\\s*\\)}; my $REG_COMP = q{(?:>=|<=|!=|<|>|=)}; my $REG_TAG_IN_PREDICATE= $REG_NAME_W . q{(?=\s*(?i:and\b|or\b|\]|$))}; # keys in the context stack, chosen not to interfere with att names, even private (#-prefixed) ones my $ST_TAG = '##tag'; my $ST_ELT = '##elt'; my $ST_NS = '##ns' ; # used in the handler trigger code my $REG_NAKED_PREDICATE= qq{((?:"[^"]*"|'[^']*'|$REG_STRING_ARG|$REG_FUNCTION|\@$REG_NAME_W|$REG_MATCH\\s*$REG_REGEXP|[\\s\\d><=!()+.-]|(?i:and)|(?i:or)|$REG_TAG_IN_PREDICATE)*)}; my $REG_PREDICATE= qq{\\[$REG_NAKED_PREDICATE\\]}; # not all axis, only supported ones (in get_xpath) my @supported_axis= ( 'ancestor', 'ancestor-or-self', 'child', 'descendant', 'descendant-or-self', 'following', 'following-sibling', 'parent', 'preceding', 'preceding-sibling', 'self' ); my $REG_AXIS = "(?:" . join( '|', @supported_axis) .")"; # only used in the "xpath"engine (for get_xpath/findnodes) for now my $REG_PREDICATE_ALT = qr{\[(?:(?:string\(\s*\)|\@$REG_TAG_NAME)\s*$REG_MATCH\s*$REG_REGEXP\s*|[^\]]*)\]}; # used to convert XPath tests on strings to the perl equivalent my %PERL_ALPHA_TEST= ( '=' => ' eq ', '!=' => ' ne ', '>' => ' gt ', '>=' => ' ge ', '<' => ' lt ', '<=' => ' le '); my( $FB_HTMLCREF, $FB_XMLCREF); my $NO_WARNINGS= $perl_version >= 5.006 ? 
'no warnings' : 'local $^W=0'; # default namespaces, both ways my %DEFAULT_NS= ( xml => "http://www.w3.org/XML/1998/namespace", xmlns => "http://www.w3.org/2000/xmlns/", ); my %DEFAULT_URI2NS= map { $DEFAULT_NS{$_} => $_ } keys %DEFAULT_NS; # constants my( $PCDATA, $CDATA, $PI, $COMMENT, $ENT, $ELT, $TEXT, $ASIS, $EMPTY, $BUFSIZE); # used when an HTML doc only has a PUBLIC declaration, to generate the SYSTEM one # this should really be done by HTML::TreeBuilder, but as of HTML::TreeBuilder 4.2 it isn't # the various declarations are taken from http://en.wikipedia.org/wiki/Document_Type_Declaration my %HTML_DECL= ( "-//W3C//DTD HTML 4.0 Transitional//EN" => "http://www.w3.org/TR/REC-html40/loose.dtd", "-//W3C//DTD HTML 4.01//EN" => "http://www.w3.org/TR/html4/strict.dtd", "-//W3C//DTD HTML 4.01 Transitional//EN" => "http://www.w3.org/TR/html4/loose.dtd", "-//W3C//DTD HTML 4.01 Frameset//EN" => "http://www.w3.org/TR/html4/frameset.dtd", "-//W3C//DTD XHTML 1.0 Strict//EN" => "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd", "-//W3C//DTD XHTML 1.0 Transitional//EN" => "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd", "-//W3C//DTD XHTML 1.0 Frameset//EN" => "http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd", "-//W3C//DTD XHTML 1.1//EN" => "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd", "-//W3C//DTD XHTML Basic 1.0//EN" => "http://www.w3.org/TR/xhtml-basic/xhtml-basic10.dtd", "-//W3C//DTD XHTML Basic 1.1//EN" => "http://www.w3.org/TR/xhtml-basic/xhtml-basic11.dtd", "-//WAPFORUM//DTD XHTML Mobile 1.0//EN" => "http://www.wapforum.org/DTD/xhtml-mobile10.dtd", "-//WAPFORUM//DTD XHTML Mobile 1.1//EN" => "http://www.openmobilealliance.org/tech/DTD/xhtml-mobile11.dtd", "-//WAPFORUM//DTD XHTML Mobile 1.2//EN" => "http://www.openmobilealliance.org/tech/DTD/xhtml-mobile12.dtd", "-//W3C//DTD XHTML+RDFa 1.0//EN" => "http://www.w3.org/MarkUp/DTD/xhtml-rdfa-1.dtd", ); my $DEFAULT_HTML_TYPE= "-//W3C//DTD HTML 4.0 Transitional//EN"; my $SEP= qr/\s*(?:$|\|)/; BEGIN { $VERSION = '3.48'; use XML::Parser; my $needVersion = '2.23'; ($parser_version= $XML::Parser::VERSION)=~ s{_\d+}{}; # remove _ from version so numeric tests do not warn croak "need at least XML::Parser version $needVersion" unless $parser_version >= $needVersion; ($perl_version= $])=~ s{_\d+}{}; if( $perl_version >= 5.008) { eval "use Encode qw( :all)"; ## no critic ProhibitStringyEval $FB_XMLCREF = 0x0400; # Encode::FB_XMLCREF; $FB_HTMLCREF = 0x0200; # Encode::FB_HTMLCREF; } # test whether we can use weak references # set local empty signal handler to trap error messages { local $SIG{__DIE__}; if( eval( 'require Scalar::Util') && defined( \&Scalar::Util::weaken)) { import Scalar::Util( 'weaken'); $weakrefs= 1; } elsif( eval( 'require WeakRef')) { import WeakRef; $weakrefs= 1; } else { $weakrefs= 0; } } import XML::Twig::Elt; import XML::Twig::Entity; import XML::Twig::Entity_list; # used to store the gi's # should be set for each twig really, at least when there are several # the init ensures that special gi's are always the same # constants: element types $PCDATA = '#PCDATA'; $CDATA = '#CDATA'; $PI = '#PI'; $COMMENT = '#COMMENT'; $ENT = '#ENT'; # element classes $ELT = '#ELT'; $TEXT = '#TEXT'; # element properties $ASIS = '#ASIS'; $EMPTY = '#EMPTY'; # used in parseurl to set the buffer size to the same size as in XML::Parser::Expat $BUFSIZE = 32768; # gi => index %XML::Twig::gi2index=( '', 0, $PCDATA => 1, $CDATA => 2, $PI => 3, $COMMENT => 4, $ENT => 5); # list of gi's @XML::Twig::index2gi=( '', $PCDATA, $CDATA, $PI, $COMMENT, 
$ENT); # gi's under this value are special $XML::Twig::SPECIAL_GI= @XML::Twig::index2gi; %XML::Twig::base_ent= ( '>' => '>', '<' => '<', '&' => '&', "'" => ''', '"' => '"',); foreach my $c ( "\n", "\r", "\t") { $XML::Twig::base_ent{$c}= sprintf( "&#x%02x;", ord( $c)); } # now set some aliases *find_nodes = *get_xpath; # same as XML::XPath *findnodes = *get_xpath; # same as XML::LibXML *getElementsByTagName = *descendants; *descendants_or_self = *descendants; # valid in XML::Twig, not in XML::Twig::Elt *find_by_tag_name = *descendants; *getElementById = *elt_id; *getEltById = *elt_id; *toString = *sprint; *create_accessors = *att_accessors; } @ISA = qw(XML::Parser); # fake gi's used in twig_handlers and start_tag_handlers my $ALL = '_all_'; # the associated function is always called my $DEFAULT= '_default_'; # the function is called if no other handler has been # some defaults my $COMMENTS_DEFAULT= 'keep'; my $PI_DEFAULT = 'keep'; # handlers used in regular mode my %twig_handlers=( Start => \&_twig_start, End => \&_twig_end, Char => \&_twig_char, Entity => \&_twig_entity, XMLDecl => \&_twig_xmldecl, Doctype => \&_twig_doctype, Element => \&_twig_element, Attlist => \&_twig_attlist, CdataStart => \&_twig_cdatastart, CdataEnd => \&_twig_cdataend, Proc => \&_twig_pi, Comment => \&_twig_comment, Default => \&_twig_default, ExternEnt => \&_twig_extern_ent, ); # handlers used when twig_roots is used and we are outside of the roots my %twig_handlers_roots= ( Start => \&_twig_start_check_roots, End => \&_twig_end_check_roots, Doctype => \&_twig_doctype, Char => undef, Entity => undef, XMLDecl => \&_twig_xmldecl, Element => undef, Attlist => undef, CdataStart => undef, CdataEnd => undef, Proc => undef, Comment => undef, Proc => \&_twig_pi_check_roots, Default => sub {}, # hack needed for XML::Parser 2.27 ExternEnt => \&_twig_extern_ent, ); # handlers used when twig_roots and print_outside_roots are used and we are # outside of the roots my %twig_handlers_roots_print_2_30= ( Start => \&_twig_start_check_roots, End => \&_twig_end_check_roots, Char => \&_twig_print, Entity => \&_twig_print_entity, ExternEnt => \&_twig_print_entity, DoctypeFin => \&_twig_doctype_fin_print, XMLDecl => sub { _twig_xmldecl( @_); _twig_print( @_); }, Doctype => \&_twig_print_doctype, # because recognized_string is broken here # Element => \&_twig_print, Attlist => \&_twig_print, CdataStart => \&_twig_print, CdataEnd => \&_twig_print, Proc => \&_twig_pi_check_roots, Comment => \&_twig_print, Default => \&_twig_print_check_doctype, ExternEnt => \&_twig_extern_ent, ); # handlers used when twig_roots, print_outside_roots and keep_encoding are used # and we are outside of the roots my %twig_handlers_roots_print_original_2_30= ( Start => \&_twig_start_check_roots, End => \&_twig_end_check_roots, Char => \&_twig_print_original, # I have no idea why I should not be using this handler! 
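# Usage sketch for the twig_roots / twig_print_outside_roots mode that these
# handler tables implement (illustrative only, never called by the module;
# the element path and file name below are made up):
sub _sketch_twig_roots_usage
{ my $t= XML::Twig->new
    ( twig_roots               => { 'section/title' => sub { my( $twig, $title)= @_;
                                                             $title->print;  # print just this subtree
                                                             $twig->purge;   # then release parsed data
                                                             1;
                                                           }
                                  },
      twig_print_outside_roots => 1,  # everything outside the roots is echoed as-is
    );
  $t->parsefile( 'big_doc.xml');      # made-up file name
  return $t;
}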
Entity => \&_twig_print_entity, ExternEnt => \&_twig_print_entity, DoctypeFin => \&_twig_doctype_fin_print, XMLDecl => sub { _twig_xmldecl( @_); _twig_print_original( @_) }, Doctype => \&_twig_print_original_doctype, # because original_string is broken here Element => \&_twig_print_original, Attlist => \&_twig_print_original, CdataStart => \&_twig_print_original, CdataEnd => \&_twig_print_original, Proc => \&_twig_pi_check_roots, Comment => \&_twig_print_original, Default => \&_twig_print_original_check_doctype, ); # handlers used when twig_roots and print_outside_roots are used and we are # outside of the roots my %twig_handlers_roots_print_2_27= ( Start => \&_twig_start_check_roots, End => \&_twig_end_check_roots, Char => \&_twig_print, # if the Entity handler is set then it prints the entity declaration # before the entire internal subset (including the declaration!) is output Entity => sub {}, XMLDecl => \&_twig_print, Doctype => \&_twig_print, CdataStart => \&_twig_print, CdataEnd => \&_twig_print, Proc => \&_twig_pi_check_roots, Comment => \&_twig_print, Default => \&_twig_print, ExternEnt => \&_twig_extern_ent, ); # handlers used when twig_roots, print_outside_roots and keep_encoding are used # and we are outside of the roots my %twig_handlers_roots_print_original_2_27= ( Start => \&_twig_start_check_roots, End => \&_twig_end_check_roots, Char => \&_twig_print_original, # for some reason original_string is wrong here # this can be a problem if the doctype includes non ascii characters XMLDecl => \&_twig_print, Doctype => \&_twig_print, # if the Entity handler is set then it prints the entity declaration # before the entire internal subset (including the declaration!) is output Entity => sub {}, #Element => undef, Attlist => undef, CdataStart => \&_twig_print_original, CdataEnd => \&_twig_print_original, Proc => \&_twig_pi_check_roots, Comment => \&_twig_print_original, Default => \&_twig_print, # _twig_print_original does not work ExternEnt => \&_twig_extern_ent, ); my %twig_handlers_roots_print= $parser_version > 2.27 ? %twig_handlers_roots_print_2_30 : %twig_handlers_roots_print_2_27; my %twig_handlers_roots_print_original= $parser_version > 2.27 ? 
%twig_handlers_roots_print_original_2_30 : %twig_handlers_roots_print_original_2_27; # handlers used when the finish_print method has been called my %twig_handlers_finish_print= ( Start => \&_twig_print, End => \&_twig_print, Char => \&_twig_print, Entity => \&_twig_print, XMLDecl => \&_twig_print, Doctype => \&_twig_print, Element => \&_twig_print, Attlist => \&_twig_print, CdataStart => \&_twig_print, CdataEnd => \&_twig_print, Proc => \&_twig_print, Comment => \&_twig_print, Default => \&_twig_print, ExternEnt => \&_twig_extern_ent, ); # handlers used when the finish_print method has been called and the keep_encoding # option is used my %twig_handlers_finish_print_original= ( Start => \&_twig_print_original, End => \&_twig_print_end_original, Char => \&_twig_print_original, Entity => \&_twig_print_original, XMLDecl => \&_twig_print_original, Doctype => \&_twig_print_original, Element => \&_twig_print_original, Attlist => \&_twig_print_original, CdataStart => \&_twig_print_original, CdataEnd => \&_twig_print_original, Proc => \&_twig_print_original, Comment => \&_twig_print_original, Default => \&_twig_print_original, ); # handlers used within ignored elements my %twig_handlers_ignore= ( Start => \&_twig_ignore_start, End => \&_twig_ignore_end, Char => undef, Entity => undef, XMLDecl => undef, Doctype => undef, Element => undef, Attlist => undef, CdataStart => undef, CdataEnd => undef, Proc => undef, Comment => undef, Default => undef, ExternEnt => undef, ); # those handlers are only used if the entities are NOT to be expanded my %twig_noexpand_handlers= ( ExternEnt => undef, Default => \&_twig_default ); my @saved_default_handler; my $ID= 'id'; # default value, set by the Id argument my $css_sel=0; # set through the css_sel option to allow .class selectors in triggers # all allowed options %valid_option= ( # XML::Twig options TwigHandlers => 1, Id => 1, TwigRoots => 1, TwigPrintOutsideRoots => 1, StartTagHandlers => 1, EndTagHandlers => 1, ForceEndTagHandlersUsage => 1, DoNotChainHandlers => 1, IgnoreElts => 1, Index => 1, AttAccessors => 1, EltAccessors => 1, FieldAccessors => 1, CharHandler => 1, TopDownHandlers => 1, KeepEncoding => 1, DoNotEscapeAmpInAtts => 1, ParseStartTag => 1, KeepAttsOrder => 1, LoadDTD => 1, DTDHandler => 1, DoNotOutputDTD => 1, NoProlog => 1, ExpandExternalEnts => 1, DiscardSpaces => 1, KeepSpaces => 1, DiscardAllSpaces => 1, DiscardSpacesIn => 1, KeepSpacesIn => 1, PrettyPrint => 1, EmptyTags => 1, EscapeGt => 1, Quote => 1, Comments => 1, Pi => 1, OutputFilter => 1, InputFilter => 1, OutputTextFilter => 1, OutputEncoding => 1, RemoveCdata => 1, EltClass => 1, MapXmlns => 1, KeepOriginalPrefix => 1, SkipMissingEnts => 1, # XML::Parser options ErrorContext => 1, ProtocolEncoding => 1, Namespaces => 1, NoExpand => 1, Stream_Delimiter => 1, ParseParamEnt => 1, NoLWP => 1, Non_Expat_Options => 1, Xmlns => 1, CssSel => 1, UseTidy => 1, TidyOptions => 1, OutputHtmlDoctype => 1, ); my $active_twig; # last active twig,for XML::Twig::s # predefined input and output filters use vars qw( %filter); %filter= ( html => \&html_encode, safe => \&safe_encode, safe_hex => \&safe_encode_hex, ); # trigger types (used to sort them) my ($LEVEL_TRIGGER, $REGEXP_TRIGGER, $XPATH_TRIGGER)=(1..3); sub new { my ($class, %args) = @_; my $handlers; # change all nice_perlish_names into nicePerlishNames %args= _normalize_args( %args); # check options unless( $args{MoreOptions}) { foreach my $arg (keys %args) { carp "invalid option $arg" unless $valid_option{$arg}; } } # a twig is really an 
XML::Parser # my $self= XML::Parser->new(%args); my $self; $self= XML::Parser->new(%args); bless $self, $class; $self->{_twig_context_stack}= []; # allow tag.class selectors in handler triggers $css_sel= $args{CssSel} || 0; if( exists $args{TwigHandlers}) { $handlers= $args{TwigHandlers}; $self->setTwigHandlers( $handlers); delete $args{TwigHandlers}; } # take care of twig-specific arguments if( exists $args{StartTagHandlers}) { $self->setStartTagHandlers( $args{StartTagHandlers}); delete $args{StartTagHandlers}; } if( exists $args{DoNotChainHandlers}) { $self->{twig_do_not_chain_handlers}= $args{DoNotChainHandlers}; } if( exists $args{IgnoreElts}) { # change array to hash so you can write ignore_elts => [ qw(foo bar baz)] if( isa( $args{IgnoreElts}, 'ARRAY')) { $args{IgnoreElts}= { map { $_ => 1 } @{$args{IgnoreElts}} }; } $self->setIgnoreEltsHandlers( $args{IgnoreElts}); delete $args{IgnoreElts}; } if( exists $args{Index}) { my $index= $args{Index}; # we really want a hash name => path, we turn an array into a hash if necessary if( ref( $index) eq 'ARRAY') { my %index= map { $_ => $_ } @$index; $index= \%index; } while( my( $name, $exp)= each %$index) { $self->setTwigHandler( $exp, sub { push @{$_[0]->{_twig_index}->{$name}}, $_; 1; }); } } $self->{twig_elt_class}= $args{EltClass} || 'XML::Twig::Elt'; if( defined( $args{EltClass}) && $args{EltClass} ne 'XML::Twig::Elt') { $self->{twig_alt_elt_class}=1; } if( exists( $args{EltClass})) { delete $args{EltClass}; } if( exists( $args{MapXmlns})) { $self->{twig_map_xmlns}= $args{MapXmlns}; $self->{Namespaces}=1; delete $args{MapXmlns}; } if( exists( $args{KeepOriginalPrefix})) { $self->{twig_keep_original_prefix}= $args{KeepOriginalPrefix}; delete $args{KeepOriginalPrefix}; } $self->{twig_dtd_handler}= $args{DTDHandler}; delete $args{DTDHandler}; if( $args{ExpandExternalEnts}) { $self->set_expand_external_entities( 1); $self->{twig_expand_external_ents}= $args{ExpandExternalEnts}; $self->{twig_read_external_dtd}= 1; # implied by ExpandExternalEnts if( $args{ExpandExternalEnts} == -1) { $self->{twig_extern_ent_nofail}= 1; $self->setHandlers( ExternEnt => \&_twig_extern_ent_nofail); } delete $args{LoadDTD}; delete $args{ExpandExternalEnts}; } else { $self->set_expand_external_entities( 0); } if( !$args{NoLWP} && ! _use( 'URI') && ! _use( 'URI::File') && ! 
_use( 'LWP')) { $self->{twig_ext_ent_handler}= \&XML::Parser::initial_ext_ent_handler } else { $self->{twig_ext_ent_handler}= \&XML::Parser::file_ext_ent_handler } if( $args{DoNotEscapeAmpInAtts}) { $self->set_do_not_escape_amp_in_atts( 1); $self->{twig_do_not_escape_amp_in_atts}=1; } else { $self->set_do_not_escape_amp_in_atts( 0); $self->{twig_do_not_escape_amp_in_atts}=0; } # deal with TwigRoots argument, a hash of elements for which # subtrees will be built (and associated handlers) if( $args{TwigRoots}) { $self->setTwigRoots( $args{TwigRoots}); delete $args{TwigRoots}; } if( $args{EndTagHandlers}) { unless ($self->{twig_roots} || $args{ForceEndTagHandlersUsage}) { croak "you should not use EndTagHandlers without TwigRoots\n", "if you want to use it anyway, normally because you have ", "a start_tag_handlers that calls 'ignore' and you want to ", "call an ent_tag_handlers at the end of the element, then ", "pass 'force_end_tag_handlers_usage => 1' as an argument ", "to new"; } $self->setEndTagHandlers( $args{EndTagHandlers}); delete $args{EndTagHandlers}; } if( $args{TwigPrintOutsideRoots}) { croak "cannot use twig_print_outside_roots without twig_roots" unless( $self->{twig_roots}); # if the arg is a filehandle then store it if( _is_fh( $args{TwigPrintOutsideRoots}) ) { $self->{twig_output_fh}= $args{TwigPrintOutsideRoots}; } $self->{twig_default_print}= $args{TwigPrintOutsideRoots}; } # space policy if( $args{KeepSpaces}) { croak "cannot use both keep_spaces and discard_spaces" if( $args{DiscardSpaces}); croak "cannot use both keep_spaces and discard_all_spaces" if( $args{DiscardAllSpaces}); croak "cannot use both keep_spaces and keep_spaces_in" if( $args{KeepSpacesIn}); $self->{twig_keep_spaces}=1; delete $args{KeepSpaces}; } if( $args{DiscardSpaces}) { croak "cannot use both discard_spaces and keep_spaces_in" if( $args{KeepSpacesIn}); croak "cannot use both discard_spaces and discard_all_spaces" if( $args{DiscardAllSpaces}); croak "cannot use both discard_spaces and discard_spaces_in" if( $args{DiscardSpacesIn}); $self->{twig_discard_spaces}=1; delete $args{DiscardSpaces}; } if( $args{KeepSpacesIn}) { croak "cannot use both keep_spaces_in and discard_spaces_in" if( $args{DiscardSpacesIn}); croak "cannot use both keep_spaces_in and discard_all_spaces" if( $args{DiscardAllSpaces}); $self->{twig_discard_spaces}=1; $self->{twig_keep_spaces_in}={}; my @tags= @{$args{KeepSpacesIn}}; foreach my $tag (@tags) { $self->{twig_keep_spaces_in}->{$tag}=1; } delete $args{KeepSpacesIn}; } if( $args{DiscardAllSpaces}) { croak "cannot use both discard_all_spaces and discard_spaces_in" if( $args{DiscardSpacesIn}); $self->{twig_discard_all_spaces}=1; delete $args{DiscardAllSpaces}; } if( $args{DiscardSpacesIn}) { $self->{twig_keep_spaces}=1; $self->{twig_discard_spaces_in}={}; my @tags= @{$args{DiscardSpacesIn}}; foreach my $tag (@tags) { $self->{twig_discard_spaces_in}->{$tag}=1; } delete $args{DiscardSpacesIn}; } # discard spaces by default $self->{twig_discard_spaces}= 1 unless( $self->{twig_keep_spaces}); $args{Comments}||= $COMMENTS_DEFAULT; if( $args{Comments} eq 'drop') { $self->{twig_keep_comments}= 0; } elsif( $args{Comments} eq 'keep') { $self->{twig_keep_comments}= 1; } elsif( $args{Comments} eq 'process') { $self->{twig_process_comments}= 1; } else { croak "wrong value for comments argument: '$args{Comments}' (should be 'drop', 'keep' or 'process')"; } delete $args{Comments}; $args{Pi}||= $PI_DEFAULT; if( $args{Pi} eq 'drop') { $self->{twig_keep_pi}= 0; } elsif( $args{Pi} eq 'keep') { 
$self->{twig_keep_pi}= 1; } elsif( $args{Pi} eq 'process') { $self->{twig_process_pi}= 1; } else { croak "wrong value for pi argument: '$args{Pi}' (should be 'drop', 'keep' or 'process')"; } delete $args{Pi}; if( $args{KeepEncoding}) { # set it in XML::Twig::Elt so print functions know what to do $self->set_keep_encoding( 1); $self->{parse_start_tag}= $args{ParseStartTag} || \&_parse_start_tag; delete $args{ParseStartTag} if defined( $args{ParseStartTag}) ; delete $args{KeepEncoding}; } else { $self->set_keep_encoding( 0); if( $args{ParseStartTag}) { $self->{parse_start_tag}= $args{ParseStartTag}; } else { delete $self->{parse_start_tag}; } delete $args{ParseStartTag}; } if( $args{OutputFilter}) { $self->set_output_filter( $args{OutputFilter}); delete $args{OutputFilter}; } else { $self->set_output_filter( 0); } if( $args{RemoveCdata}) { $self->set_remove_cdata( $args{RemoveCdata}); delete $args{RemoveCdata}; } else { $self->set_remove_cdata( 0); } if( $args{OutputTextFilter}) { $self->set_output_text_filter( $args{OutputTextFilter}); delete $args{OutputTextFilter}; } else { $self->set_output_text_filter( 0); } if( exists $args{KeepAttsOrder}) { $self->{keep_atts_order}= $args{KeepAttsOrder}; if( _use( 'Tie::IxHash')) { $self->set_keep_atts_order( $self->{keep_atts_order}); } else { croak "Tie::IxHash not available, option keep_atts_order not allowed"; } } else { $self->set_keep_atts_order( 0); } if( $args{PrettyPrint}) { $self->set_pretty_print( $args{PrettyPrint}); } if( $args{EscapeGt}) { $self->escape_gt( $args{EscapeGt}); } if( $args{EmptyTags}) { $self->set_empty_tag_style( $args{EmptyTags}) } if( exists $args{Id}) { $ID= $args{Id}; delete $args{ID}; } if( $args{NoProlog}) { $self->{no_prolog}= 1; delete $args{NoProlog}; } if( $args{DoNotOutputDTD}) { $self->{no_dtd_output}= 1; delete $args{DoNotOutputDTD}; } if( $args{LoadDTD}) { $self->{twig_read_external_dtd}= 1; delete $args{LoadDTD}; } if( $args{CharHandler}) { $self->setCharHandler( $args{CharHandler}); delete $args{CharHandler}; } if( $args{InputFilter}) { $self->set_input_filter( $args{InputFilter}); delete $args{InputFilter}; } if( $args{NoExpand}) { $self->setHandlers( %twig_noexpand_handlers); $self->{twig_no_expand}=1; } if( my $output_encoding= $args{OutputEncoding}) { $self->set_output_encoding( $output_encoding); delete $args{OutputFilter}; } if( my $tdh= $args{TopDownHandlers}) { $self->{twig_tdh}=1; delete $args{TopDownHandlers}; } if( my $acc_a= $args{AttAccessors}) { $self->att_accessors( @$acc_a); } if( my $acc_e= $args{EltAccessors}) { $self->elt_accessors( isa( $acc_e, 'ARRAY') ? @$acc_e : $acc_e); } if( my $acc_f= $args{FieldAccessors}) { $self->field_accessors( isa( $acc_f, 'ARRAY') ? @$acc_f : $acc_f); } if( $args{UseTidy}) { $self->{use_tidy}= 1; } $self->{tidy_options}= $args{TidyOptions} || {}; if( $args{OutputHtmlDoctype}) { $self->{html_doctype}= 1; } $self->set_quote( $args{Quote} || 'double'); # set handlers if( $self->{twig_roots}) { if( $self->{twig_default_print}) { if( $self->{twig_keep_encoding}) { $self->setHandlers( %twig_handlers_roots_print_original); } else { $self->setHandlers( %twig_handlers_roots_print); } } else { $self->setHandlers( %twig_handlers_roots); } } else { $self->setHandlers( %twig_handlers); } # XML::Parser::Expat does not like these handler to be set. 
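# A minimal sketch of a constructor call exercising options handled above
# (all are standard public options; the values and element names are just
# illustrative, and this sub is never called):
sub _sketch_constructor_options
{ return XML::Twig->new
    ( pretty_print  => 'indented',             # cf. PrettyPrint above
      comments      => 'process',              # 'drop', 'keep' or 'process'
      pi            => 'keep',                 # same three choices as comments
      keep_encoding => 1,                      # keep the original byte encoding on output
      ignore_elts   => [ qw( script style) ],  # array form, turned into a hash above
    );
}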
So in order to # use the various sets of handlers on XML::Parser or XML::Parser::Expat # objects when needed, these ones have to be set only once, here, at # XML::Parser level $self->setHandlers( Init => \&_twig_init, Final => \&_twig_final); $self->{twig_entity_list}= XML::Twig::Entity_list->new; $self->{twig_id}= $ID; $self->{twig_stored_spaces}=''; $self->{twig_autoflush}= 1; # auto flush by default $self->{twig}= $self; if( $weakrefs) { weaken( $self->{twig}); } return $self; } sub parse { my $t= shift; # if called as a class method, calls nparse, which creates the twig then parses it if( !ref( $t) || !isa( $t, 'XML::Twig')) { return $t->nparse( @_); } # requires 5.006 at least (or the ${^UNICODE} causes a problem) # > perl 5.5 # trap underlying bug in IO::Handle (see RT #17500) # > perl 5.5 # croak if perl 5.8+, -CD (or PERL_UNICODE set to D) and parsing a pipe # > perl 5.5 if( $perl_version>=5.008 && ${^UNICODE} && (${^UNICODE} & 24) && isa( $_[0], 'GLOB') && -p $_[0] ) # > perl 5.5 { croak "cannot parse the output of a pipe when perl is set to use the UTF8 perlIO layer\n" # > perl 5.5 . "set the environment variable PERL_UNICODE or use the -C option (see perldoc perlrun)\n" # > perl 5.5 . "not to include 'D'"; # > perl 5.5 } # > perl 5.5 $t= eval { $t->SUPER::parse( @_); }; if( !$t && $@=~m{(syntax error at line 1, column 0, byte 0|not well-formed \(invalid token\) at line 1, column 1, byte 1)} && -f $_[0] ) { croak "you seem to have used the parse method on a filename ($_[0]), you probably want parsefile instead"; } return _checked_parse_result( $t, $@); } sub parsefile { my $t= shift; if( -f $_[0] && ! -s $_[0]) { return _checked_parse_result( undef, "empty file '$_[0]'"); } $t= eval { $t->SUPER::parsefile( @_); }; return _checked_parse_result( $t, $@); } sub _checked_parse_result { my( $t, $returned)= @_; if( !$t) { if( isa( $returned, 'XML::Twig') && $returned->{twig_finish_now}) { $t= $returned; delete $t->{twig_finish_now}; return $t->_twig_final; } else { _croak( $returned, 0); } } $active_twig= $t; return $t; } sub active_twig { return $active_twig; } sub finish_now { my $t= shift; $t->{twig_finish_now}=1; die $t; } sub parsefile_inplace { shift->_parse_inplace( parsefile => @_); } sub parsefile_html_inplace { shift->_parse_inplace( parsefile_html => @_); } sub _parse_inplace { my( $t, $method, $file, $suffix)= @_; _use( 'File::Temp') || croak "need File::Temp to use inplace methods\n"; _use( 'File::Basename'); my $tmpdir= dirname( $file); my( $tmpfh, $tmpfile)= File::Temp::tempfile( DIR => $tmpdir); my $original_fh= select $tmpfh; unless( $t->{twig_keep_encoding} || $perl_version < 5.006) { if( grep /useperlio=define/, `$^X -V`) # we can only use binmode :utf8 if perl was compiled with useperlio { binmode( $tmpfh, ":utf8" ); } } $t->$method( $file); select $original_fh; close $tmpfh; my $mode= (stat( $file))[2] & oct(7777); chmod $mode, $tmpfile or croak "cannot change temp file mode to $mode: $!"; if( $suffix) { my $backup; if( $suffix=~ m{\*}) { ($backup = $suffix) =~ s/\*/$file/g; } else { $backup= $file . $suffix; } rename( $file, $backup) or croak "cannot backup initial file ($file) to $backup: $!"; } rename( $tmpfile, $file) or croak "cannot rename temp file ($tmpfile) to initial file ($file): $!"; return $t; } sub parseurl { my $t= shift; $t->_parseurl( 0, @_); } sub safe_parseurl { my $t= shift; $t->_parseurl( 1, @_); } sub safe_parsefile_html { my $t= shift; eval { $t->parsefile_html( @_); }; return $@ ? 
$t->_reset_twig_after_error : $t; } sub safe_parseurl_html { my $t= shift; _use( 'LWP::Simple') or croak "missing LWP::Simple"; eval { $t->parse_html( LWP::Simple::get( shift()), @_); } ; return $@ ? $t->_reset_twig_after_error : $t; } sub parseurl_html { my $t= shift; _use( 'LWP::Simple') or croak "missing LWP::Simple"; $t->parse_html( LWP::Simple::get( shift()), @_); } # uses eval to catch the parser's death sub safe_parse_html { my $t= shift; eval { $t->parse_html( @_); } ; return $@ ? $t->_reset_twig_after_error : $t; } sub parsefile_html { my $t= shift; my $file= shift; my $indent= $t->{ErrorContext} ? 1 : 0; $t->set_empty_tag_style( 'html'); my $html2xml= $t->{use_tidy} ? \&_tidy_html : \&_html2xml; my $options= $t->{use_tidy} ? $t->{tidy_options} || {} : { indent => $indent, html_doctype => $t->{html_doctype} }; $t->parse( $html2xml->( _slurp( $file), $options), @_); return $t; } sub parse_html { my $t= shift; my $options= ref $_[0] && ref $_[0] eq 'HASH' ? shift() : {}; my $use_tidy= exists $options->{use_tidy} ? $options->{use_tidy} : $t->{use_tidy}; my $content= shift; my $indent= $t->{ErrorContext} ? 1 : 0; $t->set_empty_tag_style( 'html'); my $html2xml= $use_tidy ? \&_tidy_html : \&_html2xml; my $conv_options= $use_tidy ? $t->{tidy_options} || {} : { indent => $indent, html_doctype => $t->{html_doctype} }; $t->parse( $html2xml->( isa( $content, 'GLOB') ? _slurp_fh( $content) : $content, $conv_options), @_); return $t; } sub xparse { my $t= shift; my $to_parse= $_[0]; if( isa( $to_parse, 'GLOB')) { $t->parse( @_); } elsif( $to_parse=~ m{^\s*<}) { $to_parse=~ m{_parse_as_xml_or_html( @_) : $t->parse( @_); } elsif( $to_parse=~ m{^\w+://.*\.html?$}) { _use( 'LWP::Simple') or croak "missing LWP::Simple"; $t->_parse_as_xml_or_html( LWP::Simple::get( shift()), @_); } elsif( $to_parse=~ m{^\w+://}) { _use( 'LWP::Simple') or croak "missing LWP::Simple"; my $doc= LWP::Simple::get( shift); if( ! defined $doc) { $doc=''; } my $xml_parse_ok= $t->safe_parse( $doc, @_); if( $xml_parse_ok) { return $xml_parse_ok; } else { my $diag= $@; if( $doc=~ m{parse_html( $doc, @_); } else { croak $diag; } } } elsif( $to_parse=~ m{\.html?$}) { my $content= _slurp( shift); $t->_parse_as_xml_or_html( $content, @_); } else { $t->parsefile( @_); } } sub _parse_as_xml_or_html { my $t= shift; if( _is_well_formed_xml( $_[0])) { $t->parse( @_) } else { my $html2xml= $t->{use_tidy} ? \&_tidy_html : \&_html2xml; my $options= $t->{use_tidy} ? $t->{tidy_options} || {} : { indent => 0, html_doctype => $t->{html_doctype} }; my $html= $html2xml->( $_[0], $options, @_); if( _is_well_formed_xml( $html)) { $t->parse( $html); } else { croak $@; } # can't really test this because HTML::Parser or HTML::Tidy may change how they deal with bas HTML between versions } } { my $parser; sub _is_well_formed_xml { $parser ||= XML::Parser->new; eval { $parser->parse( $_[0]); }; return $@ ? 
0 : 1; } } sub nparse { my $class= shift; my $to_parse= pop; $class->new( @_)->xparse( $to_parse); } sub nparse_pp { shift()->nparse( pretty_print => 'indented', @_); } sub nparse_e { shift()->nparse( error_context => 1, @_); } sub nparse_ppe { shift()->nparse( pretty_print => 'indented', error_context => 1, @_); } sub _html2xml { my( $html, $options)= @_; _use( 'HTML::TreeBuilder', '3.13') or croak "cannot parse HTML: missing HTML::TreeBuilder v >= 3.13\n"; my $tree= HTML::TreeBuilder->new; $tree->ignore_ignorable_whitespace( 0); $tree->ignore_unknown( 0); $tree->no_space_compacting( 1); $tree->store_comments( 1); $tree->store_pis(1); $tree->parse( $html); $tree->eof; my $xml=''; if( $options->{html_doctype} && exists $tree->{_decl} ) { my $decl= $tree->{_decl}->as_XML; # first try to fix declarations that are missing the SYSTEM part $decl =~ s{^\s*} { my $system= $HTML_DECL{$2} || $HTML_DECL{$DEFAULT_HTML_TYPE}; qq{} }xe; # then check that the declaration looks OK (so it parses), if not remove it, # better to parse without the declaration than to die stupidly if( $decl =~ m{}x # PUBLIC then SYSTEM || $decl =~ m{}x # just SYSTEM ) { $xml= $decl; } } $xml.= _as_XML( $tree); _fix_xml( $tree, \$xml); if( $options->{indent}) { _indent_xhtml( \$xml); } $tree->delete; $xml=~ s{\s+$}{}s; # trim end return $xml; } sub _tidy_html { my( $html, $options)= @_; _use( 'HTML::Tidy') or croak "cannot cleanup HTML using HTML::Tidy (required by the use_tidy option): $@\n"; ; my $TIDY_DEFAULTS= { output_xhtml => 1, # duh! tidy_mark => 0, # do not add the "generated by tidy" comment numeric_entities => 1, char_encoding => 'utf8', bare => 1, clean => 1, doctype => 'transitional', fix_backslash => 1, merge_divs => 0, merge_spans => 0, sort_attributes => 'alpha', indent => 0, wrap => 0, break_before_br => 0, }; $options ||= {}; my $tidy_options= { %$TIDY_DEFAULTS, %$options}; my $tidy = HTML::Tidy->new( $tidy_options); $tidy->ignore( type => 1, type => 2 ); # 1 is TIDY_WARNING, 2 is TIDY_ERROR, not clean my $xml= $tidy->clean( $html ); return $xml; } { my %xml_parser_encoding; sub _fix_xml { my( $tree, $xml)= @_; # $xml is a ref to the xml string my $max_tries=5; my $add_decl; while( ! _check_xml( $xml) && $max_tries--) { # a couple of fixes for weird HTML::TreeBuilder errors if( $@=~ m{^\s*xml (or text )?declaration not at start of (external )?entity}i) { $$xml=~ s{<\?xml.*?\?>}{}g; #warn " fixed xml declaration in the wrong place\n"; } elsif( $@=~ m{undefined entity}) { $$xml=~ s{&(amp;)?Amp;}{&}g if $HTML::TreeBuilder::VERSION < 4.00; if( _use( 'HTML::Entities::Numbered')) { $$xml=name2hex_xml( $$xml); } $$xml=~ s{&(\w+);}{ my $ent= $1; if( $ent !~ m{^(amp|lt|gt|apos|quote)$}) { "&$ent;" } }eg; } elsif( $@=~ m{&Amp; used in html}) # if $Amp; is used instead of & then HTML::TreeBuilder's as_xml is tripped (old version) { $$xml=~ s{&(amp;)?Amp;}{&}g if $HTML::TreeBuilder::VERSION < 4.00; } elsif( $@=~ m{^\s*not well-formed \(invalid token\)}) { if( $HTML::TreeBuilder::VERSION < 4.00) { $$xml=~ s{&(amp;)?Amp;}{&}g; $$xml=~ s{(<[^>]* )(\d+=)"}{$1a$2"}g; # comes out as
, "fix the attribute } my $q= ')?}{}s; #warn " added decl (encoding $encoding)\n"; } else { $$xml=~ s{^(<\?xml.*?\?>)?}{}s; #warn " converting to utf8 from $encoding\n"; $$xml= _to_utf8( $encoding, $$xml); } } else { $$xml=~ s{^(<\?xml.*?\?>)?}{}s; #warn " converting to utf8 from $encoding\n"; $$xml= _to_utf8( $encoding, $$xml); } } } } # some versions of HTML::TreeBuilder escape CDATA sections $$xml=~ s{(<!\[CDATA\[.*?\]\]>)}{_unescape_cdata( $1)}eg; } sub _xml_parser_encodings { my @encodings=( 'iso-8859-1'); # this one is included by default, there is no map for it in @INC foreach my $inc (@INC) { push @encodings, map { basename( $_, '.enc') } glob( File::Spec->catdir( $inc => XML => Parser => Encodings => '*.enc')); } return map { $_ => 1 } @encodings; } } sub _unescape_cdata { my( $cdata)= @_; $cdata=~s{<}{<}g; $cdata=~s{>}{>}g; $cdata=~s{&}{&}g; return $cdata; } sub _as_XML { # fork of HTML::Element::as_XML, which is a little too buggy and inconsistent between versions for my liking my ($elt) = @_; my $xml= ''; my $empty_element_map = $elt->_empty_element_map; my ( $tag, $node, $start ); # per-iteration scratch $elt->traverse( sub { ( $node, $start ) = @_; if ( ref $node ) { # it's an element $tag = $node->{'_tag'}; if ($start) { # on the way in foreach my $att ( grep { ! m{^(_|/$)} } keys %$node ) { # fix attribute names instead of dying my $new_att= $att; if( $att=~ m{^\d}) { $new_att= "a$att"; } $new_att=~ s{[^\w\d:_-]}{}g; $new_att ||= 'a'; if( $new_att ne $att) { $node->{$new_att}= delete $node->{$att}; } } if ( $empty_element_map->{$tag} && (!@{ $node->{'_content'} || []}) ) { $xml.= $node->starttag_XML( undef, 1 ); } else { $xml.= $node->starttag_XML(undef); } } else { # on the way out unless ( $empty_element_map->{$tag} and !@{ $node->{'_content'} || [] } ) { $xml.= $node->endtag_XML(); } # otherwise it will have been an <... /> tag. 
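# Usage sketch for the HTML import path supported by the helpers around here
# (illustrative only, never called by the module; requires HTML::TreeBuilder,
# as checked in _html2xml, and the file name is made up):
sub _sketch_html_import
{ my $html= '<html><head><title>t</title></head><body><p>hello</body></html>';
  my $t1= XML::Twig->new->parse_html( $html);            # possibly sloppy HTML string
  my $t2= XML::Twig->new->parsefile_html( 'page.html');  # same, reading from a file
  my $t3= XML::Twig->new->xparse( 'page.html');          # xparse guesses string / file / URL / filehandle
  return( $t1, $t2, $t3);
}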
} } elsif( $node=~ /)/s, $node) # chunks are CDATA sections or normal text { $xml.= $chunk =~ m{/>/g; $html =~ s/"/"/g; $html =~ s/'/'/g; return $html; } sub _check_xml { my( $xml)= @_; # $xml is a ref to the xml string my $ok= eval { XML::Parser->new->parse( $$xml); }; #if( $ok) { warn " parse OK\n"; } return $ok; } sub _encoding_from_meta { my( $tree)= @_; my $enc="iso-8859-1"; my @meta= $tree->find( 'meta'); foreach my $meta (@meta) { if( $meta->{'http-equiv'} && ($meta->{'http-equiv'} =~ m{^\s*content-type\s*}i) && $meta->{content} && ($meta->{content} =~ m{^\s*text/html\s*;\s*charset\s*=\s*(\S*)\s*}i) ) { $enc= lc $1; #warn " encoding from meta tag is '$enc'\n"; last; } } return $enc; } { sub _to_utf8 { my( $encoding, $string)= @_; local $SIG{__DIE__}; if( _use( 'Encode')) { Encode::from_to( $string, $encoding => 'utf8', 0x0400); } # 0x0400 is Encode::FB_XMLCREF elsif( _use( 'Text::Iconv')) { my $converter = eval { Text::Iconv->new( $encoding => "utf8") }; if( $converter) { $string= $converter->convert( $string); } } elsif( _use( 'Unicode::Map8') && _use( 'Unicode::String')) { my $map= Unicode::Map8->new( $encoding); $string= $map->tou( $string)->utf8; } $string=~ s{[\x00-\x08\x0B\x0C\x0E-\x1F]}{}g; # get rid of control chars, portable in 5.6 return $string; } } sub _indent_xhtml { my( $xhtml)= @_; # $xhtml is a ref my %block_tag= map { $_ => 1 } qw( html head meta title link script base body h1 h2 h3 h4 h5 h6 p br address blockquote pre ol ul li dd dl dt table tr td th tbody tfoot thead col colgroup caption div frame frameset hr ); my $level=0; $$xhtml=~ s{( (?:|[CDATA[.*?]]>)) # ignore comments and CDATA sections | <(\w+)((?:\s+\w+\s*=\s*(?:"[^"]*"|'[^']*'))*\s*/>) # empty tag | <(\w+) # start tag |}); my $nl= $4 eq 'html' ? '' : "\n"; "$nl$indent<$4"; } elsif( $5 && $block_tag{$5}) { $level--; " 1 } qw( xsl css); my $ss= $t->{twig_elt_class}->new( $PI); if( $text_type{$type}) { $ss->_set_pi( 'xml-stylesheet', qq{type="text/$type" href="$href"}); } else { croak "unsupported style sheet type '$type'"; } $t->_add_cpi_outside_of_root( leading_cpi => $ss); return $t; } { my %used; # module => 1 if require ok, 0 otherwise my %disallowed; # for testing, refuses to _use modules in this hash sub _disallow_use ## no critic (Subroutines::ProhibitNestedSubs); { my( @modules)= @_; $disallowed{$_}= 1 foreach (@modules); } sub _allow_use ## no critic (Subroutines::ProhibitNestedSubs); { my( @modules)= @_; $disallowed{$_}= 0 foreach (@modules); } sub _use ## no critic (Subroutines::ProhibitNestedSubs); { my( $module, $version)= @_; $version ||= 0; if( $disallowed{$module}) { return 0; } if( $used{$module}) { return 1; } if( eval "require $module") { import $module; $used{$module}= 1; # no critic ProhibitStringyEval if( $version) { ## no critic (TestingAndDebugging::ProhibitNoStrict); no strict 'refs'; if( ${"${module}::VERSION"} >= $version ) { return 1; } else { return 0; } } else { return 1; } } else { $used{$module}= 0; return 0; } } } # used to solve the [n] predicates while avoiding getting the entire list # needs a prototype to accept passing bare blocks sub _first_n(&$@) ## no critic (Subroutines::ProhibitSubroutinePrototypes); { my $coderef= shift; my $n= shift; my $i=0; if( $n > 0) { foreach (@_) { if( &$coderef) { $i++; return $_ if( $i == $n); } } } elsif( $n < 0) { foreach (reverse @_) { if( &$coderef) { $i--; return $_ if( $i == $n); } } } else { croak "illegal position number 0"; } return undef; } sub _slurp_uri { my( $uri, $base)= @_; if( $uri=~ m{^\w+://}) { _use( 'LWP::Simple'); 
return LWP::Simple::get( $uri); } else { return _slurp( _based_filename( $uri, $base)); } } sub _based_filename { my( $filename, $base)= @_; # cf. XML/Parser.pm's file_ext_ent_handler if (defined($base) and not ($filename =~ m{^(?:[\\/]|\w+:)})) { my $newpath = $base; $newpath =~ s{[^\\/:]*$}{$filename}; $filename = $newpath; } return $filename; } sub _slurp { my( $filename)= @_; my $to_slurp; open( $to_slurp, "<$filename") or croak "cannot open '$filename': $!"; local $/= undef; my $content= <$to_slurp>; close $to_slurp; return $content; } sub _slurp_fh { my( $fh)= @_; local $/= undef; my $content= <$fh>; return $content; } # I should really add extra options to allow better configuration of the # LWP::UserAgent object # this method forks (except on VMS!) # - the child gets the data and copies it to the pipe, # - the parent reads the stream and sends it to XML::Parser # the data is cut it chunks the size of the XML::Parser::Expat buffer # the method returns the twig and the status sub _parseurl { my( $t, $safe, $url, $agent)= @_; _use( 'LWP') || croak "LWP not available, needed to use parseurl methods"; if( $^O ne 'VMS') { pipe( README, WRITEME) or croak "cannot create connected pipes: $!"; if( my $pid= fork) { # parent code: parse the incoming file close WRITEME; # no need to write my $result= $safe ? $t->safe_parse( \*README) : $t->parse( \*README); close README; return $@ ? 0 : $t; } else { # child close README; # no need to read local $|=1; $agent ||= LWP::UserAgent->new; my $request = HTTP::Request->new( GET => $url); # _pass_url_content is called with chunks of data the same size as # the XML::Parser buffer my $response = $agent->request( $request, sub { _pass_url_content( \*WRITEME, @_); }, $BUFSIZE); $response->is_success or croak "$url ", $response->message; close WRITEME; CORE::exit(); # CORE is there for mod_perl (which redefines exit) } } else { # VMS branch (hard to test!) local $|=1; $agent ||= LWP::UserAgent->new; my $request = HTTP::Request->new( GET => $url); my $response = $agent->request( $request); $response->is_success or croak "$url ", $response->message; my $result= $safe ? $t->safe_parse($response->content) : $t->parse($response->content); return $@ ? 0 : $t; } } # get the (hopefully!) 
XML data from the URL and sub _pass_url_content { my( $fh, $data, $response, $protocol)= @_; print {$fh} $data; } sub add_options { my %args= map { $_, 1 } @_; %args= _normalize_args( %args); foreach (keys %args) { $valid_option{$_}++; } } sub _pretty_print_styles { return XML::Twig::Elt::_pretty_print_styles(); } sub _twig_store_internal_dtd { # warn " in _twig_store_internal_dtd...\n"; # DEBUG handler my( $p, $string)= @_; my $t= $p->{twig}; if( $t->{twig_keep_encoding}) { $string= $p->original_string(); } $t->{twig_doctype}->{internal} .= $string; return; } sub _twig_stop_storing_internal_dtd { # warn " in _twig_stop_storing_internal_dtd...\n"; # DEBUG handler my $p= shift; if( @saved_default_handler && defined $saved_default_handler[1]) { $p->setHandlers( @saved_default_handler); } else { $p->setHandlers( Default => undef); } $p->{twig}->{twig_doctype}->{internal}=~ s{^\s*\[}{}; $p->{twig}->{twig_doctype}->{internal}=~ s{\]\s*$}{}; return; } sub _twig_doctype_fin_print { # warn " in _twig_doctype_fin_print...\n"; # DEBUG handler my( $p)= shift; if( $p->{twig}->{twig_doctype}->{has_internal} && !$expat_1_95_2) { print ' ]>'; } return; } sub _normalize_args { my %normalized_args; while( my $key= shift ) { $key= join '', map { ucfirst } split /_/, $key; #$key= "Twig".$key unless( substr( $key, 0, 4) eq 'Twig'); $normalized_args{$key}= shift ; } return %normalized_args; } sub _is_fh { return unless $_[0]; return $_[0] if( isa( $_[0], 'GLOB') || isa( $_[0], 'IO::Scalar')); } sub _set_handler { my( $handlers, $whole_path, $handler)= @_; my $H_SPECIAL = qr{($ALL|$DEFAULT|$COMMENT|$TEXT)}; my $H_PI = qr{(\?|$PI)\s*(([^\s]*)\s*)}; my $H_LEVEL = qr{level \s* \( \s* ([0-9]+) \s* \)}x; my $H_REGEXP = qr{\(\?([\^xism]*)(-[\^xism]*)?:(.*)\)}x; my $H_XPATH = qr{(/?/?$REG_TAG_PART? \s* ($REG_PREDICATE\s*)?)+}x; my $prev_handler; my $cpath= $whole_path; #warn "\$cpath: '$cpath\n"; while( $cpath && $cpath=~ s{^\s*($H_SPECIAL|$H_PI|$H_LEVEL|$H_REGEXP|$H_XPATH)\s*($|\|)}{}) { my $path= $1; #warn "\$cpath: '$cpath' - $path: '$path'\n"; $prev_handler ||= $handlers->{handlers}->{string}->{$path} || undef; # $prev_handler gets the first found handler _set_special_handler ( $handlers, $path, $handler, $prev_handler) || _set_pi_handler ( $handlers, $path, $handler, $prev_handler) || _set_level_handler ( $handlers, $path, $handler, $prev_handler) || _set_regexp_handler ( $handlers, $path, $handler, $prev_handler) || _set_xpath_handler ( $handlers, $path, $handler, $prev_handler) || croak "unrecognized expression in handler: '$whole_path'"; # this both takes care of the simple (gi) handlers and store # the handler code reference for other handlers $handlers->{handlers}->{string}->{$path}= $handler; } if( $cpath) { croak "unrecognized expression in handler: '$whole_path'"; } return $prev_handler; } sub _set_special_handler { my( $handlers, $path, $handler, $prev_handler)= @_; if( $path =~ m{^\s*($ALL|$DEFAULT|$COMMENT|$TEXT)\s*$}io ) { $handlers->{handlers}->{$1}= $handler; return 1; } else { return 0; } } sub _set_xpath_handler { my( $handlers, $path, $handler, $prev_handler)= @_; if( my $handler_data= _parse_xpath_handler( $path, $handler)) { _add_handler( $handlers, $handler_data, $path, $prev_handler); return 1; } else { return 0; } } sub _add_handler { my( $handlers, $handler_data, $path, $prev_handler)= @_; my $tag= $handler_data->{tag}; my @handlers= $handlers->{xpath_handler}->{$tag} ? 
@{$handlers->{xpath_handler}->{$tag}} : (); if( $prev_handler) { @handlers= grep { $_->{path} ne $path } @handlers; } push @handlers, $handler_data if( $handler_data->{handler}); if( @handlers > 1) { @handlers= sort { (($b->{score}->{type} || 0) <=> ($a->{score}->{type} || 0)) || (($b->{score}->{anchored} || 0) <=> ($a->{score}->{anchored} || 0)) || (($b->{score}->{steps} || 0) <=> ($a->{score}->{steps} || 0)) || (($b->{score}->{predicates} || 0) <=> ($a->{score}->{predicates} || 0)) || (($b->{score}->{tests} || 0) <=> ($a->{score}->{tests} || 0)) || ($a->{path} cmp $b->{path}) } @handlers; } $handlers->{xpath_handler}->{$tag}= \@handlers; } sub _set_pi_handler { my( $handlers, $path, $handler, $prev_handler)= @_; # PI conditions ( '?target' => \&handler or '?' => \&handler # or '#PItarget' => \&handler or '#PI' => \&handler) if( $path=~ /^\s*(?:\?|$PI)\s*(?:([^\s]*)\s*)$/) { my $target= $1 || ''; # update the path_handlers count, knowing that # either the previous or the new handler can be undef $handlers->{pi_handlers}->{$1}= $handler; return 1; } else { return 0; } } sub _set_level_handler { my( $handlers, $path, $handler, $prev_handler)= @_; if( $path =~ m{^ \s* level \s* \( \s* ([0-9]+) \s* \) \s* $}ox ) { my $level= $1; my $sub= sub { my( $stack)= @_; return( ($stack->[-1]->{$ST_TAG} !~ m{^#}) && (scalar @$stack == $level + 1) ) }; my $handler_data= { tag=> '*', score => { type => $LEVEL_TRIGGER}, trigger => $sub, path => $path, handler => $handler, test_on_text => 0 }; _add_handler( $handlers, $handler_data, $path, $prev_handler); return 1; } else { return 0; } } sub _set_regexp_handler { my( $handlers, $path, $handler, $prev_handler)= @_; # if the expression was a regexp it is now a string (it was stringified when it became a hash key) if( $path=~ m{^\(\?([\^xism]*)(?:-[\^xism]*)?:(.*)\)$}) { my $regexp= qr/(?$1:$2)/; # convert it back into a regexp my $sub= sub { my( $stack)= @_; return( $stack->[-1]->{$ST_TAG} =~ $regexp ) }; my $handler_data= { tag=> '*', score => { type => $REGEXP_TRIGGER} , trigger => $sub, path => $path, handler => $handler, test_on_text => 0 }; _add_handler( $handlers, $handler_data, $path, $prev_handler); return 1; } else { return 0; } } my $DEBUG_HANDLER= 0; # 0 or 1 (output the handler checking code) or 2 (super verbose) my $handler_string; # store the handler itself sub _set_debug_handler { $DEBUG_HANDLER= shift; } sub _warn_debug_handler { if( $DEBUG_HANDLER < 3) { warn @_; } else { $handler_string .= join( '', @_); } } sub _return_debug_handler { my $string= $handler_string; $handler_string=''; return $string; } sub _parse_xpath_handler { my( $xpath, $handler)= @_; my $xpath_original= $xpath; if( $DEBUG_HANDLER >=1) { _warn_debug_handler( "\n\nparsing path '$xpath'\n"); } my $path_to_check= $xpath; $path_to_check=~ s{/?/?$REG_TAG_PART?\s*(?:$REG_PREDICATE\s*)?}{}g; if( $DEBUG_HANDLER && $path_to_check=~ /\S/) { _warn_debug_handler( "left: $path_to_check\n"); } return if( $path_to_check=~ /\S/); (my $xpath_to_display= $xpath)=~ s{(["{}'\[\]\@\$])}{\\$1}g; my @xpath_steps; my $last_token_is_sep; while( $xpath=~ s{^\s* ( (//?) # separator | (?:$REG_TAG_PART\s*(?:$REG_PREDICATE\s*)?) 
# tag name and optional predicate | (?:$REG_PREDICATE) # just a predicate ) } {}x ) { # check that we have alternating separators and steps if( $2) # found a separator { if( $last_token_is_sep) { return 0; } # 2 separators in a row $last_token_is_sep= 1; } else { if( defined( $last_token_is_sep) && !$last_token_is_sep) { return 0; } # 2 steps in a row $last_token_is_sep= 0; } push @xpath_steps, $1; } if( $last_token_is_sep) { return 0; } # expression cannot end with a separator my $i=-1; my $perlfunc= _join_n( $NO_WARNINGS . ';', q|my( $stack)= @_; |, q|my @current_elts= (scalar @$stack); |, q|my @new_current_elts; |, q|my $elt; |, ($DEBUG_HANDLER >= 1) && (qq#warn q{checking path '$xpath_to_display'\n};#), ); my $last_tag=''; my $anchored= $xpath_original=~ m{^\s*/(?!/)} ? 1 : 0; my $score={ type => $XPATH_TRIGGER, anchored => $anchored }; my $flag= { test_on_text => 0 }; my $sep='/'; # '/' or '//' while( my $xpath_step= pop @xpath_steps) { my( $tag, $predicate)= $xpath_step =~ m{^($REG_TAG_PART)?(?:\[(.*)\])?\s*$}; $score->{steps}++; $tag||='*'; my $warn_empty_stack= $DEBUG_HANDLER >= 2 ? qq{warn "return with empty stack\\n";} : ''; if( $predicate) { if( $DEBUG_HANDLER >= 2) { _warn_debug_handler( "predicate is: '$predicate'\n"); } # changes $predicate (from an XPath expression to a Perl one) if( $predicate=~ m{^\s*$REG_NUMBER\s*$}) { croak "position selector [$predicate] not supported on twig_handlers"; } _parse_predicate_in_handler( $predicate, $flag, $score); if( $DEBUG_HANDLER >= 2) { _warn_debug_handler( "predicate becomes: '$predicate'\n"); } } my $tag_cond= _tag_cond( $tag); my $cond= join( " && ", grep { $_ } $tag_cond, $predicate) || 1; if( $css_sel && $tag=~ m{\.}) { $tag=~s{\.[^.]*$}{}; $tag ||='*'; } $tag=~ s{(.)#.+$}{$1}; $last_tag ||= $tag; if( $sep eq '/') { $perlfunc .= sprintf( _join_n( q#foreach my $current_elt (@current_elts) #, q# { next if( !$current_elt); #, q# $current_elt--; #, q# $elt= $stack->[$current_elt]; #, q# if( %s) { push @new_current_elts, $current_elt;} #, q# } #, ), $cond ); } elsif( $sep eq '//') { $perlfunc .= sprintf( _join_n( q#foreach my $current_elt (@current_elts) #, q# { next if( !$current_elt); #, q# $current_elt--; #, q# my $candidate= $current_elt; #, q# while( $candidate >=0) #, q# { $elt= $stack->[$candidate]; #, q# if( %s) { push @new_current_elts, $candidate;} #, q# $candidate--; #, q# } #, q# } #, ), $cond ); } my $warn= $DEBUG_HANDLER >= 2 ? _join_n( qq#warn qq%fail at cond '$cond'%;#) : ''; $perlfunc .= sprintf( _join_n( q#unless( @new_current_elts) { %s return 0; } #, q#@current_elts= @new_current_elts; #, q#@new_current_elts=(); #, ), $warn ); $sep= pop @xpath_steps; } if( $anchored) # there should be a better way, but this works { my $warn= $DEBUG_HANDLER >= 2 ? _join_n( qq#warn qq{fail, stack not empty};#) : ''; $perlfunc .= sprintf( _join_n( q#if( ! 
grep { $_ == 0 } @current_elts) { %s return 0;}#), $warn); } $perlfunc.= qq{warn "handler for '$xpath_to_display' triggered\\n";\n} if( $DEBUG_HANDLER >=2); $perlfunc.= qq{return q{$xpath_original};\n}; _warn_debug_handler( "\nperlfunc:\n$perlfunc\n") if( $DEBUG_HANDLER>=1); my $s= eval "sub { $perlfunc }"; if( $@) { croak "wrong handler condition '$xpath' ($@);" } _warn_debug_handler( "last tag: '$last_tag', test_on_text: '$flag->{test_on_text}'\n") if( $DEBUG_HANDLER >=1); _warn_debug_handler( "score: ", join( ' ', map { "$_: $score->{$_}" } sort keys %$score), "\n") if( $DEBUG_HANDLER >=1); return { tag=> $last_tag, score => $score, trigger => $s, path => $xpath_original, handler => $handler, test_on_text => $flag->{test_on_text} }; } sub _join_n { return join( "\n", @_, ''); } # the "tag" part can be , . or # (where tag can be *, or start with # for hidden tags) sub _tag_cond { my( $full_tag)= @_; my( $tag, $class, $id); if( $full_tag=~ m{^(.+)#(.+)$}) { ($tag, $id)= ($1, $2); } # # else { ( $tag, $class)= $css_sel ? $full_tag=~ m{^(.*?)(?:\.([^.]*))?$} : ($full_tag, undef); } my $tag_cond = $tag && $tag ne '*' ? qq#(\$elt->{'$ST_TAG'} eq "$tag")# : ''; my $id_cond = defined $id ? qq#(\$elt->{id} eq "$id")# : ''; my $class_cond = defined $class ? qq#(\$elt->{class}=~ m{(^| )$class( |\$)})# : ''; my $full_cond= join( ' && ', grep { $_ } ( $tag_cond, $class_cond, $id_cond)); return $full_cond; } # input: the predicate ($_[0]) which will be changed in place # flags, a hashref with various flags (like test_on_text) # the score sub _parse_predicate_in_handler { my( $flag, $score)= @_[1..2]; $_[0]=~ s{( ($REG_STRING) # strings |\@($REG_TAG_NAME)(\s* $REG_MATCH \s* $REG_REGEXP) # @att and regexp |\@($REG_TAG_NAME)(?=\s*(?:[><=!])) # @att followed by a comparison operator |\@($REG_TAG_NAME) # @att (not followed by a comparison operator) |=~|!~ # matching operators |([><]=?|=|!=)(?=\s*[\d+-]) # test before a number |([><]=?|=|!=) # test, other cases |($REG_FUNCTION) # no arg functions # this bit is a mess, but it is the only solution with this half-baked parser |(string\(\s*$REG_NAME\s*\)\s*$REG_MATCH\s*$REG_REGEXP) # string( child)=~ /regexp/ |(string\(\s*$REG_NAME\s*\)\s*$REG_COMP\s*$REG_STRING) # string( child) = "value" (or other test) |(string\(\s*$REG_NAME\s*\)\s*$REG_COMP\s*$REG_NUMBER) # string( child) = nb (or other test) |(and|or) # |($REG_NAME(?=\s*(and|or|$))) # nested tag name (needs to be after all other unquoted strings) |($REG_TAG_IN_PREDICATE) # nested tag name (needs to be after all other unquoted strings) )} { my( $token, $str, $att_re_name, $att_re_regexp, $att, $bare_att, $num_test, $alpha_test, $func, $str_regexp, $str_test_alpha, $str_test_num, $and_or, $tag) = ( $1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14); $score->{predicates}++; # store tests on text (they are not always allowed) if( $func || $str_regexp || $str_test_num || $str_test_alpha ) { $flag->{test_on_text}= 1; } if( defined $str) { $token } elsif( $tag) { qq{(\$elt->{'$ST_ELT'} && \$elt->{'$ST_ELT'}->has_child( '$tag'))} } elsif( $att) { $att=~ m{^#} ? qq{ (\$elt->{'$ST_ELT'} && \$elt->{'$ST_ELT'}->{att}->{'$att'})} : qq{\$elt->{'$att'}} } elsif( $att_re_name) { $att_re_name=~ m{^#} ? qq{ (\$elt->{'$ST_ELT'} && \$elt->{'$ST_ELT'}->{att}->{'$att_re_name'}$att_re_regexp)} : qq{\$elt->{'$att_re_name'}$att_re_regexp} } # for some reason Devel::Cover flags the following lines as not tested. They are though. elsif( $bare_att) { $bare_att=~ m{^#} ? 
qq{(\$elt->{'$ST_ELT'} && defined(\$elt->{'$ST_ELT'}->{att}->{'$bare_att'}))} : qq{defined( \$elt->{'$bare_att'})} } elsif( $num_test && ($num_test eq '=') ) { "==" } # others tests are unchanged elsif( $alpha_test) { $PERL_ALPHA_TEST{$alpha_test} } elsif( $func && $func=~ m{^string}) { "\$elt->{'$ST_ELT'}->text"; } elsif( $str_regexp && $str_regexp =~ m{string\(\s*($REG_TAG_NAME)\s*\)\s*($REG_MATCH)\s*($REG_REGEXP)}) { "defined( _first_n { \$_->text $2 $3 } 1, \$elt->{'$ST_ELT'}->_children( '$1'))"; } elsif( $str_test_alpha && $str_test_alpha =~ m{string\(\s*($REG_TAG_NAME)\s*\)\s*($REG_COMP)\s*($REG_STRING)}) { my( $tag, $op, $str)= ($1, $2, $3); $str=~ s{(?<=.)'(?=.)}{\\'}g; # escape a quote within the string $str=~ s{^"}{'}; $str=~ s{"$}{'}; "defined( _first_n { \$_->text $PERL_ALPHA_TEST{$op} $str } 1, \$elt->{'$ST_ELT'}->children( '$tag'))"; } elsif( $str_test_num && $str_test_num =~ m{string\(\s*($REG_TAG_NAME)\s*\)\s*($REG_COMP)\s*($REG_NUMBER)}) { my $test= ($2 eq '=') ? '==' : $2; "defined( _first_n { \$_->text $test $3 } 1, \$elt->{'$ST_ELT'}->children( '$1'))"; } elsif( $and_or) { $score->{tests}++; $and_or eq 'and' ? '&&' : '||' ; } else { $token; } }gexs; } sub setCharHandler { my( $t, $handler)= @_; $t->{twig_char_handler}= $handler; } sub _reset_handlers { my $handlers= shift; delete $handlers->{handlers}; delete $handlers->{path_handlers}; delete $handlers->{subpath_handlers}; $handlers->{attcond_handlers_exp}=[] if( $handlers->{attcond_handlers}); delete $handlers->{attcond_handlers}; } sub _set_handlers { my $handlers= shift || return; my $set_handlers= {}; foreach my $path (keys %{$handlers}) { _set_handler( $set_handlers, $path, $handlers->{$path}); } return $set_handlers; } sub setTwigHandler { my( $t, $path, $handler)= @_; $t->{twig_handlers} ||={}; return _set_handler( $t->{twig_handlers}, $path, $handler); } sub setTwigHandlers { my( $t, $handlers)= @_; my $previous_handlers= $t->{twig_handlers} || undef; _reset_handlers( $t->{twig_handlers}); $t->{twig_handlers}= _set_handlers( $handlers); return $previous_handlers; } sub setStartTagHandler { my( $t, $path, $handler)= @_; $t->{twig_starttag_handlers}||={}; return _set_handler( $t->{twig_starttag_handlers}, $path, $handler); } sub setStartTagHandlers { my( $t, $handlers)= @_; my $previous_handlers= $t->{twig_starttag_handlers} || undef; _reset_handlers( $t->{twig_starttag_handlers}); $t->{twig_starttag_handlers}= _set_handlers( $handlers); return $previous_handlers; } sub setIgnoreEltsHandler { my( $t, $path, $action)= @_; $t->{twig_ignore_elts_handlers}||={}; return _set_handler( $t->{twig_ignore_elts_handlers}, $path, $action ); } sub setIgnoreEltsHandlers { my( $t, $handlers)= @_; my $previous_handlers= $t->{twig_ignore_elts_handlers}; _reset_handlers( $t->{twig_ignore_elts_handlers}); $t->{twig_ignore_elts_handlers}= _set_handlers( $handlers); return $previous_handlers; } sub setEndTagHandler { my( $t, $path, $handler)= @_; $t->{twig_endtag_handlers}||={}; return _set_handler( $t->{twig_endtag_handlers}, $path,$handler); } sub setEndTagHandlers { my( $t, $handlers)= @_; my $previous_handlers= $t->{twig_endtag_handlers}; _reset_handlers( $t->{twig_endtag_handlers}); $t->{twig_endtag_handlers}= _set_handlers( $handlers); return $previous_handlers; } # a little more complex: set the twig_handlers only if a code ref is given sub setTwigRoots { my( $t, $handlers)= @_; my $previous_roots= $t->{twig_roots}; _reset_handlers($t->{twig_roots}); $t->{twig_roots}= _set_handlers( $handlers); 
_check_illegal_twig_roots_handlers( $t->{twig_roots}); foreach my $path (keys %{$handlers}) { $t->{twig_handlers}||= {}; _set_handler( $t->{twig_handlers}, $path, $handlers->{$path}) if( ref($handlers->{$path}) && isa( $handlers->{$path}, 'CODE')); } return $previous_roots; } sub _check_illegal_twig_roots_handlers { my( $handlers)= @_; foreach my $tag_handlers (values %{$handlers->{xpath_handler}}) { foreach my $handler_data (@$tag_handlers) { if( my $type= $handler_data->{test_on_text}) { croak "string() condition not supported on twig_roots option"; } } } return; } # just store the reference to the expat object in the twig sub _twig_init { # warn " in _twig_init...\n"; # DEBUG handler my $p= shift; my $t=$p->{twig}; if( $t->{twig_parsing} ) { croak "cannot reuse a twig that is already parsing"; } $t->{twig_parsing}=1; $t->{twig_parser}= $p; if( $weakrefs) { weaken( $t->{twig_parser}); } # in case they had been created by a previous parse delete $t->{twig_dtd}; delete $t->{twig_doctype}; delete $t->{twig_xmldecl}; delete $t->{twig_root}; # if needed set the output filehandle $t->_set_fh_to_twig_output_fh(); return; } # uses eval to catch the parser's death sub safe_parse { my $t= shift; eval { $t->parse( @_); } ; return $@ ? $t->_reset_twig_after_error : $t; } sub safe_parsefile { my $t= shift; eval { $t->parsefile( @_); } ; return $@ ? $t->_reset_twig_after_error : $t; } # restore a twig in a proper state so it can be reused for a new parse sub _reset_twig { my $t= shift; $t->{twig_parsing}= 0; delete $t->{twig_current}; delete $t->{extra_data}; delete $t->{twig_dtd}; delete $t->{twig_in_pcdata}; delete $t->{twig_in_cdata}; delete $t->{twig_stored_space}; delete $t->{twig_entity_list}; $t->root->delete if( $t->root); delete $t->{twig_root}; return $t; } sub _reset_twig_after_error { my $t= shift; $t->_reset_twig; return undef; } sub _add_or_discard_stored_spaces { my $t= shift; $t->{twig_right_after_root}=0; #XX my $current= $t->{twig_current} or return; # ugly hack, with ignore on, twig_current can disappear return unless length $t->{twig_stored_spaces}; my $current_gi= $XML::Twig::index2gi[$current->{'gi'}]; if( ! $t->{twig_discard_all_spaces}) { if( ! defined( $t->{twig_space_policy}->{$current_gi})) { $t->{twig_space_policy}->{$current_gi}= _space_policy( $t, $current_gi); } if( $t->{twig_space_policy}->{$current_gi} || ($t->{twig_stored_spaces}!~ m{\n}) || $t->{twig_preserve_space}) { _insert_pcdata( $t, $t->{twig_stored_spaces} ); } } $t->{twig_stored_spaces}=''; return; } # the default twig handlers, which build the tree sub _twig_start { # warn " in _twig_start...\n"; # DEBUG handler #foreach my $s (@_) { next if ref $s; warn "$s: ", is_utf8( $s) ? 
"has flag" : "FLAG NOT SET"; } # YYY my ($p, $gi, @att)= @_; my $t=$p->{twig}; # empty the stored pcdata (space stored in case they are really part of # a pcdata element) or stored it if the space policy dictates so # create a pcdata element with the spaces if need be _add_or_discard_stored_spaces( $t); my $parent= $t->{twig_current}; # if we were parsing PCDATA then we exit the pcdata if( $t->{twig_in_pcdata}) { $t->{twig_in_pcdata}= 0; delete $parent->{'twig_current'}; $parent= $parent->{parent}; } # if we choose to keep the encoding then we need to parse the tag if( my $func = $t->{parse_start_tag}) { ($gi, @att)= &$func($p->original_string); } elsif( $t->{twig_entities_in_attribute}) { ($gi,@att)= _parse_start_tag( $p->recognized_string); $t->{twig_entities_in_attribute}=0; } # if we are using an external DTD, we need to fill the default attributes if( $t->{twig_read_external_dtd}) { _fill_default_atts( $t, $gi, \@att); } # filter the input data if need be if( my $filter= $t->{twig_input_filter}) { $gi= $filter->( $gi); foreach my $att (@att) { $att= $filter->($att); } } my $ns_decl; if( $t->{twig_map_xmlns}) { $ns_decl= _replace_ns( $t, \$gi, \@att); } my $elt= $t->{twig_elt_class}->new( $gi); $elt->set_atts( @att); # now we can store the tag and atts my $context= { $ST_TAG => $gi, $ST_ELT => $elt, @att}; $context->{$ST_NS}= $ns_decl if $ns_decl; if( $weakrefs) { weaken( $context->{$ST_ELT}); } push @{$t->{_twig_context_stack}}, $context; delete $parent->{'twig_current'} if( $parent); $t->{twig_current}= $elt; $elt->{'twig_current'}=1; if( $parent) { my $prev_sibling= $parent->{last_child}; if( $prev_sibling) { $prev_sibling->{next_sibling}= $elt; $elt->{prev_sibling}=$prev_sibling; if( $XML::Twig::weakrefs) { weaken( $elt->{prev_sibling});} ; } $elt->{parent}=$parent; if( $XML::Twig::weakrefs) { weaken( $elt->{parent});} ; unless( $parent->{first_child}) { $parent->{first_child}= $elt; } $parent->{empty}=0; $parent->{last_child}=$elt; if( $XML::Twig::weakrefs) { weaken( $parent->{last_child});} ; } else { # processing root $t->set_root( $elt); # call dtd handler if need be $t->{twig_dtd_handler}->($t, $t->{twig_dtd}) if( defined $t->{twig_dtd_handler}); # set this so we can catch external entities # (the handler was modified during DTD processing) if( $t->{twig_default_print}) { $p->setHandlers( Default => \&_twig_print); } elsif( $t->{twig_roots}) { $p->setHandlers( Default => sub { return }); } else { $p->setHandlers( Default => \&_twig_default); } } $elt->{empty}= $p->recognized_string=~ m{/\s*>$}s ? 
1 : 0; $elt->{extra_data}= $t->{extra_data} if( $t->{extra_data}); $t->{extra_data}=''; # if the element is ID-ed then store that info my $id= $elt->{'att'}->{$ID}; if( defined $id) { $t->{twig_id_list}->{$id}= $elt; if( $weakrefs) { weaken( $t->{twig_id_list}->{$id}); } } # call user handler if need be if( $t->{twig_starttag_handlers}) { # call all appropriate handlers my @handlers= _handler( $t, $t->{twig_starttag_handlers}, $gi); local $_= $elt; foreach my $handler ( @handlers) { $handler->($t, $elt) || last; } # call _all_ handler if needed if( my $all= $t->{twig_starttag_handlers}->{handlers}->{$ALL}) { $all->($t, $elt); } } # check if the tag is in the list of tags to be ignored if( $t->{twig_ignore_elts_handlers}) { my @handlers= _handler( $t, $t->{twig_ignore_elts_handlers}, $gi); # only the first handler counts, it contains the action (discard/print/string) if( @handlers) { my $action= shift @handlers; $t->ignore( $elt, $action); } } if( $elt->{'att'}->{'xml:space'} && ( $elt->{'att'}->{'xml:space'} eq 'preserve')) { $t->{twig_preserve_space}++; } return; } sub _replace_ns { my( $t, $gi, $atts)= @_; my $decls; foreach my $new_prefix ( $t->parser->new_ns_prefixes) { my $uri= $t->parser->expand_ns_prefix( $new_prefix); # replace the prefix if it is mapped $decls->{$new_prefix}= $uri; if( !$t->{twig_keep_original_prefix} && (my $mapped_prefix= $t->{twig_map_xmlns}->{$uri})) { $new_prefix= $mapped_prefix; } # now put the namespace declaration back in the element if( $new_prefix eq '#default') { push @$atts, "xmlns" => $uri; } else { push @$atts, "xmlns:$new_prefix" => $uri; } } if( $t->{twig_keep_original_prefix}) { # things become more complex: we need to find the original prefix # and store both prefixes my $ns_info= $t->_ns_info( $$gi); my $map_att; if( $ns_info->{mapped_prefix}) { $$gi= "$ns_info->{mapped_prefix}:$$gi"; $map_att->{$ns_info->{mapped_prefix}}= $ns_info->{prefix}; } my $att_name=1; foreach( @$atts) { if( $att_name) { my $ns_info= $t->_ns_info( $_); if( $ns_info->{mapped_prefix}) { $_= "$ns_info->{mapped_prefix}:$_"; $map_att->{$ns_info->{mapped_prefix}}= $ns_info->{prefix}; } $att_name=0; } else { $att_name=1; } } push @$atts, '#original_gi', $map_att if( $map_att); } else { $$gi= $t->_replace_prefix( $$gi); my $att_name=1; foreach( @$atts) { if( $att_name) { $_= $t->_replace_prefix( $_); $att_name=0; } else { $att_name=1; } } } return $decls; } # extract prefix, local_name, uri, mapped_prefix from a name # will only work if called from a start or end tag handler sub _ns_info { my( $t, $name)= @_; my $ns_info={}; my $p= $t->parser; $ns_info->{uri}= $p->namespace( $name); return $ns_info unless( $ns_info->{uri}); $ns_info->{prefix}= _a_proper_ns_prefix( $p, $ns_info->{uri}); $ns_info->{mapped_prefix}= $t->{twig_map_xmlns}->{$ns_info->{uri}} || $ns_info->{prefix}; return $ns_info; } sub _a_proper_ns_prefix { my( $p, $uri)= @_; foreach my $prefix ($p->current_ns_prefixes) { if( $p->expand_ns_prefix( $prefix) eq $uri) { return $prefix; } } return; } # returns the uri bound to a prefix in the original document # only works in a handler # can be used to deal with xsi:type attributes sub original_uri { my( $t, $prefix)= @_; my $ST_NS = '##ns' ; foreach my $ns (map { $_->{$ST_NS} if $_->{$ST_NS} } reverse @{$t->{_twig_context_stack}}) { return $ns->{$prefix} || next; } return; } sub _fill_default_atts { my( $t, $gi, $atts)= @_; my $dtd= $t->{twig_dtd}; my $attlist= $dtd->{att}->{$gi}; my %value= @$atts; foreach my $att (keys %$attlist) { if( !exists( $value{$att}) && exists( 
$attlist->{$att}->{default}) && ( $attlist->{$att}->{default} ne '#IMPLIED') ) { # the quotes are included in the default, so we need to remove them my $default_value= substr( $attlist->{$att}->{default}, 1, -1); push @$atts, $att, $default_value; } } return; } # the default function to parse a start tag (in keep_encoding mode) # can be overridden with the parse_start_tag method # only works for 1-byte character sets sub _parse_start_tag { my $string= shift; my( $gi, @atts); # get the gi (between < and the first space, / or > character) #if( $string=~ s{^<\s*([^\s>/]*)[\s>/]*}{}s) if( $string=~ s{^<\s*($REG_TAG_NAME)\s*[\s>/]}{}s) { $gi= $1; } else { croak "error parsing tag '$string'"; } while( $string=~ s{^([^\s=]*)\s*=\s*(["'])(.*?)\2\s*}{}s) { push @atts, $1, $3; } return $gi, @atts; } sub set_root { my( $t, $elt)= @_; $t->{twig_root}= $elt; if( $elt) { $elt->{twig}= $t; if( $weakrefs) { weaken( $elt->{twig}); } } return $t; } sub _twig_end { # warn " in _twig_end...\n"; # DEBUG handler my ($p, $gi) = @_; my $t=$p->{twig}; if( $t->{twig_in_pcdata} && (my $text_handler= $t->{TwigHandlers}->{$TEXT}) ) { local $_= $t->{twig_current}; $text_handler->( $t, $_) if $_; } if( $t->{twig_map_xmlns}) { $gi= $t->_replace_prefix( $gi); } _add_or_discard_stored_spaces( $t); # the new twig_current is the parent my $elt= $t->{twig_current}; delete $elt->{'twig_current'}; # if we were parsing PCDATA then we exit the pcdata too if( $t->{twig_in_pcdata}) { $t->{twig_in_pcdata}= 0; $elt= $elt->{parent} if($elt->{parent}); delete $elt->{'twig_current'}; } # parent is the new current element my $parent= $elt->{parent}; $t->{twig_current}= $parent; if( $parent) { $parent->{'twig_current'}=1; # twig_to_be_normalized if( $parent->{twig_to_be_normalized}) { $parent->normalize; $parent->{twig_to_be_normalized}=0; } } if( $t->{extra_data}) { $elt->_set_extra_data_before_end_tag( $t->{extra_data}); $t->{extra_data}=''; } if( $t->{twig_handlers}) { # look for handlers my @handlers= _handler( $t, $t->{twig_handlers}, $gi); if( $t->{twig_tdh}) { if( @handlers) { push @{$t->{twig_handlers_to_trigger}}, [ $elt, \@handlers ]; } if( my $all= $t->{twig_handlers}->{handlers}->{$ALL}) { push @{$t->{twig_handlers_to_trigger}}, [ $elt, [$all] ]; } } else { local $_= $elt; # so we can use $_ in the handlers foreach my $handler ( @handlers) { $handler->($t, $elt) || last; } # call _all_ handler if needed my $all= $t->{twig_handlers}->{handlers}->{$ALL}; if( $all) { $all->($t, $elt); } if( @handlers || $all) { $t->{twig_right_after_root}=0; } } } # if twig_roots is set for the element then set appropriate handler if( $t->{twig_root_depth} and ($p->depth == $t->{twig_root_depth}) ) { if( $t->{twig_default_print}) { # select the proper fh (and store the currently selected one) $t->_set_fh_to_twig_output_fh(); if( !$p->depth==1) { $t->{twig_right_after_root}=1; } #XX if( $t->{twig_keep_encoding}) { $p->setHandlers( %twig_handlers_roots_print_original); } else { $p->setHandlers( %twig_handlers_roots_print); } } else { $p->setHandlers( %twig_handlers_roots); } } if( $elt->{'att'}->{'xml:space'} && ( $elt->{'att'}->{'xml:space'} eq 'preserve')) { $t->{twig_preserve_space}--; } pop @{$t->{_twig_context_stack}}; return; } sub _trigger_tdh { my( $t)= @_; if( @{$t->{twig_handlers_to_trigger}}) { my @handlers_to_trigger_now= sort { $a->[0]->cmp( $b->[0]) } @{$t->{twig_handlers_to_trigger}}; foreach my $elt_handlers (@handlers_to_trigger_now) { my( $handled_elt, $handlers_to_trigger)= @$elt_handlers; foreach my $handler ( 
@$handlers_to_trigger) { local $_= $handled_elt; $handler->($t, $handled_elt) || last; } } } return; } # return the list of handler that can be activated for an element # (either of CODE ref's or 1's for twig_roots) sub _handler { my( $t, $handlers, $gi)= @_; my @found_handlers=(); my $found_handler; foreach my $handler ( map { @$_ } grep { $_ } $handlers->{xpath_handler}->{$gi}, $handlers->{xpath_handler}->{'*'}) { my $trigger= $handler->{trigger}; if( my $found_path= $trigger->( $t->{_twig_context_stack})) { my $found_handler= $handler->{handler}; push @found_handlers, $found_handler; } } # if no handler found call default handler if defined if( !@found_handlers && defined $handlers->{handlers}->{$DEFAULT}) { push @found_handlers, $handlers->{handlers}->{$DEFAULT}; } if( @found_handlers and $t->{twig_do_not_chain_handlers}) { @found_handlers= ($found_handlers[0]); } return @found_handlers; # empty if no handler found } sub _replace_prefix { my( $t, $name)= @_; my $p= $t->parser; my $uri= $p->namespace( $name); # try to get the namespace from default if none is found (for attributes) # this should probably be an option if( !$uri and( $name!~/^xml/)) { $uri= $p->expand_ns_prefix( '#default'); } if( $uri) { if (my $mapped_prefix= $t->{twig_map_xmlns}->{$uri} || $DEFAULT_URI2NS{$uri}) { return "$mapped_prefix:$name"; } else { my $prefix= _a_proper_ns_prefix( $p, $uri); if( $prefix eq '#default') { $prefix=''; } return $prefix ? "$prefix:$name" : $name; } } else { return $name; } } sub _twig_char { # warn " in _twig_char...\n"; # DEBUG handler my ($p, $string)= @_; my $t=$p->{twig}; if( $t->{twig_keep_encoding}) { if( !$t->{twig_in_cdata}) { $string= $p->original_string(); } else { use bytes; # > perl 5.5 if( length( $string) < 1024) { $string= $p->original_string(); } else { #warn "dodgy case"; # TODO original_string does not hold the entire string, but $string is wrong # I believe due to a bug in XML::Parser # for now, we use the original string, even if it means that it's been converted to utf8 } } } if( $t->{twig_input_filter}) { $string= $t->{twig_input_filter}->( $string); } if( $t->{twig_char_handler}) { $string= $t->{twig_char_handler}->( $string); } my $elt= $t->{twig_current}; if( $t->{twig_in_cdata}) { # text is the continuation of a previously created cdata $elt->{cdata}.= $t->{twig_stored_spaces} . 
$string; } elsif( $t->{twig_in_pcdata}) { # text is the continuation of a previously created pcdata if( $t->{extra_data}) { $elt->_push_extra_data_in_pcdata( $t->{extra_data}, length( $elt->{pcdata})); $t->{extra_data}=''; } $elt->{pcdata}.= $string; } else { # text is just space, which might be discarded later if( $string=~/\A\s*\Z/s) { if( $t->{extra_data}) { # we got extra data (comment, pi), lets add the spaces to it $t->{extra_data} .= $string; } else { # no extra data, just store the spaces $t->{twig_stored_spaces}.= $string; } } else { my $new_elt= _insert_pcdata( $t, $t->{twig_stored_spaces}.$string); delete $elt->{'twig_current'}; $new_elt->{'twig_current'}=1; $t->{twig_current}= $new_elt; $t->{twig_in_pcdata}=1; if( $t->{extra_data}) { $new_elt->_push_extra_data_in_pcdata( $t->{extra_data}, 0); $t->{extra_data}=''; } } } return; } sub _twig_cdatastart { # warn " in _twig_cdatastart...\n"; # DEBUG handler my $p= shift; my $t=$p->{twig}; $t->{twig_in_cdata}=1; my $cdata= $t->{twig_elt_class}->new( $CDATA); my $twig_current= $t->{twig_current}; if( $t->{twig_in_pcdata}) { # create the node as a sibling of the PCDATA $cdata->{prev_sibling}=$twig_current; if( $XML::Twig::weakrefs) { weaken( $cdata->{prev_sibling});} ; $twig_current->{next_sibling}= $cdata; my $parent= $twig_current->{parent}; $cdata->{parent}=$parent; if( $XML::Twig::weakrefs) { weaken( $cdata->{parent});} ; $parent->{empty}=0; $parent->{last_child}=$cdata; if( $XML::Twig::weakrefs) { weaken( $parent->{last_child});} ; $t->{twig_in_pcdata}=0; } else { # we have to create a PCDATA element if we need to store spaces if( $t->_space_policy($XML::Twig::index2gi[$twig_current->{'gi'}]) && $t->{twig_stored_spaces}) { _insert_pcdata( $t, $t->{twig_stored_spaces}); } $t->{twig_stored_spaces}=''; # create the node as a child of the current element $cdata->{parent}=$twig_current; if( $XML::Twig::weakrefs) { weaken( $cdata->{parent});} ; if( my $prev_sibling= $twig_current->{last_child}) { $cdata->{prev_sibling}=$prev_sibling; if( $XML::Twig::weakrefs) { weaken( $cdata->{prev_sibling});} ; $prev_sibling->{next_sibling}= $cdata; } else { $twig_current->{first_child}= $cdata; } $twig_current->{empty}=0; $twig_current->{last_child}=$cdata; if( $XML::Twig::weakrefs) { weaken( $twig_current->{last_child});} ; } delete $twig_current->{'twig_current'}; $t->{twig_current}= $cdata; $cdata->{'twig_current'}=1; if( $t->{extra_data}) { $cdata->set_extra_data( $t->{extra_data}); $t->{extra_data}='' }; return; } sub _twig_cdataend { # warn " in _twig_cdataend...\n"; # DEBUG handler my $p= shift; my $t=$p->{twig}; $t->{twig_in_cdata}=0; my $elt= $t->{twig_current}; delete $elt->{'twig_current'}; my $cdata= $elt->{cdata}; $elt->_set_cdata( $cdata); push @{$t->{_twig_context_stack}}, { $ST_TAG => $CDATA }; if( $t->{twig_handlers}) { # look for handlers my @handlers= _handler( $t, $t->{twig_handlers}, $CDATA); local $_= $elt; # so we can use $_ in the handlers foreach my $handler ( @handlers) { $handler->($t, $elt) || last; } } pop @{$t->{_twig_context_stack}}; $elt= $elt->{parent}; $t->{twig_current}= $elt; $elt->{'twig_current'}=1; $t->{twig_long_cdata}=0; return; } sub _pi_elt_handlers { my( $t, $pi)= @_; my $pi_handlers= $t->{twig_handlers}->{pi_handlers} || return; foreach my $handler ( $pi_handlers->{$pi->{target}}, $pi_handlers->{''}) { if( $handler) { local $_= $pi; $handler->( $t, $pi) || last; } } } sub _pi_text_handler { my( $t, $target, $data)= @_; if( my $handler= $t->{twig_handlers}->{pi_handlers}->{$target}) { return $handler->( $t, 
$target, $data); } if( my $handler= $t->{twig_handlers}->{pi_handlers}->{''}) { return $handler->( $t, $target, $data); } return defined( $data) && $data ne '' ? "" : "" ; } sub _comment_elt_handler { my( $t, $comment)= @_; if( my $handler= $t->{twig_handlers}->{handlers}->{$COMMENT}) { local $_= $comment; $handler->($t, $comment); } } sub _comment_text_handler { my( $t, $comment)= @_; if( my $handler= $t->{twig_handlers}->{handlers}->{$COMMENT}) { $comment= $handler->($t, $comment); if( !defined $comment || $comment eq '') { return ''; } } return ""; } sub _twig_comment { # warn " in _twig_comment...\n"; # DEBUG handler my( $p, $comment_text)= @_; my $t=$p->{twig}; if( $t->{twig_keep_encoding}) { $comment_text= substr( $p->original_string(), 4, -3); } $t->_twig_pi_comment( $p, $COMMENT, $t->{twig_keep_comments}, $t->{twig_process_comments}, '_set_comment', '_comment_elt_handler', '_comment_text_handler', $comment_text ); return; } sub _twig_pi { # warn " in _twig_pi...\n"; # DEBUG handler my( $p, $target, $data)= @_; my $t=$p->{twig}; if( $t->{twig_keep_encoding}) { my $pi_text= substr( $p->original_string(), 2, -2); ($target, $data)= split( /\s+/, $pi_text, 2); } $t->_twig_pi_comment( $p, $PI, $t->{twig_keep_pi}, $t->{twig_process_pi}, '_set_pi', '_pi_elt_handlers', '_pi_text_handler', $target, $data ); return; } sub _twig_pi_comment { my( $t, $p, $type, $keep, $process, $set, $elt_handler, $text_handler, @parser_args)= @_; if( $t->{twig_input_filter}) { foreach my $arg (@parser_args) { $arg= $t->{twig_input_filter}->( $arg); } } # if pi/comments are to be kept then we piggyback them to the current element if( $keep) { # first add spaces if( $t->{twig_stored_spaces}) { $t->{extra_data}.= $t->{twig_stored_spaces}; $t->{twig_stored_spaces}= ''; } my $extra_data= $t->$text_handler( @parser_args); $t->{extra_data}.= $extra_data; } elsif( $process) { my $twig_current= $t->{twig_current}; # defined unless we are outside of the root my $elt= $t->{twig_elt_class}->new( $type); $elt->$set( @parser_args); if( $t->{extra_data}) { $elt->set_extra_data( $t->{extra_data}); $t->{extra_data}=''; } unless( $t->root) { $t->_add_cpi_outside_of_root( leading_cpi => $elt); } elsif( $t->{twig_in_pcdata}) { # create the node as a sibling of the PCDATA $elt->paste_after( $twig_current); $t->{twig_in_pcdata}=0; } elsif( $twig_current) { # we have to create a PCDATA element if we need to store spaces if( $t->_space_policy($XML::Twig::index2gi[$twig_current->{'gi'}]) && $t->{twig_stored_spaces}) { _insert_pcdata( $t, $t->{twig_stored_spaces}); } $t->{twig_stored_spaces}=''; # create the node as a child of the current element $elt->paste_last_child( $twig_current); } else { $t->_add_cpi_outside_of_root( trailing_cpi => $elt); } if( $twig_current) { delete $twig_current->{'twig_current'}; my $parent= $elt->{parent}; $t->{twig_current}= $parent; $parent->{'twig_current'}=1; } $t->$elt_handler( $elt); } } # add a comment or pi before the first element sub _add_cpi_outside_of_root { my($t, $type, $elt)= @_; # $type is 'leading_cpi' or 'trailing_cpi' $t->{$type} ||= $t->{twig_elt_class}->new( '#CPI'); # create the node as a child of the current element $elt->paste_last_child( $t->{$type}); return $t; } sub _twig_final { # warn " in _twig_final...\n"; # DEBUG handler my $p= shift; my $t= $p->isa( 'XML::Twig') ? 
$p : $p->{twig}; # store trailing data if( $t->{extra_data}) { $t->{trailing_cpi_text} = $t->{extra_data}; $t->{extra_data}=''; } $t->{trailing_spaces}= $t->{twig_stored_spaces} || ''; my $s= $t->{twig_stored_spaces}; $s=~s{\n}{\\n}g; if( $t->{twig_stored_spaces}) { my $s= $t->{twig_stored_spaces}; } # restore the selected filehandle if needed $t->_set_fh_to_selected_fh(); $t->_trigger_tdh if( $t->{twig_tdh}); select $t->{twig_original_selected_fh} if($t->{twig_original_selected_fh}); # probably dodgy if( exists $t->{twig_autoflush_data}) { my @args; push @args, $t->{twig_autoflush_data}->{fh} if( $t->{twig_autoflush_data}->{fh}); push @args, @{$t->{twig_autoflush_data}->{args}} if( $t->{twig_autoflush_data}->{args}); $t->flush( @args); delete $t->{twig_autoflush_data}; $t->root->delete if $t->root; } # tries to clean-up (probably not very well at the moment) #undef $p->{twig}; undef $t->{twig_parser}; delete $t->{twig_parsing}; @{$t}{ qw( twig_parser twig_parsing _twig_context_stack twig_current) }=(); return $t; } sub _insert_pcdata { my( $t, $string)= @_; # create a new PCDATA element my $parent= $t->{twig_current}; # always defined my $elt; if( exists $t->{twig_alt_elt_class}) { $elt= $t->{twig_elt_class}->new( $PCDATA); $elt->_set_pcdata( $string); } else { $elt= bless( { gi => $XML::Twig::gi2index{$PCDATA}, pcdata => $string }, 'XML::Twig::Elt'); } my $prev_sibling= $parent->{last_child}; if( $prev_sibling) { $prev_sibling->{next_sibling}= $elt; $elt->{prev_sibling}=$prev_sibling; if( $XML::Twig::weakrefs) { weaken( $elt->{prev_sibling});} ; } else { $parent->{first_child}= $elt; } $elt->{parent}=$parent; if( $XML::Twig::weakrefs) { weaken( $elt->{parent});} ; $parent->{empty}=0; $parent->{last_child}=$elt; if( $XML::Twig::weakrefs) { weaken( $parent->{last_child});} ; $t->{twig_stored_spaces}=''; return $elt; } sub _space_policy { my( $t, $gi)= @_; my $policy; $policy=0 if( $t->{twig_discard_spaces}); $policy=1 if( $t->{twig_keep_spaces}); $policy=1 if( $t->{twig_keep_spaces_in} && $t->{twig_keep_spaces_in}->{$gi}); $policy=0 if( $t->{twig_discard_spaces_in} && $t->{twig_discard_spaces_in}->{$gi}); return $policy; } sub _twig_entity { # warn " in _twig_entity...\n"; # DEBUG handler my( $p, $name, $val, $sysid, $pubid, $ndata, $param)= @_; my $t=$p->{twig}; #{ no warnings; my $base= $p->base; warn "_twig_entity called: expand: '$t->{twig_expand_external_ents}', base: '$base', name: '$name', val: '$val', sysid: '$sysid', pubid: '$pubid', ndata: '$ndata', param: '$param'\n";} my $missing_entity=0; if( $sysid) { if($ndata) { if( ! -f _based_filename( $sysid, $p->base)) { $missing_entity= 1; } } else { if( $t->{twig_expand_external_ents}) { $val= eval { _slurp_uri( $sysid, $p->base) }; if( ! defined $val) { if( $t->{twig_extern_ent_nofail}) { $missing_entity= 1; } else { _croak( "cannot load SYSTEM entity '$name' from '$sysid': $@", 3); } } } } } my $ent=XML::Twig::Entity->new( $name, $val, $sysid, $pubid, $ndata, $param); if( $missing_entity) { $t->{twig_missing_system_entities}->{$name}= $ent; } my $entity_list= $t->entity_list; if( $entity_list) { $entity_list->add( $ent); } if( $parser_version > 2.27) { # this is really ugly, but with some versions of XML::Parser the value # of the entity is not properly returned by the default handler my $ent_decl= $ent->text; if( $t->{twig_keep_encoding}) { if( defined $ent->{val} && ($ent_decl !~ /["']/)) { my $val= $ent->{val}; $ent_decl .= $val =~ /"/ ? qq{'$val' } : qq{"$val" }; } # for my solaris box (perl 5.6.1, XML::Parser 2.31, expat?) 
$t->{twig_doctype}->{internal}=~ s{{twig_doctype}->{internal} .= $ent_decl unless( $t->{twig_doctype}->{internal}=~ m{original_string, ")\n"; # DEBUG handler my( $p, $base, $sysid, $pubid)= @_; my $t= $p->{twig}; if( $t->{twig_no_expand}) { my $ent_name= $t->{twig_keep_encoding} ? $p->original_string : $p->recognized_string; _twig_insert_ent( $t, $ent_name); return ''; } my $ent_content= eval { $t->{twig_ext_ent_handler}->( $p, $base, $sysid) }; if( ! defined $ent_content) { my $ent_name = $p->recognized_string; my $file = _based_filename( $sysid, $base); my $error_message= "cannot expand $ent_name - cannot load '$file'"; if( $t->{twig_extern_ent_nofail}) { return ""; } else { _croak( $error_message); } } return $ent_content; } # I use this so I can change the $Carp::CarpLevel (which determines how many call frames to skip when reporting an error) sub _croak { my( $message, $level)= @_; $Carp::CarpLevel= $level || 0; croak $message; } sub _twig_xmldecl { # warn " in _twig_xmldecl...\n"; # DEBUG handler my $p= shift; my $t=$p->{twig}; $t->{twig_xmldecl}||={}; # could have been set by set_output_encoding $t->{twig_xmldecl}->{version}= shift; $t->{twig_xmldecl}->{encoding}= shift; $t->{twig_xmldecl}->{standalone}= shift; return; } sub _twig_doctype { # warn " in _twig_doctype...\n"; # DEBUG handler my( $p, $name, $sysid, $pub, $internal)= @_; my $t=$p->{twig}; $t->{twig_doctype}||= {}; # create $t->{twig_doctype}->{name}= $name; # always there $t->{twig_doctype}->{sysid}= $sysid; # $t->{twig_doctype}->{pub}= $pub; # # now let's try to cope with XML::Parser 2.28 and above if( $parser_version > 2.27) { @saved_default_handler= $p->setHandlers( Default => \&_twig_store_internal_dtd, Entity => \&_twig_entity, ); $p->setHandlers( DoctypeFin => \&_twig_stop_storing_internal_dtd); $t->{twig_doctype}->{internal}=''; } else # for XML::Parser before 2.28 { $internal||=''; $internal=~ s{^\s*\[}{}; $internal=~ s{]\s*$}{}; $t->{twig_doctype}->{internal}=$internal; } # now check if we want to get the DTD info if( $t->{twig_read_external_dtd} && $sysid) { # let's build a fake document with an internal DTD my $dtd= "<$name/>"; $t->save_global_state(); # save the globals (they will be reset by the following new) my $t_dtd= XML::Twig->new( load_DTD => 1, ParseParamEnt => 1, error_context => $t->{ErrorContext} || 0); # create a temp twig $t_dtd->parse( $dtd); # parse it $t->{twig_dtd}= $t_dtd->{twig_dtd}; # grab the dtd info #$t->{twig_dtd_is_external}=1; $t->entity_list->_add_list( $t_dtd->entity_list) if( $t_dtd->entity_list); # grab the entity info $t->restore_global_state(); } return; } sub _twig_element { # warn " in _twig_element...\n"; # DEBUG handler my( $p, $name, $model)= @_; my $t=$p->{twig}; $t->{twig_dtd}||= {}; # may create the dtd $t->{twig_dtd}->{model}||= {}; # may create the model hash $t->{twig_dtd}->{elt_list}||= []; # ordered list of elements push @{$t->{twig_dtd}->{elt_list}}, $name; # store the elt $t->{twig_dtd}->{model}->{$name}= $model; # store the model if( ($parser_version > 2.27) && ($t->{twig_doctype}->{internal}=~ m{(^|>)\s*$}) ) { my $text= $XML::Twig::Elt::keep_encoding ? 
$p->original_string : $p->recognized_string; unless( $text) { # this version of XML::Parser does not return the text in the *_string method # we need to rebuild it $text= ""; } $t->{twig_doctype}->{internal} .= $text; } return; } sub _twig_attlist { # warn " in _twig_attlist...\n"; # DEBUG handler my( $p, $gi, $att, $type, $default, $fixed)= @_; #warn "in attlist: gi: '$gi', att: '$att', type: '$type', default: '$default', fixed: '$fixed'\n"; my $t=$p->{twig}; $t->{twig_dtd}||= {}; # create dtd if need be $t->{twig_dtd}->{$gi}||= {}; # create elt if need be #$t->{twig_dtd}->{$gi}->{att}||= {}; # create att if need be if( ($parser_version > 2.27) && ($t->{twig_doctype}->{internal}=~ m{(^|>)\s*$}) ) { my $text= $XML::Twig::Elt::keep_encoding ? $p->original_string : $p->recognized_string; unless( $text) { # this version of XML::Parser does not return the text in the *_string method # we need to rebuild it my $att_decl="$att $type"; $att_decl .= " #FIXED" if( $fixed); $att_decl .= " $default" if( defined $default); # 2 cases: there is already an attlist on that element or not if( $t->{twig_dtd}->{att}->{$gi}) { # there is already an attlist, add to it $t->{twig_doctype}->{internal}=~ s{(} { "$1$2\n" . ' ' x length( $1) . "$att_decl\n>"}es; } else { # create the attlist $t->{twig_doctype}->{internal}.= "" } } } $t->{twig_dtd}->{att}->{$gi}->{$att}= {} ; $t->{twig_dtd}->{att}->{$gi}->{$att}->{type}= $type; $t->{twig_dtd}->{att}->{$gi}->{$att}->{default}= $default if( defined $default); $t->{twig_dtd}->{att}->{$gi}->{$att}->{fixed}= $fixed; return; } sub _twig_default { # warn " in _twig_default...\n"; # DEBUG handler my( $p, $string)= @_; my $t= $p->{twig}; # we need to process the data in 2 cases: entity, or spaces after the closing tag # after the closing tag (no twig_current and root has been created) if( ! 
$t->{twig_current} && $t->{twig_root} && $string=~ m{^\s+$}m) { $t->{twig_stored_spaces} .= $string; } # process only if we have an entity if( $string=~ m{^&([^;]*);$}) { # the entity has to be pure pcdata, or we have a problem if( ($p->original_string=~ m{^<}) && ($p->original_string=~ m{>$}) ) { # string is a tag, entity is in an attribute $t->{twig_entities_in_attribute}=1 if( $t->{twig_do_not_escape_amp_in_atts}); } else { my $ent; if( $t->{twig_keep_encoding}) { _twig_char( $p, $string); $ent= substr( $string, 1, -1); } else { $ent= _twig_insert_ent( $t, $string); } return $ent; } } } sub _twig_insert_ent { my( $t, $string)=@_; my $twig_current= $t->{twig_current}; my $ent= $t->{twig_elt_class}->new( $ENT); $ent->{ent}= $string; _add_or_discard_stored_spaces( $t); if( $t->{twig_in_pcdata}) { # create the node as a sibling of the #PCDATA $ent->{prev_sibling}=$twig_current; if( $XML::Twig::weakrefs) { weaken( $ent->{prev_sibling});} ; $twig_current->{next_sibling}= $ent; my $parent= $twig_current->{parent}; $ent->{parent}=$parent; if( $XML::Twig::weakrefs) { weaken( $ent->{parent});} ; $parent->{empty}=0; $parent->{last_child}=$ent; if( $XML::Twig::weakrefs) { weaken( $parent->{last_child});} ; # the twig_current is now the parent delete $twig_current->{'twig_current'}; $t->{twig_current}= $parent; # we left pcdata $t->{twig_in_pcdata}=0; } else { # create the node as a child of the current element $ent->{parent}=$twig_current; if( $XML::Twig::weakrefs) { weaken( $ent->{parent});} ; if( my $prev_sibling= $twig_current->{last_child}) { $ent->{prev_sibling}=$prev_sibling; if( $XML::Twig::weakrefs) { weaken( $ent->{prev_sibling});} ; $prev_sibling->{next_sibling}= $ent; } else { if( $twig_current) { $twig_current->{first_child}= $ent; } } if( $twig_current) { $twig_current->{empty}=0; $twig_current->{last_child}=$ent; if( $XML::Twig::weakrefs) { weaken( $twig_current->{last_child});} ; } } # meant to trigger entity handler, does not seem to be activated at this time #if( my $handler= $t->{twig_handlers}->{gi}->{$ENT}) # { local $_= $ent; $handler->( $t, $ent); } return $ent; } sub parser { return $_[0]->{twig_parser}; } # returns the declaration text (or a default one) sub xmldecl { my $t= shift; return '' unless( $t->{twig_xmldecl} || $t->{output_encoding}); my $decl_string; my $decl= $t->{twig_xmldecl}; if( $decl) { my $version= $decl->{version}; $decl_string= q{{output_encoding}) # or come from the document (in $decl->{encoding}) if( $t->{output_encoding}) { my $encoding= $t->{output_encoding}; $decl_string .= qq{ encoding="$encoding"}; } elsif( $decl->{encoding}) { my $encoding= $decl->{encoding}; $decl_string .= qq{ encoding="$encoding"}; } if( defined( $decl->{standalone})) { $decl_string .= q{ standalone="}; $decl_string .= $decl->{standalone} ? "yes" : "no"; $decl_string .= q{"}; } $decl_string .= "?>\n"; } else { my $encoding= $t->{output_encoding}; $decl_string= qq{}; } my $output_filter= XML::Twig::Elt::output_filter(); return $output_filter ? 
$output_filter->( $decl_string) : $decl_string; } sub set_doctype { my( $t, $name, $system, $public, $internal)= @_; $t->{twig_doctype}= {} unless defined $t->{twig_doctype}; my $doctype= $t->{twig_doctype}; $doctype->{name} = $name if( defined $name); $doctype->{sysid} = $system if( defined $system); $doctype->{pub} = $public if( defined $public); $doctype->{internal} = $internal if( defined $internal); } sub doctype_name { my $t= shift; my $doctype= $t->{twig_doctype} or return ''; return $doctype->{name} || ''; } sub system_id { my $t= shift; my $doctype= $t->{twig_doctype} or return ''; return $doctype->{sysid} || ''; } sub public_id { my $t= shift; my $doctype= $t->{twig_doctype} or return ''; return $doctype->{pub} || ''; } sub internal_subset { my $t= shift; my $doctype= $t->{twig_doctype} or return ''; return $doctype->{internal} || ''; } # return the dtd object sub dtd { my $t= shift; return $t->{twig_dtd}; } # return an element model, or the list of element models sub model { my $t= shift; my $elt= shift; return $t->dtd->{model}->{$elt} if( $elt); return (sort keys %{$t->dtd->{model}}); } # return the entity_list object sub entity_list { my $t= shift; return $t->{twig_entity_list}; } # return the list of entity names sub entity_names { my $t= shift; return $t->entity_list->entity_names; } # return the entity object sub entity { my $t= shift; my $entity_name= shift; return $t->entity_list->ent( $entity_name); } sub print_prolog { my $t= shift; my $fh= isa( $_[0], 'GLOB') || isa( $_[0], 'IO::Scalar') ? shift : $t->{twig_output_fh} || select() || \*STDOUT; ## no critic (TestingAndDebugging::ProhibitNoStrict); no strict 'refs'; print {$fh} $t->prolog( @_); } sub prolog { my $t= shift; if( $t->{no_prolog}){ return ''; } return $t->{no_prolog} ? '' : defined $t->{no_dtd_output} ? $t->xmldecl : $t->xmldecl . $t->doctype( @_); } sub doctype { my $t= shift; my %args= _normalize_args( @_); my $update_dtd = $args{UpdateDTD} || ''; my $doctype_text=''; my $doctype= $t->{twig_doctype}; if( $doctype) { $doctype_text .= qq{{name}} if( $doctype->{name}); $doctype_text .= qq{ PUBLIC "$doctype->{pub}"} if( $doctype->{pub}); $doctype_text .= qq{ SYSTEM} if( $doctype->{sysid} && !$doctype->{pub}); $doctype_text .= qq{ "$doctype->{sysid}"} if( $doctype->{sysid}); } if( $update_dtd) { if( $doctype) { my $internal=$doctype->{internal}; # awful hack, but at least it works a little better that what was there before if( $internal) { # remove entity declarations (they will be re-generated from the updated entity list) $internal=~ s{]*) >\s*}{}xg; $internal=~ s{^\n}{}; } $internal .= $t->entity_list->text ||'' if( $t->entity_list); if( $internal) { $doctype_text .= "[\n$internal]>\n"; } } elsif( !$t->{'twig_dtd'} && keys %{$t->entity_list}) { $doctype_text .= "root->gi . " [\n" . $t->entity_list->text . 
"\n]>";;} else { $doctype_text= $t->{twig_dtd}; $doctype_text .= $t->dtd_text; } } elsif( $doctype) { if( my $internal= $doctype->{internal}) { # add opening and closing brackets if not already there # plus some spaces and newlines for a nice formating # I test it here because I can't remember which version of # XML::Parser need it or not, nor guess which one will in the # future, so this about the best I can do $internal=~ s{^\s*(\[\s*)?}{ [\n}; $internal=~ s{\s*(\]\s*(>\s*)?)?\s*$}{\n]>\n}; $doctype_text .= $internal; } } if( $doctype_text) { # terrible hack, as I can't figure out in which case the darn prolog # should get an extra > (depends on XML::Parser and expat versions) $doctype_text=~ s/(>\s*)*$/>\n/; # if($doctype_text); my $output_filter= XML::Twig::Elt::output_filter(); return $output_filter ? $output_filter->( $doctype_text) : $doctype_text; } else { return $doctype_text; } } sub _leading_cpi { my $t= shift; my $leading_cpi= $t->{leading_cpi} || return ''; return $leading_cpi->sprint( 1); } sub _trailing_cpi { my $t= shift; my $trailing_cpi= $t->{trailing_cpi} || return ''; return $trailing_cpi->sprint( 1); } sub _trailing_cpi_text { my $t= shift; return $t->{trailing_cpi_text} || ''; } sub print_to_file { my( $t, $filename)= (shift, shift); my $out_fh; # open( $out_fh, ">$filename") or _croak( "cannot create file $filename: $!"); # < perl 5.8 my $mode= $t->{twig_keep_encoding} ? '>' : '>:utf8'; # >= perl 5.8 open( $out_fh, $mode, $filename) or _croak( "cannot create file $filename: $!"); # >= perl 5.8 $t->print( $out_fh, @_); close $out_fh; return $t; } # probably only works on *nix (at least the chmod bit) # first print to a temporary file, then rename that file to the desired file name, then change permissions # to the original file permissions (or to the current umask) sub safe_print_to_file { my( $t, $filename)= (shift, shift); my $perm= -f $filename ? (stat $filename)[2] & 07777 : ~umask() ; XML::Twig::_use( 'File::Temp') || croak "need File::Temp to use safe_print_to_file\n"; my $tmpdir= dirname( $filename); my( $fh, $tmpfilename) = File::Temp::tempfile( DIR => $tmpdir); $t->print_to_file( $tmpfilename, @_); rename( $tmpfilename, $filename) or unlink $tmpfilename && _croak( "cannot move temporary file to $filename: $!"); chmod $perm, $filename; return $t; } sub print { my $t= shift; my $fh= isa( $_[0], 'GLOB') || isa( $_[0], 'IO::Scalar') ? shift : undef; my %args= _normalize_args( @_); my $old_select = defined $fh ? select $fh : undef; my $old_pretty = defined ($args{PrettyPrint}) ? $t->set_pretty_print( $args{PrettyPrint}) : undef; my $old_empty_tag = defined ($args{EmptyTags}) ? $t->set_empty_tag_style( $args{EmptyTags}) : undef; #if( !$t->{encoding} || lc( $t->{encoding}) eq 'utf-8') { my $out= $fh || \*STDOUT; binmode $out, ':utf8'; } if( $perl_version > 5.006 && ! $t->{twig_keep_encoding}) { if( grep /useperlio=define/, `$^X -V`) # we can only use binmode :utf8 if perl was compiled with useperlio { binmode( $fh || \*STDOUT, ":utf8" ); } } print $t->prolog( %args) . $t->_leading_cpi( %args); $t->{twig_root}->print; print $t->_trailing_cpi # trailing comments and pi's (elements, in 'process' mode) . $t->_trailing_cpi_text # trailing comments and pi's (in 'keep' mode) . 
( ($t->{twig_keep_spaces}||'') && ($t->{trailing_spaces} || '')) ; $t->set_pretty_print( $old_pretty) if( defined $old_pretty); $t->set_empty_tag_style( $old_empty_tag) if( defined $old_empty_tag); if( $fh) { select $old_select; } return $t; } sub flush { my $t= shift; $t->_trigger_tdh if $t->{twig_tdh}; return if( $t->{twig_completely_flushed}); my $fh= isa( $_[0], 'GLOB') || isa( $_[0], 'IO::Scalar') ? shift : undef; my $old_select= defined $fh ? select $fh : undef; my $up_to= ref $_[0] ? shift : undef; my %args= _normalize_args( @_); my $old_pretty; if( defined $args{PrettyPrint}) { $old_pretty= $t->set_pretty_print( $args{PrettyPrint}); delete $args{PrettyPrint}; } my $old_empty_tag_style; if( $args{EmptyTags}) { $old_empty_tag_style= $t->set_empty_tag_style( $args{EmptyTags}); delete $args{EmptyTags}; } # the "real" last element processed, as _twig_end has closed it my $last_elt; my $flush_trailing_data=0; if( $up_to) { $last_elt= $up_to; } elsif( $t->{twig_current}) { $last_elt= $t->{twig_current}->_last_child; } else { $last_elt= $t->{twig_root}; $flush_trailing_data=1; $t->{twig_completely_flushed}=1; } # flush the DTD unless it has ready flushed (ie root has been flushed) my $elt= $t->{twig_root}; unless( $elt->_flushed) { # store flush info so we can auto-flush later if( $t->{twig_autoflush}) { $t->{twig_autoflush_data}={}; $t->{twig_autoflush_data}->{fh} = $fh if( $fh); $t->{twig_autoflush_data}->{args} = \@_ if( @_); } $t->print_prolog( %args); print $t->_leading_cpi; } while( $elt) { my $next_elt; if( $last_elt && $last_elt->in( $elt)) { unless( $elt->_flushed) { # just output the front tag print $elt->start_tag(); $elt->_set_flushed; } $next_elt= $elt->{first_child}; } else { # an element before the last one or the last one, $next_elt= $elt->{next_sibling}; $elt->_flush(); $elt->delete; last if( $last_elt && ($elt == $last_elt)); } $elt= $next_elt; } if( $flush_trailing_data) { print $t->_trailing_cpi # trailing comments and pi's (elements, in 'process' mode) , $t->_trailing_cpi_text # trailing comments and pi's (in 'keep' mode) } select $old_select if( defined $old_select); $t->set_pretty_print( $old_pretty) if( defined $old_pretty); $t->set_empty_tag_style( $old_empty_tag_style) if( defined $old_empty_tag_style); if( my $ids= $t->{twig_id_list}) { while( my ($id, $elt)= each %$ids) { if( ! defined $elt) { delete $t->{twig_id_list}->{$id} } } } return $t; } # flushes up to an element # this method just reorders the arguments and calls flush sub flush_up_to { my $t= shift; my $up_to= shift; if( isa( $_[0], 'GLOB') || isa( $_[0], 'IO::Scalar')) { my $fh= shift; $t->flush( $fh, $up_to, @_); } else { $t->flush( $up_to, @_); } return $t; } # same as print except the entire document text is returned as a string sub sprint { my $t= shift; my %args= _normalize_args( @_); my $old_pretty; if( defined $args{PrettyPrint}) { $old_pretty= $t->set_pretty_print( $args{PrettyPrint}); delete $args{PrettyPrint}; } my $old_empty_tag_style; if( defined $args{EmptyTags}) { $old_empty_tag_style= $t->set_empty_tag_style( $args{EmptyTags}); delete $args{EmptyTags}; } my $string= $t->prolog( %args) # xml declaration and doctype . $t->_leading_cpi( %args) # leading comments and pi's in 'process' mode . ( ($t->{twig_root} && $t->{twig_root}->sprint) || '') . $t->_trailing_cpi # trailing comments and pi's (elements, in 'process' mode) . 
$t->_trailing_cpi_text # trailing comments and pi's (in 'keep' mode) ; if( $t->{twig_keep_spaces} && $t->{trailing_spaces}) { $string .= $t->{trailing_spaces}; } $t->set_pretty_print( $old_pretty) if( defined $old_pretty); $t->set_empty_tag_style( $old_empty_tag_style) if( defined $old_empty_tag_style); return $string; } # this method discards useless elements in a tree # it does the same thing as a flush except it does not print it # the second argument is an element, the last purged element # (this argument is usually set through the purge_up_to method) sub purge { my $t= shift; my $up_to= shift; $t->_trigger_tdh if $t->{twig_tdh}; # the "real" last element processed, as _twig_end has closed it my $last_elt; if( $up_to) { $last_elt= $up_to; } elsif( $t->{twig_current}) { $last_elt= $t->{twig_current}->_last_child; } else { $last_elt= $t->{twig_root}; } my $elt= $t->{twig_root}; while( $elt) { my $next_elt; if( $last_elt && $last_elt->in( $elt)) { $elt->_set_flushed; $next_elt= $elt->{first_child}; } else { # an element before the last one or the last one, $next_elt= $elt->{next_sibling}; $elt->delete; last if( $last_elt && ($elt == $last_elt) ); } $elt= $next_elt; } if( my $ids= $t->{twig_id_list}) { while( my ($id, $elt)= each %$ids) { if( ! defined $elt) { delete $t->{twig_id_list}->{$id} } } } return $t; } # flushes up to an element. This method just calls purge sub purge_up_to { my $t= shift; return $t->purge( @_); } sub root { return $_[0]->{twig_root}; } sub normalize { return $_[0]->root->normalize; } # create accessor methods on attribute names { my %accessor; # memorize accessor names so re-creating them won't trigger an error sub att_accessors { my $twig_or_class= shift; my $elt_class= ref $twig_or_class ? $twig_or_class->{twig_elt_class} : 'XML::Twig::Elt' ; ## no critic (TestingAndDebugging::ProhibitNoStrict); no strict 'refs'; foreach my $att (@_) { _croak( "attempt to redefine existing method $att using att_accessors") if( $elt_class->can( $att) && !$accessor{$att}); if( !$accessor{$att}) { *{"$elt_class\::$att"}= sub :lvalue # > perl 5.5 { my $elt= shift; if( @_) { $elt->{att}->{$att}= $_[0]; } $elt->{att}->{$att}; }; $accessor{$att}=1; } } return $twig_or_class; } } { my %accessor; # memorize accessor names so re-creating them won't trigger an error sub elt_accessors { my $twig_or_class= shift; my $elt_class= ref $twig_or_class ? $twig_or_class->{twig_elt_class} : 'XML::Twig::Elt' ; # if arg is a hash ref, it's exp => name, otherwise it's a list of tags my %exp_to_alias= ref( $_[0]) && isa( $_[0], 'HASH') ? %{$_[0]} : map { $_ => $_ } @_; ## no critic (TestingAndDebugging::ProhibitNoStrict); no strict 'refs'; while( my( $alias, $exp)= each %exp_to_alias ) { if( $elt_class->can( $alias) && !$accessor{$alias}) { _croak( "attempt to redefine existing method $alias using elt_accessors"); } if( !$accessor{$alias}) { *{"$elt_class\::$alias"}= sub { my $elt= shift; return wantarray ? $elt->children( $exp) : $elt->first_child( $exp); }; $accessor{$alias}=1; } } return $twig_or_class; } } { my %accessor; # memorize accessor names so re-creating them won't trigger an error sub field_accessors { my $twig_or_class= shift; my $elt_class= ref $twig_or_class ? $twig_or_class->{twig_elt_class} : 'XML::Twig::Elt' ; my %exp_to_alias= ref( $_[0]) && isa( $_[0], 'HASH') ? 
%{$_[0]} : map { $_ => $_ } @_; ## no critic (TestingAndDebugging::ProhibitNoStrict); no strict 'refs'; while( my( $alias, $exp)= each %exp_to_alias ) { if( $elt_class->can( $alias) && !$accessor{$alias}) { _croak( "attempt to redefine existing method $exp using field_accessors"); } if( !$accessor{$alias}) { *{"$elt_class\::$alias"}= sub { my $elt= shift; $elt->field( $exp) }; $accessor{$alias}=1; } } return $twig_or_class; } } sub first_elt { my( $t, $cond)= @_; my $root= $t->root || return undef; return $root if( $root->passes( $cond)); return $root->next_elt( $cond); } sub last_elt { my( $t, $cond)= @_; my $root= $t->root || return undef; return $root->last_descendant( $cond); } sub next_n_elt { my( $t, $offset, $cond)= @_; $offset -- if( $t->root->matches( $cond) ); return $t->root->next_n_elt( $offset, $cond); } sub get_xpath { my $twig= shift; if( isa( $_[0], 'ARRAY')) { my $elt_array= shift; return _unique_elts( map { $_->get_xpath( @_) } @$elt_array); } else { return $twig->root->get_xpath( @_); } } # get a list of elts and return a sorted list of unique elts sub _unique_elts { my @sorted= sort { $a ->cmp( $b) } @_; my @unique; while( my $current= shift @sorted) { push @unique, $current unless( @unique && ($unique[-1] == $current)); } return @unique; } sub findvalue { my $twig= shift; if( isa( $_[0], 'ARRAY')) { my $elt_array= shift; return join( '', map { $_->findvalue( @_) } @$elt_array); } else { return $twig->root->findvalue( @_); } } sub findvalues { my $twig= shift; if( isa( $_[0], 'ARRAY')) { my $elt_array= shift; return map { $_->findvalues( @_) } @$elt_array; } else { return $twig->root->findvalues( @_); } } sub set_id_seed { my $t= shift; XML::Twig::Elt->set_id_seed( @_); return $t; } # return an array ref to an index, or undef sub index { my( $twig, $name, $index)= @_; return defined( $index) ? 
$twig->{_twig_index}->{$name}->[$index] : $twig->{_twig_index}->{$name}; } # return a list with just the root # if a condition is given then return an empty list unless the root matches sub children { my( $t, $cond)= @_; my $root= $t->root; unless( $cond && !($root->passes( $cond)) ) { return ($root); } else { return (); } } sub _children { return ($_[0]->root); } # weird, but here for completude # used to solve (non-sensical) /doc[1] XPath queries sub child { my $t= shift; my $nb= shift; return ($t->children( @_))[$nb]; } sub descendants { my( $t, $cond)= @_; my $root= $t->root; if( $root->passes( $cond) ) { return ($root, $root->descendants( $cond)); } else { return ( $root->descendants( $cond)); } } sub simplify { my $t= shift; $t->root->simplify( @_); } sub subs_text { my $t= shift; $t->root->subs_text( @_); } sub trim { my $t= shift; $t->root->trim( @_); } sub set_keep_encoding { my( $t, $keep)= @_; $t->{twig_keep_encoding}= $keep; $t->{NoExpand}= $keep; return XML::Twig::Elt::set_keep_encoding( $keep); } sub set_expand_external_entities { return XML::Twig::Elt::set_expand_external_entities( @_); } sub escape_gt { my $t= shift; $t->{twig_escape_gt}= 1; return XML::Twig::Elt::escape_gt( @_); } sub do_not_escape_gt { my $t= shift; $t->{twig_escape_gt}= 0; return XML::Twig::Elt::do_not_escape_gt( @_); } sub elt_id { return $_[0]->{twig_id_list}->{$_[1]}; } # change it in ALL twigs at the moment sub change_gi { my( $twig, $old_gi, $new_gi)= @_; my $index; return unless($index= $XML::Twig::gi2index{$old_gi}); $XML::Twig::index2gi[$index]= $new_gi; delete $XML::Twig::gi2index{$old_gi}; $XML::Twig::gi2index{$new_gi}= $index; return $twig; } # builds the DTD from the stored (possibly updated) data sub dtd_text { my $t= shift; my $dtd= $t->{twig_dtd}; my $doctype= $t->{twig_doctype} or return ''; my $string= "{name}; $string .= " [\n"; foreach my $gi (@{$dtd->{elt_list}}) { $string.= "{model}->{$gi}.">\n" ; if( $dtd->{att}->{$gi}) { my $attlist= $dtd->{att}->{$gi}; $string.= "{$att}->{fixed}) { $string.= " $att $attlist->{$att}->{type} #FIXED $attlist->{$att}->{default}"; } else { $string.= " $att $attlist->{$att}->{type} $attlist->{$att}->{default}"; } $string.= "\n"; } $string.= ">\n"; } } $string.= $t->entity_list->text if( $t->entity_list); $string.= "\n]>\n"; return $string; } # prints the DTD from the stored (possibly updated) data sub dtd_print { my $t= shift; my $fh= isa( $_[0], 'GLOB') || isa( $_[0], 'IO::Scalar') ? shift : undef; if( $fh) { print $fh $t->dtd_text; } else { print $t->dtd_text; } return $t; } # build the subs that call directly expat BEGIN { my @expat_methods= qw( depth in_element within_element context current_line current_column current_byte recognized_string original_string xpcroak xpcarp base current_element element_index xml_escape position_in_context); foreach my $method (@expat_methods) { ## no critic (TestingAndDebugging::ProhibitNoStrict); no strict 'refs'; *{$method}= sub { my $t= shift; _croak( "calling $method after parsing is finished") unless( $t->{twig_parsing}); return $t->{twig_parser}->$method(@_); }; } } sub path { my( $t, $gi)= @_; if( $t->{twig_map_xmlns}) { return "/" . join( "/", map { $t->_replace_prefix( $_)} ($t->{twig_parser}->context, $gi)); } else { return "/" . 
join( "/", ($t->{twig_parser}->context, $gi)); } } sub finish { my $t= shift; return $t->{twig_parser}->finish; } # just finish the parse by printing the rest of the document sub finish_print { my( $t, $fh)= @_; my $old_fh; unless( defined $fh) { $t->_set_fh_to_twig_output_fh(); } elsif( defined $fh) { $old_fh= select $fh; $t->{twig_original_selected_fh}= $old_fh if( $old_fh); } my $p=$t->{twig_parser}; if( $t->{twig_keep_encoding}) { $p->setHandlers( %twig_handlers_finish_print); } else { $p->setHandlers( %twig_handlers_finish_print_original); } return $t; } sub set_remove_cdata { return XML::Twig::Elt::set_remove_cdata( @_); } sub output_filter { return XML::Twig::Elt::output_filter( @_); } sub set_output_filter { return XML::Twig::Elt::set_output_filter( @_); } sub output_text_filter { return XML::Twig::Elt::output_text_filter( @_); } sub set_output_text_filter { return XML::Twig::Elt::set_output_text_filter( @_); } sub set_input_filter { my( $t, $input_filter)= @_; my $old_filter= $t->{twig_input_filter}; if( !$input_filter || isa( $input_filter, 'CODE') ) { $t->{twig_input_filter}= $input_filter; } elsif( $input_filter eq 'latin1') { $t->{twig_input_filter}= latin1(); } elsif( $filter{$input_filter}) { $t->{twig_input_filter}= $filter{$input_filter}; } else { _croak( "invalid input filter: $input_filter"); } return $old_filter; } sub set_empty_tag_style { return XML::Twig::Elt::set_empty_tag_style( @_); } sub set_pretty_print { return XML::Twig::Elt::set_pretty_print( @_); } sub set_quote { return XML::Twig::Elt::set_quote( @_); } sub set_indent { return XML::Twig::Elt::set_indent( @_); } sub set_keep_atts_order { shift; return XML::Twig::Elt::set_keep_atts_order( @_); } sub keep_atts_order { return XML::Twig::Elt::keep_atts_order( @_); } sub set_do_not_escape_amp_in_atts { return XML::Twig::Elt::set_do_not_escape_amp_in_atts( @_); } # save and restore package globals (the ones in XML::Twig::Elt) # should probably return the XML::Twig object itself, but instead # returns the state (as a hashref) for backward compatibility sub save_global_state { my $t= shift; return $t->{twig_saved_state}= XML::Twig::Elt::global_state(); } sub restore_global_state { my $t= shift; XML::Twig::Elt::set_global_state( $t->{twig_saved_state}); } sub global_state { return XML::Twig::Elt::global_state(); } sub set_global_state { return XML::Twig::Elt::set_global_state( $_[1]); } sub dispose { my $t= shift; $t->DESTROY; return; } sub DESTROY { my $t= shift; if( $t->{twig_root} && isa( $t->{twig_root}, 'XML::Twig::Elt')) { $t->{twig_root}->delete } # added to break circular references undef $t->{twig}; undef $t->{twig_root}->{twig} if( $t->{twig_root}); undef $t->{twig_parser}; undef %$t;# prevents memory leaks (especially when using mod_perl) undef $t; } # # non standard handlers # # kludge: expat 1.95.2 calls both Default AND Doctype handlers # so if the default handler finds 'recognized_string(); if( $string eq 'setHandlers( Default => undef); $p->setHandlers( Entity => undef); $expat_1_95_2=1; } else { print $string; } return; } sub _twig_print { # warn " in _twig_print...\n"; # DEBUG handler my $p= shift; if( $expat_1_95_2 && ($p->recognized_string eq '[') && !$p->{twig}->{expat_1_95_2_seen_bracket}) { # otherwise the opening square bracket of the doctype gets printed twice $p->{twig}->{expat_1_95_2_seen_bracket}=1; } else { if( $p->{twig}->{twig_right_after_root}) { my $s= $p->recognized_string(); print $s if $s=~ m{\S}; } else { print $p->recognized_string(); } } return; } # recognized_string does not 
seem to work for entities, go figure! # so this handler is used to print them anyway sub _twig_print_entity { # warn " in _twig_print_entity...\n"; # DEBUG handler my $p= shift; XML::Twig::Entity->new( @_)->print; } # kludge: expat 1.95.2 calls both Default AND Doctype handlers # so if the default handler finds 'original_string(); if( $string eq 'setHandlers( Default => undef); $p->setHandlers( Entity => undef); $expat_1_95_2=1; } else { print $string; } return; } sub _twig_print_original { # warn " in _twig_print_original...\n"; # DEBUG handler my $p= shift; print $p->original_string(); return; } sub _twig_print_original_doctype { # warn " in _twig_print_original_doctype...\n"; # DEBUG handler my( $p, $name, $sysid, $pubid, $internal)= @_; if( $name) { # with recent versions of XML::Parser original_string does not work, # hence we need to rebuild the doctype declaration my $doctype=''; $doctype .= qq{} unless( $internal || $expat_1_95_2); $p->{twig}->{twig_doctype}->{has_internal}=$internal; print $doctype; } $p->setHandlers( Default => \&_twig_print_original); return; } sub _twig_print_doctype { # warn " in _twig_print_doctype...\n"; # DEBUG handler my( $p, $name, $sysid, $pubid, $internal)= @_; if( $name) { # with recent versions of XML::Parser original_string does not work, # hence we need to rebuild the doctype declaration my $doctype=''; $doctype .= qq{} unless( $internal || $expat_1_95_2); $p->{twig}->{twig_doctype}->{has_internal}=$internal; print $doctype; } $p->setHandlers( Default => \&_twig_print); return; } sub _twig_print_original_default { # warn " in _twig_print_original_default...\n"; # DEBUG handler my $p= shift; print $p->original_string(); return; } # account for the case where the element is empty sub _twig_print_end_original { # warn " in _twig_print_end_original...\n"; # DEBUG handler my $p= shift; print $p->original_string(); return; } sub _twig_start_check_roots { # warn " in _twig_start_check_roots...\n"; # DEBUG handler my $p= shift; my $gi= shift; my $t= $p->{twig}; my $fh= $t->{twig_output_fh} || select() || \*STDOUT; my $ns_decl; unless( $p->depth == 0) { if( $t->{twig_map_xmlns}) { $ns_decl= _replace_ns( $t, \$gi, \@_); } } my $context= { $ST_TAG => $gi, @_}; $context->{$ST_NS}= $ns_decl if $ns_decl; push @{$t->{_twig_context_stack}}, $context; my %att= @_; if( _handler( $t, $t->{twig_roots}, $gi)) { $p->setHandlers( %twig_handlers); # restore regular handlers $t->{twig_root_depth}= $p->depth; pop @{$t->{_twig_context_stack}}; # will be pushed back in _twig_start _twig_start( $p, $gi, @_); return; } # $tag will always be true if it needs to be printed (the tag string is never empty) my $tag= $t->{twig_default_print} ? $t->{twig_keep_encoding} ? 
$p->original_string : $p->recognized_string : ''; if( $p->depth == 0) { ## no critic (TestingAndDebugging::ProhibitNoStrict); no strict 'refs'; print {$fh} $tag if( $tag); pop @{$t->{_twig_context_stack}}; # will be pushed back in _twig_start _twig_start( $p, $gi, @_); $t->root->_set_flushed; # or the root start tag gets output the first time we flush } elsif( $t->{twig_starttag_handlers}) { # look for start tag handlers my @handlers= _handler( $t, $t->{twig_starttag_handlers}, $gi); my $last_handler_res; foreach my $handler ( @handlers) { $last_handler_res= $handler->($t, $gi, %att); last unless $last_handler_res; } ## no critic (TestingAndDebugging::ProhibitNoStrict); no strict 'refs'; print {$fh} $tag if( $tag && (!@handlers || $last_handler_res)); } else { ## no critic (TestingAndDebugging::ProhibitNoStrict); no strict 'refs'; print {$fh} $tag if( $tag); } return; } sub _twig_end_check_roots { # warn " in _twig_end_check_roots...\n"; # DEBUG handler my( $p, $gi, %att)= @_; my $t= $p->{twig}; # $tag can be empty (), hence the undef and the tests for defined my $tag= $t->{twig_default_print} ? $t->{twig_keep_encoding} ? $p->original_string : $p->recognized_string : undef; my $fh= $t->{twig_output_fh} || select() || \*STDOUT; if( $t->{twig_endtag_handlers}) { # look for end tag handlers my @handlers= _handler( $t, $t->{twig_endtag_handlers}, $gi); my $last_handler_res=1; foreach my $handler ( @handlers) { $last_handler_res= $handler->($t, $gi) || last; } #if( ! $last_handler_res) # { pop @{$t->{_twig_context_stack}}; warn "tested"; # return; # } } { ## no critic (TestingAndDebugging::ProhibitNoStrict); no strict 'refs'; print {$fh} $tag if( defined $tag); } if( $p->depth == 0) { _twig_end( $p, $gi); $t->root->{end_tag_flushed}=1; } pop @{$t->{_twig_context_stack}}; return; } sub _twig_pi_check_roots { # warn " in _twig_pi_check_roots...\n"; # DEBUG handler my( $p, $target, $data)= @_; my $t= $p->{twig}; my $pi= $t->{twig_default_print} ? $t->{twig_keep_encoding} ? $p->original_string : $p->recognized_string : undef; my $fh= $t->{twig_output_fh} || select() || \*STDOUT; if( my $handler= $t->{twig_handlers}->{pi_handlers}->{$target} || $t->{twig_handlers}->{pi_handlers}->{''} ) { # if handler is called on pi, then it needs to be processed as a regular node my @flags= qw( twig_process_pi twig_keep_pi); my @save= @{$t}{@flags}; # save pi related flags @{$t}{@flags}= (1, 0); # override them, pi needs to be processed _twig_pi( @_); # call handler on the pi @{$t}{@flags}= @save;; # restore flag } else { ## no critic (TestingAndDebugging::ProhibitNoStrict); no strict 'refs'; print {$fh} $pi if( defined( $pi)); } return; } sub _output_ignored { my( $t, $p)= @_; my $action= $t->{twig_ignore_action}; my $get_string= $t->{twig_keep_encoding} ? 
'original_string' : 'recognized_string'; if( $action eq 'print' ) { print $p->$get_string; } else { my $string_ref; if( $action eq 'string') { $string_ref= \$t->{twig_buffered_string}; } elsif( ref( $action) && ref( $action) eq 'SCALAR') { $string_ref= $action; } else { _croak( "wrong ignore action: $action"); } $$string_ref .= $p->$get_string; } } sub _twig_ignore_start { # warn " in _twig_ignore_start...\n"; # DEBUG handler my( $p, $gi)= @_; my $t= $p->{twig}; $t->{twig_ignore_level}++; my $action= $t->{twig_ignore_action}; $t->_output_ignored( $p) unless $action eq 'discard'; return; } sub _twig_ignore_end { # warn " in _twig_ignore_end...\n"; # DEBUG handler my( $p, $gi)= @_; my $t= $p->{twig}; my $action= $t->{twig_ignore_action}; $t->_output_ignored( $p) unless $action eq 'discard'; $t->{twig_ignore_level}--; if( ! $t->{twig_ignore_level}) { $t->{twig_current} = $t->{twig_ignore_elt}; $t->{twig_current}->set_twig_current; $t->{twig_ignore_elt}->cut; # there could possibly be a memory leak here (delete would avoid it, # but could also delete elements that should not be deleted) # restore the saved stack to the current level splice( @{$t->{_twig_context_stack}}, $p->depth+ 1 ); #warn "stack: ", _dump_stack( $t->{_twig_context_stack}), "\n"; $p->setHandlers( @{$t->{twig_saved_handlers}}); # test for handlers if( $t->{twig_endtag_handlers}) { # look for end tag handlers my @handlers= _handler( $t, $t->{twig_endtag_handlers}, $gi); my $last_handler_res=1; foreach my $handler ( @handlers) { $last_handler_res= $handler->($t, $gi) || last; } } pop @{$t->{_twig_context_stack}}; }; return; } #sub _dump_stack { my( $stack)= @_; return join( ":", map { $_->{$ST_TAG} } @$stack); } sub ignore { my( $t, $elt, $action)= @_; my $current= $t->{twig_current}; if( ! ($elt && ref( $elt) && isa( $elt, 'XML::Twig::Elt'))) { $elt= $current; } #warn "ignore: current = ", $current->tag, ", elt = ", $elt->tag, ")\n"; # we need the ($elt == $current->{last_child}) test because the current element is set to the # parent _before_ handlers are called (and I can't figure out how to fix this) unless( ($elt == $current) || ($current->{last_child} && ($elt == $current->{last_child})) || $current->in( $elt)) { _croak( "element to be ignored must be ancestor of current element"); } $t->{twig_ignore_level}= $current == $elt ? 1 : $t->_level_in_stack( $current) - $t->_level_in_stack($elt) + 1; #warn "twig_ignore_level: $t->{twig_ignore_level} (current: ", $current->tag, ", elt: ", $elt->tag, ")\n"; $t->{twig_ignore_elt} = $elt; # save it, so we can delete it later $action ||= 'discard'; if( !($action eq 'print' || $action eq 'string' || ( ref( $action) && ref( $action) eq 'SCALAR'))) { $action= 'discard'; } $t->{twig_ignore_action}= $action; my $p= $t->{twig_parser}; my @saved_handlers= $p->setHandlers( %twig_handlers_ignore); # set handlers my $get_string= $t->{twig_keep_encoding} ? 'original_string' : 'recognized_string'; my $default_handler; if( $action ne 'discard') { if( $action eq 'print') { $p->setHandlers( Default => sub { print $_[0]->$get_string; }); } else { my $string_ref; if( $action eq 'string') { if( ! 
exists $t->{twig_buffered_string}) { $t->{twig_buffered_string}=''; } $string_ref= \$t->{twig_buffered_string}; } elsif( ref( $action) && ref( $action) eq 'SCALAR') { $string_ref= $action; } $p->setHandlers( Default => sub { $$string_ref .= $_[0]->$get_string; }); } $t->_output_ignored( $p, $action); } $t->{twig_saved_handlers}= \@saved_handlers; # save current handlers } sub _level_in_stack { my( $t, $elt)= @_; my $level=1; foreach my $elt_in_stack ( @{$t->{_twig_context_stack}} ) { if( $elt_in_stack->{$ST_ELT} && ($elt == $elt_in_stack->{$ST_ELT})) { return $level } $level++; } } # select $t->{twig_output_fh} and store the current selected fh sub _set_fh_to_twig_output_fh { my $t= shift; my $output_fh= $t->{twig_output_fh}; if( $output_fh && !$t->{twig_output_fh_selected}) { # there is an output fh $t->{twig_selected_fh}= select(); # store the currently selected fh $t->{twig_output_fh_selected}=1; select $output_fh; # select the output fh for the twig } } # select the fh that was stored in $t->{twig_selected_fh} # (before $t->{twig_output_fh} was selected) sub _set_fh_to_selected_fh { my $t= shift; return unless( $t->{twig_output_fh}); my $selected_fh= $t->{twig_selected_fh}; $t->{twig_output_fh_selected}=0; select $selected_fh; return; } sub encoding { return $_[0]->{twig_xmldecl}->{encoding} if( $_[0]->{twig_xmldecl}); } sub set_encoding { my( $t, $encoding)= @_; $t->{twig_xmldecl} ||={}; $t->set_xml_version( "1.0") unless( $t->xml_version); $t->{twig_xmldecl}->{encoding}= $encoding; return $t; } sub output_encoding { return $_[0]->{output_encoding}; } sub set_output_encoding { my( $t, $encoding)= @_; my $output_filter= $t->output_filter || ''; if( ($encoding && $encoding !~ m{^utf-?8$}i) || $t->{twig_keep_encoding} || $output_filter) { $t->set_output_filter( _encoding_filter( $encoding || '')); } $t->{output_encoding}= $encoding; return $t; } sub xml_version { return $_[0]->{twig_xmldecl}->{version} if( $_[0]->{twig_xmldecl}); } sub set_xml_version { my( $t, $version)= @_; $t->{twig_xmldecl} ||={}; $t->{twig_xmldecl}->{version}= $version; return $t; } sub standalone { return $_[0]->{twig_xmldecl}->{standalone} if( $_[0]->{twig_xmldecl}); } sub set_standalone { my( $t, $standalone)= @_; $t->{twig_xmldecl} ||={}; $t->set_xml_version( "1.0") unless( $t->xml_version); $t->{twig_xmldecl}->{standalone}= $standalone; return $t; } # SAX methods sub toSAX1 { _croak( "cannot use toSAX1 while parsing (use flush_toSAX1)") if (defined $_[0]->{twig_parser}); shift(@_)->_toSAX(@_, \&XML::Twig::Elt::_start_tag_data_SAX1, \&XML::Twig::Elt::_end_tag_data_SAX1 ); } sub toSAX2 { _croak( "cannot use toSAX2 while parsing (use flush_toSAX2)") if (defined $_[0]->{twig_parser}); shift(@_)->_toSAX(@_, \&XML::Twig::Elt::_start_tag_data_SAX2, \&XML::Twig::Elt::_end_tag_data_SAX2 ); } sub _toSAX { my( $t, $handler, $start_tag_data, $end_tag_data) = @_; if( my $start_document = $handler->can( 'start_document')) { $start_document->( $handler); } $t->_prolog_toSAX( $handler); if( $t->root) { $t->root->_toSAX( $handler, $start_tag_data, $end_tag_data) ; } if( my $end_document = $handler->can( 'end_document')) { $end_document->( $handler); } } sub flush_toSAX1 { shift(@_)->_flush_toSAX(@_, \&XML::Twig::Elt::_start_tag_data_SAX1, \&XML::Twig::Elt::_end_tag_data_SAX1 ); } sub flush_toSAX2 { shift(@_)->_flush_toSAX(@_, \&XML::Twig::Elt::_start_tag_data_SAX2, \&XML::Twig::Elt::_end_tag_data_SAX2 ); } sub _flush_toSAX { my( $t, $handler, $start_tag_data, $end_tag_data)= @_; # the "real" last element processed, as _twig_end 
has closed it my $last_elt; if( $t->{twig_current}) { $last_elt= $t->{twig_current}->_last_child; } else { $last_elt= $t->{twig_root}; } my $elt= $t->{twig_root}; unless( $elt->_flushed) { # init unless already done (ie root has been flushed) if( my $start_document = $handler->can( 'start_document')) { $start_document->( $handler); } # flush the DTD $t->_prolog_toSAX( $handler) } while( $elt) { my $next_elt; if( $last_elt && $last_elt->in( $elt)) { unless( $elt->_flushed) { # just output the front tag if( my $start_element = $handler->can( 'start_element')) { if( my $tag_data= $start_tag_data->( $elt)) { $start_element->( $handler, $tag_data); } } $elt->_set_flushed; } $next_elt= $elt->{first_child}; } else { # an element before the last one or the last one, $next_elt= $elt->{next_sibling}; $elt->_toSAX( $handler, $start_tag_data, $end_tag_data); $elt->delete; last if( $last_elt && ($elt == $last_elt)); } $elt= $next_elt; } if( !$t->{twig_parsing}) { if( my $end_document = $handler->can( 'end_document')) { $end_document->( $handler); } } } sub _prolog_toSAX { my( $t, $handler)= @_; $t->_xmldecl_toSAX( $handler); $t->_DTD_toSAX( $handler); } sub _xmldecl_toSAX { my( $t, $handler)= @_; my $decl= $t->{twig_xmldecl}; my $data= { Version => $decl->{version}, Encoding => $decl->{encoding}, Standalone => $decl->{standalone}, }; if( my $xml_decl= $handler->can( 'xml_decl')) { $xml_decl->( $handler, $data); } } sub _DTD_toSAX { my( $t, $handler)= @_; my $doctype= $t->{twig_doctype}; return unless( $doctype); my $data= { Name => $doctype->{name}, PublicId => $doctype->{pub}, SystemId => $doctype->{sysid}, }; if( my $start_dtd= $handler->can( 'start_dtd')) { $start_dtd->( $handler, $data); } # I should call code to export the internal subset here if( my $end_dtd= $handler->can( 'end_dtd')) { $end_dtd->( $handler); } } # input/output filters sub latin1 { local $SIG{__DIE__}; if( _use( 'Encode')) { return encode_convert( 'ISO-8859-15'); } elsif( _use( 'Text::Iconv')) { return iconv_convert( 'ISO-8859-15'); } elsif( _use( 'Unicode::Map8') && _use( 'Unicode::String')) { return unicode_convert( 'ISO-8859-15'); } else { return \®exp2latin1; } } sub _encoding_filter { { local $SIG{__DIE__}; my $encoding= $_[1] || $_[0]; if( _use( 'Encode')) { my $sub= encode_convert( $encoding); return $sub; } elsif( _use( 'Text::Iconv')) { return iconv_convert( $encoding); } elsif( _use( 'Unicode::Map8') && _use( 'Unicode::String')) { return unicode_convert( $encoding); } } _croak( "Encode, Text::Iconv or Unicode::Map8 and Unicode::String need to be installed in order to use encoding options"); } # shamelessly lifted from XML::TyePYX (works only with XML::Parse 2.27) sub regexp2latin1 { my $text=shift; $text=~s{([\xc0-\xc3])(.)}{ my $hi = ord($1); my $lo = ord($2); chr((($hi & 0x03) <<6) | ($lo & 0x3F)) }ge; return $text; } sub html_encode { _use( 'HTML::Entities') or croak "cannot use html_encode: missing HTML::Entities"; return HTML::Entities::encode_entities($_[0] ); } sub safe_encode { my $str= shift; if( $perl_version < 5.008) { # the no utf8 makes the regexp work in 5.6 no utf8; # = perl 5.6 $str =~ s{([\xC0-\xDF].|[\xE0-\xEF]..|[\xF0-\xFF]...)} {_XmlUtf8Decode($1)}egs; } else { $str= encode( ascii => $str, $FB_HTMLCREF); } return $str; } sub safe_encode_hex { my $str= shift; if( $perl_version < 5.008) { # the no utf8 makes the regexp work in 5.6 no utf8; # = perl 5.6 $str =~ s{([\xC0-\xDF].|[\xE0-\xEF]..|[\xF0-\xFF]...)} {_XmlUtf8Decode($1, 1)}egs; } else { $str= encode( ascii => $str, $FB_XMLCREF); } return $str; } 
# this one shamelessly lifted from XML::DOM # does NOT work on 5.8.0 sub _XmlUtf8Decode { my ($str, $hex) = @_; my $len = length ($str); my $n; if ($len == 2) { my @n = unpack "C2", $str; $n = (($n[0] & 0x3f) << 6) + ($n[1] & 0x3f); } elsif ($len == 3) { my @n = unpack "C3", $str; $n = (($n[0] & 0x1f) << 12) + (($n[1] & 0x3f) << 6) + ($n[2] & 0x3f); } elsif ($len == 4) { my @n = unpack "C4", $str; $n = (($n[0] & 0x0f) << 18) + (($n[1] & 0x3f) << 12) + (($n[2] & 0x3f) << 6) + ($n[3] & 0x3f); } elsif ($len == 1) # just to be complete... { $n = ord ($str); } else { croak "bad value [$str] for _XmlUtf8Decode"; } my $char= $hex ? sprintf ("&#x%x;", $n) : "&#$n;"; return $char; } sub unicode_convert { my $enc= $_[1] ? $_[1] : $_[0]; # so the method can be called on the twig or directly _use( 'Unicode::Map8') or croak "Unicode::Map8 not available, needed for encoding filter: $!"; _use( 'Unicode::String') or croak "Unicode::String not available, needed for encoding filter: $!"; import Unicode::String qw(utf8); my $sub= eval qq{ { $NO_WARNINGS; my \$cnv; BEGIN { \$cnv= Unicode::Map8->new(\$enc) or croak "Can't create converter to \$enc"; } sub { return \$cnv->to8 (utf8(\$_[0])->ucs2); } } }; unless( $sub) { croak $@; } return $sub; } sub iconv_convert { my $enc= $_[1] ? $_[1] : $_[0]; # so the method can be called on the twig or directly _use( 'Text::Iconv') or croak "Text::Iconv not available, needed for encoding filter: $!"; my $sub= eval qq{ { $NO_WARNINGS; my \$cnv; BEGIN { \$cnv = Text::Iconv->new( 'utf8', \$enc) or croak "Can't create iconv converter to \$enc"; } sub { return \$cnv->convert( \$_[0]); } } }; unless( $sub) { if( $@=~ m{^Unsupported conversion: Invalid argument}) { croak "Unsupported encoding: $enc"; } else { croak $@; } } return $sub; } sub encode_convert { my $enc= $_[1] ? $_[1] : $_[0]; # so the method can be called on the twig or directly my $sub= eval qq{sub { $NO_WARNINGS; return encode( "$enc", \$_[0]); } }; croak "can't create Encode-based filter: $@" unless( $sub); return $sub; } # XML::XPath compatibility sub getRootNode { return $_[0]; } sub getParentNode { return undef; } sub getChildNodes { my @children= ($_[0]->root); return wantarray ? @children : \@children; } sub _weakrefs { return $weakrefs; } sub _set_weakrefs { $weakrefs=shift() || 0; XML::Twig::Elt::set_destroy()if ! 
$weakrefs; } # for testing purposes sub _dump { my $t= shift; my $dump=''; $dump="document\n"; # should dump twig level data here if( $t->root) { $dump .= $t->root->_dump( @_); } return $dump; } 1; ###################################################################### package XML::Twig::Entity_list; ###################################################################### *isa= *UNIVERSAL::isa; sub new { my $class = shift; my $self={ entities => {}, updated => 0}; bless $self, $class; return $self; } sub add_new_ent { my $ent_list= shift; my $ent= XML::Twig::Entity->new( @_); $ent_list->add( $ent); return $ent_list; } sub _add_list { my( $ent_list, $to_add)= @_; my $ents_to_add= $to_add->{entities}; return $ent_list unless( $ents_to_add && %$ents_to_add); @{$ent_list->{entities}}{keys %$ents_to_add}= values %$ents_to_add; $ent_list->{updated}=1; return $ent_list; } sub add { my( $ent_list, $ent)= @_; $ent_list->{entities}->{$ent->{name}}= $ent; $ent_list->{updated}=1; return $ent_list; } sub ent { my( $ent_list, $ent_name)= @_; return $ent_list->{entities}->{$ent_name}; } # can be called with an entity or with an entity name sub delete { my $ent_list= shift; if( isa( ref $_[0], 'XML::Twig::Entity')) { # the second arg is an entity my $ent= shift; delete $ent_list->{entities}->{$ent->{name}}; } else { # the second arg was not entity, must be a string then my $name= shift; delete $ent_list->{entities}->{$name}; } $ent_list->{updated}=1; return $ent_list; } sub print { my ($ent_list, $fh)= @_; my $old_select= defined $fh ? select $fh : undef; foreach my $ent_name ( sort keys %{$ent_list->{entities}}) { my $ent= $ent_list->{entities}->{$ent_name}; # we have to test what the entity is or un-defined entities can creep in if( isa( $ent, 'XML::Twig::Entity')) { $ent->print(); } } select $old_select if( defined $old_select); return $ent_list; } sub text { my ($ent_list)= @_; return join "\n", map { $ent_list->{entities}->{$_}->text} sort keys %{$ent_list->{entities}}; } # return the list of entity names sub entity_names { my $ent_list= shift; return (sort keys %{$ent_list->{entities}}) ; } sub list { my ($ent_list)= @_; return map { $ent_list->{entities}->{$_} } sort keys %{$ent_list->{entities}}; } 1; ###################################################################### package XML::Twig::Entity; ###################################################################### #*isa= *UNIVERSAL::isa; sub new { my( $class, $name, $val, $sysid, $pubid, $ndata, $param)= @_; $class= ref( $class) || $class; my $self={}; $self->{name} = $name; $self->{val} = $val if( defined $val ); $self->{sysid} = $sysid if( defined $sysid); $self->{pubid} = $pubid if( defined $pubid); $self->{ndata} = $ndata if( defined $ndata); $self->{param} = $param if( defined $param); bless $self, $class; return $self; } sub name { return $_[0]->{name}; } sub val { return $_[0]->{val}; } sub sysid { return defined( $_[0]->{sysid}) ? $_[0]->{sysid} : ''; } sub pubid { return defined( $_[0]->{pubid}) ? $_[0]->{pubid} : ''; } sub ndata { return defined( $_[0]->{ndata}) ? $_[0]->{ndata} : ''; } sub param { return defined( $_[0]->{param}) ? $_[0]->{param} : ''; } sub print { my ($ent, $fh)= @_; my $text= $ent->text; if( $fh) { print $fh $text . "\n"; } else { print $text . 
"\n"; } } sub sprint { my ($ent)= @_; return $ent->text; } sub text { my ($ent)= @_; #warn "text called: '", $ent->_dump, "'\n"; return '' if( !$ent->{name}); my @tokens; push @tokens, '{param}); push @tokens, $ent->{name}; if( defined $ent->{val} && !defined( $ent->{sysid}) && !defined($ent->{pubid}) ) { push @tokens, _quoted_val( $ent->{val}); } elsif( defined $ent->{sysid}) { push @tokens, 'PUBLIC', _quoted_val( $ent->{pubid}) if( $ent->{pubid}); push @tokens, 'SYSTEM' unless( $ent->{pubid}); push @tokens, _quoted_val( $ent->{sysid}); push @tokens, 'NDATA', $ent->{ndata} if( $ent->{ndata}); } return join( ' ', @tokens) . '>'; } sub _quoted_val { my $q= $_[0]=~ m{"} ? q{'} : q{"}; return qq{$q$_[0]$q}; } sub _dump { my( $ent)= @_; return join( " - ", map { "$_ => '$ent->{$_}'" } grep { defined $ent->{$_} } sort keys %$ent); } 1; ###################################################################### package XML::Twig::Elt; ###################################################################### use Carp; *isa= *UNIVERSAL::isa; my $CDATA_START = ""; my $PI_START = ""; my $COMMENT_START = ""; my $XMLNS_URI = 'http://www.w3.org/2000/xmlns/'; BEGIN { # set some aliases for methods *tag = *gi; *name = *gi; *set_tag = *set_gi; *set_name = *set_gi; *find_nodes = *get_xpath; # as in XML::DOM *findnodes = *get_xpath; # as in XML::LibXML *field = *first_child_text; *trimmed_field = *first_child_trimmed_text; *is_field = *contains_only_text; *is = *passes; *matches = *passes; *has_child = *first_child; *has_children = *first_child; *all_children_pass = *all_children_are; *all_children_match= *all_children_are; *getElementsByTagName= *descendants; *find_by_tag_name= *descendants_or_self; *unwrap = *erase; *inner_xml = *xml_string; *outer_xml = *sprint; *add_class = *add_to_class; *first_child_is = *first_child_matches; *last_child_is = *last_child_matches; *next_sibling_is = *next_sibling_matches; *prev_sibling_is = *prev_sibling_matches; *next_elt_is = *next_elt_matches; *prev_elt_is = *prev_elt_matches; *parent_is = *parent_matches; *child_is = *child_matches; *inherited_att = *inherit_att; *sort_children_by_value= *sort_children_on_value; *has_atts= *att_nb; # imports from XML::Twig *_is_fh= *XML::Twig::_is_fh; # XML::XPath compatibility *string_value = *text; *toString = *sprint; *getName = *gi; *getRootNode = *twig; *getNextSibling = *_next_sibling; *getPreviousSibling = *_prev_sibling; *isElementNode = *is_elt; *isTextNode = *is_text; *isPI = *is_pi; *isPINode = *is_pi; *isProcessingInstructionNode= *is_pi; *isComment = *is_comment; *isCommentNode = *is_comment; *getTarget = *target; *getFirstChild = *_first_child; *getLastChild = *_last_child; # try using weak references # test whether we can use weak references { local $SIG{__DIE__}; if( eval 'require Scalar::Util' && defined( &Scalar::Util::weaken) ) { import Scalar::Util qw(weaken); } elsif( eval 'require WeakRef') { import WeakRef; } } } # can be called as XML::Twig::Elt->new( [[$gi, $atts, [@content]]) # - gi is an optional gi given to the element # - $atts is a hashref to attributes for the element # - @content is an optional list of text and elements that will # be inserted under the element sub new { my $class= shift; $class= ref $class || $class; my $elt = {}; bless ($elt, $class); return $elt unless @_; if( @_ == 1 && $_[0]=~ m{^\s*<}) { return $class->parse( @_); } # if a gi is passed then use it my $gi= shift; $elt->{gi}=$XML::Twig::gi2index{$gi} or $elt->set_gi( $gi); my $atts= ref $_[0] eq 'HASH' ? 
shift : undef; if( $atts && defined $atts->{$CDATA}) { delete $atts->{$CDATA}; my $cdata= $class->new( $CDATA => @_); return $class->new( $gi, $atts, $cdata); } if( $gi eq $PCDATA) { if( grep { ref $_ } @_) { croak "element $PCDATA can only be created from text"; } $elt->_set_pcdata( join( '', @_)); } elsif( $gi eq $ENT) { $elt->{ent}= shift; } elsif( $gi eq $CDATA) { if( grep { ref $_ } @_) { croak "element $CDATA can only be created from text"; } $elt->_set_cdata( join( '', @_)); } elsif( $gi eq $COMMENT) { if( grep { ref $_ } @_) { croak "element $COMMENT can only be created from text"; } $elt->_set_comment( join( '', @_)); } elsif( $gi eq $PI) { if( grep { ref $_ } @_) { croak "element $PI can only be created from text"; } $elt->_set_pi( shift, join( '', @_)); } else { # the rest of the arguments are the content of the element if( @_) { $elt->set_content( @_); } else { $elt->{empty}= 1; } } if( $atts) { # the attribute hash can be used to pass the asis status if( defined $atts->{$ASIS}) { $elt->set_asis( $atts->{$ASIS} ); delete $atts->{$ASIS}; } if( defined $atts->{$EMPTY}) { $elt->{empty}= $atts->{$EMPTY}; delete $atts->{$EMPTY}; } if( keys %$atts) { $elt->set_atts( $atts); } $elt->_set_id( $atts->{$ID}) if( $atts->{$ID}); } return $elt; } # optimized version of $elt->new( PCDATA, $text); sub _new_pcdata { my $class= $_[0]; $class= ref $class || $class; my $elt = {}; bless $elt, $class; $elt->{gi}=$XML::Twig::gi2index{$PCDATA} or $elt->set_gi( $PCDATA); $elt->_set_pcdata( $_[1]); return $elt; } # this function creates an XM:::Twig::Elt from a string # it is quite clumsy at the moment, as it just creates a # new twig then returns its root # there might also be memory leaks there # additional arguments are passed to new XML::Twig sub parse { my $class= shift; if( ref( $class)) { $class= ref( $class); } my $string= shift; my %args= @_; my $t= XML::Twig->new(%args); $t->parse( $string); my $elt= $t->root; # clean-up the node delete $elt->{twig}; # get rid of the twig data delete $elt->{twig_current}; # better get rid of this too if( $t->{twig_id_list}) { $elt->{twig_id_list}= $t->{twig_id_list}; } $elt->cut; undef $t->{twig_root}; return $elt; } sub set_inner_xml { my( $elt, $xml, @args)= @_; my $new_elt= $elt->parse( "$xml", @args); $elt->cut_children; $new_elt->paste_first_child( $elt); $new_elt->erase; return $elt; } sub set_outer_xml { my( $elt, $xml, @args)= @_; my $new_elt= $elt->parse( "$xml", @args); $elt->cut_children; $new_elt->replace( $elt); $new_elt->erase; return $new_elt; } sub set_inner_html { my( $elt, $html)= @_; my $t= XML::Twig->new->parse_html( "$html"); my $new_elt= $t->root; if( $elt->tag eq 'head') { $new_elt->first_child( 'head')->unwrap; $new_elt->first_child( 'body')->cut; } elsif( $elt->tag ne 'html') { $new_elt->first_child( 'head')->cut; $new_elt->first_child( 'body')->unwrap; } $new_elt->cut; $elt->cut_children; $new_elt->paste_first_child( $elt); $new_elt->erase; return $elt; } sub set_gi { my ($elt, $gi)= @_; unless( defined $XML::Twig::gi2index{$gi}) { # new gi, create entries in %gi2index and @index2gi push @XML::Twig::index2gi, $gi; $XML::Twig::gi2index{$gi}= $#XML::Twig::index2gi; } $elt->{gi}= $XML::Twig::gi2index{$gi}; return $elt; } sub gi { return $XML::Twig::index2gi[$_[0]->{gi}]; } sub local_name { my $elt= shift; return _local_name( $XML::Twig::index2gi[$elt->{'gi'}]); } sub ns_prefix { my $elt= shift; return _ns_prefix( $XML::Twig::index2gi[$elt->{'gi'}]); } # namespace prefix for any qname (can be used for elements or attributes) sub 
_ns_prefix { my $qname= shift; if( $qname=~ m{^([^:]*):}) { return $1; } else { return( ''); } # should it be '' ? } # local name for any qname (can be used for elements or attributes) sub _local_name { my $qname= shift; (my $local= $qname)=~ s{^[^:]*:}{}; return $local; } #sub get_namespace sub namespace ## no critic (Subroutines::ProhibitNestedSubs); { my $elt= shift; my $prefix= defined $_[0] ? shift() : $elt->ns_prefix; my $ns_att= $prefix ? "xmlns:$prefix" : "xmlns"; my $expanded= $DEFAULT_NS{$prefix} || $elt->_inherit_att_through_cut( $ns_att) || ''; return $expanded; } sub declare_missing_ns ## no critic (Subroutines::ProhibitNestedSubs); { my $root= shift; my %missing_prefix; my $map= $root->_current_ns_prefix_map; foreach my $prefix (keys %$map) { my $prefix_att= $prefix eq '#default' ? 'xmlns' : "xmlns:$prefix"; if( ! $root->{'att'}->{$prefix_att}) { $root->set_att( $prefix_att => $map->{$prefix}); } } return $root; } sub _current_ns_prefix_map { my( $elt)= shift; my $map; while( $elt) { foreach my $att ($elt->att_names) { my $prefix= $att eq 'xmlns' ? '#default' : $att=~ m{^xmlns:(.*)$} ? $1 : next ; if( ! exists $map->{$prefix}) { $map->{$prefix}= $elt->{'att'}->{$att}; } } $elt= $elt->{parent} || ($elt->{former} && $elt->{former}->{parent}); } return $map; } sub set_ns_decl { my( $elt, $uri, $prefix)= @_; my $ns_att= $prefix ? "xmlns:$prefix" : 'xmlns'; $elt->set_att( $ns_att => $uri); return $elt; } sub set_ns_as_default { my( $root, $uri)= @_; my @ns_decl_to_remove; foreach my $elt ($root->descendants_or_self) { if( $elt->_ns_prefix && $elt->namespace eq $uri) { $elt->set_tag( $elt->local_name); } # store any namespace declaration for that uri foreach my $ns_decl (grep { $_=~ m{xmlns(:|$)} && $elt->{'att'}->{$_} eq $uri } $elt->att_names) { push @ns_decl_to_remove, [$elt, $ns_decl]; } } $root->set_ns_decl( $uri); # now remove the ns declarations (if done earlier then descendants of an element with the ns declaration # are not considered being in the namespace foreach my $ns_decl_to_remove ( @ns_decl_to_remove) { my( $elt, $ns_decl)= @$ns_decl_to_remove; $elt->del_att( $ns_decl); } return $root; } # return #ELT for an element and #PCDATA... for others sub get_type { my $gi_nb= $_[0]->{gi}; # the number, not the string return $ELT if( $gi_nb >= $XML::Twig::SPECIAL_GI); return $_[0]->gi; } # return the gi if it's a "real" element, 0 otherwise sub is_elt { if( $_[0]->{gi} >= $XML::Twig::SPECIAL_GI) { return $_[0]->gi; } else { return 0; } } sub is_pcdata { my $elt= shift; return (exists $elt->{'pcdata'}); } sub is_cdata { my $elt= shift; return (exists $elt->{'cdata'}); } sub is_pi { my $elt= shift; return (exists $elt->{'target'}); } sub is_comment { my $elt= shift; return (exists $elt->{'comment'}); } sub is_ent { my $elt= shift; return (exists $elt->{ent} || $elt->{ent_name}); } sub is_text { my $elt= shift; return (exists( $elt->{'pcdata'}) || (exists $elt->{'cdata'})); } sub is_empty { return $_[0]->{empty} || 0; } sub set_empty { $_[0]->{empty}= defined( $_[1]) ? 
$_[1] : 1; return $_[0]; } sub set_not_empty { delete $_[0]->{empty} if( $_[0]->{'empty'}); return $_[0]; } sub set_asis { my $elt=shift; foreach my $descendant ($elt, $elt->_descendants ) { $descendant->{asis}= 1; if( (exists $descendant->{'cdata'})) { $descendant->{gi}=$XML::Twig::gi2index{$PCDATA} or $descendant->set_gi( $PCDATA); $descendant->_set_pcdata( $descendant->{cdata}); } } return $elt; } sub set_not_asis { my $elt=shift; foreach my $descendant ($elt, $elt->descendants) { delete $descendant->{asis} if $descendant->{asis};} return $elt; } sub is_asis { return $_[0]->{asis}; } sub closed { my $elt= shift; my $t= $elt->twig || return; my $curr_elt= $t->{twig_current}; return 1 unless( $curr_elt); return $curr_elt->in( $elt); } sub set_pcdata { my( $elt, $pcdata)= @_; if( $elt->{extra_data_in_pcdata}) { _try_moving_extra_data( $elt, $pcdata); } $elt->{pcdata}= $pcdata; return $elt; } sub _extra_data_in_pcdata { return $_[0]->{extra_data_in_pcdata}; } sub _set_extra_data_in_pcdata { $_[0]->{extra_data_in_pcdata}= $_[1]; return $_[0]; } sub _del_extra_data_in_pcdata { delete $_[0]->{extra_data_in_pcdata}; return $_[0]; } sub _unshift_extra_data_in_pcdata { my $e= shift; $e->{extra_data_in_pcdata}||=[]; unshift @{$e->{extra_data_in_pcdata}}, { text => shift(), offset => shift() }; } sub _push_extra_data_in_pcdata { my $e= shift; $e->{extra_data_in_pcdata}||=[]; push @{$e->{extra_data_in_pcdata}}, { text => shift(), offset => shift() }; } sub _extra_data_before_end_tag { return $_[0]->{extra_data_before_end_tag} || ''; } sub _set_extra_data_before_end_tag { $_[0]->{extra_data_before_end_tag}= $_[1]; return $_[0]} sub _del_extra_data_before_end_tag { delete $_[0]->{extra_data_before_end_tag}; return $_[0]} sub _prefix_extra_data_before_end_tag { my( $elt, $data)= @_; if($elt->{extra_data_before_end_tag}) { $elt->{extra_data_before_end_tag}= $data . $elt->{extra_data_before_end_tag}; } else { $elt->{extra_data_before_end_tag}= $data; } return $elt; } # internal, in cases where we know there is no extra_data (inlined anyway!) 
sub _set_pcdata { $_[0]->{pcdata}= $_[1]; } # try to figure out if we can keep the extra_data around sub _try_moving_extra_data { my( $elt, $modified)=@_; my $initial= $elt->{pcdata}; my $cpis= $elt->{extra_data_in_pcdata}; if( (my $offset= index( $modified, $initial)) != -1) { # text has been added foreach (@$cpis) { $_->{offset}+= $offset; } } elsif( ($offset= index( $initial, $modified)) != -1) { # text has been cut my $len= length( $modified); foreach my $cpi (@$cpis) { $cpi->{offset} -= $offset; } $elt->_set_extra_data_in_pcdata( [ grep { $_->{offset} >= 0 && $_->{offset} < $len } @$cpis ]); } else { _match_extra_data_words( $elt, $initial, $modified) || _match_extra_data_chars( $elt, $initial, $modified) || $elt->_del_extra_data_in_pcdata; } } sub _match_extra_data_words { my( $elt, $initial, $modified)= @_; my @initial= split /\b/, $initial; my @modified= split /\b/, $modified; return _match_extra_data( $elt, length( $initial), \@initial, \@modified); } sub _match_extra_data_chars { my( $elt, $initial, $modified)= @_; my @initial= split //, $initial; my @modified= split //, $modified; return _match_extra_data( $elt, length( $initial), \@initial, \@modified); } sub _match_extra_data { my( $elt, $length, $initial, $modified)= @_; my $cpis= $elt->{extra_data_in_pcdata}; if( @$initial <= @$modified) { my( $ok, $positions, $offsets)= _pos_offset( $initial, $modified); if( $ok) { my $offset=0; my $pos= shift @$positions; foreach my $cpi (@$cpis) { while( $cpi->{offset} >= $pos) { $offset= shift @$offsets; $pos= shift @$positions || $length +1; } $cpi->{offset} += $offset; } return 1; } } else { my( $ok, $positions, $offsets)= _pos_offset( $modified, $initial); if( $ok) { #print STDERR "pos: ", join( ':', @$positions), "\n", # "offset: ", join( ':', @$offsets), "\n"; my $offset=0; my $pos= shift @$positions; my $prev_pos= 0; foreach my $cpi (@$cpis) { while( $cpi->{offset} >= $pos) { $offset= shift @$offsets; $prev_pos= $pos; $pos= shift @$positions || $length +1; } $cpi->{offset} -= $offset; if( $cpi->{offset} < $prev_pos) { delete $cpi->{text}; } } $elt->_set_extra_data_in_pcdata( [ grep { exists $_->{text} } @$cpis ]); return 1; } } return 0; } sub _pos_offset { my( $short, $long)= @_; my( @pos, @offset); my( $s_length, $l_length)=(0,0); while (@$short) { my $s_word= shift @$short; my $l_word= shift @$long; if( $s_word ne $l_word) { while( @$long && $s_word ne $l_word) { $l_length += length( $l_word); $l_word= shift @$long; } if( !@$long && $s_word ne $l_word) { return 0; } push @pos, $s_length; push @offset, $l_length - $s_length; } my $length= length( $s_word); $s_length += $length; $l_length += $length; } return( 1, \@pos, \@offset); } sub append_pcdata { $_[0]->{'pcdata'}.= $_[1]; return $_[0]; } sub pcdata { return $_[0]->{pcdata}; } sub append_extra_data { $_[0]->{extra_data}.= $_[1]; return $_[0]; } sub set_extra_data { $_[0]->{extra_data}= $_[1]; return $_[0]; } sub extra_data { return $_[0]->{extra_data} || ''; } sub set_target { my( $elt, $target)= @_; $elt->{target}= $target; return $elt; } sub target { return $_[0]->{target}; } sub set_data { $_[0]->{'data'}= $_[1]; return $_[0]; } sub data { return $_[0]->{data}; } sub set_pi { my $elt= shift; unless( $elt->{gi} == $XML::Twig::gi2index{$PI}) { $elt->cut_children; $elt->{gi}=$XML::Twig::gi2index{$PI} or $elt->set_gi( $PI); } return $elt->_set_pi( @_); } sub _set_pi { $_[0]->set_target( $_[1]); $_[0]->{data}= $_[2]; return $_[0]; } sub pi_string { my $string= $PI_START . 
$_[0]->{target}; my $data= $_[0]->{data}; if( defined( $data) && $data ne '') { $string .= " $data"; } $string .= $PI_END ; return $string; } sub set_comment { my $elt= shift; unless( $elt->{gi} == $XML::Twig::gi2index{$COMMENT}) { $elt->cut_children; $elt->{gi}=$XML::Twig::gi2index{$COMMENT} or $elt->set_gi( $COMMENT); } return $elt->_set_comment( @_); } sub _set_comment { $_[0]->{comment}= $_[1]; return $_[0]; } sub comment { return $_[0]->{comment}; } sub comment_string { return $COMMENT_START . _comment_escaped_string( $_[0]->{comment}) . $COMMENT_END; } # comments cannot start or end with sub _comment_escaped_string { my( $c)= @_; $c=~ s{^-}{ -}; $c=~ s{-$}{- }; $c=~ s{--}{- -}g; return $c; } sub set_ent { $_[0]->{ent}= $_[1]; return $_[0]; } sub ent { return $_[0]->{ent}; } sub ent_name { return substr( $_[0]->{ent}, 1, -1);} sub set_cdata { my $elt= shift; unless( $elt->{gi} == $XML::Twig::gi2index{$CDATA}) { $elt->cut_children; $elt->insert_new_elt( first_child => $CDATA, @_); return $elt; } return $elt->_set_cdata( @_); } sub _set_cdata { $_[0]->{cdata}= $_[1]; return $_[0]; } sub append_cdata { $_[0]->{cdata}.= $_[1]; return $_[0]; } sub cdata { return $_[0]->{cdata}; } sub contains_only_text { my $elt= shift; return 0 unless $elt->is_elt; foreach my $child ($elt->_children) { return 0 if $child->is_elt; } return $elt; } sub contains_only { my( $elt, $exp)= @_; my @children= do { my $elt= $elt; my @children=(); my $child= $elt->{first_child}; while( $child) { push @children, $child; $child= $child->{next_sibling}; } @children; }; foreach my $child (@children) { return 0 unless $child->is( $exp); } return @children || 1; } sub contains_a_single { my( $elt, $exp)= @_; my $child= $elt->{first_child} or return 0; return 0 unless $child->passes( $exp); return 0 if( $child->{next_sibling}); return $child; } sub root { my $elt= shift; while( $elt->{parent}) { $elt= $elt->{parent}; } return $elt; } sub _root_through_cut { my $elt= shift; while( $elt->{parent} || ($elt->{former} && $elt->{former}->{parent})) { $elt= $elt->{parent} || ($elt->{former} && $elt->{former}->{parent}); } return $elt; } sub twig { my $elt= shift; my $root= $elt->root; return $root->{twig}; } sub _twig_through_cut { my $elt= shift; my $root= $elt->_root_through_cut; return $root->{twig}; } # used for navigation # returns undef or the element, depending on whether $elt passes $cond # $cond can be # - empty: the element passes the condition # - ELT ('#ELT'): the element passes the condition if it is a "real" element # - TEXT ('#TEXT'): the element passes if it is a CDATA or PCDATA element # - a string with an XPath condition (only a subset of XPath is actually # supported). # - a regexp: the element passes if its gi matches the regexp # - a code ref: the element passes if the code, applied on the element, # returns true my %cond_cache; # expression => coderef sub reset_cond_cache { %cond_cache=(); } { sub _install_cond { my $cond= shift; my $test; my $init=''; my $original_cond= $cond; my $not= ($cond=~ s{^\s*!}{}) ? '!' 
: ''; if( ref $cond eq 'CODE') { return $cond; } if( ref $cond eq 'Regexp') { $test = qq{(\$_[0]->gi=~ /$cond/)}; } else { my @tests; while( $cond) { # the condition is a string if( $cond=~ s{$ELT$SEP}{}) { push @tests, qq{\$_[0]->is_elt}; } elsif( $cond=~ s{$TEXT$SEP}{}) { push @tests, qq{\$_[0]->is_text}; } elsif( $cond=~ s{^\s*($REG_TAG_PART)$SEP}{}) { push @tests, _gi_test( $1); } elsif( $cond=~ s{^\s*($REG_REGEXP)$SEP}{}) { # /regexp/ push @tests, qq{ \$_[0]->gi=~ $1 }; } elsif( $cond=~ s{^\s*($REG_TAG_PART)?\s* # $1 \[\s*(-?)\s*(\d+)\s*\] # [$2] $SEP}{}xo ) { my( $gi, $neg, $index)= ($1, $2, $3); my $siblings= $neg ? q{$_[0]->_next_siblings} : q{$_[0]->_prev_siblings}; if( $gi && ($gi ne '*')) #{ $test= qq{((\$_[0]->gi eq "$gi") && (scalar( grep { \$_->gi eq "$gi" } $siblings) + 1 == $index))}; } { push @tests, _and( _gi_test( $gi), qq{ (scalar( grep { \$_->gi eq "$gi" } $siblings) + 1 == $index)}); } else { push @tests, qq{(scalar( $siblings) + 1 == $index)}; } } elsif( $cond=~ s{^\s*($REG_TAG_PART?)\s*($REG_PREDICATE)$SEP}{}) { my( $gi, $predicate)= ( $1, $2); push @tests, _and( _gi_test( $gi), _parse_predicate_in_step( $predicate)); } elsif( $cond=~ s{^\s*($REG_NAKED_PREDICATE)$SEP}{}) { push @tests, _parse_predicate_in_step( $1); } else { croak "wrong navigation condition '$original_cond' ($@)"; } } $test= @tests > 1 ? '(' . join( '||', map { "($_)" } @tests) . ')' : $tests[0]; } #warn "init: '$init' - test: '$test'\n"; my $sub= qq{sub { $NO_WARNINGS; $init; return $not($test) ? \$_[0] : undef; } }; my $s= eval $sub; #warn "cond: $cond\n$sub\n"; if( $@) { croak "wrong navigation condition '$original_cond' ($@);" } return $s; } sub _gi_test { my( $full_gi)= @_; # optimize if the gi exists, including the case where the gi includes a dot my $index= $XML::Twig::gi2index{$full_gi}; if( $index) { return qq{\$_[0]->{gi} == $index}; } my( $gi, $class, $id)= $full_gi=~ m{^(.*?)(?:[.]([^.]*)|[#](.*))?$}; my $gi_test=''; if( $gi && $gi ne '*' ) { # 2 options, depending on whether the gi exists in gi2index # start optimization my $index= $XML::Twig::gi2index{$gi}; if( $index) { # the gi exists, use its index as a faster shortcut $gi_test = qq{\$_[0]->{gi} == $index}; } else # end optimization { # it does not exist (but might be created later), compare the strings $gi_test = qq{ \$_[0]->gi eq "$gi"}; } } else { $gi_test= 1; } my $class_test=''; #warn "class: '$class'"; if( $class) { $class_test = qq{ defined( \$_[0]->{att}->{class}) && \$_[0]->{att}->{class}=~ m{\\b$class\\b} }; } my $id_test=''; #warn "id: '$id'"; if( $id) { $id_test = qq{ defined( \$_[0]->{att}->{$ID}) && \$_[0]->{att}->{$ID} eq '$id' }; } #warn "gi_test: '$gi_test' - class_test: '$class_test' returning ", _and( $gi_test, $class_test); return _and( $gi_test, $class_test, $id_test); } # input: the original predicate sub _parse_predicate_in_step { my $cond= shift; my %PERL_ALPHA_TEST= ( '=' => ' eq ', '!=' => ' ne ', '>' => ' gt ', '>=' => ' ge ', '<' => ' lt ', '<=' => ' le '); $cond=~ s{^\s*\[\s*}{}; $cond=~ s{\s*\]\s*$}{}; $cond=~ s{( ($REG_STRING|$REG_REGEXP) # strings or regexps |\@($REG_TAG_NAME)(?=\s*(?:[><=!]|!~|=~)) # @att (followed by a comparison operator) |\@($REG_TAG_NAME) # @att (not followed by a comparison operator) |=~|!~ # matching operators |([><]=?|=|!=)(?=\s*[\d+-]) # test before a number |([><]=?|=|!=) # test, other cases |($REG_FUNCTION) # no arg functions # this bit is a mess, but it is the only solution with this half-baked parser |((?:string|text)\(\s*$REG_TAG_NAME\s*\)\s*$REG_MATCH\s*$REG_REGEXP) # 
string( child) =~ /regexp/ |((?:string|text)\(\s*$REG_TAG_NAME\s*\)\s*!?=\s*$REG_VALUE) # string( child) = "value" (or !=) |((?:string|text)\(\s*$REG_TAG_NAME\s*\)\s*[<>]=?\s*$REG_VALUE) # string( child) > "value" |(and|or) )} { my( $token, $string, $att, $bare_att, $num_test, $alpha_test, $func, $string_regexp, $string_eq, $string_test, $and_or) = ( $1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11); if( defined $string) { $token } elsif( $att) { "( \$_[0]->{att} && exists( \$_[0]->{att}->{'$att'}) && \$_[0]->{att}->{'$att'})"; } elsif( $bare_att) { "(\$_[0]->{att} && defined( \$_[0]->{att}->{'$bare_att'}))"; } elsif( $num_test && ($num_test eq '=') ) { "==" } # others tests are unchanged elsif( $alpha_test) { $PERL_ALPHA_TEST{$alpha_test} } elsif( $func && $func=~ m{^(?:string|text)}) { "\$_[0]->text"; } elsif( $string_regexp && $string_regexp =~ m{(?:string|text)\(\s*($REG_TAG_NAME)\s*\)\s*($REG_MATCH)\s*($REG_REGEXP)}) { "(XML::Twig::_first_n { (\$_->gi eq '$1') && (\$_->text $2 $3) } 1, \$_[0]->_children)"; } elsif( $string_eq && $string_eq =~ m{(?:string|text)\(\s*($REG_TAG_NAME)\s*\)\s*(!?=)\s*($REG_VALUE)}) {"(XML::Twig::_first_n { (\$_->gi eq '$1') && (\$_->text $PERL_ALPHA_TEST{$2} $3) } 1, \$_[0]->_children)"; } elsif( $string_test && $string_test =~ m{(?:string|text)\(\s*($REG_TAG_NAME)\s*\)\s*([<>]=?)\s*($REG_VALUE)}) { "(XML::Twig::_first_n { (\$_->gi eq '$1') && (\$_->text $2 $3) } 1, \$_[0]->_children)"; } elsif( $and_or) { $and_or eq 'and' ? '&&' : '||' ; } else { $token; } }gexs; return "($cond)"; } sub _op { my $op= shift; if( $op eq '=') { $op= 'eq'; } elsif( $op eq '!=') { $op= 'ne'; } return $op; } sub passes { my( $elt, $cond)= @_; return $elt unless $cond; my $sub= ($cond_cache{$cond} ||= _install_cond( $cond)); return $sub->( $elt); } } sub set_parent { $_[0]->{parent}= $_[1]; if( $XML::Twig::weakrefs) { weaken( $_[0]->{parent}); } } sub parent { my $elt= shift; my $cond= shift || return $elt->{parent}; do { $elt= $elt->{parent} || return; } until ( $elt->passes( $cond)); return $elt; } sub set_first_child { $_[0]->{'first_child'}= $_[1]; } sub first_child { my $elt= shift; my $cond= shift || return $elt->{first_child}; my $child= $elt->{first_child}; my $test_cond= ($cond_cache{$cond} ||= _install_cond( $cond)); while( $child && !$test_cond->( $child)) { $child= $child->{next_sibling}; } return $child; } sub _first_child { return $_[0]->{first_child}; } sub _last_child { return $_[0]->{last_child}; } sub _next_sibling { return $_[0]->{next_sibling}; } sub _prev_sibling { return $_[0]->{prev_sibling}; } sub _parent { return $_[0]->{parent}; } sub _next_siblings { my $elt= shift; my @siblings; while( $elt= $elt->{next_sibling}) { push @siblings, $elt; } return @siblings; } sub _prev_siblings { my $elt= shift; my @siblings; while( $elt= $elt->{prev_sibling}) { push @siblings, $elt; } return @siblings; } # sets a field # arguments $record, $cond, @content sub set_field { my $record = shift; my $cond = shift; my $child= $record->first_child( $cond); if( $child) { $child->set_content( @_); } else { if( $cond=~ m{^\s*($REG_TAG_NAME)}) { my $gi= $1; $child= $record->insert_new_elt( last_child => $gi, @_); } else { croak "can't create a field name from $cond"; } } return $child; } sub set_last_child { $_[0]->{'last_child'}= $_[1]; if( $XML::Twig::weakrefs) { weaken( $_[0]->{'last_child'}); } } sub last_child { my $elt= shift; my $cond= shift || return $elt->{last_child}; my $test_cond= ($cond_cache{$cond} ||= _install_cond( $cond)); my $child= $elt->{last_child}; while( $child 
&& !$test_cond->( $child) ) { $child= $child->{prev_sibling}; } return $child } sub set_prev_sibling { $_[0]->{'prev_sibling'}= $_[1]; if( $XML::Twig::weakrefs) { weaken( $_[0]->{'prev_sibling'}); } } sub prev_sibling { my $elt= shift; my $cond= shift || return $elt->{prev_sibling}; my $test_cond= ($cond_cache{$cond} ||= _install_cond( $cond)); my $sibling= $elt->{prev_sibling}; while( $sibling && !$test_cond->( $sibling) ) { $sibling= $sibling->{prev_sibling}; } return $sibling; } sub set_next_sibling { $_[0]->{'next_sibling'}= $_[1]; } sub next_sibling { my $elt= shift; my $cond= shift || return $elt->{next_sibling}; my $test_cond= ($cond_cache{$cond} ||= _install_cond( $cond)); my $sibling= $elt->{next_sibling}; while( $sibling && !$test_cond->( $sibling) ) { $sibling= $sibling->{next_sibling}; } return $sibling; } # methods dealing with the class attribute, convenient if you work with xhtml sub class { $_[0]->{att}->{class}; } # lvalue version of class. separate from class to avoid problem like RT# sub lclass :lvalue # > perl 5.5 { $_[0]->{att}->{class}; } sub set_class { my( $elt, $class)= @_; $elt->set_att( class => $class); } # adds a class to an element sub add_to_class { my( $elt, $new_class)= @_; return $elt unless $new_class; my $class= $elt->class; my %class= $class ? map { $_ => 1 } split /\s+/, $class : (); $class{$new_class}= 1; $elt->set_class( join( ' ', sort keys %class)); } sub remove_class { my( $elt, $class_to_remove)= @_; return $elt unless $class_to_remove; my $class= $elt->class; my %class= $class ? map { $_ => 1 } split /\s+/, $class : (); delete $class{$class_to_remove}; $elt->set_class( join( ' ', sort keys %class)); } sub att_to_class { my( $elt, $att)= @_; $elt->set_class( $elt->{'att'}->{$att}); } sub add_att_to_class { my( $elt, $att)= @_; $elt->add_to_class( $elt->{'att'}->{$att}); } sub move_att_to_class { my( $elt, $att)= @_; $elt->add_to_class( $elt->{'att'}->{$att}); $elt->del_att( $att); } sub tag_to_class { my( $elt)= @_; $elt->set_class( $elt->tag); } sub add_tag_to_class { my( $elt)= @_; $elt->add_to_class( $elt->tag); } sub set_tag_class { my( $elt, $new_tag)= @_; $elt->add_tag_to_class; $elt->set_tag( $new_tag); } sub tag_to_span { my( $elt)= @_; $elt->set_class( $elt->tag) unless( $elt->tag eq 'span' && $elt->class); # set class to span unless it would mean replacing it with span $elt->set_tag( 'span'); } sub tag_to_div { my( $elt)= @_; $elt->set_class( $elt->tag) unless( $elt->tag eq 'div' && $elt->class); # set class to div unless it would mean replacing it with div $elt->set_tag( 'div'); } sub in_class { my( $elt, $class)= @_; my $elt_class= $elt->class; return unless( defined $elt_class); return $elt->class=~ m{(?:^|\s)\Q$class\E(?:\s|$)} ? $elt : 0; } # get or set all attributes # argument can be a hash or a hashref sub set_atts { my $elt= shift; my %atts; tie %atts, 'Tie::IxHash' if( keep_atts_order()); %atts= ( (ref( $_[0] || '') eq 'HASH') || isa( $_[0] || '', 'HASH')) ? 
%{$_[0]} : @_; $elt->{att}= \%atts; if( exists $atts{$ID}) { $elt->_set_id( $atts{$ID}); } return $elt; } sub atts { return $_[0]->{att}; } sub att_names { return (sort keys %{$_[0]->{att}}); } sub del_atts { $_[0]->{att}={}; return $_[0]; } # get or set a single attribute (set works for several atts) sub set_att { my $elt= shift; if( $_[0] && ref( $_[0]) && !$_[1]) { croak "improper call to set_att, usage is \$elt->set_att( att1 => 'val1', att2 => 'val2',...)"; } unless( $elt->{att}) { $elt->{att}={}; tie %{$elt->{att}}, 'Tie::IxHash' if( keep_atts_order()); } while(@_) { my( $att, $val)= (shift, shift); $elt->{att}->{$att}= $val; if( $att eq $ID) { $elt->_set_id( $val); } } return $elt; } sub att { $_[0]->{att}->{$_[1]}; } # lvalue version of att. separate from class to avoid problem like RT# sub latt :lvalue # > perl 5.5 { $_[0]->{att}->{$_[1]}; } sub del_att { my $elt= shift; while( @_) { delete $elt->{'att'}->{shift()}; } return $elt; } sub att_exists { return exists $_[0]->{att}->{$_[1]}; } # delete an attribute from all descendants of an element sub strip_att { my( $elt, $att)= @_; $_->del_att( $att) foreach ($elt->descendants_or_self( qq{*[\@$att]})); return $elt; } sub change_att_name { my( $elt, $old_name, $new_name)= @_; my $value= $elt->{'att'}->{$old_name}; return $elt unless( defined $value); $elt->del_att( $old_name) ->set_att( $new_name => $value); return $elt; } sub lc_attnames { my $elt= shift; foreach my $att ($elt->att_names) { if( $att ne lc $att) { $elt->change_att_name( $att, lc $att); } } return $elt; } sub set_twig_current { $_[0]->{twig_current}=1; } sub del_twig_current { delete $_[0]->{twig_current}; } # get or set the id attribute sub set_id { my( $elt, $id)= @_; $elt->del_id() if( exists $elt->{att}->{$ID}); $elt->set_att($ID, $id); $elt->_set_id( $id); return $elt; } # only set id, does not update the attribute value sub _set_id { my( $elt, $id)= @_; my $t= $elt->twig || $elt; $t->{twig_id_list}->{$id}= $elt; if( $XML::Twig::weakrefs) { weaken( $t->{twig_id_list}->{$id}); } return $elt; } sub id { return $_[0]->{att}->{$ID}; } # methods used to add ids to elements that don't have one BEGIN { my $id_nb = "0001"; my $id_seed = "twig_id_"; sub set_id_seed ## no critic (Subroutines::ProhibitNestedSubs); { $id_seed= $_[1]; $id_nb=1; } sub add_id ## no critic (Subroutines::ProhibitNestedSubs); { my $elt= shift; if( defined $elt->{'att'}->{$ID}) { return $elt->{'att'}->{$ID}; } else { my $id= $_[0] && ref( $_[0]) && isa( $_[0], 'CODE') ? $_[0]->( $elt) : $id_seed . $id_nb++; $elt->set_id( $id); return $id; } } } # delete the id attribute and remove the element from the id list sub del_id { my $elt= shift; if( ! 
exists $elt->{att}->{$ID}) { return $elt }; my $id= $elt->{att}->{$ID}; delete $elt->{att}->{$ID}; my $t= shift || $elt->twig; unless( $t) { return $elt; } if( exists $t->{twig_id_list}->{$id}) { delete $t->{twig_id_list}->{$id}; } return $elt; } # return the list of children sub children { my $elt= shift; my @children; my $child= $elt->first_child( @_); while( $child) { push @children, $child; $child= $child->next_sibling( @_); } return @children; } sub _children { my $elt= shift; my @children=(); my $child= $elt->{first_child}; while( $child) { push @children, $child; $child= $child->{next_sibling}; } return @children; } sub children_copy { my $elt= shift; my @children; my $child= $elt->first_child( @_); while( $child) { push @children, $child->copy; $child= $child->next_sibling( @_); } return @children; } sub children_count { my $elt= shift; my $cond= shift; my $count=0; my $child= $elt->{first_child}; while( $child) { $count++ if( $child->passes( $cond)); $child= $child->{next_sibling}; } return $count; } sub children_text { my $elt= shift; return wantarray() ? map { $_->text} $elt->children( @_) : join( '', map { $_->text} $elt->children( @_) ) ; } sub children_trimmed_text { my $elt= shift; return wantarray() ? map { $_->trimmed_text} $elt->children( @_) : join( '', map { $_->trimmed_text} $elt->children( @_) ) ; } sub all_children_are { my( $parent, $cond)= @_; foreach my $child ($parent->_children) { return 0 unless( $child->passes( $cond)); } return $parent; } sub ancestors { my( $elt, $cond)= @_; my @ancestors; while( $elt->{parent}) { $elt= $elt->{parent}; push @ancestors, $elt if( $elt->passes( $cond)); } return @ancestors; } sub ancestors_or_self { my( $elt, $cond)= @_; my @ancestors; while( $elt) { push @ancestors, $elt if( $elt->passes( $cond)); $elt= $elt->{parent}; } return @ancestors; } sub _ancestors { my( $elt, $include_self)= @_; my @ancestors= $include_self ? 
($elt) : (); while( $elt= $elt->{parent}) { push @ancestors, $elt; } return @ancestors; } sub inherit_att { my $elt= shift; my $att= shift; my %tags= map { ($_, 1) } @_; do { if( (defined $elt->{'att'}->{$att}) && ( !%tags || $tags{$XML::Twig::index2gi[$elt->{'gi'}]}) ) { return $elt->{'att'}->{$att}; } } while( $elt= $elt->{parent}); return undef; } sub _inherit_att_through_cut { my $elt= shift; my $att= shift; my %tags= map { ($_, 1) } @_; do { if( (defined $elt->{'att'}->{$att}) && ( !%tags || $tags{$XML::Twig::index2gi[$elt->{'gi'}]}) ) { return $elt->{'att'}->{$att}; } } while( $elt= $elt->{parent} || ($elt->{former} && $elt->{former}->{parent})); return undef; } sub current_ns_prefixes { my $elt= shift; my %prefix; $prefix{''}=1 if( $elt->namespace( '')); while( $elt) { my @ns= grep { !m{^xml} } map { m{^([^:]+):} } ($XML::Twig::index2gi[$elt->{'gi'}], $elt->att_names); $prefix{$_}=1 foreach (@ns); $elt= $elt->{parent}; } return (sort keys %prefix); } # kinda counter-intuitive actually: # the next element is found by looking for the next open tag after from the # current one, which is the first child, if it exists, or the next sibling # or the first next sibling of an ancestor # optional arguments are: # - $subtree_root: a reference to an element, when the next element is not # within $subtree_root anymore then next_elt returns undef # - $cond: a condition, next_elt returns the next element matching the condition sub next_elt { my $elt= shift; my $subtree_root= 0; $subtree_root= shift if( ref( $_[0]) && isa( $_[0], 'XML::Twig::Elt')); my $cond= shift; my $next_elt; my $ind; # optimization my $test_cond; if( $cond) # optimization { unless( defined( $ind= $XML::Twig::gi2index{$cond}) ) # optimization { $test_cond= ($cond_cache{$cond} ||= _install_cond( $cond)); } # optimization } # optimization do { if( $next_elt= $elt->{first_child}) { # simplest case: the elt has a child } elsif( $next_elt= $elt->{next_sibling}) { # no child but a next sibling (just check we stay within the subtree) # case where elt is subtree_root, is empty and has a sibling return undef if( $subtree_root && ($elt == $subtree_root)); } else { # case where the element has no child and no next sibling: # get the first next sibling of an ancestor, checking subtree_root # case where elt is subtree_root, is empty and has no sibling return undef if( $subtree_root && ($elt == $subtree_root)); $next_elt= $elt->{parent}; until( $next_elt->{next_sibling}) { return undef if( $subtree_root && ($subtree_root == $next_elt)); $next_elt= $next_elt->{parent} || return undef; } return undef if( $subtree_root && ($subtree_root == $next_elt)); $next_elt= $next_elt->{next_sibling}; } $elt= $next_elt; # just in case we need to loop } until( ! defined $elt || ! 
defined $cond || (defined $ind && ($elt->{gi} eq $ind)) # optimization || (defined $test_cond && ($test_cond->( $elt))) ); return $elt; } # return the next_elt within the element # just call next_elt with the element as first and second argument sub first_descendant { return $_[0]->next_elt( @_); } # get the last descendant, # then return the element found or call prev_elt with the condition sub last_descendant { my( $elt, $cond)= @_; my $last_descendant= $elt->_last_descendant; if( !$cond || $last_descendant->matches( $cond)) { return $last_descendant; } else { return $last_descendant->prev_elt( $elt, $cond); } } # no argument allowed here, just go down the last_child recursively sub _last_descendant { my $elt= shift; while( my $child= $elt->{last_child}) { $elt= $child; } return $elt; } # counter-intuitive too: # the previous element is found by looking # for the first open tag backwards from the current one # it's the last descendant of the previous sibling # if it exists, otherwise it's simply the parent sub prev_elt { my $elt= shift; my $subtree_root= 0; if( defined $_[0] and (ref( $_[0]) && isa( $_[0], 'XML::Twig::Elt'))) { $subtree_root= shift ; return undef if( $elt == $subtree_root); } my $cond= shift; # get prev elt my $prev_elt; do { return undef if( $elt == $subtree_root); if( $prev_elt= $elt->{prev_sibling}) { while( $prev_elt->{last_child}) { $prev_elt= $prev_elt->{last_child}; } } else { $prev_elt= $elt->{parent} || return undef; } $elt= $prev_elt; # in case we need to loop } until( $elt->passes( $cond)); return $elt; } sub _following_elt { my( $elt)= @_; while( $elt && !$elt->{next_sibling}) { $elt= $elt->{parent}; } return $elt ? $elt->{next_sibling} : undef; } sub following_elt { my( $elt, $cond)= @_; $elt= $elt->_following_elt || return undef; return $elt if( !$cond || $elt->matches( $cond)); return $elt->next_elt( $cond); } sub following_elts { my( $elt, $cond)= @_; if( !$cond) { undef $cond; } my $following= $elt->following_elt( $cond); if( $following) { my @followings= $following; while( $following= $following->next_elt( $cond)) { push @followings, $following; } return( @followings); } else { return (); } } sub _preceding_elt { my( $elt)= @_; while( $elt && !$elt->{prev_sibling}) { $elt= $elt->{parent}; } return $elt ? $elt->{prev_sibling}->_last_descendant : undef; } sub preceding_elt { my( $elt, $cond)= @_; $elt= $elt->_preceding_elt || return undef; return $elt if( !$cond || $elt->matches( $cond)); return $elt->prev_elt( $cond); } sub preceding_elts { my( $elt, $cond)= @_; if( !$cond) { undef $cond; } my $preceding= $elt->preceding_elt( $cond); if( $preceding) { my @precedings= $preceding; while( $preceding= $preceding->prev_elt( $cond)) { push @precedings, $preceding; } return( @precedings); } else { return (); } } # used in get_xpath sub _self { my( $elt, $cond)= @_; return $cond ? 
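#
# Usage sketch for the document-order navigation methods above (next_elt,
# prev_elt, first_descendant, last_descendant, following/preceding_elt);
# '$root' and the 'title'/'para' tags are made up:
#
#   for( my $t= $root->first_descendant( 'title'); $t; $t= $t->next_elt( $root, 'title'))
#     { print $t->text, "\n"; }                       # every 'title' inside $root, in document order
#   my $last_elt  = $root->last_descendant;            # last element of the subtree
#   my $prev_para = $last_elt->prev_elt( 'para');      # nearest preceding 'para'
#   my $next_para = $last_elt->following_elt( 'para'); # first 'para' starting after $last_elt
#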
$elt->matches( $cond) : $elt; } sub next_n_elt { my $elt= shift; my $offset= shift || return undef; foreach (1..$offset) { $elt= $elt->next_elt( @_) || return undef; } return $elt; } # checks whether $elt is included in $ancestor, returns 1 in that case sub in { my ($elt, $ancestor)= @_; if( ref( $ancestor) && isa( $ancestor, 'XML::Twig::Elt')) { # element while( $elt= $elt->{parent}) { return $elt if( $elt == $ancestor); } } else { # condition while( $elt= $elt->{parent}) { return $elt if( $elt->matches( $ancestor)); } } return 0; } sub first_child_text { my $elt= shift; my $dest=$elt->first_child(@_) or return ''; return $dest->text; } sub fields { my $elt= shift; return map { $elt->field( $_) } @_; } sub first_child_trimmed_text { my $elt= shift; my $dest=$elt->first_child(@_) or return ''; return $dest->trimmed_text; } sub first_child_matches { my $elt= shift; my $dest= $elt->{first_child} or return undef; return $dest->passes( @_); } sub last_child_text { my $elt= shift; my $dest=$elt->last_child(@_) or return ''; return $dest->text; } sub last_child_trimmed_text { my $elt= shift; my $dest=$elt->last_child(@_) or return ''; return $dest->trimmed_text; } sub last_child_matches { my $elt= shift; my $dest= $elt->{last_child} or return undef; return $dest->passes( @_); } sub child_text { my $elt= shift; my $dest=$elt->child(@_) or return ''; return $dest->text; } sub child_trimmed_text { my $elt= shift; my $dest=$elt->child(@_) or return ''; return $dest->trimmed_text; } sub child_matches { my $elt= shift; my $nb= shift; my $dest= $elt->child( $nb) or return undef; return $dest->passes( @_); } sub prev_sibling_text { my $elt= shift; my $dest=$elt->_prev_sibling(@_) or return ''; return $dest->text; } sub prev_sibling_trimmed_text { my $elt= shift; my $dest=$elt->_prev_sibling(@_) or return ''; return $dest->trimmed_text; } sub prev_sibling_matches { my $elt= shift; my $dest= $elt->{prev_sibling} or return undef; return $dest->passes( @_); } sub next_sibling_text { my $elt= shift; my $dest=$elt->next_sibling(@_) or return ''; return $dest->text; } sub next_sibling_trimmed_text { my $elt= shift; my $dest=$elt->next_sibling(@_) or return ''; return $dest->trimmed_text; } sub next_sibling_matches { my $elt= shift; my $dest= $elt->{next_sibling} or return undef; return $dest->passes( @_); } sub prev_elt_text { my $elt= shift; my $dest=$elt->prev_elt(@_) or return ''; return $dest->text; } sub prev_elt_trimmed_text { my $elt= shift; my $dest=$elt->prev_elt(@_) or return ''; return $dest->trimmed_text; } sub prev_elt_matches { my $elt= shift; my $dest= $elt->prev_elt or return undef; return $dest->passes( @_); } sub next_elt_text { my $elt= shift; my $dest=$elt->next_elt(@_) or return ''; return $dest->text; } sub next_elt_trimmed_text { my $elt= shift; my $dest=$elt->next_elt(@_) or return ''; return $dest->trimmed_text; } sub next_elt_matches { my $elt= shift; my $dest= $elt->next_elt or return undef; return $dest->passes( @_); } sub parent_text { my $elt= shift; my $dest=$elt->parent(@_) or return ''; return $dest->text; } sub parent_trimmed_text { my $elt= shift; my $dest=$elt->parent(@_) or return ''; return $dest->trimmed_text; } sub parent_matches { my $elt= shift; my $dest= $elt->{parent} or return undef; return $dest->passes( @_); } sub is_first_child { my $elt= shift; my $parent= $elt->{parent} or return 0; my $first_child= $parent->first_child( @_) or return 0; return ($first_child == $elt) ? 
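#
# Usage sketch for the *_text / *_matches convenience wrappers above; the
# '$book' variable and the tag names are made up:
#
#   my $title   = $book->first_child_text( 'title');           # '' if there is no such child
#   my @columns = $book->fields( 'title', 'author', 'price');  # text of several children at once
#   if( $book->parent_matches( 'catalog'))
#     { print $book->next_sibling_trimmed_text( 'book'), "\n"; }
#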
$elt : 0; } sub is_last_child { my $elt= shift; my $parent= $elt->{parent} or return 0; my $last_child= $parent->last_child( @_) or return 0; return ($last_child == $elt) ? $elt : 0; } # returns the depth level of the element # if 2 parameter are used then counts the 2cd element name in the # ancestors list sub level { my( $elt, $cond)= @_; my $level=0; my $name=shift || ''; while( $elt= $elt->{parent}) { $level++ if( !$cond || $elt->matches( $cond)); } return $level; } # checks whether $elt has an ancestor that satisfies $cond, returns the ancestor sub in_context { my ($elt, $cond, $level)= @_; $level= -1 unless( $level) ; # $level-- will never hit 0 while( $level) { $elt= $elt->{parent} or return 0; if( $elt->matches( $cond)) { return $elt; } $level--; } return 0; } sub _descendants { my( $subtree_root, $include_self)= @_; my @descendants= $include_self ? ($subtree_root) : (); my $elt= $subtree_root; my $next_elt; MAIN: while( 1) { if( $next_elt= $elt->{first_child}) { # simplest case: the elt has a child } elsif( $next_elt= $elt->{next_sibling}) { # no child but a next sibling (just check we stay within the subtree) # case where elt is subtree_root, is empty and has a sibling last MAIN if( $elt == $subtree_root); } else { # case where the element has no child and no next sibling: # get the first next sibling of an ancestor, checking subtree_root # case where elt is subtree_root, is empty and has no sibling last MAIN if( $elt == $subtree_root); # backtrack until we find a parent with a next sibling $next_elt= $elt->{parent} || last; until( $next_elt->{next_sibling}) { last MAIN if( $subtree_root == $next_elt); $next_elt= $next_elt->{parent} || last MAIN; } last MAIN if( $subtree_root == $next_elt); $next_elt= $next_elt->{next_sibling}; } $elt= $next_elt || last MAIN; push @descendants, $elt; } return @descendants; } sub descendants { my( $subtree_root, $cond)= @_; my @descendants=(); my $elt= $subtree_root; # this branch is pure optimization for speed: if $cond is a gi replace it # by the index of the gi and loop here # start optimization my $ind; if( !$cond || ( defined ( $ind= $XML::Twig::gi2index{$cond})) ) { my $next_elt; while( 1) { if( $next_elt= $elt->{first_child}) { # simplest case: the elt has a child } elsif( $next_elt= $elt->{next_sibling}) { # no child but a next sibling (just check we stay within the subtree) # case where elt is subtree_root, is empty and has a sibling last if( $subtree_root && ($elt == $subtree_root)); } else { # case where the element has no child and no next sibling: # get the first next sibling of an ancestor, checking subtree_root # case where elt is subtree_root, is empty and has no sibling last if( $subtree_root && ($elt == $subtree_root)); # backtrack until we find a parent with a next sibling $next_elt= $elt->{parent} || last undef; until( $next_elt->{next_sibling}) { last if( $subtree_root && ($subtree_root == $next_elt)); $next_elt= $next_elt->{parent} || last; } last if( $subtree_root && ($subtree_root == $next_elt)); $next_elt= $next_elt->{next_sibling}; } $elt= $next_elt || last; push @descendants, $elt if( !$cond || ($elt->{gi} eq $ind)); } } else # end optimization { # branch for a complex condition: use the regular (slow but simple) way while( $elt= $elt->next_elt( $subtree_root, $cond)) { push @descendants, $elt; } } return @descendants; } sub descendants_or_self { my( $elt, $cond)= @_; my @descendants= $elt->passes( $cond) ? 
($elt) : (); push @descendants, $elt->descendants( $cond); return @descendants; } sub sibling { my $elt= shift; my $nb= shift; if( $nb > 0) { foreach( 1..$nb) { $elt= $elt->next_sibling( @_) or return undef; } } elsif( $nb < 0) { foreach( 1..(-$nb)) { $elt= $elt->prev_sibling( @_) or return undef; } } else # $nb == 0 { return $elt->passes( $_[0]); } return $elt; } sub sibling_text { my $elt= sibling( @_); return $elt ? $elt->text : undef; } sub child { my $elt= shift; my $nb= shift; if( $nb >= 0) { $elt= $elt->first_child( @_) or return undef; foreach( 1..$nb) { $elt= $elt->next_sibling( @_) or return undef; } } else { $elt= $elt->last_child( @_) or return undef; foreach( 2..(-$nb)) { $elt= $elt->prev_sibling( @_) or return undef; } } return $elt; } sub prev_siblings { my $elt= shift; my @siblings=(); while( $elt= $elt->prev_sibling( @_)) { unshift @siblings, $elt; } return @siblings; } sub siblings { my $elt= shift; return grep { $_ ne $elt } $elt->{parent}->children( @_); } sub pos { my $elt= shift; return 0 if ($_[0] && !$elt->matches( @_)); my $pos=1; $pos++ while( $elt= $elt->prev_sibling( @_)); return $pos; } sub next_siblings { my $elt= shift; my @siblings=(); while( $elt= $elt->next_sibling( @_)) { push @siblings, $elt; } return @siblings; } # used by get_xpath: parses the xpath expression and generates a sub that performs the # search { my %axis2method; BEGIN { %axis2method= ( child => 'children', descendant => 'descendants', 'descendant-or-self' => 'descendants_or_self', parent => 'parent_is', ancestor => 'ancestors', 'ancestor-or-self' => 'ancestors_or_self', 'following-sibling' => 'next_siblings', 'preceding-sibling' => 'prev_siblings', following => 'following_elts', preceding => 'preceding_elts', self => '_self', ); } sub _install_xpath { my( $xpath_exp, $type)= @_; my $original_exp= $xpath_exp; my $sub= 'my $elt= shift; my @results;'; # grab the root if expression starts with a / if( $xpath_exp=~ s{^/}{}) { $sub .= '@results= ($elt->twig) || croak "cannot use an XPath query starting with a / on a node not attached to a whole twig";'; } elsif( $xpath_exp=~ s{^\./}{}) { $sub .= '@results= ($elt);'; } else { $sub .= '@results= ($elt);'; } #warn "xpath_exp= '$xpath_exp'\n"; while( $xpath_exp && $xpath_exp=~s{^\s*(/?) # the xxx=~/regexp/ is a pain as it includes / (\s*(?:(?:($REG_AXIS)::)?(\*|$REG_TAG_PART|\.\.|\.)\s*)?($REG_PREDICATE_ALT*) ) (/|$)}{}xo) { my( $wildcard, $sub_exp, $axis, $gi, $predicates)= ($1, $2, $3, $4, $5); if( $axis && ! $gi) { _croak_and_doublecheck_xpath( $original_exp, "error in xpath expression $original_exp"); } # grab a parent if( $sub_exp eq '..') { _croak_and_doublecheck_xpath( $original_exp, "error in xpath expression $original_exp") if( $wildcard); $sub .= '@results= map { $_->{parent}} @results;'; } # test the element itself elsif( $sub_exp=~ m{^\.(.*)$}s) { $sub .= "\@results= grep { \$_->matches( q{$1}) } \@results;" } # grab children else { if( !$axis) { $axis= $wildcard ? 'descendant' : 'child'; } if( !$gi or $gi eq '*') { $gi=''; } my $function; # "special" predicates, that return just one element if( $predicates && ($predicates =~ m{^\s*\[\s*((-\s*)?\d+)\s*\]\s*$})) { # [] my $offset= $1; $offset-- if( $offset > 0); $function= $axis eq 'descendant' ? "next_n_elt( $offset, '$gi')" : $axis eq 'child' ? 
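#
# Usage sketch for descendants(), child(), sibling() and pos() above; the
# '$table' variable and the 'row'/'cell' tags are made up:
#
#   my @cells     = $table->descendants( 'cell');    # depth-first, in document order
#   my $second_row= $table->child( 1, 'row');        # children are numbered from 0
#   my $last_row  = $table->child( -1, 'row');       # negative index counts from the end
#   my $next_row  = $second_row->sibling( 1, 'row'); # one matching sibling forward
#   my $rank      = $second_row->pos( 'row');        # 1-based position among 'row' siblings
#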
"child( $offset, '$gi')" : _croak_and_doublecheck_xpath( $original_exp, "error [$1] not supported along axis '$axis'") ; $sub .= "\@results= grep { \$_ } map { \$_->$function } \@results;" } elsif( $predicates && ($predicates =~ m{^\s*\[\s*last\s*\(\s*\)\s*\]\s*$}) ) { # last() _croak_and_doublecheck_xpath( $original_exp, "error in xpath expression $original_exp, usage of // and last() not supported") if( $wildcard); $sub .= "\@results= map { \$_->last_child( '$gi') } \@results;"; } else { # follow the axis #warn "axis: '$axis' - method: '$axis2method{$axis}' - gi: '$gi'\n"; my $follow_axis= " \$_->$axis2method{$axis}( '$gi')"; my $step= $follow_axis; # now filter using the predicate while( $predicates=~ s{^\s*($REG_PREDICATE_ALT)\s*}{}o) { my $pred= $1; $pred=~ s{^\s*\[\s*}{}; $pred=~ s{\s*\]\s*$}{}; my $test=""; my $pos; if( $pred=~ m{^(-?\s*\d+)$}) { my $pos= $1; if( $step=~ m{^\s*grep(.*) (\$_->\w+\(\s*'[^']*'\s*\))}) { $step= "XML::Twig::_first_n $1 $pos, $2"; } else { if( $pos > 0) { $pos--; } $step= "($step)[$pos]"; } #warn "number predicate '$pos' - generated step '$step'\n"; } else { my $syntax_error=0; do { if( $pred =~ s{^string\(\s*\)\s*=\s*($REG_STRING)\s*}{}o) # string()="string" pred { $test .= "\$_->text eq $1"; } elsif( $pred =~ s{^string\(\s*\)\s*!=\s*($REG_STRING)\s*}{}o) # string()!="string" pred { $test .= "\$_->text ne $1"; } if( $pred =~ s{^string\(\s*\)\s*=\s*($REG_NUMBER)\s*}{}o) # string()= pred { $test .= "\$_->text eq $1"; } elsif( $pred =~ s{^string\(\s*\)\s*!=\s*($REG_NUMBER)\s*}{}o) # string()!= pred { $test .= "\$_->text ne $1"; } elsif( $pred =~ s{^string\(\s*\)\s*(>|<|>=|<=)\s*($REG_NUMBER)\s*}{}o) # string()!= pred { $test .= "\$_->text $1 $2"; } elsif( $pred =~ s{^string\(\s*\)\s*($REG_MATCH)\s*($REG_REGEXP)\s*}{}o) # string()=~/regex/ pred { my( $match, $regexp)= ($1, $2); $test .= "\$_->text $match $regexp"; } elsif( $pred =~ s{^string\(\s*\)\s*}{}o) # string() pred { $test .= "\$_->text"; } elsif( $pred=~ s{^@($REG_TAG_NAME)\s*($REG_OP)\s*($REG_STRING|$REG_NUMBER)}{}o) # @att="val" pred { my( $att, $oper, $val)= ($1, _op( $2), $3); $test .= qq{((defined \$_->{'att'}->{"$att"}) && (\$_->{'att'}->{"$att"} $oper $val))}; } elsif( $pred =~ s{^@($REG_TAG_NAME)\s*($REG_MATCH)\s*($REG_REGEXP)\s*}{}o) # @att=~/regex/ pred XXX { my( $att, $match, $regexp)= ($1, $2, $3); $test .= qq{((defined \$_->{'att'}->{"$att"}) && (\$_->{'att'}->{"$att"} $match $regexp))};; } elsif( $pred=~ s{^@($REG_TAG_NAME)\s*}{}o) # @att pred { $test .= qq{(defined \$_->{'att'}->{"$1"})}; } elsif( $pred=~ s{^\s*(?:not|!)\s*@($REG_TAG_NAME)\s*}{}o) # not @att pred { $test .= qq{((\$_->is_elt) && (not defined \$_->{'att'}->{"$1"}))}; } elsif( $pred=~ s{^\s*([()])}{}) # ( or ) (just add to the test) { $test .= qq{$1}; } elsif( $pred=~ s{^\s*(and|or)\s*}{}) { $test .= lc " $1 "; } else { $syntax_error=1; } } while( !$syntax_error && $pred); _croak_and_doublecheck_xpath( $original_exp, "error in xpath expression $original_exp at $pred") if( $pred); $step= " grep { $test } $step "; } } #warn "step: '$step'"; $sub .= "\@results= grep { \$_ } map { $step } \@results;"; } } } if( $xpath_exp) { _croak_and_doublecheck_xpath( $original_exp, "error in xpath expression $original_exp around $xpath_exp"); } $sub .= q{return XML::Twig::_unique_elts( @results); }; #warn "generated: '$sub'\n"; my $s= eval "sub { $NO_WARNINGS; $sub }"; if( $@) { _croak_and_doublecheck_xpath( $original_exp, "error in xpath expression $original_exp ($@);") } return( $s); } } sub _croak_and_doublecheck_xpath { my 
$xpath_expression= shift;
    my $mess= join( "\n", @_);
    if( $XML::Twig::XPath::VERSION || 0)
      { my $check_twig= XML::Twig::XPath->new;
        if( eval { $check_twig->{twig_xp}->_parse( $xpath_expression) })
          { $mess .= "\nthe expression is a valid XPath statement, and you are using XML::Twig::XPath, but"
                   . "\nyou are using either 'find_nodes' or 'get_xpath' where the method you likely wanted"
                   . "\nto use is 'findnodes', which is the only one that uses the full XPath engine\n";
          }
      }
    croak $mess;
  }

{ # extremely elaborate caching mechanism
  my %xpath; # xpath_expression => subroutine_code;
  sub get_xpath
    { my( $elt, $xpath_exp, $offset)= @_;
      my $sub= ($xpath{$xpath_exp} ||= _install_xpath( $xpath_exp));
      return $sub->( $elt) unless( defined $offset);
      my @res= $sub->( $elt);
      return $res[$offset];
    }
}

sub findvalues { my $elt= shift; return map { $_->text } $elt->get_xpath( @_); }
sub findvalue  { my $elt= shift; return join '', map { $_->text } $elt->get_xpath( @_); }

# XML::XPath compatibility
sub getElementById { return $_[0]->twig->elt_id( $_[1]); }
sub getChildNodes
  { my @children= do { my $elt= $_[0];
                       my @children=();
                       my $child= $elt->{first_child};
                       while( $child) { push @children, $child; $child= $child->{next_sibling}; }
                       @children;
                     };
    return wantarray ? @children : \@children;
  }

sub _flushed     { return $_[0]->{flushed}; }
sub _set_flushed { $_[0]->{flushed}=1; }
sub _del_flushed { delete $_[0]->{flushed}; }

sub cut
  { my $elt= shift;
    my( $parent, $prev_sibling, $next_sibling);
    $parent= $elt->{parent};
    my $a= $elt->{'att'}->{'a'} || 'na';
    if( ! $parent && $elt->is_elt)
      { # are we cutting the root?
        my $t= $elt->{twig};
        if( $t && ! $t->{twig_parsing})
          { delete $t->{twig_root}; delete $elt->{twig}; return $elt; } # cutting the root
        else
          { return; } # cutting an orphan, returning $elt would break backward compatibility
      }
    # save the old links, that'll make it easier for some loops
    foreach my $link ( qw(parent prev_sibling next_sibling) )
      { $elt->{former}->{$link}= $elt->{$link};
        if( $XML::Twig::weakrefs) { weaken( $elt->{former}->{$link}); }
      }
    # if we cut the current element then its parent becomes the current elt
    if( $elt->{twig_current})
      { my $twig_current= $elt->{parent};
        $elt->twig->{twig_current}= $twig_current;
        $twig_current->{'twig_current'}=1;
        delete $elt->{'twig_current'};
      }
    if( $parent->{first_child} && $parent->{first_child} == $elt)
      { $parent->{first_child}= $elt->{next_sibling};
        # cutting can make the parent empty
        if( !
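#
# Usage sketch for get_xpath(), findvalue() and findvalues() above.  Only the
# XPath subset compiled by _install_xpath is available through these methods
# (location steps, attribute and string() predicates, positional tests); the
# tag and attribute names are made up:
#
#   my @warnings= $root->get_xpath( './/para[@type="warning"]');
#   my $first   = $root->get_xpath( './/para[@type="warning"]', 0);   # offset form
#   my $author  = $root->findvalue( 'info/author');                   # concatenated text
#   my @titles  = $root->findvalues( 'chapter/title');
#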
$parent->{first_child}) { $parent->{empty}= 1; } } if( $parent->{last_child} && $parent->{last_child} == $elt) { $parent->{empty}=0; $parent->{last_child}=$elt->{prev_sibling}; if( $XML::Twig::weakrefs) { weaken( $parent->{last_child});} ; } if( $prev_sibling= $elt->{prev_sibling}) { $prev_sibling->{next_sibling}= $elt->{next_sibling}; } if( $next_sibling= $elt->{next_sibling}) { $next_sibling->{prev_sibling}=$elt->{prev_sibling}; if( $XML::Twig::weakrefs) { weaken( $next_sibling->{prev_sibling});} ; } $elt->{parent}=undef; if( $XML::Twig::weakrefs) { weaken( $elt->{parent});} ; $elt->{prev_sibling}=undef; if( $XML::Twig::weakrefs) { weaken( $elt->{prev_sibling});} ; $elt->{next_sibling}= undef; # merge 2 (now) consecutive text nodes if they are of the same type # (type can be PCDATA or CDATA) if( $prev_sibling && $next_sibling && $prev_sibling->is_text && ( $XML::Twig::index2gi[$prev_sibling->{'gi'}] eq $XML::Twig::index2gi[$next_sibling->{'gi'}])) { $prev_sibling->merge_text( $next_sibling); } return $elt; } sub former_next_sibling { return $_[0]->{former}->{next_sibling}; } sub former_prev_sibling { return $_[0]->{former}->{prev_sibling}; } sub former_parent { return $_[0]->{former}->{parent}; } sub cut_children { my( $elt, $exp)= @_; my @children= $elt->children( $exp); foreach (@children) { $_->cut; } if( ! $elt->has_children) { $elt->{empty}= 1; } return @children; } sub cut_descendants { my( $elt, $exp)= @_; my @descendants= $elt->descendants( $exp); foreach ($elt->descendants( $exp)) { $_->cut; } if( ! $elt->has_children) { $elt->{empty}= 1; } return @descendants; } sub erase { my $elt= shift; #you cannot erase the current element if( $elt->{twig_current}) { croak "trying to erase an element before it has been completely parsed"; } unless( $elt->{parent}) { # trying to erase the root (of a twig or of a cut/new element) my @children= do { my $elt= $elt; my @children=(); my $child= $elt->{first_child}; while( $child) { push @children, $child; $child= $child->{next_sibling}; } @children; }; unless( @children == 1) { croak "can only erase an element with no parent if it has a single child"; } $elt->_move_extra_data_after_erase; my $child= shift @children; $child->{parent}=undef; if( $XML::Twig::weakrefs) { weaken( $child->{parent});} ; my $twig= $elt->twig; $twig->set_root( $child); } else { # normal case $elt->_move_extra_data_after_erase; my @children= do { my $elt= $elt; my @children=(); my $child= $elt->{first_child}; while( $child) { push @children, $child; $child= $child->{next_sibling}; } @children; }; if( @children) { # elt has children, move them up my $first_child= $elt->{first_child}; my $prev_sibling=$elt->{prev_sibling}; if( $prev_sibling) { # connect first child to previous sibling $first_child->{prev_sibling}=$prev_sibling; if( $XML::Twig::weakrefs) { weaken( $first_child->{prev_sibling});} ; $prev_sibling->{next_sibling}= $first_child; } else { # elt was the first child $elt->{parent}->set_first_child( $first_child); } my $last_child= $elt->{last_child}; my $next_sibling= $elt->{next_sibling}; if( $next_sibling) { # connect last child to next sibling $last_child->{next_sibling}= $next_sibling; $next_sibling->{prev_sibling}=$last_child; if( $XML::Twig::weakrefs) { weaken( $next_sibling->{prev_sibling});} ; } else { # elt was the last child $elt->{parent}->set_last_child( $last_child); } # update parent for all siblings foreach my $child (@children) { $child->{parent}=$elt->{parent}; if( $XML::Twig::weakrefs) { weaken( $child->{parent});} ; } # merge consecutive text 
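#
# Usage sketch for cut(), cut_children() and erase() above.  cut() detaches an
# element (it can be pasted back elsewhere), erase() removes the element while
# keeping its children in place; '$doc_root', '$wrapper' and 'note' are made up:
#
#   my $note= $doc_root->first_child( 'note');
#   $note->cut;                                    # detached; former_parent() still knows where it was
#   my @cut = $doc_root->cut_children( 'note');    # cut every remaining 'note' child
#   $wrapper->erase;                               # children of $wrapper move up to its parent
#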
elements if need be if( $prev_sibling && $prev_sibling->is_text && ($XML::Twig::index2gi[$first_child->{'gi'}] eq $XML::Twig::index2gi[$prev_sibling->{'gi'}]) ) { $prev_sibling->merge_text( $first_child); } if( $next_sibling && $next_sibling->is_text && ($XML::Twig::index2gi[$last_child->{'gi'}] eq $XML::Twig::index2gi[$next_sibling->{'gi'}]) ) { $last_child->merge_text( $next_sibling); } # if parsing and have now a PCDATA text, mark so we can normalize later on if need be if( $elt->{parent}->{twig_current} && $elt->{last_child}->is_text) { $elt->{parent}->{twig_to_be_normalized}=1; } # elt is not referenced any more, so it will be DESTROYed # so we'd better break the links to its children ## FIX undef $elt->{first_child}; undef $elt->{last_child}; undef $elt->{parent}; undef $elt->{next_sibling}; undef $elt->{prev_sibling}; } { # elt had no child, delete it $elt->delete; } } return $elt; } sub _move_extra_data_after_erase { my( $elt)= @_; # extra_data if( my $extra_data= $elt->{extra_data}) { my $target= $elt->{first_child} || $elt->{next_sibling}; if( $target) { if( $target->is( $ELT)) { $target->set_extra_data( $extra_data . ($target->extra_data || '')); } elsif( $target->is( $TEXT)) { $target->_unshift_extra_data_in_pcdata( $extra_data, 0); } # TO CHECK } else { my $parent= $elt->{parent}; # always exists or the erase cannot be performed $parent->_prefix_extra_data_before_end_tag( $extra_data); } } # extra_data_before_end_tag if( my $extra_data= $elt->{extra_data_before_end_tag}) { if( my $target= $elt->{next_sibling}) { if( $target->is( $ELT)) { $target->set_extra_data( $extra_data . ($target->extra_data || '')); } elsif( $target->is( $TEXT)) { $target->_unshift_extra_data_in_pcdata( $extra_data, 0); } } elsif( my $parent= $elt->{parent}) { $parent->_prefix_extra_data_before_end_tag( $extra_data); } } return $elt; } BEGIN { my %method= ( before => \&paste_before, after => \&paste_after, first_child => \&paste_first_child, last_child => \&paste_last_child, within => \&paste_within, ); # paste elt somewhere around ref # pos can be first_child (default), last_child, before, after or within sub paste ## no critic (Subroutines::ProhibitNestedSubs); { my $elt= shift; if( $elt->{parent}) { croak "cannot paste an element that belongs to a tree"; } my $pos; my $ref; if( ref $_[0]) { $pos= 'first_child'; croak "wrong argument order in paste, should be $_[1] first" if($_[1]); } else { $pos= shift; } if( my $method= $method{$pos}) { unless( ref( $_[0]) && isa( $_[0], 'XML::Twig::Elt')) { if( ! defined( $_[0])) { croak "missing target in paste"; } elsif( ! ref( $_[0])) { croak "wrong target type in paste (not a reference), should be XML::Twig::Elt or a subclass"; } else { my $ref= ref $_[0]; croak "wrong target type in paste: '$ref', should be XML::Twig::Elt or a subclass"; } } $ref= $_[0]; # check here so error message lists the caller file/line if( !$ref->{parent} && ($pos=~ m{^(before|after)$}) && !(exists $elt->{'target'}) && !(exists $elt->{'comment'})) { croak "cannot paste $1 root"; } $elt->$method( @_); } else { croak "tried to paste in wrong position '$pos', allowed positions " . " are 'first_child', 'last_child', 'before', 'after' and " . 
"'within'"; } if( (my $ids= $elt->{twig_id_list}) && (my $t= $ref->twig) ) { $t->{twig_id_list}||={}; foreach my $id (keys %$ids) { $t->{twig_id_list}->{$id}= $ids->{$id}; if( $XML::Twig::weakrefs) { weaken( $t->{twig_id_list}->{$id}); } } } return $elt; } sub paste_before { my( $elt, $ref)= @_; my( $parent, $prev_sibling, $next_sibling ); # trying to paste before an orphan (root or detached wlt) unless( $ref->{parent}) { if( my $t= $ref->twig) { if( (exists $elt->{'comment'}) || (exists $elt->{'target'})) # we can still do this { $t->_add_cpi_outside_of_root( leading_cpi => $elt); return; } else { croak "cannot paste before root"; } } else { croak "cannot paste before an orphan element"; } } $parent= $ref->{parent}; $prev_sibling= $ref->{prev_sibling}; $next_sibling= $ref; $elt->{parent}=$parent; if( $XML::Twig::weakrefs) { weaken( $elt->{parent});} ; if( $parent->{first_child} == $ref) { $parent->{first_child}= $elt; } if( $prev_sibling) { $prev_sibling->{next_sibling}= $elt; } $elt->{prev_sibling}=$prev_sibling; if( $XML::Twig::weakrefs) { weaken( $elt->{prev_sibling});} ; $next_sibling->{prev_sibling}=$elt; if( $XML::Twig::weakrefs) { weaken( $next_sibling->{prev_sibling});} ; $elt->{next_sibling}= $ref; return $elt; } sub paste_after { my( $elt, $ref)= @_; my( $parent, $prev_sibling, $next_sibling ); # trying to paste after an orphan (root or detached wlt) unless( $ref->{parent}) { if( my $t= $ref->twig) { if( (exists $elt->{'comment'}) || (exists $elt->{'target'})) # we can still do this { $t->_add_cpi_outside_of_root( trailing_cpi => $elt); return; } else { croak "cannot paste after root"; } } else { croak "cannot paste after an orphan element"; } } $parent= $ref->{parent}; $prev_sibling= $ref; $next_sibling= $ref->{next_sibling}; $elt->{parent}=$parent; if( $XML::Twig::weakrefs) { weaken( $elt->{parent});} ; if( $parent->{last_child}== $ref) { $parent->{empty}=0; $parent->{last_child}=$elt; if( $XML::Twig::weakrefs) { weaken( $parent->{last_child});} ; } $prev_sibling->{next_sibling}= $elt; $elt->{prev_sibling}=$prev_sibling; if( $XML::Twig::weakrefs) { weaken( $elt->{prev_sibling});} ; if( $next_sibling) { $next_sibling->{prev_sibling}=$elt; if( $XML::Twig::weakrefs) { weaken( $next_sibling->{prev_sibling});} ; } $elt->{next_sibling}= $next_sibling; return $elt; } sub paste_first_child { my( $elt, $ref)= @_; my( $parent, $prev_sibling, $next_sibling ); $parent= $ref; $next_sibling= $ref->{first_child}; $elt->{parent}=$parent; if( $XML::Twig::weakrefs) { weaken( $elt->{parent});} ; $parent->{first_child}= $elt; unless( $parent->{last_child}) { $parent->{empty}=0; $parent->{last_child}=$elt; if( $XML::Twig::weakrefs) { weaken( $parent->{last_child});} ; } $elt->{prev_sibling}=undef; if( $XML::Twig::weakrefs) { weaken( $elt->{prev_sibling});} ; if( $next_sibling) { $next_sibling->{prev_sibling}=$elt; if( $XML::Twig::weakrefs) { weaken( $next_sibling->{prev_sibling});} ; } $elt->{next_sibling}= $next_sibling; return $elt; } sub paste_last_child { my( $elt, $ref)= @_; my( $parent, $prev_sibling, $next_sibling ); $parent= $ref; $prev_sibling= $ref->{last_child}; $elt->{parent}=$parent; if( $XML::Twig::weakrefs) { weaken( $elt->{parent});} ; $parent->{empty}=0; $parent->{last_child}=$elt; if( $XML::Twig::weakrefs) { weaken( $parent->{last_child});} ; unless( $parent->{first_child}) { $parent->{first_child}= $elt; } $elt->{prev_sibling}=$prev_sibling; if( $XML::Twig::weakrefs) { weaken( $elt->{prev_sibling});} ; if( $prev_sibling) { $prev_sibling->{next_sibling}= $elt; } 
$elt->{next_sibling}= undef; return $elt; } sub paste_within { my( $elt, $ref, $offset)= @_; my $text= $ref->is_text ? $ref : $ref->next_elt( $TEXT, $ref); my $new= $text->split_at( $offset); $elt->paste_before( $new); return $elt; } } # load an element into a structure similar to XML::Simple's sub simplify { my $elt= shift; # normalize option names my %options= @_; %options= map { my ($key, $val)= ($_, $options{$_}); $key=~ s{(\w)([A-Z])}{$1_\L$2}g; $key => $val } keys %options; # check options my @allowed_options= qw( keyattr forcearray noattr content_key var var_regexp variables var_attr group_tags forcecontent normalise_space normalize_space ); my %allowed_options= map { $_ => 1 } @allowed_options; foreach my $option (keys %options) { carp "invalid option $option\n" unless( $allowed_options{$option}); } $options{normalise_space} ||= $options{normalize_space} || 0; $options{content_key} ||= 'content'; if( $options{content_key}=~ m{^-}) { # need to remove the - and to activate extra folding $options{content_key}=~ s{^-}{}; $options{extra_folding}= 1; } else { $options{extra_folding}= 0; } $options{forcearray} ||=0; if( isa( $options{forcearray}, 'ARRAY')) { my %forcearray_tags= map { $_ => 1 } @{$options{forcearray}}; $options{forcearray_tags}= \%forcearray_tags; $options{forcearray}= 0; } $options{keyattr} ||= ['name', 'key', 'id']; if( ref $options{keyattr} eq 'ARRAY') { foreach my $keyattr (@{$options{keyattr}}) { my( $prefix, $att)= ($keyattr=~ m{^([+-])?(.*)}); $prefix ||= ''; $options{key_for_all}->{$att}= 1; $options{remove_key_for_all}->{$att}=1 unless( $prefix eq '+'); $options{prefix_key_for_all}->{$att}=1 if( $prefix eq '-'); } } elsif( ref $options{keyattr} eq 'HASH') { while( my( $elt, $keyattr)= each %{$options{keyattr}}) { my( $prefix, $att)= ($keyattr=~ m{^([+-])?(.*)}); $prefix ||=''; $options{key_for_elt}->{$elt}= $att; $options{remove_key_for_elt}->{"$elt#$att"}=1 unless( $prefix); $options{prefix_key_for_elt}->{"$elt#$att"}=1 if( $prefix eq '-'); } } $options{var}||= $options{var_attr}; # for compat with XML::Simple if( $options{var}) { $options{var_values}= {}; } else { $options{var}=''; } if( $options{variables}) { $options{var}||= 1; $options{var_values}= $options{variables}; } if( $options{var_regexp} and !$options{var}) { warn "var option not used, var_regexp option ignored\n"; } $options{var_regexp} ||= '\$\{?(\w+)\}?'; $elt->_simplify( \%options); } sub _simplify { my( $elt, $options)= @_; my $data; my $gi= $XML::Twig::index2gi[$elt->{'gi'}]; my @children= do { my $elt= $elt; my @children=(); my $child= $elt->{first_child}; while( $child) { push @children, $child; $child= $child->{next_sibling}; } @children; }; my %atts= $options->{noattr} || !$elt->{att} ? 
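#
# Usage sketch for paste() and its position variants above; '$parent' and
# '$ref' are made-up variables:
#
#   my $new= XML::Twig::Elt->new( note => 'remember this');
#   $new->paste( last_child => $parent);        # default position is first_child
#   $new->cut;                                  # an element must be cut before it can be pasted again
#   $new->paste( after => $ref);                # also: before, first_child, last_child, within
#   my $other= $new->copy;
#   $other->paste( within => $ref, 2);          # split $ref's text at offset 2 and insert there
#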
() : %{$elt->{att}}; my $nb_atts= keys %atts; my $nb_children= $elt->children_count + $nb_atts; my %nb_children; foreach (@children) { $nb_children{$_->tag}++; } foreach (keys %atts) { $nb_children{$_}++; } my $arrays; # tag => array where elements are stored # store children foreach my $child (@children) { if( $child->is_text) { # generate with a content key my $text= $elt->_text_with_vars( $options); if( $options->{normalise_space} >= 2) { $text= _normalize_space( $text); } if( $options->{force_content} || $nb_atts || (scalar @children > 1) ) { $data->{$options->{content_key}}= $text; } else { $data= $text; } } else { # element with sub-elements my $child_gi= $XML::Twig::index2gi[$child->{'gi'}]; my $child_data= $child->_simplify( $options); # first see if we need to simplify further the child data # simplify because of grouped tags if( my $grouped_tag= $options->{group_tags}->{$child_gi}) { # check that the child data is a hash with a single field unless( (ref( $child_data) eq 'HASH') && (keys %$child_data == 1) && defined ( my $grouped_child_data= $child_data->{$grouped_tag}) ) { croak "error in grouped tag $child_gi"; } else { $child_data= $grouped_child_data; } } # simplify because of extra folding if( $options->{extra_folding}) { if( (ref( $child_data) eq 'HASH') && (keys %$child_data == 1) && defined( my $content= $child_data->{$options->{content_key}}) ) { $child_data= $content; } } if( my $keyatt= $child->_key_attr( $options)) { # simplify element with key my $key= $child->{'att'}->{$keyatt}; if( $options->{normalise_space} >= 1) { $key= _normalize_space( $key); } $data->{$child_gi}->{$key}= $child_data; } elsif( $options->{forcearray} || $options->{forcearray_tags}->{$child_gi} || ( $nb_children{$child_gi} > 1) ) { # simplify element to store in an array $data->{$child_gi} ||= []; push @{$data->{$child_gi}}, $child_data; } else { # simplify element to store as a hash field $data->{$child_gi}= $child_data; } } } # store atts # TODO: deal with att that already have an element by that name foreach my $att (keys %atts) { # do not store if the att is a key that needs to be removed if( $options->{remove_key_for_all}->{$att} || $options->{remove_key_for_elt}->{"$gi#$att"} ) { next; } my $att_text= $options->{var} ? _replace_vars_in_text( $atts{$att}, $options) : $atts{$att} ; if( $options->{normalise_space} >= 2) { $att_text= _normalize_space( $att_text); } if( $options->{prefix_key_for_all}->{$att} || $options->{prefix_key_for_elt}->{"$gi#$att"} ) { # prefix the att $data->{"-$att"}= $att_text; } else { # normal case $data->{$att}= $att_text; } } return $data; } sub _key_attr { my( $elt, $options)=@_; return if( $options->{noattr}); if( $options->{key_for_all}) { foreach my $att ($elt->att_names) { if( $options->{key_for_all}->{$att}) { return $att; } } } elsif( $options->{key_for_elt}) { if( my $key_for_elt= $options->{key_for_elt}->{$XML::Twig::index2gi[$elt->{'gi'}]} ) { return $key_for_elt if( defined( $elt->{'att'}->{$key_for_elt})); } } return; } sub _text_with_vars { my( $elt, $options)= @_; my $text; if( $options->{var}) { $text= _replace_vars_in_text( $elt->text, $options); $elt->_store_var( $options); } else { $text= $elt->text; } return $text; } sub _normalize_space { my $text= shift; $text=~ s{\s+}{ }sg; $text=~ s{^\s}{}; $text=~ s{\s$}{}; return $text; } sub att_nb { return 0 unless( my $atts= $_[0]->{att}); return scalar keys %$atts; } sub has_no_atts { return 1 unless( my $atts= $_[0]->{att}); return scalar keys %$atts ? 
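#
# Usage sketch for simplify() above, which folds an element into plain Perl
# hashes and arrays in the spirit of XML::Simple; the option values are just
# one plausible combination and the 'item'/'id' names are made up:
#
#   my $data= $elt->simplify( forcearray      => [ 'item' ],  # always an arrayref for 'item'
#                             keyattr         => [ 'id' ],    # index sub-elements by their 'id' att
#                             content_key     => 'text',
#                             normalise_space => 2);
#   # $data is now a nested hash/array structure instead of a tree of elements
#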
0 : 1; } sub _replace_vars_in_text { my( $text, $options)= @_; $text=~ s{($options->{var_regexp})} { if( defined( my $value= $options->{var_values}->{$2})) { $value } else { warn "unknown variable $2\n"; $1 } }gex; return $text; } sub _store_var { my( $elt, $options)= @_; if( defined (my $var_name= $elt->{'att'}->{$options->{var}})) { $options->{var_values}->{$var_name}= $elt->text; } } # split a text element at a given offset sub split_at { my( $elt, $offset)= @_; my $text_elt= $elt->is_text ? $elt : $elt->first_child( $TEXT) || return ''; my $string= $text_elt->text; my $left_string= substr( $string, 0, $offset); my $right_string= substr( $string, $offset); $text_elt->{pcdata}= (delete $text_elt->{empty} || 1) && $left_string; my $new_elt= $elt->new( $XML::Twig::index2gi[$elt->{'gi'}], $right_string); $new_elt->paste( after => $elt); return $new_elt; } # split an element or its text descendants into several, in place # all elements (new and untouched) are returned sub split { my $elt= shift; my @text_chunks; my @result; if( $elt->is_text) { @text_chunks= ($elt); } else { @text_chunks= $elt->descendants( $TEXT); } foreach my $text_chunk (@text_chunks) { push @result, $text_chunk->_split( 1, @_); } return @result; } # split an element or its text descendants into several, in place # created elements (those which match the regexp) are returned sub mark { my $elt= shift; my @text_chunks; my @result; if( $elt->is_text) { @text_chunks= ($elt); } else { @text_chunks= $elt->descendants( $TEXT); } foreach my $text_chunk (@text_chunks) { push @result, $text_chunk->_split( 0, @_); } return @result; } # split a single text element # return_all defines what is returned: if it is true # only returns the elements created by matches in the split regexp # otherwise all elements (new and untouched) are returned { sub _split { my $elt= shift; my $return_all= shift; my $regexp= shift; my @tags; while( @_) { my $tag= shift(); if( ref $_[0]) { push @tags, { tag => $tag, atts => shift }; } else { push @tags, { tag => $tag }; } } unless( @tags) { @tags= { tag => $elt->{parent}->gi }; } my @result; # the returned list of elements my $text= $elt->text; my $gi= $XML::Twig::index2gi[$elt->{'gi'}]; # 2 uses: if split matches then the first substring reuses $elt # once a split has occurred then the last match needs to be put in # a new element my $previous_match= 0; while( my( $pre_match, @matches)= $text=~ /^(.*?)$regexp(.*)$/gcs) { $text= pop @matches; if( $previous_match) { # match, not the first one, create a new text ($gi) element _utf8_ify( $pre_match) if( $] < 5.010); $elt= $elt->insert_new_elt( after => $gi, $pre_match); push @result, $elt if( $return_all); } else { # first match in $elt, re-use $elt for the first sub-string _utf8_ify( $pre_match) if( $] < 5.010); $elt->set_text( $pre_match); $previous_match++; # store the fact that there was a match push @result, $elt if( $return_all); } # now deal with matches captured in the regexp if( @matches) { # match, with capture my $i=0; foreach my $match (@matches) { # create new element, text is the match _utf8_ify( $match) if( $] < 5.010); my $tag = _repl_match( $tags[$i]->{tag}, @matches) || '#PCDATA'; my $atts = \%{$tags[$i]->{atts}} || {}; my %atts= map { _repl_match( $_, @matches) => _repl_match( $atts->{$_}, @matches) } keys %$atts; $elt= $elt->insert_new_elt( after => $tag, \%atts, $match); push @result, $elt; $i= ($i + 1) % @tags; } } else { # match, no captures my $tag = $tags[0]->{tag}; my $atts = \%{$tags[0]->{atts}} || {}; $elt= $elt->insert_new_elt( 
after => $tag, $atts); push @result, $elt; } } if( $previous_match && $text) { # there was at least 1 match, and there is text left after the match $elt= $elt->insert_new_elt( after => $gi, $text); } push @result, $elt if( $return_all); return @result; # return all elements } sub _repl_match { my( $val, @matches)= @_; $val=~ s{\$(\d+)}{$matches[$1-1]}g; return $val; } # evil hack needed as sometimes my $encode_is_loaded=0; # so we only load Encode once sub _utf8_ify { if( $perl_version >= 5.008 and $perl_version < 5.010 and !_keep_encoding()) { unless( $encode_is_loaded) { require Encode; import Encode; $encode_is_loaded++; } Encode::_utf8_on( $_[0]); # the flag should be set but is not } } } { my %replace_sub; # cache for complex expressions (expression => sub) sub subs_text { my( $elt, $regexp, $replace)= @_; my $replacement_string; my $is_string= _is_string( $replace); my @parents; foreach my $text_elt ($elt->descendants_or_self( $TEXT)) { if( $is_string) { my $text= $text_elt->text; $text=~ s{$regexp}{ _replace_var( $replace, $1, $2, $3, $4, $5, $6, $7, $8, $9)}egx; $text_elt->set_text( $text); } else { no utf8; # = perl 5.6 my $replace_sub= ( $replace_sub{$replace} ||= _install_replace_sub( $replace)); my $text= $text_elt->text; my $pos=0; # used to skip text that was previously matched my $found_hit; while( my( $pre_match_string, $match_string, @var)= ($text=~ m{(.*?)($regexp)}sg)) { $found_hit=1; my $match_start = length( $pre_match_string); my $match = $match_start ? $text_elt->split_at( $match_start + $pos) : $text_elt; my $match_length = length( $match_string); my $post_match = $match->split_at( $match_length); $replace_sub->( $match, @var); # go to next $text_elt= $post_match; $text= $post_match->text; if( $found_hit) { push @parents, $text_elt->{parent} unless $parents[-1] && $parents[-1]== $text_elt->{parent}; } } } } foreach my $parent (@parents) { $parent->normalize; } return $elt; } sub _is_string { return ($_[0]=~ m{&e[ln]t}) ? 0: 1 } sub _replace_var { my( $string, @var)= @_; unshift @var, undef; $string=~ s{\$(\d)}{$var[$1]}g; return $string; } sub _install_replace_sub { my $replace_exp= shift; my @item= split m{(&e[ln]t\s*\([^)]*\))}, $replace_exp; my $sub= q{ my( $match, @var)= @_; my $new; my $last_inserted=$match;}; my( $gi, $exp); foreach my $item (@item) { next if ! length $item; if( $item=~ m{^&elt\s*\(([^)]*)\)}) { $exp= $1; } elsif( $item=~ m{^&ent\s*\(\s*([^\s)]*)\s*\)}) { $exp= " '#ENT' => $1"; } else { $exp= qq{ '#PCDATA' => "$item"}; } $exp=~ s{\$(\d)}{my $i= $1-1; "\$var[$i]"}eg; # replace references to matches $sub.= qq{ \$new= \$match->new( $exp); }; $sub .= q{ $new->paste( after => $last_inserted); $last_inserted=$new;}; } $sub .= q{ $match->delete; }; #$sub=~ s/;/;\n/g; warn "subs: $sub"; my $coderef= eval "sub { $NO_WARNINGS; $sub }"; if( $@) { croak( "invalid replacement expression $replace_exp: ",$@); } return $coderef; } } sub merge_text { my( $e1, $e2)= @_; croak "invalid merge: can only merge 2 elements" unless( isa( $e2, 'XML::Twig::Elt')); croak "invalid merge: can only merge 2 text elements" unless( $e1->is_text && $e2->is_text && ($e1->gi eq $e2->gi)); my $t1_length= length( $e1->text); $e1->set_text( $e1->text . 
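#
# Usage sketch for split() and subs_text() above; the 'num' tag and the
# link-wrapping replacement are made-up examples:
#
#   # wrap every run of digits in the paragraph text in a <num> element
#   my @pieces= $para->split( qr{(\d+)}, 'num');
#
#   # wrap http(s) links in <a href="..."> elements, using the &elt() mini-language
#   # understood by _install_replace_sub
#   $para->subs_text( qr{(https?://\S+)}, '&elt( a => { href => $1 }, $1)');
#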
$e2->text); if( my $extra_data_in_pcdata= $e2->_extra_data_in_pcdata) { foreach my $data (@$extra_data_in_pcdata) { $e1->_push_extra_data_in_pcdata( $data->{text}, $data->{offset} + $t1_length); } } $e2->delete; return $e1; } sub merge { my( $e1, $e2)= @_; my @e2_children= $e2->_children; if( $e1->_last_child && $e1->_last_child->is_pcdata && @e2_children && $e2_children[0]->is_pcdata ) { my $t1_length= length( $e1->_last_child->{pcdata}); my $child1= $e1->_last_child; my $child2= shift @e2_children; $child1->{pcdata} .= $child2->{pcdata}; my $extra_data= $e1->_extra_data_before_end_tag . $e2->extra_data; if( $extra_data) { $e1->_del_extra_data_before_end_tag; $child1->_push_extra_data_in_pcdata( $extra_data, $t1_length); } if( my $extra_data_in_pcdata= $child2->_extra_data_in_pcdata) { foreach my $data (@$extra_data_in_pcdata) { $child1->_push_extra_data_in_pcdata( $data->{text}, $data->{offset} + $t1_length); } } if( my $extra_data_before_end_tag= $e2->_extra_data_before_end_tag) { $e1->_set_extra_data_before_end_tag( $extra_data_before_end_tag); } } foreach my $e (@e2_children) { $e->move( last_child => $e1); } $e2->delete; return $e1; } # recursively copy an element and returns the copy (can be huge and long) sub copy { my $elt= shift; my $copy= $elt->new( $XML::Twig::index2gi[$elt->{'gi'}]); if( $elt->extra_data) { $copy->set_extra_data( $elt->extra_data); } if( $elt->{extra_data_before_end_tag}) { $copy->_set_extra_data_before_end_tag( $elt->{extra_data_before_end_tag}); } if( $elt->is_asis) { $copy->set_asis; } if( (exists $elt->{'pcdata'})) { $copy->{pcdata}= (delete $copy->{empty} || 1) && $elt->{pcdata}; if( $elt->{extra_data_in_pcdata}) { $copy->_set_extra_data_in_pcdata( $elt->{extra_data_in_pcdata}); } } elsif( (exists $elt->{'cdata'})) { $copy->_set_cdata( $elt->{cdata}); if( $elt->{extra_data_in_pcdata}) { $copy->_set_extra_data_in_pcdata( $elt->{extra_data_in_pcdata}); } } elsif( (exists $elt->{'target'})) { $copy->_set_pi( $elt->{target}, $elt->{data}); } elsif( (exists $elt->{'comment'})) { $copy->_set_comment( $elt->{comment}); } elsif( (exists $elt->{'ent'})) { $copy->{ent}= $elt->{ent}; } else { my @children= do { my $elt= $elt; my @children=(); my $child= $elt->{first_child}; while( $child) { push @children, $child; $child= $child->{next_sibling}; } @children; }; if( my $atts= $elt->{att}) { my %atts; tie %atts, 'Tie::IxHash' if (keep_atts_order()); %atts= %{$atts}; # we want to do a real copy of the attributes $copy->set_atts( \%atts); } foreach my $child (@children) { my $child_copy= $child->copy; $child_copy->paste( 'last_child', $copy); } } # save links to the original location, which can be convenient and is used for namespace resolution foreach my $link ( qw(parent prev_sibling next_sibling) ) { $copy->{former}->{$link}= $elt->{$link}; if( $XML::Twig::weakrefs) { weaken( $copy->{former}->{$link}); } } $copy->{empty}= $elt->{'empty'}; return $copy; } sub delete { my $elt= shift; $elt->cut; $elt->DESTROY unless $XML::Twig::weakrefs; return undef; } sub __destroy { my $elt= shift; return if( $XML::Twig::weakrefs); my $t= shift || $elt->twig; # optional argument, passed in recursive calls foreach( @{[$elt->_children]}) { $_->DESTROY( $t); } # the id reference needs to be destroyed # lots of tests to avoid warnings during the cleanup phase $elt->del_id( $t) if( $ID && $t && defined( $elt->{att}) && exists( $elt->{att}->{$ID})); if( $elt->{former}) { foreach (keys %{$elt->{former}}) { delete $elt->{former}->{$_}; } delete $elt->{former}; } foreach (qw( keys %$elt)) { 
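#
# Usage sketch for copy(), delete() and merge() above; '$section', '$appendix',
# '$para1' and '$para2' are made-up variables:
#
#   my $dup= $section->copy;                  # deep copy, not attached to any tree yet
#   $dup->paste( last_child => $appendix);
#   $section->delete;                         # cut and destroy the original
#   $para1->merge( $para2);                   # move $para2's content into $para1, then delete $para2
#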
delete $elt->{$_}; } undef $elt; } BEGIN { sub set_destroy { if( $XML::Twig::weakrefs) { undef *DESTROY } else { *DESTROY= *__destroy; } } set_destroy(); } # ignores the element sub ignore { my $elt= shift; my $t= $elt->twig; $t->ignore( $elt, @_); } BEGIN { my $pretty = 0; my $quote = '"'; my $INDENT = ' '; my $empty_tag_style = 0; my $remove_cdata = 0; my $keep_encoding = 0; my $expand_external_entities = 0; my $keep_atts_order = 0; my $do_not_escape_amp_in_atts = 0; my $WRAP = '80'; my $REPLACED_ENTS = qq{&<}; my ($NSGMLS, $NICE, $INDENTED, $INDENTEDCT, $INDENTEDC, $WRAPPED, $RECORD1, $RECORD2, $INDENTEDA)= (1..9); my %KEEP_TEXT_TAG_ON_ONE_LINE= map { $_ => 1 } ( $INDENTED, $INDENTEDCT, $INDENTEDC, $INDENTEDA, $WRAPPED); my %WRAPPED = map { $_ => 1 } ( $WRAPPED, $INDENTEDA, $INDENTEDC); my %pretty_print_style= ( none => 0, # no added \n nsgmls => $NSGMLS, # nsgmls-style, \n in tags # below this line styles are UNSAFE (the generated XML can be well-formed but invalid) nice => $NICE, # \n after open/close tags except when the # element starts with text indented => $INDENTED, # nice plus idented indented_close_tag => $INDENTEDCT, # nice plus idented indented_c => $INDENTEDC, # slightly more compact than indented (closing # tags are on the same line) wrapped => $WRAPPED, # text is wrapped at column record_c => $RECORD1, # for record-like data (compact) record => $RECORD2, # for record-like data (not so compact) indented_a => $INDENTEDA, # nice, indented, and with attributes on separate # lines as the nsgmls style, as well as wrapped # lines - to make the xml friendly to line-oriented tools cvs => $INDENTEDA, # alias for indented_a ); my ($HTML, $EXPAND)= (1..2); my %empty_tag_style= ( normal => 0, # html => $HTML, # xhtml => $HTML, # expand => $EXPAND, # ); my %quote_style= ( double => '"', single => "'", # smart => "smart", ); my $xml_space_preserve; # set when an element includes xml:space="preserve" my $output_filter; # filters the entire output (including < and >) my $output_text_filter; # filters only the text part (tag names, attributes, pcdata) my $replaced_ents= $REPLACED_ENTS; # returns those pesky "global" variables so you can switch between twigs sub global_state ## no critic (Subroutines::ProhibitNestedSubs); { return { pretty => $pretty, quote => $quote, indent => $INDENT, empty_tag_style => $empty_tag_style, remove_cdata => $remove_cdata, keep_encoding => $keep_encoding, expand_external_entities => $expand_external_entities, output_filter => $output_filter, output_text_filter => $output_text_filter, keep_atts_order => $keep_atts_order, do_not_escape_amp_in_atts => $do_not_escape_amp_in_atts, wrap => $WRAP, replaced_ents => $replaced_ents, }; } # restores the global variables sub set_global_state { my $state= shift; $pretty = $state->{pretty}; $quote = $state->{quote}; $INDENT = $state->{indent}; $empty_tag_style = $state->{empty_tag_style}; $remove_cdata = $state->{remove_cdata}; $keep_encoding = $state->{keep_encoding}; $expand_external_entities = $state->{expand_external_entities}; $output_filter = $state->{output_filter}; $output_text_filter = $state->{output_text_filter}; $keep_atts_order = $state->{keep_atts_order}; $do_not_escape_amp_in_atts = $state->{do_not_escape_amp_in_atts}; $WRAP = $state->{wrap}; $replaced_ents = $state->{replaced_ents}, } # sets global state to defaults sub init_global_state { set_global_state( { pretty => 0, quote => '"', indent => $INDENT, empty_tag_style => 0, remove_cdata => 0, keep_encoding => 0, expand_external_entities => 0, output_filter 
=> undef, output_text_filter => undef, keep_atts_order => undef, do_not_escape_amp_in_atts => 0, wrap => $WRAP, replaced_ents => $REPLACED_ENTS, }); } # set the pretty_print style (in $pretty) and returns the old one # can be called from outside the package with 2 arguments (elt, style) # or from inside with only one argument (style) # the style can be either a string (one of the keys of %pretty_print_style # or a number (presumably an old value saved) sub set_pretty_print { my $style= lc( defined $_[1] ? $_[1] : $_[0]); # so we cover both cases my $old_pretty= $pretty; if( $style=~ /^\d+$/) { croak "invalid pretty print style $style" unless( $style < keys %pretty_print_style); $pretty= $style; } else { croak "invalid pretty print style '$style'" unless( exists $pretty_print_style{$style}); $pretty= $pretty_print_style{$style}; } if( $WRAPPED{$pretty} ) { XML::Twig::_use( 'Text::Wrap') or croak( "Text::Wrap not available, cannot use style $style"); } return $old_pretty; } sub _pretty_print { return $pretty; } # set the empty tag style (in $empty_tag_style) and returns the old one # can be called from outside the package with 2 arguments (elt, style) # or from inside with only one argument (style) # the style can be either a string (one of the keys of %empty_tag_style # or a number (presumably an old value saved) sub set_empty_tag_style { my $style= lc( defined $_[1] ? $_[1] : $_[0]); # so we cover both cases my $old_style= $empty_tag_style; if( $style=~ /^\d+$/) { croak "invalid empty tag style $style" unless( $style < keys %empty_tag_style); $empty_tag_style= $style; } else { croak "invalid empty tag style '$style'" unless( exists $empty_tag_style{$style}); $empty_tag_style= $empty_tag_style{$style}; } return $old_style; } sub _pretty_print_styles { return (sort { $pretty_print_style{$a} <=> $pretty_print_style{$b} || $a cmp $b } keys %pretty_print_style); } sub set_quote { my $style= $_[1] || $_[0]; my $old_quote= $quote; croak "invalid quote '$style'" unless( exists $quote_style{$style}); $quote= $quote_style{$style}; return $old_quote; } sub set_remove_cdata { my $new_value= defined $_[1] ? $_[1] : $_[0]; my $old_value= $remove_cdata; $remove_cdata= $new_value; return $old_value; } sub set_indent { my $new_value= defined $_[1] ? $_[1] : $_[0]; my $old_value= $INDENT; $INDENT= $new_value; return $old_value; } sub set_wrap { my $new_value= defined $_[1] ? $_[1] : $_[0]; my $old_value= $WRAP; $WRAP= $new_value; return $old_value; } sub set_keep_encoding { my $new_value= defined $_[1] ? $_[1] : $_[0]; my $old_value= $keep_encoding; $keep_encoding= $new_value; return $old_value; } sub set_replaced_ents { my $new_value= defined $_[1] ? $_[1] : $_[0]; my $old_value= $replaced_ents; $replaced_ents= $new_value; return $old_value; } sub do_not_escape_gt { my $old_value= $replaced_ents; $replaced_ents= q{&<}; # & needs to be first return $old_value; } sub escape_gt { my $old_value= $replaced_ents; $replaced_ents= qq{&<>}; # & needs to be first return $old_value; } sub _keep_encoding { return $keep_encoding; } # so I can use elsewhere in the module sub set_do_not_escape_amp_in_atts { my $new_value= defined $_[1] ? $_[1] : $_[0]; my $old_value= $do_not_escape_amp_in_atts; $do_not_escape_amp_in_atts= $new_value; return $old_value; } sub output_filter { return $output_filter; } sub output_text_filter { return $output_text_filter; } sub set_output_filter { my $new_value= defined $_[1] ? 
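#
# Usage sketch for the output-style setters above.  They are package-level
# settings, so they affect every element printed afterwards; the values shown
# are among those accepted by %pretty_print_style, %empty_tag_style and
# %quote_style:
#
#   XML::Twig::Elt::set_pretty_print( 'indented');   # or 'nice', 'record', 'nsgmls', ...
#   XML::Twig::Elt::set_empty_tag_style( 'html');    # empty elements as <br /> etc.
#   XML::Twig::Elt::set_quote( 'single');            # attribute values quoted with '
#   XML::Twig::Elt::set_indent( '    ');             # 4-space indent
#   my $old= XML::Twig::Elt::set_keep_encoding( 1);  # every setter returns the previous value
#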
$_[1] : $_[0]; # can be called in object/non-object mode # if called in object mode with no argument, the filter is undefined if( isa( $new_value, 'XML::Twig::Elt') || isa( $new_value, 'XML::Twig')) { undef $new_value; } my $old_value= $output_filter; if( !$new_value || isa( $new_value, 'CODE') ) { $output_filter= $new_value; } elsif( $new_value eq 'latin1') { $output_filter= XML::Twig::latin1(); } elsif( $XML::Twig::filter{$new_value}) { $output_filter= $XML::Twig::filter{$new_value}; } else { croak "invalid output filter '$new_value'"; } return $old_value; } sub set_output_text_filter { my $new_value= defined $_[1] ? $_[1] : $_[0]; # can be called in object/non-object mode # if called in object mode with no argument, the filter is undefined if( isa( $new_value, 'XML::Twig::Elt') || isa( $new_value, 'XML::Twig')) { undef $new_value; } my $old_value= $output_text_filter; if( !$new_value || isa( $new_value, 'CODE') ) { $output_text_filter= $new_value; } elsif( $new_value eq 'latin1') { $output_text_filter= XML::Twig::latin1(); } elsif( $XML::Twig::filter{$new_value}) { $output_text_filter= $XML::Twig::filter{$new_value}; } else { croak "invalid output text filter '$new_value'"; } return $old_value; } sub set_expand_external_entities { my $new_value= defined $_[1] ? $_[1] : $_[0]; my $old_value= $expand_external_entities; $expand_external_entities= $new_value; return $old_value; } sub set_keep_atts_order { my $new_value= defined $_[1] ? $_[1] : $_[0]; my $old_value= $keep_atts_order; $keep_atts_order= $new_value; return $old_value; } sub keep_atts_order { return $keep_atts_order; } # so I can use elsewhere in the module my %html_empty_elt; BEGIN { %html_empty_elt= map { $_ => 1} qw( base meta link hr br param img area input col); } sub start_tag { my( $elt, $option)= @_; return if( $elt->{gi} < $XML::Twig::SPECIAL_GI); my $extra_data= $elt->{extra_data} || ''; my $gi= $XML::Twig::index2gi[$elt->{'gi'}]; my $att= $elt->{att}; # should be $elt->{att}, optimized into a pure hash look-up my $ns_map= $att ? $att->{'#original_gi'} : ''; if( $ns_map) { $gi= _restore_original_prefix( $ns_map, $gi); } $gi=~ s{^#default:}{}; # remove default prefix if( $output_text_filter) { $gi= $output_text_filter->( $gi); } # get the attribute and their values my $att_sep = $pretty==$NSGMLS ? "\n" : $pretty==$INDENTEDA ? "\n" . $INDENT x ($elt->level+1) . ' ' : ' ' ; my $replace_in_att_value= $replaced_ents . "$quote\t\r\n"; if( $option->{escape_gt} && $replaced_ents !~ m{>}) { $replace_in_att_value.= '>'; } my $tag; my @att_names= grep { !( $_=~ m{^#(?!default:)} ) } $keep_atts_order ? keys %{$att} : sort keys %{$att}; if( @att_names) { my $atts= join $att_sep, map { my $output_att_name= $ns_map ? _restore_original_prefix( $ns_map, $_) : $_; if( $output_text_filter) { $output_att_name= $output_text_filter->( $output_att_name); } $output_att_name . '=' . $quote . _att_xml_string( $att->{$_}, $replace_in_att_value) . $quote } @att_names ; if( $pretty==$INDENTEDA && @att_names == 1) { $att_sep= ' '; } $tag= "<$gi$att_sep$atts"; } else { $tag= "<$gi"; } $tag .= "\n" if($pretty==$NSGMLS); # force empty if suitable HTML tag, otherwise use the value from the input tree if( ($empty_tag_style eq $HTML) && !$elt->{first_child} && !$elt->{extra_data_before_end_tag} && $html_empty_elt{$gi}) { $elt->{empty}= 1; } my $empty= defined $elt->{empty} ? $elt->{empty} : $elt->{first_child} ? 0 : 1; $tag .= (!$elt->{empty} || $elt->{extra_data_before_end_tag}) ? 
'>' # element has content : (($empty_tag_style eq $HTML) && $html_empty_elt{$gi}) ? ' />' # html empty element # cvs-friendly format : ( $pretty == $INDENTEDA && @att_names > 1) ? "\n" . $INDENT x $elt->level . "/>" : ( $pretty == $INDENTEDA && @att_names == 1) ? " />" : $empty_tag_style ? ">{'gi'}] . ">" # $empty_tag_style is $HTML or $EXPAND : '/>' ; if( ( (substr( $XML::Twig::index2gi[$elt->{'gi'}], 0, 1) eq '#') && (substr( $XML::Twig::index2gi[$elt->{'gi'}], 0, 9) ne '#default:') )) { $tag= ''; } #warn "TRACE: ", $tag,": ", Encode::is_utf8( $tag) ? "has flag" : "FLAG NOT SET"; unless( $pretty) { return defined( $extra_data) ? $extra_data . $tag : $tag; } my $prefix=''; my $return=''; # '' or \n is to be printed before the tag my $indent=0; # number of indents before the tag if( $pretty==$RECORD1) { my $level= $elt->level; $return= "\n" if( $level < 2); $indent= 1 if( $level == 1); } elsif( $pretty==$RECORD2) { $return= "\n"; $indent= $elt->level; } elsif( $pretty==$NICE) { my $parent= $elt->{parent}; unless( !$parent || $parent->{contains_text}) { $return= "\n"; } $elt->{contains_text}= 1 if( ($parent && $parent->{contains_text}) || $elt->contains_text); } elsif( $KEEP_TEXT_TAG_ON_ONE_LINE{$pretty}) { my $parent= $elt->{parent}; unless( !$parent || $parent->{contains_text}) { $return= "\n"; $indent= $elt->level; } $elt->{contains_text}= 1 if( ($parent && $parent->{contains_text}) || $elt->contains_text); } if( $return || $indent) { # check for elements in which spaces should be kept my $t= $elt->twig; return $extra_data . $tag if( $xml_space_preserve); if( $t && $t->{twig_keep_spaces_in}) { foreach my $ancestor ($elt->ancestors) { return $extra_data . $tag if( $t->{twig_keep_spaces_in}->{$XML::Twig::index2gi[$ancestor->{'gi'}]}) } } $prefix= $INDENT x $indent; if( $extra_data) { $extra_data=~ s{\s+$}{}; $extra_data=~ s{^\s+}{}; $extra_data= $prefix . $extra_data . $return; } } return $return . $extra_data . $prefix . $tag; } sub end_tag { my $elt= shift; return '' if( ($elt->{gi}<$XML::Twig::SPECIAL_GI) || ($elt->{'empty'} && !$elt->{extra_data_before_end_tag}) ); my $tag= "<"; my $gi= $XML::Twig::index2gi[$elt->{'gi'}]; if( my $map= $elt->{'att'}->{'#original_gi'}) { $gi= _restore_original_prefix( $map, $gi); } $gi=~ s{^#default:}{}; # remove default prefix if( $output_text_filter) { $gi= $output_text_filter->( $XML::Twig::index2gi[$elt->{'gi'}]); } $tag .= "/$gi>"; $tag = ($elt->{extra_data_before_end_tag} || '') . 
$tag; if( ( (substr( $XML::Twig::index2gi[$elt->{'gi'}], 0, 1) eq '#') && (substr( $XML::Twig::index2gi[$elt->{'gi'}], 0, 9) ne '#default:') )) { $tag= ''; } return $tag unless $pretty; my $prefix=''; my $return=0; # 1 if a \n is to be printed before the tag my $indent=0; # number of indents before the tag if( $pretty==$RECORD1) { $return= 1 if( $elt->level == 0); } elsif( $pretty==$RECORD2) { unless( $elt->contains_text) { $return= 1 ; $indent= $elt->level; } } elsif( $pretty==$NICE) { my $parent= $elt->{parent}; if( ( ($parent && !$parent->{contains_text}) || !$parent ) && ( !$elt->{contains_text} && ($elt->{has_flushed_child} || $elt->{first_child}) ) ) { $return= 1; } } elsif( $KEEP_TEXT_TAG_ON_ONE_LINE{$pretty}) { my $parent= $elt->{parent}; if( ( ($parent && !$parent->{contains_text}) || !$parent ) && ( !$elt->{contains_text} && ($elt->{has_flushed_child} || $elt->{first_child}) ) ) { $return= 1; $indent= $elt->level; } } if( $return || $indent) { # check for elements in which spaces should be kept my $t= $elt->twig; return $tag if( $xml_space_preserve); if( $t && $t->{twig_keep_spaces_in}) { foreach my $ancestor ($elt, $elt->ancestors) { return $tag if( $t->{twig_keep_spaces_in}->{$XML::Twig::index2gi[$ancestor->{'gi'}]}) } } if( $return) { $prefix= ($pretty== $INDENTEDCT) ? "\n$INDENT" : "\n"; } $prefix.= $INDENT x $indent; } # add a \n at the end of the document (after the root element) $tag .= "\n" unless( $elt->{parent}); return $prefix . $tag; } sub _restore_original_prefix { my( $map, $name)= @_; my $prefix= _ns_prefix( $name); if( my $original_prefix= $map->{$prefix}) { if( $original_prefix eq '#default') { $name=~ s{^$prefix:}{}; } else { $name=~ s{^$prefix(?=:)}{$original_prefix}; } } return $name; } # buffer used to hold the text to print/sprint, to avoid passing it back and forth between methods my @sprint; # $elt is an element to print # $fh is an optional filehandle to print to # $pretty is an optional value, if true a \n is printed after the < of the # opening tag sub print { my $elt= shift; my $fh= isa( $_[0], 'GLOB') || isa( $_[0], 'IO::Scalar') ? shift : undef; my $old_select= defined $fh ? select $fh : undef; print $elt->sprint( @_); select $old_select if( defined $old_select); } # those next 2 methods need to be refactored, they are copies of the same methods in XML::Twig sub print_to_file { my( $elt, $filename)= (shift, shift); my $out_fh; # open( $out_fh, ">$filename") or _croak( "cannot create file $filename: $!"); # < perl 5.8 my $mode= $keep_encoding ? '>' : '>:utf8'; # >= perl 5.8 open( $out_fh, $mode, $filename) or _croak( "cannot create file $filename: $!"); # >= perl 5.8 $elt->print( $out_fh, @_); close $out_fh; return $elt; } # probably only works on *nix (at least the chmod bit) # first print to a temporary file, then rename that file to the desired file name, then change permissions # to the original file permissions (or to the current umask) sub safe_print_to_file { my( $elt, $filename)= (shift, shift); my $perm= -f $filename ? 
(stat $filename)[2] & 07777 : ~umask() ; XML::Twig::_use( 'File::Temp') || croak "need File::Temp to use safe_print_to_file\n"; XML::Twig::_use( 'File::Basename') || croak "need File::Basename to use safe_print_to_file\n"; my $tmpdir= File::Basename::dirname( $filename); my( $fh, $tmpfilename) = File::Temp::tempfile( DIR => $tmpdir); $elt->print_to_file( $tmpfilename, @_); rename( $tmpfilename, $filename) or unlink $tmpfilename && _croak( "cannot move temporary file to $filename: $!"); chmod $perm, $filename; return $elt; } # same as print but does not output the start tag if the element # is marked as flushed sub flush { my $elt= shift; my $up_to= $_[0] && isa( $_[0], 'XML::Twig::Elt') ? shift : $elt; $elt->twig->flush_up_to( $up_to, @_); } sub purge { my $elt= shift; my $up_to= $_[0] && isa( $_[0], 'XML::Twig::Elt') ? shift : $elt; $elt->twig->purge_up_to( $up_to, @_); } sub _flush { my $elt= shift; my $pretty; my $fh= isa( $_[0], 'GLOB') || isa( $_[0], 'IO::Scalar') ? shift : undef; my $old_select= defined $fh ? select $fh : undef; my $old_pretty= defined ($pretty= shift) ? set_pretty_print( $pretty) : undef; $xml_space_preserve= 1 if( ($elt->inherit_att( 'xml:space') || '') eq 'preserve'); $elt->__flush(); $xml_space_preserve= 0; select $old_select if( defined $old_select); set_pretty_print( $old_pretty) if( defined $old_pretty); } sub __flush { my $elt= shift; if( $elt->{gi} >= $XML::Twig::SPECIAL_GI) { my $preserve= ($elt->{'att'}->{'xml:space'} || '') eq 'preserve'; $xml_space_preserve++ if $preserve; unless( $elt->_flushed) { print $elt->start_tag(); } # flush the children my @children= do { my $elt= $elt; my @children=(); my $child= $elt->{first_child}; while( $child) { push @children, $child; $child= $child->{next_sibling}; } @children; }; foreach my $child (@children) { $child->_flush( $pretty); } unless( $elt->{end_tag_flushed}) { print $elt->end_tag; } $xml_space_preserve-- if $preserve; # used for pretty printing if( my $parent= $elt->{parent}) { $parent->{has_flushed_child}= 1; } } else # text or special element { my $text; if( (exists $elt->{'pcdata'})) { $text= $elt->pcdata_xml_string; if( my $parent= $elt->{parent}) { $parent->{contains_text}= 1; } } elsif( (exists $elt->{'cdata'})) { $text= $elt->cdata_string; if( my $parent= $elt->{parent}) { $parent->{contains_text}= 1; } } elsif( (exists $elt->{'target'})) { $text= $elt->pi_string; } elsif( (exists $elt->{'comment'})) { $text= $elt->comment_string; } elsif( (exists $elt->{'ent'})) { $text= $elt->ent_string; } print $output_filter ? $output_filter->( $text) : $text; } } sub xml_text { my( $elt, @options)= @_; if( @options && grep { lc( $_) eq 'no_recurse' } @options) { return $elt->xml_text_only; } my $string=''; if( ($elt->{gi} >= $XML::Twig::SPECIAL_GI) ) { # sprint the children my $child= $elt->{first_child} || ''; while( $child) { $string.= $child->xml_text; } continue { $child= $child->{next_sibling}; } } elsif( (exists $elt->{'pcdata'})) { $string .= $output_filter ? $output_filter->($elt->pcdata_xml_string) : $elt->pcdata_xml_string; } elsif( (exists $elt->{'cdata'})) { $string .= $output_filter ? $output_filter->($elt->cdata_string) : $elt->cdata_string; } elsif( (exists $elt->{'ent'})) { $string .= $elt->ent_string; } return $string; } sub xml_text_only { return join '', map { $_->xml_text if( $_->is_text || (exists $_->{'ent'})) } $_[0]->_children; } # same as print but except... 
it does not print but rather returns the string # if the second parameter is set then only the content is returned, not the # start and end tags of the element (but the tags of the included elements are # returned) sub sprint { my $elt= shift; my( $old_pretty, $old_empty_tag_style); if( $_[0] && isa( $_[0], 'HASH')) { my %args= XML::Twig::_normalize_args( %{shift()}); if( defined $args{PrettyPrint}) { $old_pretty = set_pretty_print( $args{PrettyPrint}); } if( defined $args{EmptyTags}) { $old_empty_tag_style = set_empty_tag_style( $args{EmptyTags}); } } $xml_space_preserve= 1 if( ($elt->inherit_att( 'xml:space') || '') eq 'preserve'); @sprint=(); $elt->_sprint( @_); my $sprint= join( '', @sprint); if( $output_filter) { $sprint= $output_filter->( $sprint); } if( ( ($pretty== $WRAPPED) || ($pretty==$INDENTEDC)) && !$xml_space_preserve) { $sprint= _wrap_text( $sprint); } $xml_space_preserve= 0; if( defined $old_pretty) { set_pretty_print( $old_pretty); } if( defined $old_empty_tag_style) { set_empty_tag_style( $old_empty_tag_style); } return $sprint; } sub _wrap_text { my( $string)= @_; my $wrapped; foreach my $line (split /\n/, $string) { my( $initial_indent)= $line=~ m{^(\s*)}; my $wrapped_line= Text::Wrap::wrap( '', $initial_indent . $INDENT, $line) . "\n"; # fix glitch with Text::wrap when the first line is long and does not include spaces # the first line ends up being too short by 2 chars, but we'll have to live with it! $wrapped_line=~ s{^ +\n }{}s; # this prefix needs to be removed $wrapped .= $wrapped_line; } return $wrapped; } sub _sprint { my $elt= shift; my $no_tag= shift || 0; # in case there's some comments or PI's piggybacking if( $elt->{gi} >= $XML::Twig::SPECIAL_GI) { my $preserve= ($elt->{'att'}->{'xml:space'} || '') eq 'preserve'; $xml_space_preserve++ if $preserve; push @sprint, $elt->start_tag unless( $no_tag); # sprint the children my $child= $elt->{first_child}; while( $child) { $child->_sprint; $child= $child->{next_sibling}; } push @sprint, $elt->end_tag unless( $no_tag); $xml_space_preserve-- if $preserve; } else { push @sprint, $elt->{extra_data} if( $elt->{extra_data}) ; if( (exists $elt->{'pcdata'})) { push @sprint, $elt->pcdata_xml_string; } elsif( (exists $elt->{'cdata'})) { push @sprint, $elt->cdata_string; } elsif( (exists $elt->{'target'})) { if( ($pretty >= $INDENTED) && !$elt->{parent}->{contains_text}) { push @sprint, "\n" . $INDENT x $elt->level; } push @sprint, $elt->pi_string; } elsif( (exists $elt->{'comment'})) { if( ($pretty >= $INDENTED) && !$elt->{parent}->{contains_text}) { push @sprint, "\n" . $INDENT x $elt->level; } push @sprint, $elt->comment_string; } elsif( (exists $elt->{'ent'})) { push @sprint, $elt->ent_string; } } return; } # just a shortcut to $elt->sprint( 1) sub xml_string { my $elt= shift; isa( $_[0], 'HASH') ? $elt->sprint( shift(), 1) : $elt->sprint( 1); } sub pcdata_xml_string { my $elt= shift; if( defined( my $string= $elt->{pcdata}) ) { if( ! 
$elt->{extra_data_in_pcdata}) { $string=~ s/([$replaced_ents])/$XML::Twig::base_ent{$1}/g unless( !$replaced_ents || $keep_encoding || $elt->{asis}); $string=~ s{\Q]]>}{]]>}g; } else { _gen_mark( $string); # used by _(un)?protect_extra_data foreach my $data (reverse @{$elt->{extra_data_in_pcdata}}) { my $substr= substr( $string, $data->{offset}); if( $keep_encoding || $elt->{asis}) { substr( $string, $data->{offset}, 0, $data->{text}); } else { substr( $string, $data->{offset}, 0, _protect_extra_data( $data->{text})); } } unless( $keep_encoding || $elt->{asis}) { $string=~ s{([$replaced_ents])}{$XML::Twig::base_ent{$1}}g ; $string=~ s{\Q]]>}{]]>}g; _unprotect_extra_data( $string); } } return $output_text_filter ? $output_text_filter->( $string) : $string; } else { return ''; } } { my $mark; my( %char2ent, %ent2char); BEGIN { %char2ent= ( '<' => 'lt', '&' => 'amp', '>' => 'gt'); %ent2char= map { $char2ent{$_} => $_ } keys %char2ent; } # generate a unique mark (a string) not found in the string, # used to mark < and & in the extra data sub _gen_mark { $mark="AAAA"; $mark++ while( index( $_[0], $mark) > -1); return $mark; } sub _protect_extra_data { my( $extra_data)= @_; $extra_data=~ s{([<&>])}{:$mark:$char2ent{$1}:}g; return $extra_data; } sub _unprotect_extra_data { $_[0]=~ s{:$mark:(\w+):}{$ent2char{$1}}g; } } sub cdata_string { my $cdata= $_[0]->{cdata}; unless( defined $cdata) { return ''; } if( $remove_cdata) { $cdata=~ s/([$replaced_ents])/$XML::Twig::base_ent{$1}/g; } else { $cdata= $CDATA_START . $cdata . $CDATA_END; } return $cdata; } sub att_xml_string { my $elt= shift; my $att= shift; my $replace= $replaced_ents . "$quote\n\r\t"; if($_[0] && $_[0]->{escape_gt} && ($replace!~ m{>}) ) { $replace .='>'; } if( defined (my $string= $elt->{att}->{$att})) { return _att_xml_string( $string, $replace); } else { return ''; } } # escaped xml string for an attribute value sub _att_xml_string { my( $string, $escape)= @_; if( !defined( $string)) { return ''; } if( $keep_encoding) { $string=~ s{$quote}{$XML::Twig::base_ent{$quote}}g; } else { if( $do_not_escape_amp_in_atts) { $escape=~ s{^.}{}; # seems like the most backward compatible way to remove & from the list $string=~ s{([$escape])}{$XML::Twig::base_ent{$1}}g; $string=~ s{&(?!(\w+|#\d+|[xX][0-9a-fA-F]+);)}{&}g; # dodgy: escape & that do not start an entity } else { $string=~ s{([$escape])}{$XML::Twig::base_ent{$1}}g; $string=~ s{\Q]]>}{]]>}g; } } return $output_text_filter ? $output_text_filter->( $string) : $string; } sub ent_string { my $ent= shift; my $ent_text= $ent->{ent}; my( $t, $el, $ent_string); if( $expand_external_entities && ($t= $ent->twig) && ($el= $t->entity_list) && ($ent_string= $el->{entities}->{$ent->ent_name}->{val}) ) { return $ent_string; } else { return $ent_text; } } # returns just the text, no tags, for an element sub text { my( $elt, @options)= @_; if( @options && grep { lc( $_) eq 'no_recurse' } @options) { return $elt->text_only; } my $string; if( (exists $elt->{'pcdata'})) { return $elt->{pcdata}; } elsif( (exists $elt->{'cdata'})) { return $elt->{cdata}; } elsif( (exists $elt->{'target'})) { return $elt->pi_string;} elsif( (exists $elt->{'comment'})) { return $elt->{comment}; } elsif( (exists $elt->{'ent'})) { return $elt->{ent} ; } my $child= $elt->{first_child} ||''; while( $child) { my $child_text= $child->text; $string.= defined( $child_text) ? $child_text : ''; } continue { $child= $child->{next_sibling}; } unless( defined $string) { $string=''; } return $output_text_filter ? 
$output_text_filter->( $string) : $string; } sub text_only { return join '', map { $_->text if( $_->is_text || (exists $_->{'ent'})) } $_[0]->_children; } sub trimmed_text { my $elt= shift; my $text= $elt->text( @_); $text=~ s{\s+}{ }sg; $text=~ s{^\s*}{}; $text=~ s{\s*$}{}; return $text; } sub trim { my( $elt)= @_; my $pcdata= $elt->first_descendant( $TEXT); (my $pcdata_text= $pcdata->text)=~ s{^\s+}{}s; $pcdata->set_text( $pcdata_text); $pcdata= $elt->last_descendant( $TEXT); ($pcdata_text= $pcdata->text)=~ s{\s+$}{}; $pcdata->set_text( $pcdata_text); foreach my $pcdata ($elt->descendants( $TEXT)) { ($pcdata_text= $pcdata->text)=~ s{\s+}{ }g; $pcdata->set_text( $pcdata_text); } return $elt; } # remove cdata sections (turns them into regular pcdata) in an element sub remove_cdata { my $elt= shift; foreach my $cdata ($elt->descendants_or_self( $CDATA)) { if( $keep_encoding) { my $data= $cdata->{cdata}; $data=~ s{([&<"'])}{$XML::Twig::base_ent{$1}}g; $cdata->{pcdata}= (delete $cdata->{empty} || 1) && $data; } else { $cdata->{pcdata}= (delete $cdata->{empty} || 1) && $cdata->{cdata}; } $cdata->{gi}=$XML::Twig::gi2index{$PCDATA} or $cdata->set_gi( $PCDATA); undef $cdata->{cdata}; } } sub _is_private { return _is_private_name( $_[0]->gi); } sub _is_private_name { return $_[0]=~ m{^#(?!default:)}; } } # end of block containing package globals ($pretty_print, $quotes, keep_encoding...) # merges consecutive #PCDATAs in am element sub normalize { my( $elt)= @_; my @descendants= $elt->descendants( $PCDATA); while( my $desc= shift @descendants) { if( ! length $desc->{pcdata}) { $desc->delete; next; } while( @descendants && $desc->{next_sibling} && $desc->{next_sibling}== $descendants[0]) { my $to_merge= shift @descendants; $desc->merge_text( $to_merge); } } return $elt; } # SAX export methods sub toSAX1 { _toSAX(@_, \&_start_tag_data_SAX1, \&_end_tag_data_SAX1); } sub toSAX2 { _toSAX(@_, \&_start_tag_data_SAX2, \&_end_tag_data_SAX2); } sub _toSAX { my( $elt, $handler, $start_tag_data, $end_tag_data)= @_; if( $elt->{gi} >= $XML::Twig::SPECIAL_GI) { my $data= $start_tag_data->( $elt); _start_prefix_mapping( $elt, $handler, $data); if( $data && (my $start_element = $handler->can( 'start_element'))) { unless( $elt->_flushed) { $start_element->( $handler, $data); } } foreach my $child ($elt->_children) { $child->_toSAX( $handler, $start_tag_data, $end_tag_data); } if( (my $data= $end_tag_data->( $elt)) && (my $end_element = $handler->can( 'end_element')) ) { $end_element->( $handler, $data); } _end_prefix_mapping( $elt, $handler); } else # text or special element { if( (exists $elt->{'pcdata'}) && (my $characters= $handler->can( 'characters'))) { $characters->( $handler, { Data => $elt->{pcdata} }); } elsif( (exists $elt->{'cdata'})) { if( my $start_cdata= $handler->can( 'start_cdata')) { $start_cdata->( $handler); } if( my $characters= $handler->can( 'characters')) { $characters->( $handler, {Data => $elt->{cdata} }); } if( my $end_cdata= $handler->can( 'end_cdata')) { $end_cdata->( $handler); } } elsif( ((exists $elt->{'target'})) && (my $pi= $handler->can( 'processing_instruction'))) { $pi->( $handler, { Target =>$elt->{target}, Data => $elt->{data} }); } elsif( ((exists $elt->{'comment'})) && (my $comment= $handler->can( 'comment'))) { $comment->( $handler, { Data => $elt->{comment} }); } elsif( ((exists $elt->{'ent'}))) { if( my $se= $handler->can( 'skipped_entity')) { $se->( $handler, { Name => $elt->ent_name }); } elsif( my $characters= $handler->can( 'characters')) { if( defined $elt->ent_string) 
{ $characters->( $handler, {Data => $elt->ent_string}); } else { $characters->( $handler, {Data => $elt->ent_name}); } } } } } sub _start_tag_data_SAX1 { my( $elt)= @_; my $name= $XML::Twig::index2gi[$elt->{'gi'}]; return if( ( (substr( $XML::Twig::index2gi[$elt->{'gi'}], 0, 1) eq '#') && (substr( $XML::Twig::index2gi[$elt->{'gi'}], 0, 9) ne '#default:') )); my $attributes={}; my $atts= $elt->{att}; while( my( $att, $value)= each %$atts) { $attributes->{$att}= $value unless( ( $att=~ m{^#(?!default:)} )); } my $data= { Name => $name, Attributes => $attributes}; return $data; } sub _end_tag_data_SAX1 { my( $elt)= @_; return if( ( (substr( $XML::Twig::index2gi[$elt->{'gi'}], 0, 1) eq '#') && (substr( $XML::Twig::index2gi[$elt->{'gi'}], 0, 9) ne '#default:') )); return { Name => $XML::Twig::index2gi[$elt->{'gi'}] }; } sub _start_tag_data_SAX2 { my( $elt)= @_; my $data={}; my $name= $XML::Twig::index2gi[$elt->{'gi'}]; return if( ( (substr( $XML::Twig::index2gi[$elt->{'gi'}], 0, 1) eq '#') && (substr( $XML::Twig::index2gi[$elt->{'gi'}], 0, 9) ne '#default:') )); $data->{Name} = $name; $data->{Prefix} = $elt->ns_prefix; $data->{LocalName} = $elt->local_name; $data->{NamespaceURI} = $elt->namespace; # save a copy of the data so we can re-use it for the end tag my %sax2_data= %$data; $elt->{twig_elt_SAX2_data}= \%sax2_data; # add the attributes $data->{Attributes}= $elt->_atts_to_SAX2; return $data; } sub _atts_to_SAX2 { my $elt= shift; my $SAX2_atts= {}; foreach my $att (keys %{$elt->{att}}) { next if( ( $att=~ m{^#(?!default:)} )); my $SAX2_att={}; $SAX2_att->{Name} = $att; $SAX2_att->{Prefix} = _ns_prefix( $att); $SAX2_att->{LocalName} = _local_name( $att); $SAX2_att->{NamespaceURI} = $elt->namespace( $SAX2_att->{Prefix}); $SAX2_att->{Value} = $elt->{'att'}->{$att}; my $SAX2_att_name= "{$SAX2_att->{NamespaceURI}}$SAX2_att->{LocalName}"; $SAX2_atts->{$SAX2_att_name}= $SAX2_att; } return $SAX2_atts; } sub _start_prefix_mapping { my( $elt, $handler, $data)= @_; if( my $start_prefix_mapping= $handler->can( 'start_prefix_mapping') and my @new_prefix_mappings= grep { /^\{[^}]*\}xmlns/ || /^\{$XMLNS_URI\}/ } keys %{$data->{Attributes}} ) { foreach my $prefix (@new_prefix_mappings) { my $prefix_string= $data->{Attributes}->{$prefix}->{LocalName}; if( $prefix_string eq 'xmlns') { $prefix_string=''; } my $prefix_data= { Prefix => $prefix_string, NamespaceURI => $data->{Attributes}->{$prefix}->{Value} }; $start_prefix_mapping->( $handler, $prefix_data); $elt->{twig_end_prefix_mapping} ||= []; push @{$elt->{twig_end_prefix_mapping}}, $prefix_string; } } } sub _end_prefix_mapping { my( $elt, $handler)= @_; if( my $end_prefix_mapping= $handler->can( 'end_prefix_mapping')) { foreach my $prefix (@{$elt->{twig_end_prefix_mapping}}) { $end_prefix_mapping->( $handler, { Prefix => $prefix} ); } } } sub _end_tag_data_SAX2 { my( $elt)= @_; return if( ( (substr( $XML::Twig::index2gi[$elt->{'gi'}], 0, 1) eq '#') && (substr( $XML::Twig::index2gi[$elt->{'gi'}], 0, 9) ne '#default:') )); return $elt->{twig_elt_SAX2_data}; } sub contains_text { my $elt= shift; my $child= $elt->{first_child}; while ($child) { return 1 if( $child->is_text || (exists $child->{'ent'})); $child= $child->{next_sibling}; } return 0; } # creates a single pcdata element containing the text as child of the element # options: # - force_pcdata: when set to a true value forces the text to be in a #PCDATA # even if the original element was a #CDATA sub set_text { my( $elt, $string, %option)= @_; if( $XML::Twig::index2gi[$elt->{'gi'}] eq $PCDATA) { 
return $elt->{pcdata}= (delete $elt->{empty} || 1) && $string; } elsif( $XML::Twig::index2gi[$elt->{'gi'}] eq $CDATA) { if( $option{force_pcdata}) { $elt->{gi}=$XML::Twig::gi2index{$PCDATA} or $elt->set_gi( $PCDATA); $elt->_set_cdata(''); return $elt->{pcdata}= (delete $elt->{empty} || 1) && $string; } else { return $elt->_set_cdata( $string); } } elsif( $elt->contains_a_single( $PCDATA) ) { # optimized so we have a slight chance of not loosing embedded comments and pi's $elt->{first_child}->set_pcdata( $string); return $elt; } foreach my $child (@{[$elt->_children]}) { $child->delete; } my $pcdata= $elt->_new_pcdata( $string); $pcdata->paste( $elt); $elt->{empty}=0; return $elt; } # set the content of an element from a list of strings and elements sub set_content { my $elt= shift; return $elt unless defined $_[0]; # attributes can be given as a hash (passed by ref) if( ref $_[0] eq 'HASH') { my $atts= shift; $elt->del_atts; # usually useless but better safe than sorry $elt->set_atts( $atts); return $elt unless defined $_[0]; } # check next argument for #EMPTY if( !(ref $_[0]) && ($_[0] eq $EMPTY) ) { $elt->{empty}= 1; return $elt; } # case where we really want to do a set_text, the element is '#PCDATA' # or contains a single PCDATA and we only want to add text in it if( ($XML::Twig::index2gi[$elt->{'gi'}] eq $PCDATA || $elt->contains_a_single( $PCDATA)) && (@_ == 1) && !( ref $_[0])) { $elt->set_text( $_[0]); return $elt; } elsif( ($XML::Twig::index2gi[$elt->{'gi'}] eq $CDATA) && (@_ == 1) && !( ref $_[0])) { $elt->_set_cdata( $_[0]); return $elt; } # delete the children foreach my $child (@{[$elt->_children]}) { $child->delete; } if( @_) { $elt->{empty}=0; } foreach my $child (@_) { if( ref( $child) && isa( $child, 'XML::Twig::Elt')) { # argument is an element $child->paste( 'last_child', $elt); } else { # argument is a string if( (my $pcdata= $elt->{last_child}) && $elt->{last_child}->is_pcdata) { # previous child is also pcdata: just concatenate $pcdata->{pcdata}= (delete $pcdata->{empty} || 1) && $pcdata->{pcdata} . 
$child } else { # previous child is not a string: create a new pcdata element $pcdata= $elt->_new_pcdata( $child); $pcdata->paste( 'last_child', $elt); } } } return $elt; } # inserts an element (whose gi is given) as child of the element # all children of the element are now children of the new element # returns the new element sub insert { my ($elt, @args)= @_; # first cut the children my @children= do { my $elt= $elt; my @children=(); my $child= $elt->{first_child}; while( $child) { push @children, $child; $child= $child->{next_sibling}; } @children; }; foreach my $child (@children) { $child->cut; } # insert elements while( my $gi= shift @args) { my $new_elt= $elt->new( $gi); # add attributes if needed if( defined( $args[0]) && ( isa( $args[0], 'HASH')) ) { $new_elt->set_atts( shift @args); } # paste the element $new_elt->paste( $elt); $elt->{empty}=0; $elt= $new_elt; } # paste back the children foreach my $child (@children) { $child->paste( 'last_child', $elt); } return $elt; } # insert a new element # $elt->insert_new_element( $opt_position, $gi, $opt_atts_hash, @opt_content); # the element is created with the same syntax as new # position is the same as in paste, first_child by default sub insert_new_elt { my $elt= shift; my $position= $_[0]; if( ($position eq 'before') || ($position eq 'after') || ($position eq 'first_child') || ($position eq 'last_child')) { shift; } else { $position= 'first_child'; } my $new_elt= $elt->new( @_); $new_elt->paste( $position, $elt); #if( defined $new_elt->{'att'}->{$ID}) { $new_elt->set_id( $new_elt->{'att'}->{$ID}); } return $new_elt; } # wraps an element in elements which gi's are given as arguments # $elt->wrap_in( 'td', 'tr', 'table') wraps the element as a single # cell in a table for example # returns the new element sub wrap_in { my $elt= shift; while( my $gi = shift @_) { my $new_elt = $elt->new( $gi); if( $elt->{twig_current}) { my $t= $elt->twig; $t->{twig_current}= $new_elt; delete $elt->{'twig_current'}; $new_elt->{'twig_current'}=1; } if( my $parent= $elt->{parent}) { $new_elt->{parent}=$parent; if( $XML::Twig::weakrefs) { weaken( $new_elt->{parent});} ; if( $parent->{first_child} == $elt) { $parent->{first_child}= $new_elt; } if( $parent->{last_child} == $elt) { $parent->{empty}=0; $parent->{last_child}=$new_elt; if( $XML::Twig::weakrefs) { weaken( $parent->{last_child});} ; } } else { # wrapping the root my $twig= $elt->twig; if( $twig && $twig->root && ($twig->root eq $elt) ) { $twig->set_root( $new_elt); } } if( my $prev_sibling= $elt->{prev_sibling}) { $new_elt->{prev_sibling}=$prev_sibling; if( $XML::Twig::weakrefs) { weaken( $new_elt->{prev_sibling});} ; $prev_sibling->{next_sibling}= $new_elt; } if( my $next_sibling= $elt->{next_sibling}) { $new_elt->{next_sibling}= $next_sibling; $next_sibling->{prev_sibling}=$new_elt; if( $XML::Twig::weakrefs) { weaken( $next_sibling->{prev_sibling});} ; } $new_elt->{first_child}= $elt; $new_elt->{empty}=0; $new_elt->{last_child}=$elt; if( $XML::Twig::weakrefs) { weaken( $new_elt->{last_child});} ; $elt->{parent}=$new_elt; if( $XML::Twig::weakrefs) { weaken( $elt->{parent});} ; $elt->{prev_sibling}=undef; if( $XML::Twig::weakrefs) { weaken( $elt->{prev_sibling});} ; $elt->{next_sibling}= undef; # add the attributes if the next argument is a hash ref if( defined( $_[0]) && (isa( $_[0], 'HASH')) ) { $new_elt->set_atts( shift @_); } $elt= $new_elt; } return $elt; } sub replace { my( $elt, $ref)= @_; if( $elt->{parent}) { $elt->cut; } if( my $parent= $ref->{parent}) { $elt->{parent}=$parent; if( 
$XML::Twig::weakrefs) { weaken( $elt->{parent});} ; if( $parent->{first_child} == $ref) { $parent->{first_child}= $elt; } if( $parent->{last_child} == $ref) { $parent->{empty}=0; $parent->{last_child}=$elt; if( $XML::Twig::weakrefs) { weaken( $parent->{last_child});} ; } } elsif( $ref->twig && $ref == $ref->twig->root) { $ref->twig->set_root( $elt); } if( my $prev_sibling= $ref->{prev_sibling}) { $elt->{prev_sibling}=$prev_sibling; if( $XML::Twig::weakrefs) { weaken( $elt->{prev_sibling});} ; $prev_sibling->{next_sibling}= $elt; } if( my $next_sibling= $ref->{next_sibling}) { $elt->{next_sibling}= $next_sibling; $next_sibling->{prev_sibling}=$elt; if( $XML::Twig::weakrefs) { weaken( $next_sibling->{prev_sibling});} ; } $ref->{parent}=undef; if( $XML::Twig::weakrefs) { weaken( $ref->{parent});} ; $ref->{prev_sibling}=undef; if( $XML::Twig::weakrefs) { weaken( $ref->{prev_sibling});} ; $ref->{next_sibling}= undef; return $ref; } sub replace_with { my $ref= shift; my $elt= shift; $elt->replace( $ref); foreach my $new_elt (reverse @_) { $new_elt->paste( after => $elt); } return $elt; } # move an element, same syntax as paste, except the element is first cut sub move { my $elt= shift; $elt->cut; $elt->paste( @_); return $elt; } # adds a prefix to an element, creating a pcdata child if needed sub prefix { my ($elt, $prefix, $option)= @_; my $asis= ($option && ($option eq 'asis')) ? 1 : 0; if( (exists $elt->{'pcdata'}) && (($asis && $elt->{asis}) || (!$asis && ! $elt->{asis})) ) { $elt->{pcdata}= (delete $elt->{empty} || 1) && $prefix . $elt->{pcdata}; } elsif( $elt->{first_child} && $elt->{first_child}->is_pcdata && ( ($asis && $elt->{first_child}->{asis}) || (!$asis && ! $elt->{first_child}->{asis})) ) { $elt->{first_child}->set_pcdata( $prefix . $elt->{first_child}->pcdata); } else { my $new_elt= $elt->_new_pcdata( $prefix); my $pos= (exists $elt->{'pcdata'}) ? 'before' : 'first_child'; $new_elt->paste( $pos => $elt); if( $asis) { $new_elt->set_asis; } } return $elt; } # adds a suffix to an element, creating a pcdata child if needed sub suffix { my ($elt, $suffix, $option)= @_; my $asis= ($option && ($option eq 'asis')) ? 1 : 0; if( (exists $elt->{'pcdata'}) && (($asis && $elt->{asis}) || (!$asis && ! $elt->{asis})) ) { $elt->{pcdata}= (delete $elt->{empty} || 1) && $elt->{pcdata} . $suffix; } elsif( $elt->{last_child} && $elt->{last_child}->is_pcdata && ( ($asis && $elt->{last_child}->{asis}) || (!$asis && ! $elt->{last_child}->{asis})) ) { $elt->{last_child}->set_pcdata( $elt->{last_child}->pcdata . $suffix); } else { my $new_elt= $elt->_new_pcdata( $suffix); my $pos= (exists $elt->{'pcdata'}) ? 'after' : 'last_child'; $new_elt->paste( $pos => $elt); if( $asis) { $new_elt->set_asis; } } return $elt; } # create a path to an element ('/root/.../gi) sub path { my $elt= shift; my @context= ( $elt, $elt->ancestors); return "/" . join( "/", reverse map {$_->gi} @context); } sub xpath { my $elt= shift; my $xpath; foreach my $ancestor (reverse $elt->ancestors_or_self) { my $gi= $XML::Twig::index2gi[$ancestor->{'gi'}]; $xpath.= "/$gi"; my $index= $ancestor->prev_siblings( $gi) + 1; unless( ($index == 1) && !$ancestor->next_sibling( $gi)) { $xpath.= "[$index]"; } } return $xpath; } # methods used mainly by wrap_children # return a string with the # for an element ...... 
# returns '' sub _stringify_struct { my( $elt, %opt)= @_; my $string=''; my $pretty_print= set_pretty_print( 'none'); foreach my $child ($elt->_children) { $child->add_id; $string .= $child->start_tag( { escape_gt => 1 }) ||''; } set_pretty_print( $pretty_print); return $string; } # wrap a series of elements in a new one sub _wrap_range { my $elt= shift; my $gi= shift; my $atts= isa( $_[0], 'HASH') ? shift : undef; my $range= shift; # the string with the tags to wrap my $t= $elt->twig; # get the tags to wrap my @to_wrap; while( $range=~ m{<\w+\s+[^>]*id=("[^"]*"|'[^']*')[^>]*>}g) { push @to_wrap, $t->elt_id( substr( $1, 1, -1)); } return '' unless @to_wrap; my $to_wrap= shift @to_wrap; my %atts= %$atts; my $new_elt= $to_wrap->wrap_in( $gi, \%atts); $_->move( last_child => $new_elt) foreach (@to_wrap); return ''; } # wrap children matching a regexp in a new element sub wrap_children { my( $elt, $regexp, $gi, $atts)= @_; $atts ||={}; my $elt_as_string= $elt->_stringify_struct; # stringify the elt structure $regexp=~ s{(<[^>]*>)}{_match_expr( $1)}eg; # in the regexp, replace gi's by the proper regexp $elt_as_string=~ s{($regexp)}{$elt->_wrap_range( $gi, $atts, $1)}eg; # then do the actual replace return $elt; } sub _match_expr { my $tag= shift; my( $gi, %atts)= XML::Twig::_parse_start_tag( $tag); return _match_tag( $gi, %atts); } sub _match_tag { my( $elt, %atts)= @_; my $string= "<$elt\\b"; foreach my $key (sort keys %atts) { my $val= qq{\Q$atts{$key}\E}; $string.= qq{[^>]*$key=(?:"$val"|'$val')}; } $string.= qq{[^>]*>}; return "(?:$string)"; } sub field_to_att { my( $elt, $cond, $att)= @_; $att ||= $cond; my $child= $elt->first_child( $cond) or return undef; $elt->set_att( $att => $child->text); $child->cut; return $elt; } sub att_to_field { my( $elt, $att, $tag)= @_; $tag ||= $att; my $child= $elt->insert_new_elt( first_child => $tag, $elt->{'att'}->{$att}); $elt->del_att( $att); return $elt; } # sort children methods sub sort_children_on_field { my $elt = shift; my $field = shift; my $get_key= sub { return $_[0]->field( $field) }; return $elt->sort_children( $get_key, @_); } sub sort_children_on_att { my $elt = shift; my $att = shift; my $get_key= sub { return $_[0]->{'att'}->{$att} }; return $elt->sort_children( $get_key, @_); } sub sort_children_on_value { my $elt = shift; #my $get_key= eval qq{ sub { $NO_WARNINGS; return \$_[0]->text } }; my $get_key= \&text; return $elt->sort_children( $get_key, @_); } sub sort_children { my( $elt, $get_key, %opt)=@_; $opt{order} ||= 'normal'; $opt{type} ||= 'alpha'; my( $par_a, $par_b)= ($opt{order} eq 'reverse') ? qw( b a) : qw ( a b) ; my $op= ($opt{type} eq 'numeric') ? 
'<=>' : 'cmp' ; my @children= $elt->cut_children; if( $opt{type} eq 'numeric') { @children= map { $_->[1] } sort { $a->[0] <=> $b->[0] } map { [ $get_key->( $_), $_] } @children; } elsif( $opt{type} eq 'alpha') { @children= map { $_->[1] } sort { $a->[0] cmp $b->[0] } map { [ $get_key->( $_), $_] } @children; } else { croak "wrong sort type '$opt{type}', should be either 'alpha' or 'numeric'"; } @children= reverse @children if( $opt{order} eq 'reverse'); $elt->set_content( @children); } # comparison methods sub before { my( $a, $b)=@_; if( $a->cmp( $b) == -1) { return 1; } else { return 0; } } sub after { my( $a, $b)=@_; if( $a->cmp( $b) == 1) { return 1; } else { return 0; } } sub lt { my( $a, $b)=@_; return 1 if( $a->cmp( $b) == -1); return 0; } sub le { my( $a, $b)=@_; return 1 unless( $a->cmp( $b) == 1); return 0; } sub gt { my( $a, $b)=@_; return 1 if( $a->cmp( $b) == 1); return 0; } sub ge { my( $a, $b)=@_; return 1 unless( $a->cmp( $b) == -1); return 0; } sub cmp { my( $a, $b)=@_; # easy cases return 0 if( $a == $b); return 1 if( $a->in($b)); # a in b => a starts after b return -1 if( $b->in($a)); # b in a => a starts before b # ancestors does not include the element itself my @a_pile= ($a, $a->ancestors); my @b_pile= ($b, $b->ancestors); # the 2 elements are not in the same twig return undef unless( $a_pile[-1] == $b_pile[-1]); # find the first non common ancestors (they are siblings) my $a_anc= pop @a_pile; my $b_anc= pop @b_pile; while( $a_anc == $b_anc) { $a_anc= pop @a_pile; $b_anc= pop @b_pile; } # from there move left and right and figure out the order my( $a_prev, $a_next, $b_prev, $b_next)= ($a_anc, $a_anc, $b_anc, $b_anc); while() { $a_prev= $a_prev->{prev_sibling} || return( -1); return 1 if( $a_prev == $b_next); $a_next= $a_next->{next_sibling} || return( 1); return -1 if( $a_next == $b_prev); $b_prev= $b_prev->{prev_sibling} || return( 1); return -1 if( $b_prev == $a_next); $b_next= $b_next->{next_sibling} || return( -1); return 1 if( $b_next == $a_prev); } } sub _dump { my( $elt, $option)= @_; my $atts = defined $option->{atts} ? $option->{atts} : 1; my $extra = defined $option->{extra} ? $option->{extra} : 0; my $short_text = defined $option->{short_text} ? $option->{short_text} : 40; my $sp= '| '; my $indent= $sp x $elt->level; my $indent_sp= ' ' x $elt->level; my $dump=''; if( $elt->is_elt) { $dump .= $indent . '|-' . $XML::Twig::index2gi[$elt->{'gi'}]; if( $atts && (my @atts= $elt->att_names) ) { $dump .= ' ' . join( ' ', map { qq{$_="} . $elt->{'att'}->{$_} . qq{"} } @atts); } $dump .= "\n"; if( $extra) { $dump .= $elt->_dump_extra_data( $indent, $indent_sp, $short_text); } $dump .= join( "", map { $_->_dump( $option) } do { my $elt= $elt; my @children=(); my $child= $elt->{first_child}; while( $child) { push @children, $child; $child= $child->{next_sibling}; } @children; }); } else { if( (exists $elt->{'pcdata'})) { $dump .= "$indent|-PCDATA: '" . _short_text( $elt->{pcdata}, $short_text) . "'\n" } elsif( (exists $elt->{'ent'})) { $dump .= "$indent|-ENTITY: '" . _short_text( $elt->{ent}, $short_text) . "'\n" } elsif( (exists $elt->{'cdata'})) { $dump .= "$indent|-CDATA: '" . _short_text( $elt->{cdata}, $short_text) . "'\n" } elsif( (exists $elt->{'comment'})) { $dump .= "$indent|-COMMENT: '" . _short_text( $elt->comment_string, $short_text) . "'\n" } elsif( (exists $elt->{'target'})) { $dump .= "$indent|-PI: '" . $elt->{target} . "' - '" . _short_text( $elt->{data}, $short_text) . 
"'\n" } if( $extra) { $dump .= $elt->_dump_extra_data( $indent, $indent_sp, $short_text); } } return $dump; } sub _dump_extra_data { my( $elt, $indent, $indent_sp, $short_text)= @_; my $dump=''; if( $elt->extra_data) { my $extra_data = $indent . "|-- (cpi before) '" . _short_text( $elt->extra_data, $short_text) . "'"; $extra_data=~ s{\n}{$indent_sp}g; $dump .= $extra_data . "\n"; } if( $elt->{extra_data_in_pcdata}) { foreach my $data ( @{$elt->{extra_data_in_pcdata}}) { my $extra_data = $indent . "|-- (cpi offset $data->{offset}) '" . _short_text( $data->{text}, $short_text) . "'"; $extra_data=~ s{\n}{$indent_sp}g; $dump .= $extra_data . "\n"; } } if( $elt->{extra_data_before_end_tag}) { my $extra_data = $indent . "|-- (cpi end) '" . _short_text( $elt->{extra_data_before_end_tag}, $short_text) . "'"; $extra_data=~ s{\n}{$indent_sp}g; $dump .= $extra_data . "\n"; } return $dump; } sub _short_text { my( $string, $length)= @_; if( !$length || (length( $string) < $length) ) { return $string; } my $l1= (length( $string) -5) /2; my $l2= length( $string) - ($l1 + 5); return substr( $string, 0, $l1) . ' ... ' . substr( $string, -$l2); } sub _and { return _join_defined( ' && ', @_); } sub _join_defined { return join( shift(), grep { $_ } @_); } 1; __END__ =head1 NAME XML::Twig - A perl module for processing huge XML documents in tree mode. =head1 SYNOPSIS Note that this documentation is intended as a reference to the module. Complete docs, including a tutorial, examples, an easier to use HTML version, a quick reference card and a FAQ are available at L Small documents (loaded in memory as a tree): my $twig=XML::Twig->new(); # create the twig $twig->parsefile( 'doc.xml'); # build it my_process( $twig); # use twig methods to process it $twig->print; # output the twig Huge documents (processed in combined stream/tree mode): # at most one div will be loaded in memory my $twig=XML::Twig->new( twig_handlers => { title => sub { $_->set_tag( 'h2') }, # change title tags to h2 para => sub { $_->set_tag( 'p') }, # change para to p hidden => sub { $_->delete; }, # remove hidden elements list => \&my_list_process, # process list elements div => sub { $_[0]->flush; }, # output and free memory }, pretty_print => 'indented', # output will be nicely formatted empty_tags => 'html', # outputs ); $twig->parsefile( 'my_big.xml'); sub my_list_process { my( $twig, $list)= @_; # ... } See L for other ways to use the module, as a filter for example. =encoding utf8 =head1 DESCRIPTION This module provides a way to process XML documents. It is build on top of C. The module offers a tree interface to the document, while allowing you to output the parts of it that have been completely processed. It allows minimal resource (CPU and memory) usage by building the tree only for the parts of the documents that need actual processing, through the use of the C > and C > options. The C > and C > methods also help to increase performances. XML::Twig tries to make simple things easy so it tries its best to takes care of a lot of the (usually) annoying (but sometimes necessary) features that come with XML and XML::Parser. =head1 TOOLS XML::Twig comes with a few command-line utilities: =head2 xml_pp - xml pretty-printer XML pretty printer using XML::Twig =head2 xml_grep - grep XML files looking for specific elements C does a grep on XML files. Instead of using regular expressions it uses XPath expressions (in fact the subset of XPath supported by XML::Twig). 
=head2 xml_split - cut a big XML file into smaller chunks

C<xml_split> takes a (presumably big) XML file and splits it into several smaller files,
based on various criteria (level in the tree, size or an XPath expression)

=head2 xml_merge - merge back XML files split with xml_split

C<xml_merge> takes several XML files that have been split using C<xml_split> and recreates
a single file.

=head2 xml_spellcheck - spellcheck XML files

C<xml_spellcheck> lets you spell check the content of an XML file. It extracts the text
(the content of elements and optionally of attributes), calls a spell checker on it and
then recreates the XML document.

=head1 XML::Twig 101

XML::Twig can be used either on "small" XML documents (that fit in memory) or on huge ones,
by processing parts of the document and outputting or discarding them once they are processed.

=head2 Loading an XML document and processing it

  my $t= XML::Twig->new();
  $t->parse( '<d><title>title</title><para>p 1</para><para>p 2</para></d>');
  my $root= $t->root;
  $root->set_tag( 'html');                  # change doc to html
  my $title= $root->first_child( 'title');  # get the title
  $title->set_tag( 'h1');                   # turn it into h1
  my @para= $root->children( 'para');       # get the para children
  foreach my $para (@para)
    { $para->set_tag( 'p'); }               # turn them into p
  $t->print;                                # output the document

Other useful methods include: L<att>: C<< $elt->{'att'}->{'foo'} >> returns the C<foo>
attribute for an element, L<set_att>: C<< $elt->set_att( foo => "bar") >> sets the C<foo>
attribute to the C<bar> value, L<next_sibling>: C<< $elt->{next_sibling} >> returns the next
sibling in the document (in the example C<< $title->{next_sibling} >> is the first C<para>,
you can also (and actually should) use C<< $elt->next_sibling( 'para') >> to get it).

The document can also be transformed through the use of the L<cut>, L<copy>, L<paste> and
L<move> methods: C<< $title->cut; $title->paste( after => $p); >> for example.

And much, much more, see L<XML::Twig::Elt>.

=head2 Processing an XML document chunk by chunk

One of the strengths of XML::Twig is that it lets you work with files that do not fit in
memory (BTW storing an XML document in memory as a tree is quite memory-expensive, the
expansion factor often being around 10).

To do this you can define handlers that will be called once a specific element has been
completely parsed. In these handlers you can access the element and process it as you see
fit, using the navigation and the cut-n-paste methods, plus lots of convenient ones like
C<prefix>. Once the element is completely processed you can then C<flush> it, which will
output it and free the memory. You can also C<purge> it if you don't need to output it
(if you are just extracting some data from the document for example). The handler will be
called again once the next relevant element has been parsed.

  my $t= XML::Twig->new( twig_handlers =>
                          { section => \&section,
                            para    => sub { $_->set_tag( 'p'); }
                          },
                       );
  $t->parsefile( 'doc.xml');

  # the handler is called once a section is completely parsed, ie when
  # the end tag for section is found, it receives the twig itself and
  # the element (including all its sub-elements) as arguments
  sub section
    { my( $t, $section)= @_;                      # arguments for all twig_handlers
      $section->set_tag( 'div');                  # change the tag name, my favourite method...
      # let's use the attribute nb as a prefix to the title
      my $title= $section->first_child( 'title'); # find the title
      my $nb= $title->{'att'}->{'nb'};            # get the attribute
      $title->prefix( "$nb - ");                  # easy isn't it?
      $section->flush;                            # outputs the section and frees memory
    }

There is of course more to it: you can trigger handlers on more elaborate conditions than
just the name of the element, C<section/title>
for example.

  my $t= XML::Twig->new( twig_handlers => { 'section/title' => sub { $_->print } })
                  ->parsefile( 'doc.xml');

Here C<< sub { $_->print } >> simply prints the current element (C<$_> is aliased to the
element in the handler).

You can also trigger a handler on a test on an attribute:

  my $t= XML::Twig->new( twig_handlers => { 'section[@level="1"]' => sub { $_->print } })
                  ->parsefile( 'doc.xml');

You can also use C<start_tag_handlers> to process an element as soon as the start tag is
found.

=head2 Processing just parts of an XML document

The twig_roots mode builds only the required sub-trees from the document. Anything outside
of the twig roots will just be ignored:

  my $t= XML::Twig->new(
           # the twig will include just the root and selected titles
           twig_roots => { 'section/title' => \&print_n_purge,
                           'annex/title'   => \&print_n_purge
                         }
         );
  $t->parsefile( 'doc.xml');

  sub print_n_purge
    { my( $t, $elt)= @_;
      print $elt->text;   # print the text (including sub-element texts)
      $t->purge;          # frees the memory
    }

You can use that mode when you want to process parts of a document but are not interested
in the rest and you don't want to pay the price, either in time or memory, to build the
tree for it.

=head2 Building an XML filter

You can combine the C<twig_roots> and the C<twig_print_outside_roots> options to build
filters, which let you modify selected elements and will output the rest of the document
as is.

This would convert prices in $ to prices in Euro in a document:

  my $t= XML::Twig->new(
           twig_roots => { 'price' => \&convert, },  # process prices
           twig_print_outside_roots => 1,            # print the rest
         );
  $t->parsefile( 'doc.xml');

  sub convert
    { my( $t, $price)= @_;
      my $currency= $price->{'att'}->{'currency'};   # get the currency
      if( $currency eq 'USD')
        { my $usd_price= $price->text;               # get the price
          # %rate is just a conversion table
          my $euro_price= $usd_price * $rate{usd2euro};
          $price->set_text( $euro_price);            # set the new price
          $price->set_att( currency => 'EUR');       # don't forget this!
        }
      $price->print;                                 # output the price
    }

=head2 XML::Twig and various versions of Perl, XML::Parser and expat:

XML::Twig is a lot more sensitive to variations in versions of perl, XML::Parser and expat
than to the OS, so this should cover some reasonable configurations.

The "recommended configuration" is perl 5.8.3+ (for good Unicode support), XML::Parser
2.31+ and expat 1.95.5+

See L for the CPAN testers reports on XML::Twig, which list all tested configurations.

An Atom feed of the CPAN Testers results is available at L

Finally:

=over 4

=item XML::Twig does B<not> work with expat 1.95.4

=item XML::Twig only works with XML::Parser 2.27 in perl 5.6.*

Note that I can't compile XML::Parser 2.27 anymore, so I can't guarantee that it still works

=item XML::Parser 2.28 does not really work

=back

When in doubt, upgrade expat, XML::Parser and Scalar::Util

Finally, for some optional features, XML::Twig depends on some additional modules. The
complete list, which depends somewhat on the version of Perl that you are running, is given
by running C

=head1 Simplifying XML processing

=over 4

=item Whitespaces

Whitespaces that look non-significant are discarded; this behaviour can be controlled using
the C<keep_spaces>, C<keep_spaces_in> and C<discard_spaces_in> options.

=item Encoding

You can specify that you want the output in the same encoding as the input (provided you
have valid XML, which means you have to specify the encoding either in the document or when
you create the Twig object) using the C<keep_encoding> option. You can also use
C<output_encoding> to convert the internal UTF-8 format to the required encoding.
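As an illustration (this sketch is not part of the original documentation and the file name
is made up), writing a document back out in ISO-8859-1 is just a matter of passing
C<output_encoding> to the constructor:

  use XML::Twig;
  my $twig= XML::Twig->new( output_encoding => 'ISO-8859-1');
  $twig->parsefile( 'doc.xml');   # the whole twig is built in memory
  $twig->print;                   # printed with an ISO-8859-1 XML declaration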
=item Comments and Processing Instructions (PI)

Comments and PI's can be hidden from the processing, but still appear in the output (they
are carried by the "real" element closest to them)

=item Pretty Printing

XML::Twig can output the document pretty printed so it is easier to read for us humans.

=item Surviving an untimely death

XML parsers are supposed to react violently when fed improper XML. XML::Parser just dies.
XML::Twig provides the C<safe_parse> and the C<safe_parsefile> methods which wrap the parse
in an eval and return either the parsed twig or 0 in case of failure.

=item Private attributes

Attributes with a name starting with # (illegal in XML) will not be output, so you can
safely use them to store temporary values during processing. Note that you can store
anything in a private attribute, not just text; it's just a regular Perl variable, so a
reference to an object or a huge data structure is perfectly fine.

=back

=head1 CLASSES

XML::Twig uses a very limited number of classes. The ones you are most likely to use are
C<XML::Twig> of course, which represents a complete XML document, including the document
itself (the root of the document tree is returned by C<root>), its handlers, its input or
output filters... The other main class is C<XML::Twig::Elt>, which models an XML element.

Element here has a very wide definition: it can be a regular element, but also text, with
an element C<tag> of C<#PCDATA> (or C<#CDATA>), an entity (tag is C<#ENT>), a Processing
Instruction (C<#PI>), a comment (C<#COMMENT>).

Those are the 2 commonly used classes. You might want to look at the C<elt_class> option if
you want to subclass C<XML::Twig::Elt>.

Attributes are just attached to their parent element, they are not objects per se. (Please
use the provided methods C<att> and C<set_att> to access them: if you access them as a hash,
your code becomes implementation dependent and might break in the future.)

Other classes that are seldom used are C<XML::Twig::Entity_list> and C<XML::Twig::Entity>.

If you use C<XML::Twig::XPath> instead of C<XML::Twig>, elements are then created as
C<XML::Twig::XPath::Elt>.

=head1 METHODS

=head2 XML::Twig

A twig is a subclass of XML::Parser, so all XML::Parser methods can be called on a twig
object, including parse and parsefile. C<setHandlers> on the other hand cannot be used,
see C<BUGS>.

=over 4

=item new

This is a class method, the constructor for XML::Twig. Options are passed as keyword value
pairs. Recognized options are the same as XML::Parser, plus some (in fact a lot!) XML::Twig
specifics.

New Options:

=over 4

=item twig_handlers

This argument consists of a hash C<< { expression => \&handler } >> where expression is an
I<XPath expression> (+ some others).

XPath expressions are limited to using the child and descendant axis (indeed you can't
specify an axis), and predicates cannot be nested. You can use the C<< string() >> function
(except in C<twig_roots> triggers). Additionally you can use regexps (/ delimited) to match
attribute and string values.

Examples:

  foo
  foo/bar
  foo//bar
  /foo/bar
  /foo//bar
  /foo/bar[@att1 = "val1" and @att2 = "val2"]/baz[@a >= 1]
  foo[string()=~ /^duh!+/]
  /foo[string(bar)=~ /\d+/]/baz[@att != 3]

#CDATA can be used to call a handler for a CDATA section.

#COMMENT can be used to call a handler for comments.

Some additional (non-XPath) expressions are also provided for convenience:

=over 4

=item processing instructions

C<'?'> or C<'#PI'> triggers the handler for any processing instruction, and
C<< '?<target>' >> or C<< '#PI <target>' >> triggers a handler for processing instructions
with the given target (ex: C<'#PI xml-stylesheet'>).
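For instance (a hedged sketch, not taken from the original documentation), a handler can be
attached to every C<xml-stylesheet> processing instruction; inside the handler C<$_> is the
C<#PI> element, and its C<target> and C<data> methods return the two parts of the PI:

  my $t= XML::Twig->new( twig_handlers =>
           { '#PI xml-stylesheet' => sub { print $_->data, "\n"; return 1; } }
         );
  $t->parsefile( 'doc.xml');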
=item level(<level>)

Triggers the handler on any element at that level in the tree (root is level 1)

=item _all_

Triggers the handler for B<all> elements in the tree

=item _default_

Triggers the handler for each element that does NOT have any other handler.

=back

Expressions are evaluated against the input document. Which means that even if you have
changed the tag of an element (changing the tag of a parent element from a handler for
example) the change will not impact the expression evaluation. There is an exception to
this: "private" attributes (whose name starts with a '#', and which can only be created
during the parsing, as they are not valid XML) are checked against the current twig.

Handlers are triggered in fixed order, sorted by their type (xpath expressions first, then
regexps, then level), then by whether they specify a full path (starting at the root
element) or not, then by number of steps in the expression, then number of predicates, then
number of tests in predicates. Handlers where the last step does not specify a step are
triggered after other XPath handlers. Finally C<_all_> handlers are triggered last.

B<Important>: once a handler has been triggered, if it returns 0 then no other handler is
called, except a C<_all_> handler which will be called anyway. If a handler returns a true
value and other handlers apply, then the next applicable handler will be called. Repeat,
rinse, lather...; the exception to that rule is when the C<do_not_chain_handlers> option is
set, in which case only the first handler will be called.

Note that it might be a good idea to explicitly return a short true value (like 1) from
handlers: this ensures that other applicable handlers are called even if the last statement
for the handler happens to evaluate to false. This might also speed up the code by avoiding
the result of the last statement of the handler being copied and passed to the code managing
handlers. It can really pay to have 1 instead of a long string returned.

When the closing tag for an element is parsed the corresponding handler is called, with 2
arguments: the twig and the element (an C<XML::Twig::Elt>). The twig includes the document
tree that has been built so far, the element is the complete sub-tree for the element. The
fact that the handler is called only when the closing tag for the element is found means
that handlers for inner elements are called before handlers for outer elements.

C<$_> is also set to the element, so it is easy to write inline handlers like

  para => sub { $_->set_tag( 'p'); }

Text is stored in elements whose tag name is #PCDATA (due to mixed content, text and
sub-elements in an element, there is no way to store the text as just an attribute of the
enclosing element).

B<Warning>: if you have used purge or flush on the twig the element might not be complete,
some of its children might have been entirely flushed or purged, and the start tag might
even have been printed (by C<flush>) already, so changing its tag might not give the
expected result.

=item twig_roots

This argument lets you build the tree only for those elements you are interested in.

Example:

  my $t= XML::Twig->new( twig_roots => { title => 1, subtitle => 1});
  $t->parsefile( file);

  my $t= XML::Twig->new( twig_roots => { 'section/title' => 1});
  $t->parsefile( file);

return a twig containing a document including only C<title> and C<subtitle> elements, as
children of the root element.

You can use I<generic_attribute_condition>, I<attribute_condition>, I<full_path>,
I<partial_path>, I<tag>, I<tag_regexp>, I<_default_> and I<_all_> to trigger the building
of the twig.
I<string_condition> and I<regexp_condition> cannot be used, as the content of the element
(and thus the string) has not yet been parsed when the condition is checked.

B<WARNING>: paths are checked against the document. Even if the C<twig_roots> option is
used they will be checked against the full document tree, not the virtual tree created by
XML::Twig.

B<WARNING>: twig_roots elements should NOT be nested, that would hopelessly confuse
XML::Twig ;--(

Note: you can set handlers (twig_handlers) using twig_roots.

Example:

  my $t= XML::Twig->new( twig_roots =>
                          { title    => sub { $_[1]->print;},
                            subtitle => \&process_subtitle
                          }
                       );
  $t->parsefile( file);

=item twig_print_outside_roots

To be used in conjunction with the C<twig_roots> argument. When set to a true value this
will print the document outside of the C<twig_roots> elements.

Example:

  my $t= XML::Twig->new( twig_roots => { title => \&number_title },
                         twig_print_outside_roots => 1,
                       );
  $t->parsefile( file);

  { my $nb;
    sub number_title
      { my( $twig, $title)= @_;
        $nb++;
        $title->prefix( "$nb ");
        $title->print;
      }
  }

This example prints the document outside of the title element, calls C<number_title> for
each C<title> element, prints it, and then resumes printing the document. The twig is built
only for the C<title> elements.

If the value is a reference to a file handle then the document outside the C<twig_roots>
elements will be output to this file handle:

  open( my $out, '>', 'out_file.xml') or die "cannot open out_file.xml: $!";
  my $t= XML::Twig->new( twig_roots => { title => \&number_title },
                         # default output to $out
                         twig_print_outside_roots => $out,
                       );

  { my $nb;
    sub number_title
      { my( $twig, $title)= @_;
        $nb++;
        $title->prefix( "$nb ");
        $title->print( $out);    # you have to print to $out here
      }
  }

=item start_tag_handlers

A hash C<< { expression => \&handler } >>. Sets element handlers that are called when the
element is open (at the end of the XML::Parser C<Start> handler). The handlers are called
with 2 params: the twig and the element. The element is empty at that point, its attributes
are created though.

You can use I<generic_attribute_condition>, I<attribute_condition>, I<full_path>,
I<partial_path>, I<tag>, I<tag_regexp>, I<_default_> and I<_all_> to trigger the handler.

I<string_condition> and I<regexp_condition> cannot be used, as the content of the element
(and thus the string) has not yet been parsed when the condition is checked.

The main uses for those handlers are to change the tag name (you might have to do it as
soon as you find the open tag if you plan to C<flush> the twig at some point in the
element), and to create temporary attributes that will be used when processing sub-elements
with C<twig_handlers>.

You should also use it to change tags if you use C<flush>. If you change the tag in a
regular C<twig_handler> then the start tag might already have been flushed.

B<Note>: C<start_tag> handlers can be called outside of C<twig_roots> if this argument is
used. In this case handlers are called with the following arguments: C<$t> (the twig),
C<$tag> (the tag of the element) and C<%att> (a hash of the attributes of the element).

If the C<twig_print_outside_roots> argument is also used, and the last handler called
returns a C<true> value, then the start tag will be output as it appeared in the original
document; if the handler returns a C<false> value then the start tag will B<not> be printed
(so you can print a modified string yourself for example).

Note that you can use the L<ignore> method in C<start_tag_handlers> (and only there).
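As a hedged illustration (this example is not part of the original documentation and the
tag names are made up), a start tag handler can rename an element before anything is
flushed, while a regular handler, still triggered on the I<original> tag name, flushes it:

  my $t= XML::Twig->new(
           start_tag_handlers => { chapter => sub { $_[1]->set_tag( 'section'); } },
           twig_handlers      => { chapter => sub { $_[0]->flush; } },
         );
  $t->parsefile( 'doc.xml');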
=item end_tag_handlers A hash C<{ expression => \&handler}>. Sets element handlers that are called when the element is closed (at the end of the XML::Parser C<End> handler). The handlers are called with 2 params: the twig and the tag of the element. I<twig_handlers> are called when an element is completely parsed, so why have this redundant option? There is only one use for C<end_tag_handlers>: when using the C<twig_roots> option, to trigger a handler for an element B<outside> the roots. It is for example very useful to number titles in a document using nested sections: my @no= (0); my $no; my $t= XML::Twig->new( start_tag_handlers => { section => sub { $no[$#no]++; $no= join '.', @no; push @no, 0; } }, twig_roots => { title => sub { $_[1]->prefix( $no); $_[1]->print; } }, end_tag_handlers => { section => sub { pop @no; } }, twig_print_outside_roots => 1 ); $t->parsefile( $file); Using the C<end_tag_handlers> argument without C<twig_roots> will result in an error. =item do_not_chain_handlers If this option is set to a true value, then only one handler will be called for each element, even if several satisfy the condition Note that the C<_all_> handler will still be called regardless =item ignore_elts This option lets you ignore elements when building the twig. This is useful in cases where you cannot use C<twig_roots> to ignore elements, for example if the element to ignore is a sibling of elements you are interested in. Example: my $twig= XML::Twig->new( ignore_elts => { elt => 'discard' }); $twig->parsefile( 'doc.xml'); This will build the complete twig for the document, except that all C<elt> elements (and their children) will be left out. The keys in the hash are triggers, limited to the same subset as C<L<start_tag_handlers>>. The values can be C<discard>, to discard the element, C<print>, to output the element as-is, C<string> to store the text of the ignored element(s), including markup, in a field of the twig: C<< $t->{twig_buffered_string} >> or a reference to a scalar, in which case the text of the ignored element(s), including markup, will be stored in the scalar. Any other value will be treated as C<discard>. =item char_handler A reference to a subroutine that will be called every time C<PCDATA> is found. The subroutine receives the string as argument, and returns the modified string: # we want all strings in upper case sub my_char_handler { my( $text)= @_; $text= uc( $text); return $text; } =item elt_class The name of a class used to store elements. this class should inherit from C<XML::Twig::Elt> (and by default it is C<XML::Twig::Elt>). This option is used to subclass the element class and extend it with new methods. This option is needed because during the parsing of the XML, elements are created by C<XML::Twig>, without any control from the user code. =item keep_atts_order Setting this option to a true value causes the attribute hash to be tied to a C<Tie::IxHash> object. This means that C<Tie::IxHash> needs to be installed for this option to be available. It also means that the hash keeps its order, so you will get the attributes in order. This allows outputting the attributes in the same order as they were in the original document. =item keep_encoding This is a (slightly?) evil option: if the XML document is not UTF-8 encoded and you want to keep it that way, then setting keep_encoding will use theC<Expat> original_string method for character, thus keeping the original encoding, as well as the original entities in the strings. 
See the C<t/test6.t> test file to see what results you can expect from the various encoding options. B<WARNING>: if the original encoding is multi-byte then attribute parsing will be EXTREMELY unsafe under any Perl before 5.6, as it uses regular expressions which do not deal properly with multi-byte characters. You can specify an alternate function to parse the start tags with the C<parse_start_tag> option (see below) B<WARNING>: this option is NOT used when parsing with the non-blocking parser (C<parse_start>, C<parse_more>, parse_done methods) which you probably should not use with XML::Twig anyway as they are totally untested! =item output_encoding This option generates an output_filter using C<Encode>, C<Text::Iconv> or C<Unicode::Map8> and C<Unicode::Strings>, and sets the encoding in the XML declaration. This is the easiest way to deal with encodings, if you need more sophisticated features, look at C<output_filter> below =item output_filter This option is used to convert the character encoding of the output document. It is passed either a string corresponding to a predefined filter or a subroutine reference. The filter will be called every time a document or element is processed by the "print" functions (C<print>, C<sprint>, C<flush>). Pre-defined filters: =over 4 =item latin1 uses either C<Encode>, C<Text::Iconv> or C<Unicode::Map8> and C<Unicode::String> or a regexp (which works only with XML::Parser 2.27), in this order, to convert all characters to ISO-8859-15 (usually latin1 is synonym to ISO-8859-1, but in practice it seems that ISO-8859-15, which includes the euro sign, is more useful and probably what most people want). =item html does the same conversion as C<latin1>, plus encodes entities using C<HTML::Entities> (oddly enough you will need to have HTML::Entities installed for it to be available). This should only be used if the tags and attribute names themselves are in US-ASCII, or they will be converted and the output will not be valid XML any more =item safe converts the output to ASCII (US) only plus I<character entities> (C<&#nnn;>) this should be used only if the tags and attribute names themselves are in US-ASCII, or they will be converted and the output will not be valid XML any more =item safe_hex same as C<safe> except that the character entities are in hex (C<&#xnnn;>) =item encode_convert ($encoding) Return a subref that can be used to convert utf8 strings to C<$encoding>). Uses C<Encode>. my $conv = XML::Twig::encode_convert( 'latin1'); my $t = XML::Twig->new(output_filter => $conv); =item iconv_convert ($encoding) this function is used to create a filter subroutine that will be used to convert the characters to the target encoding using C<Text::Iconv> (which needs to be installed, look at the documentation for the module and for the C<iconv> library to find out which encodings are available on your system) my $conv = XML::Twig::iconv_convert( 'latin1'); my $t = XML::Twig->new(output_filter => $conv); =item unicode_convert ($encoding) this function is used to create a filter subroutine that will be used to convert the characters to the target encoding using C<Unicode::Strings> and C<Unicode::Map8> (which need to be installed, look at the documentation for the modules to find out which encodings are available on your system) my $conv = XML::Twig::unicode_convert( 'latin1'); my $t = XML::Twig->new(output_filter => $conv); =back The C<text> and C<att> methods do not use the filter, so their result are always in unicode. 
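For instance, a minimal sketch of the related C<output_encoding> option described above (the file name is hypothetical); the printed output is converted, while C<text> and C<att> still return unicode:

    use XML::Twig;

    my $t= XML::Twig->new(
        output_encoding => 'ISO-8859-1',  # generates an output filter and sets the XML declaration
        pretty_print    => 'indented',
    );
    $t->parsefile( 'doc.xml');
    $t->print;                            # converted on output; text() and att() stay in unicode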
Those predeclared filters are based on subroutines that can be used by themselves (as C<XML::Twig::foo>). =over 4 =item html_encode ($string) Use C<HTML::Entities> to encode a utf8 string =item safe_encode ($string) Use either a regexp (perl < 5.8) or C<Encode> to encode non-ascii characters in the string in C<< &#<nnnn>; >> format =item safe_encode_hex ($string) Use either a regexp (perl < 5.8) or C<Encode> to encode non-ascii characters in the string in C<< &#x<nnnn>; >> format =item regexp2latin1 ($string) Use a regexp to encode a utf8 string into latin 1 (ISO-8859-1). Does not work with Perl 5.8.0! =back =item output_text_filter same as output_filter, except it doesn't apply to the brackets and quotes around attribute values. This is useful for all filters that could change the tagging, basically anything that does not just change the encoding of the output. C<html>, C<safe> and C<safe_hex> are better used with this option. =item input_filter This option is similar to C<output_filter> except the filter is applied to the characters before they are stored in the twig, at parsing time. =item remove_cdata Setting this option to a true value will force the twig to output CDATA sections as regular (escaped) PCDATA =item parse_start_tag If you use the C<keep_encoding> option then this option can be used to replace the default parsing function. You should provide a coderef (a reference to a subroutine) as the argument, this subroutine takes the original tag (given by XML::Parser::Expat C<original_string()> method) and returns a tag and the attributes in a hash (or in a list attribute_name/attribute value). =item expand_external_ents When this option is used external entities (that are defined) are expanded when the document is output using "print" functions such as C<L<print> >, C<L<sprint> >, C<L<flush> > and C<L<xml_string> >. Note that in the twig the entity will be stored as an element with a tag 'C<#ENT>', the entity will not be expanded there, so you might want to process the entities before outputting it. If an external entity is not available, then the parse will fail. A special case is when the value of this option is -1. In that case a missing entity will not cause the parser to die, but its C<name>, C<sysid> and C<pubid> will be stored in the twig as C<< $twig->{twig_missing_system_entities} >> (a reference to an array of hashes { name => <name>, sysid => <sysid>, pubid => <pubid> }). Yes, this is a bit of a hack, but it's useful in some cases. =item load_DTD If this argument is set to a true value, C<parse> or C<parsefile> on the twig will load the DTD information. This information can then be accessed through the twig, in a C<DTD_handler> for example. This will load even an external DTD. Default and fixed values for attributes will also be filled, based on the DTD. Note that to do this the module will generate a temporary file in the current directory. If this is a problem let me know and I will add an option to specify an alternate directory. See L<DTD Handling> for more information =item DTD_handler Set a handler that will be called once the doctype (and the DTD) have been loaded, with 2 arguments, the twig and the DTD. =item no_prolog Does not output a prolog (XML declaration and DTD) =item id This optional argument gives the name of an attribute that can be used as an ID in the document. Elements whose ID is known can be accessed through the elt_id method. id defaults to 'id'. 
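For example, a small hedged sketch of the C<id> option (the attribute name and document content are made up):

    use XML::Twig;

    # use xml:id instead of the default 'id' attribute
    my $t= XML::Twig->new( id => 'xml:id');
    $t->parse( '<doc><section xml:id="intro"><title>Intro</title></section></doc>');
    print $t->elt_id( 'intro')->field( 'title'), "\n";   # prints "Intro"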
See C<L<BUGS> > =item discard_spaces If this optional argument is set to a true value then spaces are discarded when they look non-significant: strings containing only spaces and at least one line feed are discarded. This argument is set to true by default. The exact algorithm to drop spaces is: strings including only spaces (perl \s) and at least one \n right before an open or close tag are dropped. =item discard_all_spaces If this argument is set to a true value, spaces are discarded more aggressively than with C<discard_spaces>: strings not including a \n are also dropped. This option is appropriate for data-oriented XML. =item keep_spaces If this optional argument is set to a true value then all spaces in the document are kept, and stored as C<PCDATA>. B<Warning>: adding this option can result in changes in the twig generated: space that was previously discarded might end up in a new text element. see the difference by calling the following code with 0 and 1 as arguments: perl -MXML::Twig -e'print XML::Twig->new( keep_spaces => shift)->parse( "<d> \n<e/></d>")->_dump' C<keep_spaces> and C<discard_spaces> cannot be both set. =item discard_spaces_in This argument sets C<keep_spaces> to true but will cause the twig builder to discard spaces in the elements listed. The syntax for using this argument is: XML::Twig->new( discard_spaces_in => [ 'elt1', 'elt2']); =item keep_spaces_in This argument sets C<discard_spaces> to true but will cause the twig builder to keep spaces in the elements listed. The syntax for using this argument is: XML::Twig->new( keep_spaces_in => [ 'elt1', 'elt2']); B<Warning>: adding this option can result in changes in the twig generated: space that was previously discarded might end up in a new text element. =item pretty_print Set the pretty print method, amongst 'C<none>' (default), 'C<nsgmls>', 'C<nice>', 'C<indented>', 'C<indented_c>', 'C<indented_a>', 'C<indented_close_tag>', 'C<cvs>', 'C<wrapped>', 'C<record>' and 'C<record_c>' pretty_print formats: =over 4 =item none The document is output as one ling string, with no line breaks except those found within text elements =item nsgmls Line breaks are inserted in safe places: that is within tags, between a tag and an attribute, between attributes and before the > at the end of a tag. This is quite ugly but better than C<none>, and it is very safe, the document will still be valid (conforming to its DTD). This is how the SGML parser C<sgmls> splits documents, hence the name. =item nice This option inserts line breaks before any tag that does not contain text (so element with textual content are not broken as the \n is the significant). B<WARNING>: this option leaves the document well-formed but might make it invalid (not conformant to its DTD). If you have elements declared as <!ELEMENT foo (#PCDATA|bar)> then a C<foo> element including a C<bar> one will be printed as <foo> <bar>bar is just pcdata</bar> </foo> This is invalid, as the parser will take the line break after the C<foo> tag as a sign that the element contains PCDATA, it will then die when it finds the C<bar> tag. This may or may not be important for you, but be aware of it! 
=item indented Same as C<nice> (and with the same warning) but indents elements according to their level =item indented_c Same as C<indented> but a little more compact: the closing tags are on the same line as the preceding text =item indented_close_tag Same as C<indented> except that the closing tag is also indented, to line up with the tags within the element =item indented_a This formats XML files in a line-oriented version control friendly way. The format is described in L<http://tinyurl.com/2kwscq> (that's an Oracle document with an insanely long URL). Note that to be totally conformant to the "spec", the order of attributes should not be changed, so if they are not already in alphabetical order you will need to use the C<L<keep_atts_order>> option. =item cvs Same as C<L<indented_a>>. =item wrapped Same as C<indented_c> but lines are wrapped using L<Text::Wrap::wrap>. The default length for lines is the default for C<$Text::Wrap::columns>, and can be changed by changing that variable. =item record This is a record-oriented pretty print, that displays data in records, one field per line (which looks a LOT like C<indented>) =item record_c Stands for record compact, one record per line =back =item empty_tags Set the empty tag display style ('C<normal>', 'C<html>' or 'C<expand>'). C<normal> outputs an empty tag 'C<< <tag/> >>', C<html> adds a space 'C<< <tag /> >>' for elements that can be empty in XHTML and C<expand> outputs 'C<< <tag></tag> >>' =item quote Set the quote character for attributes ('C<single>' or 'C<double>'). =item escape_gt By default XML::Twig does not escape the character > in its output, as it is not mandated by the XML spec. With this option on, > will be replaced by C<&gt;> =item comments Set the way comments are processed: 'C<drop>' (default), 'C<keep>' or 'C<process>' Comments processing options: =over 4 =item drop drops the comments, they are not read, nor printed to the output =item keep comments are loaded and will appear on the output, they are not accessible within the twig and will not interfere with processing though B<Note>: comments in the middle of a text element such as <p>text <!-- comment --> more text</p> are kept at their original position in the text. Using "print" methods like C<print> or C<sprint> will return the comments in the text. Using C<text> or C<field> on the other hand will not. Any use of C<set_pcdata> on the C<#PCDATA> element (directly or through other methods like C<set_content>) will delete the comment(s). =item process comments are loaded in the twig and will be treated as regular elements (their C<tag> is C<#COMMENT>). This can interfere with processing if you expect C<< $elt->{first_child} >> to be an element but find a comment there. Validation will not protect you from this as comments can happen anywhere. You can use C<< $elt->first_child( 'tag') >> (which is a good habit anyway) to get where you want. Consider using C<process> if you are outputting SAX events from XML::Twig. =back =item pi Set the way processing instructions are processed: 'C<drop>', 'C<keep>' (default) or 'C<process>' Note that you can also set PI handlers in the C<twig_handlers> option: '?' => \&handler '?target' => \&handler_2 The handlers will be called with 2 parameters, the twig and the PI element if C<pi> is set to C<process>, and with 3, the twig, the target and the data if C<pi> is set to C<keep>. Of course they will not be called if C<pi> is set to C<drop>.
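As an illustration, a hedged sketch of a PI handler with C<pi> set to C<process> (the C<?robot> target and the document are made up, and the PI element's C<data> accessor is assumed here):

    use XML::Twig;

    my $t= XML::Twig->new(
        pi            => 'process',
        twig_handlers => {
            # with pi => 'process' the handler receives the twig and the PI element
            '?robot' => sub { my( $twig, $pi)= @_; print $pi->data, "\n"; },
        },
    );
    $t->parse( '<doc><?robot index="no"?></doc>');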
If C<pi> is set to C<keep> the handler should return a string that will be used as-is as the PI text (it should look like "C< <?target data?> >" or '' if you want to remove the PI), Only one handler will be called, C<?target> or C<?> if no specific handler for that target is available. =item map_xmlns This option is passed a hashref that maps uri's to prefixes. The prefixes in the document will be replaced by the ones in the map. The mapped prefixes can (actually have to) be used to trigger handlers, navigate or query the document. Here is an example: my $t= XML::Twig->new( map_xmlns => {'http://www.w3.org/2000/svg' => "svg"}, twig_handlers => { 'svg:circle' => sub { $_->set_att( r => 20) } }, pretty_print => 'indented', ) ->parse( '<doc xmlns:gr="http://www.w3.org/2000/svg"> <gr:circle cx="10" cy="90" r="10"/> </doc>' ) ->print; This will output: <doc xmlns:svg="http://www.w3.org/2000/svg"> <svg:circle cx="10" cy="90" r="20"/> </doc> =item keep_original_prefix When used with C<L<map_xmlns>> this option will make C<XML::Twig> use the original namespace prefixes when outputting a document. The mapped prefix will still be used for triggering handlers and in navigation and query methods. my $t= XML::Twig->new( map_xmlns => {'http://www.w3.org/2000/svg' => "svg"}, twig_handlers => { 'svg:circle' => sub { $_->set_att( r => 20) } }, keep_original_prefix => 1, pretty_print => 'indented', ) ->parse( '<doc xmlns:gr="http://www.w3.org/2000/svg"> <gr:circle cx="10" cy="90" r="10"/> </doc>' ) ->print; This will output: <doc xmlns:gr="http://www.w3.org/2000/svg"> <gr:circle cx="10" cy="90" r="20"/> </doc> =item original_uri ($prefix) called within a handler, this will return the uri bound to the namespace prefix in the original document. =item index ($arrayref or $hashref) This option creates lists of specific elements during the parsing of the XML. It takes a reference to either a list of triggering expressions or to a hash name => expression, and for each one generates the list of elements that match the expression. The list can be accessed through the C<L<index>> method. example: # using an array ref my $t= XML::Twig->new( index => [ 'div', 'table' ]) ->parsefile( "foo.xml"); my $divs= $t->index( 'div'); my $first_div= $divs->[0]; my $last_table= $t->index( table => -1); # using a hashref to name the indexes my $t= XML::Twig->new( index => { email => 'a[@href=~/^ \s*mailto:/]'}) ->parsefile( "foo.xml"); my $last_emails= $t->index( email => -1); Note that the index is not maintained after the parsing. If elements are deleted, renamed or otherwise hurt during processing, the index is NOT updated. 
(changing the id element OTOH will update the index) =item att_accessors <list of attribute names> creates methods that give direct access to attribute: my $t= XML::Twig->new( att_accessors => [ 'href', 'src']) ->parsefile( $file); my $first_href= $t->first_elt( 'img')->src; # same as ->att( 'src') $t->first_elt( 'img')->src( 'new_logo.png') # changes the attribute value =item elt_accessors creates methods that give direct access to the first child element (in scalar context) or the list of elements (in list context): the list of accessors to create can be given 1 2 different ways: in an array, or in a hash alias => expression my $t= XML::Twig->new( elt_accessors => [ 'head']) ->parsefile( $file); my $title_text= $t->root->head->field( 'title'); # same as $title_text= $t->root->first_child( 'head')->field( 'title'); my $t= XML::Twig->new( elt_accessors => { warnings => 'p[@class="warning"]', d2 => 'div[2]'}, ) ->parsefile( $file); my $body= $t->first_elt( 'body'); my @warnings= $body->warnings; # same as $body->children( 'p[@class="warning"]'); my $s2= $body->d2; # same as $body->first_child( 'div[2]') =item field_accessors creates methods that give direct access to the first child element text: my $t= XML::Twig->new( field_accessors => [ 'h1']) ->parsefile( $file); my $div_title_text= $t->first_elt( 'div')->title; # same as $title_text= $t->first_elt( 'div')->field( 'title'); =item use_tidy set this option to use HTML::Tidy instead of HTML::TreeBuilder to convert HTML to XML. HTML, especially real (real "crap") HTML found in the wild, so depending on the data, one module or the other does a better job at the conversion. Also, HTML::Tidy can be a bit difficult to install, so XML::Twig offers both option. TIMTOWTDI =item output_html_doctype when using HTML::TreeBuilder to convert HTML, this option causes the DOCTYPE declaration to be output, which may be important for some legacy browsers. Without that option the DOCTYPE definition is NOT output. Also if the definition is completely wrong (ie not easily parsable), it is not output either. =back B<Note>: I _HATE_ the Java-like name of arguments used by most XML modules. So in pure TIMTOWTDI fashion all arguments can be written either as C<UglyJavaLikeName> or as C<readable_perl_name>: C<twig_print_outside_roots> or C<TwigPrintOutsideRoots> (or even C<twigPrintOutsideRoots> {shudder}). XML::Twig normalizes them before processing them. =item parse ( $source) The C<$source> parameter should either be a string containing the whole XML document, or it should be an open C<IO::Handle> (aka a filehandle). A die call is thrown if a parse error occurs. Otherwise it will return the twig built by the parse. Use C<safe_parse> if you want the parsing to return even when an error occurs. If this method is called as a class method (C<< XML::Twig->parse( $some_xml_or_html) >>) then an XML::Twig object is created, using the parameters except the last one (eg C<< XML::Twig->parse( pretty_print => 'indented', $some_xml_or_html) >>) and C<L<xparse>> is called on it. Note that when parsing a filehandle, the handle should NOT be open with an encoding (ie open with C<open( my $in, '<', $filename)>. The file will be parsed by C<expat>, so specifying the encoding actually causes problems for the parser (as in: it can crash it, see https://rt.cpan.org/Ticket/Display.html?id=78877). For parsing a file it is actually recommended to use C<parsefile> on the file name, instead of <parse> on the open file. 
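In line with the recommendation above, a minimal sketch (the file name is hypothetical):

    use XML::Twig;

    # preferred: hand parsefile the file name and let expat deal with the encoding
    my $t= XML::Twig->new( pretty_print => 'indented');
    $t->parsefile( 'doc.xml');

    # rather than opening the file with an encoding layer yourself:
    # open( my $in, '<:encoding(UTF-8)', 'doc.xml') or die $!;
    # $t->parse( $in);    # the extra layer can confuse expat (see the RT ticket above)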
=item parsestring This is just an alias for C<parse> for backwards compatibility. =item parsefile (FILE [, OPT => OPT_VALUE [...]]) Open C<FILE> for reading, then call C<parse> with the open handle. The file is closed no matter how C<parse> returns. A C<die> call is thrown if a parse error occurs. Otherwise it will return the twig built by the parse. Use C<safe_parsefile> if you want the parsing to return even when an error occurs. =item parsefile_inplace ( $file, $optional_extension) Parse and update a file "in place". It does this by creating a temp file, selecting it as the default for print() statements (and methods), then parsing the input file. If the parsing is successful, then the temp file is moved to replace the input file. If an extension is given then the original file is backed-up (the rules for the extension are the same as the rule for the -i option in perl). =item parsefile_html_inplace ( $file, $optional_extension) Same as parsefile_inplace, except that it parses HTML instead of XML =item parseurl ($url $optional_user_agent) Gets the data from C<$url> and parse it. The data is piped to the parser in chunks the size of the XML::Parser::Expat buffer, so memory consumption and hopefully speed are optimal. For most (read "small") XML it is probably as efficient (and easier to debug) to just C<get> the XML file and then parse it as a string. use XML::Twig; use LWP::Simple; my $twig= XML::Twig->new(); $twig->parse( LWP::Simple::get( $URL )); or use XML::Twig; my $twig= XML::Twig->nparse( $URL); If the C<$optional_user_agent> argument is used then it is used, otherwise a new one is created. =item safe_parse ( SOURCE [, OPT => OPT_VALUE [...]]) This method is similar to C<parse> except that it wraps the parsing in an C<eval> block. It returns the twig on success and 0 on failure (the twig object also contains the parsed twig). C<$@> contains the error message on failure. Note that the parsing still stops as soon as an error is detected, there is no way to keep going after an error. =item safe_parsefile (FILE [, OPT => OPT_VALUE [...]]) This method is similar to C<parsefile> except that it wraps the parsing in an C<eval> block. It returns the twig on success and 0 on failure (the twig object also contains the parsed twig) . C<$@> contains the error message on failure Note that the parsing still stops as soon as an error is detected, there is no way to keep going after an error. =item safe_parseurl ($url $optional_user_agent) Same as C<parseurl> except that it wraps the parsing in an C<eval> block. It returns the twig on success and 0 on failure (the twig object also contains the parsed twig) . C<$@> contains the error message on failure =item parse_html ($string_or_fh) parse an HTML string or file handle (by converting it to XML using HTML::TreeBuilder, which needs to be available). This works nicely, but some information gets lost in the process: newlines are removed, and (at least on the version I use), comments get an extra CDATA section inside ( <!-- foo --> becomes <!-- <![CDATA[ foo ]]> --> =item parsefile_html ($file) parse an HTML file (by converting it to XML using HTML::TreeBuilder, which needs to be available, or HTML::Tidy if the C<use_tidy> option was used). The file is loaded completely in memory and converted to XML before being parsed. this method is to be used with caution though, as it doesn't know about the file encoding, it is usually better to use C<L<parse_html>>, which gives you a chance to open the file with the proper encoding layer. 
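To illustrate the C<safe_parse>/C<safe_parsefile> behaviour described above, a small hedged sketch (the file name is hypothetical):

    use XML::Twig;

    my $t= XML::Twig->new;
    if( $t->safe_parsefile( 'maybe_broken.xml')) {
        $t->print;
    }
    else {
        warn "parse failed: $@";   # parsing stopped at the first error
    }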
=item parseurl_html ($url $optional_user_agent) parse an URL as html the same way C<L<parse_html>> does =item safe_parseurl_html ($url $optional_user_agent) Same as C<L<parseurl_html>>> except that it wraps the parsing in an C<eval> block. It returns the twig on success and 0 on failure (the twig object also contains the parsed twig) . C<$@> contains the error message on failure =item safe_parsefile_html ($file $optional_user_agent) Same as C<L<parsefile_html>>> except that it wraps the parsing in an C<eval> block. It returns the twig on success and 0 on failure (the twig object also contains the parsed twig) . C<$@> contains the error message on failure =item safe_parse_html ($string_or_fh) Same as C<L<parse_html>> except that it wraps the parsing in an C<eval> block. It returns the twig on success and 0 on failure (the twig object also contains the parsed twig) . C<$@> contains the error message on failure =item xparse ($thing_to_parse) parse the C<$thing_to_parse>, whether it is a filehandle, a string, an HTML file, an HTML URL, an URL or a file. Note that this is mostly a convenience method for one-off scripts. For example files that end in '.htm' or '.html' are parsed first as XML, and if this fails as HTML. This is certainly not the most efficient way to do this in general. =item nparse ($optional_twig_options, $thing_to_parse) create a twig with the C<$optional_options>, and parse the C<$thing_to_parse>, whether it is a filehandle, a string, an HTML file, an HTML URL, an URL or a file. Examples: XML::Twig->nparse( "file.xml"); XML::Twig->nparse( error_context => 1, "file://file.xml"); =item nparse_pp ($optional_twig_options, $thing_to_parse) same as C<L<nparse>> but also sets the C<pretty_print> option to C<indented>. =item nparse_e ($optional_twig_options, $thing_to_parse) same as C<L<nparse>> but also sets the C<error_context> option to 1. =item nparse_ppe ($optional_twig_options, $thing_to_parse) same as C<L<nparse>> but also sets the C<pretty_print> option to C<indented> and the C<error_context> option to 1. =item parser This method returns the C<expat> object (actually the XML::Parser::Expat object) used during parsing. It is useful for example to call XML::Parser::Expat methods on it. To get the line of a tag for example use C<< $t->parser->current_line >>. =item setTwigHandlers ($handlers) Set the twig_handlers. C<$handlers> is a reference to a hash similar to the one in the C<twig_handlers> option of new. All previous handlers are unset. The method returns the reference to the previous handlers. =item setTwigHandler ($exp $handler) Set a single twig_handler for elements matching C<$exp>. C<$handler> is a reference to a subroutine. If the handler was previously set then the reference to the previous handler is returned. =item setStartTagHandlers ($handlers) Set the start_tag handlers. C<$handlers> is a reference to a hash similar to the one in the C<start_tag_handlers> option of new. All previous handlers are unset. The method returns the reference to the previous handlers. =item setStartTagHandler ($exp $handler) Set a single start_tag handlers for elements matching C<$exp>. C<$handler> is a reference to a subroutine. If the handler was previously set then the reference to the previous handler is returned. =item setEndTagHandlers ($handlers) Set the end_tag handlers. C<$handlers> is a reference to a hash similar to the one in the C<end_tag_handlers> option of new. All previous handlers are unset. The method returns the reference to the previous handlers. 
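For example, a minimal sketch of swapping a single handler in at run time with C<setTwigHandler> (tag names hypothetical):

    use XML::Twig;

    my $t= XML::Twig->new( twig_handlers => { para => sub { $_->set_tag( 'p'); } });
    # later, replace the handler for 'para'; the previous handler is returned
    my $old= $t->setTwigHandler( para => sub { $_->set_tag( 'paragraph'); });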
=item setEndTagHandler ($exp $handler) Set a single end_tag handlers for elements matching C<$exp>. C<$handler> is a reference to a subroutine. If the handler was previously set then the reference to the previous handler is returned. =item setTwigRoots ($handlers) Same as using the C<L<twig_roots>> option when creating the twig =item setCharHandler ($exp $handler) Set a C<char_handler> =item setIgnoreEltsHandler ($exp) Set a C<ignore_elt> handler (elements that match C<$exp> will be ignored =item setIgnoreEltsHandlers ($exp) Set all C<ignore_elt> handlers (previous handlers are replaced) =item dtd Return the dtd (an L<XML::Twig::DTD> object) of a twig =item xmldecl Return the XML declaration for the document, or a default one if it doesn't have one =item doctype Return the doctype for the document =item doctype_name returns the doctype of the document from the doctype declaration =item system_id returns the system value of the DTD of the document from the doctype declaration =item public_id returns the public doctype of the document from the doctype declaration =item internal_subset returns the internal subset of the DTD =item dtd_text Return the DTD text =item dtd_print Print the DTD =item model ($tag) Return the model (in the DTD) for the element C<$tag> =item root Return the root element of a twig =item set_root ($elt) Set the root of a twig =item first_elt ($optional_condition) Return the first element matching C<$optional_condition> of a twig, if no condition is given then the root is returned =item last_elt ($optional_condition) Return the last element matching C<$optional_condition> of a twig, if no condition is given then the last element of the twig is returned =item elt_id ($id) Return the element whose C<id> attribute is $id =item getEltById Same as C<L<elt_id>> =item index ($index_name, $optional_index) If the C<$optional_index> argument is present, return the corresponding element in the index (created using the C<index> option for C<XML::Twig->new>) If the argument is not present, return an arrayref to the index =item normalize merge together all consecutive pcdata elements in the document (if for example you have turned some elements into pcdata using C<L<erase>>, this will give you a "clean" document in which there all text elements are as long as possible). =item encoding This method returns the encoding of the XML document, as defined by the C<encoding> attribute in the XML declaration (ie it is C<undef> if the attribute is not defined) =item set_encoding This method sets the value of the C<encoding> attribute in the XML declaration. Note that if the document did not have a declaration it is generated (with an XML version of 1.0) =item xml_version This method returns the XML version, as defined by the C<version> attribute in the XML declaration (ie it is C<undef> if the attribute is not defined) =item set_xml_version This method sets the value of the C<version> attribute in the XML declaration. If the declaration did not exist it is created. =item standalone This method returns the value of the C<standalone> declaration for the document =item set_standalone This method sets the value of the C<standalone> attribute in the XML declaration. Note that if the document did not have a declaration it is generated (with an XML version of 1.0) =item set_output_encoding Set the C<encoding> "attribute" in the XML declaration =item set_doctype ($name, $system, $public, $internal) Set the doctype of the element. 
If an argument is C<undef> (or not present) then its former value is retained; if a false ('' or 0) value is passed then the former value is deleted. =item entity_list Return the entity list of a twig =item entity_names Return the list of all defined entities =item entity ($entity_name) Return the entity =item change_gi ($old_gi, $new_gi) Performs a (very fast) global change. All elements C<$old_gi> are now C<$new_gi>. This is a bit dangerous though and should be avoided if possible, as the new tag might be ignored in subsequent processing. See C<L<BUGS> > =item flush ($optional_filehandle, %options) Flushes a twig up to (and including) the current element, then deletes all unnecessary elements from the tree that's kept in memory. C<flush> keeps track of which elements need to be open/closed, so if you flush from handlers you don't have to worry about anything. Just keep flushing the twig every time you're done with a sub-tree and it will come out well-formed. After the whole parsing don't forget to C<flush> one more time to print the end of the document. The doctype and entity declarations are also printed. C<flush> takes an optional filehandle as an argument. If you use C<flush> at any point during parsing, the document will be flushed one last time at the end of the parsing, to the proper filehandle. options: use the C<update_DTD> option if you have updated the (internal) DTD and/or the entity list and you want the updated DTD to be output. The C<pretty_print> option sets the pretty printing of the document. Example: $t->flush( Update_DTD => 1); $t->flush( $filehandle, pretty_print => 'indented'); $t->flush( \*FILE); =item flush_up_to ($elt, $optional_filehandle, %options) Flushes up to the C<$elt> element. This allows you to keep part of the tree in memory when you C<flush>. options: see flush. =item purge Does the same as a C<flush> except it does not print the twig. It just deletes all elements that have been completely parsed so far. =item purge_up_to ($elt) Purges up to the C<$elt> element. This allows you to keep part of the tree in memory when you C<purge>. =item print ($optional_filehandle, %options) Prints the whole document associated with the twig. To be used only AFTER the parse. options: see C<flush>. =item print_to_file ($filename, %options) Prints the whole document associated with the twig to file C<$filename>. To be used only AFTER the parse. options: see C<flush>. =item safe_print_to_file ($filename, %options) Prints the whole document associated with the twig to file C<$filename>. This variant, which probably only works on *nix, prints to a temp file, then moves the temp file to overwrite the original file. This is a bit safer when 2 processes can potentially write the same file: only the last one will succeed, but the file won't be corrupted. I often use this for cron jobs, so testing the code doesn't interfere with the cron job running at the same time. options: see C<flush>. =item sprint Return the text of the whole document associated with the twig. To be used only AFTER the parse. options: see C<flush>. =item trim Trim the document: gets rid of initial and trailing spaces, and replaces multiple spaces by a single one.
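Putting the C<flush> notes above together, a hedged sketch of streaming a large document (the element and file names are hypothetical):

    use XML::Twig;

    my $t= XML::Twig->new(
        twig_handlers => {
            record => sub {
                my( $twig, $record)= @_;
                $record->set_att( processed => 1);
                $twig->flush;    # print what has been built so far and free the memory
            },
        },
    );
    $t->parsefile( 'big.xml');   # a final flush happens automatically at the end of the parse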
=item toSAX1 ($handler) Send SAX events for the twig to the SAX1 handler C<$handler> =item toSAX2 ($handler) Send SAX events for the twig to the SAX2 handler C<$handler> =item flush_toSAX1 ($handler) Same as flush, except that SAX events are sent to the SAX1 handler C<$handler> instead of the twig being printed =item flush_toSAX2 ($handler) Same as flush, except that SAX events are sent to the SAX2 handler C<$handler> instead of the twig being printed =item ignore This method should be called during parsing, usually in C<start_tag_handlers>. It causes the element to be skipped during the parsing: the twig is not built for this element, it will not be accessible during parsing or after it. The element will not take up any memory and parsing will be faster. Note that this method can also be called on an element. If the element is a parent of the current element then this element will be ignored (the twig will not be built any more for it and what has already been built will be deleted). =item set_pretty_print ($style) Set the pretty print method, amongst 'C<none>' (default), 'C<nsgmls>', 'C<nice>', 'C<indented>', C<indented_c>, 'C<wrapped>', 'C<record>' and 'C<record_c>' B<WARNING:> the pretty print style is a B<GLOBAL> variable, so once set it's applied to B<ALL> C<print>'s (and C<sprint>'s). Same goes if you use XML::Twig with C<mod_perl> . This should not be a problem as the XML that's generated is valid anyway, and XML processors (as well as HTML processors, including browsers) should not care. Let me know if this is a big problem, but at the moment the performance/cleanliness trade-off clearly favors the global approach. =item set_empty_tag_style ($style) Set the empty tag display style ('C<normal>', 'C<html>' or 'C<expand>'). As with C<L<set_pretty_print>> this sets a global flag. C<normal> outputs an empty tag 'C<< <tag/> >>', C<html> adds a space 'C<< <tag /> >>' for elements that can be empty in XHTML and C<expand> outputs 'C<< <tag></tag> >>' =item set_remove_cdata ($flag) set (or unset) the flag that forces the twig to output CDATA sections as regular (escaped) PCDATA =item print_prolog ($optional_filehandle, %options) Prints the prolog (XML declaration + DTD + entity declarations) of a document. options: see C<L<flush>>. =item prolog ($optional_filehandle, %options) Return the prolog (XML declaration + DTD + entity declarations) of a document. options: see C<L<flush>>. =item finish Call Expat C<finish> method. Unsets all handlers (including internal ones that set context), but expat continues parsing to the end of the document or until it finds an error. It should finish up a lot faster than with the handlers set. =item finish_print Stops twig processing, flush the twig and proceed to finish printing the document as fast as possible. Use this method when modifying a document and the modification is done. =item finish_now Stops twig processing, does not finish parsing the document (which could actually be not well-formed after the point where C<finish_now> is called). Execution resumes after the C<Lparse>> or C<L<parsefile>> call. The content of the twig is what has been parsed so far (all open elements at the time C<finish_now> is called are considered closed). 
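As an illustration of C<ignore> being called from C<start_tag_handlers> (the element and file names are hypothetical):

    use XML::Twig;

    my $t= XML::Twig->new(
        start_tag_handlers => {
            # the <boilerplate> subtrees are never built, saving time and memory
            boilerplate => sub { $_[0]->ignore; },
        },
    );
    $t->parsefile( 'doc.xml');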
=item set_expand_external_entities Same as using the C<L<expand_external_ents>> option when creating the twig =item set_input_filter Same as using the C<L<input_filter>> option when creating the twig =item set_keep_atts_order Same as using the C<L<keep_atts_order>> option when creating the twig =item set_keep_encoding Same as using the C<L<keep_encoding>> option when creating the twig =item escape_gt usually XML::Twig does not escape > in its output. Using this option makes it replace > by > =item do_not_escape_gt reverts XML::Twig behavior to its default of not escaping > in its output. =item set_output_filter Same as using the C<L<output_filter>> option when creating the twig =item set_output_text_filter Same as using the C<L<output_text_filter>> option when creating the twig =item add_stylesheet ($type, @options) Adds an external stylesheet to an XML document. Supported types and options: =over 4 =item xsl option: the url of the stylesheet Example: $t->add_stylesheet( xsl => "xsl_style.xsl"); will generate the following PI at the beginning of the document: <?xml-stylesheet type="text/xsl" href="xsl_style.xsl"?> =item css option: the url of the stylesheet =item active_twig a class method that returns the last processed twig, so you don't necessarily need the object to call methods on it. =back =item Methods inherited from XML::Parser::Expat A twig inherits all the relevant methods from XML::Parser::Expat. These methods can only be used during the parsing phase (they will generate a fatal error otherwise). Inherited methods are: =over 4 =item depth Returns the size of the context list. =item in_element Returns true if NAME is equal to the name of the innermost cur‐ rently opened element. If namespace processing is being used and you want to check against a name that may be in a namespace, then use the generate_ns_name method to create the NAME argument. =item within_element Returns the number of times the given name appears in the context list. If namespace processing is being used and you want to check against a name that may be in a namespace, then use the gener‐ ate_ns_name method to create the NAME argument. =item context Returns a list of element names that represent open elements, with the last one being the innermost. Inside start and end tag han‐ dlers, this will be the tag of the parent element. =item current_line Returns the line number of the current position of the parse. =item current_column Returns the column number of the current position of the parse. =item current_byte Returns the current position of the parse. =item position_in_context Returns a string that shows the current parse position. LINES should be an integer >= 0 that represents the number of lines on either side of the current parse line to place into the returned string. =item base ([NEWBASE]) Returns the current value of the base for resolving relative URIs. If NEWBASE is supplied, changes the base to that value. =item current_element Returns the name of the innermost currently opened element. Inside start or end handlers, returns the parent of the element associated with those tags. =item element_index Returns an integer that is the depth-first visit order of the cur‐ rent element. This will be zero outside of the root element. For example, this will return 1 when called from the start handler for the root element start tag. =item recognized_string Returns the string from the document that was recognized in order to call the current handler. 
For instance, when called from a start handler, it will give us the start-tag string. The string is encoded in UTF-8. This method doesn't return a meaningful string inside declaration handlers. =item original_string Returns the verbatim string from the document that was recognized in order to call the current handler. The string is in the original document encoding. This method doesn't return a meaningful string inside declaration handlers. =item xpcroak Concatenate onto the given message the current line number within the XML document plus the message implied by ErrorContext. Then croak with the formed message. =item xpcarp Concatenate onto the given message the current line number within the XML document plus the message implied by ErrorContext. Then carp with the formed message. =item xml_escape(TEXT [, CHAR [, CHAR ...]]) Returns TEXT with markup characters turned into character entities. Any additional characters provided as arguments are also turned into character references where found in TEXT. (this method is broken on some versions of expat/XML::Parser) =back =item path ( $optional_tag) Return the element context in a form similar to XPath's short form: 'C</root/tag1/../tag>' =item get_xpath ( $optional_array_ref, $xpath, $optional_offset) Performs a C<get_xpath> on the document root (see <Elt|"Elt">) If the C<$optional_array_ref> argument is used the array must contain elements. The C<$xpath> expression is applied to each element in turn and the result is union of all results. This way a first query can be refined in further steps. =item find_nodes ( $optional_array_ref, $xpath, $optional_offset) same as C<get_xpath> =item findnodes ( $optional_array_ref, $xpath, $optional_offset) same as C<get_xpath> (similar to the XML::LibXML method) =item findvalue ( $optional_array_ref, $xpath, $optional_offset) Return the C<join> of all texts of the results of applying C<L<get_xpath>> to the node (similar to the XML::LibXML method) =item findvalues ( $optional_array_ref, $xpath, $optional_offset) Return an array of all texts of the results of applying C<L<get_xpath>> to the node =item subs_text ($regexp, $replace) subs_text does text substitution on the whole document, similar to perl's C< s///> operator. =item dispose Useful only if you don't have C<Scalar::Util> or C<WeakRef> installed. Reclaims properly the memory used by an XML::Twig object. As the object has circular references it never goes out of scope, so if you want to parse lots of XML documents then the memory leak becomes a problem. Use C<< $twig->dispose >> to clear this problem. =item att_accessors (list_of_attribute_names) A convenience method that creates l-valued accessors for attributes. So C<< $twig->create_accessors( 'foo') >> will create a C<foo> method that can be called on elements: $elt->foo; # equivalent to $elt->{'att'}->{'foo'}; $elt->foo( 'bar'); # equivalent to $elt->set_att( foo => 'bar'); The methods are l-valued only under those perl's that support this feature (5.6 and above) =item create_accessors (list_of_attribute_names) Same as att_accessors =item elt_accessors (list_of_attribute_names) A convenience method that creates accessors for elements. So C<< $twig->create_accessors( 'foo') >> will create a C<foo> method that can be called on elements: $elt->foo; # equivalent to $elt->first_child( 'foo'); =item field_accessors (list_of_attribute_names) A convenience method that creates accessors for element values (C<field>). 
So C<< $twig->create_accessors( 'foo') >> will create a C<foo> method that can be called on elements: $elt->foo; # equivalent to $elt->field( 'foo'); =item set_do_not_escape_amp_in_atts An evil method, that I only document because Test::Pod::Coverage complaints otherwise, but really, you don't want to know about it. =back =head2 XML::Twig::Elt =over 4 =item new ($optional_tag, $optional_atts, @optional_content) The C<tag> is optional (but then you can't have a content ), the C<$optional_atts> argument is a reference to a hash of attributes, the content can be just a string or a list of strings and element. A content of 'C<#EMPTY>' creates an empty element; Examples: my $elt= XML::Twig::Elt->new(); my $elt= XML::Twig::Elt->new( para => { align => 'center' }); my $elt= XML::Twig::Elt->new( para => { align => 'center' }, 'foo'); my $elt= XML::Twig::Elt->new( br => '#EMPTY'); my $elt= XML::Twig::Elt->new( 'para'); my $elt= XML::Twig::Elt->new( para => 'this is a para'); my $elt= XML::Twig::Elt->new( para => $elt3, 'another para'); The strings are not parsed, the element is not attached to any twig. B<WARNING>: if you rely on ID's then you will have to set the id yourself. At this point the element does not belong to a twig yet, so the ID attribute is not known so it won't be stored in the ID list. Note that C<#COMMENT>, C<#PCDATA> or C<#CDATA> are valid tag names, that will create text elements. To create an element C<foo> containing a CDATA section: my $foo= XML::Twig::Elt->new( '#CDATA' => "content of the CDATA section") ->wrap_in( 'foo'); An attribute of '#CDATA', will create the content of the element as CDATA: my $elt= XML::Twig::Elt->new( 'p' => { '#CDATA' => 1}, 'foo < bar'); creates an element <p><![CDATA[foo < bar]]></> =item parse ($string, %args) Creates an element from an XML string. The string is actually parsed as a new twig, then the root of that twig is returned. The arguments in C<%args> are passed to the twig. As always if the parse fails the parser will die, so use an eval if you want to trap syntax errors. As obviously the element does not exist beforehand this method has to be called on the class: my $elt= parse XML::Twig::Elt( "<a> string to parse, with <sub/> <elements>, actually tons of </elements> h</a>"); =item set_inner_xml ($string) Sets the content of the element to be the tree created from the string =item set_inner_html ($string) Sets the content of the element, after parsing the string with an HTML parser (HTML::Parser) =item set_outer_xml ($string) Replaces the element with the tree created from the string =item print ($optional_filehandle, $optional_pretty_print_style) Prints an entire element, including the tags, optionally to a C<$optional_filehandle>, optionally with a C<$pretty_print_style>. The print outputs XML data so base entities are escaped. =item print_to_file ($filename, %options) Prints the element to file C<$filename>. options: see C<flush>. =item sprint ($elt, $optional_no_enclosing_tag) Return the xml string for an entire element, including the tags. If the optional second argument is true then only the string inside the element is returned (the start and end tag for $elt are not). The text is XML-escaped: base entities (& and < in text, & < and " in attribute values) are turned into entities. =item gi Return the gi of the element (the gi is the C<generic identifier> the tag name in SGML parlance). C<tag> and C<name> are synonyms of C<gi>. 
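A small sketch combining the element constructors and C<set_inner_xml> described above (the content is made up):

    use XML::Twig;

    my $div= XML::Twig::Elt->parse( '<div><p>old text</p></div>');
    $div->first_child( 'p')->set_inner_xml( 'new <b>rich</b> text');
    print $div->sprint, "\n";   # <div><p>new <b>rich</b> text</p></div>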
=item tag Same as C<L<gi>> =item name Same as C<L<tag>> =item set_gi ($tag) Set the gi (tag) of an element =item set_tag ($tag) Set the tag (=C<L<tag>>) of an element =item set_name ($name) Set the name (=C<L<tag>>) of an element =item root Return the root of the twig in which the element is contained. =item twig Return the twig containing the element. =item parent ($optional_condition) Return the parent of the element, or the first ancestor matching the C<$optional_condition> =item first_child ($optional_condition) Return the first child of the element, or the first child matching the C<$optional_condition> =item has_child ($optional_condition) Return the first child of the element, or the first child matching the C<$optional_condition> (same as L<first_child>) =item has_children ($optional_condition) Return the first child of the element, or the first child matching the C<$optional_condition> (same as L<first_child>) =item first_child_text ($optional_condition) Return the text of the first child of the element, or the first child matching the C<$optional_condition> If there is no first_child then returns ''. This avoids getting the child, checking for its existence then getting the text for trivial cases. Similar methods are available for the other navigation methods: =over 4 =item last_child_text =item prev_sibling_text =item next_sibling_text =item prev_elt_text =item next_elt_text =item child_text =item parent_text =back All this methods also exist in "trimmed" variant: =over 4 =item first_child_trimmed_text =item last_child_trimmed_text =item prev_sibling_trimmed_text =item next_sibling_trimmed_text =item prev_elt_trimmed_text =item next_elt_trimmed_text =item child_trimmed_text =item parent_trimmed_text =back =item field ($condition) Same method as C<first_child_text> with a different name =item fields ($condition_list) Return the list of field (text of first child matching the conditions), missing fields are returned as the empty string. Same method as C<first_child_text> with a different name =item trimmed_field ($optional_condition) Same method as C<first_child_trimmed_text> with a different name =item set_field ($condition, $optional_atts, @list_of_elt_and_strings) Set the content of the first child of the element that matches C<$condition>, the rest of the arguments is the same as for C<L<set_content>> If no child matches C<$condition> _and_ if C<$condition> is a valid XML element name, then a new element by that name is created and inserted as the last child. =item first_child_matches ($optional_condition) Return the element if the first child of the element (if it exists) passes the C<$optional_condition> C<undef> otherwise if( $elt->first_child_matches( 'title')) ... 
is equivalent to if( $elt->{first_child} && $elt->{first_child}->passes( 'title')) C<first_child_is> is an other name for this method Similar methods are available for the other navigation methods: =over 4 =item last_child_matches =item prev_sibling_matches =item next_sibling_matches =item prev_elt_matches =item next_elt_matches =item child_matches =item parent_matches =back =item is_first_child ($optional_condition) returns true (the element) if the element is the first child of its parent (optionally that satisfies the C<$optional_condition>) =item is_last_child ($optional_condition) returns true (the element) if the element is the last child of its parent (optionally that satisfies the C<$optional_condition>) =item prev_sibling ($optional_condition) Return the previous sibling of the element, or the previous sibling matching C<$optional_condition> =item next_sibling ($optional_condition) Return the next sibling of the element, or the first one matching C<$optional_condition>. =item next_elt ($optional_elt, $optional_condition) Return the next elt (optionally matching C<$optional_condition>) of the element. This is defined as the next element which opens after the current element opens. Which usually means the first child of the element. Counter-intuitive as it might look this allows you to loop through the whole document by starting from the root. The C<$optional_elt> is the root of a subtree. When the C<next_elt> is out of the subtree then the method returns undef. You can then walk a sub-tree with: my $elt= $subtree_root; while( $elt= $elt->next_elt( $subtree_root)) { # insert processing code here } =item prev_elt ($optional_condition) Return the previous elt (optionally matching C<$optional_condition>) of the element. This is the first element which opens before the current one. It is usually either the last descendant of the previous sibling or simply the parent =item next_n_elt ($offset, $optional_condition) Return the C<$offset>-th element that matches the C<$optional_condition> =item following_elt Return the following element (as per the XPath following axis) =item preceding_elt Return the preceding element (as per the XPath preceding axis) =item following_elts Return the list of following elements (as per the XPath following axis) =item preceding_elts Return the list of preceding elements (as per the XPath preceding axis) =item children ($optional_condition) Return the list of children (optionally which matches C<$optional_condition>) of the element. The list is in document order. =item children_count ($optional_condition) Return the number of children of the element (optionally which matches C<$optional_condition>) =item children_text ($optional_condition) In array context, returns an array containing the text of children of the element (optionally which matches C<$optional_condition>) In scalar context, returns the concatenation of the text of children of the element =item children_trimmed_text ($optional_condition) In array context, returns an array containing the trimmed text of children of the element (optionally which matches C<$optional_condition>) In scalar context, returns the concatenation of the trimmed text of children of the element =item children_copy ($optional_condition) Return a list of elements that are copies of the children of the element, optionally which matches C<$optional_condition> =item descendants ($optional_condition) Return the list of all descendants (optionally which matches C<$optional_condition>) of the element. 
This is the equivalent of the C<getElementsByTagName> of the DOM (by the way, if you are really a DOM addict, you can use C<getElementsByTagName> instead) =item getElementsByTagName ($optional_condition) Same as C<L<descendants>> =item find_by_tag_name ($optional_condition) Same as C<L<descendants>> =item descendants_or_self ($optional_condition) Same as C<L<descendants>> except that the element itself is included in the list if it matches the C<$optional_condition> =item first_descendant ($optional_condition) Return the first descendant of the element that matches the condition =item last_descendant ($optional_condition) Return the last descendant of the element that matches the condition =item ancestors ($optional_condition) Return the list of ancestors (optionally matching C<$optional_condition>) of the element. The list is ordered from the innermost ancestor to the outermost one NOTE: the element itself is not part of the list, in order to include it you will have to use ancestors_or_self =item ancestors_or_self ($optional_condition) Return the list of ancestors (optionally matching C<$optional_condition>) of the element, including the element (if it matches the condition>). The list is ordered from the innermost ancestor to the outermost one =item passes ($condition) Return the element if it passes the C<$condition> =item att ($att) Return the value of attribute C<$att> or C<undef> =item latt ($att) Return the value of attribute C<$att> or C<undef> this method is an lvalue, so you can do C<< $elt->latt( 'foo')= 'bar' >> or C<< $elt->latt( 'foo')++; >> =item set_att ($att, $att_value) Set the attribute of the element to the given value You can actually set several attributes this way: $elt->set_att( att1 => "val1", att2 => "val2"); =item del_att ($att) Delete the attribute for the element You can actually delete several attributes at once: $elt->del_att( 'att1', 'att2', 'att3'); =item att_exists ($att) Returns true if the attribute C<$att> exists for the element, false otherwise =item cut Cut the element from the tree. The element still exists, it can be copied or pasted somewhere else, it is just not attached to the tree anymore. Note that the "old" links to the parent, previous and next siblings can still be accessed using the former_* methods =item former_next_sibling Returns the former next sibling of a cut node (or undef if the node has not been cut) This makes it easier to write loops where you cut elements: my $child= $parent->first_child( 'achild'); while( $child->{'att'}->{'cut'}) { $child->cut; $child= ($child->{former} && $child->{former}->{next_sibling}); } =item former_prev_sibling Returns the former previous sibling of a cut node (or undef if the node has not been cut) =item former_parent Returns the former parent of a cut node (or undef if the node has not been cut) =item cut_children ($optional_condition) Cut all the children of the element (or all of those which satisfy the C<$optional_condition>). Return the list of children =item cut_descendants ($optional_condition) Cut all the descendants of the element (or all of those which satisfy the C<$optional_condition>). Return the list of descendants =item copy ($elt) Return a copy of the element. The copy is a "deep" copy: all sub-elements of the element are duplicated. =item paste ($optional_position, $ref) Paste a (previously C<cut> or newly generated) element. Die if the element already belongs to a tree. 
Note that the calling element is pasted: $child->paste( first_child => $existing_parent); $new_sibling->paste( after => $this_sibling_is_already_in_the_tree); or my $new_elt= XML::Twig::Elt->new( tag => $content); $new_elt->paste( $position => $existing_elt); Example: my $t= XML::Twig->new->parse( 'doc.xml') my $toc= $t->root->new( 'toc'); $toc->paste( $t->root); # $toc is pasted as first child of the root foreach my $title ($t->findnodes( '/doc/section/title')) { my $title_toc= $title->copy; # paste $title_toc as the last child of toc $title_toc->paste( last_child => $toc) } Position options: =over 4 =item first_child (default) The element is pasted as the first child of C<$ref> =item last_child The element is pasted as the last child of C<$ref> =item before The element is pasted before C<$ref>, as its previous sibling. =item after The element is pasted after C<$ref>, as its next sibling. =item within In this case an extra argument, C<$offset>, should be supplied. The element will be pasted in the reference element (or in its first text child) at the given offset. To achieve this the reference element will be split at the offset. =back Note that you can call directly the underlying method: =over 4 =item paste_before =item paste_after =item paste_first_child =item paste_last_child =item paste_within =back =item move ($optional_position, $ref) Move an element in the tree. This is just a C<cut> then a C<paste>. The syntax is the same as C<paste>. =item replace ($ref) Replaces an element in the tree. Sometimes it is just not possible toC<cut> an element then C<paste> another in its place, so C<replace> comes in handy. The calling element replaces C<$ref>. =item replace_with (@elts) Replaces the calling element with one or more elements =item delete Cut the element and frees the memory. =item prefix ($text, $optional_option) Add a prefix to an element. If the element is a C<PCDATA> element the text is added to the pcdata, if the elements first child is a C<PCDATA> then the text is added to it's pcdata, otherwise a new C<PCDATA> element is created and pasted as the first child of the element. If the option is C<asis> then the prefix is added asis: it is created in a separate C<PCDATA> element with an C<asis> property. You can then write: $elt1->prefix( '<b>', 'asis'); to create a C<< <b> >> in the output of C<print>. =item suffix ($text, $optional_option) Add a suffix to an element. If the element is a C<PCDATA> element the text is added to the pcdata, if the elements last child is a C<PCDATA> then the text is added to it's pcdata, otherwise a new PCDATA element is created and pasted as the last child of the element. If the option is C<asis> then the suffix is added asis: it is created in a separate C<PCDATA> element with an C<asis> property. You can then write: $elt2->suffix( '</b>', 'asis'); =item trim Trim the element in-place: spaces at the beginning and at the end of the element are discarded and multiple spaces within the element (or its descendants) are replaced by a single space. Note that in some cases you can still end up with multiple spaces, if they are split between several elements: <doc> text <b> hah! </b> yep</doc> gets trimmed to <doc>text <b> hah! </b> yep</doc> This is somewhere in between a bug and a feature. =item normalize merge together all consecutive pcdata elements in the element (if for example you have turned some elements into pcdata using C<L<erase>>, this will give you a "clean" element in which there all text fragments are as long as possible). 
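As an illustration, a short sketch (tag names invented) that erases inline markup and then merges the resulting text fragments:

  use XML::Twig;
  my $t= XML::Twig->new->parse( '<p>foo <b>bar</b> baz</p>');
  $_->erase for $t->root->children( 'b');  # each <b> is replaced by its own content
  $t->root->normalize;                     # merge the 3 consecutive pcdata children into one
  print $t->root->text;                    # 'foo bar baz'
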
=item simplify (%options) Return a data structure suspiciously similar to XML::Simple's. Options are identical to XMLin options, see XML::Simple doc for more details (or use DATA::dumper or YAML to dump the data structure) B<Note>: there is no magic here, if you write C<< $twig->parsefile( $file )->simplify(); >> then it will load the entire document in memory. I am afraid you will have to put some work into it to get just the bits you want and discard the rest. Look at the synopsis or the XML::Twig 101 section at the top of the docs for more information. =over 4 =item content_key =item forcearray =item keyattr =item noattr =item normalize_space aka normalise_space =item variables (%var_hash) %var_hash is a hash { name => value } This option allows variables in the XML to be expanded when the file is read. (there is no facility for putting the variable names back if you regenerate XML using XMLout). A 'variable' is any text of the form ${name} (or $name) which occurs in an attribute value or in the text content of an element. If 'name' matches a key in the supplied hashref, ${name} will be replaced with the corresponding value from the hashref. If no matching key is found, the variable will not be replaced. =item var_att ($attribute_name) This option gives the name of an attribute that will be used to create variables in the XML: <dirs> <dir name="prefix">/usr/local</dir> <dir name="exec_prefix">$prefix/bin</dir> </dirs> use C<< var => 'name' >> to get $prefix replaced by /usr/local in the generated data structure By default variables are captured by the following regexp: /$(\w+)/ =item var_regexp (regexp) This option changes the regexp used to capture variables. The variable name should be in $1 =item group_tags { grouping tag => grouped tag, grouping tag 2 => grouped tag 2...} Option used to simplify the structure: elements listed will not be used. Their children will be, they will be considered children of the element parent. If the element is: <config host="laptop.xmltwig.org"> <server>localhost</server> <dirs> <dir name="base">/home/mrodrigu/standards</dir> <dir name="tools">$base/tools</dir> </dirs> <templates> <template name="std_def">std_def.templ</template> <template name="dummy">dummy</template> </templates> </config> Then calling simplify with C<< group_tags => { dirs => 'dir', templates => 'template'} >> makes the data structure be exactly as if the start and end tags for C<dirs> and C<templates> were not there. A YAML dump of the structure base: '/home/mrodrigu/standards' host: laptop.xmltwig.org server: localhost template: - std_def.templ - dummy.templ tools: '$base/tools' =back =item split_at ($offset) Split a text (C<PCDATA> or C<CDATA>) element in 2 at C<$offset>, the original element now holds the first part of the string and a new element holds the right part. The new element is returned If the element is not a text element then the first text child of the element is split =item split ( $optional_regexp, $tag1, $atts1, $tag2, $atts2...) Split the text descendants of an element in place, the text is split using the C<$regexp>, if the regexp includes () then the matched separators will be wrapped in elements. C<$1> is wrapped in $tag1, with attributes C<$atts1> if C<$atts1> is given (as a hashref), C<$2> is wrapped in $tag2... 
if $elt is C<< <p>tati tata <b>tutu tati titi</b> tata tati tata</p> >> $elt->split( qr/(ta)ti/, 'foo', {type => 'toto'} ) will change $elt to <p><foo type="toto">ta</foo> tata <b>tutu <foo type="toto">ta</foo> titi</b> tata <foo type="toto">ta</foo> tata</p> The regexp can be passed either as a string or as C<qr//> (perl 5.005 and later), it defaults to \s+ just as the C<split> built-in (but this would be quite a useless behaviour without the C<$optional_tag> parameter) C<$optional_tag> defaults to PCDATA or CDATA, depending on the initial element type The list of descendants is returned (including un-touched original elements and newly created ones) =item mark ( $regexp, $optional_tag, $optional_attribute_ref) This method behaves exactly as L<split>, except only the newly created elements are returned =item wrap_children ( $regexp_string, $tag, $optional_attribute_hashref) Wrap the children of the element that match the regexp in an element C<$tag>. If $optional_attribute_hashref is passed then the new element will have these attributes. The $regexp_string includes tags, within pointy brackets, as in C<< <title><para>+ >> and the usual Perl modifiers (+*?...). Tags can be further qualified with attributes: C<< <para type="warning" classif="cosmic_secret">+ >>. The values for attributes should be xml-escaped: C<< <candy type="M&Ms">* >> (C<E<lt>>, C<&> B<C<E<gt>>> and C<"> should be escaped). Note that elements might get extra C<id> attributes in the process. See L<add_id>. Use L<strip_att> to remove unwanted id's. Here is an example: If the element C<$elt> has the following content: <elt> <p>para 1</p> <l_l1_1>list 1 item 1 para 1</l_l1_1> <l_l1>list 1 item 1 para 2</l_l1> <l_l1_n>list 1 item 2 para 1 (only para)</l_l1_n> <l_l1_n>list 1 item 3 para 1</l_l1_n> <l_l1>list 1 item 3 para 2</l_l1> <l_l1>list 1 item 3 para 3</l_l1> <l_l1_1>list 2 item 1 para 1</l_l1_1> <l_l1>list 2 item 1 para 2</l_l1> <l_l1_n>list 2 item 2 para 1 (only para)</l_l1_n> <l_l1_n>list 2 item 3 para 1</l_l1_n> <l_l1>list 2 item 3 para 2</l_l1> <l_l1>list 2 item 3 para 3</l_l1> </elt> Then the code $elt->wrap_children( q{<l_l1_1><l_l1>*} , li => { type => "ul1" }); $elt->wrap_children( q{<l_l1_n><l_l1>*} , li => { type => "ul" }); $elt->wrap_children( q{<li type="ul1"><li type="ul">+}, "ul"); $elt->strip_att( 'id'); $elt->strip_att( 'type'); $elt->print; will output: <elt> <p>para 1</p> <ul> <li> <l_l1_1>list 1 item 1 para 1</l_l1_1> <l_l1>list 1 item 1 para 2</l_l1> </li> <li> <l_l1_n>list 1 item 2 para 1 (only para)</l_l1_n> </li> <li> <l_l1_n>list 1 item 3 para 1</l_l1_n> <l_l1>list 1 item 3 para 2</l_l1> <l_l1>list 1 item 3 para 3</l_l1> </li> </ul> <ul> <li> <l_l1_1>list 2 item 1 para 1</l_l1_1> <l_l1>list 2 item 1 para 2</l_l1> </li> <li> <l_l1_n>list 2 item 2 para 1 (only para)</l_l1_n> </li> <li> <l_l1_n>list 2 item 3 para 1</l_l1_n> <l_l1>list 2 item 3 para 2</l_l1> <l_l1>list 2 item 3 para 3</l_l1> </li> </ul> </elt> =item subs_text ($regexp, $replace) subs_text does text substitution, similar to perl's C< s///> operator. C<$regexp> must be a perl regexp, created with the C<qr> operator. C<$replace> can include C<$1, $2>... from the C<$regexp>. It can also be used to create element and entities, by using C<< &elt( tag => { att => val }, text) >> (similar syntax as C<L<new>>) and C<< &ent( name) >>. 
Here is a rather complex example: $elt->subs_text( qr{(?<!do not )link to (http://([^\s,]*))}, 'see &elt( a =>{ href => $1 }, $2)' ); This will replace text like I<link to http://www.xmltwig.org> by I<< see <a href="www.xmltwig.org">www.xmltwig.org</a> >>, but not I<do not link to...> Generating entities (here replacing spaces with  ): $elt->subs_text( qr{ }, '&ent( " ")'); or, using a variable: my $ent=" "; $elt->subs_text( qr{ }, "&ent( '$ent')"); Note that the substitution is always global, as in using the C<g> modifier in a perl substitution, and that it is performed on all text descendants of the element. B<Bug>: in the C<$regexp>, you can only use C<\1>, C<\2>... if the replacement expression does not include elements or attributes. eg $t->subs_text( qr/((t[aiou])\2)/, '$2'); # ok, replaces toto, tata, titi, tutu by to, ta, ti, tu $t->subs_text( qr/((t[aiou])\2)/, '&elt(p => $1)' ); # NOK, does not find toto... =item add_id ($optional_coderef) Add an id to the element. The id is an attribute, C<id> by default, see the C<id> option for XML::Twig C<new> to change it. Use an id starting with C<#> to get an id that's not output by L<print>, L<flush> or L<sprint>, yet that allows you to use the L<elt_id> method to get the element easily. If the element already has an id, no new id is generated. By default the method create an id of the form C<< twig_id_<nnnn> >>, where C<< <nnnn> >> is a number, incremented each time the method is called successfully. =item set_id_seed ($prefix) by default the id generated by C<L<add_id>> is C<< twig_id_<nnnn> >>, C<set_id_seed> changes the prefix to C<$prefix> and resets the number to 1 =item strip_att ($att) Remove the attribute C<$att> from all descendants of the element (including the element) Return the element =item change_att_name ($old_name, $new_name) Change the name of the attribute from C<$old_name> to C<$new_name>. If there is no attribute C<$old_name> nothing happens. =item lc_attnames Lower cases the name all the attributes of the element. =item sort_children_on_value( %options) Sort the children of the element in place according to their text. All children are sorted. Return the element, with its children sorted. C<%options> are type : numeric | alpha (default: alpha) order : normal | reverse (default: normal) Return the element, with its children sorted =item sort_children_on_att ($att, %options) Sort the children of the element in place according to attribute C<$att>. C<%options> are the same as for C<sort_children_on_value> Return the element. =item sort_children_on_field ($tag, %options) Sort the children of the element in place, according to the field C<$tag> (the text of the first child of the child with this tag). C<%options> are the same as for C<sort_children_on_value>. Return the element, with its children sorted =item sort_children( $get_key, %options) Sort the children of the element in place. The C<$get_key> argument is a reference to a function that returns the sort key when passed an element. For example: $elt->sort_children( sub { $_[0]->{'att'}->{"nb"} + $_[0]->text }, type => 'numeric', order => 'reverse' ); =item field_to_att ($cond, $att) Turn the text of the first sub-element matched by C<$cond> into the value of attribute C<$att> of the element. If C<$att> is omitted then C<$cond> is used as the name of the attribute, which makes sense only if C<$cond> is a valid element (and attribute) name. The sub-element is then cut. 
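For example, a minimal sketch (the C<item>, C<name> and C<qty> tags are invented):

  use XML::Twig;
  my $t= XML::Twig->new->parse( '<item><name>widget</name><qty>2</qty></item>');
  $t->root->field_to_att( 'name');  # the <name> child is cut, its text becomes the name attribute
  print $t->root->sprint;           # <item name="widget"><qty>2</qty></item>
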
=item att_to_field ($att, $tag) Take the value of attribute C<$att> and create a sub-element C<$tag> as first child of the element. If C<$tag> is omitted then C<$att> is used as the name of the sub-element. =item get_xpath ($xpath, $optional_offset) Return a list of elements satisfying the C<$xpath>. C<$xpath> is an XPATH-like expression. A subset of the XPATH abbreviated syntax is covered: tag tag[1] (or any other positive number) tag[last()] tag[@att] (the attribute exists for the element) tag[@att="val"] tag[@att=~ /regexp/] tag[att1="val1" and att2="val2"] tag[att1="val1" or att2="val2"] tag[string()="toto"] (returns tag elements which text (as per the text method) is toto) tag[string()=~/regexp/] (returns tag elements which text (as per the text method) matches regexp) expressions can start with / (search starts at the document root) expressions can start with . (search starts at the current element) // can be used to get all descendants instead of just direct children * matches any tag So the following examples from the F<XPath recommendationL<http://www.w3.org/TR/xpath.html#path-abbrev>> work: para selects the para element children of the context node * selects all element children of the context node para[1] selects the first para child of the context node para[last()] selects the last para child of the context node */para selects all para grandchildren of the context node /doc/chapter[5]/section[2] selects the second section of the fifth chapter of the doc chapter//para selects the para element descendants of the chapter element children of the context node //para selects all the para descendants of the document root and thus selects all para elements in the same document as the context node //olist/item selects all the item elements in the same document as the context node that have an olist parent .//para selects the para element descendants of the context node .. selects the parent of the context node para[@type="warning"] selects all para children of the context node that have a type attribute with value warning employee[@secretary and @assistant] selects all the employee children of the context node that have both a secretary attribute and an assistant attribute The elements will be returned in the document order. If C<$optional_offset> is used then only one element will be returned, the one with the appropriate offset in the list, starting at 0 Quoting and interpolating variables can be a pain when the Perl syntax and the XPATH syntax collide, so use alternate quoting mechanisms like q or qq (I like q{} and qq{} myself). Here are some more examples to get you started: my $p1= "p1"; my $p2= "p2"; my @res= $t->get_xpath( qq{p[string( "$p1") or string( "$p2")]}); my $a= "a1"; my @res= $t->get_xpath( qq{//*[@att="$a"]}); my $val= "a1"; my $exp= qq{//p[ \@att='$val']}; # you need to use \@ or you will get a warning my @res= $t->get_xpath( $exp); Note that the only supported regexps delimiters are / and that you must backslash all / in regexps AND in regular strings. XML::Twig does not provide natively full XPATH support, but you can use C<L<XML::Twig::XPath>> to get C<findnodes> to use C<XML::XPath> as the XPath engine, with full coverage of the spec. C<L<XML::Twig::XPath>> to get C<findnodes> to use C<XML::XPath> as the XPath engine, with full coverage of the spec. =item find_nodes same asC<get_xpath> =item findnodes same as C<get_xpath> =item text @optional_options Return a string consisting of all the C<PCDATA> and C<CDATA> in an element, without any tags. 
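For example, a small sketch (the fragment is invented):

  use XML::Twig;
  my $elt= XML::Twig->new->parse( '<p>the <b>bold</b> part</p>')->root;
  my $all = $elt->text;       # 'the bold part'
  my $flat= $elt->text_only;  # 'the  part' (the text of the embedded <b> is skipped)
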
The text is not XML-escaped: base entities such as C<&> and C<< < >> are not escaped. The 'C<no_recurse>' option will only return the text of the element, not of any included sub-elements (same as C<L<text_only>>). =item text_only Same as C<L<text>> except that the text returned doesn't include the text of sub-elements. =item trimmed_text Same as C<text> except that the text is trimmed: leading and trailing spaces are discarded, consecutive spaces are collapsed =item set_text ($string) Set the text for the element: if the element is a C<PCDATA>, just set its text, otherwise cut all the children of the element and create a single C<PCDATA> child for it, which holds the text. =item merge ($elt2) Move the content of C<$elt2> within the element =item insert ($tag1, [$optional_atts1], $tag2, [$optional_atts2],...) For each tag in the list inserts an element C<$tag> as the only child of the element. The element gets the optional attributes inC<< $optional_atts<n>. >> All children of the element are set as children of the new element. The upper level element is returned. $p->insert( table => { border=> 1}, 'tr', 'td') put C<$p> in a table with a visible border, a single C<tr> and a single C<td> and return the C<table> element: <p><table border="1"><tr><td>original content of p</td></tr></table></p> =item wrap_in (@tag) Wrap elements in C<@tag> as the successive ancestors of the element, returns the new element. C<< $elt->wrap_in( 'td', 'tr', 'table') >> wraps the element as a single cell in a table for example. Optionally each tag can be followed by a hashref of attributes, that will be set on the wrapping element: $elt->wrap_in( p => { class => "advisory" }, div => { class => "intro", id => "div_intro" }); =item insert_new_elt ($opt_position, $tag, $opt_atts_hashref, @opt_content) Combines a C<L<new> > and a C<L<paste> >: creates a new element using C<$tag>, C<$opt_atts_hashref >and C<@opt_content> which are arguments similar to those for C<new>, then paste it, using C<$opt_position> or C<'first_child'>, relative to C<$elt>. Return the newly created element =item erase Erase the element: the element is deleted and all of its children are pasted in its place. =item set_content ( $optional_atts, @list_of_elt_and_strings) ( $optional_atts, '#EMPTY') Set the content for the element, from a list of strings and elements. Cuts all the element children, then pastes the list elements as the children. This method will create a C<PCDATA> element for any strings in the list. The C<$optional_atts> argument is the ref of a hash of attributes. If this argument is used then the previous attributes are deleted, otherwise they are left untouched. B<WARNING>: if you rely on ID's then you will have to set the id yourself. At this point the element does not belong to a twig yet, so the ID attribute is not known so it won't be stored in the ID list. A content of 'C<#EMPTY>' creates an empty element; =item namespace ($optional_prefix) Return the URI of the namespace that C<$optional_prefix> or the element name belongs to. If the name doesn't belong to any namespace, C<undef> is returned. =item local_name Return the local name (without the prefix) for the element =item ns_prefix Return the namespace prefix for the element =item current_ns_prefixes Return a list of namespace prefixes valid for the element. The order of the prefixes in the list has no meaning. If the default namespace is currently bound, '' appears in the list. 
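A brief sketch of the namespace methods (the prefix, URI and tags are invented):

  use XML::Twig;
  my $t= XML::Twig->new->parse( '<doc xmlns:x="http://example.com/ns"><x:item/></doc>');
  my $item= $t->root->first_child;
  print $item->ns_prefix;   # 'x'
  print $item->local_name;  # 'item'
  print $item->namespace;   # 'http://example.com/ns'
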
=item inherit_att ($att, @optional_tag_list) Return the value of an attribute inherited from parent tags. The value returned is found by looking for the attribute in the element then in turn in each of its ancestors. If the C<@optional_tag_list> is supplied only those ancestors whose tag is in the list will be checked. =item all_children_are ($optional_condition) return 1 if all children of the element pass the C<$optional_condition>, 0 otherwise =item level ($optional_condition) Return the depth of the element in the twig (root is 0). If C<$optional_condition> is given then only ancestors that match the condition are counted. B<WARNING>: in a tree created using the C<twig_roots> option this will not return the level in the document tree, level 0 will be the document root, level 1 will be the C<twig_roots> elements. During the parsing (in a C<twig_handler>) you can use the C<depth> method on the twig object to get the real parsing depth. =item in ($potential_parent) Return true if the element is in the potential_parent (C<$potential_parent> is an element) =item in_context ($cond, $optional_level) Return true if the element is included in an element which passes C<$cond> optionally within C<$optional_level> levels. The returned value is the including element. =item pcdata Return the text of a C<PCDATA> element or C<undef> if the element is not C<PCDATA>. =item pcdata_xml_string Return the text of a C<PCDATA> element or undef if the element is not C<PCDATA>. The text is "XML-escaped" ('&' and '<' are replaced by '&' and '<') =item set_pcdata ($text) Set the text of a C<PCDATA> element. This method does not check that the element is indeed a C<PCDATA> so usually you should use C<L<set_text>> instead. =item append_pcdata ($text) Add the text at the end of a C<PCDATA> element. =item is_cdata Return 1 if the element is a C<CDATA> element, returns 0 otherwise. =item is_text Return 1 if the element is a C<CDATA> or C<PCDATA> element, returns 0 otherwise. =item cdata Return the text of a C<CDATA> element or C<undef> if the element is not C<CDATA>. =item cdata_string Return the XML string of a C<CDATA> element, including the opening and closing markers. =item set_cdata ($text) Set the text of a C<CDATA> element. =item append_cdata ($text) Add the text at the end of a C<CDATA> element. =item remove_cdata Turns all C<CDATA> sections in the element into regular C<PCDATA> elements. This is useful when converting XML to HTML, as browsers do not support CDATA sections. =item extra_data Return the extra_data (comments and PI's) attached to an element =item set_extra_data ($extra_data) Set the extra_data (comments and PI's) attached to an element =item append_extra_data ($extra_data) Append extra_data to the existing extra_data before the element (if no previous extra_data exists then it is created) =item set_asis Set a property of the element that causes it to be output without being XML escaped by the print functions: if it contains C<< a < b >> it will be output as such and not as C<< a < b >>. This can be useful to create text elements that will be output as markup. Note that all C<PCDATA> descendants of the element are also marked as having the property (they are the ones that are actually impacted by the change). If the element is a C<CDATA> element it will also be output asis, without the C<CDATA> markers. The same goes for any C<CDATA> descendant of the element =item set_not_asis Unsets the C<asis> property for the element and its text descendants. 
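For instance, a minimal sketch:

  use XML::Twig;
  my $t= XML::Twig->new->parse( '<p>a &lt; b</p>');
  print $t->root->sprint;  # <p>a &lt; b</p>
  $t->root->set_asis;
  print $t->root->sprint;  # <p>a < b</p>
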
=item is_asis Return the C<asis> property status of the element ( 1 or C<undef>) =item closed Return true if the element has been closed. Might be useful if you are somewhere in the tree, during the parse, and have no idea whether a parent element is completely loaded or not. =item get_type Return the type of the element: 'C<#ELT>' for "real" elements, or 'C<#PCDATA>', 'C<#CDATA>', 'C<#COMMENT>', 'C<#ENT>', 'C<#PI>' =item is_elt Return the tag if the element is a "real" element, or 0 if it is C<PCDATA>, C<CDATA>... =item contains_only_text Return 1 if the element does not contain any other "real" element =item contains_only ($exp) Return the list of children if all children of the element match the expression C<$exp> if( $para->contains_only( 'tt')) { ... } =item contains_a_single ($exp) If the element contains a single child that matches the expression C<$exp> returns that element. Otherwise returns 0. =item is_field same as C<contains_only_text> =item is_pcdata Return 1 if the element is a C<PCDATA> element, returns 0 otherwise. =item is_ent Return 1 if the element is an entity (an unexpanded entity) element, return 0 otherwise. =item is_empty Return 1 if the element is empty, 0 otherwise =item set_empty Flags the element as empty. No further check is made, so if the element is actually not empty the output will be messed. The only effect of this method is that the output will be C<< <tag att="value""/> >>. =item set_not_empty Flags the element as not empty. if it is actually empty then the element will be output as C<< <tag att="value""></tag> >> =item is_pi Return 1 if the element is a processing instruction (C<#PI>) element, return 0 otherwise. =item target Return the target of a processing instruction =item set_target ($target) Set the target of a processing instruction =item data Return the data part of a processing instruction =item set_data ($data) Set the data of a processing instruction =item set_pi ($target, $data) Set the target and data of a processing instruction =item pi_string Return the string form of a processing instruction (C<< <?target data?> >>) =item is_comment Return 1 if the element is a comment (C<#COMMENT>) element, return 0 otherwise. =item set_comment ($comment_text) Set the text for a comment =item comment Return the content of a comment (just the text, not the C<< <!-- >> and C<< --> >>) =item comment_string Return the XML string for a comment (C<< <!-- comment --> >>) Note that an XML comment cannot start or end with a '-', or include '--' (http://www.w3.org/TR/2008/REC-xml-20081126/#sec-comments), if that is the case (because you have created the comment yourself presumably, as it could not be in the input XML), then a space will be inserted before an initial '-', after a trailing one or between two '-' in the comment (which could presumably mangle javascript "hidden" in an XHTML comment); =item set_ent ($entity) Set an (non-expanded) entity (C<#ENT>). C<$entity>) is the entity text (C<&ent;>) =item ent Return the entity for an entity (C<#ENT>) element (C<&ent;>) =item ent_name Return the entity name for an entity (C<#ENT>) element (C<ent>) =item ent_string Return the entity, either expanded if the expanded version is available, or non-expanded (C<&ent;>) otherwise =item child ($offset, $optional_condition) Return the C<$offset>-th child of the element, optionally the C<$offset>-th child that matches C<$optional_condition>. The children are treated as a list, so C<< $elt->child( 0) >> is the first child, while C<< $elt->child( -1) >> is the last child. 
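For example (tags invented):

  use XML::Twig;
  my $row= XML::Twig->new->parse( '<row><a/><b/><c/></row>')->root;
  print $row->child( 0)->tag;              # 'a' (first child)
  print $row->child( -1)->tag;             # 'c' (last child)
  print $row->child( 1, qr/^[bc]$/)->tag;  # 'c' (second child whose tag matches the regexp)
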
=item child_text ($offset, $optional_condition) Return the text of a child or C<undef> if the sibling does not exist. Arguments are the same as child. =item last_child ($optional_condition) Return the last child of the element, or the last child matching C<$optional_condition> (ie the last of the element children matching the condition). =item last_child_text ($optional_condition) Same as C<first_child_text> but for the last child. =item sibling ($offset, $optional_condition) Return the next or previous C<$offset>-th sibling of the element, or the C<$offset>-th one matching C<$optional_condition>. If C<$offset> is negative then a previous sibling is returned, if $offset is positive then a next sibling is returned. C<$offset=0> returns the element if there is no condition or if the element matches the condition>, C<undef> otherwise. =item sibling_text ($offset, $optional_condition) Return the text of a sibling or C<undef> if the sibling does not exist. Arguments are the same as C<sibling>. =item prev_siblings ($optional_condition) Return the list of previous siblings (optionally matching C<$optional_condition>) for the element. The elements are ordered in document order. =item next_siblings ($optional_condition) Return the list of siblings (optionally matching C<$optional_condition>) following the element. The elements are ordered in document order. =item siblings ($optional_condition) Return the list of siblings (optionally matching C<$optional_condition>) of the element (excluding the element itself). The elements are ordered in document order. =item pos ($optional_condition) Return the position of the element in the children list. The first child has a position of 1 (as in XPath). If the C<$optional_condition> is given then only siblings that match the condition are counted. If the element itself does not match the condition then 0 is returned. =item atts Return a hash ref containing the element attributes =item set_atts ({ att1=>$att1_val, att2=> $att2_val... }) Set the element attributes with the hash ref supplied as the argument. The previous attributes are lost (ie the attributes set by C<set_atts> replace all of the attributes of the element). You can also pass a list instead of a hashref: C<< $elt->set_atts( att1 => 'val1',...) >> =item del_atts Deletes all the element attributes. =item att_nb Return the number of attributes for the element =item has_atts Return true if the element has attributes (in fact return the number of attributes, thus being an alias to C<L<att_nb>> =item has_no_atts Return true if the element has no attributes, false (0) otherwise =item att_names return a list of the attribute names for the element =item att_xml_string ($att, $options) Return the attribute value, where '&', '<' and quote (" or the value of the quote option at twig creation) are XML-escaped. The options are passed as a hashref, setting C<escape_gt> to a true value will also escape '>' ($elt( 'myatt', { escape_gt => 1 }); =item set_id ($id) Set the C<id> attribute of the element to the value. 
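A minimal sketch (tags and id value invented):

  use XML::Twig;
  my $t= XML::Twig->new->parse( '<doc><sect/></doc>');
  $t->root->first_child( 'sect')->set_id( 's1');
  my $sect= $t->elt_id( 's1');  # the element can now be retrieved through the twig id list
  print $sect->id;              # 's1'
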
See C<L<elt_id> > to change the id attribute name =item id Gets the id attribute value =item del_id ($id) Deletes the C<id> attribute of the element and remove it from the id list for the document =item class Return the C<class> attribute for the element (methods on the C<class> attribute are quite convenient when dealing with XHTML, or plain XML that will eventually be displayed using CSS) =item lclass same as class, except that this method is an lvalue, so you can do C<< $elt->lclass= "foo" >> =item set_class ($class) Set the C<class> attribute for the element to C<$class> =item add_class ($class) Add C<$class> to the element C<class> attribute: the new class is added only if it is not already present. Note that classes are then sorted alphabetically, so the C<class> attribute can be changed even if the class is already there =item remove_class ($class) Remove C<$class> from the element C<class> attribute. Note that classes are then sorted alphabetically, so the C<class> attribute can be changed even if the class is already there =item add_to_class ($class) alias for add_class =item att_to_class ($att) Set the C<class> attribute to the value of attribute C<$att> =item add_att_to_class ($att) Add the value of attribute C<$att> to the C<class> attribute of the element =item move_att_to_class ($att) Add the value of attribute C<$att> to the C<class> attribute of the element and delete the attribute =item tag_to_class Set the C<class> attribute of the element to the element tag =item add_tag_to_class Add the element tag to its C<class> attribute =item set_tag_class ($new_tag) Add the element tag to its C<class> attribute and sets the tag to C<$new_tag> =item in_class ($class) Return true (C<1>) if the element is in the class C<$class> (if C<$class> is one of the tokens in the element C<class> attribute) =item tag_to_span Change the element tag tp C<span> and set its class to the old tag =item tag_to_div Change the element tag tp C<div> and set its class to the old tag =item DESTROY Frees the element from memory. =item start_tag Return the string for the start tag for the element, including the C<< /> >> at the end of an empty element tag =item end_tag Return the string for the end tag of an element. For an empty element, this returns the empty string (''). =item xml_string @optional_options Equivalent to C<< $elt->sprint( 1) >>, returns the string for the entire element, excluding the element's tags (but nested element tags are present) The 'C<no_recurse>' option will only return the text of the element, not of any included sub-elements (same as C<L<xml_text_only>>). =item inner_xml Another synonym for xml_string =item outer_xml An other synonym for sprint =item xml_text Return the text of the element, encoded (and processed by the current C<L<output_filter>> or C<L<output_encoding>> options, without any tag. =item xml_text_only Same as C<L<xml_text>> except that the text returned doesn't include the text of sub-elements. 
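To illustrate the difference between these serialization methods, a short sketch (fragment invented):

  use XML::Twig;
  my $elt= XML::Twig->new->parse( '<div>some <i>italic</i> text</div>')->root;
  print $elt->sprint;      # <div>some <i>italic</i> text</div> (same as outer_xml)
  print $elt->xml_string;  # some <i>italic</i> text            (same as inner_xml)
  print $elt->xml_text;    # some italic text
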
=item set_pretty_print ($style) Set the pretty print method, amongst 'C<none>' (default), 'C<nsgmls>', 'C<nice>', 'C<indented>', 'C<record>' and 'C<record_c>' pretty_print styles: =over 4 =item none the default, no C<\n> is used =item nsgmls nsgmls style, with C<\n> added within tags =item nice adds C<\n> wherever possible (NOT SAFE, can lead to invalid XML) =item indented same as C<nice> plus indents elements (NOT SAFE, can lead to invalid XML) =item record table-oriented pretty print, one field per line =item record_c table-oriented pretty print, more compact than C<record>, one record per line =back =item set_empty_tag_style ($style) Set the method to output empty tags, amongst 'C<normal>' (default), 'C<html>', and 'C<expand>', C<normal> outputs an empty tag 'C<< <tag/> >>', C<html> adds a space 'C<< <tag /> >>' for elements that can be empty in XHTML and C<expand> outputs 'C<< <tag></tag> >>' =item set_remove_cdata ($flag) set (or unset) the flag that forces the twig to output CDATA sections as regular (escaped) PCDATA =item set_indent ($string) Set the indentation for the indented pretty print style (default is 2 spaces) =item set_quote ($quote) Set the quotes used for attributes. can be 'C<double>' (default) or 'C<single>' =item cmp ($elt) Compare the order of the 2 elements in a twig. C<$a> is the <A>..</A> element, C<$b> is the <B>...</B> element document $a->cmp( $b) <A> ... </A> ... <B> ... </B> -1 <A> ... <B> ... </B> ... </A> -1 <B> ... </B> ... <A> ... </A> 1 <B> ... <A> ... </A> ... </B> 1 $a == $b 0 $a and $b not in the same tree undef =item before ($elt) Return 1 if C<$elt> starts before the element, 0 otherwise. If the 2 elements are not in the same twig then return C<undef>. if( $a->cmp( $b) == -1) { return 1; } else { return 0; } =item after ($elt) Return 1 if $elt starts after the element, 0 otherwise. If the 2 elements are not in the same twig then return C<undef>. if( $a->cmp( $b) == -1) { return 1; } else { return 0; } =item other comparison methods =over 4 =item lt =item le =item gt =item ge =back =item path Return the element context in a form similar to XPath's short form: 'C</root/tag1/../tag>' =item xpath Return a unique XPath expression that can be used to find the element again. It looks like C</doc/sect[3]/title>: unique elements do not have an index, the others do. =item flush flushes the twig up to the current element (strictly equivalent to C<< $elt->root->flush >>) =item private methods Low-level methods on the twig: =over 4 =item set_parent ($parent) =item set_first_child ($first_child) =item set_last_child ($last_child) =item set_prev_sibling ($prev_sibling) =item set_next_sibling ($next_sibling) =item set_twig_current =item del_twig_current =item twig_current =item contains_text =back Those methods should not be used, unless of course you find some creative and interesting, not to mention useful, ways to do it. =back =head2 cond Most of the navigation functions accept a condition as an optional argument The first element (or all elements for C<L<children> > or C<L<ancestors> >) that passes the condition is returned. The condition is a single step of an XPath expression using the XPath subset defined by C<L<get_xpath>>. Additional conditions are: The condition can be =over 4 =item #ELT return a "real" element (not a PCDATA, CDATA, comment or pi element) =item #TEXT return a PCDATA or CDATA element =item regular expression return an element whose tag matches the regexp. 
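For example (a sketch, with C<$elt> and the tag names assumed):

  my $first_title= $elt->first_child( qr/title$/);  # first child whose tag ends in 'title'
  my @sections   = $elt->children( qr/^sect\d+$/);  # children tagged sect1, sect2, ...
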
The regexp has to be created with C<qr//> (hence this is available only on perl 5.005 and above) =item code reference applies the code, passing the current element as argument, if the code returns true then the element is returned, if it returns false then the code is applied to the next candidate. =back =head2 XML::Twig::XPath XML::Twig implements a subset of XPath through the C<L<get_xpath>> method. If you want to use the whole XPath power, then you can use C<XML::Twig::XPath> instead. In this case C<XML::Twig> uses C<XML::XPath> to execute XPath queries. You will of course need C<XML::XPath> installed to be able to use C<XML::Twig::XPath>. See L<XML::XPath> for more information. The methods you can use are: =over 4 =item findnodes ($path) return a list of nodes found by C<$path>. =item findnodes_as_string ($path) return the nodes found reproduced as XML. The result is not guaranteed to be valid XML though. =item findvalue ($path) return the concatenation of the text content of the result nodes =back In order for C<XML::XPath> to be used as the XPath engine the following methods are included in C<XML::Twig>: in XML::Twig =over 4 =item getRootNode =item getParentNode =item getChildNodes =back in XML::Twig::Elt =over 4 =item string_value =item toString =item getName =item getRootNode =item getNextSibling =item getPreviousSibling =item isElementNode =item isTextNode =item isPI =item isPINode =item isProcessingInstructionNode =item isComment =item isCommentNode =item getTarget =item getChildNodes =item getElementById =back =head2 XML::Twig::XPath::Elt The methods you can use are the same as on C<XML::Twig::XPath> elements: =over 4 =item findnodes ($path) return a list of nodes found by C<$path>. =item findnodes_as_string ($path) return the nodes found reproduced as XML. The result is not guaranteed to be valid XML though. =item findvalue ($path) return the concatenation of the text content of the result nodes =back =head2 XML::Twig::Entity_list =over 4 =item new Create an entity list. =item add ($ent) Add an entity to an entity list. =item add_new_ent ($name, $val, $sysid, $pubid, $ndata, $param) Create a new entity and add it to the entity list =item delete ($ent or $tag). Delete an entity (defined by its name or by the Entity object) from the list. =item print ($optional_filehandle) Print the entity list. =item list Return the list as an array =back =head2 XML::Twig::Entity =over 4 =item new ($name, $val, $sysid, $pubid, $ndata, $param) Same arguments as the Entity handler for XML::Parser. =item print ($optional_filehandle) Print an entity declaration. =item name Return the name of the entity =item val Return the value of the entity =item sysid Return the system id for the entity (for NDATA entities) =item pubid Return the public id for the entity (for NDATA entities) =item ndata Return true if the entity is an NDATA entity =item param Return true if the entity is a parameter entity =item text Return the entity declaration text. 
=back =head1 EXAMPLES Additional examples (and a complete tutorial) can be found on the F<XML::Twig PageL<http://www.xmltwig.org/xmltwig/>> To figure out what flush does call the following script with an XML file and an element name as arguments use XML::Twig; my ($file, $elt)= @ARGV; my $t= XML::Twig->new( twig_handlers => { $elt => sub {$_[0]->flush; print "\n[flushed here]\n";} }); $t->parsefile( $file, ErrorContext => 2); $t->flush; print "\n"; =head1 NOTES =head2 Subclassing XML::Twig Useful methods: =over 4 =item elt_class In order to subclass C<XML::Twig> you will probably need to subclass also C<L<XML::Twig::Elt>>. Use the C<elt_class> option when you create the C<XML::Twig> object to get the elements created in a different class (which should be a subclass of C<XML::Twig::Elt>. =item add_options If you inherit C<XML::Twig> new method but want to add more options to it you can use this method to prevent XML::Twig to issue warnings for those additional options. =back =head2 DTD Handling There are 3 possibilities here. They are: =over 4 =item No DTD No doctype, no DTD information, no entity information, the world is simple... =item Internal DTD The XML document includes an internal DTD, and maybe entity declarations. If you use the load_DTD option when creating the twig the DTD information and the entity declarations can be accessed. The DTD and the entity declarations will be C<flush>'ed (or C<print>'ed) either as is (if they have not been modified) or as reconstructed (poorly, comments are lost, order is not kept, due to it's content this DTD should not be viewed by anyone) if they have been modified. You can also modify them directly by changing the C<< $twig->{twig_doctype}->{internal} >> field (straight from XML::Parser, see the C<Doctype> handler doc) =item External DTD The XML document includes a reference to an external DTD, and maybe entity declarations. If you use the C<load_DTD> when creating the twig the DTD information and the entity declarations can be accessed. The entity declarations will be C<flush>'ed (or C<print>'ed) either as is (if they have not been modified) or as reconstructed (badly, comments are lost, order is not kept). You can change the doctype through the C<< $twig->set_doctype >> method and print the dtd through the C<< $twig->dtd_text >> or C<< $twig->dtd_print >> methods. If you need to modify the entity list this is probably the easiest way to do it. =back =head2 Flush Remember that element handlers are called when the element is CLOSED, so if you have handlers for nested elements the inner handlers will be called first. It makes it for example trickier than it would seem to number nested sections (or clauses, or divs), as the titles in the inner sections are handled before the outer sections. =head1 BUGS =over 4 =item segfault during parsing This happens when parsing huge documents, or lots of small ones, with a version of Perl before 5.16. This is due to a bug in the way weak references are handled in Perl itself. The fix is either to upgrade to Perl 5.16 or later (C<perlbrew> is a great tool to manage several installations of perl on the same machine). An other, NOT RECOMMENDED, way of fixing the problem, is to switch off weak references by writing C<XML::Twig::_set_weakrefs( 0);> at the top of the code. 
This is totally unsupported, and may lead to other problems though, =item entity handling Due to XML::Parser behaviour, non-base entities in attribute values disappear if they are not declared in the document: C<att="val&ent;"> will be turned into C<< att => val >>, unless you use the C<keep_encoding> argument to C<< XML::Twig->new >> =item DTD handling The DTD handling methods are quite bugged. No one uses them and it seems very difficult to get them to work in all cases, including with several slightly incompatible versions of XML::Parser and of libexpat. Basically you can read the DTD, output it back properly, and update entities, but not much more. So use XML::Twig with standalone documents, or with documents referring to an external DTD, but don't expect it to properly parse and even output back the DTD. =item memory leak If you use a REALLY old Perl (5.005!) and a lot of twigs you might find that you leak quite a lot of memory (about 2Ks per twig). You can use the C<L<dispose> > method to free that memory after you are done. If you create elements the same thing might happen, use the C<L<delete>> method to get rid of them. Alternatively installing the C<Scalar::Util> (or C<WeakRef>) module on a version of Perl that supports it (>5.6.0) will get rid of the memory leaks automagically. =item ID list The ID list is NOT updated when elements are cut or deleted. =item change_gi This method will not function properly if you do: $twig->change_gi( $old1, $new); $twig->change_gi( $old2, $new); $twig->change_gi( $new, $even_newer); =item sanity check on XML::Parser method calls XML::Twig should really prevent calls to some XML::Parser methods, especially the C<setHandlers> method. =item pretty printing Pretty printing (at least using the 'C<indented>' style) is hard to get right! Only elements that belong to the document will be properly indented. Printing elements that do not belong to the twig makes it impossible for XML::Twig to figure out their depth, and thus their indentation level. Also there is an unavoidable bug when using C<flush> and pretty printing for elements with mixed content that start with an embedded element: <elt><b>b</b>toto<b>bold</b></elt> will be output as <elt> <b>b</b>toto<b>bold</b></elt> if you flush the twig when you find the C<< <b> >> element =back =head1 Globals These are the things that can mess up calling code, especially if threaded. They might also cause problem under mod_perl. =over 4 =item Exported constants Whether you want them or not you get them! These are subroutines to use as constant when creating or testing elements PCDATA return '#PCDATA' CDATA return '#CDATA' PI return '#PI', I had the choice between PROC and PI :--( =item Module scoped values: constants these should cause no trouble: %base_ent= ( '>' => '>', '<' => '<', '&' => '&', "'" => ''', '"' => '"', ); CDATA_START = "<![CDATA["; CDATA_END = "]]>"; PI_START = "<?"; PI_END = "?>"; COMMENT_START = "<!--"; COMMENT_END = "-->"; pretty print styles ( $NSGMLS, $NICE, $INDENTED, $INDENTED_C, $WRAPPED, $RECORD1, $RECORD2)= (1..7); empty tag output style ( $HTML, $EXPAND)= (1..2); =item Module scoped values: might be changed Most of these deal with pretty printing, so the worst that can happen is probably that XML output does not look right, but is still valid and processed identically by XML processors. C<$empty_tag_style> can mess up HTML bowsers though and changing C<$ID> would most likely create problems. 
$pretty=0; # pretty print style $quote='"'; # quote for attributes $INDENT= ' '; # indent for indented pretty print $empty_tag_style= 0; # how to display empty tags $ID # attribute used as an id ('id' by default) =item Module scoped values: definitely changed These 2 variables are used to replace tags by an index, thus saving some space when creating a twig. If they really cause you too much trouble, let me know, it is probably possible to create either a switch or at least a version of XML::Twig that does not perform this optimization. %gi2index; # tag => index @index2gi; # list of tags =back If you need to manipulate all those values, you can use the following methods on the XML::Twig object: =over 4 =item global_state Return a hashref with all the global variables used by XML::Twig The hash has the following fields: C<pretty>, C<quote>, C<indent>, C<empty_tag_style>, C<keep_encoding>, C<expand_external_entities>, C<output_filter>, C<output_text_filter>, C<keep_atts_order> =item set_global_state ($state) Set the global state, C<$state> is a hashref =item save_global_state Save the current global state =item restore_global_state Restore the previously saved (using C<Lsave_global_state>> state =back =head1 TODO =over 4 =item SAX handlers Allowing XML::Twig to work on top of any SAX parser =item multiple twigs are not well supported A number of twig features are just global at the moment. These include the ID list and the "tag pool" (if you use C<change_gi> then you change the tag for ALL twigs). A future version will try to support this while trying not to be to hard on performance (at least when a single twig is used!). =back =head1 AUTHOR Michel Rodriguez <mirod@cpan.org> =head1 LICENSE This library is free software; you can redistribute it and/or modify it under the same terms as Perl itself. Bug reports should be sent using: F<RT L<http://rt.cpan.org/NoAuth/Bugs.html?Dist=XML-Twig>> Comments can be sent to mirod@cpan.org The XML::Twig page is at L<http://www.xmltwig.org/xmltwig/> It includes the development version of the module, a slightly better version of the documentation, examples, a tutorial and a: F<Processing XML efficiently with Perl and XML::Twig: L<http://www.xmltwig.org/xmltwig/tutorial/index.html>> =head1 SEE ALSO Complete docs, including a tutorial, examples, an easier to use HTML version of the docs, a quick reference card and a FAQ are available at L<http://www.xmltwig.org/xmltwig/> git repository at L<http://github.com/mirod/xmltwig> L<XML::Parser>, L<XML::Parser::Expat>, L<XML::XPath>, L<Encode>, L<Text::Iconv>, L<Scalar::Utils> =head2 Alternative Modules XML::Twig is not the only XML::Processing module available on CPAN (far from it!). The main alternative I would recommend is L<XML::LibXML>. Here is a quick comparison of the 2 modules: XML::LibXML, actually C<libxml2> on which it is based, sticks to the standards, and implements a good number of them in a rather strict way: XML, XPath, DOM, RelaxNG, I must be forgetting a couple (XInclude?). It is fast and rather frugal memory-wise. XML::Twig is older: when I started writing it XML::Parser/expat was the only game in town. It implements XML and that's about it (plus a subset of XPath, and you can use XML::Twig::XPath if you have XML::XPathEngine installed for full support). It is slower and requires more memory for a full tree than XML::LibXML. On the plus side (yes, there is a plus side!) 
it lets you process a big document in chunks, and thus let you tackle documents that couldn't be loaded in memory by XML::LibXML, and it offers a lot (and I mean a LOT!) of higher-level methods, for everything, from adding structure to "low-level" XML, to shortcuts for XHTML conversions and more. It also DWIMs quite a bit, getting comments and non-significant whitespaces out of the way but preserving them in the output for example. As it does not stick to the DOM, is also usually leads to shorter code than in XML::LibXML. Beyond the pure features of the 2 modules, XML::LibXML seems to be preferred by "XML-purists", while XML::Twig seems to be more used by Perl Hackers who have to deal with XML. As you have noted, XML::Twig also comes with quite a lot of docs, but I am sure if you ask for help about XML::LibXML here or on Perlmonks you will get answers. Note that it is actually quite hard for me to compare the 2 modules: on one hand I know XML::Twig inside-out and I can get it to do pretty much anything I need to (or I improve it ;--), while I have a very basic knowledge of XML::LibXML. So feature-wise, I'd rather use XML::Twig ;--). On the other hand, I am painfully aware of some of the deficiencies, potential bugs and plain ugly code that lurk in XML::Twig, even though you are unlikely to be affected by them (unless for example you need to change the DTD of a document programmatically), while I haven't looked much into XML::LibXML so it still looks shinny and clean to me. That said, if you need to process a document that is too big to fit memory and XML::Twig is too slow for you, my reluctant advice would be to use "bare" XML::Parser. It won't be as easy to use as XML::Twig: basically with XML::Twig you trade some speed (depending on what you do from a factor 3 to... none) for ease-of-use, but it will be easier IMHO than using SAX (albeit not standard), and at this point a LOT faster (see the last test in L<http://www.xmltwig.org/article/simple_benchmark/>). 
=cut �����������������������������������������������������������������������������������������������������������������������������������������������������������gdata/inst/perl/XML/Twig/���������������������������������������������������������������������������0000755�0001751�0000144�00000000000�13115545572�014651� 5����������������������������������������������������������������������������������������������������ustar �hornik��������������������������users������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������gdata/inst/perl/XML/Twig/XPath.pm�������������������������������������������������������������������0000644�0001751�0000144�00000017023�13003720416�016222� 0����������������������������������������������������������������������������������������������������ustar �hornik��������������������������users������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������# $Id: /xmltwig/trunk/Twig/XPath.pm 32 2008-01-18T13:11:52.128782Z mrodrigu $ package XML::Twig::XPath; use strict; use XML::Twig; my $XPATH; # XPath engine (XML::XPath or XML::XPathEngine); my $XPATH_NUMBER; # <$XPATH>::Number, the XPath number class BEGIN { foreach my $xpath_engine ( qw( XML::XPathEngine XML::XPath) ) { if( XML::Twig::_use( $xpath_engine) ) { $XPATH= $xpath_engine; last; } } unless( $XPATH) { die "cannot use XML::Twig::XPath: neither XML::XPathEngine 0.09+ nor XML::XPath are available"; } $XPATH_NUMBER= "${XPATH}::Number"; } use vars qw($VERSION); $VERSION="0.02"; BEGIN { package XML::XPath::NodeSet; no warnings; # to avoid the "Subroutine sort redefined" message # replace the native sort routine by a Twig'd one sub sort { my $self = CORE::shift; @$self = CORE::sort { $a->node_cmp( $b) } @$self; return $self; } package XML::XPathEngine::NodeSet; no warnings; # to avoid the "Subroutine sort redefined" message # replace the native sort routine by a Twig'd one sub sort { my $self = CORE::shift; @$self = CORE::sort { $a->node_cmp( $b) } @$self; return $self; } } package XML::Twig::XPath; use base 'XML::Twig'; my $XP; # the global xp object; sub to_number { return $XPATH_NUMBER->new( $_[0]->root->text); } sub new { my $class= shift; my $t= XML::Twig->new( elt_class => 'XML::Twig::XPath::Elt', @_); $t->{twig_xp}= $XPATH->new(); bless $t, $class; return $t; } sub set_namespace { my $t= shift; $t->{twig_xp}->set_namespace( @_); } sub set_strict_namespaces { my $t= shift; $t->{twig_xp}->set_strict_namespaces( @_); } sub node_cmp($$) { return $_[1] == $_[0] ? 
0 : -1; } # document is before anything but itself sub isElementNode { 0 } sub isAttributeNode { 0 } sub isTextNode { 0 } sub isProcessingInstructionNode { 0 } sub isPINode { 0 } sub isCommentNode { 0 } sub isNamespaceNode { 0 } sub getAttributes { [] } sub getValue { return $_[0]->root->text; } sub findnodes { my( $t, $path)= @_; return $t->{twig_xp}->findnodes( $path, $t); } sub findnodes_as_string { my( $t, $path)= @_; return $t->{twig_xp}->findnodes_as_string( $path, $t); } sub findvalue { my( $t, $path)= @_; return $t->{twig_xp}->findvalue( $path, $t); } sub exists { my( $t, $path)= @_; return $t->{twig_xp}->exists( $path, $t); } sub find { my( $t, $path)= @_; return $t->{twig_xp}->find( $path, $t); } sub matches { my( $t, $path, $node)= @_; $node ||= $t; return $t->{twig_xp}->matches( $node, $path, $t) || 0; } #TODO: it would be nice to be able to pass in any object in this #distribution and cast it to the proper $XPATH class to use as a #variable (via 'nodes' argument or something) sub set_var { my ($t, $name, $value) = @_; if( ! ref $value) { $value= $t->findnodes( qq{"$value"}); } $t->{twig_xp}->set_var($name, $value); } 1; # adds the appropriate methods to XML::Twig::Elt so XML::XPath can be used as the XPath engine package XML::Twig::XPath::Elt; use base 'XML::Twig::Elt'; *getLocalName= *XML::Twig::Elt::local_name; *getValue = *XML::Twig::Elt::text; sub isAttributeNode { 0 } sub isNamespaceNode { 0 } sub to_number { return $XPATH_NUMBER->new( $_[0]->text); } sub getAttributes { my $elt= shift; my $atts= $elt->atts; # alternate, faster but less clean, way my @atts= map { bless( { name => $_, value => $atts->{$_}, elt => $elt }, 'XML::Twig::XPath::Attribute') } sort keys %$atts; # my @atts= map { XML::Twig::XPath::Attribute->new( $elt, $_) } sort keys %$atts; return wantarray ? 
@atts : \@atts; } sub getNamespace { my $elt= shift; my $prefix= shift() || $elt->ns_prefix; if( my $expanded= $elt->namespace( $prefix)) { return XML::Twig::XPath::Namespace->new( $prefix, $expanded); } else { return XML::Twig::XPath::Namespace->new( $prefix, ''); } } sub node_cmp($$) { my( $a, $b)= @_; if( UNIVERSAL::isa( $b, 'XML::Twig::XPath::Elt')) { # 2 elts, compare them return $a->cmp( $b); } elsif( UNIVERSAL::isa( $b, 'XML::Twig::XPath::Attribute')) { # elt <=> att, compare the elt to the att->{elt} # if the elt is the att->{elt} (cmp return 0) then -1, elt is before att return ($a->cmp( $b->{elt}) ) || -1 ; } elsif( UNIVERSAL::isa( $b, 'XML::Twig::XPath')) { # elt <=> document, elt is after document return 1; } else { die "unknown node type ", ref( $b); } } sub getParentNode { return $_[0]->_parent || $_[0]->twig; } sub findnodes { my( $elt, $path)= @_; return $elt->twig->{twig_xp}->findnodes( $path, $elt); } sub findnodes_as_string { my( $elt, $path)= @_; return $elt->twig->{twig_xp}->findnodes_as_string( $path, $elt); } sub findvalue { my( $elt, $path)= @_; return $elt->twig->{twig_xp}->findvalue( $path, $elt); } sub exists { my( $elt, $path)= @_; return $elt->twig->{twig_xp}->exists( $path, $elt); } sub find { my( $elt, $path)= @_; return $elt->twig->{twig_xp}->find( $path, $elt); } sub matches { my( $elt, $path)= @_; return $elt->twig->{twig_xp}->matches( $elt, $path, $elt->getParentNode) || 0; } 1; # this package is only used to allow XML::XPath as the XPath engine, otherwise # attributes are just attached to their parent element and are not considered objects package XML::Twig::XPath::Attribute; sub new { my( $class, $elt, $att)= @_; return bless { name => $att, value => $elt->att( $att), elt => $elt }, $class; } sub getValue { return $_[0]->{value}; } sub getName { return $_[0]->{name} ; } sub getLocalName { (my $name= $_[0]->{name}) =~ s{^.*:}{}; $name; } sub string_value { return $_[0]->{value}; } sub to_number { return $XPATH_NUMBER->new( $_[0]->{value}); } sub isElementNode { 0 } sub isAttributeNode { 1 } sub isNamespaceNode { 0 } sub isTextNode { 0 } sub isProcessingInstructionNode { 0 } sub isPINode { 0 } sub isCommentNode { 0 } sub toString { return qq{$_[0]->{name}="$_[0]->{value}"}; } sub getNamespace { my $att= shift; my $prefix= shift(); if( ! 
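# no prefix argument supplied: fall back to the prefix embedded in the
# attribute's own qualified name (empty string if the name is unprefixed)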
defined( $prefix)) { if($att->{name}=~ m{^(.*):}) { $prefix= $1; } else { $prefix=''; } } if( my $expanded= $att->{elt}->namespace( $prefix)) { return XML::Twig::XPath::Namespace->new( $prefix, $expanded); } } sub node_cmp($$) { my( $a, $b)= @_; if( UNIVERSAL::isa( $b, 'XML::Twig::XPath::Attribute')) { # 2 attributes, compare their elements, then their name return ($a->{elt}->cmp( $b->{elt}) ) || ($a->{name} cmp $b->{name}); } elsif( UNIVERSAL::isa( $b, 'XML::Twig::XPath::Elt')) { # att <=> elt : compare the att->elt and the elt # if att->elt is the elt (cmp returns 0) then 1 (elt is before att) return ($a->{elt}->cmp( $b) ) || 1 ; } elsif( UNIVERSAL::isa( $b, 'XML::Twig::XPath')) { # att <=> document, att is after document return 1; } else { die "unknown node type ", ref( $b); } } *cmp=*node_cmp; 1; package XML::Twig::XPath::Namespace; sub new { my( $class, $prefix, $expanded)= @_; bless { prefix => $prefix, expanded => $expanded }, $class; } sub isNamespaceNode { 1; } sub getPrefix { $_[0]->{prefix}; } sub getExpanded { $_[0]->{expanded}; } sub getValue { $_[0]->{expanded}; } sub getData { $_[0]->{expanded}; } 1 �������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������gdata/inst/perl/xls2tab.pl��������������������������������������������������������������������������0000755�0001751�0000144�00000015755�13003720416�015217� 0����������������������������������������������������������������������������������������������������ustar �hornik��������������������������users������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������#!/usr/bin/perl BEGIN { use File::Basename; # Add current path to perl library search path use lib dirname($0); } use strict; #use Spreadsheet::ParseExcel; #use Spreadsheet::ParseXLSX; use File::Spec::Functions; use Getopt::Std; ## # Try to load the modules we need ## require 'module_tools.pl'; my( $HAS_Spreadsheet_ParseExcel, $HAS_Compress_Raw_Zlib, $HAS_Spreadsheet_ParseXLSX ) = check_modules_and_notify(); # declare some varibles local my($row, $col, $sheet, $cell, $usage, $targetfile,$basename, $sheetnumber, $filename, $volume, $directories, $whoami, $sep, $sepName, $sepLabel, $sepExt, $skipBlankLines, %switches, $parser, $oBook, $formatter, $using_1904_date ); ## ## Figure out whether I'm called as xls2csv.pl or xls2tab.pl ## ($volume,$directories,$whoami) = File::Spec->splitpath( $0 ); if($whoami eq "xls2csv.pl") { $sep=","; $sepName="comma"; $sepLabel="CSV"; $sepExt="csv"; } elsif ($whoami eq "xls2tsv.pl") { $sep="\t"; $sepName="tab"; $sepLabel="TSV"; $sepExt="tsv"; } elsif ($whoami eq "xls2tab.pl") { $sep="\t"; $sepName="tab"; $sepLabel="TAB"; $sepExt="tab"; } else { die("This script is named '$whoami', but must be named 'xls2csv.pl', 'xls2tsv', or 'xls2tab.pl' to function properly.\n"); } ## ## Usage information ## $usage = <<EOF; $whoami [-s] <excel file> [<output file>] [<worksheet number>] Translate the Microsoft Excel spreadsheet file contained in <excel file> into $sepName separated 
value format ($sepLabel) and store in <output file>, skipping blank lines unless "-s" is present. If <output file> is not specified, the output file will have the same name as the input file with '.xls', or 'xlsx' removed and '.$sepExt' appended. If no worksheet number is given, each worksheet will be written to a separate file with the name '<output file>_<worksheet name>.$sepExt'. EOF ## ## parse arguments ## # Handle switches (currently, just -s) getopts('s', \%switches); $skipBlankLines=!$switches{s}; # Now the rest of the arguments if( !defined($ARGV[0]) ) { print $usage; exit 1; } if( defined($ARGV[1]) ) { $basename = $targetfile = $ARGV[1]; $basename =~ s/\.$sepExt$//i; } else { ($volume,$directories,$basename) = File::Spec->splitpath( $ARGV[0] ); $basename =~ s/\.xlsx*//i; } my $targetsheetname; my $sheetnumber; if(defined($ARGV[2]) ) { if ( $ARGV[2] =~ m|^\d+$| ) { $sheetnumber = $ARGV[2]; die "Sheetnumber must be an integer larger than 0.\n" if $sheetnumber < 1; } else { $targetsheetname = $ARGV[2]; } } ## ## open spreadsheet ## my $oExcel; my $oBook; $oExcel = new Spreadsheet::ParseExcel; $formatter = Spreadsheet::ParseExcel::FmtDefault->new(); open(FH, "<$ARGV[0]") or die "Unable to open file '$ARGV[0]'.\n"; close(FH); print "\n"; print "Loading '$ARGV[0]'...\n"; ## First try as a Excel 2007+ 'xml' file eval { local $SIG{__WARN__} = sub {}; $parser = Spreadsheet::ParseXLSX -> new(); $oBook = $parser->parse ($ARGV[0]); }; ## Then Excel 97-2004 Format if ( !defined $oBook ) { $parser = Spreadsheet::ParseExcel -> new(); $oBook = $parser->parse($ARGV[0]) or \ die "Error parsing file '$ARGV[0]'.\n"; } print "Done.\n"; ## Does this file use 1904-01-01 as the reference date instead of ## 1900-01-01? $using_1904_date = ( $oBook->using_1904_date() == 1 ) || # ParseExcel ( $oBook->{Flag1904} == 1 ); # ParseXLSX ## Show the user some summary information before we start extracting ## date print "\n"; print "Orignal Filename: ", $ARGV[0], "\n"; print "Number of Sheets: ", $oBook->{SheetCount} , "\n"; if($using_1904_date) { print "Date reference : 1904-01-01\n"; } else { print "Date reference : 1900-01-01\n"; } print "\n"; ## Get list all worksheets in the file my @sheetlist = (@{$oBook->{Worksheet}}); my $sheet; ## If we want a specific sheet drop everything else if ( defined($sheetnumber) ) { $sheet = $oBook->Worksheet($sheetnumber-1) or die "No sheet number $sheetnumber.\n"; @sheetlist = ( $sheet ); } elsif ( defined($targetsheetname) ) { $sheet = $oBook->Worksheet($targetsheetname) or die "No sheet named '$targetsheetname'.\n"; @sheetlist = ( $sheet ); } ## ## iterate across each worksheet, writing out a separat csv file ## my $i=0; my $sheetname; my $found=0; foreach my $sheet (@sheetlist) { $i++; $sheetname = $sheet->{Name}; if( defined($sheetnumber) || defined($targetsheetname) || $oBook->{SheetCount}==1 ) { if( defined($targetfile) ) { $filename = $targetfile; } else { $filename = "${basename}.$sepExt"; } } else { $filename = "${basename}_${sheetname}.$sepExt"; } if( defined($sheetnumber) ) { print "Writing sheet number $sheetnumber ('$sheetname') to file '$filename'\n"; } elsif ( defined($targetsheetname) ) { print "Writing sheet '$sheetname' to file '$filename'\n"; } else { print "Writing sheet number $i ('$sheetname') to file '$filename'\n"; } open(OutFile,">$filename"); my $cumulativeBlankLines=0; my $minrow = $sheet->{MinRow}; my $maxrow = $sheet->{MaxRow}; my $mincol = $sheet->{MinCol}; my $maxcol = $sheet->{MaxCol}; print "Minrow=$minrow Maxrow=$maxrow Mincol=$mincol 
Maxcol=$maxcol\n"; for(my $row = $minrow; $row <= $maxrow; $row++) { my $outputLine = ""; for(my $col = $mincol; $col <= $maxcol; $col++) { my $cell = $sheet->{Cells}[$row][$col]; my $format = $formatter->FmtString($cell, $oBook); if( defined($cell) ) { if ($cell->type() eq "Date") # && $using_1904_date ) { my $is_date = ( $format =~ m/y/ && $format =~ m/m/ && $format =~ m/d/ ); my $is_time = ( $format =~ m/h[:\]]*m/ || $format =~ m/m[:\]]*s/ ); if($is_date && $is_time) { $format = "yyyy-mm-dd hh:mm:ss.00"; } elsif ($is_date) { $format = "yyyy-mm-dd"; } elsif ($is_time) { $format = "hh:mm:ss.00" } $_ = ExcelFmt($format, $cell->unformatted(), $using_1904_date); } else { $_=$cell->value(); } # convert '#NUM!' strings to missing (empty) values s/#NUM!//; # convert "#DIV/0!" strings to missing (emtpy) values s|#DIV/0!||; # escape double-quote characters in the data since # they are used as field delimiters s/\"/\\\"/g; } else { $_ = ''; } $outputLine .= "\"" . $_ . "\"" if(length($_)>0); # separate cells with specified separator $outputLine .= $sep if( $col != $maxcol) ; } # skip blank/empty lines if( $skipBlankLines && ($outputLine =~ /^[$sep ]*$/) ) { $cumulativeBlankLines++ } else { print OutFile "$outputLine\n" } } close OutFile; print " (Ignored $cumulativeBlankLines blank lines.)\n" if $skipBlankLines; print "\n"; } �������������������gdata/inst/perl/xls2csv.pl��������������������������������������������������������������������������0000755�0001751�0000144�00000015755�13003720416�015244� 0����������������������������������������������������������������������������������������������������ustar �hornik��������������������������users������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������#!/usr/bin/perl BEGIN { use File::Basename; # Add current path to perl library search path use lib dirname($0); } use strict; #use Spreadsheet::ParseExcel; #use Spreadsheet::ParseXLSX; use File::Spec::Functions; use Getopt::Std; ## # Try to load the modules we need ## require 'module_tools.pl'; my( $HAS_Spreadsheet_ParseExcel, $HAS_Compress_Raw_Zlib, $HAS_Spreadsheet_ParseXLSX ) = check_modules_and_notify(); # declare some varibles local my($row, $col, $sheet, $cell, $usage, $targetfile,$basename, $sheetnumber, $filename, $volume, $directories, $whoami, $sep, $sepName, $sepLabel, $sepExt, $skipBlankLines, %switches, $parser, $oBook, $formatter, $using_1904_date ); ## ## Figure out whether I'm called as xls2csv.pl or xls2tab.pl ## ($volume,$directories,$whoami) = File::Spec->splitpath( $0 ); if($whoami eq "xls2csv.pl") { $sep=","; $sepName="comma"; $sepLabel="CSV"; $sepExt="csv"; } elsif ($whoami eq "xls2tsv.pl") { $sep="\t"; $sepName="tab"; $sepLabel="TSV"; $sepExt="tsv"; } elsif ($whoami eq "xls2tab.pl") { $sep="\t"; $sepName="tab"; $sepLabel="TAB"; $sepExt="tab"; } else { die("This script is named '$whoami', but must be named 'xls2csv.pl', 'xls2tsv', or 'xls2tab.pl' to function properly.\n"); } ## ## Usage information ## $usage = <<EOF; $whoami [-s] <excel file> [<output file>] [<worksheet number>] Translate the Microsoft Excel spreadsheet file contained in <excel file> into $sepName separated value format ($sepLabel) and store in <output file>, skipping blank lines unless "-s" is present. If <output file> is not specified, the output file will have the same name as the input file with '.xls', or 'xlsx' removed and '.$sepExt' appended. 
If no worksheet number is given, each worksheet will be written to a separate file with the name '<output file>_<worksheet name>.$sepExt'. EOF ## ## parse arguments ## # Handle switches (currently, just -s) getopts('s', \%switches); $skipBlankLines=!$switches{s}; # Now the rest of the arguments if( !defined($ARGV[0]) ) { print $usage; exit 1; } if( defined($ARGV[1]) ) { $basename = $targetfile = $ARGV[1]; $basename =~ s/\.$sepExt$//i; } else { ($volume,$directories,$basename) = File::Spec->splitpath( $ARGV[0] ); $basename =~ s/\.xlsx*//i; } my $targetsheetname; my $sheetnumber; if(defined($ARGV[2]) ) { if ( $ARGV[2] =~ m|^\d+$| ) { $sheetnumber = $ARGV[2]; die "Sheetnumber must be an integer larger than 0.\n" if $sheetnumber < 1; } else { $targetsheetname = $ARGV[2]; } } ## ## open spreadsheet ## my $oExcel; my $oBook; $oExcel = new Spreadsheet::ParseExcel; $formatter = Spreadsheet::ParseExcel::FmtDefault->new(); open(FH, "<$ARGV[0]") or die "Unable to open file '$ARGV[0]'.\n"; close(FH); print "\n"; print "Loading '$ARGV[0]'...\n"; ## First try as a Excel 2007+ 'xml' file eval { local $SIG{__WARN__} = sub {}; $parser = Spreadsheet::ParseXLSX -> new(); $oBook = $parser->parse ($ARGV[0]); }; ## Then Excel 97-2004 Format if ( !defined $oBook ) { $parser = Spreadsheet::ParseExcel -> new(); $oBook = $parser->parse($ARGV[0]) or \ die "Error parsing file '$ARGV[0]'.\n"; } print "Done.\n"; ## Does this file use 1904-01-01 as the reference date instead of ## 1900-01-01? $using_1904_date = ( $oBook->using_1904_date() == 1 ) || # ParseExcel ( $oBook->{Flag1904} == 1 ); # ParseXLSX ## Show the user some summary information before we start extracting ## date print "\n"; print "Orignal Filename: ", $ARGV[0], "\n"; print "Number of Sheets: ", $oBook->{SheetCount} , "\n"; if($using_1904_date) { print "Date reference : 1904-01-01\n"; } else { print "Date reference : 1900-01-01\n"; } print "\n"; ## Get list all worksheets in the file my @sheetlist = (@{$oBook->{Worksheet}}); my $sheet; ## If we want a specific sheet drop everything else if ( defined($sheetnumber) ) { $sheet = $oBook->Worksheet($sheetnumber-1) or die "No sheet number $sheetnumber.\n"; @sheetlist = ( $sheet ); } elsif ( defined($targetsheetname) ) { $sheet = $oBook->Worksheet($targetsheetname) or die "No sheet named '$targetsheetname'.\n"; @sheetlist = ( $sheet ); } ## ## iterate across each worksheet, writing out a separat csv file ## my $i=0; my $sheetname; my $found=0; foreach my $sheet (@sheetlist) { $i++; $sheetname = $sheet->{Name}; if( defined($sheetnumber) || defined($targetsheetname) || $oBook->{SheetCount}==1 ) { if( defined($targetfile) ) { $filename = $targetfile; } else { $filename = "${basename}.$sepExt"; } } else { $filename = "${basename}_${sheetname}.$sepExt"; } if( defined($sheetnumber) ) { print "Writing sheet number $sheetnumber ('$sheetname') to file '$filename'\n"; } elsif ( defined($targetsheetname) ) { print "Writing sheet '$sheetname' to file '$filename'\n"; } else { print "Writing sheet number $i ('$sheetname') to file '$filename'\n"; } open(OutFile,">$filename"); my $cumulativeBlankLines=0; my $minrow = $sheet->{MinRow}; my $maxrow = $sheet->{MaxRow}; my $mincol = $sheet->{MinCol}; my $maxcol = $sheet->{MaxCol}; print "Minrow=$minrow Maxrow=$maxrow Mincol=$mincol Maxcol=$maxcol\n"; for(my $row = $minrow; $row <= $maxrow; $row++) { my $outputLine = ""; for(my $col = $mincol; $col <= $maxcol; $col++) { my $cell = $sheet->{Cells}[$row][$col]; my $format = $formatter->FmtString($cell, $oBook); if( defined($cell) ) { if 
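# date/time cells get an explicit ISO-style format before rendering below;
# all other cells are written out with their displayed value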
($cell->type() eq "Date") # && $using_1904_date ) { my $is_date = ( $format =~ m/y/ && $format =~ m/m/ && $format =~ m/d/ ); my $is_time = ( $format =~ m/h[:\]]*m/ || $format =~ m/m[:\]]*s/ ); if($is_date && $is_time) { $format = "yyyy-mm-dd hh:mm:ss.00"; } elsif ($is_date) { $format = "yyyy-mm-dd"; } elsif ($is_time) { $format = "hh:mm:ss.00" } $_ = ExcelFmt($format, $cell->unformatted(), $using_1904_date); } else { $_=$cell->value(); } # convert '#NUM!' strings to missing (empty) values s/#NUM!//; # convert "#DIV/0!" strings to missing (emtpy) values s|#DIV/0!||; # escape double-quote characters in the data since # they are used as field delimiters s/\"/\\\"/g; } else { $_ = ''; } $outputLine .= "\"" . $_ . "\"" if(length($_)>0); # separate cells with specified separator $outputLine .= $sep if( $col != $maxcol) ; } # skip blank/empty lines if( $skipBlankLines && ($outputLine =~ /^[$sep ]*$/) ) { $cumulativeBlankLines++ } else { print OutFile "$outputLine\n" } } close OutFile; print " (Ignored $cumulativeBlankLines blank lines.)\n" if $skipBlankLines; print "\n"; } �������������������gdata/inst/perl/xls2tsv.pl��������������������������������������������������������������������������0000755�0001751�0000144�00000015755�13003720416�015265� 0����������������������������������������������������������������������������������������������������ustar �hornik��������������������������users������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������#!/usr/bin/perl BEGIN { use File::Basename; # Add current path to perl library search path use lib dirname($0); } use strict; #use Spreadsheet::ParseExcel; #use Spreadsheet::ParseXLSX; use File::Spec::Functions; use Getopt::Std; ## # Try to load the modules we need ## require 'module_tools.pl'; my( $HAS_Spreadsheet_ParseExcel, $HAS_Compress_Raw_Zlib, $HAS_Spreadsheet_ParseXLSX ) = check_modules_and_notify(); # declare some varibles local my($row, $col, $sheet, $cell, $usage, $targetfile,$basename, $sheetnumber, $filename, $volume, $directories, $whoami, $sep, $sepName, $sepLabel, $sepExt, $skipBlankLines, %switches, $parser, $oBook, $formatter, $using_1904_date ); ## ## Figure out whether I'm called as xls2csv.pl or xls2tab.pl ## ($volume,$directories,$whoami) = File::Spec->splitpath( $0 ); if($whoami eq "xls2csv.pl") { $sep=","; $sepName="comma"; $sepLabel="CSV"; $sepExt="csv"; } elsif ($whoami eq "xls2tsv.pl") { $sep="\t"; $sepName="tab"; $sepLabel="TSV"; $sepExt="tsv"; } elsif ($whoami eq "xls2tab.pl") { $sep="\t"; $sepName="tab"; $sepLabel="TAB"; $sepExt="tab"; } else { die("This script is named '$whoami', but must be named 'xls2csv.pl', 'xls2tsv', or 'xls2tab.pl' to function properly.\n"); } ## ## Usage information ## $usage = <<EOF; $whoami [-s] <excel file> [<output file>] [<worksheet number>] Translate the Microsoft Excel spreadsheet file contained in <excel file> into $sepName separated value format ($sepLabel) and store in <output file>, skipping blank lines unless "-s" is present. If <output file> is not specified, the output file will have the same name as the input file with '.xls', or 'xlsx' removed and '.$sepExt' appended. If no worksheet number is given, each worksheet will be written to a separate file with the name '<output file>_<worksheet name>.$sepExt'. 
EOF ## ## parse arguments ## # Handle switches (currently, just -s) getopts('s', \%switches); $skipBlankLines=!$switches{s}; # Now the rest of the arguments if( !defined($ARGV[0]) ) { print $usage; exit 1; } if( defined($ARGV[1]) ) { $basename = $targetfile = $ARGV[1]; $basename =~ s/\.$sepExt$//i; } else { ($volume,$directories,$basename) = File::Spec->splitpath( $ARGV[0] ); $basename =~ s/\.xlsx*//i; } my $targetsheetname; my $sheetnumber; if(defined($ARGV[2]) ) { if ( $ARGV[2] =~ m|^\d+$| ) { $sheetnumber = $ARGV[2]; die "Sheetnumber must be an integer larger than 0.\n" if $sheetnumber < 1; } else { $targetsheetname = $ARGV[2]; } } ## ## open spreadsheet ## my $oExcel; my $oBook; $oExcel = new Spreadsheet::ParseExcel; $formatter = Spreadsheet::ParseExcel::FmtDefault->new(); open(FH, "<$ARGV[0]") or die "Unable to open file '$ARGV[0]'.\n"; close(FH); print "\n"; print "Loading '$ARGV[0]'...\n"; ## First try as a Excel 2007+ 'xml' file eval { local $SIG{__WARN__} = sub {}; $parser = Spreadsheet::ParseXLSX -> new(); $oBook = $parser->parse ($ARGV[0]); }; ## Then Excel 97-2004 Format if ( !defined $oBook ) { $parser = Spreadsheet::ParseExcel -> new(); $oBook = $parser->parse($ARGV[0]) or \ die "Error parsing file '$ARGV[0]'.\n"; } print "Done.\n"; ## Does this file use 1904-01-01 as the reference date instead of ## 1900-01-01? $using_1904_date = ( $oBook->using_1904_date() == 1 ) || # ParseExcel ( $oBook->{Flag1904} == 1 ); # ParseXLSX ## Show the user some summary information before we start extracting ## date print "\n"; print "Orignal Filename: ", $ARGV[0], "\n"; print "Number of Sheets: ", $oBook->{SheetCount} , "\n"; if($using_1904_date) { print "Date reference : 1904-01-01\n"; } else { print "Date reference : 1900-01-01\n"; } print "\n"; ## Get list all worksheets in the file my @sheetlist = (@{$oBook->{Worksheet}}); my $sheet; ## If we want a specific sheet drop everything else if ( defined($sheetnumber) ) { $sheet = $oBook->Worksheet($sheetnumber-1) or die "No sheet number $sheetnumber.\n"; @sheetlist = ( $sheet ); } elsif ( defined($targetsheetname) ) { $sheet = $oBook->Worksheet($targetsheetname) or die "No sheet named '$targetsheetname'.\n"; @sheetlist = ( $sheet ); } ## ## iterate across each worksheet, writing out a separat csv file ## my $i=0; my $sheetname; my $found=0; foreach my $sheet (@sheetlist) { $i++; $sheetname = $sheet->{Name}; if( defined($sheetnumber) || defined($targetsheetname) || $oBook->{SheetCount}==1 ) { if( defined($targetfile) ) { $filename = $targetfile; } else { $filename = "${basename}.$sepExt"; } } else { $filename = "${basename}_${sheetname}.$sepExt"; } if( defined($sheetnumber) ) { print "Writing sheet number $sheetnumber ('$sheetname') to file '$filename'\n"; } elsif ( defined($targetsheetname) ) { print "Writing sheet '$sheetname' to file '$filename'\n"; } else { print "Writing sheet number $i ('$sheetname') to file '$filename'\n"; } open(OutFile,">$filename"); my $cumulativeBlankLines=0; my $minrow = $sheet->{MinRow}; my $maxrow = $sheet->{MaxRow}; my $mincol = $sheet->{MinCol}; my $maxcol = $sheet->{MaxCol}; print "Minrow=$minrow Maxrow=$maxrow Mincol=$mincol Maxcol=$maxcol\n"; for(my $row = $minrow; $row <= $maxrow; $row++) { my $outputLine = ""; for(my $col = $mincol; $col <= $maxcol; $col++) { my $cell = $sheet->{Cells}[$row][$col]; my $format = $formatter->FmtString($cell, $oBook); if( defined($cell) ) { if ($cell->type() eq "Date") # && $using_1904_date ) { my $is_date = ( $format =~ m/y/ && $format =~ m/m/ && $format =~ m/d/ ); my $is_time = 
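# a number format is treated as a time when it pairs hours with minutes
# or minutes with seconds (e.g. "hh:mm" or "mm:ss")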
( $format =~ m/h[:\]]*m/ || $format =~ m/m[:\]]*s/ ); if($is_date && $is_time) { $format = "yyyy-mm-dd hh:mm:ss.00"; } elsif ($is_date) { $format = "yyyy-mm-dd"; } elsif ($is_time) { $format = "hh:mm:ss.00" } $_ = ExcelFmt($format, $cell->unformatted(), $using_1904_date); } else { $_=$cell->value(); } # convert '#NUM!' strings to missing (empty) values s/#NUM!//; # convert "#DIV/0!" strings to missing (emtpy) values s|#DIV/0!||; # escape double-quote characters in the data since # they are used as field delimiters s/\"/\\\"/g; } else { $_ = ''; } $outputLine .= "\"" . $_ . "\"" if(length($_)>0); # separate cells with specified separator $outputLine .= $sep if( $col != $maxcol) ; } # skip blank/empty lines if( $skipBlankLines && ($outputLine =~ /^[$sep ]*$/) ) { $cumulativeBlankLines++ } else { print OutFile "$outputLine\n" } } close OutFile; print " (Ignored $cumulativeBlankLines blank lines.)\n" if $skipBlankLines; print "\n"; } �������������������gdata/inst/perl/Digest/�����������������������������������������������������������������������������0000755�0001751�0000144�00000000000�13003720416�014502� 5����������������������������������������������������������������������������������������������������ustar �hornik��������������������������users������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������gdata/inst/perl/Digest/Perl/������������������������������������������������������������������������0000755�0001751�0000144�00000000000�13003720416�015404� 5����������������������������������������������������������������������������������������������������ustar �hornik��������������������������users������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������gdata/inst/perl/Digest/Perl/MD5.pm������������������������������������������������������������������0000755�0001751�0000144�00000030732�13003720416�016337� 0����������������������������������������������������������������������������������������������������ustar �hornik��������������������������users������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������package Digest::Perl::MD5; use strict; use integer; use Exporter; use vars qw($VERSION @ISA @EXPORTER @EXPORT_OK); @EXPORT_OK = qw(md5 md5_hex md5_base64); @ISA = 'Exporter'; $VERSION = '1.9'; # I-Vektor sub A() { 0x67_45_23_01 } sub B() { 0xef_cd_ab_89 } sub C() { 0x98_ba_dc_fe } sub D() { 0x10_32_54_76 } # for internal use sub MAX() { 0xFFFFFFFF } # pad a message to a multiple of 64 sub padding { my $l = length (my $msg = shift() . chr(128)); $msg .= "\0" x (($l%64<=56?56:120)-$l%64); $l = ($l-1)*8; $msg .= pack 'VV', $l & MAX , ($l >> 16 >> 16); } sub rotate_left($$) { #$_[0] << $_[1] | $_[0] >> (32 - $_[1]); #my $right = $_[0] >> (32 - $_[1]); #my $rmask = (1 << $_[1]) - 1; ($_[0] << $_[1]) | (( $_[0] >> (32 - $_[1]) ) & ((1 << $_[1]) - 1)); #$_[0] << $_[1] | (($_[0]>> (32 - $_[1])) & (1 << (32 - $_[1])) - 1); } sub gen_code { # Discard upper 32 bits on 64 bit archs. my $MSK = ((1 << 16) << 16) ? ' & ' . 
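# when native integers are wider than 32 bits, every addition in the
# generated round code gets "& MAX" appended so results wrap at 32 bits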
MAX : ''; # FF => "X0=rotate_left(((X1&X2)|(~X1&X3))+X0+X4+X6$MSK,X5)+X1$MSK;", # GG => "X0=rotate_left(((X1&X3)|(X2&(~X3)))+X0+X4+X6$MSK,X5)+X1$MSK;", my %f = ( FF => "X0=rotate_left((X3^(X1&(X2^X3)))+X0+X4+X6$MSK,X5)+X1$MSK;", GG => "X0=rotate_left((X2^(X3&(X1^X2)))+X0+X4+X6$MSK,X5)+X1$MSK;", HH => "X0=rotate_left((X1^X2^X3)+X0+X4+X6$MSK,X5)+X1$MSK;", II => "X0=rotate_left((X2^(X1|(~X3)))+X0+X4+X6$MSK,X5)+X1$MSK;", ); #unless ( (1 << 16) << 16) { %f = %{$CODES{'32bit'}} } #else { %f = %{$CODES{'64bit'}} } my %s = ( # shift lengths S11 => 7, S12 => 12, S13 => 17, S14 => 22, S21 => 5, S22 => 9, S23 => 14, S24 => 20, S31 => 4, S32 => 11, S33 => 16, S34 => 23, S41 => 6, S42 => 10, S43 => 15, S44 => 21 ); my $insert = "\n"; while(defined( my $data = <DATA> )) { chomp $data; next unless $data =~ /^[FGHI]/; my ($func,@x) = split /,/, $data; my $c = $f{$func}; $c =~ s/X(\d)/$x[$1]/g; $c =~ s/(S\d{2})/$s{$1}/; $c =~ s/^(.*)=rotate_left\((.*),(.*)\)\+(.*)$//; my $su = 32 - $3; my $sh = (1 << $3) - 1; $c = "$1=(((\$r=$2)<<$3)|((\$r>>$su)&$sh))+$4"; #my $rotate = "(($2 << $3) || (($2 >> (32 - $3)) & (1 << $2) - 1)))"; # $c = "\$r = $2; # $1 = ((\$r << $3) | ((\$r >> (32 - $3)) & ((1 << $3) - 1))) + $4"; $insert .= "\t$c\n"; } close DATA; my $dump = ' sub round { my ($a,$b,$c,$d) = @_[0 .. 3]; my $r;' . $insert . ' $_[0]+$a' . $MSK . ', $_[1]+$b ' . $MSK . ', $_[2]+$c' . $MSK . ', $_[3]+$d' . $MSK . '; }'; eval $dump; # print "$dump\n"; # exit 0; } gen_code(); ######################################### # Private output converter functions: sub _encode_hex { unpack 'H*', $_[0] } sub _encode_base64 { my $res; while ($_[0] =~ /(.{1,45})/gs) { $res .= substr pack('u', $1), 1; chop $res; } $res =~ tr|` -_|AA-Za-z0-9+/|;#` chop $res; chop $res; $res } ######################################### # OOP interface: sub new { my $proto = shift; my $class = ref $proto || $proto; my $self = {}; bless $self, $class; $self->reset(); $self } sub reset { my $self = shift; delete $self->{_data}; $self->{_state} = [A,B,C,D]; $self->{_length} = 0; $self } sub add { my $self = shift; $self->{_data} .= join '', @_ if @_; my ($i,$c); for $i (0 .. (length $self->{_data})/64-1) { my @X = unpack 'V16', substr $self->{_data}, $i*64, 64; @{$self->{_state}} = round(@{$self->{_state}},@X); ++$c; } if ($c) { substr ($self->{_data}, 0, $c*64) = ''; $self->{_length} += $c*64; } $self } sub finalize { my $self = shift; $self->{_data} .= chr(128); my $l = $self->{_length} + length $self->{_data}; $self->{_data} .= "\0" x (($l%64<=56?56:120)-$l%64); $l = ($l-1)*8; $self->{_data} .= pack 'VV', $l & MAX , ($l >> 16 >> 16); $self->add(); $self } sub addfile { my ($self,$fh) = @_; if (!ref($fh) && ref(\$fh) ne "GLOB") { require Symbol; $fh = Symbol::qualify($fh, scalar caller); } # $self->{_data} .= do{local$/;<$fh>}; my $read = 0; my $buffer = ''; $self->add($buffer) while $read = read $fh, $buffer, 8192; die __PACKAGE__, " read failed: $!" 
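# read() returns undef on a genuine I/O error and 0 at end of file,
# so only the undef case is fatal here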
unless defined $read; $self } sub add_bits { my $self = shift; return $self->add( pack 'B*', shift ) if @_ == 1; my ($b,$n) = @_; die __PACKAGE__, " Invalid number of bits\n" if $n%8; $self->add( substr $b, 0, $n/8 ) } sub digest { my $self = shift; $self->finalize(); my $res = pack 'V4', @{$self->{_state}}; $self->reset(); $res } sub hexdigest { _encode_hex($_[0]->digest) } sub b64digest { _encode_base64($_[0]->digest) } sub clone { my $self = shift; my $clone = { _state => [@{$self->{_state}}], _length => $self->{_length}, _data => $self->{_data} }; bless $clone, ref $self || $self; } ######################################### # Procedural interface: sub md5 { my $message = padding(join'',@_); my ($a,$b,$c,$d) = (A,B,C,D); my $i; for $i (0 .. (length $message)/64-1) { my @X = unpack 'V16', substr $message,$i*64,64; ($a,$b,$c,$d) = round($a,$b,$c,$d,@X); } pack 'V4',$a,$b,$c,$d; } sub md5_hex { _encode_hex &md5 } sub md5_base64 { _encode_base64 &md5 } 1; =head1 NAME Digest::MD5::Perl - Perl implementation of Ron Rivests MD5 Algorithm =head1 DISCLAIMER This is B<not> an interface (like C<Digest::MD5>) but a Perl implementation of MD5. It is written in perl only and because of this it is slow but it works without C-Code. You should use C<Digest::MD5> instead of this module if it is available. This module is only useful for =over 4 =item computers where you cannot install C<Digest::MD5> (e.g. lack of a C-Compiler) =item encrypting only small amounts of data (less than one million bytes). I use it to hash passwords. =item educational purposes =back =head1 SYNOPSIS # Functional style use Digest::MD5 qw(md5 md5_hex md5_base64); $hash = md5 $data; $hash = md5_hex $data; $hash = md5_base64 $data; # OO style use Digest::MD5; $ctx = Digest::MD5->new; $ctx->add($data); $ctx->addfile(*FILE); $digest = $ctx->digest; $digest = $ctx->hexdigest; $digest = $ctx->b64digest; =head1 DESCRIPTION This modules has the same interface as the much faster C<Digest::MD5>. So you can easily exchange them, e.g. BEGIN { eval { require Digest::MD5; import Digest::MD5 'md5_hex' }; if ($@) { # ups, no Digest::MD5 require Digest::Perl::MD5; import Digest::Perl::MD5 'md5_hex' } } If the C<Digest::MD5> module is available it is used and if not you take C<Digest::Perl::MD5>. You can also install the Perl part of Digest::MD5 together with Digest::Perl::MD5 and use Digest::MD5 as normal, it falls back to Digest::Perl::MD5 if it cannot load its object files. For a detailed Documentation see the C<Digest::MD5> module. =head1 EXAMPLES The simplest way to use this library is to import the md5_hex() function (or one of its cousins): use Digest::Perl::MD5 'md5_hex'; print 'Digest is ', md5_hex('foobarbaz'), "\n"; The above example would print out the message Digest is 6df23dc03f9b54cc38a0fc1483df6e21 provided that the implementation is working correctly. The same checksum can also be calculated in OO style: use Digest::MD5; $md5 = Digest::MD5->new; $md5->add('foo', 'bar'); $md5->add('baz'); $digest = $md5->hexdigest; print "Digest is $digest\n"; The digest methods are destructive. That means you can only call them once and the $md5 objects is reset after use. You can make a copy with clone: $md5->clone->hexdigest =head1 LIMITATIONS This implementation of the MD5 algorithm has some limitations: =over 4 =item It's slow, very slow. I've done my very best but Digest::MD5 is still about 100 times faster. You can only encrypt Data up to one million bytes in an acceptable time. 
But it's very useful for encrypting small amounts of data like passwords. =item You can only encrypt up to 2^32 bits = 512 MB on 32bit archs. But You should use C<Digest::MD5> for those amounts of data anyway. =back =head1 SEE ALSO L<Digest::MD5> L<md5(1)> RFC 1321 tools/md5: a small BSD compatible md5 tool written in pure perl. =head1 COPYRIGHT This library is free software; you can redistribute it and/or modify it under the same terms as Perl itself. Copyright 2000 Christian Lackas, Imperia Software Solutions Copyright 1998-1999 Gisle Aas. Copyright 1995-1996 Neil Winton. Copyright 1991-1992 RSA Data Security, Inc. The MD5 algorithm is defined in RFC 1321. The basic C code implementing the algorithm is derived from that in the RFC and is covered by the following copyright: =over 4 =item Copyright (C) 1991-1992, RSA Data Security, Inc. Created 1991. All rights reserved. License to copy and use this software is granted provided that it is identified as the "RSA Data Security, Inc. MD5 Message-Digest Algorithm" in all material mentioning or referencing this software or this function. License is also granted to make and use derivative works provided that such works are identified as "derived from the RSA Data Security, Inc. MD5 Message-Digest Algorithm" in all material mentioning or referencing the derived work. RSA Data Security, Inc. makes no representations concerning either the merchantability of this software or the suitability of this software for any particular purpose. It is provided "as is" without express or implied warranty of any kind. These notices must be retained in any copies of any part of this documentation and/or software. =back This copyright does not prohibit distribution of any version of Perl containing this extension under the terms of the GNU or Artistic licenses. =head1 AUTHORS The original MD5 interface was written by Neil Winton (<N.Winton (at) axion.bt.co.uk>). C<Digest::MD5> was made by Gisle Aas <gisle (at) aas.no> (I took his Interface and part of the documentation). Thanks to Guido Flohr for his 'use integer'-hint. This release was made by Christian Lackas <delta (at) lackas.net>. 
=cut __DATA__ FF,$a,$b,$c,$d,$_[4],7,0xd76aa478,/* 1 */ FF,$d,$a,$b,$c,$_[5],12,0xe8c7b756,/* 2 */ FF,$c,$d,$a,$b,$_[6],17,0x242070db,/* 3 */ FF,$b,$c,$d,$a,$_[7],22,0xc1bdceee,/* 4 */ FF,$a,$b,$c,$d,$_[8],7,0xf57c0faf,/* 5 */ FF,$d,$a,$b,$c,$_[9],12,0x4787c62a,/* 6 */ FF,$c,$d,$a,$b,$_[10],17,0xa8304613,/* 7 */ FF,$b,$c,$d,$a,$_[11],22,0xfd469501,/* 8 */ FF,$a,$b,$c,$d,$_[12],7,0x698098d8,/* 9 */ FF,$d,$a,$b,$c,$_[13],12,0x8b44f7af,/* 10 */ FF,$c,$d,$a,$b,$_[14],17,0xffff5bb1,/* 11 */ FF,$b,$c,$d,$a,$_[15],22,0x895cd7be,/* 12 */ FF,$a,$b,$c,$d,$_[16],7,0x6b901122,/* 13 */ FF,$d,$a,$b,$c,$_[17],12,0xfd987193,/* 14 */ FF,$c,$d,$a,$b,$_[18],17,0xa679438e,/* 15 */ FF,$b,$c,$d,$a,$_[19],22,0x49b40821,/* 16 */ GG,$a,$b,$c,$d,$_[5],5,0xf61e2562,/* 17 */ GG,$d,$a,$b,$c,$_[10],9,0xc040b340,/* 18 */ GG,$c,$d,$a,$b,$_[15],14,0x265e5a51,/* 19 */ GG,$b,$c,$d,$a,$_[4],20,0xe9b6c7aa,/* 20 */ GG,$a,$b,$c,$d,$_[9],5,0xd62f105d,/* 21 */ GG,$d,$a,$b,$c,$_[14],9,0x2441453,/* 22 */ GG,$c,$d,$a,$b,$_[19],14,0xd8a1e681,/* 23 */ GG,$b,$c,$d,$a,$_[8],20,0xe7d3fbc8,/* 24 */ GG,$a,$b,$c,$d,$_[13],5,0x21e1cde6,/* 25 */ GG,$d,$a,$b,$c,$_[18],9,0xc33707d6,/* 26 */ GG,$c,$d,$a,$b,$_[7],14,0xf4d50d87,/* 27 */ GG,$b,$c,$d,$a,$_[12],20,0x455a14ed,/* 28 */ GG,$a,$b,$c,$d,$_[17],5,0xa9e3e905,/* 29 */ GG,$d,$a,$b,$c,$_[6],9,0xfcefa3f8,/* 30 */ GG,$c,$d,$a,$b,$_[11],14,0x676f02d9,/* 31 */ GG,$b,$c,$d,$a,$_[16],20,0x8d2a4c8a,/* 32 */ HH,$a,$b,$c,$d,$_[9],4,0xfffa3942,/* 33 */ HH,$d,$a,$b,$c,$_[12],11,0x8771f681,/* 34 */ HH,$c,$d,$a,$b,$_[15],16,0x6d9d6122,/* 35 */ HH,$b,$c,$d,$a,$_[18],23,0xfde5380c,/* 36 */ HH,$a,$b,$c,$d,$_[5],4,0xa4beea44,/* 37 */ HH,$d,$a,$b,$c,$_[8],11,0x4bdecfa9,/* 38 */ HH,$c,$d,$a,$b,$_[11],16,0xf6bb4b60,/* 39 */ HH,$b,$c,$d,$a,$_[14],23,0xbebfbc70,/* 40 */ HH,$a,$b,$c,$d,$_[17],4,0x289b7ec6,/* 41 */ HH,$d,$a,$b,$c,$_[4],11,0xeaa127fa,/* 42 */ HH,$c,$d,$a,$b,$_[7],16,0xd4ef3085,/* 43 */ HH,$b,$c,$d,$a,$_[10],23,0x4881d05,/* 44 */ HH,$a,$b,$c,$d,$_[13],4,0xd9d4d039,/* 45 */ HH,$d,$a,$b,$c,$_[16],11,0xe6db99e5,/* 46 */ HH,$c,$d,$a,$b,$_[19],16,0x1fa27cf8,/* 47 */ HH,$b,$c,$d,$a,$_[6],23,0xc4ac5665,/* 48 */ II,$a,$b,$c,$d,$_[4],6,0xf4292244,/* 49 */ II,$d,$a,$b,$c,$_[11],10,0x432aff97,/* 50 */ II,$c,$d,$a,$b,$_[18],15,0xab9423a7,/* 51 */ II,$b,$c,$d,$a,$_[9],21,0xfc93a039,/* 52 */ II,$a,$b,$c,$d,$_[16],6,0x655b59c3,/* 53 */ II,$d,$a,$b,$c,$_[7],10,0x8f0ccc92,/* 54 */ II,$c,$d,$a,$b,$_[14],15,0xffeff47d,/* 55 */ II,$b,$c,$d,$a,$_[5],21,0x85845dd1,/* 56 */ II,$a,$b,$c,$d,$_[12],6,0x6fa87e4f,/* 57 */ II,$d,$a,$b,$c,$_[19],10,0xfe2ce6e0,/* 58 */ II,$c,$d,$a,$b,$_[10],15,0xa3014314,/* 59 */ II,$b,$c,$d,$a,$_[17],21,0x4e0811a1,/* 60 */ II,$a,$b,$c,$d,$_[8],6,0xf7537e82,/* 61 */ II,$d,$a,$b,$c,$_[15],10,0xbd3af235,/* 62 */ II,$c,$d,$a,$b,$_[6],15,0x2ad7d2bb,/* 63 */ II,$b,$c,$d,$a,$_[13],21,0xeb86d391,/* 64 */ ��������������������������������������gdata/inst/perl/OLE/��������������������������������������������������������������������������������0000755�0001751�0000144�00000000000�13003720416�013702� 5����������������������������������������������������������������������������������������������������ustar �hornik��������������������������users������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������gdata/inst/perl/OLE/README-OLE-Storage_Lite���������������������������������������������������������0000644�0001751�0000144�00000001673�13003720416�017565� 
0����������������������������������������������������������������������������������������������������ustar �hornik��������������������������users������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������NAME OLE::Storage_Lite - Simple Class for OLE document interface. DESCRIPTION This module allows you to read and write an OLE-Structured file. The module will work on the majority of Windows, UNIX and Macintosh platforms. REQUIREMENT Perl 5.005 or later. INSTALLATION The module can be installed using the standard Perl procedure: perl Makefile.PL make make test make install # You may need to be root make clean # or make realclean Windows users without a working "make" can get nmake from: ftp://ftp.microsoft.com/Softlib/MSLFILES/nmake15.exe SAMPLE Samples scripts are in sample directory. smplls.pl : displays PPS structure of specified file (subset of "lls" distributed with OLE::Storage) smpsv.pl : creates and save a sample OLE-file(tsv.dat). AUTHOR Kawai Takanori (kwitknr@cpan.org) ���������������������������������������������������������������������gdata/inst/perl/OLE/Storage_Lite.pm�����������������������������������������������������������������0000644�0001751�0000144�00000152117�13003720416�016630� 0����������������������������������������������������������������������������������������������������ustar �hornik��������������������������users������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������# OLE::Storage_Lite # by Kawai, Takanori (Hippo2000) 2000.11.4, 8, 14 # This Program is Still ALPHA version. #////////////////////////////////////////////////////////////////////////////// # OLE::Storage_Lite::PPS Object #////////////////////////////////////////////////////////////////////////////// #============================================================================== # OLE::Storage_Lite::PPS #============================================================================== package OLE::Storage_Lite::PPS; require Exporter; use strict; use vars qw($VERSION @ISA); @ISA = qw(Exporter); $VERSION = '0.19'; #------------------------------------------------------------------------------ # new (OLE::Storage_Lite::PPS) #------------------------------------------------------------------------------ sub new ($$$$$$$$$$;$$) { #1. 
Constructor for General Usage my($sClass, $iNo, $sNm, $iType, $iPrev, $iNext, $iDir, $raTime1st, $raTime2nd, $iStart, $iSize, $sData, $raChild) = @_; if($iType == OLE::Storage_Lite::PpsType_File()) { #FILE return OLE::Storage_Lite::PPS::File->_new ($iNo, $sNm, $iType, $iPrev, $iNext, $iDir, $raTime1st, $raTime2nd, $iStart, $iSize, $sData, $raChild); } elsif($iType == OLE::Storage_Lite::PpsType_Dir()) { #DIRECTRY return OLE::Storage_Lite::PPS::Dir->_new ($iNo, $sNm, $iType, $iPrev, $iNext, $iDir, $raTime1st, $raTime2nd, $iStart, $iSize, $sData, $raChild); } elsif($iType == OLE::Storage_Lite::PpsType_Root()) { #ROOT return OLE::Storage_Lite::PPS::Root->_new ($iNo, $sNm, $iType, $iPrev, $iNext, $iDir, $raTime1st, $raTime2nd, $iStart, $iSize, $sData, $raChild); } else { die "Error PPS:$iType $sNm\n"; } } #------------------------------------------------------------------------------ # _new (OLE::Storage_Lite::PPS) # for OLE::Storage_Lite #------------------------------------------------------------------------------ sub _new ($$$$$$$$$$$;$$) { my($sClass, $iNo, $sNm, $iType, $iPrev, $iNext, $iDir, $raTime1st, $raTime2nd, $iStart, $iSize, $sData, $raChild) = @_; #1. Constructor for OLE::Storage_Lite my $oThis = { No => $iNo, Name => $sNm, Type => $iType, PrevPps => $iPrev, NextPps => $iNext, DirPps => $iDir, Time1st => $raTime1st, Time2nd => $raTime2nd, StartBlock => $iStart, Size => $iSize, Data => $sData, Child => $raChild, }; bless $oThis, $sClass; return $oThis; } #------------------------------------------------------------------------------ # _DataLen (OLE::Storage_Lite::PPS) # Check for update #------------------------------------------------------------------------------ sub _DataLen($) { my($oSelf) =@_; return 0 unless(defined($oSelf->{Data})); return ($oSelf->{_PPS_FILE})? ($oSelf->{_PPS_FILE}->stat())[7] : length($oSelf->{Data}); } #------------------------------------------------------------------------------ # _makeSmallData (OLE::Storage_Lite::PPS) #------------------------------------------------------------------------------ sub _makeSmallData($$$) { my($oThis, $aList, $rhInfo) = @_; my ($sRes); my $FILE = $rhInfo->{_FILEH_}; my $iSmBlk = 0; foreach my $oPps (@$aList) { #1. Make SBD, small data string if($oPps->{Type}==OLE::Storage_Lite::PpsType_File()) { next if($oPps->{Size}<=0); if($oPps->{Size} < $rhInfo->{_SMALL_SIZE}) { my $iSmbCnt = int($oPps->{Size} / $rhInfo->{_SMALL_BLOCK_SIZE}) + (($oPps->{Size} % $rhInfo->{_SMALL_BLOCK_SIZE})? 1: 0); #1.1 Add to SBD for (my $i = 0; $i<($iSmbCnt-1); $i++) { print {$FILE} (pack("V", $i+$iSmBlk+1)); } print {$FILE} (pack("V", -2)); #1.2 Add to Data String(this will be written for RootEntry) #Check for update if($oPps->{_PPS_FILE}) { my $sBuff; $oPps->{_PPS_FILE}->seek(0, 0); #To The Top while($oPps->{_PPS_FILE}->read($sBuff, 4096)) { $sRes .= $sBuff; } } else { $sRes .= $oPps->{Data}; } $sRes .= ("\x00" x ($rhInfo->{_SMALL_BLOCK_SIZE} - ($oPps->{Size}% $rhInfo->{_SMALL_BLOCK_SIZE}))) if($oPps->{Size}% $rhInfo->{_SMALL_BLOCK_SIZE}); #1.3 Set for PPS $oPps->{StartBlock} = $iSmBlk; $iSmBlk += $iSmbCnt; } } } my $iSbCnt = int($rhInfo->{_BIG_BLOCK_SIZE}/ OLE::Storage_Lite::LongIntSize()); print {$FILE} (pack("V", -1) x ($iSbCnt - ($iSmBlk % $iSbCnt))) if($iSmBlk % $iSbCnt); #2. 
Write SBD with adjusting length for block return $sRes; } #------------------------------------------------------------------------------ # _savePpsWk (OLE::Storage_Lite::PPS) #------------------------------------------------------------------------------ sub _savePpsWk($$) { my($oThis, $rhInfo) = @_; #1. Write PPS my $FILE = $rhInfo->{_FILEH_}; print {$FILE} ( $oThis->{Name} . ("\x00" x (64 - length($oThis->{Name}))) #64 , pack("v", length($oThis->{Name}) + 2) #66 , pack("c", $oThis->{Type}) #67 , pack("c", 0x00) #UK #68 , pack("V", $oThis->{PrevPps}) #Prev #72 , pack("V", $oThis->{NextPps}) #Next #76 , pack("V", $oThis->{DirPps}) #Dir #80 , "\x00\x09\x02\x00" #84 , "\x00\x00\x00\x00" #88 , "\xc0\x00\x00\x00" #92 , "\x00\x00\x00\x46" #96 , "\x00\x00\x00\x00" #100 , OLE::Storage_Lite::LocalDate2OLE($oThis->{Time1st}) #108 , OLE::Storage_Lite::LocalDate2OLE($oThis->{Time2nd}) #116 , pack("V", defined($oThis->{StartBlock})? $oThis->{StartBlock}:0) #116 , pack("V", defined($oThis->{Size})? $oThis->{Size} : 0) #124 , pack("V", 0), #128 ); } #////////////////////////////////////////////////////////////////////////////// # OLE::Storage_Lite::PPS::Root Object #////////////////////////////////////////////////////////////////////////////// #============================================================================== # OLE::Storage_Lite::PPS::Root #============================================================================== package OLE::Storage_Lite::PPS::Root; require Exporter; use strict; use IO::File; use IO::Handle; use Fcntl; use vars qw($VERSION @ISA); @ISA = qw(OLE::Storage_Lite::PPS Exporter); $VERSION = '0.19'; sub _savePpsSetPnt($$$); sub _savePpsSetPnt2($$$); #------------------------------------------------------------------------------ # new (OLE::Storage_Lite::PPS::Root) #------------------------------------------------------------------------------ sub new ($;$$$) { my($sClass, $raTime1st, $raTime2nd, $raChild) = @_; OLE::Storage_Lite::PPS::_new( $sClass, undef, OLE::Storage_Lite::Asc2Ucs('Root Entry'), 5, undef, undef, undef, $raTime1st, $raTime2nd, undef, undef, undef, $raChild); } #------------------------------------------------------------------------------ # save (OLE::Storage_Lite::PPS::Root) #------------------------------------------------------------------------------ sub save($$;$$) { my($oThis, $sFile, $bNoAs, $rhInfo) = @_; #0.Initial Setting for saving $rhInfo = {} unless($rhInfo); $rhInfo->{_BIG_BLOCK_SIZE} = 2** (($rhInfo->{_BIG_BLOCK_SIZE})? _adjust2($rhInfo->{_BIG_BLOCK_SIZE}) : 9); $rhInfo->{_SMALL_BLOCK_SIZE}= 2 ** (($rhInfo->{_SMALL_BLOCK_SIZE})? _adjust2($rhInfo->{_SMALL_BLOCK_SIZE}): 6); $rhInfo->{_SMALL_SIZE} = 0x1000; $rhInfo->{_PPS_SIZE} = 0x80; my $closeFile = 1; #1.Open File #1.1 $sFile is Ref of scalar if(ref($sFile) eq 'SCALAR') { require IO::Scalar; my $oIo = new IO::Scalar $sFile, O_WRONLY; $rhInfo->{_FILEH_} = $oIo; } #1.1.1 $sFile is a IO::Scalar object # Now handled as a filehandle ref below. #1.2 $sFile is a IO::Handle object elsif(UNIVERSAL::isa($sFile, 'IO::Handle')) { # Not all filehandles support binmode() so try it in an eval. 
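# (some handle classes, e.g. in-memory or tied handles, may not support
# binmode, so a failure here is deliberately ignored)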
eval{ binmode $sFile }; $rhInfo->{_FILEH_} = $sFile; } #1.3 $sFile is a simple filename string elsif(!ref($sFile)) { if($sFile ne '-') { my $oIo = new IO::File; $oIo->open(">$sFile") || return undef; binmode($oIo); $rhInfo->{_FILEH_} = $oIo; } else { my $oIo = new IO::Handle; $oIo->fdopen(fileno(STDOUT),"w") || return undef; binmode($oIo); $rhInfo->{_FILEH_} = $oIo; } } #1.4 Assume that if $sFile is a ref then it is a valid filehandle else { # Not all filehandles support binmode() so try it in an eval. eval{ binmode $sFile }; $rhInfo->{_FILEH_} = $sFile; # Caller controls filehandle closing $closeFile = 0; } my $iBlk = 0; #1. Make an array of PPS (for Save) my @aList=(); if($bNoAs) { _savePpsSetPnt2([$oThis], \@aList, $rhInfo); } else { _savePpsSetPnt([$oThis], \@aList, $rhInfo); } my ($iSBDcnt, $iBBcnt, $iPPScnt) = $oThis->_calcSize(\@aList, $rhInfo); #2.Save Header $oThis->_saveHeader($rhInfo, $iSBDcnt, $iBBcnt, $iPPScnt); #3.Make Small Data string (write SBD) my $sSmWk = $oThis->_makeSmallData(\@aList, $rhInfo); $oThis->{Data} = $sSmWk; #Small Datas become RootEntry Data #4. Write BB my $iBBlk = $iSBDcnt; $oThis->_saveBigData(\$iBBlk, \@aList, $rhInfo); #5. Write PPS $oThis->_savePps(\@aList, $rhInfo); #6. Write BD and BDList and Adding Header informations $oThis->_saveBbd($iSBDcnt, $iBBcnt, $iPPScnt, $rhInfo); #7.Close File return $rhInfo->{_FILEH_}->close if $closeFile; } #------------------------------------------------------------------------------ # _calcSize (OLE::Storage_Lite::PPS) #------------------------------------------------------------------------------ sub _calcSize($$) { my($oThis, $raList, $rhInfo) = @_; #0. Calculate Basic Setting my ($iSBDcnt, $iBBcnt, $iPPScnt) = (0,0,0); my $iSmallLen = 0; my $iSBcnt = 0; foreach my $oPps (@$raList) { if($oPps->{Type}==OLE::Storage_Lite::PpsType_File()) { $oPps->{Size} = $oPps->_DataLen(); #Mod if($oPps->{Size} < $rhInfo->{_SMALL_SIZE}) { $iSBcnt += int($oPps->{Size} / $rhInfo->{_SMALL_BLOCK_SIZE}) + (($oPps->{Size} % $rhInfo->{_SMALL_BLOCK_SIZE})? 1: 0); } else { $iBBcnt += (int($oPps->{Size}/ $rhInfo->{_BIG_BLOCK_SIZE}) + (($oPps->{Size}% $rhInfo->{_BIG_BLOCK_SIZE})? 1: 0)); } } } $iSmallLen = $iSBcnt * $rhInfo->{_SMALL_BLOCK_SIZE}; my $iSlCnt = int($rhInfo->{_BIG_BLOCK_SIZE}/ OLE::Storage_Lite::LongIntSize()); $iSBDcnt = int($iSBcnt / $iSlCnt)+ (($iSBcnt % $iSlCnt)? 1:0); $iBBcnt += (int($iSmallLen/ $rhInfo->{_BIG_BLOCK_SIZE}) + (( $iSmallLen% $rhInfo->{_BIG_BLOCK_SIZE})? 1: 0)); my $iCnt = scalar(@$raList); my $iBdCnt = $rhInfo->{_BIG_BLOCK_SIZE}/OLE::Storage_Lite::PpsSize(); $iPPScnt = (int($iCnt/$iBdCnt) + (($iCnt % $iBdCnt)? 1: 0)); return ($iSBDcnt, $iBBcnt, $iPPScnt); } #------------------------------------------------------------------------------ # _adjust2 (OLE::Storage_Lite::PPS::Root) #------------------------------------------------------------------------------ sub _adjust2($) { my($i2) = @_; my $iWk; $iWk = log($i2)/log(2); return ($iWk > int($iWk))? int($iWk)+1:$iWk; } #------------------------------------------------------------------------------ # _saveHeader (OLE::Storage_Lite::PPS::Root) #------------------------------------------------------------------------------ sub _saveHeader($$$$$) { my($oThis, $rhInfo, $iSBDcnt, $iBBcnt, $iPPScnt) = @_; my $FILE = $rhInfo->{_FILEH_}; #0. 
Calculate Basic Setting my $iBlCnt = $rhInfo->{_BIG_BLOCK_SIZE} / OLE::Storage_Lite::LongIntSize(); my $i1stBdL = int(($rhInfo->{_BIG_BLOCK_SIZE} - 0x4C) / OLE::Storage_Lite::LongIntSize()); my $i1stBdMax = $i1stBdL * $iBlCnt - $i1stBdL; my $iBdExL = 0; my $iAll = $iBBcnt + $iPPScnt + $iSBDcnt; my $iAllW = $iAll; my $iBdCntW = int($iAllW / $iBlCnt) + (($iAllW % $iBlCnt)? 1: 0); my $iBdCnt = int(($iAll + $iBdCntW) / $iBlCnt) + ((($iAllW+$iBdCntW) % $iBlCnt)? 1: 0); my $i; if ($iBdCnt > $i1stBdL) { #0.1 Calculate BD count $iBlCnt--; #the BlCnt is reduced in the count of the last sect is used for a pointer the next Bl my $iBBleftover = $iAll - $i1stBdMax; if ($iAll >$i1stBdMax) { while(1) { $iBdCnt = int(($iBBleftover) / $iBlCnt) + ((($iBBleftover) % $iBlCnt)? 1: 0); $iBdExL = int(($iBdCnt) / $iBlCnt) + ((($iBdCnt) % $iBlCnt)? 1: 0); $iBBleftover = $iBBleftover + $iBdExL; last if($iBdCnt == (int(($iBBleftover) / $iBlCnt) + ((($iBBleftover) % $iBlCnt)? 1: 0))); } } $iBdCnt += $i1stBdL; #print "iBdCnt = $iBdCnt \n"; } #1.Save Header print {$FILE} ( "\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1" , "\x00\x00\x00\x00" x 4 , pack("v", 0x3b) , pack("v", 0x03) , pack("v", -2) , pack("v", 9) , pack("v", 6) , pack("v", 0) , "\x00\x00\x00\x00" x 2 , pack("V", $iBdCnt), , pack("V", $iBBcnt+$iSBDcnt), #ROOT START , pack("V", 0) , pack("V", 0x1000) , pack("V", $iSBDcnt ? 0 : -2) #Small Block Depot , pack("V", $iSBDcnt) ); #2. Extra BDList Start, Count if($iAll <= $i1stBdMax) { print {$FILE} ( pack("V", -2), #Extra BDList Start pack("V", 0), #Extra BDList Count ); } else { print {$FILE} ( pack("V", $iAll+$iBdCnt), pack("V", $iBdExL), ); } #3. BDList for($i=0; $i<$i1stBdL and $i < $iBdCnt; $i++) { print {$FILE} (pack("V", $iAll+$i)); } print {$FILE} ((pack("V", -1)) x($i1stBdL-$i)) if($i<$i1stBdL); } #------------------------------------------------------------------------------ # _saveBigData (OLE::Storage_Lite::PPS) #------------------------------------------------------------------------------ sub _saveBigData($$$$) { my($oThis, $iStBlk, $raList, $rhInfo) = @_; my $iRes = 0; my $FILE = $rhInfo->{_FILEH_}; #1.Write Big (ge 0x1000) Data into Block foreach my $oPps (@$raList) { if($oPps->{Type}!=OLE::Storage_Lite::PpsType_Dir()) { #print "PPS: $oPps DEF:", defined($oPps->{Data}), "\n"; $oPps->{Size} = $oPps->_DataLen(); #Mod if(($oPps->{Size} >= $rhInfo->{_SMALL_SIZE}) || (($oPps->{Type} == OLE::Storage_Lite::PpsType_Root()) && defined($oPps->{Data}))) { #1.1 Write Data #Check for update if($oPps->{_PPS_FILE}) { my $sBuff; my $iLen = 0; $oPps->{_PPS_FILE}->seek(0, 0); #To The Top while($oPps->{_PPS_FILE}->read($sBuff, 4096)) { $iLen += length($sBuff); print {$FILE} ($sBuff); #Check for update } } else { print {$FILE} ($oPps->{Data}); } print {$FILE} ( "\x00" x ($rhInfo->{_BIG_BLOCK_SIZE} - ($oPps->{Size} % $rhInfo->{_BIG_BLOCK_SIZE})) ) if ($oPps->{Size} % $rhInfo->{_BIG_BLOCK_SIZE}); #1.2 Set For PPS $oPps->{StartBlock} = $$iStBlk; $$iStBlk += (int($oPps->{Size}/ $rhInfo->{_BIG_BLOCK_SIZE}) + (($oPps->{Size}% $rhInfo->{_BIG_BLOCK_SIZE})? 1: 0)); } } } } #------------------------------------------------------------------------------ # _savePps (OLE::Storage_Lite::PPS::Root) #------------------------------------------------------------------------------ sub _savePps($$$) { my($oThis, $raList, $rhInfo) = @_; #0. Initial my $FILE = $rhInfo->{_FILEH_}; #2. Save PPS foreach my $oItem (@$raList) { $oItem->_savePpsWk($rhInfo); } #3. 
Adjust for Block my $iCnt = scalar(@$raList); my $iBCnt = $rhInfo->{_BIG_BLOCK_SIZE} / $rhInfo->{_PPS_SIZE}; print {$FILE} ("\x00" x (($iBCnt - ($iCnt % $iBCnt)) * $rhInfo->{_PPS_SIZE})) if($iCnt % $iBCnt); return int($iCnt / $iBCnt) + (($iCnt % $iBCnt)? 1: 0); } #------------------------------------------------------------------------------ # _savePpsSetPnt2 (OLE::Storage_Lite::PPS::Root) # For Test #------------------------------------------------------------------------------ sub _savePpsSetPnt2($$$) { my($aThis, $raList, $rhInfo) = @_; #1. make Array as Children-Relations #1.1 if No Children if($#$aThis < 0) { return 0xFFFFFFFF; } elsif($#$aThis == 0) { #1.2 Just Only one push @$raList, $aThis->[0]; $aThis->[0]->{No} = $#$raList; $aThis->[0]->{PrevPps} = 0xFFFFFFFF; $aThis->[0]->{NextPps} = 0xFFFFFFFF; $aThis->[0]->{DirPps} = _savePpsSetPnt2($aThis->[0]->{Child}, $raList, $rhInfo); return $aThis->[0]->{No}; } else { #1.3 Array my $iCnt = $#$aThis + 1; #1.3.1 Define Center my $iPos = 0; #int($iCnt/ 2); #$iCnt my @aWk = @$aThis; my @aPrev = ($#$aThis > 1)? splice(@aWk, 1, 1) : (); #$iPos); my @aNext = splice(@aWk, 1); #, $iCnt - $iPos -1); $aThis->[$iPos]->{PrevPps} = _savePpsSetPnt2( \@aPrev, $raList, $rhInfo); push @$raList, $aThis->[$iPos]; $aThis->[$iPos]->{No} = $#$raList; #1.3.2 Devide a array into Previous,Next $aThis->[$iPos]->{NextPps} = _savePpsSetPnt2( \@aNext, $raList, $rhInfo); $aThis->[$iPos]->{DirPps} = _savePpsSetPnt2($aThis->[$iPos]->{Child}, $raList, $rhInfo); return $aThis->[$iPos]->{No}; } } #------------------------------------------------------------------------------ # _savePpsSetPnt2 (OLE::Storage_Lite::PPS::Root) # For Test #------------------------------------------------------------------------------ sub _savePpsSetPnt2s($$$) { my($aThis, $raList, $rhInfo) = @_; #1. make Array as Children-Relations #1.1 if No Children if($#$aThis < 0) { return 0xFFFFFFFF; } elsif($#$aThis == 0) { #1.2 Just Only one push @$raList, $aThis->[0]; $aThis->[0]->{No} = $#$raList; $aThis->[0]->{PrevPps} = 0xFFFFFFFF; $aThis->[0]->{NextPps} = 0xFFFFFFFF; $aThis->[0]->{DirPps} = _savePpsSetPnt2($aThis->[0]->{Child}, $raList, $rhInfo); return $aThis->[0]->{No}; } else { #1.3 Array my $iCnt = $#$aThis + 1; #1.3.1 Define Center my $iPos = 0; #int($iCnt/ 2); #$iCnt push @$raList, $aThis->[$iPos]; $aThis->[$iPos]->{No} = $#$raList; my @aWk = @$aThis; #1.3.2 Devide a array into Previous,Next my @aPrev = splice(@aWk, 0, $iPos); my @aNext = splice(@aWk, 1, $iCnt - $iPos -1); $aThis->[$iPos]->{PrevPps} = _savePpsSetPnt2( \@aPrev, $raList, $rhInfo); $aThis->[$iPos]->{NextPps} = _savePpsSetPnt2( \@aNext, $raList, $rhInfo); $aThis->[$iPos]->{DirPps} = _savePpsSetPnt2($aThis->[$iPos]->{Child}, $raList, $rhInfo); return $aThis->[$iPos]->{No}; } } #------------------------------------------------------------------------------ # _savePpsSetPnt (OLE::Storage_Lite::PPS::Root) #------------------------------------------------------------------------------ sub _savePpsSetPnt($$$) { my($aThis, $raList, $rhInfo) = @_; #1. 
make Array as Children-Relations #1.1 if No Children if($#$aThis < 0) { return 0xFFFFFFFF; } elsif($#$aThis == 0) { #1.2 Just Only one push @$raList, $aThis->[0]; $aThis->[0]->{No} = $#$raList; $aThis->[0]->{PrevPps} = 0xFFFFFFFF; $aThis->[0]->{NextPps} = 0xFFFFFFFF; $aThis->[0]->{DirPps} = _savePpsSetPnt($aThis->[0]->{Child}, $raList, $rhInfo); return $aThis->[0]->{No}; } else { #1.3 Array my $iCnt = $#$aThis + 1; #1.3.1 Define Center my $iPos = int($iCnt/ 2); #$iCnt push @$raList, $aThis->[$iPos]; $aThis->[$iPos]->{No} = $#$raList; my @aWk = @$aThis; #1.3.2 Devide a array into Previous,Next my @aPrev = splice(@aWk, 0, $iPos); my @aNext = splice(@aWk, 1, $iCnt - $iPos -1); $aThis->[$iPos]->{PrevPps} = _savePpsSetPnt( \@aPrev, $raList, $rhInfo); $aThis->[$iPos]->{NextPps} = _savePpsSetPnt( \@aNext, $raList, $rhInfo); $aThis->[$iPos]->{DirPps} = _savePpsSetPnt($aThis->[$iPos]->{Child}, $raList, $rhInfo); return $aThis->[$iPos]->{No}; } } #------------------------------------------------------------------------------ # _savePpsSetPnt (OLE::Storage_Lite::PPS::Root) #------------------------------------------------------------------------------ sub _savePpsSetPnt1($$$) { my($aThis, $raList, $rhInfo) = @_; #1. make Array as Children-Relations #1.1 if No Children if($#$aThis < 0) { return 0xFFFFFFFF; } elsif($#$aThis == 0) { #1.2 Just Only one push @$raList, $aThis->[0]; $aThis->[0]->{No} = $#$raList; $aThis->[0]->{PrevPps} = 0xFFFFFFFF; $aThis->[0]->{NextPps} = 0xFFFFFFFF; $aThis->[0]->{DirPps} = _savePpsSetPnt($aThis->[0]->{Child}, $raList, $rhInfo); return $aThis->[0]->{No}; } else { #1.3 Array my $iCnt = $#$aThis + 1; #1.3.1 Define Center my $iPos = int($iCnt/ 2); #$iCnt push @$raList, $aThis->[$iPos]; $aThis->[$iPos]->{No} = $#$raList; my @aWk = @$aThis; #1.3.2 Devide a array into Previous,Next my @aPrev = splice(@aWk, 0, $iPos); my @aNext = splice(@aWk, 1, $iCnt - $iPos -1); $aThis->[$iPos]->{PrevPps} = _savePpsSetPnt( \@aPrev, $raList, $rhInfo); $aThis->[$iPos]->{NextPps} = _savePpsSetPnt( \@aNext, $raList, $rhInfo); $aThis->[$iPos]->{DirPps} = _savePpsSetPnt($aThis->[$iPos]->{Child}, $raList, $rhInfo); return $aThis->[$iPos]->{No}; } } #------------------------------------------------------------------------------ # _saveBbd (OLE::Storage_Lite) #------------------------------------------------------------------------------ sub _saveBbd($$$$) { my($oThis, $iSbdSize, $iBsize, $iPpsCnt, $rhInfo) = @_; my $FILE = $rhInfo->{_FILEH_}; #0. Calculate Basic Setting my $iBbCnt = $rhInfo->{_BIG_BLOCK_SIZE} / OLE::Storage_Lite::LongIntSize(); my $iBlCnt = $iBbCnt - 1; my $i1stBdL = int(($rhInfo->{_BIG_BLOCK_SIZE} - 0x4C) / OLE::Storage_Lite::LongIntSize()); my $i1stBdMax = $i1stBdL * $iBbCnt - $i1stBdL; my $iBdExL = 0; my $iAll = $iBsize + $iPpsCnt + $iSbdSize; my $iAllW = $iAll; my $iBdCntW = int($iAllW / $iBbCnt) + (($iAllW % $iBbCnt)? 1: 0); my $iBdCnt = 0; my $i; #0.1 Calculate BD count my $iBBleftover = $iAll - $i1stBdMax; if ($iAll >$i1stBdMax) { while(1) { $iBdCnt = int(($iBBleftover) / $iBlCnt) + ((($iBBleftover) % $iBlCnt)? 1: 0); $iBdExL = int(($iBdCnt) / $iBlCnt) + ((($iBdCnt) % $iBlCnt)? 1: 0); $iBBleftover = $iBBleftover + $iBdExL; last if($iBdCnt == (int(($iBBleftover) / $iBlCnt) + ((($iBBleftover) % $iBlCnt)? 1: 0))); } } $iAllW += $iBdExL; $iBdCnt += $i1stBdL; #print "iBdCnt = $iBdCnt \n"; #1. 
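# (Illustrative note, not part of the original source.) For the common case of
# 512-byte big blocks and 4-byte block numbers, the quantities computed above
# work out as:
#
#   $iBbCnt  = 512 / 4          = 128   entries per BBD block
#   $i1stBdL = (512 - 0x4C) / 4 = 109   BD slots held in the file header
#
# so the extra BD list blocks (marked 0xFFFFFFFC in step #1.5 below) are only
# needed once more than 109 BBD blocks are required.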
Making BD #1.1 Set for SBD if($iSbdSize > 0) { for ($i = 0; $i<($iSbdSize-1); $i++) { print {$FILE} (pack("V", $i+1)); } print {$FILE} (pack("V", -2)); } #1.2 Set for B for ($i = 0; $i<($iBsize-1); $i++) { print {$FILE} (pack("V", $i+$iSbdSize+1)); } print {$FILE} (pack("V", -2)); #1.3 Set for PPS for ($i = 0; $i<($iPpsCnt-1); $i++) { print {$FILE} (pack("V", $i+$iSbdSize+$iBsize+1)); } print {$FILE} (pack("V", -2)); #1.4 Set for BBD itself ( 0xFFFFFFFD : BBD) for($i=0; $i<$iBdCnt;$i++) { print {$FILE} (pack("V", 0xFFFFFFFD)); } #1.5 Set for ExtraBDList for($i=0; $i<$iBdExL;$i++) { print {$FILE} (pack("V", 0xFFFFFFFC)); } #1.6 Adjust for Block print {$FILE} (pack("V", -1) x ($iBbCnt - (($iAllW + $iBdCnt) % $iBbCnt))) if(($iAllW + $iBdCnt) % $iBbCnt); #2.Extra BDList if($iBdCnt > $i1stBdL) { my $iN=0; my $iNb=0; for($i=$i1stBdL;$i<$iBdCnt; $i++, $iN++) { if($iN>=($iBbCnt-1)) { $iN = 0; $iNb++; print {$FILE} (pack("V", $iAll+$iBdCnt+$iNb)); } print {$FILE} (pack("V", $iBsize+$iSbdSize+$iPpsCnt+$i)); } print {$FILE} (pack("V", -1) x (($iBbCnt-1) - (($iBdCnt-$i1stBdL) % ($iBbCnt-1)))) if(($iBdCnt-$i1stBdL) % ($iBbCnt-1)); print {$FILE} (pack("V", -2)); } } #////////////////////////////////////////////////////////////////////////////// # OLE::Storage_Lite::PPS::File Object #////////////////////////////////////////////////////////////////////////////// #============================================================================== # OLE::Storage_Lite::PPS::File #============================================================================== package OLE::Storage_Lite::PPS::File; require Exporter; use strict; use vars qw($VERSION @ISA); @ISA = qw(OLE::Storage_Lite::PPS Exporter); $VERSION = '0.19'; #------------------------------------------------------------------------------ # new (OLE::Storage_Lite::PPS::File) #------------------------------------------------------------------------------ sub new ($$$) { my($sClass, $sNm, $sData) = @_; OLE::Storage_Lite::PPS::_new( $sClass, undef, $sNm, 2, undef, undef, undef, undef, undef, undef, undef, $sData, undef); } #------------------------------------------------------------------------------ # newFile (OLE::Storage_Lite::PPS::File) #------------------------------------------------------------------------------ sub newFile ($$;$) { my($sClass, $sNm, $sFile) = @_; my $oSelf = OLE::Storage_Lite::PPS::_new( $sClass, undef, $sNm, 2, undef, undef, undef, undef, undef, undef, undef, '', undef); # if((!defined($sFile)) or ($sFile eq '')) { $oSelf->{_PPS_FILE} = IO::File->new_tmpfile(); } elsif(UNIVERSAL::isa($sFile, 'IO::Handle')) { $oSelf->{_PPS_FILE} = $sFile; } elsif(!ref($sFile)) { #File Name $oSelf->{_PPS_FILE} = new IO::File; return undef unless($oSelf->{_PPS_FILE}); $oSelf->{_PPS_FILE}->open("$sFile", "r+") || return undef; } else { return undef; } if($oSelf->{_PPS_FILE}) { $oSelf->{_PPS_FILE}->seek(0, 2); binmode($oSelf->{_PPS_FILE}); $oSelf->{_PPS_FILE}->autoflush(1); } return $oSelf; } #------------------------------------------------------------------------------ # append (OLE::Storage_Lite::PPS::File) #------------------------------------------------------------------------------ sub append ($$) { my($oSelf, $sData) = @_; if($oSelf->{_PPS_FILE}) { print {$oSelf->{_PPS_FILE}} $sData; } else { $oSelf->{Data} .= $sData; } } #////////////////////////////////////////////////////////////////////////////// # OLE::Storage_Lite::PPS::Dir Object #////////////////////////////////////////////////////////////////////////////// 
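# (Illustrative sketch, not part of the original source.) How the PPS classes
# in this file are typically combined; the stream, directory and file names
# used here ("Stream1", "Dir1", "sample.ole") are made up for the example:
#
#   use OLE::Storage_Lite;
#
#   my $oFile = OLE::Storage_Lite::PPS::File->new(
#                   OLE::Storage_Lite::Asc2Ucs('Stream1'), "some stream data");
#   $oFile->append("more data");              # see append() above
#   my $oDir  = OLE::Storage_Lite::PPS::Dir->new(
#                   OLE::Storage_Lite::Asc2Ucs('Dir1'), undef, undef, [$oFile]);
#   my $oRoot = OLE::Storage_Lite::PPS::Root->new(undef, undef, [$oDir]);
#   $oRoot->save('sample.ole');               # '-' would write to STDOUT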
#------------------------------------------------------------------------------ # new (OLE::Storage_Lite::PPS::Dir) #------------------------------------------------------------------------------ package OLE::Storage_Lite::PPS::Dir; require Exporter; use strict; use vars qw($VERSION @ISA); @ISA = qw(OLE::Storage_Lite::PPS Exporter); $VERSION = '0.19'; sub new ($$;$$$) { my($sClass, $sName, $raTime1st, $raTime2nd, $raChild) = @_; OLE::Storage_Lite::PPS::_new( $sClass, undef, $sName, 1, undef, undef, undef, $raTime1st, $raTime2nd, undef, undef, undef, $raChild); } #============================================================================== # OLE::Storage_Lite #============================================================================== package OLE::Storage_Lite; require Exporter; use strict; use IO::File; use Time::Local 'timegm'; use vars qw($VERSION @ISA @EXPORT); @ISA = qw(Exporter); $VERSION = '0.19'; sub _getPpsSearch($$$$$;$); sub _getPpsTree($$$;$); #------------------------------------------------------------------------------ # Const for OLE::Storage_Lite #------------------------------------------------------------------------------ #0. Constants sub PpsType_Root {5}; sub PpsType_Dir {1}; sub PpsType_File {2}; sub DataSizeSmall{0x1000}; sub LongIntSize {4}; sub PpsSize {0x80}; #------------------------------------------------------------------------------ # new OLE::Storage_Lite #------------------------------------------------------------------------------ sub new($$) { my($sClass, $sFile) = @_; my $oThis = { _FILE => $sFile, }; bless $oThis; return $oThis; } #------------------------------------------------------------------------------ # getPpsTree: OLE::Storage_Lite #------------------------------------------------------------------------------ sub getPpsTree($;$) { my($oThis, $bData) = @_; #0.Init my $rhInfo = _initParse($oThis->{_FILE}); return undef unless($rhInfo); #1. Get Data my ($oPps) = _getPpsTree(0, $rhInfo, $bData); close(IN); return $oPps; } #------------------------------------------------------------------------------ # getSearch: OLE::Storage_Lite #------------------------------------------------------------------------------ sub getPpsSearch($$;$$) { my($oThis, $raName, $bData, $iCase) = @_; #0.Init my $rhInfo = _initParse($oThis->{_FILE}); return undef unless($rhInfo); #1. Get Data my @aList = _getPpsSearch(0, $rhInfo, $raName, $bData, $iCase); close(IN); return @aList; } #------------------------------------------------------------------------------ # getNthPps: OLE::Storage_Lite #------------------------------------------------------------------------------ sub getNthPps($$;$) { my($oThis, $iNo, $bData) = @_; #0.Init my $rhInfo = _initParse($oThis->{_FILE}); return undef unless($rhInfo); #1. Get Data my $oPps = _getNthPps($iNo, $rhInfo, $bData); close IN; return $oPps; } #------------------------------------------------------------------------------ # _initParse: OLE::Storage_Lite #------------------------------------------------------------------------------ sub _initParse($) { my($sFile)=@_; my $oIo; #1. $sFile is Ref of scalar if(ref($sFile) eq 'SCALAR') { require IO::Scalar; $oIo = new IO::Scalar; $oIo->open($sFile); } #2. $sFile is a IO::Handle object elsif(UNIVERSAL::isa($sFile, 'IO::Handle')) { $oIo = $sFile; binmode($oIo); } #3. 
$sFile is a simple filename string elsif(!ref($sFile)) { $oIo = new IO::File; $oIo->open("<$sFile") || return undef; binmode($oIo); } #4 Assume that if $sFile is a ref then it is a valid filehandle else { $oIo = $sFile; # Not all filehandles support binmode() so try it in an eval. eval{ binmode $oIo }; } return _getHeaderInfo($oIo); } #------------------------------------------------------------------------------ # _getPpsTree: OLE::Storage_Lite #------------------------------------------------------------------------------ sub _getPpsTree($$$;$) { my($iNo, $rhInfo, $bData, $raDone) = @_; if(defined($raDone)) { return () if(grep {$_ ==$iNo} @$raDone); } else { $raDone=[]; } push @$raDone, $iNo; my $iRootBlock = $rhInfo->{_ROOT_START} ; #1. Get Information about itself my $oPps = _getNthPps($iNo, $rhInfo, $bData); #2. Child if($oPps->{DirPps} != 0xFFFFFFFF) { my @aChildL = _getPpsTree($oPps->{DirPps}, $rhInfo, $bData, $raDone); $oPps->{Child} = \@aChildL; } else { $oPps->{Child} = undef; } #3. Previous,Next PPSs my @aList = (); push @aList, _getPpsTree($oPps->{PrevPps}, $rhInfo, $bData, $raDone) if($oPps->{PrevPps} != 0xFFFFFFFF); push @aList, $oPps; push @aList, _getPpsTree($oPps->{NextPps}, $rhInfo, $bData, $raDone) if($oPps->{NextPps} != 0xFFFFFFFF); return @aList; } #------------------------------------------------------------------------------ # _getPpsSearch: OLE::Storage_Lite #------------------------------------------------------------------------------ sub _getPpsSearch($$$$$;$) { my($iNo, $rhInfo, $raName, $bData, $iCase, $raDone) = @_; my $iRootBlock = $rhInfo->{_ROOT_START} ; my @aRes; #1. Check it self if(defined($raDone)) { return () if(grep {$_==$iNo} @$raDone); } else { $raDone=[]; } push @$raDone, $iNo; my $oPps = _getNthPps($iNo, $rhInfo, undef); # if(grep($_ eq $oPps->{Name}, @$raName)) { if(($iCase && (grep(/^\Q$oPps->{Name}\E$/i, @$raName))) || (grep($_ eq $oPps->{Name}, @$raName))) { $oPps = _getNthPps($iNo, $rhInfo, $bData) if ($bData); @aRes = ($oPps); } else { @aRes = (); } #2. Check Child, Previous, Next PPSs push @aRes, _getPpsSearch($oPps->{DirPps}, $rhInfo, $raName, $bData, $iCase, $raDone) if($oPps->{DirPps} != 0xFFFFFFFF) ; push @aRes, _getPpsSearch($oPps->{PrevPps}, $rhInfo, $raName, $bData, $iCase, $raDone) if($oPps->{PrevPps} != 0xFFFFFFFF ); push @aRes, _getPpsSearch($oPps->{NextPps}, $rhInfo, $raName, $bData, $iCase, $raDone) if($oPps->{NextPps} != 0xFFFFFFFF); return @aRes; } #=================================================================== # Get Header Info (BASE Informain about that file) #=================================================================== sub _getHeaderInfo($){ my($FILE) = @_; my($iWk); my $rhInfo = {}; $rhInfo->{_FILEH_} = $FILE; my $sWk; #0. 
Check ID $rhInfo->{_FILEH_}->seek(0, 0); $rhInfo->{_FILEH_}->read($sWk, 8); return undef unless($sWk eq "\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1"); #BIG BLOCK SIZE $iWk = _getInfoFromFile($rhInfo->{_FILEH_}, 0x1E, 2, "v"); return undef unless(defined($iWk)); $rhInfo->{_BIG_BLOCK_SIZE} = 2 ** $iWk; #SMALL BLOCK SIZE $iWk = _getInfoFromFile($rhInfo->{_FILEH_}, 0x20, 2, "v"); return undef unless(defined($iWk)); $rhInfo->{_SMALL_BLOCK_SIZE} = 2 ** $iWk; #BDB Count $iWk = _getInfoFromFile($rhInfo->{_FILEH_}, 0x2C, 4, "V"); return undef unless(defined($iWk)); $rhInfo->{_BDB_COUNT} = $iWk; #START BLOCK $iWk = _getInfoFromFile($rhInfo->{_FILEH_}, 0x30, 4, "V"); return undef unless(defined($iWk)); $rhInfo->{_ROOT_START} = $iWk; #MIN SIZE OF BB # $iWk = _getInfoFromFile($rhInfo->{_FILEH_}, 0x38, 4, "V"); # return undef unless(defined($iWk)); # $rhInfo->{_MIN_SIZE_BB} = $iWk; #SMALL BD START $iWk = _getInfoFromFile($rhInfo->{_FILEH_}, 0x3C, 4, "V"); return undef unless(defined($iWk)); $rhInfo->{_SBD_START} = $iWk; #SMALL BD COUNT $iWk = _getInfoFromFile($rhInfo->{_FILEH_}, 0x40, 4, "V"); return undef unless(defined($iWk)); $rhInfo->{_SBD_COUNT} = $iWk; #EXTRA BBD START $iWk = _getInfoFromFile($rhInfo->{_FILEH_}, 0x44, 4, "V"); return undef unless(defined($iWk)); $rhInfo->{_EXTRA_BBD_START} = $iWk; #EXTRA BD COUNT $iWk = _getInfoFromFile($rhInfo->{_FILEH_}, 0x48, 4, "V"); return undef unless(defined($iWk)); $rhInfo->{_EXTRA_BBD_COUNT} = $iWk; #GET BBD INFO $rhInfo->{_BBD_INFO}= _getBbdInfo($rhInfo); #GET ROOT PPS my $oRoot = _getNthPps(0, $rhInfo, undef); $rhInfo->{_SB_START} = $oRoot->{StartBlock}; $rhInfo->{_SB_SIZE} = $oRoot->{Size}; return $rhInfo; } #------------------------------------------------------------------------------ # _getInfoFromFile #------------------------------------------------------------------------------ sub _getInfoFromFile($$$$) { my($FILE, $iPos, $iLen, $sFmt) =@_; my($sWk); return undef unless($FILE); return undef if($FILE->seek($iPos, 0)==0); return undef if($FILE->read($sWk, $iLen)!=$iLen); return unpack($sFmt, $sWk); } #------------------------------------------------------------------------------ # _getBbdInfo #------------------------------------------------------------------------------ sub _getBbdInfo($) { my($rhInfo) =@_; my @aBdList = (); my $iBdbCnt = $rhInfo->{_BDB_COUNT}; my $iGetCnt; my $sWk; my $i1stCnt = int(($rhInfo->{_BIG_BLOCK_SIZE} - 0x4C) / OLE::Storage_Lite::LongIntSize()); my $iBdlCnt = int($rhInfo->{_BIG_BLOCK_SIZE} / OLE::Storage_Lite::LongIntSize()) - 1; #1. 1st BDlist $rhInfo->{_FILEH_}->seek(0x4C, 0); $iGetCnt = ($iBdbCnt < $i1stCnt)? $iBdbCnt: $i1stCnt; $rhInfo->{_FILEH_}->read($sWk, OLE::Storage_Lite::LongIntSize()*$iGetCnt); push @aBdList, unpack("V$iGetCnt", $sWk); $iBdbCnt -= $iGetCnt; #2. Extra BDList my $iBlock = $rhInfo->{_EXTRA_BBD_START}; while(($iBdbCnt> 0) && _isNormalBlock($iBlock)){ _setFilePos($iBlock, 0, $rhInfo); $iGetCnt= ($iBdbCnt < $iBdlCnt)? 
$iBdbCnt: $iBdlCnt; $rhInfo->{_FILEH_}->read($sWk, OLE::Storage_Lite::LongIntSize()*$iGetCnt); push @aBdList, unpack("V$iGetCnt", $sWk); $iBdbCnt -= $iGetCnt; $rhInfo->{_FILEH_}->read($sWk, OLE::Storage_Lite::LongIntSize()); $iBlock = unpack("V", $sWk); } #3.Get BDs my @aWk; my %hBd; my $iBlkNo = 0; my $iBdL; my $i; my $iBdCnt = int($rhInfo->{_BIG_BLOCK_SIZE} / OLE::Storage_Lite::LongIntSize()); foreach $iBdL (@aBdList) { _setFilePos($iBdL, 0, $rhInfo); $rhInfo->{_FILEH_}->read($sWk, $rhInfo->{_BIG_BLOCK_SIZE}); @aWk = unpack("V$iBdCnt", $sWk); for($i=0;$i<$iBdCnt;$i++, $iBlkNo++) { if($aWk[$i] != ($iBlkNo+1)){ $hBd{$iBlkNo} = $aWk[$i]; } } } return \%hBd; } #------------------------------------------------------------------------------ # getNthPps (OLE::Storage_Lite) #------------------------------------------------------------------------------ sub _getNthPps($$$){ my($iPos, $rhInfo, $bData) = @_; my($iPpsStart) = ($rhInfo->{_ROOT_START}); my($iPpsBlock, $iPpsPos); my $sWk; my $iBlock; my $iBaseCnt = $rhInfo->{_BIG_BLOCK_SIZE} / OLE::Storage_Lite::PpsSize(); $iPpsBlock = int($iPos / $iBaseCnt); $iPpsPos = $iPos % $iBaseCnt; $iBlock = _getNthBlockNo($iPpsStart, $iPpsBlock, $rhInfo); return undef unless(defined($iBlock)); _setFilePos($iBlock, OLE::Storage_Lite::PpsSize()*$iPpsPos, $rhInfo); $rhInfo->{_FILEH_}->read($sWk, OLE::Storage_Lite::PpsSize()); return undef unless($sWk); my $iNmSize = unpack("v", substr($sWk, 0x40, 2)); $iNmSize = ($iNmSize > 2)? $iNmSize - 2 : $iNmSize; my $sNm= substr($sWk, 0, $iNmSize); my $iType = unpack("C", substr($sWk, 0x42, 2)); my $lPpsPrev = unpack("V", substr($sWk, 0x44, OLE::Storage_Lite::LongIntSize())); my $lPpsNext = unpack("V", substr($sWk, 0x48, OLE::Storage_Lite::LongIntSize())); my $lDirPps = unpack("V", substr($sWk, 0x4C, OLE::Storage_Lite::LongIntSize())); my @raTime1st = (($iType == OLE::Storage_Lite::PpsType_Root()) or ($iType == OLE::Storage_Lite::PpsType_Dir()))? OLEDate2Local(substr($sWk, 0x64, 8)) : undef , my @raTime2nd = (($iType == OLE::Storage_Lite::PpsType_Root()) or ($iType == OLE::Storage_Lite::PpsType_Dir()))? 
OLEDate2Local(substr($sWk, 0x6C, 8)) : undef, my($iStart, $iSize) = unpack("VV", substr($sWk, 0x74, 8)); if($bData) { my $sData = _getData($iType, $iStart, $iSize, $rhInfo); return OLE::Storage_Lite::PPS->new( $iPos, $sNm, $iType, $lPpsPrev, $lPpsNext, $lDirPps, \@raTime1st, \@raTime2nd, $iStart, $iSize, $sData, undef); } else { return OLE::Storage_Lite::PPS->new( $iPos, $sNm, $iType, $lPpsPrev, $lPpsNext, $lDirPps, \@raTime1st, \@raTime2nd, $iStart, $iSize, undef, undef); } } #------------------------------------------------------------------------------ # _setFilePos (OLE::Storage_Lite) #------------------------------------------------------------------------------ sub _setFilePos($$$){ my($iBlock, $iPos, $rhInfo) = @_; $rhInfo->{_FILEH_}->seek(($iBlock+1)*$rhInfo->{_BIG_BLOCK_SIZE}+$iPos, 0); } #------------------------------------------------------------------------------ # _getNthBlockNo (OLE::Storage_Lite) #------------------------------------------------------------------------------ sub _getNthBlockNo($$$){ my($iStBlock, $iNth, $rhInfo) = @_; my $iSv; my $iNext = $iStBlock; for(my $i =0; $i<$iNth; $i++) { $iSv = $iNext; $iNext = _getNextBlockNo($iSv, $rhInfo); return undef unless _isNormalBlock($iNext); } return $iNext; } #------------------------------------------------------------------------------ # _getData (OLE::Storage_Lite) #------------------------------------------------------------------------------ sub _getData($$$$) { my($iType, $iBlock, $iSize, $rhInfo) = @_; if ($iType == OLE::Storage_Lite::PpsType_File()) { if($iSize < OLE::Storage_Lite::DataSizeSmall()) { return _getSmallData($iBlock, $iSize, $rhInfo); } else { return _getBigData($iBlock, $iSize, $rhInfo); } } elsif($iType == OLE::Storage_Lite::PpsType_Root()) { #Root return _getBigData($iBlock, $iSize, $rhInfo); } elsif($iType == OLE::Storage_Lite::PpsType_Dir()) { # Directory return undef; } } #------------------------------------------------------------------------------ # _getBigData (OLE::Storage_Lite) #------------------------------------------------------------------------------ sub _getBigData($$$) { my($iBlock, $iSize, $rhInfo) = @_; my($iRest, $sWk, $sRes); return '' unless(_isNormalBlock($iBlock)); $iRest = $iSize; my($i, $iGetSize, $iNext); $sRes = ''; my @aKeys= sort({$a<=>$b} keys(%{$rhInfo->{_BBD_INFO}})); while ($iRest > 0) { my @aRes = grep($_ >= $iBlock, @aKeys); my $iNKey = $aRes[0]; $i = $iNKey - $iBlock; $iNext = $rhInfo->{_BBD_INFO}{$iNKey}; _setFilePos($iBlock, 0, $rhInfo); my $iGetSize = ($rhInfo->{_BIG_BLOCK_SIZE} * ($i+1)); $iGetSize = $iRest if($iRest < $iGetSize); $rhInfo->{_FILEH_}->read( $sWk, $iGetSize); $sRes .= $sWk; $iRest -= $iGetSize; $iBlock= $iNext; } return $sRes; } #------------------------------------------------------------------------------ # _getNextBlockNo (OLE::Storage_Lite) #------------------------------------------------------------------------------ sub _getNextBlockNo($$){ my($iBlockNo, $rhInfo) = @_; my $iRes = $rhInfo->{_BBD_INFO}->{$iBlockNo}; return defined($iRes)? $iRes: $iBlockNo+1; } #------------------------------------------------------------------------------ # _isNormalBlock (OLE::Storage_Lite) # 0xFFFFFFFC : BDList, 0xFFFFFFFD : BBD, # 0xFFFFFFFE: End of Chain 0xFFFFFFFF : unused #------------------------------------------------------------------------------ sub _isNormalBlock($){ my($iBlock) = @_; return ($iBlock < 0xFFFFFFFC)? 
1: undef; } #------------------------------------------------------------------------------ # _getSmallData (OLE::Storage_Lite) #------------------------------------------------------------------------------ sub _getSmallData($$$) { my($iSmBlock, $iSize, $rhInfo) = @_; my($sRes, $sWk); my $iRest = $iSize; $sRes = ''; while ($iRest > 0) { _setFilePosSmall($iSmBlock, $rhInfo); $rhInfo->{_FILEH_}->read($sWk, ($iRest >= $rhInfo->{_SMALL_BLOCK_SIZE})? $rhInfo->{_SMALL_BLOCK_SIZE}: $iRest); $sRes .= $sWk; $iRest -= $rhInfo->{_SMALL_BLOCK_SIZE}; $iSmBlock= _getNextSmallBlockNo($iSmBlock, $rhInfo); } return $sRes; } #------------------------------------------------------------------------------ # _setFilePosSmall(OLE::Storage_Lite) #------------------------------------------------------------------------------ sub _setFilePosSmall($$) { my($iSmBlock, $rhInfo) = @_; my $iSmStart = $rhInfo->{_SB_START}; my $iBaseCnt = $rhInfo->{_BIG_BLOCK_SIZE} / $rhInfo->{_SMALL_BLOCK_SIZE}; my $iNth = int($iSmBlock/$iBaseCnt); my $iPos = $iSmBlock % $iBaseCnt; my $iBlk = _getNthBlockNo($iSmStart, $iNth, $rhInfo); _setFilePos($iBlk, $iPos * $rhInfo->{_SMALL_BLOCK_SIZE}, $rhInfo); } #------------------------------------------------------------------------------ # _getNextSmallBlockNo (OLE::Storage_Lite) #------------------------------------------------------------------------------ sub _getNextSmallBlockNo($$) { my($iSmBlock, $rhInfo) = @_; my($sWk); my $iBaseCnt = $rhInfo->{_BIG_BLOCK_SIZE} / OLE::Storage_Lite::LongIntSize(); my $iNth = int($iSmBlock/$iBaseCnt); my $iPos = $iSmBlock % $iBaseCnt; my $iBlk = _getNthBlockNo($rhInfo->{_SBD_START}, $iNth, $rhInfo); _setFilePos($iBlk, $iPos * OLE::Storage_Lite::LongIntSize(), $rhInfo); $rhInfo->{_FILEH_}->read($sWk, OLE::Storage_Lite::LongIntSize()); return unpack("V", $sWk); } #------------------------------------------------------------------------------ # Asc2Ucs: OLE::Storage_Lite #------------------------------------------------------------------------------ sub Asc2Ucs($) { my($sAsc) = @_; return join("\x00", split //, $sAsc) . "\x00"; } #------------------------------------------------------------------------------ # Ucs2Asc: OLE::Storage_Lite #------------------------------------------------------------------------------ sub Ucs2Asc($) { my($sUcs) = @_; return join('', map(pack('c', $_), unpack('v*', $sUcs))); } #------------------------------------------------------------------------------ # OLEDate2Local() # # Convert from a Window FILETIME structure to a localtime array. FILETIME is # a 64-bit value representing the number of 100-nanosecond intervals since # January 1 1601. # # We first convert the FILETIME to seconds and then subtract the difference # between the 1601 epoch and the 1970 Unix epoch. # sub OLEDate2Local { my $oletime = shift; # Unpack the FILETIME into high and low longs. my ( $lo, $hi ) = unpack 'V2', $oletime; # Convert the longs to a double. my $nanoseconds = $hi * 2**32 + $lo; # Convert the 100 nanosecond units into seconds. my $time = $nanoseconds / 1e7; # Subtract the number of seconds between the 1601 and 1970 epochs. $time -= 11644473600; # Convert to a localtime (actually gmtime) structure. my @localtime = gmtime($time); return @localtime; } #------------------------------------------------------------------------------ # LocalDate2OLE() # # Convert from a a localtime array to a Window FILETIME structure. FILETIME is # a 64-bit value representing the number of 100-nanosecond intervals since # January 1 1601. 
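# (Added note, not in the original source.) The 11644473600-second offset used
# by OLEDate2Local() above and LocalDate2OLE() below is simply the gap between
# the two epochs: 1601-01-01 to 1970-01-01 spans 369 years containing 89 leap
# days, i.e. 369 * 365 + 89 = 134774 days, and 134774 * 86400 = 11644473600
# seconds. A quick self-check from the command line:
#
#   perl -le 'print +(369 * 365 + 89) * 86400'    # prints 11644473600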
# # We first convert the localtime (actually gmtime) to seconds and then add the # difference between the 1601 epoch and the 1970 Unix epoch. We convert that to # 100 nanosecond units, divide it into high and low longs and return it as a # packed 64bit structure. # sub LocalDate2OLE { my $localtime = shift; return "\x00" x 8 unless $localtime; # Convert from localtime (actually gmtime) to seconds. my $time = timegm( @{$localtime} ); # Add the number of seconds between the 1601 and 1970 epochs. $time += 11644473600; # The FILETIME seconds are in units of 100 nanoseconds. my $nanoseconds = $time * 1E7; use POSIX 'fmod'; # Pack the total nanoseconds into 64 bits... my $hi = int( $nanoseconds / 2**32 ); my $lo = fmod($nanoseconds, 2**32); my $oletime = pack "VV", $lo, $hi; return $oletime; } 1; __END__ =head1 NAME OLE::Storage_Lite - Simple Class for OLE document interface. =head1 SYNOPSIS use OLE::Storage_Lite; # Initialize. # From a file my $oOl = OLE::Storage_Lite->new("some.xls"); # From a filehandle object use IO::File; my $oIo = new IO::File; $oIo->open("<iofile.xls"); binmode($oIo); my $oOl = OLE::Storage_Lite->new($oIo); # Read data my $oPps = $oOl->getPpsTree(1); # Save Data # To a File $oPps->save("kaba.xls"); #kaba.xls $oPps->save('-'); #STDOUT # To a filehandle object my $oIo = new IO::File; $oIo->open(">iofile.xls"); binmode($oIo); $oPps->save($oIo); =head1 DESCRIPTION OLE::Storage_Lite allows you to read and write an OLE structured file. OLE::Storage_Lite::PPS is a class representing a PPS. OLE::Storage_Lite::PPS::Root, OLE::Storage_Lite::PPS::File and OLE::Storage_Lite::PPS::Dir are subclasses of OLE::Storage_Lite::PPS. =head2 new() Constructor. $oOle = OLE::Storage_Lite->new($sFile); Creates an OLE::Storage_Lite object for C<$sFile>. C<$sFile> must be a correct file name. The C<new()> constructor also accepts a valid filehandle. Remember to C<binmode()> the filehandle first. =head2 getPpsTree() $oPpsRoot = $oOle->getPpsTree([$bData]); Returns the root PPS as an OLE::Storage_Lite::PPS::Root object; the other PPS objects are included as its children. If C<$bData> is true, the objects will contain the data from the file. =head2 getPpsSearch() @aPps = $oOle->getPpsSearch($raName [, $bData] [, $iCase]); Returns the PPSs, as OLE::Storage_Lite::PPS objects, whose names are given in the C<$raName> array ref. If C<$bData> is true, the objects will contain the data from the file. If C<$iCase> is true, the search is case insensitive. =head2 getNthPps() $oPps = $oOle->getNthPps($iNth [, $bData]); Returns the PPS numbered C<$iNth> as an C<OLE::Storage_Lite::PPS> object. If C<$bData> is true, the object will contain the data from the file. =head2 Asc2Ucs() $sUcs2 = OLE::Storage_Lite::Asc2Ucs($sAsc); Utility function. Just adds a 0x00 byte after every character in C<$sAsc>. =head2 Ucs2Asc() $sAsc = OLE::Storage_Lite::Ucs2Asc($sUcs2); Utility function. Just deletes the 0x00 byte after every character in C<$sUcs2>. =head1 OLE::Storage_Lite::PPS OLE::Storage_Lite::PPS has these properties: =over 4 =item No Order number in saving. =item Name Its name in UCS2 (a.k.a. Unicode). =item Type Its type (1: Dir, 2: File (Data), 5: Root) =item PrevPps Previous pps (as No) =item NextPps Next pps (as No) =item DirPps Dir pps (as No). =item Time1st Timestamp 1st, in an array ref with a format similar to localtime. =item Time2nd Timestamp 2nd, in an array ref with a format similar to localtime.
=item StartBlock Start block number =item Size Size of the pps =item Data Its data =item Child Its child PPSs in an array ref =back =head1 OLE::Storage_Lite::PPS::Root OLE::Storage_Lite::PPS::Root has 2 methods. =head2 new() $oRoot = OLE::Storage_Lite::PPS::Root->new( $raTime1st, $raTime2nd, $raChild); Constructor. C<$raTime1st> and C<$raTime2nd> are array refs of the form ($iSec, $iMin, $iHour, $iDay, $iMon, $iYear), where $iSec is seconds, $iMin is minutes, $iHour is hours, $iDay is the day of the month, $iMon is the month - 1 and $iYear is the year - 1900. C<$raChild> is an array ref of child PPSs. =head2 save() $oRoot = $oRoot->save( $sFile, $bNoAs); Saves the information into C<$sFile>. If C<$sFile> is '-', output goes to STDOUT. The save() method also accepts a valid filehandle; remember to C<binmode()> the filehandle first. If C<$bNoAs> is defined, this function uses the No of each PPS as the saving order; if C<$bNoAs> is undefined, it calculates the PPS saving order itself. =head1 OLE::Storage_Lite::PPS::Dir OLE::Storage_Lite::PPS::Dir has 1 method. =head2 new() $oRoot = OLE::Storage_Lite::PPS::Dir->new( $sName [, $raTime1st] [, $raTime2nd] [, $raChild]); Constructor. C<$sName> is the name of the PPS. C<$raTime1st> and C<$raTime2nd> are array refs of the form ($iSec, $iMin, $iHour, $iDay, $iMon, $iYear), as described for OLE::Storage_Lite::PPS::Root above. C<$raChild> is an array ref of child PPSs. =head1 OLE::Storage_Lite::PPS::File OLE::Storage_Lite::PPS::File has 3 methods. =head2 new() $oRoot = OLE::Storage_Lite::PPS::File->new($sName, $sData); C<$sName> is the name of the PPS. C<$sData> is the data of the PPS. =head2 newFile() $oRoot = OLE::Storage_Lite::PPS::File->newFile($sName, $sFile); This constructor uses a file handle for getting and storing data. C<$sName> is the name of the PPS. If C<$sFile> is a plain scalar, it is taken as a filename. If C<$sFile> is an IO::Handle object, that handle is used. If C<$sFile> is undef or '', a temporary file is used. CAUTION: C<$sFile> will be updated by the C<append> method, so if you want to pass an IO::Handle object and append data to it, you should open the handle with "r+". =head2 append() $oRoot = $oPps->append($sData); Appends the specified data to that PPS. C<$sData> is the data to append. =head1 CAUTION A file containing VBA (a.k.a. macros) that is saved by this module will not work correctly: although other modules can still read the same information from it, the file will cause an error in the application (Word, Excel, ...). =head1 DEPRECATED FEATURES Older versions of C<OLE::Storage_Lite> autovivified a scalar ref in the C<new()> constructors into a scalar filehandle. This functionality is still there for backwards compatibility, but it is highly recommended that you do not use it. Instead, create a filehandle (scalar or otherwise) and pass that in. =head1 COPYRIGHT The OLE::Storage_Lite module is Copyright (c) 2000,2001 Kawai Takanori, Japan. All rights reserved. You may distribute under the terms of either the GNU General Public License or the Artistic License, as specified in the Perl README file. =head1 ACKNOWLEDGEMENTS First of all, I would like to acknowledge Martin Schwartz and his module OLE::Storage. =head1 AUTHOR Kawai Takanori kwitknr@cpan.org This module is currently maintained by John McNamara jmcnamara@cpan.org =head1 SEE ALSO OLE::Storage Documentation for the OLE Compound document has been released by Microsoft under the I<Open Specification Promise>.
See http://www.microsoft.com/interop/docs/supportingtechnologies.mspx The Digital Imaging Group have also detailed the OLE format in the JPEG2000 specification: see Appendix A of http://www.i3a.org/pdf/wg1n1017.pdf =cut �������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������gdata/inst/perl/module_tools.pl���������������������������������������������������������������������0000755�0001751�0000144�00000005572�13003720416�016341� 0����������������������������������������������������������������������������������������������������ustar �hornik��������������������������users������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������#!/usr/bin/perl BEGIN { use File::Basename; # Add current path to perl library search path use lib dirname($0); } use strict; use warnings; use Data::Dumper; use Cwd; sub check_modules(;$) { my( $VERBOSE, $HAS_Spreadsheet_ParseExcel, $HAS_Compress_Raw_Zlib, $HAS_Spreadsheet_ParseXLSX ); $VERBOSE=$_[0]; # Check if we can load the libraries we need eval { require Spreadsheet::ParseExcel; use Spreadsheet::ParseExcel::Utility qw(ExcelFmt); $HAS_Spreadsheet_ParseExcel=1; print "Loaded Spreadsheet::ParseExcel\n" if $VERBOSE; }; eval { require Compress::Raw::Zlib; $HAS_Compress_Raw_Zlib=1; print "Loaded Compress::Raw::Zlib\n" if $VERBOSE; }; eval { require Spreadsheet::ParseXLSX; $HAS_Spreadsheet_ParseXLSX=1; print "Loaded Spreadsheet::ParseXLSX\n" if $VERBOSE; }; if($VERBOSE) { print "ERROR: Unable to load Spreadsheet::ParseExcel perl module! \n" if !$HAS_Spreadsheet_ParseExcel; print "ERROR: Unable to load Compress::Raw::Zlib perl module! \n" if ! $HAS_Compress_Raw_Zlib; print "ERROR: Unable to load Spreadsheet::ParseXLSX perl module! \n" if ! $HAS_Spreadsheet_ParseXLSX; } return $HAS_Spreadsheet_ParseExcel, $HAS_Compress_Raw_Zlib, $HAS_Spreadsheet_ParseXLSX; } sub check_modules_and_notify() { my( $HAS_Spreadsheet_ParseExcel, $HAS_Compress_Raw_Zlib, $HAS_Spreadsheet_ParseXLSX) = check_modules(0); $HAS_Spreadsheet_ParseExcel or die("ERROR: Perl module Spreadsheet::ParseExcel cannot be loaded. 
Exiting.\n"); $HAS_Compress_Raw_Zlib or warn("WARNING: Perl module Compress::Raw::Zlib cannot be loaded.\n"); $HAS_Spreadsheet_ParseXLSX or warn("WARNING: Perl module Spreadsheet::ParseXLSX cannot be loaded.\n"); ($HAS_Compress_Raw_Zlib && $HAS_Spreadsheet_ParseXLSX ) or warn("WARNING: Microsoft Excel 2007 'XLSX' formatted files will not be processed.\n"); return $HAS_Spreadsheet_ParseExcel, $HAS_Compress_Raw_Zlib, $HAS_Spreadsheet_ParseXLSX; } sub install_modules() { my($mod, $obj, $here); $here = dirname($0); # load the module require CPAN; # initialize CPAN components CPAN::HandleConfig->load(); CPAN::Shell::setup_output(); CPAN::Index->reload(); # set the target install path CPAN::Shell->o("conf", "mbuildpl_arg", "PREFIX=$here LIB=$here --prefix $here --install-base $here"); CPAN::Shell->o("conf", "makepl_arg", "PREFIX=$here LIB=$here --prefix $here --install-base $here"); CPAN::Shell->install("Compress::Raw::Zlib"); #return 0; # install the libraries we want for $mod (qw( Compress::Raw::Zlib Spreadsheet::ParseXLSX )){ my $obj = CPAN::Shell->expand('Module',$mod); $obj->install; } } 1; ��������������������������������������������������������������������������������������������������������������������������������������gdata/inst/perl/IO/���������������������������������������������������������������������������������0000755�0001751�0000144�00000000000�13003720416�013572� 5����������������������������������������������������������������������������������������������������ustar �hornik��������������������������users������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������gdata/inst/perl/IO/AtomicFile.pm��������������������������������������������������������������������0000644�0001751�0000144�00000011345�13003720416�016150� 0����������������������������������������������������������������������������������������������������ustar �hornik��������������������������users������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������package IO::AtomicFile; ### DOCUMENTATION AT BOTTOM OF FILE # Be strict: use strict; # External modules: use IO::File; #------------------------------ # # GLOBALS... # #------------------------------ use vars qw($VERSION @ISA); # The package version, both in 1.23 style *and* usable by MakeMaker: $VERSION = "2.110"; # Inheritance: @ISA = qw(IO::File); #------------------------------ # new ARGS... #------------------------------ # Class method, constructor. # Any arguments are sent to open(). # sub new { my $class = shift; my $self = $class->SUPER::new(); ${*$self}{'io_atomicfile_suffix'} = ''; $self->open(@_) if @_; $self; } #------------------------------ # DESTROY #------------------------------ # Destructor. # sub DESTROY { shift->close(1); ### like close, but raises fatal exception on failure } #------------------------------ # open PATH, MODE #------------------------------ # Class/instance method. # sub open { my ($self, $path, $mode) = @_; ref($self) or $self = $self->new; ### now we have an instance! ### Create tmp path, and remember this info: my $temp = "${path}..TMP" . ${*$self}{'io_atomicfile_suffix'}; ${*$self}{'io_atomicfile_temp'} = $temp; ${*$self}{'io_atomicfile_path'} = $path; ### Open the file! 
Returns filehandle on success, for use as a constructor: $self->SUPER::open($temp, $mode) ? $self : undef; } #------------------------------ # _closed [YESNO] #------------------------------ # Instance method, private. # Are we already closed? Argument sets new value, returns previous one. # sub _closed { my $self = shift; my $oldval = ${*$self}{'io_atomicfile_closed'}; ${*$self}{'io_atomicfile_closed'} = shift if @_; $oldval; } #------------------------------ # close #------------------------------ # Instance method. # Close the handle, and rename the temp file to its final name. # sub close { my ($self, $die) = @_; unless ($self->_closed(1)) { ### sentinel... $self->SUPER::close(); rename(${*$self}{'io_atomicfile_temp'}, ${*$self}{'io_atomicfile_path'}) or ($die ? die "close atomic file: $!\n" : return undef); } 1; } #------------------------------ # delete #------------------------------ # Instance method. # Close the handle, and delete the temp file. # sub delete { my $self = shift; unless ($self->_closed(1)) { ### sentinel... $self->SUPER::close(); return unlink(${*$self}{'io_atomicfile_temp'}); } 1; } #------------------------------ # detach #------------------------------ # Instance method. # Close the handle, but DO NOT delete the temp file. # sub detach { my $self = shift; $self->SUPER::close() unless ($self->_closed(1)); 1; } #------------------------------ 1; __END__ =head1 NAME IO::AtomicFile - write a file which is updated atomically =head1 SYNOPSIS use IO::AtomicFile; ### Write a temp file, and have it install itself when closed: my $FH = IO::AtomicFile->open("bar.dat", "w"); print $FH "Hello!\n"; $FH->close || die "couldn't install atomic file: $!"; ### Write a temp file, but delete it before it gets installed: my $FH = IO::AtomicFile->open("bar.dat", "w"); print $FH "Hello!\n"; $FH->delete; ### Write a temp file, but neither install it nor delete it: my $FH = IO::AtomicFile->open("bar.dat", "w"); print $FH "Hello!\n"; $FH->detach; =head1 DESCRIPTION This module is intended for people who need to update files reliably in the face of unexpected program termination. For example, you generally don't want to be halfway in the middle of writing I</etc/passwd> and have your program terminate! Even the act of writing a single scalar to a filehandle is I<not> atomic. But this module gives you true atomic updates, via rename(). When you open a file I</foo/bar.dat> via this module, you are I<actually> opening a temporary file I</foo/bar.dat..TMP>, and writing your output there. The act of closing this file (either explicitly via close(), or implicitly via the destruction of the object) will cause rename() to be called... therefore, from the point of view of the outside world, the file's contents are updated in a single time quantum. To ensure that problems do not go undetected, the "close" method done by the destructor will raise a fatal exception if the rename() fails. The explicit close() just returns undef. You can also decide at any point to trash the file you've been building. =head1 AUTHOR =head2 Primary Maintainer David F. Skoll (F<dfs@roaringpenguin.com>). =head2 Original Author Eryq (F<eryq@zeegee.com>). President, ZeeGee Software Inc (F<http://www.zeegee.com>). 
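A small illustrative note (not part of the original documentation): as the open() implementation above shows, the intermediate data is written to a file whose name is the target path with C<..TMP> appended, and the atomic rename happens in close():

    use IO::AtomicFile;

    # "config.txt" is just an example name; bytes are written to
    # "config.txt..TMP" and renamed to "config.txt" on close().
    my $FH = IO::AtomicFile->open("config.txt", "w") or die "open: $!";
    print $FH "key=value\n";
    $FH->close or die "couldn't install atomic file: $!";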
=head1 REVISION $Revision: 1248 $ =cut �������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������gdata/inst/perl/IO/Wrap.pm��������������������������������������������������������������������������0000644�0001751�0000144�00000011622�13003720416�015043� 0����������������������������������������������������������������������������������������������������ustar �hornik��������������������������users������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������package IO::Wrap; # SEE DOCUMENTATION AT BOTTOM OF FILE require 5.002; use strict; use vars qw(@ISA @EXPORT $VERSION); @ISA = qw(Exporter); @EXPORT = qw(wraphandle); use FileHandle; use Carp; # The package version, both in 1.23 style *and* usable by MakeMaker: $VERSION = "2.110"; #------------------------------ # wraphandle RAW #------------------------------ sub wraphandle { my $raw = shift; new IO::Wrap $raw; } #------------------------------ # new STREAM #------------------------------ sub new { my ($class, $stream) = @_; no strict 'refs'; ### Convert raw scalar to globref: ref($stream) or $stream = \*$stream; ### Wrap globref and incomplete objects: if ((ref($stream) eq 'GLOB') or ### globref (ref($stream) eq 'FileHandle') && !defined(&FileHandle::read)) { return bless \$stream, $class; } $stream; ### already okay! } #------------------------------ # I/O methods... #------------------------------ sub close { my $self = shift; return close($$self); } sub getline { my $self = shift; my $fh = $$self; return scalar(<$fh>); } sub getlines { my $self = shift; wantarray or croak("Can't call getlines in scalar context!"); my $fh = $$self; <$fh>; } sub print { my $self = shift; print { $$self } @_; } sub read { my $self = shift; return read($$self, $_[0], $_[1]); } sub seek { my $self = shift; return seek($$self, $_[0], $_[1]); } sub tell { my $self = shift; return tell($$self); } #------------------------------ 1; __END__ =head1 NAME IO::Wrap - wrap raw filehandles in IO::Handle interface =head1 SYNOPSIS use IO::Wrap; ### Do stuff with any kind of filehandle (including a bare globref), or ### any kind of blessed object that responds to a print() message. ### sub do_stuff { my $fh = shift; ### At this point, we have no idea what the user gave us... ### a globref? a FileHandle? a scalar filehandle name? $fh = wraphandle($fh); ### At this point, we know we have an IO::Handle-like object! $fh->print("Hey there!"); ... } =head1 DESCRIPTION Let's say you want to write some code which does I/O, but you don't want to force the caller to provide you with a FileHandle or IO::Handle object. You want them to be able to say: do_stuff(\*STDOUT); do_stuff('STDERR'); do_stuff($some_FileHandle_object); do_stuff($some_IO_Handle_object); And even: do_stuff($any_object_with_a_print_method); Sure, one way to do it is to force the caller to use tiehandle(). But that puts the burden on them. Another way to do it is to use B<IO::Wrap>, which provides you with the following functions: =over 4 =item wraphandle SCALAR This function will take a single argument, and "wrap" it based on what it seems to be... =over 4 =item * B<A raw scalar filehandle name,> like C<"STDOUT"> or C<"Class::HANDLE">. 
In this case, the filehandle name is wrapped in an IO::Wrap object, which is returned. =item * B<A raw filehandle glob,> like C<\*STDOUT>. In this case, the filehandle glob is wrapped in an IO::Wrap object, which is returned. =item * B<A blessed FileHandle object.> In this case, the FileHandle is wrapped in an IO::Wrap object if and only if your FileHandle class does not support the C<read()> method. =item * B<Any other kind of blessed object,> which is assumed to be already conformant to the IO::Handle interface. In this case, you just get back that object. =back =back If you get back an IO::Wrap object, it will obey a basic subset of the IO:: interface. That is, the following methods (note: I said I<methods>, not named operators) should work on the thing you get back: close getline getlines print ARGS... read BUFFER,NBYTES seek POS,WHENCE tell =head1 NOTES Clearly, when wrapping a raw external filehandle (like \*STDOUT), I didn't want to close the file descriptor when the "wrapper" object is destroyed... since the user might not appreciate that! Hence, there's no DESTROY method in this class. When wrapping a FileHandle object, however, I believe that Perl will invoke the FileHandle::DESTROY when the last reference goes away, so in that case, the filehandle is closed if the wrapped FileHandle really was the last reference to it. =head1 WARNINGS This module does not allow you to wrap filehandle names which are given as strings that lack the package they were opened in. That is, if a user opens FOO in package Foo, they must pass it to you either as C<\*FOO> or as C<"Foo::FOO">. However, C<"STDIN"> and friends will work just fine. =head1 VERSION $Id: Wrap.pm 1248 2008-03-25 00:51:31Z warnes $ =head1 AUTHOR =item Primary Maintainer David F. Skoll (F<dfs@roaringpenguin.com>). =item Original Author Eryq (F<eryq@zeegee.com>). President, ZeeGee Software Inc (F<http://www.zeegee.com>). =cut ��������������������������������������������������������������������������������������������������������������gdata/inst/perl/IO/InnerFile.pm���������������������������������������������������������������������0000644�0001751�0000144�00000012642�13003720416�016010� 0����������������������������������������������������������������������������������������������������ustar �hornik��������������������������users������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������package IO::InnerFile; =head1 NAME IO::InnerFile - define a file inside another file =head1 SYNOPSIS ### Read a subset of a file: $inner = IO::InnerFile->new($fh, $start, $length); while (<$inner>) { ... } =head1 DESCRIPTION If you have a filehandle that can seek() and tell(), then you can open an IO::InnerFile on a range of the underlying file. =head1 PUBLIC INTERFACE =over =cut use Symbol; # The package version, both in 1.23 style *and* usable by MakeMaker: $VERSION = "2.110"; #------------------------------ =item new FILEHANDLE, [START, [LENGTH]] I<Class method, constructor.> Create a new inner-file opened on the given FILEHANDLE, from bytes START to START+LENGTH. Both START and LENGTH default to 0; negative values are silently coerced to zero. Note that FILEHANDLE must be able to seek() and tell(), in addition to whatever other methods you may desire for reading it. 
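A brief illustrative sketch (not part of the original documentation; the file name and offsets are invented for the example):

    use IO::File;
    use IO::InnerFile;

    my $fh = IO::File->new("mail.mbox", "r") or die "open: $!";
    # Expose only bytes 2000 .. 2000+500 of the underlying file:
    my $inner = IO::InnerFile->new($fh, 2000, 500);
    while (defined(my $line = $inner->getline)) {
        print $line;
    }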
=cut sub new { my ($class, $fh, $start, $lg) = @_; $start = 0 if (!$start or ($start < 0)); $lg = 0 if (!$lg or ($lg < 0)); ### Create the underlying "object": my $a = { FH => $fh, CRPOS => 0, START => $start, LG => $lg, }; ### Create a new filehandle tied to this object: $fh = gensym; tie(*$fh, $class, $a); return bless($fh, $class); } sub TIEHANDLE { my ($class, $data) = @_; return bless($data, $class); } sub DESTROY { my ($self) = @_; $self->close() if (ref($self) eq 'SCALAR'); } #------------------------------ =item set_length LENGTH =item get_length =item add_length NBYTES I<Instance methods.> Get/set the virtual length of the inner file. =cut sub set_length { tied(${$_[0]})->{LG} = $_[1]; } sub get_length { tied(${$_[0]})->{LG}; } sub add_length { tied(${$_[0]})->{LG} += $_[1]; } #------------------------------ =item set_start START =item get_start =item add_start NBYTES I<Instance methods.> Get/set the virtual start position of the inner file. =cut sub set_start { tied(${$_[0]})->{START} = $_[1]; } sub get_start { tied(${$_[0]})->{START}; } sub set_end { tied(${$_[0]})->{LG} = $_[1] - tied(${$_[0]})->{START}; } sub get_end { tied(${$_[0]})->{LG} + tied(${$_[0]})->{START}; } #------------------------------ =item binmode =item close =item flush =item getc =item getline =item print LIST =item printf LIST =item read BUF, NBYTES =item readline =item seek OFFFSET, WHENCE =item tell =item write ARGS... I<Instance methods.> Standard filehandle methods. =cut sub write { shift->WRITE(@_) } sub print { shift->PRINT(@_) } sub printf { shift->PRINTF(@_) } sub flush { "0 but true"; } sub binmode { 1; } sub getc { return GETC(tied(${$_[0]}) ); } sub read { return READ( tied(${$_[0]}), @_[1,2,3] ); } sub readline { return READLINE( tied(${$_[0]}) ); } sub getline { return READLINE( tied(${$_[0]}) ); } sub close { return CLOSE(tied(${$_[0]}) ); } sub seek { my ($self, $ofs, $whence) = @_; $self = tied( $$self ); $self->{CRPOS} = $ofs if ($whence == 0); $self->{CRPOS}+= $ofs if ($whence == 1); $self->{CRPOS} = $self->{LG} + $ofs if ($whence == 2); $self->{CRPOS} = 0 if ($self->{CRPOS} < 0); $self->{CRPOS} = $self->{LG} if ($self->{CRPOS} > $self->{LG}); return 1; } sub tell { return tied(${$_[0]})->{CRPOS}; } sub WRITE { die "inner files can only open for reading\n"; } sub PRINT { die "inner files can only open for reading\n"; } sub PRINTF { die "inner files can only open for reading\n"; } sub GETC { my ($self) = @_; return 0 if ($self->{CRPOS} >= $self->{LG}); my $data; ### Save and seek... my $old_pos = $self->{FH}->tell; $self->{FH}->seek($self->{CRPOS}+$self->{START}, 0); ### ...read... my $lg = $self->{FH}->read($data, 1); $self->{CRPOS} += $lg; ### ...and restore: $self->{FH}->seek($old_pos, 0); $self->{LG} = $self->{CRPOS} unless ($lg); return ($lg ? $data : undef); } sub READ { my ($self, $undefined, $lg, $ofs) = @_; $undefined = undef; return 0 if ($self->{CRPOS} >= $self->{LG}); $lg = $self->{LG} - $self->{CRPOS} if ($self->{CRPOS} + $lg > $self->{LG}); return 0 unless ($lg); ### Save and seek... my $old_pos = $self->{FH}->tell; $self->{FH}->seek($self->{CRPOS}+$self->{START}, 0); ### ...read... $lg = $self->{FH}->read($_[1], $lg, $_[3] ); $self->{CRPOS} += $lg; ### ...and restore: $self->{FH}->seek($old_pos, 0); $self->{LG} = $self->{CRPOS} unless ($lg); return $lg; } sub READLINE { my ($self) = @_; return undef if ($self->{CRPOS} >= $self->{LG}); ### Save and seek... my $old_pos = $self->{FH}->tell; $self->{FH}->seek($self->{CRPOS}+$self->{START}, 0); ### ...read... 
my $text = $self->{FH}->getline; ### ...and restore: $self->{FH}->seek($old_pos, 0); #### If we detected a new EOF ... unless (defined $text) { $self->{LG} = $self->{CRPOS}; return undef; } my $lg=length($text); $lg = $self->{LG} - $self->{CRPOS} if ($self->{CRPOS} + $lg > $self->{LG}); $self->{CRPOS} += $lg; return substr($text, 0,$lg); } sub CLOSE { %{$_[0]}=(); } 1; __END__ =back =head1 VERSION $Id: InnerFile.pm 1248 2008-03-25 00:51:31Z warnes $ =head1 AUTHOR Original version by Doru Petrescu (pdoru@kappa.ro). Documentation and by Eryq (eryq@zeegee.com). Currently maintained by David F. Skoll (dfs@roaringpenguin.com). =cut ����������������������������������������������������������������������������������������������gdata/inst/perl/IO/Scalar.pm������������������������������������������������������������������������0000644�0001751�0000144�00000040540�13003720416�015340� 0����������������������������������������������������������������������������������������������������ustar �hornik��������������������������users������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������package IO::Scalar; =head1 NAME IO::Scalar - IO:: interface for reading/writing a scalar =head1 SYNOPSIS Perform I/O on strings, using the basic OO interface... use 5.005; use IO::Scalar; $data = "My message:\n"; ### Open a handle on a string, and append to it: $SH = new IO::Scalar \$data; $SH->print("Hello"); $SH->print(", world!\nBye now!\n"); print "The string is now: ", $data, "\n"; ### Open a handle on a string, read it line-by-line, then close it: $SH = new IO::Scalar \$data; while (defined($_ = $SH->getline)) { print "Got line: $_"; } $SH->close; ### Open a handle on a string, and slurp in all the lines: $SH = new IO::Scalar \$data; print "All lines:\n", $SH->getlines; ### Get the current position (either of two ways): $pos = $SH->getpos; $offset = $SH->tell; ### Set the current position (either of two ways): $SH->setpos($pos); $SH->seek($offset, 0); ### Open an anonymous temporary scalar: $SH = new IO::Scalar; $SH->print("Hi there!"); print "I printed: ", ${$SH->sref}, "\n"; ### get at value Don't like OO for your I/O? No problem. Thanks to the magic of an invisible tie(), the following now works out of the box, just as it does with IO::Handle: use 5.005; use IO::Scalar; $data = "My message:\n"; ### Open a handle on a string, and append to it: $SH = new IO::Scalar \$data; print $SH "Hello"; print $SH ", world!\nBye now!\n"; print "The string is now: ", $data, "\n"; ### Open a handle on a string, read it line-by-line, then close it: $SH = new IO::Scalar \$data; while (<$SH>) { print "Got line: $_"; } close $SH; ### Open a handle on a string, and slurp in all the lines: $SH = new IO::Scalar \$data; print "All lines:\n", <$SH>; ### Get the current position (WARNING: requires 5.6): $offset = tell $SH; ### Set the current position (WARNING: requires 5.6): seek $SH, $offset, 0; ### Open an anonymous temporary scalar: $SH = new IO::Scalar; print $SH "Hi there!"; print "I printed: ", ${$SH->sref}, "\n"; ### get at value And for you folks with 1.x code out there: the old tie() style still works, though this is I<unnecessary and deprecated>: use IO::Scalar; ### Writing to a scalar... my $s; tie *OUT, 'IO::Scalar', \$s; print OUT "line 1\nline 2\n", "line 3\n"; print "String is now: $s\n" ### Reading and writing an anonymous scalar... 
tie *OUT, 'IO::Scalar'; print OUT "line 1\nline 2\n", "line 3\n"; tied(OUT)->seek(0,0); while (<OUT>) { print "Got line: ", $_; } Stringification works, too! my $SH = new IO::Scalar \$data; print $SH "Hello, "; print $SH "world!"; print "I printed: $SH\n"; =head1 DESCRIPTION This class is part of the IO::Stringy distribution; see L<IO::Stringy> for change log and general information. The IO::Scalar class implements objects which behave just like IO::Handle (or FileHandle) objects, except that you may use them to write to (or read from) scalars. These handles are automatically tiehandle'd (though please see L<"WARNINGS"> for information relevant to your Perl version). Basically, this: my $s; $SH = new IO::Scalar \$s; $SH->print("Hel", "lo, "); ### OO style $SH->print("world!\n"); ### ditto Or this: my $s; $SH = tie *OUT, 'IO::Scalar', \$s; print OUT "Hel", "lo, "; ### non-OO style print OUT "world!\n"; ### ditto Causes $s to be set to: "Hello, world!\n" =head1 PUBLIC INTERFACE =cut use Carp; use strict; use vars qw($VERSION @ISA); use IO::Handle; use 5.005; ### Stringification, courtesy of B. K. Oxley (binkley): :-) use overload '""' => sub { ${*{$_[0]}->{SR}} }; use overload 'bool' => sub { 1 }; ### have to do this, so object is true! ### The package version, both in 1.23 style *and* usable by MakeMaker: $VERSION = "2.110"; ### Inheritance: @ISA = qw(IO::Handle); ### This stuff should be got rid of ASAP. require IO::WrapTie and push @ISA, 'IO::WrapTie::Slave' if ($] >= 5.004); #============================== =head2 Construction =over 4 =cut #------------------------------ =item new [ARGS...] I<Class method.> Return a new, unattached scalar handle. If any arguments are given, they're sent to open(). =cut sub new { my $proto = shift; my $class = ref($proto) || $proto; my $self = bless \do { local *FH }, $class; tie *$self, $class, $self; $self->open(@_); ### open on anonymous by default $self; } sub DESTROY { shift->close; } #------------------------------ =item open [SCALARREF] I<Instance method.> Open the scalar handle on a new scalar, pointed to by SCALARREF. If no SCALARREF is given, a "private" scalar is created to hold the file data. Returns the self object on success, undefined on error. =cut sub open { my ($self, $sref) = @_; ### Sanity: defined($sref) or do {my $s = ''; $sref = \$s}; (ref($sref) eq "SCALAR") or croak "open() needs a ref to a scalar"; ### Setup: *$self->{Pos} = 0; ### seek position *$self->{SR} = $sref; ### scalar reference $self; } #------------------------------ =item opened I<Instance method.> Is the scalar handle opened on something? =cut sub opened { *{shift()}->{SR}; } #------------------------------ =item close I<Instance method.> Disassociate the scalar handle from its underlying scalar. Done automatically on destroy. =cut sub close { my $self = shift; %{*$self} = (); 1; } =back =cut #============================== =head2 Input and output =over 4 =cut #------------------------------ =item flush I<Instance method.> No-op, provided for OO compatibility. =cut sub flush { "0 but true" } #------------------------------ =item getc I<Instance method.> Return the next character, or undef if none remain. =cut sub getc { my $self = shift; ### Return undef right away if at EOF; else, move pos forward: return undef if $self->eof; substr(${*$self->{SR}}, *$self->{Pos}++, 1); } #------------------------------ =item getline I<Instance method.> Return the next line, or undef on end of string. Can safely be called in an array context. Currently, lines are delimited by "\n". 
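(An illustrative addition, not part of the original documentation.) As the case analysis in the implementation below shows, C<getline()> also honors the current setting of C<$/>, including paragraph mode and fixed-size records:

    my $data = "rec1rec2rec3";
    my $SH   = new IO::Scalar \$data;
    {
        local $/ = \4;               # fixed-size records of 4 bytes
        while (defined(my $rec = $SH->getline)) {
            print "got [$rec]\n";    # got [rec1] / [rec2] / [rec3]
        }
    }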
=cut sub getline { my $self = shift; ### Return undef right away if at EOF: return undef if $self->eof; ### Get next line: my $sr = *$self->{SR}; my $i = *$self->{Pos}; ### Start matching at this point. ### Minimal impact implementation! ### We do the fast fast thing (no regexps) if using the ### classic input record separator. ### Case 1: $/ is undef: slurp all... if (!defined($/)) { *$self->{Pos} = length $$sr; return substr($$sr, $i); } ### Case 2: $/ is "\n": zoom zoom zoom... elsif ($/ eq "\012") { ### Seek ahead for "\n"... yes, this really is faster than regexps. my $len = length($$sr); for (; $i < $len; ++$i) { last if ord (substr ($$sr, $i, 1)) == 10; } ### Extract the line: my $line; if ($i < $len) { ### We found a "\n": $line = substr ($$sr, *$self->{Pos}, $i - *$self->{Pos} + 1); *$self->{Pos} = $i+1; ### Remember where we finished up. } else { ### No "\n"; slurp the remainder: $line = substr ($$sr, *$self->{Pos}, $i - *$self->{Pos}); *$self->{Pos} = $len; } return $line; } ### Case 3: $/ is ref to int. Do fixed-size records. ### (Thanks to Dominique Quatravaux.) elsif (ref($/)) { my $len = length($$sr); my $i = ${$/} + 0; my $line = substr ($$sr, *$self->{Pos}, $i); *$self->{Pos} += $i; *$self->{Pos} = $len if (*$self->{Pos} > $len); return $line; } ### Case 4: $/ is either "" (paragraphs) or something weird... ### This is Graham's general-purpose stuff, which might be ### a tad slower than Case 2 for typical data, because ### of the regexps. else { pos($$sr) = $i; ### If in paragraph mode, skip leading lines (and update i!): length($/) or (($$sr =~ m/\G\n*/g) and ($i = pos($$sr))); ### If we see the separator in the buffer ahead... if (length($/) ? $$sr =~ m,\Q$/\E,g ### (ordinary sep) TBD: precomp! : $$sr =~ m,\n\n,g ### (a paragraph) ) { *$self->{Pos} = pos $$sr; return substr($$sr, $i, *$self->{Pos}-$i); } ### Else if no separator remains, just slurp the rest: else { *$self->{Pos} = length $$sr; return substr($$sr, $i); } } } #------------------------------ =item getlines I<Instance method.> Get all remaining lines. It will croak() if accidentally called in a scalar context. =cut sub getlines { my $self = shift; wantarray or croak("can't call getlines in scalar context!"); my ($line, @lines); push @lines, $line while (defined($line = $self->getline)); @lines; } #------------------------------ =item print ARGS... I<Instance method.> Print ARGS to the underlying scalar. B<Warning:> this continues to always cause a seek to the end of the string, but if you perform seek()s and tell()s, it is still safer to explicitly seek-to-end before subsequent print()s. =cut sub print { my $self = shift; *$self->{Pos} = length(${*$self->{SR}} .= join('', @_) . (defined($\) ? $\ : "")); 1; } sub _unsafe_print { my $self = shift; my $append = join('', @_) . $\; ${*$self->{SR}} .= $append; *$self->{Pos} += length($append); 1; } sub _old_print { my $self = shift; ${*$self->{SR}} .= join('', @_) . $\; *$self->{Pos} = length(${*$self->{SR}}); 1; } #------------------------------ =item read BUF, NBYTES, [OFFSET] I<Instance method.> Read some bytes from the scalar. Returns the number of bytes actually read, 0 on end-of-file, undef on error. =cut sub read { my $self = $_[0]; my $n = $_[2]; my $off = $_[3] || 0; my $read = substr(${*$self->{SR}}, *$self->{Pos}, $n); $n = length($read); *$self->{Pos} += $n; ($off ? substr($_[1], $off) : $_[1]) = $read; return $n; } #------------------------------ =item write BUF, NBYTES, [OFFSET] I<Instance method.> Write some bytes to the scalar. 
=cut sub write { my $self = $_[0]; my $n = $_[2]; my $off = $_[3] || 0; my $data = substr($_[1], $off, $n); $n = length($data); $self->print($data); return $n; } #------------------------------ =item sysread BUF, LEN, [OFFSET] I<Instance method.> Read some bytes from the scalar. Returns the number of bytes actually read, 0 on end-of-file, undef on error. =cut sub sysread { my $self = shift; $self->read(@_); } #------------------------------ =item syswrite BUF, NBYTES, [OFFSET] I<Instance method.> Write some bytes to the scalar. =cut sub syswrite { my $self = shift; $self->write(@_); } =back =cut #============================== =head2 Seeking/telling and other attributes =over 4 =cut #------------------------------ =item autoflush I<Instance method.> No-op, provided for OO compatibility. =cut sub autoflush {} #------------------------------ =item binmode I<Instance method.> No-op, provided for OO compatibility. =cut sub binmode {} #------------------------------ =item clearerr I<Instance method.> Clear the error and EOF flags. A no-op. =cut sub clearerr { 1 } #------------------------------ =item eof I<Instance method.> Are we at end of file? =cut sub eof { my $self = shift; (*$self->{Pos} >= length(${*$self->{SR}})); } #------------------------------ =item seek OFFSET, WHENCE I<Instance method.> Seek to a given position in the stream. =cut sub seek { my ($self, $pos, $whence) = @_; my $eofpos = length(${*$self->{SR}}); ### Seek: if ($whence == 0) { *$self->{Pos} = $pos } ### SEEK_SET elsif ($whence == 1) { *$self->{Pos} += $pos } ### SEEK_CUR elsif ($whence == 2) { *$self->{Pos} = $eofpos + $pos} ### SEEK_END else { croak "bad seek whence ($whence)" } ### Fixup: if (*$self->{Pos} < 0) { *$self->{Pos} = 0 } if (*$self->{Pos} > $eofpos) { *$self->{Pos} = $eofpos } return 1; } #------------------------------ =item sysseek OFFSET, WHENCE I<Instance method.> Identical to C<seek OFFSET, WHENCE>, I<q.v.> =cut sub sysseek { my $self = shift; $self->seek (@_); } #------------------------------ =item tell I<Instance method.> Return the current position in the stream, as a numeric offset. =cut sub tell { *{shift()}->{Pos} } #------------------------------ # # use_RS [YESNO] # # I<Instance method.> # Obey the curent setting of $/, like IO::Handle does? # Default is false in 1.x, but cold-welded true in 2.x and later. # sub use_RS { my ($self, $yesno) = @_; carp "use_RS is deprecated and ignored; \$/ is always consulted\n"; } #------------------------------ =item setpos POS I<Instance method.> Set the current position, using the opaque value returned by C<getpos()>. =cut sub setpos { shift->seek($_[0],0) } #------------------------------ =item getpos I<Instance method.> Return the current position in the string, as an opaque object. =cut *getpos = \&tell; #------------------------------ =item sref I<Instance method.> Return a reference to the underlying scalar. =cut sub sref { *{shift()}->{SR} } #------------------------------ # Tied handle methods... #------------------------------ # Conventional tiehandle interface: sub TIEHANDLE { ((defined($_[1]) && UNIVERSAL::isa($_[1], "IO::Scalar")) ? $_[1] : shift->new(@_)); } sub GETC { shift->getc(@_) } sub PRINT { shift->print(@_) } sub PRINTF { shift->print(sprintf(shift, @_)) } sub READ { shift->read(@_) } sub READLINE { wantarray ? 
shift->getlines(@_) : shift->getline(@_) } sub WRITE { shift->write(@_); } sub CLOSE { shift->close(@_); } sub SEEK { shift->seek(@_); } sub TELL { shift->tell(@_); } sub EOF { shift->eof(@_); } #------------------------------------------------------------ 1; __END__ =back =cut =head1 WARNINGS Perl's TIEHANDLE spec was incomplete prior to 5.005_57; it was missing support for C<seek()>, C<tell()>, and C<eof()>. Attempting to use these functions with an IO::Scalar will not work prior to 5.005_57. IO::Scalar will not have the relevant methods invoked; and even worse, this kind of bug can lie dormant for a while. If you turn warnings on (via C<$^W> or C<perl -w>), and you see something like this... attempt to seek on unopened filehandle ...then you are probably trying to use one of these functions on an IO::Scalar with an old Perl. The remedy is to simply use the OO version; e.g.: $SH->seek(0,0); ### GOOD: will work on any 5.005 seek($SH,0,0); ### WARNING: will only work on 5.005_57 and beyond =head1 VERSION $Id: Scalar.pm 1248 2008-03-25 00:51:31Z warnes $ =head1 AUTHORS =head2 Primary Maintainer David F. Skoll (F<dfs@roaringpenguin.com>). =head2 Principal author Eryq (F<eryq@zeegee.com>). President, ZeeGee Software Inc (F<http://www.zeegee.com>). =head2 Other contributors The full set of contributors always includes the folks mentioned in L<IO::Stringy/"CHANGE LOG">. But just the same, special thanks to the following individuals for their invaluable contributions (if I've forgotten or misspelled your name, please email me!): I<Andy Glew,> for contributing C<getc()>. I<Brandon Browning,> for suggesting C<opened()>. I<David Richter,> for finding and fixing the bug in C<PRINTF()>. I<Eric L. Brine,> for his offset-using read() and write() implementations. I<Richard Jones,> for his patches to massively improve the performance of C<getline()> and add C<sysread> and C<syswrite>. I<B. K. Oxley (binkley),> for stringification and inheritance improvements, and sundry good ideas. I<Doug Wilson,> for the IO::Handle inheritance and automatic tie-ing. =head1 SEE ALSO L<IO::String>, which is quite similar but which was designed more-recently and with an IO::Handle-like interface in mind, so you could mix OO- and native-filehandle usage without using tied(). I<Note:> as of version 2.x, these classes all work like their IO::Handle counterparts, so we have comparable functionality to IO::String. =cut ����������������������������������������������������������������������������������������������������������������������������������������������������������������gdata/inst/perl/IO/Lines.pm�������������������������������������������������������������������������0000644�0001751�0000144�00000010333�13003720416�015202� 0����������������������������������������������������������������������������������������������������ustar �hornik��������������������������users������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������package IO::Lines; =head1 NAME IO::Lines - IO:: interface for reading/writing an array of lines =head1 SYNOPSIS use IO::Lines; ### See IO::ScalarArray for details =head1 DESCRIPTION This class implements objects which behave just like FileHandle (or IO::Handle) objects, except that you may use them to write to (or read from) an array of lines. They can be tiehandle'd as well. 
This is a subclass of L<IO::ScalarArray|IO::ScalarArray> in which the underlying array has its data stored in a line-oriented-format: that is, every element ends in a C<"\n">, with the possible exception of the final element. This makes C<getline()> I<much> more efficient; if you plan to do line-oriented reading/printing, you want this class. The C<print()> method will enforce this rule, so you can print arbitrary data to the line-array: it will break the data at newlines appropriately. See L<IO::ScalarArray> for full usage and warnings. =cut use Carp; use strict; use IO::ScalarArray; use vars qw($VERSION @ISA); # The package version, both in 1.23 style *and* usable by MakeMaker: $VERSION = "2.110"; # Inheritance: @ISA = qw(IO::ScalarArray); ### also gets us new_tie :-) #------------------------------ # # getline # # Instance method, override. # Return the next line, or undef on end of data. # Can safely be called in an array context. # Currently, lines are delimited by "\n". # sub getline { my $self = shift; if (!defined $/) { return join( '', $self->_getlines_for_newlines ); } elsif ($/ eq "\n") { if (!*$self->{Pos}) { ### full line... return *$self->{AR}[*$self->{Str}++]; } else { ### partial line... my $partial = substr(*$self->{AR}[*$self->{Str}++], *$self->{Pos}); *$self->{Pos} = 0; return $partial; } } else { croak 'unsupported $/: must be "\n" or undef'; } } #------------------------------ # # getlines # # Instance method, override. # Return an array comprised of the remaining lines, or () on end of data. # Must be called in an array context. # Currently, lines are delimited by "\n". # sub getlines { my $self = shift; wantarray or croak("can't call getlines in scalar context!"); if ((defined $/) and ($/ eq "\n")) { return $self->_getlines_for_newlines(@_); } else { ### slow but steady return $self->SUPER::getlines(@_); } } #------------------------------ # # _getlines_for_newlines # # Instance method, private. # If $/ is newline, do fast getlines. # This CAN NOT invoke getline! # sub _getlines_for_newlines { my $self = shift; my ($rArray, $Str, $Pos) = @{*$self}{ qw( AR Str Pos ) }; my @partial = (); if ($Pos) { ### partial line... @partial = (substr( $rArray->[ $Str++ ], $Pos )); *$self->{Pos} = 0; } *$self->{Str} = scalar @$rArray; ### about to exhaust @$rArray return (@partial, @$rArray[ $Str .. $#$rArray ]); ### remaining full lines... } #------------------------------ # # print ARGS... # # Instance method, override. # Print ARGS to the underlying line array. # sub print { if (defined $\ && $\ ne "\n") { croak 'unsupported $\: must be "\n" or undef'; } my $self = shift; ### print STDERR "\n[[ARRAY WAS...\n", @{*$self->{AR}}, "<<EOF>>\n"; my @lines = split /^/, join('', @_); @lines or return 1; ### Did the previous print not end with a newline? ### If so, append first line: if (@{*$self->{AR}} and (*$self->{AR}[-1] !~ /\n\Z/)) { *$self->{AR}[-1] .= shift @lines; } push @{*$self->{AR}}, @lines; ### add the remainder ### print STDERR "\n[[ARRAY IS NOW...\n", @{*$self->{AR}}, "<<EOF>>\n"; 1; } #------------------------------ 1; __END__ =head1 VERSION $Id: Lines.pm 1248 2008-03-25 00:51:31Z warnes $ =head1 AUTHORS =head2 Primary Maintainer David F. Skoll (F<dfs@roaringpenguin.com>). =head2 Principal author Eryq (F<eryq@zeegee.com>). President, ZeeGee Software Inc (F<http://www.zeegee.com>). =head2 Other contributors Thanks to the following individuals for their invaluable contributions (if I've forgotten or misspelled your name, please email me!): I<Morris M. 
Siegel,> for his $/ patch and the new C<getlines()>. I<Doug Wilson,> for the IO::Handle inheritance and automatic tie-ing. =cut �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������gdata/inst/perl/IO/WrapTie.pm�����������������������������������������������������������������������0000644�0001751�0000144�00000034644�13003720416�015516� 0����������������������������������������������������������������������������������������������������ustar �hornik��������������������������users������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������# SEE DOCUMENTATION AT BOTTOM OF FILE #------------------------------------------------------------ package IO::WrapTie; #------------------------------------------------------------ require 5.004; ### for tie use strict; use vars qw(@ISA @EXPORT $VERSION); use Exporter; # Inheritance, exporting, and package version: @ISA = qw(Exporter); @EXPORT = qw(wraptie); $VERSION = "2.110"; # Function, exported. sub wraptie { IO::WrapTie::Master->new(@_); } # Class method; BACKWARDS-COMPATIBILITY ONLY! sub new { shift; IO::WrapTie::Master->new(@_); } #------------------------------------------------------------ package IO::WrapTie::Master; #------------------------------------------------------------ use strict; use vars qw(@ISA $AUTOLOAD); use IO::Handle; # We inherit from IO::Handle to get methods which invoke i/o operators, # like print(), on our tied handle: @ISA = qw(IO::Handle); #------------------------------ # new SLAVE, TIEARGS... #------------------------------ # Create a new subclass of IO::Handle which... # # (1) Handles i/o OPERATORS because it is tied to an instance of # an i/o-like class, like IO::Scalar. # # (2) Handles i/o METHODS by delegating them to that same tied object!. # # Arguments are the slave class (e.g., IO::Scalar), followed by all # the arguments normally sent into that class's TIEHANDLE method. # In other words, much like the arguments to tie(). :-) # # NOTE: # The thing $x we return must be a BLESSED REF, for ($x->print()). # The underlying symbol must be a FILEHANDLE, for (print $x "foo"). # It has to have a way of getting to the "real" back-end object... # sub new { my $master = shift; my $io = IO::Handle->new; ### create a new handle my $slave = shift; tie *$io, $slave, @_; ### tie: will invoke slave's TIEHANDLE bless $io, $master; ### return a master } #------------------------------ # AUTOLOAD #------------------------------ # Delegate method invocations on the master to the underlying slave. # sub AUTOLOAD { my $method = $AUTOLOAD; $method =~ s/.*:://; my $self = shift; tied(*$self)->$method(\@_); } #------------------------------ # PRELOAD #------------------------------ # Utility. # # Most methods like print(), getline(), etc. which work on the tied object # via Perl's i/o operators (like 'print') are inherited from IO::Handle. # # Other methods, like seek() and sref(), we must delegate ourselves. # AUTOLOAD takes care of these. # # However, it may be necessary to preload delegators into your # own class. PRELOAD will do this. 
# sub PRELOAD { my $class = shift; foreach (@_) { eval "sub ${class}::$_ { my \$s = shift; tied(*\$s)->$_(\@_) }"; } } # Preload delegators for some standard methods which we can't simply # inherit from IO::Handle... for example, some IO::Handle methods # assume that there is an underlying file descriptor. # PRELOAD IO::WrapTie::Master qw(open opened close read clearerr eof seek tell setpos getpos); #------------------------------------------------------------ package IO::WrapTie::Slave; #------------------------------------------------------------ # Teeny private class providing a new_tie constructor... # # HOW IT ALL WORKS: # # Slaves inherit from this class. # # When you send a new_tie() message to a tie-slave class (like IO::Scalar), # it first determines what class should provide its master, via TIE_MASTER. # In this case, IO::Scalar->TIE_MASTER would return IO::Scalar::Master. # Then, we create a new master (an IO::Scalar::Master) with the same args # sent to new_tie. # # In general, the new() method of the master is inherited directly # from IO::WrapTie::Master. # sub new_tie { my $self = shift; $self->TIE_MASTER->new($self,@_); ### e.g., IO::Scalar::Master->new(@_) } # Default class method for new_tie(). # All your tie-slave class (like IO::Scalar) has to do is override this # method with a method that returns the name of an appropriate "master" # class for tying that slave. # sub TIE_MASTER { 'IO::WrapTie::Master' } #------------------------------ 1; __END__ package IO::WrapTie; ### for doc generator =head1 NAME IO::WrapTie - wrap tieable objects in IO::Handle interface I<This is currently Alpha code, released for comments. Please give me your feedback!> =head1 SYNOPSIS First of all, you'll need tie(), so: require 5.004; I<Function interface (experimental).> Use this with any existing class... use IO::WrapTie; use FooHandle; ### implements TIEHANDLE interface ### Suppose we want a "FooHandle->new(&FOO_RDWR, 2)". ### We can instead say... $FH = wraptie('FooHandle', &FOO_RDWR, 2); ### Now we can use... print $FH "Hello, "; ### traditional operator syntax... $FH->print("world!\n"); ### ...and OO syntax as well! I<OO interface (preferred).> You can inherit from the IO::WrapTie::Slave mixin to get a nifty C<new_tie()> constructor... #------------------------------ package FooHandle; ### a class which can TIEHANDLE use IO::WrapTie; @ISA = qw(IO::WrapTie::Slave); ### inherit new_tie() ... #------------------------------ package main; $FH = FooHandle->new_tie(&FOO_RDWR, 2); ### $FH is an IO::WrapTie::Master print $FH "Hello, "; ### traditional operator syntax $FH->print("world!\n"); ### OO syntax See IO::Scalar as an example. It also shows you how to create classes which work both with and without 5.004. =head1 DESCRIPTION Suppose you have a class C<FooHandle>, where... =over 4 =item * B<FooHandle does not inherit from IO::Handle;> that is, it performs filehandle-like I/O, but to something other than an underlying file descriptor. Good examples are IO::Scalar (for printing to a string) and IO::Lines (for printing to an array of lines). =item * B<FooHandle implements the TIEHANDLE interface> (see L<perltie>); that is, it provides methods TIEHANDLE, GETC, PRINT, PRINTF, READ, and READLINE. =item * B<FooHandle implements the traditional OO interface> of FileHandle and IO::Handle; i.e., it contains methods like getline(), read(), print(), seek(), tell(), eof(), etc. 
=back Normally, users of your class would have two options: =over 4 =item * B<Use only OO syntax,> and forsake named I/O operators like 'print'. =item * B<Use with tie,> and forsake treating it as a first-class object (i.e., class-specific methods can only be invoked through the underlying object via tied()... giving the object a "split personality"). =back But now with IO::WrapTie, you can say: $WT = wraptie('FooHandle', &FOO_RDWR, 2); $WT->print("Hello, world\n"); ### OO syntax print $WT "Yes!\n"; ### Named operator syntax too! $WT->weird_stuff; ### Other methods! And if you're authoring a class like FooHandle, just have it inherit from C<IO::WrapTie::Slave> and that first line becomes even prettier: $WT = FooHandle->new_tie(&FOO_RDWR, 2); B<The bottom line:> now, almost any class can look and work exactly like an IO::Handle... and be used both with OO and non-OO filehandle syntax. =head1 HOW IT ALL WORKS =head2 The data structures Consider this example code, using classes in this distribution: use IO::Scalar; use IO::WrapTie; $WT = wraptie('IO::Scalar',\$s); print $WT "Hello, "; $WT->print("world!\n"); In it, the wraptie() function creates a data structure as follows: * $WT is a blessed reference to a tied filehandle $WT glob; that glob is tied to the "Slave" object. | * You would do all your i/o with $WT directly. | | | ,---isa--> IO::WrapTie::Master >--isa--> IO::Handle V / .-------------. | | | | * Perl i/o operators work on the tied object, | "Master" | invoking the TIEHANDLE methods. | | * Method invocations are delegated to the tied | | slave. `-------------' | tied(*$WT) | .---isa--> IO::WrapTie::Slave V / .-------------. | | | "Slave" | * Instance of FileHandle-like class which doesn't | | actually use file descriptors, like IO::Scalar. | IO::Scalar | * The slave can be any kind of object. | | * Must implement the TIEHANDLE interface. `-------------' I<NOTE:> just as an IO::Handle is really just a blessed reference to a I<traditional> filehandle glob... so also, an IO::WrapTie::Master is really just a blessed reference to a filehandle glob I<which has been tied to some "slave" class.> =head2 How wraptie() works =over 4 =item 1. The call to function C<wraptie(SLAVECLASS, TIEARGS...)> is passed onto C<IO::WrapTie::Master::new()>. Note that class IO::WrapTie::Master is a subclass of IO::Handle. =item 2. The C<IO::WrapTie::Master::new> method creates a new IO::Handle object, reblessed into class IO::WrapTie::Master. This object is the I<master>, which will be returned from the constructor. At the same time... =item 3. The C<new> method also creates the I<slave>: this is an instance of SLAVECLASS which is created by tying the master's IO::Handle to SLAVECLASS via C<tie(HANDLE, SLAVECLASS, TIEARGS...)>. This call to C<tie()> creates the slave in the following manner: =item 4. Class SLAVECLASS is sent the message C<TIEHANDLE(TIEARGS...)>; it will usually delegate this to C<SLAVECLASS::new(TIEARGS...)>, resulting in a new instance of SLAVECLASS being created and returned. =item 5. Once both master and slave have been created, the master is returned to the caller. =back =head2 How I/O operators work (on the master) Consider using an i/o operator on the master: print $WT "Hello, world!\n"; Since the master ($WT) is really a [blessed] reference to a glob, the normal Perl i/o operators like C<print> may be used on it. They will just operate on the symbol part of the glob. 
Since the glob is tied to the slave, the slave's PRINT method (part of the TIEHANDLE interface) will be automatically invoked. If the slave is an IO::Scalar, that means IO::Scalar::PRINT will be invoked, and that method happens to delegate to the C<print()> method of the same class. So the I<real> work is ultimately done by IO::Scalar::print(). =head2 How methods work (on the master) Consider using a method on the master: $WT->print("Hello, world!\n"); Since the master ($WT) is blessed into the class IO::WrapTie::Master, Perl first attempts to find a C<print()> method there. Failing that, Perl next attempts to find a C<print()> method in the superclass, IO::Handle. It just so happens that there I<is> such a method; that method merely invokes the C<print> i/o operator on the self object... and for that, see above! But let's suppose we're dealing with a method which I<isn't> part of IO::Handle... for example: my $sref = $WT->sref; In this case, the intuitive behavior is to have the master delegate the method invocation to the slave (now do you see where the designations come from?). This is indeed what happens: IO::WrapTie::Master contains an AUTOLOAD method which performs the delegation. So: when C<sref()> can't be found in IO::Handle, the AUTOLOAD method of IO::WrapTie::Master is invoked, and the standard behavior of delegating the method to the underlying slave (here, an IO::Scalar) is done. Sometimes, to get this to work properly, you may need to create a subclass of IO::WrapTie::Master which is an effective master for I<your> class, and do the delegation there. =head1 NOTES B<Why not simply use the object's OO interface?> Because that means forsaking the use of named operators like print(), and you may need to pass the object to a subroutine which will attempt to use those operators: $O = FooHandle->new(&FOO_RDWR, 2); $O->print("Hello, world\n"); ### OO syntax is okay, BUT.... sub nope { print $_[0] "Nope!\n" } X nope($O); ### ERROR!!! (not a glob ref) B<Why not simply use tie()?> Because (1) you have to use tied() to invoke methods in the object's public interface (yuck), and (2) you may need to pass the tied symbol to another subroutine which will attempt to treat it in an OO-way... and that will break it: tie *T, 'FooHandle', &FOO_RDWR, 2; print T "Hello, world\n"; ### Operator is okay, BUT... tied(*T)->other_stuff; ### yuck! AND... sub nope { shift->print("Nope!\n") } X nope(\*T); ### ERROR!!! (method "print" on unblessed ref) B<Why a master and slave? Why not simply write FooHandle to inherit from IO::Handle?> I tried this, with an implementation similar to that of IO::Socket. The problem is that I<the whole point is to use this with objects that don't have an underlying file/socket descriptor.>. Subclassing IO::Handle will work fine for the OO stuff, and fine with named operators I<if> you tie()... but if you just attempt to say: $IO = FooHandle->new(&FOO_RDWR, 2); print $IO "Hello!\n"; you get a warning from Perl like: Filehandle GEN001 never opened because it's trying to do system-level i/o on an (unopened) file descriptor. To avoid this, you apparently have to tie() the handle... which brings us right back to where we started! At least the IO::WrapTie mixin lets us say: $IO = FooHandle->new_tie(&FOO_RDWR, 2); print $IO "Hello!\n"; and so is not I<too> bad. C<:-)> =head1 WARNINGS Remember: this stuff is for doing FileHandle-like i/o on things I<without underlying file descriptors>. If you have an underlying file descriptor, you're better off just inheriting from IO::Handle. 
B<Be aware that new_tie() always returns an instance of a kind of IO::WrapTie::Master...> it does B<not> return an instance of the i/o class you're tying to! Invoking some methods on the master object causes AUTOLOAD to delegate them to the slave object... so it I<looks> like you're manipulating a "FooHandle" object directly, but you're not. I have not explored all the ramifications of this use of tie(). I<Here there be dragons>. =head1 VERSION $Id: WrapTie.pm 1248 2008-03-25 00:51:31Z warnes $ =head1 AUTHOR =item Primary Maintainer David F. Skoll (F<dfs@roaringpenguin.com>). =item Original Author Eryq (F<eryq@zeegee.com>). President, ZeeGee Software Inc (F<http://www.zeegee.com>). =cut ��������������������������������������������������������������������������������������������gdata/inst/perl/IO/Scalar.pm.html�������������������������������������������������������������������0000644�0001751�0000144�00000034162�13003720416�016306� 0����������������������������������������������������������������������������������������������������ustar �hornik��������������������������users������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������<HTML> <HEAD> <TITLE>IO::Scalar ZeeGee Software

IO::Scalar


NAME

IO::Scalar - IO:: interface for reading/writing a scalar


SYNOPSIS

Perform I/O on strings, using the basic OO interface...

    use 5.005;
    use IO::Scalar;
    $data = "My message:\n";
    ### Open a handle on a string, and append to it:
    $SH = new IO::Scalar \$data;
    $SH->print("Hello");       
    $SH->print(", world!\nBye now!\n");  
    print "The string is now: ", $data, "\n";
    ### Open a handle on a string, read it line-by-line, then close it:
    $SH = new IO::Scalar \$data;
    while (defined($_ = $SH->getline)) { 
	print "Got line: $_";
    }
    $SH->close;
    ### Open a handle on a string, and slurp in all the lines:
    $SH = new IO::Scalar \$data;
    print "All lines:\n", $SH->getlines; 
    ### Get the current position (either of two ways):
    $pos = $SH->getpos;         
    $offset = $SH->tell;  
    ### Set the current position (either of two ways):
    $SH->setpos($pos);        
    $SH->seek($offset, 0);
    ### Open an anonymous temporary scalar:
    $SH = new IO::Scalar;
    $SH->print("Hi there!");
    print "I printed: ", ${$SH->sref}, "\n";      ### get at value

Don't like OO for your I/O? No problem. Thanks to the magic of an invisible tie(), the following now works out of the box, just as it does with IO::Handle:

    use 5.005;
    use IO::Scalar;
    $data = "My message:\n";
     
    ### Open a handle on a string, and append to it:
    $SH = new IO::Scalar \$data;
    print $SH "Hello";    
    print $SH ", world!\nBye now!\n";
    print "The string is now: ", $data, "\n";
    ### Open a handle on a string, read it line-by-line, then close it:
    $SH = new IO::Scalar \$data;
    while (<$SH>) {
	print "Got line: $_";
    }
    close $SH;
    ### Open a handle on a string, and slurp in all the lines:
    $SH = new IO::Scalar \$data;
    print "All lines:\n", <$SH>;
    ### Get the current position (WARNING: requires 5.6):
    $offset = tell $SH;
    ### Set the current position (WARNING: requires 5.6):
    seek $SH, $offset, 0;
    ### Open an anonymous temporary scalar:
    $SH = new IO::Scalar;
    print $SH "Hi there!";
    print "I printed: ", ${$SH->sref}, "\n";      ### get at value

And for you folks with 1.x code out there: the old tie() style still works, though this is unnecessary and deprecated:

    use IO::Scalar;
    ### Writing to a scalar...
    my $s; 
    tie *OUT, 'IO::Scalar', \$s;
    print OUT "line 1\nline 2\n", "line 3\n";
    print "String is now: $s\n"
    ### Reading and writing an anonymous scalar... 
    tie *OUT, 'IO::Scalar';
    print OUT "line 1\nline 2\n", "line 3\n";
    tied(OUT)->seek(0,0);
    while (<OUT>) { 
        print "Got line: ", $_;
    }

Stringification works, too!

    my $SH = new IO::Scalar \$data;
    print $SH "Hello, ";
    print $SH "world!";
    print "I printed: $SH\n";


DESCRIPTION

This class is part of the IO::Stringy distribution; see IO::Stringy for change log and general information.

The IO::Scalar class implements objects which behave just like IO::Handle (or FileHandle) objects, except that you may use them to write to (or read from) scalars. These handles are automatically tiehandle'd (though please see WARNINGS for information relevant to your Perl version).

Basically, this:

    my $s;
    $SH = new IO::Scalar \$s;
    $SH->print("Hel", "lo, ");         ### OO style
    $SH->print("world!\n");            ### ditto

Or this:

    my $s;
    $SH = tie *OUT, 'IO::Scalar', \$s;
    print OUT "Hel", "lo, ";           ### non-OO style
    print OUT "world!\n";              ### ditto

Causes $s to be set to:

    "Hello, world!\n" 


PUBLIC INTERFACE


Construction

new [ARGS...]
Class method. Return a new, unattached scalar handle. If any arguments are given, they're sent to open().

open [SCALARREF]
Instance method. Open the scalar handle on a new scalar, pointed to by SCALARREF. If no SCALARREF is given, a "private" scalar is created to hold the file data.

Returns the self object on success, undefined on error.

opened
Instance method. Is the scalar handle opened on something?

close
Instance method. Disassociate the scalar handle from its underlying scalar. Done automatically on destroy.
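
To tie the construction methods above together, here is a minimal sketch (the $buffer and $other variables are illustrative only): it creates a handle, checks that it is attached, re-opens it on a different scalar, and finally closes it.

    use IO::Scalar;

    my $buffer = '';
    my $SH = new IO::Scalar \$buffer;        ### new() opens on \$buffer
    print "attached\n" if $SH->opened;       ### true: handle has a scalar

    my $other = "already here\n";
    $SH->open(\$other) or die "open failed"; ### re-open on another scalar
    $SH->close;                              ### detach again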


Input and output

flush
Instance method. No-op, provided for OO compatibility.

getc
Instance method. Return the next character, or undef if none remain.

getline
Instance method. Return the next line, or undef on end of string. Can safely be called in an array context. Currently, lines are delimited by "\n".

getlines
Instance method. Get all remaining lines. It will croak() if accidentally called in a scalar context.
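
For example (a small sketch, reusing the $data string from the synopsis):

    my $SH = new IO::Scalar \$data;
    my @lines = $SH->getlines;        ### fine: list context
    ### my $bad = $SH->getlines;      ### would croak: scalar context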

print ARGS...
Instance method. Print ARGS to the underlying scalar.

Warning: print() still always seeks to the end of the string before appending; even so, if you also perform seek()s and tell()s on the handle, it is safer to explicitly seek to the end before any subsequent print().
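
A hedged sketch of that recommended pattern (variable names are illustrative): after seeking around to read, explicitly seek back to the end before printing again.

    my $s  = "header\n";
    my $SH = new IO::Scalar \$s;
    $SH->print("body\n");             ### appends at end of string

    $SH->seek(0, 0);                  ### rewind to re-read the header
    my $first = $SH->getline;

    $SH->seek(0, 2);                  ### explicit seek-to-end...
    $SH->print("footer\n");           ### ...before the next print()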

read BUF, NBYTES, [OFFSET]
Instance method. Read some bytes from the scalar. Returns the number of bytes actually read, 0 on end-of-file, undef on error.
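
A minimal sketch (assuming $data already holds some text):

    my $SH  = new IO::Scalar \$data;
    my $buf = '';
    my $n = $SH->read($buf, 10);            ### up to 10 bytes into $buf
    print "read $n bytes: $buf\n";
    $SH->read($buf, 10, length($buf));      ### append next chunk at OFFSET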

write BUF, NBYTES, [OFFSET]
Instance method. Write some bytes to the scalar.

sysread BUF, LEN, [OFFSET]
Instance method. Read some bytes from the scalar. Returns the number of bytes actually read, 0 on end-of-file, undef on error.

syswrite BUF, NBYTES, [OFFSET]
Instance method. Write some bytes to the scalar.


Seeking/telling and other attributes

autoflush
Instance method. No-op, provided for OO compatibility.

binmode
Instance method. No-op, provided for OO compatibility.

clearerr
Instance method. Clear the error and EOF flags. A no-op.

eof
Instance method. Are we at end of file?

seek OFFSET, WHENCE
Instance method. Seek to a given position in the stream.
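
WHENCE follows the usual convention: 0 seeks from the start, 1 from the current position, and 2 from the end of the data (the same values as Fcntl's SEEK_SET, SEEK_CUR and SEEK_END). A brief sketch:

    $SH->seek(0, 0);       ### rewind to the start           (SEEK_SET)
    $SH->seek(5, 1);       ### skip 5 bytes forward          (SEEK_CUR)
    $SH->seek(-3, 2);      ### 3 bytes before end-of-string  (SEEK_END)
    print "now at offset ", $SH->tell, "\n";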

sysseek OFFSET, WHENCE
Instance method. Identical to seek OFFSET, WHENCE, q.v.

tell
Instance method. Return the current position in the stream, as a numeric offset.

use_RS [YESNO]
Instance method. Obey the current setting of $/, like IO::Handle does? Default is false in 1.x, true in 2.x and later.

setpos POS
Instance method. Set the current position, using the opaque value returned by getpos().

getpos
Instance method. Return the current position in the string, as an opaque object.
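
The usual save-and-restore pattern looks like this (a sketch; the value from getpos() should be passed back to setpos() unchanged):

    my $pos = $SH->getpos;          ### remember the current position
    print "Rest of string:\n", $SH->getlines;
    $SH->setpos($pos);              ### ...and jump back there later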

sref
Instance method. Return a reference to the underlying scalar.
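
For instance (a sketch, reusing $data from the synopsis), sref() gives direct access to the scalar the handle reads from and writes to:

    my $SH = new IO::Scalar \$data;
    $SH->print("more text\n");
    my $sref = $SH->sref;            ### same as \$data
    print "Underlying string is now: $$sref";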


WARNINGS

Perl's TIEHANDLE spec was incomplete prior to 5.005_57; it was missing support for seek(), tell(), and eof(). Attempting to use these functions with an IO::Scalar will not work prior to 5.005_57. IO::Scalar will not have the relevant methods invoked; and even worse, this kind of bug can lie dormant for a while. If you turn warnings on (via $^W or perl -w), and you see something like this...

    attempt to seek on unopened filehandle

...then you are probably trying to use one of these functions on an IO::Scalar with an old Perl. The remedy is to simply use the OO version; e.g.:

    $SH->seek(0,0);    ### GOOD: will work on any 5.005
    seek($SH,0,0);     ### WARNING: will only work on 5.005_57 and beyond


VERSION

$Id: Scalar.pm.html 625 2005-06-09 14:20:30Z nj7w $


AUTHORS


Principal author

Eryq (eryq@zeegee.com). President, ZeeGee Software Inc (http://www.zeegee.com).


Other contributors

The full set of contributors always includes the folks mentioned in CHANGE LOG. But just the same, special thanks to the following individuals for their invaluable contributions (if I've forgotten or misspelled your name, please email me!):

Andy Glew, for contributing getc().

Brandon Browning, for suggesting opened().

David Richter, for finding and fixing the bug in PRINTF().

Eric L. Brine, for his offset-using read() and write() implementations.

Richard Jones, for his patches to massively improve the performance of getline() and add sysread and syswrite.

B. K. Oxley (binkley), for stringification and inheritance improvements, and sundry good ideas.

Doug Wilson, for the IO::Handle inheritance and automatic tie-ing.


SEE ALSO

IO::String, which is quite similar but was designed more recently, with an IO::Handle-like interface in mind, so you can mix OO- and native-filehandle usage without using tied().

Note: as of version 2.x, these classes all work like their IO::Handle counterparts, so we have comparable functionality to IO::String.


gdata/inst/perl/IO/ScalarArray.pm0000644000175100001440000004226413003720416016344 0ustar hornikuserspackage IO::ScalarArray; =head1 NAME IO::ScalarArray - IO:: interface for reading/writing an array of scalars =head1 SYNOPSIS Perform I/O on strings, using the basic OO interface... use IO::ScalarArray; @data = ("My mes", "sage:\n"); ### Open a handle on an array, and append to it: $AH = new IO::ScalarArray \@data; $AH->print("Hello"); $AH->print(", world!\nBye now!\n"); print "The array is now: ", @data, "\n"; ### Open a handle on an array, read it line-by-line, then close it: $AH = new IO::ScalarArray \@data; while (defined($_ = $AH->getline)) { print "Got line: $_"; } $AH->close; ### Open a handle on an array, and slurp in all the lines: $AH = new IO::ScalarArray \@data; print "All lines:\n", $AH->getlines; ### Get the current position (either of two ways): $pos = $AH->getpos; $offset = $AH->tell; ### Set the current position (either of two ways): $AH->setpos($pos); $AH->seek($offset, 0); ### Open an anonymous temporary array: $AH = new IO::ScalarArray; $AH->print("Hi there!"); print "I printed: ", @{$AH->aref}, "\n"; ### get at value Don't like OO for your I/O? No problem. Thanks to the magic of an invisible tie(), the following now works out of the box, just as it does with IO::Handle: use IO::ScalarArray; @data = ("My mes", "sage:\n"); ### Open a handle on an array, and append to it: $AH = new IO::ScalarArray \@data; print $AH "Hello"; print $AH ", world!\nBye now!\n"; print "The array is now: ", @data, "\n"; ### Open a handle on a string, read it line-by-line, then close it: $AH = new IO::ScalarArray \@data; while (<$AH>) { print "Got line: $_"; } close $AH; ### Open a handle on a string, and slurp in all the lines: $AH = new IO::ScalarArray \@data; print "All lines:\n", <$AH>; ### Get the current position (WARNING: requires 5.6): $offset = tell $AH; ### Set the current position (WARNING: requires 5.6): seek $AH, $offset, 0; ### Open an anonymous temporary scalar: $AH = new IO::ScalarArray; print $AH "Hi there!"; print "I printed: ", @{$AH->aref}, "\n"; ### get at value And for you folks with 1.x code out there: the old tie() style still works, though this is I: use IO::ScalarArray; ### Writing to a scalar... my @a; tie *OUT, 'IO::ScalarArray', \@a; print OUT "line 1\nline 2\n", "line 3\n"; print "Array is now: ", @a, "\n" ### Reading and writing an anonymous scalar... tie *OUT, 'IO::ScalarArray'; print OUT "line 1\nline 2\n", "line 3\n"; tied(OUT)->seek(0,0); while () { print "Got line: ", $_; } =head1 DESCRIPTION This class is part of the IO::Stringy distribution; see L for change log and general information. The IO::ScalarArray class implements objects which behave just like IO::Handle (or FileHandle) objects, except that you may use them to write to (or read from) arrays of scalars. Logically, an array of scalars defines an in-core "file" whose contents are the concatenation of the scalars in the array. The handles created by this class are automatically tiehandle'd (though please see L<"WARNINGS"> for information relevant to your Perl version). For writing large amounts of data with individual print() statements, this class is likely to be more efficient than IO::Scalar. 
Basically, this: my @a; $AH = new IO::ScalarArray \@a; $AH->print("Hel", "lo, "); ### OO style $AH->print("world!\n"); ### ditto Or this: my @a; $AH = new IO::ScalarArray \@a; print $AH "Hel", "lo, "; ### non-OO style print $AH "world!\n"; ### ditto Causes @a to be set to the following array of 3 strings: ( "Hel" , "lo, " , "world!\n" ) See L and compare with this class. =head1 PUBLIC INTERFACE =cut use Carp; use strict; use vars qw($VERSION @ISA); use IO::Handle; # The package version, both in 1.23 style *and* usable by MakeMaker: $VERSION = "2.110"; # Inheritance: @ISA = qw(IO::Handle); require IO::WrapTie and push @ISA, 'IO::WrapTie::Slave' if ($] >= 5.004); #============================== =head2 Construction =over 4 =cut #------------------------------ =item new [ARGS...] I Return a new, unattached array handle. If any arguments are given, they're sent to open(). =cut sub new { my $proto = shift; my $class = ref($proto) || $proto; my $self = bless \do { local *FH }, $class; tie *$self, $class, $self; $self->open(@_); ### open on anonymous by default $self; } sub DESTROY { shift->close; } #------------------------------ =item open [ARRAYREF] I Open the array handle on a new array, pointed to by ARRAYREF. If no ARRAYREF is given, a "private" array is created to hold the file data. Returns the self object on success, undefined on error. =cut sub open { my ($self, $aref) = @_; ### Sanity: defined($aref) or do {my @a; $aref = \@a}; (ref($aref) eq "ARRAY") or croak "open needs a ref to a array"; ### Setup: $self->setpos([0,0]); *$self->{AR} = $aref; $self; } #------------------------------ =item opened I Is the array handle opened on something? =cut sub opened { *{shift()}->{AR}; } #------------------------------ =item close I Disassociate the array handle from its underlying array. Done automatically on destroy. =cut sub close { my $self = shift; %{*$self} = (); 1; } =back =cut #============================== =head2 Input and output =over 4 =cut #------------------------------ =item flush I No-op, provided for OO compatibility. =cut sub flush { "0 but true" } #------------------------------ =item getc I Return the next character, or undef if none remain. This does a read(1), which is somewhat costly. =cut sub getc { my $buf = ''; ($_[0]->read($buf, 1) ? $buf : undef); } #------------------------------ =item getline I Return the next line, or undef on end of data. Can safely be called in an array context. Currently, lines are delimited by "\n". =cut sub getline { my $self = shift; my ($str, $line) = (undef, ''); ### Minimal impact implementation! ### We do the fast fast thing (no regexps) if using the ### classic input record separator. ### Case 1: $/ is undef: slurp all... if (!defined($/)) { return undef if ($self->eof); ### Get the rest of the current string, followed by remaining strings: my $ar = *$self->{AR}; my @slurp = ( substr($ar->[*$self->{Str}], *$self->{Pos}), @$ar[(1 + *$self->{Str}) .. 
$#$ar ] ); ### Seek to end: $self->_setpos_to_eof; return join('', @slurp); } ### Case 2: $/ is "\n": elsif ($/ eq "\012") { ### Until we hit EOF (or exitted because of a found line): until ($self->eof) { ### If at end of current string, go fwd to next one (won't be EOF): if ($self->_eos) {++*$self->{Str}, *$self->{Pos}=0}; ### Get ref to current string in array, and set internal pos mark: $str = \(*$self->{AR}[*$self->{Str}]); ### get current string pos($$str) = *$self->{Pos}; ### start matching from here ### Get from here to either \n or end of string, and add to line: $$str =~ m/\G(.*?)((\n)|\Z)/g; ### match to 1st \n or EOS $line .= $1.$2; ### add it *$self->{Pos} += length($1.$2); ### move fwd by len matched return $line if $3; ### done, got line with "\n" } return ($line eq '') ? undef : $line; ### return undef if EOF } ### Case 3: $/ is ref to int. Bail out. elsif (ref($/)) { croak '$/ given as a ref to int; currently unsupported'; } ### Case 4: $/ is either "" (paragraphs) or something weird... ### Bail for now. else { croak '$/ as given is currently unsupported'; } } #------------------------------ =item getlines I Get all remaining lines. It will croak() if accidentally called in a scalar context. =cut sub getlines { my $self = shift; wantarray or croak("can't call getlines in scalar context!"); my ($line, @lines); push @lines, $line while (defined($line = $self->getline)); @lines; } #------------------------------ =item print ARGS... I Print ARGS to the underlying array. Currently, this always causes a "seek to the end of the array" and generates a new array entry. This may change in the future. =cut sub print { my $self = shift; push @{*$self->{AR}}, join('', @_) . (defined($\) ? $\ : ""); ### add the data $self->_setpos_to_eof; 1; } #------------------------------ =item read BUF, NBYTES, [OFFSET]; I Read some bytes from the array. Returns the number of bytes actually read, 0 on end-of-file, undef on error. =cut sub read { my $self = $_[0]; ### we must use $_[1] as a ref my $n = $_[2]; my $off = $_[3] || 0; ### print "getline\n"; my $justread; my $len; ($off ? substr($_[1], $off) : $_[1]) = ''; ### Stop when we have zero bytes to go, or when we hit EOF: my @got; until (!$n or $self->eof) { ### If at end of current string, go forward to next one (won't be EOF): if ($self->_eos) { ++*$self->{Str}; *$self->{Pos} = 0; } ### Get longest possible desired substring of current string: $justread = substr(*$self->{AR}[*$self->{Str}], *$self->{Pos}, $n); $len = length($justread); push @got, $justread; $n -= $len; *$self->{Pos} += $len; } $_[1] .= join('', @got); return length($_[1])-$off; } #------------------------------ =item write BUF, NBYTES, [OFFSET]; I Write some bytes into the array. =cut sub write { my $self = $_[0]; my $n = $_[2]; my $off = $_[3] || 0; my $data = substr($_[1], $n, $off); $n = length($data); $self->print($data); return $n; } =back =cut #============================== =head2 Seeking/telling and other attributes =over 4 =cut #------------------------------ =item autoflush I No-op, provided for OO compatibility. =cut sub autoflush {} #------------------------------ =item binmode I No-op, provided for OO compatibility. =cut sub binmode {} #------------------------------ =item clearerr I Clear the error and EOF flags. A no-op. =cut sub clearerr { 1 } #------------------------------ =item eof I Are we at end of file? 
=cut sub eof { ### print "checking EOF [*$self->{Str}, *$self->{Pos}]\n"; ### print "SR = ", $#{*$self->{AR}}, "\n"; return 0 if (*{$_[0]}->{Str} < $#{*{$_[0]}->{AR}}); ### before EOA return 1 if (*{$_[0]}->{Str} > $#{*{$_[0]}->{AR}}); ### after EOA ### ### at EOA, past EOS: ((*{$_[0]}->{Str} == $#{*{$_[0]}->{AR}}) && ($_[0]->_eos)); } #------------------------------ # # _eos # # I Are we at end of the CURRENT string? # sub _eos { (*{$_[0]}->{Pos} >= length(*{$_[0]}->{AR}[*{$_[0]}->{Str}])); ### past last char } #------------------------------ =item seek POS,WHENCE I Seek to a given position in the stream. Only a WHENCE of 0 (SEEK_SET) is supported. =cut sub seek { my ($self, $pos, $whence) = @_; ### Seek: if ($whence == 0) { $self->_seek_set($pos); } elsif ($whence == 1) { $self->_seek_cur($pos); } elsif ($whence == 2) { $self->_seek_end($pos); } else { croak "bad seek whence ($whence)" } return 1; } #------------------------------ # # _seek_set POS # # Instance method, private. # Seek to $pos relative to start: # sub _seek_set { my ($self, $pos) = @_; ### Advance through array until done: my $istr = 0; while (($pos >= 0) && ($istr < scalar(@{*$self->{AR}}))) { if (length(*$self->{AR}[$istr]) > $pos) { ### it's in this string! return $self->setpos([$istr, $pos]); } else { ### it's in next string $pos -= length(*$self->{AR}[$istr++]); ### move forward one string } } ### If we reached this point, pos is at or past end; zoom to EOF: return $self->_setpos_to_eof; } #------------------------------ # # _seek_cur POS # # Instance method, private. # Seek to $pos relative to current position. # sub _seek_cur { my ($self, $pos) = @_; $self->_seek_set($self->tell + $pos); } #------------------------------ # # _seek_end POS # # Instance method, private. # Seek to $pos relative to end. # We actually seek relative to beginning, which is simple. # sub _seek_end { my ($self, $pos) = @_; $self->_seek_set($self->_tell_eof + $pos); } #------------------------------ =item tell I Return the current position in the stream, as a numeric offset. =cut sub tell { my $self = shift; my $off = 0; my ($s, $str_s); for ($s = 0; $s < *$self->{Str}; $s++) { ### count all "whole" scalars defined($str_s = *$self->{AR}[$s]) or $str_s = ''; ###print STDERR "COUNTING STRING $s (". length($str_s) . ")\n"; $off += length($str_s); } ###print STDERR "COUNTING POS ($self->{Pos})\n"; return ($off += *$self->{Pos}); ### plus the final, partial one } #------------------------------ # # _tell_eof # # Instance method, private. # Get position of EOF, as a numeric offset. # This is identical to the size of the stream - 1. # sub _tell_eof { my $self = shift; my $len = 0; foreach (@{*$self->{AR}}) { $len += length($_) } $len; } #------------------------------ =item setpos POS I Seek to a given position in the array, using the opaque getpos() value. Don't expect this to be a number. =cut sub setpos { my ($self, $pos) = @_; (ref($pos) eq 'ARRAY') or die "setpos: only use a value returned by getpos!\n"; (*$self->{Str}, *$self->{Pos}) = @$pos; } #------------------------------ # # _setpos_to_eof # # Fast-forward to EOF. # sub _setpos_to_eof { my $self = shift; $self->setpos([scalar(@{*$self->{AR}}), 0]); } #------------------------------ =item getpos I Return the current position in the array, as an opaque value. Don't expect this to be a number. =cut sub getpos { [*{$_[0]}->{Str}, *{$_[0]}->{Pos}]; } #------------------------------ =item aref I Return a reference to the underlying array. 
=cut sub aref { *{shift()}->{AR}; } =back =cut #------------------------------ # Tied handle methods... #------------------------------ ### Conventional tiehandle interface: sub TIEHANDLE { (defined($_[1]) && UNIVERSAL::isa($_[1],"IO::ScalarArray")) ? $_[1] : shift->new(@_) } sub GETC { shift->getc(@_) } sub PRINT { shift->print(@_) } sub PRINTF { shift->print(sprintf(shift, @_)) } sub READ { shift->read(@_) } sub READLINE { wantarray ? shift->getlines(@_) : shift->getline(@_) } sub WRITE { shift->write(@_); } sub CLOSE { shift->close(@_); } sub SEEK { shift->seek(@_); } sub TELL { shift->tell(@_); } sub EOF { shift->eof(@_); } #------------------------------------------------------------ 1; __END__ # SOME PRIVATE NOTES: # # * The "current position" is the position before the next # character to be read/written. # # * Str gives the string index of the current position, 0-based # # * Pos gives the offset within AR[Str], 0-based. # # * Inital pos is [0,0]. After print("Hello"), it is [1,0]. =head1 WARNINGS Perl's TIEHANDLE spec was incomplete prior to 5.005_57; it was missing support for C, C, and C. Attempting to use these functions with an IO::ScalarArray will not work prior to 5.005_57. IO::ScalarArray will not have the relevant methods invoked; and even worse, this kind of bug can lie dormant for a while. If you turn warnings on (via C<$^W> or C), and you see something like this... attempt to seek on unopened filehandle ...then you are probably trying to use one of these functions on an IO::ScalarArray with an old Perl. The remedy is to simply use the OO version; e.g.: $AH->seek(0,0); ### GOOD: will work on any 5.005 seek($AH,0,0); ### WARNING: will only work on 5.005_57 and beyond =head1 VERSION $Id: ScalarArray.pm 1248 2008-03-25 00:51:31Z warnes $ =head1 AUTHOR =head2 Primary Maintainer David F. Skoll (F). =head2 Principal author Eryq (F). President, ZeeGee Software Inc (F). =head2 Other contributors Thanks to the following individuals for their invaluable contributions (if I've forgotten or misspelled your name, please email me!): I for suggesting C. I for suggesting C. I for his offset-using read() and write() implementations. I for the IO::Handle inheritance and automatic tie-ing. =cut #------------------------------ 1; gdata/inst/perl/IO/Stringy.pm0000644000175100001440000002751013003720416015574 0ustar hornikuserspackage IO::Stringy; use vars qw($VERSION); $VERSION = "2.110"; 1; __END__ =head1 NAME IO-stringy - I/O on in-core objects like strings and arrays =head1 SYNOPSIS IO:: ::AtomicFile adpO Write a file which is updated atomically ERYQ ::Lines bdpO I/O handle to read/write to array of lines ERYQ ::Scalar RdpO I/O handle to read/write to a string ERYQ ::ScalarArray RdpO I/O handle to read/write to array of scalars ERYQ ::Wrap RdpO Wrap old-style FHs in standard OO interface ERYQ ::WrapTie adpO Tie your handles & retain full OO interface ERYQ =head1 DESCRIPTION This toolkit primarily provides modules for performing both traditional and object-oriented i/o) on things I than normal filehandles; in particular, L, L, and L. In the more-traditional IO::Handle front, we have L which may be used to painlessly create files which are updated atomically. And in the "this-may-prove-useful" corner, we have L, whose exported wraphandle() function will clothe anything that's not a blessed object in an IO::Handle-like wrapper... so you can just use OO syntax and stop worrying about whether your function's caller handed you a string, a globref, or a FileHandle. 
=head1 WARNINGS Perl's TIEHANDLE spec was incomplete prior to 5.005_57; it was missing support for C, C, and C. Attempting to use these functions with an IO::Scalar, IO::ScalarArray, IO::Lines, etc. B prior to 5.005_57. None of the relevant methods will be invoked by Perl; and even worse, this kind of bug can lie dormant for a while. If you turn warnings on (via C<$^W> or C), and you see something like this... seek() on unopened file ...then you are probably trying to use one of these functions on one of our IO:: classes with an old Perl. The remedy is to simply use the OO version; e.g.: $SH->seek(0,0); ### GOOD: will work on any 5.005 seek($SH,0,0); ### WARNING: will only work on 5.005_57 and beyond =head1 INSTALLATION =head2 Requirements As of version 2.x, this toolkit requires Perl 5.005 for the IO::Handle subclasses, and 5.005_57 or better is B recommended. See L<"WARNINGS"> for details. =head2 Directions Most of you already know the drill... perl Makefile.PL make make test make install For everyone else out there... if you've never installed Perl code before, or you're trying to use this in an environment where your sysadmin or ISP won't let you do interesting things, B since this module contains no binary extensions, you can cheat. That means copying the directory tree under my "./lib" directory into someplace where your script can "see" it. For example, under Linux: cp -r IO-stringy-1.234/lib/* /path/to/my/perl/ Now, in your Perl code, do this: use lib "/path/to/my/perl"; use IO::Scalar; ### or whatever Ok, now you've been told. At this point, anyone who whines about not being given enough information gets an unflattering haiku written about them in the next change log. I'll do it. Don't think I won't. =head1 VERSION $Id: Stringy.pm 1248 2008-03-25 00:51:31Z warnes $ =head1 TO DO =over 4 =item (2000/08/02) Finalize $/ support Graham Barr submitted this patch half a I ago; Like a moron, I lost his message under a ton of others, and only now have the experimental implementation done. Will the sudden sensitivity to $/ hose anyone out there? I'm worried, so you have to enable it explicitly in 1.x. It will be on by default in 2.x, though only IO::Scalar has been implemented. =item (2001/08/08) Remove IO::WrapTie from new IO:: classes It's not needed. Backwards compatibility could be maintained by having new_tie() be identical to new(). Heck, I'll bet that IO::WrapTie should be reimplemented so the returned object is just like an IO::Scalar in its use of globrefs. =back =head1 CHANGE LOG =over 4 =item Version 2.110 (2005/02/10) Maintainership taken over by DSKOLL Closed the following bugs at https://rt.cpan.org/NoAuth/Bugs.html?Dist=IO-stringy: =item 2208 IO::ScalarArray->getline does not return undef for EOF if undef($/) =item 7132 IO-stringy/Makefile.PL bug - name should be module name =item 11249 IO::Scalar flush shouldn't return undef =item 2172 $\ (output record separator) not respected =item 8605 IO::InnerFile::seek() should return 1 on success =item 4798 *.html in lib/ =item 4369 Improvement: handling of fixed-size reads in IO::Scalar (Actually, bug 4369 was closed in Version 2.109) =item Version 2.109 (2003/12/21) IO::Scalar::getline now works with ref to int. I =item Version 2.108 (2001/08/20) The terms-of-use have been placed in the distribution file "COPYING". Also, small documentation tweaks were made. =item Version 2.105 (2001/08/09) Added support for various seek() whences to IO::ScalarArray. Added support for consulting $/ in IO::Scalar and IO::ScalarArray. 
The old C is not even an option. Unsupported record separators will cause a croak(). Added a lot of regression tests to supoprt the above. Better on-line docs (hyperlinks to individual functions). =item Version 2.103 (2001/08/08) After sober consideration I have reimplemented IO::Scalar::print() so that it once again always seeks to the end of the string. Benchmarks show the new implementation to be just as fast as Juergen's contributed patch; until someone can convince me otherwise, the current, safer implementation stays. I thought more about giving IO::Scalar two separate handles, one for reading and one for writing, as suggested by Binkley. His points about what tell() and eof() return are, I think, show-stoppers for this feature. Even the manpages for stdio's fseek() seem to imply a I file position indicator, not two. So I think I will take this off the TO DO list. B you can always have two handles open on the same scalar, one which you only write to, and one which you only read from. That should give the same effect. =item Version 2.101 (2001/08/07) B This is the initial release of the "IO::Scalar and friends are now subclasses of IO::Handle". I'm flinging it against the wall. Please tell me if the banana sticks. When it does, the banana will be called 2.2x. First off, I, who has provided an I service by patching IO::Scalar and friends so that they (1) inherit from IO::Handle, (2) automatically tie themselves so that the C objects can be used in native i/o constructs, and (3) doing it so that the whole damn thing passes its regression tests. As Doug knows, my globref Kung-Fu was not up to the task; he graciously provided the patches. This has earned him a seat at the L table, and the right to have me address him as I. Performance of IO::Scalar::print() has been improved by as much as 2x for lots of little prints, with the cost of forcing those who print-then-seek-then-print to explicitly seek to end-of-string before printing again. I Added the COPYING file, which had been missing from prior versions. I IO::Scalar consults $/ by default (1.x ignored it by default). Yes, I still need to support IO::ScalarArray. =item Version 1.221 (2001/08/07) I threatened in L<"INSTALLATION"> to write an unflattering haiku about anyone who whined that I gave them insufficient information... but it turns out that I left out a crucial direction. D'OH! I Enough info there? Here's unflattering haiku: Forgot the line, "make"! ;-) =item Version 1.220 (2001/04/03) Added untested SEEK, TELL, and EOF methods to IO::Scalar and IO::ScalarArray to support corresponding functions for tied filehandles: untested, because I'm still running 5.00556 and Perl is complaining about "tell() on unopened file". I Removed not-fully-blank lines from modules; these were causing lots of POD-related warnings. I =item Version 1.219 (2001/02/23) IO::Scalar objects can now be made sensitive to $/ . Pains were taken to keep the fast code fast while adding this feature. I =item Version 1.218 (2001/02/23) IO::Scalar has a new sysseek() method. I New "TO DO" section, because people who submit patches/ideas should at least know that they're in the system... and that I won't lose their stuff. Please read it. New entries in L<"AUTHOR">. Please read those too. =item Version 1.216 (2000/09/28) B I thought I'd remembered a problem with this ages ago, related to the fact that these IO:: modules don't have "real" filehandles, but the problem apparently isn't surfacing now. 
If you suddenly encounter Perl warnings during global destruction (especially if you're using tied filehandles), then please let me know! I B Apparently, the offset and the number-of-bytes arguments were, for all practical purposes, I You were okay if you did all your writing with print(), but boy was I a stupid bug! I Newspaper headline typeset by dyslexic man loses urgency BABY EATS FISH is simply not equivalent to FISH EATS BABY B I =item Version 1.215 (2000/09/05) Added 'bool' overload to '""' overload, so object always evaluates to true. (Whew. Glad I caught this before it went to CPAN.) =item Version 1.214 (2000/09/03) Evaluating an IO::Scalar in a string context now yields the underlying string. I =item Version 1.213 (2000/08/16) Minor documentation fixes. =item Version 1.212 (2000/06/02) Fixed IO::InnerFile incompatibility with Perl5.004. I =item Version 1.210 (2000/04/17) Added flush() and other no-op methods. I =item Version 1.209 (2000/03/17) Small bug fixes. =item Version 1.208 (2000/03/14) Incorporated a number of contributed patches and extensions, mostly related to speed hacks, support for "offset", and WRITE/CLOSE methods. I =item Version 1.206 (1999/04/18) Added creation of ./testout when Makefile.PL is run. =item Version 1.205 (1999/01/15) Verified for Perl5.005. =item Version 1.202 (1998/04/18) New IO::WrapTie and IO::AtomicFile added. =item Version 1.110 Added IO::WrapTie. =item Version 1.107 Added IO::Lines, and made some bug fixes to IO::ScalarArray. Also, added getc(). =item Version 1.105 No real changes; just upgraded IO::Wrap to have a $VERSION string. =back =head1 AUTHOR =over 4 =item Primary Maintainer David F. Skoll (F). =item Original Author Eryq (F). President, ZeeGee Software Inc (F). =item Co-Authors For all their bug reports and patch submissions, the following are officially recognized: Richard Jones B. K. Oxley (binkley) Doru Petrescu Doug Wilson (for picking up the ball I dropped, and doing tie() right) =back Go to F for the latest downloads and on-line documentation for this module. Enjoy. Yell if it breaks. =cut gdata/inst/perl/Spreadsheet/0000755000175100001440000000000013003720416015532 5ustar hornikusersgdata/inst/perl/Spreadsheet/ParseExcel/0000755000175100001440000000000013115545572017601 5ustar hornikusersgdata/inst/perl/Spreadsheet/ParseExcel/FmtDefault.pm0000644000175100001440000001442313003720416022162 0ustar hornikuserspackage Spreadsheet::ParseExcel::FmtDefault; ############################################################################### # # Spreadsheet::ParseExcel::FmtDefault - A class for Cell formats. # # Used in conjunction with Spreadsheet::ParseExcel. # # Copyright (c) 2014 Douglas Wilson # Copyright (c) 2009-2013 John McNamara # Copyright (c) 2006-2008 Gabor Szabo # Copyright (c) 2000-2006 Kawai Takanori # # perltidy with standard settings. 
# # Documentation after __END__ # use strict; use warnings; use Spreadsheet::ParseExcel::Utility qw(ExcelFmt); our $VERSION = '0.65'; my %hFmtDefault = ( 0x00 => 'General', 0x01 => '0', 0x02 => '0.00', 0x03 => '#,##0', 0x04 => '#,##0.00', 0x05 => '($#,##0_);($#,##0)', 0x06 => '($#,##0_);[Red]($#,##0)', 0x07 => '($#,##0.00_);($#,##0.00_)', 0x08 => '($#,##0.00_);[Red]($#,##0.00_)', 0x09 => '0%', 0x0A => '0.00%', 0x0B => '0.00E+00', 0x0C => '# ?/?', 0x0D => '# ??/??', 0x0E => 'yyyy-mm-dd', # Was 'm-d-yy', which is bad as system default 0x0F => 'd-mmm-yy', 0x10 => 'd-mmm', 0x11 => 'mmm-yy', 0x12 => 'h:mm AM/PM', 0x13 => 'h:mm:ss AM/PM', 0x14 => 'h:mm', 0x15 => 'h:mm:ss', 0x16 => 'm-d-yy h:mm', #0x17-0x24 -- Differs in Natinal 0x25 => '(#,##0_);(#,##0)', 0x26 => '(#,##0_);[Red](#,##0)', 0x27 => '(#,##0.00);(#,##0.00)', 0x28 => '(#,##0.00);[Red](#,##0.00)', 0x29 => '_(*#,##0_);_(*(#,##0);_(*"-"_);_(@_)', 0x2A => '_($*#,##0_);_($*(#,##0);_(*"-"_);_(@_)', 0x2B => '_(*#,##0.00_);_(*(#,##0.00);_(*"-"??_);_(@_)', 0x2C => '_($*#,##0.00_);_($*(#,##0.00);_(*"-"??_);_(@_)', 0x2D => 'mm:ss', 0x2E => '[h]:mm:ss', 0x2F => 'mm:ss.0', 0x30 => '##0.0E+0', 0x31 => '@', ); #------------------------------------------------------------------------------ # new (for Spreadsheet::ParseExcel::FmtDefault) #------------------------------------------------------------------------------ sub new { my ( $sPkg, %hKey ) = @_; my $oThis = {}; bless $oThis; return $oThis; } #------------------------------------------------------------------------------ # TextFmt (for Spreadsheet::ParseExcel::FmtDefault) #------------------------------------------------------------------------------ sub TextFmt { my ( $oThis, $sTxt, $sCode ) = @_; return $sTxt if ( ( !defined($sCode) ) || ( $sCode eq '_native_' ) ); return pack( 'U*', unpack( 'n*', $sTxt ) ); } #------------------------------------------------------------------------------ # FmtStringDef (for Spreadsheet::ParseExcel::FmtDefault) #------------------------------------------------------------------------------ sub FmtStringDef { my ( $oThis, $iFmtIdx, $oBook, $rhFmt ) = @_; my $sFmtStr = $oBook->{FormatStr}->{$iFmtIdx}; if ( !( defined($sFmtStr) ) && defined($rhFmt) ) { $sFmtStr = $rhFmt->{$iFmtIdx}; } $sFmtStr = $hFmtDefault{$iFmtIdx} unless ($sFmtStr); return $sFmtStr; } #------------------------------------------------------------------------------ # FmtString (for Spreadsheet::ParseExcel::FmtDefault) #------------------------------------------------------------------------------ sub FmtString { my ( $oThis, $oCell, $oBook ) = @_; no warnings; my $sFmtStr = $oThis->FmtStringDef( $oBook->{Format}[ $oCell->{FormatNo} ]->{FmtIdx}, $oBook ); use warnings; # Special case for cells that use Lotus123 style leading # apostrophe to designate text formatting. 
no warnings; if ( $oBook->{Format}[ $oCell->{FormatNo} ]->{Key123} ) { $sFmtStr = '@'; } use warnings; unless ( defined($sFmtStr) ) { if ( $oCell->{Type} eq 'Numeric' ) { if ( int( $oCell->{Val} ) != $oCell->{Val} ) { $sFmtStr = '0.00'; } else { $sFmtStr = '0'; } } elsif ( $oCell->{Type} eq 'Date' ) { if ( int( $oCell->{Val} ) <= 0 ) { $sFmtStr = 'h:mm:ss'; } else { $sFmtStr = 'yyyy-mm-dd'; } } else { $sFmtStr = '@'; } } return $sFmtStr; } #------------------------------------------------------------------------------ # ValFmt (for Spreadsheet::ParseExcel::FmtDefault) #------------------------------------------------------------------------------ sub ValFmt { my ( $oThis, $oCell, $oBook ) = @_; my ( $Dt, $iFmtIdx, $iNumeric, $Flg1904 ); if ( $oCell->{Type} eq 'Text' ) { $Dt = ( ( defined $oCell->{Val} ) && ( $oCell->{Val} ne '' ) ) ? $oThis->TextFmt( $oCell->{Val}, $oCell->{Code} ) : ''; return $Dt; } else { $Dt = $oCell->{Val}; $Flg1904 = $oBook->{Flg1904}; my $sFmtStr = $oThis->FmtString( $oCell, $oBook ); return ExcelFmt( $sFmtStr, $Dt, $Flg1904, $oCell->{Type} ); } } #------------------------------------------------------------------------------ # ChkType (for Spreadsheet::ParseExcel::FmtDefault) #------------------------------------------------------------------------------ sub ChkType { my ( $oPkg, $iNumeric, $iFmtIdx ) = @_; if ($iNumeric) { if ( ( ( $iFmtIdx >= 0x0E ) && ( $iFmtIdx <= 0x16 ) ) || ( ( $iFmtIdx >= 0x2D ) && ( $iFmtIdx <= 0x2F ) ) ) { return "Date"; } else { return "Numeric"; } } else { return "Text"; } } 1; __END__ =pod =head1 NAME Spreadsheet::ParseExcel::FmtDefault - A class for Cell formats. =head1 SYNOPSIS See the documentation for Spreadsheet::ParseExcel. =head1 DESCRIPTION This module is used in conjunction with Spreadsheet::ParseExcel. See the documentation for Spreadsheet::ParseExcel. =head1 AUTHOR Current maintainer 0.60+: Douglas Wilson dougw@cpan.org Maintainer 0.40-0.59: John McNamara jmcnamara@cpan.org Maintainer 0.27-0.33: Gabor Szabo szabgab@cpan.org Original author: Kawai Takanori kwitknr@cpan.org =head1 COPYRIGHT Copyright (c) 2014 Douglas Wilson Copyright (c) 2009-2013 John McNamara Copyright (c) 2006-2008 Gabor Szabo Copyright (c) 2000-2006 Kawai Takanori All rights reserved. You may distribute under the terms of either the GNU General Public License or the Artistic License, as specified in the Perl README file. =cut gdata/inst/perl/Spreadsheet/ParseExcel/Workbook.pm0000644000175100001440000001602613003720416021725 0ustar hornikuserspackage Spreadsheet::ParseExcel::Workbook; ############################################################################### # # Spreadsheet::ParseExcel::Workbook - A class for Workbooks. # # Used in conjunction with Spreadsheet::ParseExcel. # # Copyright (c) 2014 Douglas Wilson # Copyright (c) 2009-2013 John McNamara # Copyright (c) 2006-2008 Gabor Szabo # Copyright (c) 2000-2006 Kawai Takanori # # perltidy with standard settings. # # Documentation after __END__ # use strict; use warnings; our $VERSION = '0.65'; ############################################################################### # # new() # # Constructor. # sub new { my ($class) = @_; my $self = {}; bless $self, $class; } ############################################################################### sub color_idx_to_rgb { my( $workbook, $iidx ) = @_; my $palette = $workbook->{aColor}; return ( ( defined $palette->[$iidx] ) ? 
$palette->[$iidx] : $palette->[0] ); } ############################################################################### # # worksheet() # # This method returns a single Worksheet object using either its name or index. # sub worksheet { my ( $oBook, $sName ) = @_; my $oWkS; foreach $oWkS ( @{ $oBook->{Worksheet} } ) { return $oWkS if ( $oWkS->{Name} eq $sName ); } if ( $sName =~ /^\d+$/ ) { return $oBook->{Worksheet}->[$sName]; } return undef; } ############################################################################### # # worksheets() # # Returns an array of Worksheet objects. # sub worksheets { my $self = shift; return @{ $self->{Worksheet} }; } ############################################################################### # # worksheet_count() # # Returns the number Woksheet objects in the Workbook. # sub worksheet_count { my $self = shift; return $self->{SheetCount}; } ############################################################################### # # get_filename() # # Returns the name of the Excel file of C if the data was read from a filehandle rather than a file. # sub get_filename { my $self = shift; return $self->{File}; } ############################################################################### # # get_print_areas() # # Returns an array ref of print areas. # # TODO. This should really be a Worksheet method. # sub get_print_areas { my $self = shift; return $self->{PrintArea}; } ############################################################################### # # get_print_titles() # # Returns an array ref of print title hash refs. # # TODO. This should really be a Worksheet method. # sub get_print_titles { my $self = shift; return $self->{PrintTitle}; } ############################################################################### # # using_1904_date() # # Returns true if the Excel file is using the 1904 date epoch. # sub using_1904_date { my $self = shift; return $self->{Flg1904}; } ############################################################################### # # ParseAbort() # # Todo # sub ParseAbort { my ( $self, $val ) = @_; $self->{_ParseAbort} = $val; } =head2 get_active_sheet() Return the number of the active (open) worksheet (at the time the workbook was saved. May return undef. =cut sub get_active_sheet { my $workbook = shift; return $workbook->{ActiveSheet}; } ############################################################################### # # Parse(). Deprecated. # # Syntactic wrapper around Spreadsheet::ParseExcel::Parse(). # This method is *deprecated* since it doesn't conform to the current # error handling in the S::PE Parse() method. # sub Parse { my ( $class, $source, $formatter ) = @_; my $excel = Spreadsheet::ParseExcel->new(); my $workbook = $excel->Parse( $source, $formatter ); $workbook->{_Excel} = $excel; return $workbook; } ############################################################################### # # Mapping between legacy method names and new names. # { no warnings; # Ignore warnings about variables used only once. *Worksheet = *worksheet; } 1; __END__ =pod =head1 NAME Spreadsheet::ParseExcel::Workbook - A class for Workbooks. =head1 SYNOPSIS See the documentation for Spreadsheet::ParseExcel. =head1 DESCRIPTION This module is used in conjunction with Spreadsheet::ParseExcel. See the documentation for L. 
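An illustrative sketch (the file name here is only an example) of how a
Workbook object is normally obtained from Spreadsheet::ParseExcel and then
queried with the methods listed below:

    use Spreadsheet::ParseExcel;

    my $parser   = Spreadsheet::ParseExcel->new();
    my $workbook = $parser->Parse('Book1.xls');
    die "Could not parse Book1.xls\n" unless defined $workbook;

    printf "Worksheets: %d\n", $workbook->worksheet_count();

    for my $worksheet ( $workbook->worksheets() ) {
        print "  ", $worksheet->{Name}, "\n";
    }
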
=head1 Methods The following Workbook methods are available: $workbook->worksheets() $workbook->worksheet() $workbook->worksheet_count() $workbook->get_filename() $workbook->get_print_areas() $workbook->get_print_titles() $workbook->using_1904_date() =head2 worksheets() The C method returns an array of Worksheet objects. This was most commonly used to iterate over the worksheets in a workbook: for my $worksheet ( $workbook->worksheets() ) { ... } =head2 worksheet() The C method returns a single C object using either its name or index: $worksheet = $workbook->worksheet('Sheet1'); $worksheet = $workbook->worksheet(0); Returns C if the sheet name or index doesn't exist. =head2 worksheet_count() The C method returns the number of Woksheet objects in the Workbook. my $worksheet_count = $workbook->worksheet_count(); =head2 get_filename() The C method returns the name of the Excel file of C if the data was read from a filehandle rather than a file. my $filename = $workbook->get_filename(); =head2 get_print_areas() The C method returns an array ref of print areas. my $print_areas = $workbook->get_print_areas(); Each print area is as follows: [ $start_row, $start_col, $end_row, $end_col ] Returns undef if there are no print areas. =head2 get_print_titles() The C method returns an array ref of print title hash refs. my $print_titles = $workbook->get_print_titles(); Each print title array ref is as follows: { Row => [ $start_row, $end_row ], Column => [ $start_col, $end_col ], } Returns undef if there are no print titles. =head2 using_1904_date() The C method returns true if the Excel file is using the 1904 date epoch instead of the 1900 epoch. my $using_1904_date = $workbook->using_1904_date(); The Windows version of Excel generally uses the 1900 epoch while the Mac version of Excel generally uses the 1904 epoch. Returns 0 if the 1900 epoch is in use. =head1 AUTHOR Current maintainer 0.60+: Douglas Wilson dougw@cpan.org Maintainer 0.40-0.59: John McNamara jmcnamara@cpan.org Maintainer 0.27-0.33: Gabor Szabo szabgab@cpan.org Original author: Kawai Takanori kwitknr@cpan.org =head1 COPYRIGHT Copyright (c) 2014 Douglas Wilson Copyright (c) 2009-2013 John McNamara Copyright (c) 2006-2008 Gabor Szabo Copyright (c) 2000-2006 Kawai Takanori All rights reserved. You may distribute under the terms of either the GNU General Public License or the Artistic License, as specified in the Perl README file. =cut gdata/inst/perl/Spreadsheet/ParseExcel/SaveParser.pm0000644000175100001440000001727513003720416022212 0ustar hornikuserspackage Spreadsheet::ParseExcel::SaveParser; ############################################################################### # # Spreadsheet::ParseExcel::SaveParser - Rewrite an existing Excel file. # # Used in conjunction with Spreadsheet::ParseExcel. # # Copyright (c) 2014 Douglas Wilson # Copyright (c) 2009-2013 John McNamara # Copyright (c) 2006-2008 Gabor Szabo # Copyright (c) 2000-2006 Kawai Takanori # # perltidy with standard settings. 
# # Documentation after __END__ # use strict; use warnings; use Spreadsheet::ParseExcel; use Spreadsheet::ParseExcel::SaveParser::Workbook; use Spreadsheet::ParseExcel::SaveParser::Worksheet; use Spreadsheet::WriteExcel; use base 'Spreadsheet::ParseExcel'; our $VERSION = '0.65'; ############################################################################### # # new() # sub new { my ( $package, %params ) = @_; $package->SUPER::new(%params); } ############################################################################### # # Create() # sub Create { my ( $self, $formatter ) = @_; #0. New $workbook my $workbook = Spreadsheet::ParseExcel::Workbook->new(); $workbook->{SheetCount} = 0; # User specified formatter class. if ($formatter) { $workbook->{FmtClass} = $formatter; } else { $workbook->{FmtClass} = Spreadsheet::ParseExcel::FmtDefault->new(); } return Spreadsheet::ParseExcel::SaveParser::Workbook->new($workbook); } ############################################################################### # # Parse() # sub Parse { my ( $self, $sFile, $formatter ) = @_; my $workbook = $self->SUPER::Parse( $sFile, $formatter ); return undef unless defined $workbook; return Spreadsheet::ParseExcel::SaveParser::Workbook->new($workbook); } ############################################################################### # # SaveAs() # sub SaveAs { my ( $self, $workbook, $filename ) = @_; $workbook->SaveAs($filename); } 1; __END__ =head1 NAME Spreadsheet::ParseExcel::SaveParser - Rewrite an existing Excel file. =head1 SYNOPSIS Say we start with an Excel file that looks like this: ----------------------------------------------------- | | A | B | C | ----------------------------------------------------- | 1 | Hello | ... | ... | ... | 2 | World | ... | ... | ... | 3 | *Bold text* | ... | ... | ... | 4 | ... | ... | ... | ... | 5 | ... | ... | ... | ... Then we process it with the following program: #!/usr/bin/perl use strict; use warnings; use Spreadsheet::ParseExcel; use Spreadsheet::ParseExcel::SaveParser; # Open an existing file with SaveParser my $parser = Spreadsheet::ParseExcel::SaveParser->new(); my $template = $parser->Parse('template.xls'); # Get the first worksheet. my $worksheet = $template->worksheet(0); my $row = 0; my $col = 0; # Overwrite the string in cell A1 $worksheet->AddCell( $row, $col, 'New string' ); # Add a new string in cell B1 $worksheet->AddCell( $row, $col + 1, 'Newer' ); # Add a new string in cell C1 with the format from cell A3. my $cell = $worksheet->get_cell( $row + 2, $col ); my $format_number = $cell->{FormatNo}; $worksheet->AddCell( $row, $col + 2, 'Newest', $format_number ); # Write over the existing file or write a new file. $template->SaveAs('newfile.xls'); We should now have an Excel file that looks like this: ----------------------------------------------------- | | A | B | C | ----------------------------------------------------- | 1 | New string | Newer | *Newest* | ... | 2 | World | ... | ... | ... | 3 | *Bold text* | ... | ... | ... | 4 | ... | ... | ... | ... | 5 | ... | ... | ... | ... =head1 DESCRIPTION The C module rewrite an existing Excel file by reading it with C and rewriting it with C. =head1 METHODS =head1 Parser =head2 new() $parse = new Spreadsheet::ParseExcel::SaveParser(); Constructor. =head2 Parse() $workbook = $parse->Parse($sFileName); $workbook = $parse->Parse($sFileName , $formatter); Returns a L object. If an error occurs, returns undef. The optional C<$formatter> is a Formatter Class to format the value of cells. 
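For instance, a minimal sketch of passing an explicit formatter (here the
stock Spreadsheet::ParseExcel::FmtDefault, which simply restates the
default behaviour; the file name is only an example):

    use Spreadsheet::ParseExcel::SaveParser;
    use Spreadsheet::ParseExcel::FmtDefault;

    my $parser    = Spreadsheet::ParseExcel::SaveParser->new();
    my $formatter = Spreadsheet::ParseExcel::FmtDefault->new();

    my $template  = $parser->Parse( 'template.xls', $formatter )
        or die "Could not parse template.xls\n";
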
=head1 Workbook The C method returns a C object. This is a subclass of the L and has the following methods: =head2 worksheets() Returns an array of L objects. This was most commonly used to iterate over the worksheets in a workbook: for my $worksheet ( $workbook->worksheets() ) { ... } =head2 worksheet() The C method returns a single C object using either its name or index: $worksheet = $workbook->worksheet('Sheet1'); $worksheet = $workbook->worksheet(0); Returns C if the sheet name or index doesn't exist. =head2 AddWorksheet() $workbook = $workbook->AddWorksheet($name, %properties); Create a new Worksheet object of type C. The C<%properties> hash contains the properties of new Worksheet. =head2 AddFont $workbook = $workbook->AddFont(%properties); Create new Font object of type C. The C<%properties> hash contains the properties of new Font. =head2 AddFormat $workbook = $workbook->AddFormat(%properties); The C<%properties> hash contains the properties of new Font. =head1 Worksheet Spreadsheet::ParseExcel::SaveParser::Worksheet Worksheet is a subclass of Spreadsheet::ParseExcel::Worksheet. And has these methods : The C method returns a C object. This is a subclass of the L and has the following methods: =head1 AddCell $workbook = $worksheet->AddCell($row, $col, $value, $format [$encoding]); Create new Cell object of type C. The C<$format> parameter is the format number rather than a full format object. To specify just same as another cell, you can set it like below: $row = 0; $col = 0; $worksheet = $template->worksheet(0); $cell = $worksheet->get_cell( $row, $col ); $format_number = $cell->{FormatNo}; $worksheet->AddCell($row +1, $coll, 'New data', $format_number); =head1 TODO Please note that this module is currently (versions 0.50-0.60) undergoing a major restructuring and rewriting. =head1 Known Problems You can only rewrite the features that Spreadsheet::WriteExcel supports so macros, graphs and some other features in the original Excel file will be lost. Also, formulas aren't rewritten, only the result of a formula is written. Only last print area will remain. (Others will be removed) =head1 AUTHOR Current maintainer 0.60+: Douglas Wilson dougw@cpan.org Maintainer 0.40-0.59: John McNamara jmcnamara@cpan.org Maintainer 0.27-0.33: Gabor Szabo szabgab@cpan.org Original author: Kawai Takanori kwitknr@cpan.org =head1 COPYRIGHT Copyright (c) 2014 Douglas Wilson Copyright (c) 2009-2013 John McNamara Copyright (c) 2006-2008 Gabor Szabo Copyright (c) 2000-2002 Kawai Takanori and Nippon-RAD Co. OP Division All rights reserved. You may distribute under the terms of either the GNU General Public License or the Artistic License, as specified in the Perl README file. =cut gdata/inst/perl/Spreadsheet/ParseExcel/Format.pm0000644000175100001440000000271513003720416021360 0ustar hornikuserspackage Spreadsheet::ParseExcel::Format; ############################################################################### # # Spreadsheet::ParseExcel::Format - A class for Cell formats. # # Used in conjunction with Spreadsheet::ParseExcel. # # Copyright (c) 2014 Douglas Wilson # Copyright (c) 2009-2013 John McNamara # Copyright (c) 2006-2008 Gabor Szabo # Copyright (c) 2000-2006 Kawai Takanori # # perltidy with standard settings. # # Documentation after __END__ # use strict; use warnings; our $VERSION = '0.65'; sub new { my ( $class, %rhIni ) = @_; my $self = \%rhIni; bless $self, $class; } 1; __END__ =pod =head1 NAME Spreadsheet::ParseExcel::Format - A class for Cell formats. 
=head1 SYNOPSIS See the documentation for Spreadsheet::ParseExcel. =head1 DESCRIPTION This module is used in conjunction with Spreadsheet::ParseExcel. See the documentation for Spreadsheet::ParseExcel. =head1 AUTHOR Current maintainer 0.60+: Douglas Wilson dougw@cpan.org Maintainer 0.40-0.59: John McNamara jmcnamara@cpan.org Maintainer 0.27-0.33: Gabor Szabo szabgab@cpan.org Original author: Kawai Takanori kwitknr@cpan.org =head1 COPYRIGHT Copyright (c) 2014 Douglas Wilson Copyright (c) 2009-2013 John McNamara Copyright (c) 2006-2008 Gabor Szabo Copyright (c) 2000-2006 Kawai Takanori All rights reserved. You may distribute under the terms of either the GNU General Public License or the Artistic License, as specified in the Perl README file. =cut gdata/inst/perl/Spreadsheet/ParseExcel/Utility.pm0000644000175100001440000015455713003720416021607 0ustar hornikuserspackage Spreadsheet::ParseExcel::Utility; ############################################################################### # # Spreadsheet::ParseExcel::Utility - Utility functions for ParseExcel. # # Used in conjunction with Spreadsheet::ParseExcel. # # Copyright (c) 2014 Douglas Wilson # Copyright (c) 2009-2013 John McNamara # Copyright (c) 2006-2008 Gabor Szabo # Copyright (c) 2000-2006 Kawai Takanori # # perltidy with standard settings. # # Documentation after __END__ # use strict; use warnings; require Exporter; use vars qw(@ISA @EXPORT_OK); @ISA = qw(Exporter); @EXPORT_OK = qw(ExcelFmt LocaltimeExcel ExcelLocaltime col2int int2col sheetRef xls2csv); our $VERSION = '0.65'; my $qrNUMBER = qr/(^[+-]?\d+(\.\d+)?$)|(^[+-]?\d+\.?(\d*)[eE][+-](\d+))$/; ############################################################################### # # ExcelFmt() # # This function takes an Excel style number format and converts a number into # that format. for example: 'hh:mm:ss AM/PM' + 0.01023148 = '12:14:44 AM'. # # It does this with a type of templating mechanism. The format string is parsed # to identify tokens that need to be replaced and their position within the # string is recorded. These can be thought of as placeholders. The number is # then converted to the required formats and substituted into the placeholders. # # Interested parties should refer to the Excel documentation on cell formats for # more information: http://office.microsoft.com/en-us/excel/HP051995001033.aspx # The Microsoft documentation for the Excel Binary File Format, [MS-XLS].pdf, # also contains a ABNF grammar for number format strings. # # Maintainers notes: # ================== # # Note on format subsections: # A format string can contain 4 possible sub-sections separated by semi-colons: # Positive numbers, negative numbers, zero values, and text. # For example: _(* #,##0_);_(* (#,##0);_(* "-"_);_(@_) # # Note on conditional formats. # A number format in Excel can have a conditional expression such as: # [>9999999](000)000-0000;000-0000 # This is equivalent to the following in Perl: # $format = $number > 9999999 ? '(000)000-0000' : '000-0000'; # Nested conditionals are also possible but we don't support them. # # Efficiency: The excessive use of substr() isn't very efficient. However, # it probably doesn't merit rewriting this function with a parser or regular # expressions and \G. # # TODO: I think the single quote handling may not be required. Check. # sub ExcelFmt { my ( $format_str, $number, $is_1904, $number_type, $want_subformats ) = @_; # Return text strings without further formatting. 
return $number unless $number =~ $qrNUMBER; # Handle OpenOffice.org GENERAL format. $format_str = '@' if uc($format_str) eq "GENERAL"; # Check for a conditional at the start of the format. See notes above. my $conditional; if ( $format_str =~ /^\[([<>=][^\]]+)\](.*)$/ ) { $conditional = $1; $format_str = $2; } # Ignore the underscore token which is used to indicate a padding space. $format_str =~ s/_/ /g; # Split the format string into 4 possible sub-sections: positive numbers, # negative numbers, zero values, and text. See notes above. my @formats; my $section = 0; my $double_quote = 0; my $single_quote = 0; # Initial parsing of the format string to remove escape characters. This # also handles quoted strings. See note about single quotes above. CHARACTER: for my $char ( split //, $format_str ) { if ( $double_quote or $single_quote ) { $formats[$section] .= $char; $double_quote = 0 if $char eq '"'; $single_quote = 0; next CHARACTER; } if ( $char eq ';' ) { $section++; next CHARACTER; } elsif ( $char eq '"' ) { $double_quote = 1; } elsif ( $char eq '!' ) { $single_quote = 1; } elsif ( $char eq '\\' ) { $single_quote = 1; } elsif ( $char eq '(' ) { next CHARACTER; # Ignore. } elsif ( $char eq ')' ) { next CHARACTER; # Ignore. } # Convert upper case OpenOffice.org date/time formats to lowercase.. $char = lc($char) if $char =~ /[DMYHS]/; $formats[$section] .= $char; } # Select the appropriate format from the 4 possible sub-sections: # positive numbers, negative numbers, zero values, and text. # We ignore the Text section since non-numeric values are returned # unformatted at the start of the function. my $format; $section = 0; if ( @formats == 1 ) { $section = 0; } elsif ( @formats == 2 ) { if ( $number < 0 ) { $section = 1; } else { $section = 0; } } elsif ( @formats == 3 ) { if ( $number == 0 ) { $section = 2; } elsif ( $number < 0 ) { $section = 1; } else { $section = 0; } } else { $section = 0; } # Override the previous choice if the format is conditional. if ($conditional) { # TODO. Replace string eval with a function. $section = eval "$number $conditional" ? 0 : 1; } # We now have the required format. $format = $formats[$section]; # The format string can contain one of the following colours: # [Black] [Blue] [Cyan] [Green] [Magenta] [Red] [White] [Yellow] # or the string [ColorX] where x is a colour index from 1 to 56. # We don't use the colour but we return it to the caller. # my $color = ''; if ( $format =~ s/^(\[[A-Za-z]{3,}(\d{1,2})?\])// ) { $color = $1; } # Remove the locale, such as [$-409], from the format string. my $locale = ''; if ( $format =~ s/^(\[\$?-F?\d+\])// ) { $locale = $1; } # Replace currency locale, such as [$$-409], with $ in the format string. # See the RT#60547 test cases in 21_number_format_user.t. if ( $format =~ s/(\[\$([^-]+)(-\d+)?\])/$2/s ) { $locale = $1; } # Remove leading # from '# ?/?', '# ??/??' fraction formats. $format =~ s{# \?}{?}g; # Parse the format string and create an AoA of placeholders that contain # the parts of the string to be replaced. The format of the information # stored is: [ $token, $start_pos, $end_pos, $option_info ]. # my $format_mode = ''; # Either: '', 'number', 'date' my $pos = 0; # Character position within format string. my @placeholders = (); # Arefs with parts of the format to be replaced. my $token = ''; # The actual format extracted from the total str. my $start_pos; # A position variable. Initial parser position. my $token_start = -1; # A position variable. my $decimal_pos = -1; # Position of the punctuation char "." 
or ",". my $comma_count = 0; # Count of the commas in the format. my $is_fraction = 0; # Number format is a fraction. my $is_currency = 0; # Number format is a currency. my $is_percent = 0; # Number format is a percentage. my $is_12_hour = 0; # Time format is using 12 hour clock. my $seen_dot = 0; # Treat only the first "." as the decimal point. # Parse the format. PARSER: while ( $pos < length $format ) { $start_pos = $pos; my $char = substr( $format, $pos, 1 ); # Ignore control format characters such as '#0+-.?eE,%'. However, # only ignore '.' if it is the first one encountered. RT 45502. if ( ( !$seen_dot && $char !~ /[#0\+\-\.\?eE\,\%]/ ) || $char !~ /[#0\+\-\?eE\,\%]/ ) { if ( $token_start != -1 ) { push @placeholders, [ substr( $format, $token_start, $pos - $token_start ), $decimal_pos, $pos - $token_start ]; $token_start = -1; } } # Processing for quoted strings within the format. See notes above. if ( $char eq '"' ) { $double_quote = $double_quote ? 0 : 1; $pos++; next PARSER; } elsif ( $char eq '!' ) { $single_quote = 1; $pos++; next PARSER; } elsif ( $char eq '\\' ) { if ( $single_quote != 1 ) { $single_quote = 1; $pos++; next PARSER; } } if ( ( defined($double_quote) and ($double_quote) ) or ( defined($single_quote) and ($single_quote) ) or ( $seen_dot && $char eq '.' ) ) { $single_quote = 0; if ( ( $format_mode ne 'date' ) and ( ( substr( $format, $pos, 2 ) eq "\x81\xA2" ) || ( substr( $format, $pos, 2 ) eq "\x81\xA3" ) || ( substr( $format, $pos, 2 ) eq "\xA2\xA4" ) || ( substr( $format, $pos, 2 ) eq "\xA2\xA5" ) ) ) { # The above matches are currency symbols. push @placeholders, [ substr( $format, $pos, 2 ), length($token), 2 ]; $is_currency = 1; $pos += 2; } else { $pos++; } } elsif ( ( $char =~ /[#0\+\.\?eE\,\%]/ ) || ( ( $format_mode ne 'date' ) and ( ( $char eq '-' ) || ( $char eq '(' ) || ( $char eq ')' ) ) ) ) { $format_mode = 'number' unless $format_mode; if ( substr( $format, $pos, 1 ) =~ /[#0]/ ) { if ( substr( $format, $pos ) =~ /^([#0]+[\.]?[0#]*[eE][\+\-][0#]+)/ ) { push @placeholders, [ $1, $pos, length($1) ]; $pos += length($1); } else { if ( $token_start == -1 ) { $token_start = $pos; $decimal_pos = length($token); } } } elsif ( substr( $format, $pos, 1 ) eq '?' ) { # Look for a fraction format like ?/? or ??/?? if ( $token_start != -1 ) { push @placeholders, [ substr( $format, $token_start, $pos - $token_start + 1 ), $decimal_pos, $pos - $token_start + 1 ]; } $token_start = $pos; # Find the end of the fraction format. FRACTION: while ( $pos < length($format) ) { if ( substr( $format, $pos, 1 ) eq '/' ) { $is_fraction = 1; } elsif ( substr( $format, $pos, 1 ) eq '?' ) { $pos++; next FRACTION; } else { if ( $is_fraction && ( substr( $format, $pos, 1 ) =~ /[0-9]/ ) ) { # TODO: Could invert if() logic and remove this. 
$pos++; next FRACTION; } else { last FRACTION; } } $pos++; } $pos--; push @placeholders, [ substr( $format, $token_start, $pos - $token_start + 1 ), length($token), $pos - $token_start + 1 ]; $token_start = -1; } elsif ( substr( $format, $pos, 3 ) =~ /^[eE][\+\-][0#]$/ ) { if ( substr( $format, $pos ) =~ /([eE][\+\-][0#]+)/ ) { push @placeholders, [ $1, $pos, length($1) ]; $pos += length($1); } $token_start = -1; } else { if ( $token_start != -1 ) { push @placeholders, [ substr( $format, $token_start, $pos - $token_start ), $decimal_pos, $pos - $token_start ]; $token_start = -1; } if ( substr( $format, $pos, 1 ) =~ /[\+\-]/ ) { push @placeholders, [ substr( $format, $pos, 1 ), length($token), 1 ]; $is_currency = 1; } elsif ( substr( $format, $pos, 1 ) eq '.' ) { push @placeholders, [ substr( $format, $pos, 1 ), length($token), 1 ]; $seen_dot = 1; } elsif ( substr( $format, $pos, 1 ) eq ',' ) { $comma_count++; push @placeholders, [ substr( $format, $pos, 1 ), length($token), 1 ]; } elsif ( substr( $format, $pos, 1 ) eq '%' ) { $is_percent = 1; } elsif (( substr( $format, $pos, 1 ) eq '(' ) || ( substr( $format, $pos, 1 ) eq ')' ) ) { push @placeholders, [ substr( $format, $pos, 1 ), length($token), 1 ]; $is_currency = 1; } } $pos++; } elsif ( $char =~ /[ymdhsapg]/i ) { $format_mode = 'date' unless $format_mode; if ( substr( $format, $pos, 5 ) =~ /am\/pm/i ) { push @placeholders, [ 'am/pm', length($token), 5 ]; $is_12_hour = 1; $pos += 5; } elsif ( substr( $format, $pos, 3 ) =~ /a\/p/i ) { push @placeholders, [ 'a/p', length($token), 3 ]; $is_12_hour = 1; $pos += 3; } elsif ( substr( $format, $pos, 5 ) eq 'mmmmm' ) { push @placeholders, [ 'mmmmm', length($token), 5 ]; $pos += 5; } elsif (( substr( $format, $pos, 4 ) eq 'mmmm' ) || ( substr( $format, $pos, 4 ) eq 'dddd' ) || ( substr( $format, $pos, 4 ) eq 'yyyy' ) || ( substr( $format, $pos, 4 ) eq 'ggge' ) ) { push @placeholders, [ substr( $format, $pos, 4 ), length($token), 4 ]; $pos += 4; } elsif (( substr( $format, $pos, 3 ) eq 'ddd' ) || ( substr( $format, $pos, 3 ) eq 'mmm' ) || ( substr( $format, $pos, 3 ) eq 'yyy' ) ) { push @placeholders, [ substr( $format, $pos, 3 ), length($token), 3 ]; $pos += 3; } elsif (( substr( $format, $pos, 2 ) eq 'yy' ) || ( substr( $format, $pos, 2 ) eq 'mm' ) || ( substr( $format, $pos, 2 ) eq 'dd' ) || ( substr( $format, $pos, 2 ) eq 'hh' ) || ( substr( $format, $pos, 2 ) eq 'ss' ) || ( substr( $format, $pos, 2 ) eq 'ge' ) ) { if ( ( substr( $format, $pos, 2 ) eq 'mm' ) && (@placeholders) && ( ( $placeholders[-1]->[0] eq 'h' ) or ( $placeholders[-1]->[0] eq 'hh' ) ) ) { # For this case 'm' is minutes not months. push @placeholders, [ 'mm', length($token), 2, 'minutes' ]; } else { push @placeholders, [ substr( $format, $pos, 2 ), length($token), 2 ]; } if ( ( substr( $format, $pos, 2 ) eq 'ss' ) && ( @placeholders > 1 ) ) { if ( ( $placeholders[-2]->[0] eq 'm' ) || ( $placeholders[-2]->[0] eq 'mm' ) ) { # For this case 'm' is minutes not months. push( @{ $placeholders[-2] }, 'minutes' ); } } $pos += 2; } elsif (( substr( $format, $pos, 1 ) eq 'm' ) || ( substr( $format, $pos, 1 ) eq 'd' ) || ( substr( $format, $pos, 1 ) eq 'h' ) || ( substr( $format, $pos, 1 ) eq 's' ) ) { if ( ( substr( $format, $pos, 1 ) eq 'm' ) && (@placeholders) && ( ( $placeholders[-1]->[0] eq 'h' ) or ( $placeholders[-1]->[0] eq 'hh' ) ) ) { # For this case 'm' is minutes not months. 
push @placeholders, [ 'm', length($token), 1, 'minutes' ]; } else { push @placeholders, [ substr( $format, $pos, 1 ), length($token), 1 ]; } if ( ( substr( $format, $pos, 1 ) eq 's' ) && ( @placeholders > 1 ) ) { if ( ( $placeholders[-2]->[0] eq 'm' ) || ( $placeholders[-2]->[0] eq 'mm' ) ) { # For this case 'm' is minutes not months. push( @{ $placeholders[-2] }, 'minutes' ); } } $pos += 1; } } elsif ( ( substr( $format, $pos, 3 ) eq '[h]' ) ) { $format_mode = 'date' unless $format_mode; push @placeholders, [ '[h]', length($token), 3 ]; $pos += 3; } elsif ( ( substr( $format, $pos, 4 ) eq '[mm]' ) ) { $format_mode = 'date' unless $format_mode; push @placeholders, [ '[mm]', length($token), 4 ]; $pos += 4; } elsif ( $char eq '@' ) { push @placeholders, [ '@', length($token), 1 ]; $pos++; } elsif ( $char eq '*' ) { push @placeholders, [ substr( $format, $pos, 1 ), length($token), 1 ]; } else { $pos++; } $pos++ if ( $pos == $start_pos ); #No Format match $token .= substr( $format, $start_pos, $pos - $start_pos ); } # End of parsing. # Copy the located format string to a result string that we will perform # the substitutions on and return to the user. my $result = $token; # Add a placeholder between the decimal/comma and end of the token, if any. if ( $token_start != -1 ) { push @placeholders, [ substr( $format, $token_start, $pos - $token_start + 1 ), $decimal_pos, $pos - $token_start + 1 ]; } # # In the next sections we process date, number and text formats. We take a # format such as yyyy/mm/dd and replace it with something like 2008/12/25. # if ( ( $format_mode eq 'date' ) && ( $number =~ $qrNUMBER ) ) { # The maximum allowable date in Excel is 9999-12-31T23:59:59.000 which # equates to 2958465.999+ in the 1900 epoch and 2957003.999+ in the # 1904 epoch. We use 0 as the minimum in both epochs. The 1904 system # actually supports negative numbers but that isn't worth the effort. my $min_date = 0; my $max_date = 2958466; $max_date = 2957004 if $is_1904; if ( $number < $min_date || $number >= $max_date ) { return $number; # Return unformatted number. } # Process date formats. my @time = ExcelLocaltime( $number, $is_1904 ); # 0 1 2 3 4 5 6 7 my ( $sec, $min, $hour, $day, $month, $year, $wday, $msec ) = @time; $month++; # localtime() zero indexed month. $year += 1900; # localtime() year. my @full_month_name = qw( None January February March April May June July August September October November December ); my @short_month_name = qw( None Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec ); my @full_day_name = qw( Sunday Monday Tuesday Wednesday Thursday Friday Saturday ); my @short_day_name = qw( Sun Mon Tue Wed Thu Fri Sat ); # Replace the placeholders in the template such as yyyy mm dd with # actual numbers or strings. my $replacement; for my $placeholder ( reverse @placeholders ) { if ( $placeholder->[-1] eq 'minutes' ) { # For this case 'm/mm' is minutes not months. if ( $placeholder->[0] eq 'mm' ) { $replacement = sprintf( "%02d", $min ); } else { $replacement = sprintf( "%d", $min ); } } elsif ( $placeholder->[0] eq 'yyyy' ) { # 4 digit Year. 2000 -> 2000. $replacement = sprintf( '%04d', $year ); } elsif ( $placeholder->[0] eq 'yy' ) { # 2 digit Year. 2000 -> 00. $replacement = sprintf( '%02d', $year % 100 ); } elsif ( $placeholder->[0] eq 'mmmmm' ) { # First character of the month name. 1 -> J. $replacement = substr( $short_month_name[$month], 0, 1 ); } elsif ( $placeholder->[0] eq 'mmmm' ) { # Full month name. 1 -> January. 
$replacement = $full_month_name[$month]; } elsif ( $placeholder->[0] eq 'mmm' ) { # Short month name. 1 -> Jan. $replacement = $short_month_name[$month]; } elsif ( $placeholder->[0] eq 'mm' ) { # 2 digit month. 1 -> 01. $replacement = sprintf( '%02d', $month ); } elsif ( $placeholder->[0] eq 'm' ) { # 1 digit month. 1 -> 1. $replacement = sprintf( '%d', $month ); } elsif ( $placeholder->[0] eq 'dddd' ) { # Full day name. Wednesday (for example.) $replacement = $full_day_name[$wday]; } elsif ( $placeholder->[0] eq 'ddd' ) { # Short day name. Wed (for example.) $replacement = $short_day_name[$wday]; } elsif ( $placeholder->[0] eq 'dd' ) { # 2 digit day. 1 -> 01. $replacement = sprintf( '%02d', $day ); } elsif ( $placeholder->[0] eq 'd' ) { # 1 digit day. 1 -> 1. $replacement = sprintf( '%d', $day ); } elsif ( $placeholder->[0] eq 'hh' ) { # 2 digit hour. if ($is_12_hour) { my $hour_tmp = $hour % 12; $hour_tmp = 12 if $hour % 12 == 0; $replacement = sprintf( '%d', $hour_tmp ); } else { $replacement = sprintf( '%02d', $hour ); } } elsif ( $placeholder->[0] eq 'h' ) { # 1 digit hour. if ($is_12_hour) { my $hour_tmp = $hour % 12; $hour_tmp = 12 if $hour % 12 == 0; $replacement = sprintf( '%2d', $hour_tmp ); } else { $replacement = sprintf( '%d', $hour ); } } elsif ( $placeholder->[0] eq 'ss' ) { # 2 digit seconds. $replacement = sprintf( '%02d', $sec ); } elsif ( $placeholder->[0] eq 's' ) { # 1 digit seconds. $replacement = sprintf( '%d', $sec ); } elsif ( $placeholder->[0] eq 'am/pm' ) { # AM/PM. $replacement = ( $hour >= 12 ) ? 'PM' : 'AM'; } elsif ( $placeholder->[0] eq 'a/p' ) { # AM/PM. $replacement = ( $hour >= 12 ) ? 'P' : 'A'; } elsif ( $placeholder->[0] eq '.' ) { # Decimal point for seconds. $replacement = '.'; } elsif ( $placeholder->[0] =~ /(^0+$)/ ) { # Milliseconds. For example h:ss.000. my $length = length($1); $replacement = substr( sprintf( "%.${length}f", $msec / 1000 ), 2, $length ); } elsif ( $placeholder->[0] eq '[h]' ) { # Hours modulus 24. 25 displays as 25 not as 1. $replacement = sprintf( '%d', int($number) * 24 + $hour ); } elsif ( $placeholder->[0] eq '[mm]' ) { # Mins modulus 60. 72 displays as 72 not as 12. $replacement = sprintf( '%d', ( int($number) * 24 + $hour ) * 60 + $min ); } elsif ( $placeholder->[0] eq 'ge' ) { require Spreadsheet::ParseExcel::FmtJapan; # Japanese Nengo (aka Gengo) in initialism (abbr. name) $replacement = Spreadsheet::ParseExcel::FmtJapan::CnvNengo( abbr_name => @time ); } elsif ( $placeholder->[0] eq 'ggge' ) { require Spreadsheet::ParseExcel::FmtJapan; # Japanese Nengo (aka Gengo) in Kanji (full name) $replacement = Spreadsheet::ParseExcel::FmtJapan::CnvNengo( name => @time ); } elsif ( $placeholder->[0] eq '@' ) { # Text format. $replacement = $number; } elsif ( $placeholder->[0] eq ',' ) { next; } # Substitute the replacement string back into the template. substr( $result, $placeholder->[1], $placeholder->[2], $replacement ); } } elsif ( ( $format_mode eq 'number' ) && ( $number =~ $qrNUMBER ) ) { # Process non date formats. if (@placeholders) { while ( $placeholders[-1]->[0] eq ',' ) { $comma_count--; substr( $result, $placeholders[-1]->[1], $placeholders[-1]->[2], '' ); $number /= 1000; pop @placeholders; } my $number_format = join( '', map { $_->[0] } @placeholders ); my $number_result; my $str_length = 0; my $engineering = 0; my $is_decimal = 0; my $is_integer = 0; my $after_decimal = undef; for my $token ( split //, $number_format ) { if ( $token eq '.' 
) { $str_length++; $is_decimal = 1; } elsif ( ( $token eq 'E' ) || ( $token eq 'e' ) ) { $engineering = 1; } elsif ( $token eq '0' ) { $str_length++; $after_decimal++ if $is_decimal; $is_integer = 1; } elsif ( $token eq '#' ) { $after_decimal++ if $is_decimal; $is_integer = 1; } elsif ( $token eq '?' ) { $after_decimal++ if $is_decimal; } } $number *= 100.0 if $is_percent; my $data = ($is_currency) ? abs($number) : $number + 0; if ($is_fraction) { $number_result = sprintf( "%0${str_length}d", int($data) ); } else { if ($is_decimal) { if ( defined $after_decimal ) { $number_result = sprintf "%0${str_length}.${after_decimal}f", $data; } else { $number_result = sprintf "%0${str_length}f", $data; } # Fix for Perl and sprintf not rounding up like Excel. # http://rt.cpan.org/Public/Bug/Display.html?id=45626 if ( $data =~ /^${number_result}5/ ) { $number_result = sprintf "%0${str_length}.${after_decimal}f", $data . '1'; } } else { $number_result = sprintf( "%0${str_length}.0f", $data ); } } $number_result = AddComma($number_result) if $comma_count > 0; my $number_length = length($number_result); my $decimal_pos = -1; my $replacement; for ( my $i = @placeholders - 1 ; $i >= 0 ; $i-- ) { my $placeholder = $placeholders[$i]; if ( $placeholder->[0] =~ /([#0]*)([\.]?)([0#]*)([eE])([\+\-])([0#]+)/ ) { substr( $result, $placeholder->[1], $placeholder->[2], MakeE( $placeholder->[0], $number ) ); } elsif ( $placeholder->[0] =~ /\// ) { substr( $result, $placeholder->[1], $placeholder->[2], MakeFraction( $placeholder->[0], $number, $is_integer ) ); } elsif ( $placeholder->[0] eq '.' ) { $number_length--; $decimal_pos = $number_length; } elsif ( $placeholder->[0] eq '+' ) { substr( $result, $placeholder->[1], $placeholder->[2], ( $number > 0 ) ? '+' : ( ( $number == 0 ) ? '+' : '-' ) ); } elsif ( $placeholder->[0] eq '-' ) { substr( $result, $placeholder->[1], $placeholder->[2], ( $number > 0 ) ? '' : ( ( $number == 0 ) ? '' : '-' ) ); } elsif ( $placeholder->[0] eq '@' ) { substr( $result, $placeholder->[1], $placeholder->[2], $number ); } elsif ( $placeholder->[0] eq '*' ) { substr( $result, $placeholder->[1], $placeholder->[2], '' ); } elsif (( $placeholder->[0] eq "\xA2\xA4" ) or ( $placeholder->[0] eq "\xA2\xA5" ) or ( $placeholder->[0] eq "\x81\xA2" ) or ( $placeholder->[0] eq "\x81\xA3" ) ) { substr( $result, $placeholder->[1], $placeholder->[2], $placeholder->[0] ); } elsif (( $placeholder->[0] eq '(' ) or ( $placeholder->[0] eq ')' ) ) { substr( $result, $placeholder->[1], $placeholder->[2], $placeholder->[0] ); } else { if ( $number_length > 0 ) { if ( $i <= 0 ) { $replacement = substr( $number_result, 0, $number_length ); $number_length = 0; } else { my $real_part_length = length( $placeholder->[0] ); if ( $decimal_pos >= 0 ) { my $format = $placeholder->[0]; $format =~ s/^#+//; $real_part_length = length $format; $real_part_length = ( $number_length <= $real_part_length ) ? $number_length : $real_part_length; } else { $real_part_length = ( $number_length <= $real_part_length ) ? $number_length : $real_part_length; } $replacement = substr( $number_result, $number_length - $real_part_length, $real_part_length ); $number_length -= $real_part_length; } } else { $replacement = ''; } substr( $result, $placeholder->[1], $placeholder->[2], "\x00" . $replacement ); } } $replacement = ( $number_length > 0 ) ? 
substr( $number_result, 0, $number_length ) : ''; $result =~ s/\x00/$replacement/; $result =~ s/\x00//g; } } else { # Process text formats my $is_text = 0; for ( my $i = @placeholders - 1 ; $i >= 0 ; $i-- ) { my $placeholder = $placeholders[$i]; if ( $placeholder->[0] eq '@' ) { substr( $result, $placeholder->[1], $placeholder->[2], $number ); $is_text++; } else { substr( $result, $placeholder->[1], $placeholder->[2], '' ); } } $result = $number unless $is_text; } # End of placeholder substitutions. # Trim the leading and trailing whitespace from the results. $result =~ s/^\s+//; $result =~ s/\s+$//; # Fix for negative currency. $result =~ s/^\$\-/\-\$/; $result =~ s/^\$ \-/\-\$ /; # Return color and locale strings if required. if ($want_subformats) { return ( $result, $color, $locale ); } else { return $result; } } #------------------------------------------------------------------------------ # AddComma (for Spreadsheet::ParseExcel::Utility) #------------------------------------------------------------------------------ sub AddComma { my ($sNum) = @_; if ( $sNum =~ /^([^\d]*)(\d\d\d\d+)(\.*.*)$/ ) { my ( $sPre, $sObj, $sAft ) = ( $1, $2, $3 ); for ( my $i = length($sObj) - 3 ; $i > 0 ; $i -= 3 ) { substr( $sObj, $i, 0, ',' ); } return $sPre . $sObj . $sAft; } else { return $sNum; } } #------------------------------------------------------------------------------ # MakeFraction (for Spreadsheet::ParseExcel::Utility) #------------------------------------------------------------------------------ sub MakeFraction { my ( $sFmt, $iData, $iFlg ) = @_; my $iBunbo; my $iShou; #1. Init # print "FLG: $iFlg\n"; if ($iFlg) { $iShou = $iData - int($iData); return '' if ( $iShou == 0 ); } else { $iShou = $iData; } $iShou = abs($iShou); my $sSWk; #2.Calc BUNBO #2.1 BUNBO defined if ( $sFmt =~ /\/(\d+)$/ ) { $iBunbo = $1; return sprintf( "%d/%d", $iShou * $iBunbo, $iBunbo ); } else { #2.2 Calc BUNBO $sFmt =~ /\/(\?+)$/; my $iKeta = length($1); my $iSWk = 1; my $sSWk = ''; my $iBunsi; for ( my $iBunbo = 2 ; $iBunbo < 10**$iKeta ; $iBunbo++ ) { $iBunsi = int( $iShou * $iBunbo + 0.5 ); my $iCmp = abs( $iShou - ( $iBunsi / $iBunbo ) ); if ( $iCmp < $iSWk ) { $iSWk = $iCmp; $sSWk = sprintf( "%d/%d", $iBunsi, $iBunbo ); last if ( $iSWk == 0 ); } } return $sSWk; } } #------------------------------------------------------------------------------ # MakeE (for Spreadsheet::ParseExcel::Utility) #------------------------------------------------------------------------------ sub MakeE { my ( $sFmt, $iData ) = @_; $sFmt =~ /(([#0]*)[\.]?[#0]*)([eE])([\+\-][0#]+)/; my ( $sKari, $iKeta, $sE, $sSisu ) = ( $1, length($2), $3, $4 ); $iKeta = 1 if ( $iKeta <= 0 ); my $iLog10 = 0; $iLog10 = ( $iData == 0 ) ? 0 : ( log( abs($iData) ) / log(10) ); $iLog10 = ( int( $iLog10 / $iKeta ) + ( ( ( $iLog10 - int( $iLog10 / $iKeta ) ) < 0 ) ? -1 : 0 ) ) * $iKeta; my $sUe = ExcelFmt( $sKari, $iData * ( 10**( $iLog10 * -1 ) ), 0 ); my $sShita = ExcelFmt( $sSisu, $iLog10, 0 ); return $sUe . $sE . $sShita; } #------------------------------------------------------------------------------ # LeapYear (for Spreadsheet::ParseExcel::Utility) #------------------------------------------------------------------------------ sub LeapYear { my ($iYear) = @_; return 1 if ( $iYear == 1900 ); #Special for Excel return ( ( ( $iYear % 4 ) == 0 ) && ( ( $iYear % 100 ) || ( $iYear % 400 ) == 0 ) ) ? 
1 : 0; } #------------------------------------------------------------------------------ # LocaltimeExcel (for Spreadsheet::ParseExcel::Utility) #------------------------------------------------------------------------------ sub LocaltimeExcel { my ( $iSec, $iMin, $iHour, $iDay, $iMon, $iYear, $iwDay, $iMSec, $flg1904 ) = @_; #0. Init $iMon++; $iYear += 1900; #1. Calc Time my $iTime; $iTime = $iHour; $iTime *= 60; $iTime += $iMin; $iTime *= 60; $iTime += $iSec; $iTime += $iMSec / 1000.0 if ( defined($iMSec) ); $iTime /= 86400.0; #3600*24(1day in seconds) my $iY; my $iYDays; #2. Calc Days if ($flg1904) { $iY = 1904; $iTime--; #Start from Jan 1st $iYDays = 366; } else { $iY = 1900; $iYDays = 366; #In Excel 1900 is leap year (That's not TRUE!) } while ( $iY < $iYear ) { $iTime += $iYDays; $iY++; $iYDays = ( LeapYear($iY) ) ? 366 : 365; } for ( my $iM = 1 ; $iM < $iMon ; $iM++ ) { if ( $iM == 1 || $iM == 3 || $iM == 5 || $iM == 7 || $iM == 8 || $iM == 10 || $iM == 12 ) { $iTime += 31; } elsif ( $iM == 4 || $iM == 6 || $iM == 9 || $iM == 11 ) { $iTime += 30; } elsif ( $iM == 2 ) { $iTime += ( LeapYear($iYear) ) ? 29 : 28; } } $iTime += $iDay; return $iTime; } my @month_days = qw( 0 31 28 31 30 31 30 31 31 30 31 30 31 ); #------------------------------------------------------------------------------ # ExcelLocaltime (for Spreadsheet::ParseExcel::Utility) #------------------------------------------------------------------------------ sub ExcelLocaltime { my ( $dObj, $flg1904 ) = @_; my ( $iSec, $iMin, $iHour, $iDay, $iMon, $iYear, $iwDay, $iMSec ); my ( $iDt, $iTime, $iYDays, $iMD ); $iDt = int($dObj); $iTime = $dObj - $iDt; #1. Calc Days if ($flg1904) { $iYear = 1904; $iDt++; #Start from Jan 1st $iYDays = 366; $iwDay = ( ( $iDt + 4 ) % 7 ); } else { $iYear = 1900; $iYDays = 366; #In Excel 1900 is leap year (That's not TRUE!) $iwDay = ( ( $iDt + 6 ) % 7 ); } while ( $iDt > $iYDays ) { $iDt -= $iYDays; $iYear++; $iYDays = ( ( ( $iYear % 4 ) == 0 ) && ( ( $iYear % 100 ) || ( $iYear % 400 ) == 0 ) ) ? 366 : 365; } $iYear -= 1900; # Localtime year is relative to 1900. for ( $iMon = 1 ; $iMon <= 12 ; $iMon++ ) { $iMD = $month_days[$iMon]; $iMD++ if $iMon == 2 and $iYear % 4 == 0; last if ( $iDt <= $iMD ); $iDt -= $iMD; } #2. Calc Time $iDay = $iDt; $iTime += ( 0.0005 / 86400.0 ); if ($iTime >= 1.0) { $iTime -= int($iTime); $iwDay = ($iwDay == 6) ? 0 : $iwDay + 1; if ($iDay == $iMD) { if ($iMon == 12) { $iMon = 1; $iYear++; } else { $iMon++; } $iDay = 1; } else { $iDay++; } } # Localtime month is 0 based. 
$iMon -= 1; $iTime *= 24.0; $iHour = int($iTime); $iTime -= $iHour; $iTime *= 60.0; $iMin = int($iTime); $iTime -= $iMin; $iTime *= 60.0; $iSec = int($iTime); $iTime -= $iSec; $iTime *= 1000.0; $iMSec = int($iTime); return ( $iSec, $iMin, $iHour, $iDay, $iMon, $iYear, $iwDay, $iMSec ); } # ----------------------------------------------------------------------------- # col2int (for Spreadsheet::ParseExcel::Utility) #------------------------------------------------------------------------------ # converts a excel row letter into an int for use in an array sub col2int { my $result = 0; my $str = shift; my $incr = 0; for ( my $i = length($str) ; $i > 0 ; $i-- ) { my $char = substr( $str, $i - 1 ); my $curr += ord( lc($char) ) - ord('a') + 1; $curr *= $incr if ($incr); $result += $curr; $incr += 26; } # this is one out as we range 0..x-1 not 1..x $result--; return $result; } # ----------------------------------------------------------------------------- # int2col (for Spreadsheet::ParseExcel::Utility) #------------------------------------------------------------------------------ ### int2col # convert a column number into column letters # @note this is quite a brute force coarse method # does not manage values over 701 (ZZ) # @arg number, to convert # @returns string, column name # sub int2col { my $out = ""; my $val = shift; do { $out .= chr( ( $val % 26 ) + ord('A') ); $val = int( $val / 26 ) - 1; } while ( $val >= 0 ); return scalar reverse $out; } # ----------------------------------------------------------------------------- # sheetRef (for Spreadsheet::ParseExcel::Utility) #------------------------------------------------------------------------------ # ----------------------------------------------------------------------------- ### sheetRef # convert an excel letter-number address into a useful array address # @note that also Excel uses X-Y notation, we normally use Y-X in arrays # @args $str, excel coord eg. A2 # @returns an array - 2 elements - column, row, or undefined # sub sheetRef { my $str = shift; my @ret; $str =~ m/^(\D+)(\d+)$/; if ( $1 && $2 ) { push( @ret, $2 - 1, col2int($1) ); } if ( $ret[0] < 0 ) { undef @ret; } return @ret; } # ----------------------------------------------------------------------------- # xls2csv (for Spreadsheet::ParseExcel::Utility) #------------------------------------------------------------------------------ ### xls2csv # convert a chunk of an excel file into csv text chunk # @args $param, sheet-colrow:colrow (1-A1:B2 or A1:B2 for sheet 1 # @args $rotate, 0 or 1 decides if output should be rotated or not # @returns string containing a chunk of csv # sub xls2csv { my ( $filename, $regions, $rotate ) = @_; my $sheet = 0; # We need Text::CSV_XS for proper CSV handling. require Text::CSV_XS; # extract any sheet number from the region string $regions =~ m/^(\d+)-(.*)/; if ($2) { $sheet = $1 - 1; $regions = $2; } # now extract the start and end regions $regions =~ m/(.*):(.*)/; if ( !$1 || !$2 ) { print STDERR "Bad Params"; return ""; } my @start = sheetRef($1); my @end = sheetRef($2); if ( !@start ) { print STDERR "Bad coorinates - $1"; return ""; } if ( !@end ) { print STDERR "Bad coorinates - $2"; return ""; } if ( $start[1] > $end[1] ) { print STDERR "Bad COLUMN ordering\n"; print STDERR "Start column " . int2col( $start[1] ); print STDERR " after end column " . int2col( $end[1] ) . "\n"; return ""; } if ( $start[0] > $end[0] ) { print STDERR "Bad ROW ordering\n"; print STDERR "Start row " . ( $start[0] + 1 ); print STDERR " after end row " . 
( $end[0] + 1 ) . "\n"; exit; } # start the excel object now my $oExcel = new Spreadsheet::ParseExcel; my $oBook = $oExcel->Parse($filename); # open the sheet my $oWkS = $oBook->{Worksheet}[$sheet]; # now check that the region exists in the file # if not truncate to the possible region # output a warning msg if ( $start[1] < $oWkS->{MinCol} ) { print STDERR int2col( $start[1] ) . " < min col " . int2col( $oWkS->{MinCol} ) . " Resetting\n"; $start[1] = $oWkS->{MinCol}; } if ( $end[1] > $oWkS->{MaxCol} ) { print STDERR int2col( $end[1] ) . " > max col " . int2col( $oWkS->{MaxCol} ) . " Resetting\n"; $end[1] = $oWkS->{MaxCol}; } if ( $start[0] < $oWkS->{MinRow} ) { print STDERR "" . ( $start[0] + 1 ) . " < min row " . ( $oWkS->{MinRow} + 1 ) . " Resetting\n"; $start[0] = $oWkS->{MinCol}; } if ( $end[0] > $oWkS->{MaxRow} ) { print STDERR "" . ( $end[0] + 1 ) . " > max row " . ( $oWkS->{MaxRow} + 1 ) . " Resetting\n"; $end[0] = $oWkS->{MaxRow}; } my $x1 = $start[1]; my $y1 = $start[0]; my $x2 = $end[1]; my $y2 = $end[0]; my @cell_data; my $row = 0; if ( !$rotate ) { for ( my $y = $y1 ; $y <= $y2 ; $y++ ) { for ( my $x = $x1 ; $x <= $x2 ; $x++ ) { my $cell = $oWkS->{Cells}[$y][$x]; my $value; if ( defined $cell ) { $value .= $cell->value(); } else { $value = ''; } push @{ $cell_data[$row] }, $value; } $row++; } } else { for ( my $x = $x1 ; $x <= $x2 ; $x++ ) { for ( my $y = $y1 ; $y <= $y2 ; $y++ ) { my $cell = $oWkS->{Cells}[$y][$x]; my $value; if ( defined $cell ) { $value .= $cell->value(); } else { $value = ''; } push @{ $cell_data[$row] }, $value; } $row++; } } # Create the CSV output string. my $csv = Text::CSV_XS->new( { binary => 1, eol => $/ } ); my $output = ""; for my $row (@cell_data) { $csv->combine(@$row); $output .= $csv->string(); } return $output; } 1; __END__ =pod =head1 NAME Spreadsheet::ParseExcel::Utility - Utility functions for Spreadsheet::ParseExcel. =head1 SYNOPSIS use Spreadsheet::ParseExcel::Utility qw(ExcelFmt ExcelLocaltime LocaltimeExcel); # Convert localtime to Excel time my $datetime = LocaltimeExcel(11, 10, 12, 23, 2, 64); # 1964-3-23 12:10:11 print $datetime, "\n"; # 23459.5070717593 (Excel date/time format) # Convert Excel Time to localtime my @time = ExcelLocaltime($datetime); print join(":", @time), "\n"; # 11:10:12:23:2:64:1:0 # Formatting print ExcelFmt('yyyy-mm-dd', $datetime), "\n"; # 1964-3-23 print ExcelFmt('m-d-yy', $datetime), "\n"; # 3-23-64 print ExcelFmt('#,##0', $datetime), "\n"; # 23,460 print ExcelFmt('#,##0.00', $datetime), "\n"; # 23,459.51 =head1 DESCRIPTION The C module provides utility functions for working with ParseExcel and Excel data. =head1 Functions C can export the following functions: ExcelFmt ExcelLocaltime LocaltimeExcel col2int int2col sheetRef xls2csv These functions must be imported implicitly: # Just one function. use Spreadsheet::ParseExcel::Utility 'col2int'; # More than one. use Spreadsheet::ParseExcel::Utility qw(ExcelFmt ExcelLocaltime LocaltimeExcel); =head2 ExcelFmt($format_string, $number, $is_1904) Excel stores data such as dates and currency values as numbers. The way these numbers are displayed is controlled by the number format string for the cell. For example a cell with a number format of C<'$#,##0.00'> for currency and a value of 1234.567 would be displayed as follows: '$#,##0.00' + 1234.567 = '$1,234.57'. The C function tries to emulate this formatting so that the user can convert raw numbers returned by C to a desired format. For example: print ExcelFmt('$#,##0.00', 1234.567); # $1,234.57. 
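The same function can also render date/time serial numbers, since Excel stores dates as plain numbers. An illustrative sketch, using the serial number 39506.5 (which corresponds to 28 Feb 2008 12:00):

    # Date formats applied to an Excel date/time serial number.
    print ExcelFmt('d mmm yyyy',     39506.5);   # 28 Feb 2008
    print ExcelFmt('dd/mm/yy hh:mm', 39506.5);   # 28/02/08 12:00
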
The syntax of the function is: my $text = ExcelFmt($format_string, $number, $is_1904); Where C<$format_string> is an Excel number format string, C<$number> is a real or integer number and C is an optional flag to indicate that dates should use Excel's 1904 epoch instead of the default 1900 epoch. C is also used internally to convert numbers returned by the C method to the formatted value returned by the C method: my $cell = $worksheet->get_cell( 0, 0 ); print $cell->unformatted(), "\n"; # 1234.567 print $cell->value(), "\n"; # $1,234.57 The most common usage for C is to convert numbers to dates. Dates and times in Excel are represented by real numbers, for example "1 Jan 2001 12:30 PM" is represented by the number 36892.521. The integer part of the number stores the number of days since the epoch and the fractional part stores the percentage of the day. By applying an Excel number format the number is converted to the desired string representation: print ExcelFmt('d mmm yyyy h:mm AM/PM', 36892.521); # 1 Jan 2001 12:30 PM C<$is_1904> is an optional flag to indicate that dates should use Excel's 1904 epoch instead of the default 1900 epoch. Excel for Windows generally uses 1900 and Excel for Mac OS uses 1904. The C<$is1904> flag isn't required very often by a casual user and can usually be ignored. =head2 ExcelLocaltime($excel_datetime, $is_1904) The C function converts from an Excel date/time number to a C-like array of values: my @time = ExcelLocaltime($excel_datetime); # 0 1 2 3 4 5 6 7 my ( $sec, $min, $hour, $day, $month, $year, $wday, $msec ) = @time; The array elements from C<(0 .. 6)> are the same as Perl's C. The last element C<$msec> is milliseconds. In particular it should be noted that, in common with C, the month is zero indexed and the year is the number of years since 1900. This means that you will usually need to do the following: $month++; $year += 1900; See also Perl's documentation for L: The C<$is_1904> flag is an optional. It is used to indicate that dates should use Excel's 1904 epoch instead of the default 1900 epoch. =head2 LocaltimeExcel($sec, $min, $hour, $day, $month, $year, $wday, $msec, $is_1904) The C function converts from a C-like array of values to an Excel date/time number: $excel_datetime = LocaltimeExcel($sec, $min, $hour, $day, $month, $year, $wday, $msec); The array elements from C<(0 .. 6)> are the same as Perl's C. The last element C<$msec> is milliseconds. In particular it should be noted that, in common with C, the month is zero indexed and the year is the number of years since 1900. See also Perl's documentation for L: The C<$wday> and C<$msec> elements are usually optional. This time elements can also be zeroed if they aren't of interest: # sec, min, hour, day, month, year $excel_datetime = LocaltimeExcel( 0, 0, 0, 1, 0, 101 ); print ExcelFmt('d mmm yyyy', $excel_datetime); # 1 Jan 2001 The C<$is_1904> flag is also optional. It is used to indicate that dates should use Excel's 1904 epoch instead of the default 1900 epoch. =head2 col2int($column) The C function converts an Excel column letter to an zero-indexed column number: print col2int('A'); # 0 print col2int('AA'); # 26 This function was contributed by Kevin Mulholland. =head2 int2col($column_number) The C function converts an zero-indexed Excel column number to a column letter: print int2col(0); # 'A' print int2col(26); # 'AA' This function was contributed by Kevin Mulholland. 
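As a short illustrative sketch, the two functions invert each other (both use zero-indexed column numbers):

    # Round trips between column letters and zero-indexed column numbers.
    print int2col( col2int('AA') ), "\n";   # AA
    print col2int( int2col(26) ),   "\n";   # 26
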
=head2 sheetRef($cell_string) The C function converts an Excel cell reference in 'A1' notation to a zero-indexed C<(row, col)> pair. my ($row, $col) = sheetRef('A1'); # ( 0, 0 ) my ($row, $col) = sheetRef('C2'); # ( 1, 2 ) This function was contributed by Kevin Mulholland. =head2 xls2csv($filename, $region, $rotate) The C function converts a section of an Excel file into a CSV text string. $csv_text = xls2csv($filename, $region, $rotate); Where: $region = "sheet-colrow:colrow" For example '1-A1:B2' means 'A1:B2' for sheet 1. and $rotate = 0 or 1 (output is rotated/transposed or not) This function requires C to be installed. It was contributed by Kevin Mulholland along with the C script in the C directory of the distro. See also the following xls2csv utilities: Ken Prows' C: http://search.cpan.org/~ken/xls2csv/script/xls2csv and H.Merijn Brand's C (which is part of Spreadsheet::Read): http://search.cpan.org/~hmbrand/Spreadsheet-Read/ =head1 AUTHOR Current maintainer 0.60+: Douglas Wilson dougw@cpan.org Maintainer 0.40-0.59: John McNamara jmcnamara@cpan.org Maintainer 0.27-0.33: Gabor Szabo szabgab@cpan.org Original author: Kawai Takanori kwitknr@cpan.org =head1 COPYRIGHT Copyright (c) 2014 Douglas Wilson Copyright (c) 2009-2013 John McNamara Copyright (c) 2006-2008 Gabor Szabo Copyright (c) 2000-2006 Kawai Takanori All rights reserved. You may distribute under the terms of either the GNU General Public License or the Artistic License, as specified in the Perl README file. =cut gdata/inst/perl/Spreadsheet/ParseExcel/Dump.pm0000644000175100001440000001756713003720416021050 0ustar hornikuserspackage Spreadsheet::ParseExcel::Dump; ############################################################################### # # Spreadsheet::ParseExcel::Dump - A class for dumping Excel records. # # Used in conjunction with Spreadsheet::ParseExcel. # # Copyright (c) 2014 Douglas Wilson # Copyright (c) 2009-2013 John McNamara # Copyright (c) 2006-2008 Gabor Szabo # Copyright (c) 2000-2006 Kawai Takanori # # perltidy with standard settings. 
# # Documentation after __END__ # use strict; use warnings; our $VERSION = '0.65'; my %NameTbl = ( #P291 0x0A => 'EOF', 0x0C => 'CALCCOUNT', 0x0D => 'CALCMODE', 0x0E => 'PRECISION', 0x0F => 'REFMODE', 0x10 => 'DELTA', 0x11 => 'ITERATION', 0x12 => 'PROTECT', 0x13 => 'PASSWORD', 0x14 => 'HEADER', 0x15 => 'FOOTER', 0x16 => 'EXTERNCOUNT', 0x17 => 'EXTERNSHEET', 0x19 => 'WINDOWPROTECT', 0x1A => 'VERTICALPAGEBREAKS', 0x1B => 'HORIZONTALPAGEBREAKS', 0x1C => 'NOTE', 0x1D => 'SELECTION', 0x22 => '1904', 0x26 => 'LEFTMARGIN', 0x27 => 'RIGHTMARGIN', 0x28 => 'TOPMARGIN', 0x29 => 'BOTTOMMARGIN', 0x2A => 'PRINTHEADERS', 0x2B => 'PRINTGRIDLINES', 0x2F => 'FILEPASS', 0x3C => 'COUNTINUE', 0x3D => 'WINDOW1', 0x40 => 'BACKUP', 0x41 => 'PANE', 0x42 => 'CODEPAGE', 0x4D => 'PLS', 0x50 => 'DCON', 0x51 => 'DCONREF', #P292 0x52 => 'DCONNAME', 0x55 => 'DEFCOLWIDTH', 0x59 => 'XCT', 0x5A => 'CRN', 0x5B => 'FILESHARING', 0x5C => 'WRITEACCES', 0x5D => 'OBJ', 0x5E => 'UNCALCED', 0x5F => 'SAVERECALC', 0x60 => 'TEMPLATE', 0x63 => 'OBJPROTECT', 0x7D => 'COLINFO', 0x7E => 'RK', 0x7F => 'IMDATA', 0x80 => 'GUTS', 0x81 => 'WSBOOL', 0x82 => 'GRIDSET', 0x83 => 'HCENTER', 0x84 => 'VCENTER', 0x85 => 'BOUNDSHEET', 0x86 => 'WRITEPROT', 0x87 => 'ADDIN', 0x88 => 'EDG', 0x89 => 'PUB', 0x8C => 'COUNTRY', 0x8D => 'HIDEOBJ', 0x90 => 'SORT', 0x91 => 'SUB', 0x92 => 'PALETTE', 0x94 => 'LHRECORD', 0x95 => 'LHNGRAPH', 0x96 => 'SOUND', 0x98 => 'LPR', 0x99 => 'STANDARDWIDTH', 0x9A => 'FNGROUPNAME', 0x9B => 'FILTERMODE', 0x9C => 'FNGROUPCOUNT', #P293 0x9D => 'AUTOFILTERINFO', 0x9E => 'AUTOFILTER', 0xA0 => 'SCL', 0xA1 => 'SETUP', 0xA9 => 'COORDLIST', 0xAB => 'GCW', 0xAE => 'SCENMAN', 0xAF => 'SCENARIO', 0xB0 => 'SXVIEW', 0xB1 => 'SXVD', 0xB2 => 'SXV', 0xB4 => 'SXIVD', 0xB5 => 'SXLI', 0xB6 => 'SXPI', 0xB8 => 'DOCROUTE', 0xB9 => 'RECIPNAME', 0xBC => 'SHRFMLA', 0xBD => 'MULRK', 0xBE => 'MULBLANK', 0xBF => 'TOOLBARHDR', 0xC0 => 'TOOLBAREND', 0xC1 => 'MMS', 0xC2 => 'ADDMENU', 0xC3 => 'DELMENU', 0xC5 => 'SXDI', 0xC6 => 'SXDB', 0xCD => 'SXSTRING', 0xD0 => 'SXTBL', 0xD1 => 'SXTBRGIITM', 0xD2 => 'SXTBPG', 0xD3 => 'OBPROJ', 0xD5 => 'SXISDTM', 0xD6 => 'RSTRING', 0xD7 => 'DBCELL', 0xDA => 'BOOKBOOL', 0xDC => 'PARAMQRY', 0xDC => 'SXEXT', 0xDD => 'SCENPROTECT', 0xDE => 'OLESIZE', #P294 0xDF => 'UDDESC', 0xE0 => 'XF', 0xE1 => 'INTERFACEHDR', 0xE2 => 'INTERFACEEND', 0xE3 => 'SXVS', 0xEA => 'TABIDCONF', 0xEB => 'MSODRAWINGGROUP', 0xEC => 'MSODRAWING', 0xED => 'MSODRAWINGSELECTION', 0xEF => 'PHONETICINFO', 0xF0 => 'SXRULE', 0xF1 => 'SXEXT', 0xF2 => 'SXFILT', 0xF6 => 'SXNAME', 0xF7 => 'SXSELECT', 0xF8 => 'SXPAIR', 0xF9 => 'SXFMLA', 0xFB => 'SXFORMAT', 0xFC => 'SST', 0xFD => 'LABELSST', 0xFF => 'EXTSST', 0x100 => 'SXVDEX', 0x103 => 'SXFORMULA', 0x122 => 'SXDBEX', 0x13D => 'TABID', 0x160 => 'USESELFS', 0x161 => 'DSF', 0x162 => 'XL5MODIFY', 0x1A5 => 'FILESHARING2', 0x1A9 => 'USERBVIEW', 0x1AA => 'USERVIEWBEGIN', 0x1AB => 'USERSVIEWEND', 0x1AD => 'QSI', 0x1AE => 'SUPBOOK', 0x1AF => 'PROT4REV', 0x1B0 => 'CONDFMT', 0x1B1 => 'CF', 0x1B2 => 'DVAL', #P295 0x1B5 => 'DCONBIN', 0x1B6 => 'TXO', 0x1B7 => 'REFRESHALL', 0x1B8 => 'HLINK', 0x1BA => 'CODENAME', 0x1BB => 'SXFDBTYPE', 0x1BC => 'PROT4REVPASS', 0x1BE => 'DV', 0x200 => 'DIMENSIONS', 0x201 => 'BLANK', 0x202 => 'Integer', #Not Documented 0x203 => 'NUMBER', 0x204 => 'LABEL', 0x205 => 'BOOLERR', 0x207 => 'STRING', 0x208 => 'ROW', 0x20B => 'INDEX', 0x218 => 'NAME', 0x221 => 'ARRAY', 0x223 => 'EXTERNNAME', 0x225 => 'DEFAULTROWHEIGHT', 0x231 => 'FONT', 0x236 => 'TABLE', 0x23E => 'WINDOW2', 0x293 => 'STYLE', 0x406 => 'FORMULA', 
0x41E => 'FORMAT', 0x18 => 'NAME', 0x06 => 'FORMULA', 0x09 => 'BOF(BIFF2)', 0x209 => 'BOF(BIFF3)', 0x409 => 'BOF(BIFF4)', 0x809 => 'BOF(BIFF5-7)', 0x31 => 'FONT', 0x27E => 'RK', #Chart/Graph 0x1001 => 'UNITS', 0x1002 => 'CHART', 0x1003 => 'SERISES', 0x1006 => 'DATAFORMAT', 0x1007 => 'LINEFORMAT', 0x1009 => 'MAKERFORMAT', 0x100A => 'AREAFORMAT', 0x100B => 'PIEFORMAT', 0x100C => 'ATTACHEDLABEL', 0x100D => 'SERIESTEXT', 0x1014 => 'CHARTFORMAT', 0x1015 => 'LEGEND', 0x1016 => 'SERIESLIST', 0x1017 => 'BAR', 0x1018 => 'LINE', 0x1019 => 'PIE', 0x101A => 'AREA', 0x101B => 'SCATTER', 0x101C => 'CHARTLINE', 0x101D => 'AXIS', 0x101E => 'TICK', 0x101F => 'VALUERANGE', 0x1020 => 'CATSERRANGE', 0x1021 => 'AXISLINEFORMAT', 0x1022 => 'CHARTFORMATLINK', 0x1024 => 'DEFAULTTEXT', 0x1025 => 'TEXT', 0x1026 => 'FONTX', 0x1027 => 'OBJECTLINK', 0x1032 => 'FRAME', 0x1033 => 'BEGIN', 0x1034 => 'END', 0x1035 => 'PLOTAREA', 0x103A => '3D', 0x103C => 'PICF', 0x103D => 'DROPBAR', 0x103E => 'RADAR', 0x103F => 'SURFACE', 0x1040 => 'RADARAREA', 0x1041 => 'AXISPARENT', 0x1043 => 'LEGENDXN', 0x1044 => 'SHTPROPS', 0x1045 => 'SERTOCRT', 0x1046 => 'AXESUSED', 0x1048 => 'SBASEREF', 0x104A => 'SERPARENT', 0x104B => 'SERAUXTREND', 0x104E => 'IFMT', 0x104F => 'POS', 0x1050 => 'ALRUNS', 0x1051 => 'AI', 0x105B => 'SERAUXERRBAR', 0x105D => 'SERFMT', 0x1060 => 'FBI', 0x1061 => 'BOPPOP', 0x1062 => 'AXCEXT', 0x1063 => 'DAT', 0x1064 => 'PLOTGROWTH', 0x1065 => 'SINDEX', 0x1066 => 'GELFRAME', 0x1067 => 'BPOPPOPCUSTOM', ); #------------------------------------------------------------------------------ # subDUMP (for Spreadsheet::ParseExcel) #------------------------------------------------------------------------------ sub subDUMP { my ( $oBook, $bOp, $bLen, $sWk ) = @_; printf "%04X:%-23s (Len:%3d) : %s\n", $bOp, OpName($bOp), $bLen, unpack( "H40", $sWk ); } #------------------------------------------------------------------------------ # Spreadsheet::ParseExcel->OpName #------------------------------------------------------------------------------ sub OpName { my ($bOp) = @_; return ( defined $NameTbl{$bOp} ) ? $NameTbl{$bOp} : 'undef'; } 1; __END__ =pod =head1 NAME Spreadsheet::ParseExcel::Dump - A class for dumping Excel records. =head1 SYNOPSIS See the documentation for Spreadsheet::ParseExcel. =head1 DESCRIPTION This module is used in conjunction with Spreadsheet::ParseExcel. See the documentation for Spreadsheet::ParseExcel. =head1 AUTHOR Current maintainer 0.60+: Douglas Wilson dougw@cpan.org Maintainer 0.40-0.59: John McNamara jmcnamara@cpan.org Maintainer 0.27-0.33: Gabor Szabo szabgab@cpan.org Original author: Kawai Takanori kwitknr@cpan.org =head1 COPYRIGHT Copyright (c) 2014 Douglas Wilson Copyright (c) 2009-2013 John McNamara Copyright (c) 2006-2008 Gabor Szabo Copyright (c) 2000-2006 Kawai Takanori All rights reserved. You may distribute under the terms of either the GNU General Public License or the Artistic License, as specified in the Perl README file. =cut gdata/inst/perl/Spreadsheet/ParseExcel/FmtJapan2.pm0000644000175100001440000000527113003720416021712 0ustar hornikuserspackage Spreadsheet::ParseExcel::FmtJapan2; ############################################################################### # # Spreadsheet::ParseExcel::FmtJapan2 - A class for Cell formats. # # Used in conjunction with Spreadsheet::ParseExcel. # # Copyright (c) 2014 Douglas Wilson # Copyright (c) 2009-2013 John McNamara # Copyright (c) 2006-2008 Gabor Szabo # Copyright (c) 2000-2006 Kawai Takanori # # perltidy with standard settings. 
# # Documentation after __END__ # use strict; use warnings; use Jcode; use Unicode::Map; use base 'Spreadsheet::ParseExcel::FmtJapan'; our $VERSION = '0.65'; #------------------------------------------------------------------------------ # new (for Spreadsheet::ParseExcel::FmtJapan2) #------------------------------------------------------------------------------ sub new { my ( $sPkg, %hKey ) = @_; my $oMap = Unicode::Map->new('CP932Excel'); die "NO MAP FILE CP932Excel!!" unless ( -r Unicode::Map->mapping("CP932Excel") ); my $oThis = { Code => $hKey{Code}, _UniMap => $oMap, }; bless $oThis; $oThis->SUPER::new(%hKey); return $oThis; } #------------------------------------------------------------------------------ # TextFmt (for Spreadsheet::ParseExcel::FmtJapan2) #------------------------------------------------------------------------------ sub TextFmt { my ( $oThis, $sTxt, $sCode ) = @_; # $sCode = 'sjis' if((! defined($sCode)) || ($sCode eq '_native_')); if ( $oThis->{Code} ) { if ( !defined($sCode) ) { $sTxt =~ s/(.)/\x00$1/sg; $sTxt = $oThis->{_UniMap}->from_unicode($sTxt); } elsif ( $sCode eq 'ucs2' ) { $sTxt = $oThis->{_UniMap}->from_unicode($sTxt); } return Jcode::convert( $sTxt, $oThis->{Code}, 'sjis' ); } else { return $sTxt; } } 1; __END__ =pod =head1 NAME Spreadsheet::ParseExcel::FmtJapan2 - A class for Cell formats. =head1 SYNOPSIS See the documentation for Spreadsheet::ParseExcel. =head1 DESCRIPTION This module is used in conjunction with Spreadsheet::ParseExcel. See the documentation for Spreadsheet::ParseExcel. =head1 AUTHOR Current maintainer 0.60+: Douglas Wilson dougw@cpan.org Maintainer 0.40-0.59: John McNamara jmcnamara@cpan.org Maintainer 0.27-0.33: Gabor Szabo szabgab@cpan.org Original author: Kawai Takanori kwitknr@cpan.org =head1 COPYRIGHT Copyright (c) 2014 Douglas Wilson Copyright (c) 2009-2013 John McNamara Copyright (c) 2006-2008 Gabor Szabo Copyright (c) 2000-2006 Kawai Takanori All rights reserved. You may distribute under the terms of either the GNU General Public License or the Artistic License, as specified in the Perl README file. =cut gdata/inst/perl/Spreadsheet/ParseExcel/Worksheet.pm0000644000175100001440000005765613003720416022121 0ustar hornikuserspackage Spreadsheet::ParseExcel::Worksheet; ############################################################################### # # Spreadsheet::ParseExcel::Worksheet - A class for Worksheets. # # Used in conjunction with Spreadsheet::ParseExcel. # # Copyright (c) 2014 Douglas Wilson # Copyright (c) 2009-2013 John McNamara # Copyright (c) 2006-2008 Gabor Szabo # Copyright (c) 2000-2006 Kawai Takanori # # perltidy with standard settings. # # Documentation after __END__ # use strict; use warnings; use Scalar::Util qw(weaken); our $VERSION = '0.65'; ############################################################################### # # new() # sub new { my ( $class, %properties ) = @_; my $self = \%properties; weaken $self->{_Book}; $self->{Cells} = undef; $self->{DefColWidth} = 8.43; return bless $self, $class; } ############################################################################### # # get_cell( $row, $col ) # # Returns the Cell object at row $row and column $col, if defined. # sub get_cell { my ( $self, $row, $col ) = @_; if ( !defined $row || !defined $col || !defined $self->{MaxRow} || !defined $self->{MaxCol} ) { # Return undef if no arguments are given or if no cells are defined. 
return undef; } elsif ($row < $self->{MinRow} || $row > $self->{MaxRow} || $col < $self->{MinCol} || $col > $self->{MaxCol} ) { # Return undef if outside allowable row/col range. return undef; } else { # Return the Cell object. return $self->{Cells}->[$row]->[$col]; } } ############################################################################### # # row_range() # # Returns a two-element list ($min, $max) containing the minimum and maximum # defined rows in the worksheet. # # If there is no row defined $max is smaller than $min. # sub row_range { my $self = shift; my $min = $self->{MinRow} || 0; my $max = defined( $self->{MaxRow} ) ? $self->{MaxRow} : ( $min - 1 ); return ( $min, $max ); } ############################################################################### # # col_range() # # Returns a two-element list ($min, $max) containing the minimum and maximum # defined cols in the worksheet. # # If there is no column defined $max is smaller than $min. # sub col_range { my $self = shift; my $min = $self->{MinCol} || 0; my $max = defined( $self->{MaxCol} ) ? $self->{MaxCol} : ( $min - 1 ); return ( $min, $max ); } ############################################################################### # # get_name() # # Returns the name of the worksheet. # sub get_name { my $self = shift; return $self->{Name}; } ############################################################################### # # sheet_num() # sub sheet_num { my $self = shift; return $self->{_SheetNo}; } ############################################################################### # # get_h_pagebreaks() # # Returns an array ref of row numbers where a horizontal page break occurs. # sub get_h_pagebreaks { my $self = shift; return $self->{HPageBreak}; } ############################################################################### # # get_v_pagebreaks() # # Returns an array ref of column numbers where a vertical page break occurs. # sub get_v_pagebreaks { my $self = shift; return $self->{VPageBreak}; } ############################################################################### # # get_merged_areas() # # Returns an array ref of cells that are merged. # sub get_merged_areas { my $self = shift; return $self->{MergedArea}; } ############################################################################### # # get_row_heights() # # Returns an array of row heights. # sub get_row_heights { my $self = shift; if ( wantarray() ) { return unless $self->{RowHeight}; return @{ $self->{RowHeight} }; } return $self->{RowHeight}; } ############################################################################### # # get_col_widths() # # Returns an array of column widths. # sub get_col_widths { my $self = shift; if ( wantarray() ) { return unless $self->{ColWidth}; return @{ $self->{ColWidth} }; } return $self->{ColWidth}; } ############################################################################### # # get_default_row_height() # # Returns the default row height for the worksheet. Generally 12.75. # sub get_default_row_height { my $self = shift; return $self->{DefRowHeight}; } ############################################################################### # # get_default_col_width() # # Returns the default column width for the worksheet. Generally 8.43. # sub get_default_col_width { my $self = shift; return $self->{DefColWidth}; } ############################################################################### # # _get_row_properties() # # Returns an array_ref of row properties. # TODO. This is a placeholder for a future method. 
# sub _get_row_properties { my $self = shift; return $self->{RowProperties}; } ############################################################################### # # _get_col_properties() # # Returns an array_ref of column properties. # TODO. This is a placeholder for a future method. # sub _get_col_properties { my $self = shift; return $self->{ColProperties}; } ############################################################################### # # get_header() # # Returns the worksheet header string. # sub get_header { my $self = shift; return $self->{Header}; } ############################################################################### # # get_footer() # # Returns the worksheet footer string. # sub get_footer { my $self = shift; return $self->{Footer}; } ############################################################################### # # get_margin_left() # # Returns the left margin of the worksheet in inches. # sub get_margin_left { my $self = shift; return $self->{LeftMargin}; } ############################################################################### # # get_margin_right() # # Returns the right margin of the worksheet in inches. # sub get_margin_right { my $self = shift; return $self->{RightMargin}; } ############################################################################### # # get_margin_top() # # Returns the top margin of the worksheet in inches. # sub get_margin_top { my $self = shift; return $self->{TopMargin}; } ############################################################################### # # get_margin_bottom() # # Returns the bottom margin of the worksheet in inches. # sub get_margin_bottom { my $self = shift; return $self->{BottomMargin}; } ############################################################################### # # get_margin_header() # # Returns the header margin of the worksheet in inches. # sub get_margin_header { my $self = shift; return $self->{HeaderMargin}; } ############################################################################### # # get_margin_footer() # # Returns the footer margin of the worksheet in inches. # sub get_margin_footer { my $self = shift; return $self->{FooterMargin}; } ############################################################################### # # get_paper() # # Returns the printer paper size. # sub get_paper { my $self = shift; return $self->{PaperSize}; } ############################################################################### # # get_start_page() # # Returns the page number that printing will start from. # sub get_start_page { my $self = shift; # Only return the page number if the "First page number" option is set. if ( $self->{UsePage} ) { return $self->{PageStart}; } else { return 0; } } ############################################################################### # # get_print_order() # # Returns the Worksheet page printing order. # sub get_print_order { my $self = shift; return $self->{LeftToRight}; } ############################################################################### # # get_print_scale() # # Returns the workbook scale for printing. # sub get_print_scale { my $self = shift; return $self->{Scale}; } ############################################################################### # # get_fit_to_pages() # # Returns the number of pages wide and high that the printed worksheet page # will fit to. 
# sub get_fit_to_pages { my $self = shift; if ( !$self->{PageFit} ) { return ( 0, 0 ); } else { return ( $self->{FitWidth}, $self->{FitHeight} ); } } ############################################################################### # # is_portrait() # # Returns true if the worksheet has been set for printing in portrait mode. # sub is_portrait { my $self = shift; return $self->{Landscape}; } ############################################################################### # # is_centered_horizontally() # # Returns true if the worksheet has been centered horizontally for printing. # sub is_centered_horizontally { my $self = shift; return $self->{HCenter}; } ############################################################################### # # is_centered_vertically() # # Returns true if the worksheet has been centered vertically for printing. # sub is_centered_vertically { my $self = shift; return $self->{HCenter}; } ############################################################################### # # is_print_gridlines() # # Returns true if the worksheet print "gridlines" option is turned on. # sub is_print_gridlines { my $self = shift; return $self->{PrintGrid}; } ############################################################################### # # is_print_row_col_headers() # # Returns true if the worksheet print "row and column headings" option is on. # sub is_print_row_col_headers { my $self = shift; return $self->{PrintHeaders}; } ############################################################################### # # is_print_black_and_white() # # Returns true if the worksheet print "black and white" option is turned on. # sub is_print_black_and_white { my $self = shift; return $self->{NoColor}; } ############################################################################### # # is_print_draft() # # Returns true if the worksheet print "draft" option is turned on. # sub is_print_draft { my $self = shift; return $self->{Draft}; } ############################################################################### # # is_print_comments() # # Returns true if the worksheet print "comments" option is turned on. # sub is_print_comments { my $self = shift; return $self->{Notes}; } =head2 get_tab_color() Return color index of tab, or undef if not set. =cut sub get_tab_color { my $worksheet = shift; return $worksheet->{TabColor}; } =head2 is_sheet_hidden() Return true if sheet is hidden =cut sub is_sheet_hidden { my $worksheet = shift; return $worksheet->{SheetHidden}; } =head2 is_row_hidden($row) In scalar context, return true if $row is hidden In array context, return an array whose elements are true if the corresponding row is hidden. =cut sub is_row_hidden { my $worksheet = shift; my ($row) = @_; unless ( $worksheet->{RowHidden} ) { return () if (wantarray); return 0; } return @{ $worksheet->{RowHidden} } if (wantarray); return $worksheet->{RowHidden}[$row]; } =head2 is_col_hidden($col) In scalar context, return true if $col is hidden In array context, return an array whose elements are true if the corresponding column is hidden. =cut sub is_col_hidden { my $worksheet = shift; my ($col) = @_; unless ( $worksheet->{ColHidden} ) { return () if (wantarray); return 0; } return @{ $worksheet->{ColHidden} } if (wantarray); return $worksheet->{ColHidden}[$col]; } ############################################################################### # # Mapping between legacy method names and new names. # { no warnings; # Ignore warnings about variables used only once. 
*sheetNo = *sheet_num; *Cell = *get_cell; *RowRange = *row_range; *ColRange = *col_range; } 1; __END__ =pod =head1 NAME Spreadsheet::ParseExcel::Worksheet - A class for Worksheets. =head1 SYNOPSIS See the documentation for L. =head1 DESCRIPTION This module is used in conjunction with Spreadsheet::ParseExcel. See the documentation for Spreadsheet::ParseExcel. =head1 Methods The C class encapsulates the properties of an Excel worksheet. It has the following methods: $worksheet->get_cell() $worksheet->row_range() $worksheet->col_range() $worksheet->get_name() $worksheet->get_h_pagebreaks() $worksheet->get_v_pagebreaks() $worksheet->get_merged_areas() $worksheet->get_row_heights() $worksheet->get_col_widths() $worksheet->get_default_row_height() $worksheet->get_default_col_width() $worksheet->get_header() $worksheet->get_footer() $worksheet->get_margin_left() $worksheet->get_margin_right() $worksheet->get_margin_top() $worksheet->get_margin_bottom() $worksheet->get_margin_header() $worksheet->get_margin_footer() $worksheet->get_paper() $worksheet->get_start_page() $worksheet->get_print_order() $worksheet->get_print_scale() $worksheet->get_fit_to_pages() $worksheet->is_portrait() $worksheet->is_centered_horizontally() $worksheet->is_centered_vertically() $worksheet->is_print_gridlines() $worksheet->is_print_row_col_headers() $worksheet->is_print_black_and_white() $worksheet->is_print_draft() $worksheet->is_print_comments() =head2 get_cell($row, $col) Return the L object at row C<$row> and column C<$col> if it is defined. Otherwise returns undef. my $cell = $worksheet->get_cell($row, $col); =head2 row_range() Returns a two-element list C<($min, $max)> containing the minimum and maximum defined rows in the worksheet. If there is no row defined C<$max> is smaller than C<$min>. my ( $row_min, $row_max ) = $worksheet->row_range(); =head2 col_range() Returns a two-element list C<($min, $max)> containing the minimum and maximum of defined columns in the worksheet. If there is no column defined C<$max> is smaller than C<$min>. my ( $col_min, $col_max ) = $worksheet->col_range(); =head2 get_name() The C method returns the name of the worksheet. my $name = $worksheet->get_name(); =head2 get_h_pagebreaks() The C method returns an array ref of row numbers where a horizontal page break occurs. my $h_pagebreaks = $worksheet->get_h_pagebreaks(); Returns C if there are no pagebreaks. =head2 get_v_pagebreaks() The C method returns an array ref of column numbers where a vertical page break occurs. my $v_pagebreaks = $worksheet->get_v_pagebreaks(); Returns C if there are no pagebreaks. =head2 get_merged_areas() The C method returns an array ref of cells that are merged. my $merged_areas = $worksheet->get_merged_areas(); Each merged area is represented as follows: [ $start_row, $start_col, $end_row, $end_col] Returns C if there are no merged areas. =head2 get_row_heights() The C method returns an array_ref of row heights in scalar context, and an array in list context. my $row_heights = $worksheet->get_row_heights(); Returns C if the property isn't set. =head2 get_col_widths() The C method returns an array_ref of column widths in scalar context, and an array in list context. my $col_widths = $worksheet->get_col_widths(); Returns C if the property isn't set. =head2 get_default_row_height() The C method returns the default row height for the worksheet. Generally 12.75. 
my $default_row_height = $worksheet->get_default_row_height(); =head2 get_default_col_width() The C method returns the default column width for the worksheet. Generally 8.43. my $default_col_width = $worksheet->get_default_col_width(); =head2 get_header() The C method returns the worksheet header string. This string can contain control codes for alignment and font properties. Refer to the Excel on-line help on headers and footers or to the Spreadsheet::WriteExcel documentation for set_header(). my $header = $worksheet->get_header(); Returns C if the property isn't set. =head2 get_footer() The C method returns the worksheet footer string. This string can contain control codes for alignment and font properties. Refer to the Excel on-line help on headers and footers or to the Spreadsheet::WriteExcel documentation for set_header(). my $footer = $worksheet->get_footer(); Returns C if the property isn't set. =head2 get_margin_left() The C method returns the left margin of the worksheet in inches. my $margin_left = $worksheet->get_margin_left(); Returns C if the property isn't set. =head2 get_margin_right() The C method returns the right margin of the worksheet in inches. my $margin_right = $worksheet->get_margin_right(); Returns C if the property isn't set. =head2 get_margin_top() The C method returns the top margin of the worksheet in inches. my $margin_top = $worksheet->get_margin_top(); Returns C if the property isn't set. =head2 get_margin_bottom() The C method returns the bottom margin of the worksheet in inches. my $margin_bottom = $worksheet->get_margin_bottom(); Returns C if the property isn't set. =head2 get_margin_header() The C method returns the header margin of the worksheet in inches. my $margin_header = $worksheet->get_margin_header(); Returns a default value of 0.5 if not set. =head2 get_margin_footer() The C method returns the footer margin of the worksheet in inches. my $margin_footer = $worksheet->get_margin_footer(); Returns a default value of 0.5 if not set. =head2 get_paper() The C method returns the printer paper size. my $paper = $worksheet->get_paper(); The value corresponds to the formats shown below: Index Paper format Paper size ===== ============ ========== 0 Printer default - 1 Letter 8 1/2 x 11 in 2 Letter Small 8 1/2 x 11 in 3 Tabloid 11 x 17 in 4 Ledger 17 x 11 in 5 Legal 8 1/2 x 14 in 6 Statement 5 1/2 x 8 1/2 in 7 Executive 7 1/4 x 10 1/2 in 8 A3 297 x 420 mm 9 A4 210 x 297 mm 10 A4 Small 210 x 297 mm 11 A5 148 x 210 mm 12 B4 250 x 354 mm 13 B5 182 x 257 mm 14 Folio 8 1/2 x 13 in 15 Quarto 215 x 275 mm 16 - 10x14 in 17 - 11x17 in 18 Note 8 1/2 x 11 in 19 Envelope 9 3 7/8 x 8 7/8 20 Envelope 10 4 1/8 x 9 1/2 21 Envelope 11 4 1/2 x 10 3/8 22 Envelope 12 4 3/4 x 11 23 Envelope 14 5 x 11 1/2 24 C size sheet - 25 D size sheet - 26 E size sheet - 27 Envelope DL 110 x 220 mm 28 Envelope C3 324 x 458 mm 29 Envelope C4 229 x 324 mm 30 Envelope C5 162 x 229 mm 31 Envelope C6 114 x 162 mm 32 Envelope C65 114 x 229 mm 33 Envelope B4 250 x 353 mm 34 Envelope B5 176 x 250 mm 35 Envelope B6 176 x 125 mm 36 Envelope 110 x 230 mm 37 Monarch 3.875 x 7.5 in 38 Envelope 3 5/8 x 6 1/2 in 39 Fanfold 14 7/8 x 11 in 40 German Std Fanfold 8 1/2 x 12 in 41 German Legal Fanfold 8 1/2 x 13 in 256 User defined The two most common paper sizes are C<1 = "US Letter"> and C<9 = A4>. Returns 9 by default. =head2 get_start_page() The C method returns the page number that printing will start from. my $start_page = $worksheet->get_start_page(); Returns 0 if the property isn't set. 
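An illustrative sketch that pulls together a few of the page-setup accessors described here ($worksheet is assumed to be a worksheet object obtained from a parsed workbook):

    # Summarise the print setup of a worksheet.
    printf "Sheet '%s': paper size %d, printing starts at page %d\n",
        $worksheet->get_name(),
        $worksheet->get_paper(),
        $worksheet->get_start_page();
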
=head2 get_print_order() The C method returns 0 if the worksheet print "page order" is "Down then over" (the default) or 1 if it is "Over then down". my $print_order = $worksheet->get_print_order(); =head2 get_print_scale() The C method returns the workbook scale for printing. The print scale factor can be in the range 10 .. 400. my $print_scale = $worksheet->get_print_scale(); Returns 100 by default. =head2 get_fit_to_pages() The C method returns the number of pages wide and high that the printed worksheet page will fit to. my ($pages_wide, $pages_high) = $worksheet->get_fit_to_pages(); Returns (0, 0) if the property isn't set. =head2 is_portrait() The C method returns true if the worksheet has been set for printing in portrait mode. my $is_portrait = $worksheet->is_portrait(); Returns 0 if the worksheet has been set for printing in horizontal mode. =head2 is_centered_horizontally() The C method returns true if the worksheet has been centered horizontally for printing. my $is_centered_horizontally = $worksheet->is_centered_horizontally(); Returns 0 if the property isn't set. =head2 is_centered_vertically() The C method returns true if the worksheet has been centered vertically for printing. my $is_centered_vertically = $worksheet->is_centered_vertically(); Returns 0 if the property isn't set. =head2 is_print_gridlines() The C method returns true if the worksheet print "gridlines" option is turned on. my $is_print_gridlines = $worksheet->is_print_gridlines(); Returns 0 if the property isn't set. =head2 is_print_row_col_headers() The C method returns true if the worksheet print "row and column headings" option is turned on. my $is_print_row_col_headers = $worksheet->is_print_row_col_headers(); Returns 0 if the property isn't set. =head2 is_print_black_and_white() The C method returns true if the worksheet print "black and white" option is turned on. my $is_print_black_and_white = $worksheet->is_print_black_and_white(); Returns 0 if the property isn't set. =head2 is_print_draft() The C method returns true if the worksheet print "draft" option is turned on. my $is_print_draft = $worksheet->is_print_draft(); Returns 0 if the property isn't set. =head2 is_print_comments() The C method returns true if the worksheet print "comments" option is turned on. my $is_print_comments = $worksheet->is_print_comments(); Returns 0 if the property isn't set. =head1 AUTHOR Current maintainer 0.60+: Douglas Wilson dougw@cpan.org Maintainer 0.40-0.59: John McNamara jmcnamara@cpan.org Maintainer 0.27-0.33: Gabor Szabo szabgab@cpan.org Original author: Kawai Takanori kwitknr@cpan.org =head1 COPYRIGHT Copyright (c) 2014 Douglas Wilson Copyright (c) 2009-2013 John McNamara Copyright (c) 2006-2008 Gabor Szabo Copyright (c) 2000-2006 Kawai Takanori All rights reserved. You may distribute under the terms of either the GNU General Public License or the Artistic License, as specified in the Perl README file. =cut gdata/inst/perl/Spreadsheet/ParseExcel/Cell.pm0000644000175100001440000002240113003720416021001 0ustar hornikuserspackage Spreadsheet::ParseExcel::Cell; ############################################################################### # # Spreadsheet::ParseExcel::Cell - A class for Cell data and formatting. # # Used in conjunction with Spreadsheet::ParseExcel. # # Copyright (c) 2014 Douglas Wilson # Copyright (c) 2009-2013 John McNamara # Copyright (c) 2006-2008 Gabor Szabo # Copyright (c) 2000-2006 Kawai Takanori # # perltidy with standard settings. 
# # Documentation after __END__ # use strict; use warnings; our $VERSION = '0.65'; ############################################################################### # # new() # # Constructor. # sub new { my ( $package, %properties ) = @_; my $self = \%properties; bless $self, $package; } ############################################################################### # # value() # # Returns the formatted value of the cell. # sub value { my $self = shift; return $self->{_Value}; } ############################################################################### # # unformatted() # # Returns the unformatted value of the cell. # sub unformatted { my $self = shift; return $self->{Val}; } ############################################################################### # # get_format() # # Returns the Format object for the cell. # sub get_format { my $self = shift; return $self->{Format}; } ############################################################################### # # type() # # Returns the type of cell such as Text, Numeric or Date. # sub type { my $self = shift; return $self->{Type}; } ############################################################################### # # encoding() # # Returns the character encoding of the cell. # sub encoding { my $self = shift; if ( !defined $self->{Code} ) { return 1; } elsif ( $self->{Code} eq 'ucs2' ) { return 2; } elsif ( $self->{Code} eq '_native_' ) { return 3; } else { return 0; } return $self->{Code}; } ############################################################################### # # is_merged() # # Returns true if the cell is merged. # sub is_merged { my $self = shift; return $self->{Merged}; } ############################################################################### # # get_rich_text() # # Returns an array ref of font information about each string block in a "rich", # i.e. multi-format, string. # sub get_rich_text { my $self = shift; return $self->{Rich}; } ############################################################################### # # get_hyperlink { # # Returns an array ref of hyperlink information if the cell contains a hyperlink. # Returns undef otherwise # # [0] : Description of link (You may want $cell->value, as it will have rich text) # [1] : URL - the link expressed as a URL. N.B. relative URLs will be defaulted to # the directory of the input file, if the input file name is known. Otherwise # %REL% will be inserted as a place-holder. Depending on your application, # you should either remove %REL% or replace it with the appropriate path. # [2] : Target frame (or undef if none) sub get_hyperlink { my $self = shift; return $self->{Hyperlink} if exists $self->{Hyperlink}; return undef; } # ############################################################################### # # Mapping between legacy method names and new names. # { no warnings; # Ignore warnings about variables used only once. *Value = \&value; } 1; __END__ =pod =head1 NAME Spreadsheet::ParseExcel::Cell - A class for Cell data and formatting. =head1 SYNOPSIS See the documentation for Spreadsheet::ParseExcel. =head1 DESCRIPTION This module is used in conjunction with Spreadsheet::ParseExcel. See the documentation for Spreadsheet::ParseExcel. =head1 Methods The following Cell methods are available: $cell->value() $cell->unformatted() $cell->get_format() $cell->type() $cell->encoding() $cell->is_merged() $cell->get_rich_text() $cell->get_hyperlink() =head2 value() The C method returns the formatted value of the cell. 
my $value = $cell->value(); Formatted in this sense refers to the numeric format of the cell value. For example a number such as 40177 might be formatted as 40,117, 40117.000 or even as the date 2009/12/30. If the cell doesn't contain a numeric format then the formatted and unformatted cell values are the same, see the C method below. For a defined C<$cell> the C method will always return a value. In the case of a cell with formatting but no numeric or string contents the method will return the empty string C<''>. =head2 unformatted() The C method returns the unformatted value of the cell. my $unformatted = $cell->unformatted(); Returns the cell value without a numeric format. See the C method above. =head2 get_format() The C method returns the L object for the cell. my $format = $cell->get_format(); If a user defined format hasn't been applied to the cell then the default cell format is returned. =head2 type() The C method returns the type of cell such as Text, Numeric or Date. If the type was detected as Numeric, and the Cell Format matches C, it will be treated as a Date type. my $type = $cell->type(); See also L. =head2 encoding() The C method returns the character encoding of the cell. my $encoding = $cell->encoding(); This method is only of interest to developers. In general Spreadsheet::ParseExcel will return all character strings in UTF-8 regardless of the encoding used by Excel. The C method returns one of the following values: =over =item * 0: Unknown format. This shouldn't happen. In the default case the format should be 1. =item * 1: 8bit ASCII or single byte UTF-16. This indicates that the characters are encoded in a single byte. In Excel 95 and earlier This usually meant ASCII or an international variant. In Excel 97 it refers to a compressed UTF-16 character string where all of the high order bytes are 0 and are omitted to save space. =item * 2: UTF-16BE. =item * 3: Native encoding. In Excel 95 and earlier this encoding was used to represent multi-byte character encodings such as SJIS. =back =head2 is_merged() The C method returns true if the cell is merged. my $is_merged = $cell->is_merged(); Returns C if the property isn't set. =head2 get_rich_text() The C method returns an array ref of font information about each string block in a "rich", i.e. multi-format, string. my $rich_text = $cell->get_rich_text(); The return value is an arrayref of arrayrefs in the form: [ [ $start_position, $font_object ], ..., ] Returns undef if the property isn't set. =head2 get_hyperlink() If a cell contains a hyperlink, the C method returns an array ref of information about it. A cell can contain at most one hyperlink. If it does, it contains no other value. Otherwise, it returns undef; The array contains: =over =item * 0: Description (what's displayed); undef if not present =item * 1: Link, converted to an appropriate URL - Note: Relative links are based on the input file. %REL% is used if the input file is unknown (e.g. a file handle or scalar) =item * 2: Target - target frame (or undef if none) =back =head1 Dates and Time in Excel Dates and times in Excel are represented by real numbers, for example "Jan 1 2001 12:30 PM" is represented by the number 36892.521. The integer part of the number stores the number of days since the epoch and the fractional part stores the percentage of the day. A date or time in Excel is just like any other number. 
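As an illustrative sketch (assuming $cell is a date cell returned by $worksheet->get_cell()), the raw serial number can be turned into calendar values with ExcelLocaltime() from Spreadsheet::ParseExcel::Utility:

    use Spreadsheet::ParseExcel::Utility qw(ExcelLocaltime);

    my $serial = $cell->unformatted();   # e.g. 39506.5
    my ( $sec, $min, $hour, $day, $month, $year ) = ExcelLocaltime($serial);

    # ExcelLocaltime() is localtime()-like: month is 0..11, year is since 1900.
    printf "%04d-%02d-%02d %02d:%02d\n",
        $year + 1900, $month + 1, $day, $hour, $min;   # 2008-02-28 12:00
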
The way in which it is displayed is controlled by the number format: Number format $cell->value() $cell->unformatted() ============= ============== ============== 'dd/mm/yy' '28/02/08' 39506.5 'mm/dd/yy' '02/28/08' 39506.5 'd-m-yyyy' '28-2-2008' 39506.5 'dd/mm/yy hh:mm' '28/02/08 12:00' 39506.5 'd mmm yyyy' '28 Feb 2008' 39506.5 'mmm d yyyy hh:mm AM/PM' 'Feb 28 2008 12:00 PM' 39506.5 The L module contains a function called C which will convert between an unformatted Excel date/time number and a C like array. For date conversions using the CPAN C framework see L http://search.cpan.org/search?dist=DateTime-Format-Excel =head1 AUTHOR Current maintainer 0.60+: Douglas Wilson dougw@cpan.org Maintainer 0.40-0.59: John McNamara jmcnamara@cpan.org Maintainer 0.27-0.33: Gabor Szabo szabgab@cpan.org Original author: Kawai Takanori kwitknr@cpan.org =head1 COPYRIGHT Copyright (c) 2014 Douglas Wilson Copyright (c) 2009-2013 John McNamara Copyright (c) 2006-2008 Gabor Szabo Copyright (c) 2000-2006 Kawai Takanori All rights reserved. You may distribute under the terms of either the GNU General Public License or the Artistic License, as specified in the Perl README file. =cut gdata/inst/perl/Spreadsheet/ParseExcel/SaveParser/0000755000175100001440000000000013115545572021654 5ustar hornikusersgdata/inst/perl/Spreadsheet/ParseExcel/SaveParser/Workbook.pm0000644000175100001440000003601113003720416023774 0ustar hornikuserspackage Spreadsheet::ParseExcel::SaveParser::Workbook; ############################################################################### # # Spreadsheet::ParseExcel::SaveParser::Workbook - A class for SaveParser Workbooks. # # Used in conjunction with Spreadsheet::ParseExcel. # # Copyright (c) 2014 Douglas Wilson # Copyright (c) 2009-2013 John McNamara # Copyright (c) 2006-2008 Gabor Szabo # Copyright (c) 2000-2006 Kawai Takanori # # perltidy with standard settings. # # Documentation after __END__ # use strict; use warnings; use base 'Spreadsheet::ParseExcel::Workbook'; our $VERSION = '0.65'; #============================================================================== # Spreadsheet::ParseExcel::SaveParser::Workbook #============================================================================== sub new { my ( $sPkg, $oBook ) = @_; return undef unless ( defined $oBook ); my %oThis = %$oBook; bless \%oThis, $sPkg; # re-bless worksheets (and set their _Book properties !!!) my $sWkP = ref($sPkg) || "$sPkg"; $sWkP =~ s/Workbook$/Worksheet/; map { bless( $_, $sWkP ); } @{ $oThis{Worksheet} }; map { $_->{_Book} = \%oThis; } @{ $oThis{Worksheet} }; return \%oThis; } #------------------------------------------------------------------------------ # Parse (for Spreadsheet::ParseExcel::SaveParser::Workbook) #------------------------------------------------------------------------------ sub Parse { my ( $sClass, $sFile, $oWkFmt ) = @_; my $oBook = Spreadsheet::ParseExcel::Workbook->Parse( $sFile, $oWkFmt ); bless $oBook, $sClass; # re-bless worksheets (and set their _Book properties !!!) 
my $sWkP = ref($sClass) || "$sClass"; $sWkP =~ s/Workbook$/Worksheet/; map { bless( $_, $sWkP ); } @{ $oBook->{Worksheet} }; map { $_->{_Book} = $oBook; } @{ $oBook->{Worksheet} }; return $oBook; } #------------------------------------------------------------------------------ # SaveAs (for Spreadsheet::ParseExcel::SaveParser::Workbook) #------------------------------------------------------------------------------ sub SaveAs { my ( $oBook, $sName ) = @_; # Create a new Excel workbook my $oWrEx = Spreadsheet::WriteExcel->new($sName); $oWrEx->compatibility_mode(); my %hFmt; my $iNo = 0; my @aAlH = ( 'left', 'left', 'center', 'right', 'fill', 'justify', 'merge', 'equal_space' ); my @aAlV = ( 'top', 'vcenter', 'bottom', 'vjustify', 'vequal_space' ); foreach my $pFmt ( @{ $oBook->{Format} } ) { my $oFmt = $oWrEx->addformat(); # Add Formats unless ( $pFmt->{Style} ) { $hFmt{$iNo} = $oFmt; my $rFont = $pFmt->{Font}; $oFmt->set_font( $rFont->{Name} ); $oFmt->set_size( $rFont->{Height} ); $oFmt->set_color( $rFont->{Color} ); $oFmt->set_bold( $rFont->{Bold} ); $oFmt->set_italic( $rFont->{Italic} ); $oFmt->set_underline( $rFont->{Underline} ); $oFmt->set_font_strikeout( $rFont->{Strikeout} ); $oFmt->set_font_script( $rFont->{Super} ); $oFmt->set_hidden( $rFont->{Hidden} ); #Add $oFmt->set_locked( $pFmt->{Lock} ); $oFmt->set_align( $aAlH[ $pFmt->{AlignH} ] ); $oFmt->set_align( $aAlV[ $pFmt->{AlignV} ] ); $oFmt->set_rotation( $pFmt->{Rotate} ); $oFmt->set_num_format( $oBook->{FmtClass}->FmtStringDef( $pFmt->{FmtIdx}, $oBook ) ); $oFmt->set_text_wrap( $pFmt->{Wrap} ); $oFmt->set_pattern( $pFmt->{Fill}->[0] ); $oFmt->set_fg_color( $pFmt->{Fill}->[1] ) if ( ( $pFmt->{Fill}->[1] >= 8 ) && ( $pFmt->{Fill}->[1] <= 63 ) ); $oFmt->set_bg_color( $pFmt->{Fill}->[2] ) if ( ( $pFmt->{Fill}->[2] >= 8 ) && ( $pFmt->{Fill}->[2] <= 63 ) ); $oFmt->set_left( ( $pFmt->{BdrStyle}->[0] > 7 ) ? 3 : $pFmt->{BdrStyle}->[0] ); $oFmt->set_right( ( $pFmt->{BdrStyle}->[1] > 7 ) ? 3 : $pFmt->{BdrStyle}->[1] ); $oFmt->set_top( ( $pFmt->{BdrStyle}->[2] > 7 ) ? 3 : $pFmt->{BdrStyle}->[2] ); $oFmt->set_bottom( ( $pFmt->{BdrStyle}->[3] > 7 ) ? 
3 : $pFmt->{BdrStyle}->[3] ); $oFmt->set_left_color( $pFmt->{BdrColor}->[0] ) if ( ( $pFmt->{BdrColor}->[0] >= 8 ) && ( $pFmt->{BdrColor}->[0] <= 63 ) ); $oFmt->set_right_color( $pFmt->{BdrColor}->[1] ) if ( ( $pFmt->{BdrColor}->[1] >= 8 ) && ( $pFmt->{BdrColor}->[1] <= 63 ) ); $oFmt->set_top_color( $pFmt->{BdrColor}->[2] ) if ( ( $pFmt->{BdrColor}->[2] >= 8 ) && ( $pFmt->{BdrColor}->[2] <= 63 ) ); $oFmt->set_bottom_color( $pFmt->{BdrColor}->[3] ) if ( ( $pFmt->{BdrColor}->[3] >= 8 ) && ( $pFmt->{BdrColor}->[3] <= 63 ) ); } $iNo++; } for ( my $iSheet = 0 ; $iSheet < $oBook->{SheetCount} ; $iSheet++ ) { my $oWkS = $oBook->{Worksheet}[$iSheet]; my $oWrS = $oWrEx->addworksheet( $oWkS->{Name} ); #Landscape if ( !$oWkS->{Landscape} ) { # Landscape (0:Horizontal, 1:Vertical) $oWrS->set_landscape(); } else { $oWrS->set_portrait(); } #Protect if ( defined $oWkS->{Protect} ) { # Protect ('':NoPassword, Password:Password) if ( $oWkS->{Protect} ne '' ) { $oWrS->protect( $oWkS->{Protect} ); } else { $oWrS->protect(); } } if ( $oWkS->{Scale} != 100 ) { # Pages on fit with width and Heigt $oWrS->fit_to_pages( $oWkS->{FitWidth}, $oWkS->{FitHeight} ); #Print Scale and reset FitWidth/FitHeight $oWrS->set_print_scale( $oWkS->{Scale} ); } else { #Print Scale $oWrS->set_print_scale( $oWkS->{Scale} ); # Pages on fit with width and Heigt $oWrS->fit_to_pages( $oWkS->{FitWidth}, $oWkS->{FitHeight} ); } # Paper Size $oWrS->set_paper( $oWkS->{PaperSize} ); # Margin $oWrS->set_margin_left( $oWkS->{LeftMargin} ); $oWrS->set_margin_right( $oWkS->{RightMargin} ); $oWrS->set_margin_top( $oWkS->{TopMargin} ); $oWrS->set_margin_bottom( $oWkS->{BottomMargin} ); # HCenter $oWrS->center_horizontally() if ( $oWkS->{HCenter} ); # VCenter $oWrS->center_vertically() if ( $oWkS->{VCenter} ); # Header, Footer $oWrS->set_header( $oWkS->{Header}, $oWkS->{HeaderMargin} ); $oWrS->set_footer( $oWkS->{Footer}, $oWkS->{FooterMargin} ); # Print Area if ( ref( $oBook->{PrintArea}[$iSheet] ) eq 'ARRAY' ) { my $raP; for $raP ( @{ $oBook->{PrintArea}[$iSheet] } ) { $oWrS->print_area(@$raP); } } # Print Title my $raW; foreach $raW ( @{ $oBook->{PrintTitle}[$iSheet]->{Row} } ) { $oWrS->repeat_rows(@$raW); } foreach $raW ( @{ $oBook->{PrintTitle}[$iSheet]->{Column} } ) { $oWrS->repeat_columns(@$raW); } # Print Gridlines if ( $oWkS->{PrintGrid} == 1 ) { $oWrS->hide_gridlines(0); } else { $oWrS->hide_gridlines(1); } # Print Headings if ( $oWkS->{PrintHeaders} ) { $oWrS->print_row_col_headers(); } # Horizontal Page Breaks $oWrS->set_h_pagebreaks( @{ $oWkS->{HPageBreak} } ); # Veritical Page Breaks $oWrS->set_v_pagebreaks( @{ $oWkS->{VPageBreak} } ); # PageStart => $oWkS->{PageStart}, # Page number for start # UsePage => $oWkS->{UsePage}, # Use own start page number # NoColor => $oWkS->{NoColor}, # Print in black-white # Draft => $oWkS->{Draft}, # Print in draft mode # Notes => $oWkS->{Notes}, # Print notes # LeftToRight => $oWkS->{LeftToRight}, # Left to Right for ( my $iC = $oWkS->{MinCol} ; defined $oWkS->{MaxCol} && $iC <= $oWkS->{MaxCol} ; $iC++ ) { if ( defined $oWkS->{ColWidth}[$iC] ) { if ( $oWkS->{ColWidth}[$iC] > 0 ) { $oWrS->set_column( $iC, $iC, $oWkS->{ColWidth}[$iC] ) ; #, undef, 1) ; } else { $oWrS->set_column( $iC, $iC, 0, undef, 1 ); } } } my $merged_areas = $oWkS->get_merged_areas(); my $merged_areas_h = {}; if ($merged_areas) { foreach my $range (@$merged_areas) { $merged_areas_h->{$range->[0]}{$range->[1]} = $range; } } for ( my $iR = $oWkS->{MinRow} ; defined $oWkS->{MaxRow} && $iR <= $oWkS->{MaxRow} ; $iR++ ) { $oWrS->set_row( 
$iR, $oWkS->{RowHeight}[$iR] ); for ( my $iC = $oWkS->{MinCol} ; defined $oWkS->{MaxCol} && $iC <= $oWkS->{MaxCol} ; $iC++ ) { my $oWkC = $oWkS->{Cells}[$iR][$iC]; if ($oWkC) { if ( $oWkC->{Merged} and exists $merged_areas_h->{$iR}{$iC} ) { my $oFmtN = $oWrEx->addformat(); $oFmtN->copy( $hFmt{ $oWkC->{FormatNo} } ); $oWrS->merge_range ( @{$merged_areas_h->{$iR}{$iC}}, $oBook->{FmtClass} ->TextFmt( $oWkC->{Val}, $oWkC->{Code} ), $oFmtN ); } else { $oWrS->write( $iR, $iC, $oBook->{FmtClass} ->TextFmt( $oWkC->{Val}, $oWkC->{Code} ), $hFmt{ $oWkC->{FormatNo} } ); } } } } } return $oWrEx; } #------------------------------------------------------------------------------ # AddWorksheet (for Spreadsheet::ParseExcel::SaveParser::Workbook) #------------------------------------------------------------------------------ sub AddWorksheet { my ( $oBook, $sName, %hAttr ) = @_; $oBook->AddFormat if ( $#{ $oBook->{Format} } < 0 ); $hAttr{Name} ||= $sName; $hAttr{LeftMargin} ||= 0; $hAttr{RightMargin} ||= 0; $hAttr{TopMargin} ||= 0; $hAttr{BottomMargin} ||= 0; $hAttr{HeaderMargin} ||= 0; $hAttr{FooterMargin} ||= 0; $hAttr{FitWidth} ||= 0; $hAttr{FitHeight} ||= 0; $hAttr{PrintGrid} ||= 0; my $oWkS = Spreadsheet::ParseExcel::SaveParser::Worksheet->new(%hAttr); $oWkS->{_Book} = $oBook; $oWkS->{_SheetNo} = $oBook->{SheetCount}; $oBook->{Worksheet}[ $oBook->{SheetCount} ] = $oWkS; $oBook->{SheetCount}++; return $oWkS; #$oBook->{SheetCount} - 1; } #------------------------------------------------------------------------------ # AddFont (for Spreadsheet::ParseExcel::SaveParser::Workbook) #------------------------------------------------------------------------------ sub AddFont { my ( $oBook, %hAttr ) = @_; $hAttr{Name} ||= 'Arial'; $hAttr{Height} ||= 10; $hAttr{Bold} ||= 0; $hAttr{Italic} ||= 0; $hAttr{Underline} ||= 0; $hAttr{Strikeout} ||= 0; $hAttr{Super} ||= 0; push @{ $oBook->{Font} }, Spreadsheet::ParseExcel::Font->new(%hAttr); return $#{ $oBook->{Font} }; } #------------------------------------------------------------------------------ # AddFormat (for Spreadsheet::ParseExcel::SaveParser::Workbook) #------------------------------------------------------------------------------ sub AddFormat { my ( $oBook, %hAttr ) = @_; $hAttr{Fill} ||= [ 0, 0, 0 ]; $hAttr{BdrStyle} ||= [ 0, 0, 0, 0 ]; $hAttr{BdrColor} ||= [ 0, 0, 0, 0 ]; $hAttr{AlignH} ||= 0; $hAttr{AlignV} ||= 0; $hAttr{Rotate} ||= 0; $hAttr{Landscape} ||= 0; $hAttr{FmtIdx} ||= 0; if ( !defined( $hAttr{Font} ) ) { my $oFont; if ( defined $hAttr{FontNo} ) { $oFont = $oBook->{Font}[ $hAttr{FontNo} ]; } elsif ( !defined $oFont ) { if ( $#{ $oBook->{Font} } >= 0 ) { $oFont = $oBook->{Font}[0]; } else { my $iNo = $oBook->AddFont; $oFont = $oBook->{Font}[$iNo]; } } $hAttr{Font} = $oFont; } push @{ $oBook->{Format} }, Spreadsheet::ParseExcel::Format->new(%hAttr); return $#{ $oBook->{Format} }; } #------------------------------------------------------------------------------ # AddCell (for Spreadsheet::ParseExcel::SaveParser::Workbook) #------------------------------------------------------------------------------ sub AddCell { my ( $oBook, $iSheet, $iR, $iC, $sVal, $oCell, $sCode ) = @_; my %rhKey; $oCell ||= $oBook->{Worksheet}[$iSheet] ->{Cells}[$iR][$iC]->{FormatNo} || 0; my $iFmt = ( UNIVERSAL::isa( $oCell, 'Spreadsheet::ParseExcel::Cell' ) ) ? $oCell->{FormatNo} : ( ref($oCell) ) ? 
0 : $oCell + 0; $rhKey{FormatNo} = $iFmt; $rhKey{Format} = $oBook->{Format}[$iFmt]; $rhKey{Val} = $sVal; $rhKey{Code} = $sCode || '_native_'; $oBook->{_CurSheet} = $iSheet; my $merged_areas = $oBook->{Worksheet}[$iSheet]->get_merged_areas(); my $merged_areas_h = {}; if ($merged_areas) { foreach my $range (@$merged_areas) { $merged_areas_h->{$range->[0]}{$range->[1]} = $range; } } my $oNewCell = Spreadsheet::ParseExcel::_NewCell( $oBook, $iR, $iC, %rhKey ); Spreadsheet::ParseExcel::_SetDimension( $oBook, $iR, $iC, $iC ); $oNewCell->{Merged} = 1 if exists $merged_areas_h->{$iR}{$iC}; return $oNewCell; } 1; __END__ =pod =head1 NAME Spreadsheet::ParseExcel::SaveParser::Workbook - A class for SaveParser Workbooks. =head1 SYNOPSIS See the documentation for Spreadsheet::ParseExcel. =head1 DESCRIPTION This module is used in conjunction with Spreadsheet::ParseExcel. See the documentation for Spreadsheet::ParseExcel. =head1 AUTHOR Current maintainer 0.60+: Douglas Wilson dougw@cpan.org Maintainer 0.40-0.59: John McNamara jmcnamara@cpan.org Maintainer 0.27-0.33: Gabor Szabo szabgab@cpan.org Original author: Kawai Takanori kwitknr@cpan.org =head1 COPYRIGHT Copyright (c) 2014 Douglas Wilson Copyright (c) 2009-2013 John McNamara Copyright (c) 2006-2008 Gabor Szabo Copyright (c) 2000-2006 Kawai Takanori All rights reserved. You may distribute under the terms of either the GNU General Public License or the Artistic License, as specified in the Perl README file. =cut gdata/inst/perl/Spreadsheet/ParseExcel/SaveParser/Worksheet.pm0000644000175100001440000000514113003720416024152 0ustar hornikuserspackage Spreadsheet::ParseExcel::SaveParser::Worksheet; ############################################################################### # # Spreadsheet::ParseExcel::SaveParser::Worksheet - A class for SaveParser Worksheets. # # Used in conjunction with Spreadsheet::ParseExcel. # # Copyright (c) 2014 Douglas Wilson # Copyright (c) 2009-2013 John McNamara # Copyright (c) 2006-2008 Gabor Szabo # Copyright (c) 2000-2006 Kawai Takanori # # perltidy with standard settings. # # Documentation after __END__ # use strict; use warnings; #============================================================================== # Spreadsheet::ParseExcel::SaveParser::Worksheet #============================================================================== use base 'Spreadsheet::ParseExcel::Worksheet'; our $VERSION = '0.65'; sub new { my ( $sClass, %rhIni ) = @_; $sClass->SUPER::new(%rhIni); # returns object } #------------------------------------------------------------------------------ # AddCell (for Spreadsheet::ParseExcel::SaveParser::Worksheet) #------------------------------------------------------------------------------ sub AddCell { my ( $oSelf, $iR, $iC, $sVal, $oCell, $sCode ) = @_; $oSelf->{_Book} ->AddCell( $oSelf->{_SheetNo}, $iR, $iC, $sVal, $oCell, $sCode ); } #------------------------------------------------------------------------------ # Protect (for Spreadsheet::ParseExcel::SaveParser::Worksheet) # - Password = undef -> No protect # - Password = '' -> Protected. No password # - Password = $pwd -> Protected. Password = $pwd #------------------------------------------------------------------------------ sub Protect { my ( $oSelf, $sPassword ) = @_; $oSelf->{Protect} = $sPassword; } 1; __END__ =pod =head1 NAME Spreadsheet::ParseExcel::SaveParser::Worksheet - A class for SaveParser Worksheets. =head1 SYNOPSIS See the documentation for Spreadsheet::ParseExcel. 
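The worksheet-level C<AddCell> above simply delegates to the owning workbook. A typical round trip, following the Spreadsheet::ParseExcel::SaveParser documentation, is sketched below; the file names are illustrative only:

    use Spreadsheet::ParseExcel::SaveParser;

    my $parser   = Spreadsheet::ParseExcel::SaveParser->new();
    my $template = $parser->Parse('template.xls');

    # Overwrite (or add) a cell on the first worksheet.
    my $worksheet = $template->worksheet(0);
    $worksheet->AddCell( 0, 0, 'New value' );

    # Write the modified workbook back out.
    $template->SaveAs('new.xls');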
=head1 DESCRIPTION This module is used in conjunction with Spreadsheet::ParseExcel. See the documentation for Spreadsheet::ParseExcel. =head1 AUTHOR Current maintainer 0.60+: Douglas Wilson dougw@cpan.org Maintainer 0.40-0.59: John McNamara jmcnamara@cpan.org Maintainer 0.27-0.33: Gabor Szabo szabgab@cpan.org Original author: Kawai Takanori kwitknr@cpan.org =head1 COPYRIGHT Copyright (c) 2014 Douglas Wilson Copyright (c) 2009-2013 John McNamara Copyright (c) 2006-2008 Gabor Szabo Copyright (c) 2000-2006 Kawai Takanori All rights reserved. You may distribute under the terms of either the GNU General Public License or the Artistic License, as specified in the Perl README file. =cut gdata/inst/perl/Spreadsheet/ParseExcel/Font.pm0000644000175100001440000000270413003720416021034 0ustar hornikuserspackage Spreadsheet::ParseExcel::Font; ############################################################################### # # Spreadsheet::ParseExcel::Font - A class for Cell fonts. # # Used in conjunction with Spreadsheet::ParseExcel. # # Copyright (c) 2014 Douglas Wilson # Copyright (c) 2009-2013 John McNamara # Copyright (c) 2006-2008 Gabor Szabo # Copyright (c) 2000-2006 Kawai Takanori # # perltidy with standard settings. # # Documentation after __END__ # use strict; use warnings; our $VERSION = '0.65'; sub new { my ( $class, %rhIni ) = @_; my $self = \%rhIni; bless $self, $class; } 1; __END__ =pod =head1 NAME Spreadsheet::ParseExcel::Font - A class for Cell fonts. =head1 SYNOPSIS See the documentation for Spreadsheet::ParseExcel. =head1 DESCRIPTION This module is used in conjunction with Spreadsheet::ParseExcel. See the documentation for Spreadsheet::ParseExcel. =head1 AUTHOR Current maintainer 0.60+: Douglas Wilson dougw@cpan.org Maintainer 0.40-0.59: John McNamara jmcnamara@cpan.org Maintainer 0.27-0.33: Gabor Szabo szabgab@cpan.org Original author: Kawai Takanori kwitknr@cpan.org =head1 COPYRIGHT Copyright (c) 2014 Douglas Wilson Copyright (c) 2009-2013 John McNamara Copyright (c) 2006-2008 Gabor Szabo Copyright (c) 2000-2006 Kawai Takanori All rights reserved. You may distribute under the terms of either the GNU General Public License or the Artistic License, as specified in the Perl README file. =cut gdata/inst/perl/Spreadsheet/ParseExcel/FmtUnicode.pm0000644000175100001440000000530613003720416022164 0ustar hornikuserspackage Spreadsheet::ParseExcel::FmtUnicode; ############################################################################### # # Spreadsheet::ParseExcel::FmtUnicode - A class for Cell formats. # # Used in conjunction with Spreadsheet::ParseExcel. # # Copyright (c) 2014 Douglas Wilson # Copyright (c) 2009-2013 John McNamara # Copyright (c) 2006-2008 Gabor Szabo # Copyright (c) 2000-2006 Kawai Takanori # # perltidy with standard settings. 
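#
# A minimal usage sketch (not part of this module; the file and map names
# are illustrative): build the formatter and pass it as the optional second
# argument to Spreadsheet::ParseExcel::parse(). The Unicode_Map name must be
# one registered with Unicode::Map (see README-ParseExcel for the CP932Excel
# map used with Japanese workbooks).
#
#   use Spreadsheet::ParseExcel;
#   use Spreadsheet::ParseExcel::FmtUnicode;
#
#   my $formatter = Spreadsheet::ParseExcel::FmtUnicode->new(
#       Unicode_Map => 'CP932Excel',
#   );
#   my $parser   = Spreadsheet::ParseExcel->new();
#   my $workbook = $parser->parse( 'legacy.xls', $formatter );
#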
# # Documentation after __END__ # use strict; use warnings; use Unicode::Map; use base 'Spreadsheet::ParseExcel::FmtDefault'; our $VERSION = '0.65'; #------------------------------------------------------------------------------ # new (for Spreadsheet::ParseExcel::FmtUnicode) #------------------------------------------------------------------------------ sub new { my ( $sPkg, %hKey ) = @_; my $sMap = $hKey{Unicode_Map}; my $oMap; $oMap = Unicode::Map->new($sMap) if $sMap; my $oThis = { Unicode_Map => $sMap, _UniMap => $oMap, }; bless $oThis; return $oThis; } #------------------------------------------------------------------------------ # TextFmt (for Spreadsheet::ParseExcel::FmtUnicode) #------------------------------------------------------------------------------ sub TextFmt { my ( $oThis, $sTxt, $sCode ) = @_; if ( $oThis->{_UniMap} ) { if ( !defined($sCode) ) { my $sSv = $sTxt; $sTxt =~ s/(.)/\x00$1/sg; $sTxt = $oThis->{_UniMap}->from_unicode($sTxt); $sTxt = $sSv unless ($sTxt); } elsif ( $sCode eq 'ucs2' ) { $sTxt = $oThis->{_UniMap}->from_unicode($sTxt); } # $sTxt = $oThis->{_UniMap}->from_unicode($sTxt) # if(defined($sCode) && $sCode eq 'ucs2'); return $sTxt; } else { return $sTxt; } } 1; __END__ =pod =head1 NAME Spreadsheet::ParseExcel::FmtUnicode - A class for Cell formats. =head1 SYNOPSIS See the documentation for Spreadsheet::ParseExcel. =head1 DESCRIPTION This module is used in conjunction with Spreadsheet::ParseExcel. See the documentation for Spreadsheet::ParseExcel. =head1 AUTHOR Current maintainer 0.60+: Douglas Wilson dougw@cpan.org Maintainer 0.40-0.59: John McNamara jmcnamara@cpan.org Maintainer 0.27-0.33: Gabor Szabo szabgab@cpan.org Original author: Kawai Takanori kwitknr@cpan.org =head1 COPYRIGHT Copyright (c) 2014 Douglas Wilson Copyright (c) 2009-2013 John McNamara Copyright (c) 2006-2008 Gabor Szabo Copyright (c) 2000-2006 Kawai Takanori All rights reserved. You may distribute under the terms of either the GNU General Public License or the Artistic License, as specified in the Perl README file. =cut gdata/inst/perl/Spreadsheet/ParseExcel/FmtJapan.pm0000644000175100001440000001304013003720416021621 0ustar hornikuserspackage Spreadsheet::ParseExcel::FmtJapan; use utf8; ############################################################################### # # Spreadsheet::ParseExcel::FmtJapan - A class for Cell formats. # # Used in conjunction with Spreadsheet::ParseExcel. # # Copyright (c) 2014 Douglas Wilson # Copyright (c) 2009-2013 John McNamara # Copyright (c) 2006-2008 Gabor Szabo # Copyright (c) 2000-2006 Kawai Takanori # # perltidy with standard settings. 
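#
# A minimal usage sketch (the file name is illustrative): as with the other
# Fmt* classes, an instance is passed as the optional second argument to
# Spreadsheet::ParseExcel::parse(). Code => 'sjis' makes TextFmt() return
# CP932/Shift_JIS encoded bytes instead of Perl character strings.
#
#   use Spreadsheet::ParseExcel;
#   use Spreadsheet::ParseExcel::FmtJapan;
#
#   my $formatter = Spreadsheet::ParseExcel::FmtJapan->new( Code => 'sjis' );
#   my $parser    = Spreadsheet::ParseExcel->new();
#   my $workbook  = $parser->parse( 'japanese.xls', $formatter );
#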
# # Documentation after __END__ # use strict; use warnings; use Encode qw(find_encoding decode); use base 'Spreadsheet::ParseExcel::FmtDefault'; our $VERSION = '0.65'; my %FormatTable = ( 0x00 => 'General', 0x01 => '0', 0x02 => '0.00', 0x03 => '#,##0', 0x04 => '#,##0.00', 0x05 => '(\\#,##0_);(\\#,##0)', 0x06 => '(\\#,##0_);[Red](\\#,##0)', 0x07 => '(\\#,##0.00_);(\\#,##0.00_)', 0x08 => '(\\#,##0.00_);[Red](\\#,##0.00_)', 0x09 => '0%', 0x0A => '0.00%', 0x0B => '0.00E+00', 0x0C => '# ?/?', 0x0D => '# ??/??', # 0x0E => 'm/d/yy', 0x0E => 'yyyy/m/d', 0x0F => 'd-mmm-yy', 0x10 => 'd-mmm', 0x11 => 'mmm-yy', 0x12 => 'h:mm AM/PM', 0x13 => 'h:mm:ss AM/PM', 0x14 => 'h:mm', 0x15 => 'h:mm:ss', # 0x16 => 'm/d/yy h:mm', 0x16 => 'yyyy/m/d h:mm', #0x17-0x24 -- Differs in Natinal 0x1E => 'm/d/yy', 0x1F => 'yyyy"年"m"月"d"日"', 0x20 => 'h"時"mm"分"', 0x21 => 'h"時"mm"分"ss"秒"', #0x17-0x24 -- Differs in Natinal 0x25 => '(#,##0_);(#,##0)', 0x26 => '(#,##0_);[Red](#,##0)', 0x27 => '(#,##0.00);(#,##0.00)', 0x28 => '(#,##0.00);[Red](#,##0.00)', 0x29 => '_(*#,##0_);_(*(#,##0);_(*"-"_);_(@_)', 0x2A => '_(\\*#,##0_);_(\\*(#,##0);_(*"-"_);_(@_)', 0x2B => '_(*#,##0.00_);_(*(#,##0.00);_(*"-"??_);_(@_)', 0x2C => '_(\\*#,##0.00_);_(\\*(#,##0.00);_(*"-"??_);_(@_)', 0x2D => 'mm:ss', 0x2E => '[h]:mm:ss', 0x2F => 'mm:ss.0', 0x30 => '##0.0E+0', 0x31 => '@', 0x37 => 'yyyy"年"m"月"', 0x38 => 'm"月"d"日"', 0x39 => 'ge.m.d', 0x3A => 'ggge"年"m"月"d"日"', ); #------------------------------------------------------------------------------ # new (for Spreadsheet::ParseExcel::FmtJapan) #------------------------------------------------------------------------------ sub new { my ( $class, %args ) = @_; my $encoding = $args{Code} || $args{encoding}; my $self = { Code => $encoding }; if($encoding){ $self->{encoding} = find_encoding($encoding eq 'sjis' ? 'cp932' : $encoding) or do{ require Carp; Carp::croak(qq{Unknown encoding '$encoding'}); }; } return bless $self, $class; } #------------------------------------------------------------------------------ # TextFmt (for Spreadsheet::ParseExcel::FmtJapan) #------------------------------------------------------------------------------ sub TextFmt { my ( $self, $text, $input_encoding ) = @_; if(!defined $input_encoding){ $input_encoding = 'utf8'; } elsif($input_encoding eq '_native_'){ $input_encoding = 'cp932'; # Shift_JIS in Microsoft products } $text = decode($input_encoding, $text); return $self->{Code} ? $self->{encoding}->encode($text) : $text; } #------------------------------------------------------------------------------ # FmtStringDef (for Spreadsheet::ParseExcel::FmtJapan) #------------------------------------------------------------------------------ sub FmtStringDef { my ( $self, $format_index, $book ) = @_; return $self->SUPER::FmtStringDef( $format_index, $book, \%FormatTable ); } #------------------------------------------------------------------------------ # CnvNengo (for Spreadsheet::ParseExcel::FmtJapan) #------------------------------------------------------------------------------ # Convert A.D. 
into Japanese Nengo (aka Gengo) my @Nengo = ( { name => '平成', # Heisei abbr_name => 'H', base => 1988, start => 19890108, }, { name => '昭和', # Showa abbr_name => 'S', base => 1925, start => 19261225, }, { name => '大正', # Taisho abbr_name => 'T', base => 1911, start => 19120730, }, { name => '明治', # Meiji abbr_name => 'M', base => 1867, start => 18680908, }, ); # Usage: CnvNengo(name => @tm) or CnvNeng(abbr_name => @tm) sub CnvNengo { my ( $kind, @tm ) = @_; my $year = $tm[5] + 1900; my $wk = ($year * 10000) + ($tm[4] * 100) + ($tm[3] * 1); #my $wk = sprintf( '%04d%02d%02d', $year, $tm[4], $tm[3] ); foreach my $nengo(@Nengo){ if( $wk >= $nengo->{start} ){ return $nengo->{$kind} . ($year - $nengo->{base}); } } return $year; } 1; __END__ =pod =head1 NAME Spreadsheet::ParseExcel::FmtJapan - A class for Cell formats. =head1 SYNOPSIS See the documentation for Spreadsheet::ParseExcel. =head1 DESCRIPTION This module is used in conjunction with Spreadsheet::ParseExcel. See the documentation for Spreadsheet::ParseExcel. =head1 AUTHOR Current maintainer 0.60+: Douglas Wilson dougw@cpan.org Maintainer 0.40-0.59: John McNamara jmcnamara@cpan.org Maintainer 0.27-0.33: Gabor Szabo szabgab@cpan.org Original author: Kawai Takanori kwitknr@cpan.org =head1 COPYRIGHT Copyright (c) 2014 Douglas Wilson Copyright (c) 2009-2013 John McNamara Copyright (c) 2006-2008 Gabor Szabo Copyright (c) 2000-2006 Kawai Takanori All rights reserved. You may distribute under the terms of either the GNU General Public License or the Artistic License, as specified in the Perl README file. =cut gdata/inst/perl/Spreadsheet/README-ParseExcel0000644000175100001440000000453413003720416020451 0ustar hornikusersNAME Spreadsheet::ParseExcel - Extract information from Excel file. DESCRIPTION This module allows you to extract information from Excel file. This module can handle files in Excel95, 97 and 2000 format. The module will work on the majority of Windows, UNIX and Macintosh platforms. REQUIREMENT This module requires these modules: OLE::Storage_Lite Jcode.pm (if you are using FmtJapan, or FmtJapan2) Unicode::Map (if you are using FmtJapan2 or FmtUnicode) IO::Scalar (if PERLIO is not available) Spreadsheet::WriteExcel (to use Spreadsheet::ParseExcel::SaveParser) INSTALLATION The module can be installed using the standard Perl procedure: perl Build.PL ./Build ./Build test ./Bbuild install or perl Makefile.PL make make test make install # You may need to be root make clean # or make realclean or using CPAN.pm or CPANPLUS.pm cpan Spreadsheet::ParseExcel Windows users without a working "make" can get nmake from: ftp://ftp.microsoft.com/Softlib/MSLFILES/nmake15.exe For FmtJapan2 If you use FmtJapan2, you must do following actions: (1) Copy "CP932Excel.map" included with this distribution to an applicatable directry. (2) To add "CP932Excel" as map name, append following lines to REGISTRY in the Unicode::Map hierarchy (changing map directy to applicatable directry): name: CP932Excel srcURL: $SrcUnicode/VENDORS/MICSFT/WINDOWS/CP932.TXT src: $DestUnicode/VENDORS/MICSFT/WINDOWS/CP932.TXT map: (which you copied directry)/CP932Excel.map # Don't remove this line AUTHOR Current maintainer 0.40+: John McNamara jmcnamara@cpan.org Maintainer 0.27-0.33: Gabor Szabo szabgab@cpan.org Original author: Kawai Takanori (Hippo2000) kwitknr@cpan.org SUPPORT Before you report problems, please check the "KNOWN PROBLEMS" section in the manual page for Spreadsheet::ParseExcel. 
There is a Google group for discussing and asking questions about Spreadsheet::ParseExcel. http://groups-beta.google.com/group/spreadsheet-parseexcel/ Bugs can be reported via rt.cpan.org. See the following for instructions: http://rt.cpan.org/Public/Dist/Display.html?Name=Spreadsheet-ParseExcel Test cases are always welcome. gdata/inst/perl/Spreadsheet/ParseXLSX.pm0000644000175100001440000006714113003720416017672 0ustar hornikuserspackage Spreadsheet::ParseXLSX; BEGIN { $Spreadsheet::ParseXLSX::AUTHORITY = 'cpan:DOY'; } $Spreadsheet::ParseXLSX::VERSION = '0.16'; use strict; use warnings; # ABSTRACT: parse XLSX files use Archive::Zip; use Graphics::ColorUtils 'rgb2hls', 'hls2rgb'; use Scalar::Util 'openhandle'; use Spreadsheet::ParseExcel 0.61; use XML::Twig; sub new { bless {}, shift; } sub parse { my $self = shift; my ($file, $formatter) = @_; my $zip = Archive::Zip->new; my $workbook = Spreadsheet::ParseExcel::Workbook->new; if (openhandle($file)) { bless $file, 'IO::File' if ref($file) eq 'GLOB'; # sigh $zip->readFromFileHandle($file) == Archive::Zip::AZ_OK or die "Can't open filehandle as a zip file"; $workbook->{File} = undef; } elsif (!ref($file)) { $zip->read($file) == Archive::Zip::AZ_OK or die "Can't open file '$file' as a zip file"; $workbook->{File} = $file; } else { die "Argument to 'new' must be a filename or open filehandle"; } return $self->_parse_workbook($zip, $workbook, $formatter); } sub _parse_workbook { my $self = shift; my ($zip, $workbook, $formatter) = @_; my $files = $self->_extract_files($zip); my ($version) = $files->{workbook}->find_nodes('//fileVersion'); my ($properties) = $files->{workbook}->find_nodes('//workbookPr'); if ($version) { $workbook->{Version} = $version->att('appName') . ($version->att('lowestEdited') ? ('-' . $version->att('lowestEdited')) : ("")); } $workbook->{Flag1904} = $properties && $properties->att('date1904') ? 1 : 0; $workbook->{FmtClass} = $formatter || Spreadsheet::ParseExcel::FmtDefault->new; my $themes = $self->_parse_themes((values %{ $files->{themes} })[0]); # XXX $workbook->{Color} = $themes->{Color}; my $styles = $self->_parse_styles($workbook, $files->{styles}); $workbook->{Format} = $styles->{Format}; $workbook->{FormatStr} = $styles->{FormatStr}; $workbook->{Font} = $styles->{Font}; $workbook->{PkgStr} = $self->_parse_shared_strings($files->{strings}) if $files->{strings}; # $workbook->{StandardWidth} = ...; # $workbook->{Author} = ...; # $workbook->{PrintArea} = ...; # $workbook->{PrintTitle} = ...; my @sheets = map { my $idx = $_->att('r:id'); my $sheet = Spreadsheet::ParseExcel::Worksheet->new( Name => $_->att('name'), _Book => $workbook, _SheetNo => $idx, ); $self->_parse_sheet($sheet, $files->{sheets}{$idx}); $sheet } $files->{workbook}->find_nodes('//sheets/sheet'); $workbook->{Worksheet} = \@sheets; $workbook->{SheetCount} = scalar(@sheets); my ($node) = $files->{workbook}->find_nodes('//workbookView'); my $selected = $node->att('activeTab'); $workbook->{SelectedSheet} = defined($selected) ? 
0+$selected : 0; return $workbook; } sub _parse_sheet { my $self = shift; my ($sheet, $sheet_file) = @_; $sheet->{MinRow} = 0; $sheet->{MinCol} = 0; $sheet->{MaxRow} = -1; $sheet->{MaxCol} = -1; $sheet->{Selection} = [ 0, 0 ]; my @merged_cells; my @column_formats; my @column_widths; my @row_heights; my $default_row_height = 15; my $default_column_width = 10; my $sheet_xml = XML::Twig->new( twig_roots => { #XXX need a fallback here, the dimension tag is optional 'dimension' => sub { my ($twig, $dimension) = @_; my ($rmin, $cmin, $rmax, $cmax) = $self->_dimensions( $dimension->att('ref') ); $sheet->{MinRow} = $rmin; $sheet->{MinCol} = $cmin; $sheet->{MaxRow} = $rmax; $sheet->{MaxCol} = $cmax; $twig->purge; }, 'mergeCells/mergeCell' => sub { my ( $twig, $merge_area ) = @_; if (my $ref = $merge_area->att('ref')) { my ($topleft, $bottomright) = $ref =~ /([^:]+):([^:]+)/; my ($toprow, $leftcol) = $self->_cell_to_row_col($topleft); my ($bottomrow, $rightcol) = $self->_cell_to_row_col($bottomright); push @{ $sheet->{MergedArea} }, [ $toprow, $leftcol, $bottomrow, $rightcol, ]; for my $row ($toprow .. $bottomrow) { for my $col ($leftcol .. $rightcol) { push(@merged_cells, [$row, $col]); } } } $twig->purge; }, 'sheetFormatPr' => sub { my ( $twig, $format ) = @_; $default_row_height //= $format->att('defaultRowHeight'); $default_column_width //= $format->att('baseColWidth'); $twig->purge; }, 'col' => sub { my ( $twig, $col ) = @_; for my $colnum ($col->att('min')..$col->att('max')) { $column_widths[$colnum - 1] = $col->att('width'); $column_formats[$colnum - 1] = $col->att('style'); } $twig->purge; }, 'row' => sub { my ( $twig, $row ) = @_; $row_heights[ $row->att('r') - 1 ] = $row->att('ht'); $twig->purge; }, 'selection' => sub { my ( $twig, $selection ) = @_; if (my $cell = $selection->att('activeCell')) { $sheet->{Selection} = [ $self->_cell_to_row_col($cell) ]; } elsif (my $range = $selection->att('sqref')) { my ($topleft, $bottomright) = $range =~ /([^:]+):([^:]+)/; $sheet->{Selection} = [ $self->_cell_to_row_col($topleft), $self->_cell_to_row_col($bottomright), ]; } $twig->purge; }, 'sheetPr/tabColor' => sub { my ( $twig, $tab_color ) = @_; $sheet->{TabColor} = $self->_color($sheet->{_Book}{Color}, $tab_color); $twig->purge; }, } ); $sheet_xml->parse( $sheet_file ); # 2nd pass: cell/row building is dependent on having parsed the merge definitions # beforehand. $sheet_xml = XML::Twig->new( twig_roots => { 'sheetData/row' => sub { my ( $twig, $row_elt ) = @_; for my $cell ( $row_elt->children('c') ){ my ($row, $col) = $self->_cell_to_row_col($cell->att('r')); my $type = $cell->att('t') || 'n'; my $val_xml = $type eq 'inlineStr' ? $cell->first_child('is')->first_child('t') : $cell->first_child('v'); my $val = $val_xml ? $val_xml->text : undef; my $long_type; if (!defined($val)) { $long_type = 'Text'; $val = ''; } elsif ($type eq 's') { $long_type = 'Text'; $val = $sheet->{_Book}{PkgStr}[$val]; } elsif ($type eq 'n') { $long_type = 'Numeric'; $val = defined($val) ? 0+$val : undef; } elsif ($type eq 'd') { $long_type = 'Date'; } elsif ($type eq 'b') { $long_type = 'Text'; $val = $val ? 
"TRUE" : "FALSE"; } elsif ($type eq 'e') { $long_type = 'Text'; } elsif ($type eq 'str' || $type eq 'inlineStr') { $long_type = 'Text'; } else { die "unimplemented type $type"; # XXX } my $format_idx = $cell->att('s') || 0; my $format = $sheet->{_Book}{Format}[$format_idx]; $format->{Merged} = !!grep { $row == $_->[0] && $col == $_->[1] } @merged_cells; # see the list of built-in formats below in _parse_styles # XXX probably should figure this out from the actual format string, # but that's not entirely trivial if (grep { $format->{FmtIdx} == $_ } 14..22, 45..47) { $long_type = 'Date'; } my $cell = Spreadsheet::ParseExcel::Cell->new( Val => $val, Type => $long_type, Merged => $format->{Merged}, Format => $format, FormatNo => $format_idx, ($cell->first_child('f') ? (Formula => $cell->first_child('f')->text) : ()), ); $cell->{_Value} = $sheet->{_Book}{FmtClass}->ValFmt( $cell, $sheet->{_Book} ); $sheet->{Cells}[$row][$col] = $cell; } $twig->purge; }, } ); $sheet_xml->parse( $sheet_file ); if ( ! $sheet->{Cells} ){ $sheet->{MaxRow} = $sheet->{MaxCol} = -1; } $sheet->{DefRowHeight} = 0+$default_row_height; $sheet->{DefColWidth} = 0+$default_column_width; $sheet->{RowHeight} = [ map { defined $_ ? 0+$_ : 0+$default_row_height } @row_heights ]; $sheet->{ColWidth} = [ map { defined $_ ? 0+$_ : 0+$default_column_width } @column_widths ]; $sheet->{ColFmtNo} = \@column_formats; } sub _parse_shared_strings { my $self = shift; my ($strings) = @_; my $PkgStr = []; if ($strings) { my $xml = XML::Twig->new( twig_handlers => { 'si' => sub { my ( $twig, $si ) = @_; # XXX this discards information about formatting within cells # not sure how to represent that push @$PkgStr, join( '', map { $_->text } $si->find_nodes('.//t') ); $twig->purge; }, } ); $xml->parse( $strings ); } return $PkgStr; } sub _parse_themes { my $self = shift; my ($themes) = @_; return {} unless $themes; my @color = map { $_->name eq 'a:sysClr' ? $_->att('lastClr') : $_->att('val') } $themes->find_nodes('//a:clrScheme/*/*'); # this shouldn't be necessary, but the documentation is wrong here # see http://stackoverflow.com/questions/2760976/theme-confusion-in-spreadsheetml ($color[0], $color[1]) = ($color[1], $color[0]); ($color[2], $color[3]) = ($color[3], $color[2]); return { Color => \@color, } } sub _parse_styles { my $self = shift; my ($workbook, $styles) = @_; my %halign = ( center => 2, centerContinuous => 6, distributed => 7, fill => 4, general => 0, justify => 5, left => 1, right => 3, ); my %valign = ( bottom => 2, center => 1, distributed => 4, justify => 3, top => 0, ); my %border = ( dashDot => 9, dashDotDot => 11, dashed => 3, dotted => 4, double => 6, hair => 7, medium => 2, mediumDashDot => 10, mediumDashDotDot => 12, mediumDashed => 8, none => 0, slantDashDot => 13, thick => 5, thin => 1, ); my %fill = ( darkDown => 7, darkGray => 3, darkGrid => 9, darkHorizontal => 5, darkTrellis => 10, darkUp => 8, darkVertical => 6, gray0625 => 18, gray125 => 17, lightDown => 13, lightGray => 4, lightGrid => 15, lightHorizontal => 11, lightTrellis => 16, lightUp => 14, lightVertical => 12, mediumGray => 2, none => 0, solid => 1, ); my @fills = map { [ $fill{$_->att('patternType')}, $self->_color($workbook->{Color}, $_->first_child('fgColor'), 1), $self->_color($workbook->{Color}, $_->first_child('bgColor'), 1), ] } $styles->find_nodes('//fills/fill/patternFill'); my @borders = map { my $border = $_; # XXX specs say "begin" and "end" rather than "left" and "right", # but... 
that's not what seems to be in the file itself (sigh) { colors => [ map { $self->_color( $workbook->{Color}, $border->first_child($_)->first_child('color') ) } qw(left right top bottom) ], styles => [ map { $border{$border->first_child($_)->att('style') || 'none'} } qw(left right top bottom) ], diagonal => [ 0, # XXX ->att('diagonalDown') and ->att('diagonalUp') 0, # XXX ->att('style') $self->_color( $workbook->{Color}, $border->first_child('diagonal')->first_child('color') ), ], } } $styles->find_nodes('//borders/border'); # these defaults are from # http://social.msdn.microsoft.com/Forums/en-US/oxmlsdk/thread/e27aaf16-b900-4654-8210-83c5774a179c my %format_str = ( 0 => 'GENERAL', 1 => '0', 2 => '0.00', 3 => '#,##0', 4 => '#,##0.00', 5 => '$#,##0_);($#,##0)', 6 => '$#,##0_);[Red]($#,##0)', 7 => '$#,##0.00_);($#,##0.00)', 8 => '$#,##0.00_);[Red]($#,##0.00)', 9 => '0%', 10 => '0.00%', 11 => '0.00E+00', 12 => '# ?/?', 13 => '# ??/??', 14 => 'm/d/yyyy', 15 => 'd-mmm-yy', 16 => 'd-mmm', 17 => 'mmm-yy', 18 => 'h:mm AM/PM', 19 => 'h:mm:ss AM/PM', 20 => 'h:mm', 21 => 'h:mm:ss', 22 => 'm/d/yyyy h:mm', 37 => '#,##0_);(#,##0)', 38 => '#,##0_);[Red](#,##0)', 39 => '#,##0.00_);(#,##0.00)', 40 => '#,##0.00_);[Red](#,##0.00)', 45 => 'mm:ss', 46 => '[h]:mm:ss', 47 => 'mm:ss.0', 48 => '##0.0E+0', 49 => '@', (map { $_->att('numFmtId') => $_->att('formatCode') } $styles->find_nodes('//numFmts/numFmt')), ); my @font = map { my $vert = $_->first_child('vertAlign'); my $under = $_->first_child('u'); Spreadsheet::ParseExcel::Font->new( Height => 0+$_->first_child('sz')->att('val'), # Attr => $iAttr, # XXX not sure if there's a better way to keep the indexing stuff # intact rather than just going straight to #xxxxxx # XXX also not sure what it means for the color tag to be missing, # just assuming black for now Color => ($_->first_child('color') ? $self->_color( $workbook->{Color}, $_->first_child('color') ) : '#000000' ), Super => ($vert ? ($vert->att('val') eq 'superscript' ? 1 : $vert->att('val') eq 'subscript' ? 2 : 0) : 0 ), # XXX not sure what the single accounting and double accounting # underline styles map to in xlsx. also need to map the new # underline styles UnderlineStyle => ($under # XXX sometimes style xml files can contain just with no # val attribute. i think this means single underline, but not # sure ? (!$under->att('val') ? 1 : $under->att('val') eq 'single' ? 1 : $under->att('val') eq 'double' ? 2 : 0) : 0 ), Name => $_->first_child('name')->att('val'), Bold => $_->has_child('b') ? 1 : 0, Italic => $_->has_child('i') ? 1 : 0, Underline => $_->has_child('u') ? 1 : 0, Strikeout => $_->has_child('strike') ? 1 : 0, ) } $styles->find_nodes('//fonts/font'); my @format = map { my $alignment = $_->first_child('alignment'); my $protection = $_->first_child('protection'); Spreadsheet::ParseExcel::Format->new( IgnoreFont => !$_->att('applyFont'), IgnoreFill => !$_->att('applyFill'), IgnoreBorder => !$_->att('applyBorder'), IgnoreAlignment => !$_->att('applyAlignment'), IgnoreNumberFormat => !$_->att('applyNumberFormat'), IgnoreProtection => !$_->att('applyProtection'), FontNo => 0+$_->att('fontId'), Font => $font[$_->att('fontId')], FmtIdx => 0+$_->att('numFmtId'), Lock => $protection && defined $protection->att('locked') ? $protection->att('locked') : 1, Hidden => $protection ? $protection->att('hidden') : 0, # Style => $iStyle, # Key123 => $i123, AlignH => $alignment ? $halign{$alignment->att('horizontal') || 'general'} : 0, Wrap => $alignment ? $alignment->att('wrapText') : 0, AlignV => $alignment ? 
$valign{$alignment->att('vertical') || 'bottom'} : 2, # JustLast => $iJustL, # Rotate => $iRotate, # Indent => $iInd, # Shrink => $iShrink, # Merge => $iMerge, # ReadDir => $iReadDir, BdrStyle => $borders[$_->att('borderId')]{styles}, BdrColor => $borders[$_->att('borderId')]{colors}, BdrDiag => $borders[$_->att('borderId')]{diagonal}, Fill => $fills[$_->att('fillId')], ) } $styles->find_nodes('//cellXfs/xf'); return { FormatStr => \%format_str, Font => \@font, Format => \@format, } } sub _extract_files { my $self = shift; my ($zip) = @_; my $type_base = 'http://schemas.openxmlformats.org/officeDocument/2006/relationships'; my $rels = $self->_parse_xml( $zip, $self->_rels_for('') ); my $wb_name = ($rels->find_nodes( qq ))[0]->att('Target'); my $wb_xml = $self->_parse_xml($zip, $wb_name); my $path_base = $self->_base_path_for($wb_name); my $wb_rels = $self->_parse_xml( $zip, $self->_rels_for($wb_name) ); my ($strings_xml) = map { $zip->memberNamed($path_base . $_->att('Target'))->contents } $wb_rels->find_nodes(qq); my $styles_xml = $self->_parse_xml( $zip, $path_base . ($wb_rels->find_nodes( qq ))[0]->att('Target') ); my %worksheet_xml = map { if ( my $sheetfile = $zip->memberNamed($path_base . $_->att('Target'))->contents ) { ( $_->att('Id') => $sheetfile ); } } $wb_rels->find_nodes(qq); my %themes_xml = map { $_->att('Id') => $self->_parse_xml($zip, $path_base . $_->att('Target')) } $wb_rels->find_nodes(qq); return { workbook => $wb_xml, styles => $styles_xml, sheets => \%worksheet_xml, themes => \%themes_xml, ($strings_xml ? (strings => $strings_xml) : ()), }; } sub _parse_xml { my $self = shift; my ($zip, $subfile) = @_; my $member = $zip->memberNamed($subfile); die "no subfile named $subfile" unless $member; my $xml = XML::Twig->new; $xml->parse(scalar $member->contents); return $xml; } sub _rels_for { my $self = shift; my ($file) = @_; my @path = split '/', $file; my $name = pop @path; $name = '' unless defined $name; push @path, '_rels'; push @path, "$name.rels"; return join '/', @path; } sub _base_path_for { my $self = shift; my ($file) = @_; my @path = split '/', $file; pop @path; return join('/', @path) . '/'; } sub _dimensions { my $self = shift; my ($dim) = @_; my ($topleft, $bottomright) = split ':', $dim; $bottomright = $topleft unless defined $bottomright; my ($rmin, $cmin) = $self->_cell_to_row_col($topleft); my ($rmax, $cmax) = $self->_cell_to_row_col($bottomright); return ($rmin, $cmin, $rmax, $cmax); } sub _cell_to_row_col { my $self = shift; my ($cell) = @_; my ($col, $row) = $cell =~ /([A-Z]+)([0-9]+)/; my $ncol = 0; for my $char (split //, $col) { $ncol *= 26; $ncol += ord($char) - ord('A') + 1; } $ncol = $ncol - 1; my $nrow = $row - 1; return ($nrow, $ncol); } sub _color { my $self = shift; my ($colors, $color_node, $fill) = @_; my $color; if ($color_node && !$color_node->att('auto')) { if (defined $color_node->att('indexed')) { # see https://rt.cpan.org/Public/Bug/Display.html?id=93065 if ($fill && $color_node->att('indexed') == 64) { return '#FFFFFF'; } else { $color = '#' . Spreadsheet::ParseExcel->ColorIdxToRGB( $color_node->att('indexed') ); } } elsif (defined $color_node->att('rgb')) { $color = '#' . substr($color_node->att('rgb'), 2, 6); } elsif (defined $color_node->att('theme')) { $color = '#' . 
$colors->[$color_node->att('theme')]; } $color = $self->_apply_tint($color, $color_node->att('tint')) if $color_node->att('tint'); } return $color; } sub _apply_tint { my $self = shift; my ($color, $tint) = @_; my ($r, $g, $b) = map { oct("0x$_") } $color =~ /#(..)(..)(..)/; my ($h, $l, $s) = rgb2hls($r, $g, $b); if ($tint < 0) { $l = $l * (1.0 + $tint); } else { $l = $l * (1.0 - $tint) + (1.0 - 1.0 * (1.0 - $tint)); } return scalar hls2rgb($h, $l, $s); } 1; __END__ =pod =encoding UTF-8 =head1 NAME Spreadsheet::ParseXLSX - parse XLSX files =head1 VERSION version 0.16 =head1 SYNOPSIS use Spreadsheet::ParseXLSX; my $parser = Spreadsheet::ParseXLSX->new; my $workbook = $parser->parse("file.xlsx"); # see Spreadsheet::ParseExcel for further documentation =head1 DESCRIPTION This module is an adaptor for L that reads XLSX files. =head1 METHODS =head2 new Returns a new parser instance. Takes no parameters. =head2 parse($file, $formatter) Parses an XLSX file. Parsing errors throw an exception. C<$file> can be either a filename or an open filehandle. Returns a L instance containing the parsed data. The C<$formatter> argument is an optional formatter class as described in L. =head1 INCOMPATIBILITIES This module returns data using classes from L, so for the most part, it should just be a drop-in replacement. That said, there are a couple areas where the data returned is intentionally different: =over 4 =item Colors In Spreadsheet::ParseExcel, colors are represented by integers which index into the color table, and you have to use C<< Spreadsheet::ParseExcel->ColorIdxToRGB >> in order to get the actual value out. In Spreadsheet::ParseXLSX, while the color table still exists, cells are also allowed to specify their color directly rather than going through the color table. In order to avoid confusion, I normalize all color values in Spreadsheet::ParseXLSX to their string RGB format (C<"#0088ff">). This affects the C, C, and C properties of formats, and the C property of fonts. Note that the default color is represented by C (the same thing that C would return). =item Formulas Spreadsheet::ParseExcel doesn't support formulas. Spreadsheet::ParseXLSX provides basic formula support by returning the text of the formula as part of the cell data. You can access it via C<< $cell->{Formula} >>. Note that the restriction still holds that formula cell values aren't available unless they were explicitly provided when the spreadsheet was written. =back =head1 BUGS =over 4 =item Large spreadsheets may cause segfaults on perl 5.14 and earlier This module internally uses XML::Twig, which makes it potentially subject to L on perl versions 5.14 and below (the underlying bug with perl weak references was fixed in perl 5.15.5). The larger and more complex the spreadsheet, the more likely to be affected, but the actual size at which it segfaults is platform dependent. On a 64-bit perl with 7.6gb memory, it was seen on spreadsheets about 300mb and above. You can work around this adding C to your code before parsing the spreadsheet, although this may have other consequences such as memory leaks. =item Worksheets without the C tag are not supported =item Intra-cell formatting is discarded =item Diagonal border styles are ignored =back In addition, there are still a few areas which are not yet implemented (the XLSX spec is quite large). If you run into any of those, bug reports are quite welcome. Please report any bugs to GitHub Issues at L. =head1 SEE ALSO L: The equivalent, for XLS files. 
L: An older, less robust and featureful implementation. =head1 SUPPORT You can find this documentation for this module with the perldoc command. perldoc Spreadsheet::ParseXLSX You can also look for information at: =over 4 =item * MetaCPAN L =item * RT: CPAN's request tracker L =item * Github L =item * CPAN Ratings L =back =head1 SPONSORS Parts of this code were paid for by =over 4 =item Socialflow L =back =head1 AUTHOR Jesse Luehrs =head1 COPYRIGHT AND LICENSE This software is Copyright (c) 2014 by Jesse Luehrs. This is free software, licensed under: The MIT (X11) License =cut gdata/inst/perl/Spreadsheet/ParseExcel.pm0000644000175100001440000034117413003720416020135 0ustar hornikuserspackage Spreadsheet::ParseExcel; ############################################################################## # # Spreadsheet::ParseExcel - Extract information from an Excel file. # # Copyright (c) 2014 Douglas Wilson # Copyright (c) 2009-2013 John McNamara # Copyright (c) 2006-2008 Gabor Szabo # Copyright (c) 2000-2008 Takanori Kawai # # perltidy with standard settings. # # Documentation after __END__ # use strict; use warnings; use 5.008; use OLE::Storage_Lite; use File::Basename qw(fileparse); use IO::File; use Config; use Crypt::RC4; use Digest::Perl::MD5; our $VERSION = '0.65'; use Spreadsheet::ParseExcel::Workbook; use Spreadsheet::ParseExcel::Worksheet; use Spreadsheet::ParseExcel::Font; use Spreadsheet::ParseExcel::Format; use Spreadsheet::ParseExcel::Cell; use Spreadsheet::ParseExcel::FmtDefault; my $currentbook; my @aColor = ( '000000', # 0x00 'FFFFFF', 'FFFFFF', 'FFFFFF', 'FFFFFF', 'FFFFFF', 'FFFFFF', 'FFFFFF', '000000', # 0x08 'FFFFFF', 'FF0000', '00FF00', '0000FF', 'FFFF00', 'FF00FF', '00FFFF', '800000', # 0x10 '008000', '000080', '808000', '800080', '008080', 'C0C0C0', '808080', '9999FF', # 0x18 '993366', 'FFFFCC', 'CCFFFF', '660066', 'FF8080', '0066CC', 'CCCCFF', '000080', # 0x20 'FF00FF', 'FFFF00', '00FFFF', '800080', '800000', '008080', '0000FF', '00CCFF', # 0x28 'CCFFFF', 'CCFFCC', 'FFFF99', '99CCFF', 'FF99CC', 'CC99FF', 'FFCC99', '3366FF', # 0x30 '33CCCC', '99CC00', 'FFCC00', 'FF9900', 'FF6600', '666699', '969696', '003366', # 0x38 '339966', '003300', '333300', '993300', '993366', '333399', '333333', '000000' # 0x40 ); use constant verExcel95 => 0x500; use constant verExcel97 => 0x600; use constant verBIFF2 => 0x00; use constant verBIFF3 => 0x02; use constant verBIFF4 => 0x04; use constant verBIFF5 => 0x08; use constant verBIFF8 => 0x18; use constant MS_BIFF_CRYPTO_NONE => 0; use constant MS_BIFF_CRYPTO_XOR => 1; use constant MS_BIFF_CRYPTO_RC4 => 2; use constant sizeof_BIFF_8_FILEPASS => ( 6 + 3 * 16 ); use constant REKEY_BLOCK => 0x400; # Error code for some of the common parsing errors. use constant ErrorNone => 0; use constant ErrorNoFile => 1; use constant ErrorNoExcelData => 2; use constant ErrorFileEncrypted => 3; # Color index for the 'auto' color use constant AutoColor => 64; our %error_strings = ( ErrorNone, '', # 0 ErrorNoFile, 'File not found', # 1 ErrorNoExcelData, 'No Excel data found in file', # 2 ErrorFileEncrypted, 'File is encrypted', # 3 ); our %ProcTbl = ( #Develpers' Kit P291 0x14 => \&_subHeader, # Header 0x15 => \&_subFooter, # Footer 0x18 => \&_subName, # NAME(?) 
0x1A => \&_subVPageBreak, # Vertical Page Break 0x1B => \&_subHPageBreak, # Horizontal Page Break 0x22 => \&_subFlg1904, # 1904 Flag 0x26 => \&_subMargin, # Left Margin 0x27 => \&_subMargin, # Right Margin 0x28 => \&_subMargin, # Top Margin 0x29 => \&_subMargin, # Bottom Margin 0x2A => \&_subPrintHeaders, # Print Headers 0x2B => \&_subPrintGridlines, # Print Gridlines 0x3C => \&_subContinue, # Continue 0x3D => \&_subWindow1, # Window1 0x43 => \&_subXF, # XF for Excel < 4. 0x0443 => \&_subXF, # XF for Excel = 4. 0x862 => \&_subSheetLayout, # Sheet Layout 0x1B8 => \&_subHyperlink, # HYPERLINK #Develpers' Kit P292 0x55 => \&_subDefColWidth, # Consider 0x5C => \&_subWriteAccess, # WRITEACCESS 0x7D => \&_subColInfo, # Colinfo 0x7E => \&_subRK, # RK 0x81 => \&_subWSBOOL, # WSBOOL 0x83 => \&_subHcenter, # HCENTER 0x84 => \&_subVcenter, # VCENTER 0x85 => \&_subBoundSheet, # BoundSheet 0x92 => \&_subPalette, # Palette, fgp 0x99 => \&_subStandardWidth, # Standard Col #Develpers' Kit P293 0xA1 => \&_subSETUP, # SETUP 0xBD => \&_subMulRK, # MULRK 0xBE => \&_subMulBlank, # MULBLANK 0xD6 => \&_subRString, # RString #Develpers' Kit P294 0xE0 => \&_subXF, # ExTended Format 0xE5 => \&_subMergeArea, # MergeArea (Not Documented) 0xFC => \&_subSST, # Shared String Table 0xFD => \&_subLabelSST, # Label SST #Develpers' Kit P295 0x201 => \&_subBlank, # Blank 0x202 => \&_subInteger, # Integer(Not Documented) 0x203 => \&_subNumber, # Number 0x204 => \&_subLabel, # Label 0x205 => \&_subBoolErr, # BoolErr 0x207 => \&_subString, # STRING 0x208 => \&_subRow, # RowData 0x221 => \&_subArray, # Array (Consider) 0x225 => \&_subDefaultRowHeight, # Consider 0x31 => \&_subFont, # Font 0x231 => \&_subFont, # Font 0x27E => \&_subRK, # RK 0x41E => \&_subFormat, # Format 0x06 => \&_subFormula, # Formula 0x406 => \&_subFormula, # Formula 0x009 => \&_subBOF, # BOF(BIFF2) 0x209 => \&_subBOF, # BOF(BIFF3) 0x409 => \&_subBOF, # BOF(BIFF4) 0x809 => \&_subBOF, # BOF(BIFF5-8) ); our $BIGENDIAN; our $PREFUNC; our $_use_perlio; #------------------------------------------------------------------------------ # Spreadsheet::ParseExcel->new #------------------------------------------------------------------------------ sub new { my ( $class, %hParam ) = @_; if ( not defined $_use_perlio ) { if ( exists $Config{useperlio} && defined $Config{useperlio} && $Config{useperlio} eq "define" ) { $_use_perlio = 1; } else { $_use_perlio = 0; require IO::Scalar; import IO::Scalar; } } # Check ENDIAN(Little: Intel etc. BIG: Sparc etc) $BIGENDIAN = ( defined $hParam{Endian} ) ? $hParam{Endian} : ( unpack( "H08", pack( "L", 2 ) ) eq '02000000' ) ? 
0 : 1; my $self = {}; bless $self, $class; $self->{GetContent} = \&_subGetContent; if ( $hParam{EventHandlers} ) { $self->SetEventHandlers( $hParam{EventHandlers} ); } else { $self->SetEventHandlers( \%ProcTbl ); } if ( $hParam{AddHandlers} ) { foreach my $sKey ( keys( %{ $hParam{AddHandlers} } ) ) { $self->SetEventHandler( $sKey, $hParam{AddHandlers}->{$sKey} ); } } $self->{CellHandler} = $hParam{CellHandler}; $self->{NotSetCell} = $hParam{NotSetCell}; $self->{Object} = $hParam{Object}; if ( defined $hParam{Password} ) { $self->{Password} = $hParam{Password}; } else { $self->{Password} = 'VelvetSweatshop'; } $self->{_error_status} = ErrorNone; return $self; } #------------------------------------------------------------------------------ # Spreadsheet::ParseExcel->SetEventHandler #------------------------------------------------------------------------------ sub SetEventHandler { my ( $self, $key, $sub_ref ) = @_; $self->{FuncTbl}->{$key} = $sub_ref; } #------------------------------------------------------------------------------ # Spreadsheet::ParseExcel->SetEventHandlers #------------------------------------------------------------------------------ sub SetEventHandlers { my ( $self, $rhTbl ) = @_; $self->{FuncTbl} = undef; foreach my $sKey ( keys %$rhTbl ) { $self->{FuncTbl}->{$sKey} = $rhTbl->{$sKey}; } } #------------------------------------------------------------------------------ # Decryption routines # based on sources of gnumeric (ms-biff.c ms-excel-read.c) #------------------------------------------------------------------------------ sub md5state { my ( $md5 ) = @_; my $s = ''; for ( my $i = 0 ; $i < 4 ; $i++ ) { my $v = $md5->{_state}[$i]; $s .= chr( $v & 0xff ); $s .= chr( ( $v >> 8 ) & 0xff ); $s .= chr( ( $v >> 16 ) & 0xff ); $s .= chr( ( $v >> 24 ) & 0xff ); } return $s; } sub MakeKey { my ( $block, $key, $valContext ) = @_; my $pwarray = "\0" x 64; substr( $pwarray, 0, 5 ) = substr( $valContext, 0, 5 ); substr( $pwarray, 5, 1 ) = chr( $block & 0xff ); substr( $pwarray, 6, 1 ) = chr( ( $block >> 8 ) & 0xff ); substr( $pwarray, 7, 1 ) = chr( ( $block >> 16 ) & 0xff ); substr( $pwarray, 8, 1 ) = chr( ( $block >> 24 ) & 0xff ); substr( $pwarray, 9, 1 ) = "\x80"; substr( $pwarray, 56, 1 ) = "\x48"; my $md5 = Digest::Perl::MD5->new(); $md5->add( $pwarray ); my $s = md5state( $md5 ); ${$key} = Crypt::RC4->new( $s ); } sub VerifyPassword { my ( $password, $docid, $salt_data, $hashedsalt_data, $valContext ) = @_; my $pwarray = "\0" x 64; my $i; my $md5 = Digest::Perl::MD5->new(); for ( $i = 0 ; $i < length( $password ) ; $i++ ) { my $o = ord( substr( $password, $i, 1 ) ); substr( $pwarray, 2 * $i, 1 ) = chr( $o & 0xff ); substr( $pwarray, 2 * $i + 1, 1 ) = chr( ( $o >> 8 ) & 0xff ); } substr( $pwarray, 2 * $i, 1 ) = chr( 0x80 ); substr( $pwarray, 56, 1 ) = chr( ( $i << 4 ) & 0xff ); $md5->add( $pwarray ); my $mdContext1 = md5state( $md5 ); my $offset = 0; my $keyoffset = 0; my $tocopy = 5; $md5->reset; while ( $offset != 16 ) { if ( ( 64 - $offset ) < 5 ) { $tocopy = 64 - $offset; } substr( $pwarray, $offset, $tocopy ) = substr( $mdContext1, $keyoffset, $tocopy ); $offset += $tocopy; if ( $offset == 64 ) { $md5->add( $pwarray ); $keyoffset = $tocopy; $tocopy = 5 - $tocopy; $offset = 0; next; } $keyoffset = 0; $tocopy = 5; substr( $pwarray, $offset, 16 ) = $docid; $offset += 16; } substr( $pwarray, 16, 1 ) = "\x80"; substr( $pwarray, 17, 47 ) = "\0" x 47; substr( $pwarray, 56, 1 ) = "\x80"; substr( $pwarray, 57, 1 ) = "\x0a"; $md5->add( $pwarray ); ${$valContext} = md5state( $md5 
); my $key; MakeKey( 0, \$key, ${$valContext} ); my $salt = $key->RC4( $salt_data ); my $hashedsalt = $key->RC4( $hashedsalt_data ); $salt .= "\x80" . "\0" x 47; substr( $salt, 56, 1 ) = "\x80"; $md5->reset; $md5->add( $salt ); my $mdContext2 = md5state( $md5 ); return ( $mdContext2 eq $hashedsalt ); } sub SkipBytes { my ( $q, $start, $count ) = @_; my $scratch = "\0" x REKEY_BLOCK; my $block; $block = int( ( $start + $count ) / REKEY_BLOCK ); if ( $block != $q->{block} ) { MakeKey( $q->{block} = $block, \$q->{rc4_key}, $q->{md5_ctxt} ); $count = ( $start + $count ) % REKEY_BLOCK; } $q->{rc4_key}->RC4( substr( $scratch, 0, $count ) ); return 1; } sub SetDecrypt { my ( $q, $version, $password ) = @_; if ( $q->{opcode} != 0x2f ) { return 0; } if ( $password eq '' ) { return 0; } # TODO old versions decryption #if (version < MS_BIFF_V8 || q->data[0] == 0) # return ms_biff_pre_biff8_query_set_decrypt (q, password); if ( $q->{length} != sizeof_BIFF_8_FILEPASS ) { return 0; } unless ( VerifyPassword( $password, substr( $q->{data}, 6, 16 ), substr( $q->{data}, 22, 16 ), substr( $q->{data}, 38, 16 ), \$q->{md5_ctxt} ) ) { return 0; } $q->{encryption} = MS_BIFF_CRYPTO_RC4; $q->{block} = -1; # The first record after FILEPASS seems to be unencrypted $q->{dont_decrypt_next_record} = 1; # Pretend to decrypt the entire stream up till this point, it was # encrypted, but do it anyway to keep the rc4 state in sync SkipBytes( $q, 0, $q->{streamPos} ); return 1; } sub InitStream { my ( $stream_data ) = @_; my %q; $q{opcode} = 0; $q{length} = 0; $q{data} = ''; $q{stream} = $stream_data; # data stream $q{streamLen} = length( $stream_data ); # stream length $q{streamPos} = 0; # stream position $q{encryption} = 0; $q{xor_key} = ''; $q{rc4_key} = ''; $q{md5_ctxt} = ''; $q{block} = 0; $q{dont_decrypt_next_record} = 0; return \%q; } sub QueryNext { my ( $q ) = @_; if ( $q->{streamPos} + 4 >= $q->{streamLen} ) { return 0; } my $data = substr( $q->{stream}, $q->{streamPos}, 4 ); ( $q->{opcode}, $q->{length} ) = unpack( 'v2', $data ); # No biff record should be larger than around 20,000. if ( $q->{length} >= 20000 ) { return 0; } if ( $q->{length} > 0 ) { $q->{data} = substr( $q->{stream}, $q->{streamPos} + 4, $q->{length} ); } else { $q->{data} = undef; $q->{dont_decrypt_next_record} = 1; } if ( $q->{encryption} == MS_BIFF_CRYPTO_RC4 ) { if ( $q->{dont_decrypt_next_record} ) { SkipBytes( $q, $q->{streamPos}, 4 + $q->{length} ); $q->{dont_decrypt_next_record} = 0; } else { my $pos = $q->{streamPos}; my $data = $q->{data}; my $len = $q->{length}; my $res = ''; # Pretend to decrypt header. SkipBytes( $q, $pos, 4 ); $pos += 4; while ( $q->{block} != int( ( $pos + $len ) / REKEY_BLOCK ) ) { my $step = REKEY_BLOCK - ( $pos % REKEY_BLOCK ); $res .= $q->{rc4_key}->RC4( substr( $data, 0, $step ) ); $data = substr( $data, $step ); $pos += $step; $len -= $step; MakeKey( ++$q->{block}, \$q->{rc4_key}, $q->{md5_ctxt} ); } $res .= $q->{rc4_key}->RC4( substr( $data, 0, $len ) ); $q->{data} = $res; } } elsif ( $q->{encryption} == MS_BIFF_CRYPTO_XOR ) { # not implemented return 0; } elsif ( $q->{encryption} == MS_BIFF_CRYPTO_NONE ) { } $q->{streamPos} += 4 + $q->{length}; return 1; } ############################################################################### # # Parse() # # Parse the Excel file and convert it into a tree of objects.. 
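#
# A minimal usage sketch (the file name is illustrative, not part of this
# module): parse a workbook and walk the resulting tree of worksheet and
# cell objects.
#
#   my $parser   = Spreadsheet::ParseExcel->new();
#   my $workbook = $parser->parse('example.xls')
#       or die $parser->error(), "\n";
#
#   for my $worksheet ( $workbook->worksheets() ) {
#       my ( $row_min, $row_max ) = $worksheet->row_range();
#       my ( $col_min, $col_max ) = $worksheet->col_range();
#       for my $row ( $row_min .. $row_max ) {
#           for my $col ( $col_min .. $col_max ) {
#               my $cell = $worksheet->get_cell( $row, $col ) or next;
#               print $cell->value(), "\n";
#           }
#       }
#   }
#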
# sub parse { my ( $self, $source, $formatter ) = @_; my $workbook = Spreadsheet::ParseExcel::Workbook->new(); $currentbook = $workbook; $workbook->{SheetCount} = 0; $workbook->{CellHandler} = $self->{CellHandler}; $workbook->{NotSetCell} = $self->{NotSetCell}; $workbook->{Object} = $self->{Object}; $workbook->{aColor} = [ @aColor ]; my ( $biff_data, $data_length ) = $self->_get_content( $source, $workbook ); return undef if not $biff_data; if ( $formatter ) { $workbook->{FmtClass} = $formatter; } else { $workbook->{FmtClass} = Spreadsheet::ParseExcel::FmtDefault->new(); } # Parse the BIFF data. my $stream = InitStream( $biff_data ); while ( QueryNext( $stream ) ) { my $record = $stream->{opcode}; my $record_length = $stream->{length}; my $record_header = $stream->{data}; # If the file contains a FILEPASS record we assume that it is encrypted # and cannot be parsed. if ( $record == 0x002F ) { unless ( SetDecrypt( $stream, '', $self->{Password} ) ) { $self->{_error_status} = ErrorFileEncrypted; return undef; } } # Special case of a formula String with no string. if ( $workbook->{_PrevPos} && ( defined $self->{FuncTbl}->{$record} ) && ( $record != 0x207 ) ) { my $iPos = $workbook->{_PrevPos}; $workbook->{_PrevPos} = undef; my ( $row, $col, $format_index ) = @$iPos; _NewCell( $workbook, $row, $col, Kind => 'Formula String', Val => '', FormatNo => $format_index, Format => $workbook->{Format}[$format_index], Numeric => 0, Code => undef, Book => $workbook, ); } # If the BIFF record matches 0x0*09 then it is a BOF record. # We reset the _skip_chart flag to ensure we check the sheet type. if ( ( $record & 0xF0FF ) == 0x09 ) { $workbook->{_skip_chart} = 0; } if ( defined $self->{FuncTbl}->{$record} && !$workbook->{_skip_chart} ) { $self->{FuncTbl}->{$record} ->( $workbook, $record, $record_length, $record_header ); } $PREFUNC = $record if ( $record != 0x3C ); #Not Continue last if defined $workbook->{_ParseAbort}; } foreach my $worksheet (@{$workbook->{Worksheet}} ) { # Install hyperlinks into each cell # Range is undocumented for user; allows reuse of data if ($worksheet->{HyperLinks}) { foreach my $link (@{$worksheet->{HyperLinks}}) { for( my $row = $link->[3]; $row <= $link->[4]; $row++ ) { for( my $col = $link->[5]; $col <= $link->[6]; $col++ ) { $worksheet->{Cells}[$row][$col]{Hyperlink} = $link; } } } } } return $workbook; } ############################################################################### # # _get_content() # # Get the Excel BIFF content from the file or filehandle. # sub _get_content { my ( $self, $source, $workbook ) = @_; my ( $biff_data, $data_length ); # Reset the error status in case method is called more than once. $self->{_error_status} = ErrorNone; my $ref = ref($source); if ( $ref ) { if ( $ref eq 'SCALAR' ) { # Specified by a scalar buffer. ( $biff_data, $data_length ) = $self->{GetContent}->( $source ); } elsif ( $ref eq 'ARRAY' ) { # Specified by file content $workbook->{File} = undef; my $sData = join( '', @$source ); ( $biff_data, $data_length ) = $self->{GetContent}->( \$sData ); } else { # Assume filehandle # For CGI.pm (Light FileHandle) my $sBuff = ''; if ( eval { binmode( $source ) } ) { my $sWk; while ( read( $source, $sWk, 4096 ) ) { $sBuff .= $sWk; } } else { # Assume IO::Wrap or some other filehandle-like OO-only object my $sWk; # IO::Wrap does not implement binmode eval { $source->binmode() }; while ( $source->read( $sWk, 4096 ) ) { $sBuff .= $sWk; } } ( $biff_data, $data_length ) = $self->{GetContent}->( \$sBuff ); } } else { # Specified by filename . 
$workbook->{File} = $source; if ( !-e $source ) { $self->{_error_status} = ErrorNoFile; return undef; } ( $biff_data, $data_length ) = $self->{GetContent}->( $source ); } # If the read was successful return the data. if ( $data_length ) { return ( $biff_data, $data_length ); } else { $self->{_error_status} = ErrorNoExcelData; return undef; } } #------------------------------------------------------------------------------ # _subGetContent (for Spreadsheet::ParseExcel) #------------------------------------------------------------------------------ sub _subGetContent { my ( $sFile ) = @_; my $oOl = OLE::Storage_Lite->new( $sFile ); return ( undef, undef ) unless ( $oOl ); my @aRes = $oOl->getPpsSearch( [ OLE::Storage_Lite::Asc2Ucs( 'Book' ), OLE::Storage_Lite::Asc2Ucs( 'Workbook' ) ], 1, 1 ); return ( undef, undef ) if ( $#aRes < 0 ); #Hack from Herbert if ( $aRes[0]->{Data} ) { return ( $aRes[0]->{Data}, length( $aRes[0]->{Data} ) ); } #Same as OLE::Storage_Lite my $oIo; #1. $sFile is Ref of scalar if ( ref( $sFile ) eq 'SCALAR' ) { if ( $_use_perlio ) { open $oIo, "<", \$sFile; } else { $oIo = IO::Scalar->new; $oIo->open( $sFile ); } } #2. $sFile is a IO::Handle object elsif ( UNIVERSAL::isa( $sFile, 'IO::Handle' ) ) { $oIo = $sFile; binmode( $oIo ); } #3. $sFile is a simple filename string elsif ( !ref( $sFile ) ) { $oIo = IO::File->new; $oIo->open( "<$sFile" ) || return undef; binmode( $oIo ); } my $sWk; my $sBuff = ''; while ( $oIo->read( $sWk, 4096 ) ) { #4_096 has no special meanings $sBuff .= $sWk; } $oIo->close(); #Not Excel file (simple method) return ( undef, undef ) if ( substr( $sBuff, 0, 1 ) ne "\x09" ); return ( $sBuff, length( $sBuff ) ); } #------------------------------------------------------------------------------ # _subBOF (for Spreadsheet::ParseExcel) Developers' Kit : P303 #------------------------------------------------------------------------------ sub _subBOF { my ( $oBook, $bOp, $bLen, $sWk ) = @_; my ( $iVer, $iDt ) = unpack( "v2", $sWk ); #Workbook Global if ( $iDt == 0x0005 ) { $oBook->{Version} = unpack( "v", $sWk ); $oBook->{BIFFVersion} = ( $oBook->{Version} == verExcel95 ) ? verBIFF5 : verBIFF8; $oBook->{_CurSheet} = undef; $oBook->{_CurSheet_} = -1; } #Worksheet or Dialogsheet elsif ( $iDt != 0x0020 ) { #if($iDt == 0x0010) if ( defined $oBook->{_CurSheet_} ) { $oBook->{_CurSheet} = $oBook->{_CurSheet_} + 1; $oBook->{_CurSheet_}++; ( $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{SheetVersion}, $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{SheetType}, ) = unpack( "v2", $sWk ) if ( length( $sWk ) > 4 ); } else { $oBook->{BIFFVersion} = int( $bOp / 0x100 ); if ( ( $oBook->{BIFFVersion} == verBIFF2 ) || ( $oBook->{BIFFVersion} == verBIFF3 ) || ( $oBook->{BIFFVersion} == verBIFF4 ) ) { $oBook->{Version} = $oBook->{BIFFVersion}; $oBook->{_CurSheet} = 0; $oBook->{Worksheet}[ $oBook->{SheetCount} ] = Spreadsheet::ParseExcel::Worksheet->new( _Name => '', Name => '', _Book => $oBook, _SheetNo => $oBook->{SheetCount}, ); $oBook->{SheetCount}++; } } } else { # Set flag to ignore all chart records until we reach another BOF. 
$oBook->{_skip_chart} = 1; } } #------------------------------------------------------------------------------ # _subBlank (for Spreadsheet::ParseExcel) DK:P303 #------------------------------------------------------------------------------ sub _subBlank { my ( $oBook, $bOp, $bLen, $sWk ) = @_; my ( $iR, $iC, $iF ) = unpack( "v3", $sWk ); _NewCell( $oBook, $iR, $iC, Kind => 'BLANK', Val => '', FormatNo => $iF, Format => $oBook->{Format}[$iF], Numeric => 0, Code => undef, Book => $oBook, ); #2.MaxRow, MaxCol, MinRow, MinCol _SetDimension( $oBook, $iR, $iC, $iC ); } #------------------------------------------------------------------------------ # _subInteger (for Spreadsheet::ParseExcel) Not in DK #------------------------------------------------------------------------------ sub _subInteger { my ( $oBook, $bOp, $bLen, $sWk ) = @_; my ( $iR, $iC, $iF, $sTxt, $sDum ); ( $iR, $iC, $iF, $sDum, $sTxt ) = unpack( "v3cv", $sWk ); _NewCell( $oBook, $iR, $iC, Kind => 'INTEGER', Val => $sTxt, FormatNo => $iF, Format => $oBook->{Format}[$iF], Numeric => 0, Code => undef, Book => $oBook, ); #2.MaxRow, MaxCol, MinRow, MinCol _SetDimension( $oBook, $iR, $iC, $iC ); } #------------------------------------------------------------------------------ # _subNumber (for Spreadsheet::ParseExcel) : DK: P354 #------------------------------------------------------------------------------ sub _subNumber { my ( $oBook, $bOp, $bLen, $sWk ) = @_; my ( $iR, $iC, $iF ) = unpack( "v3", $sWk ); my $dVal = _convDval( substr( $sWk, 6, 8 ) ); _NewCell( $oBook, $iR, $iC, Kind => 'Number', Val => $dVal, FormatNo => $iF, Format => $oBook->{Format}[$iF], Numeric => 1, Code => undef, Book => $oBook, ); #2.MaxRow, MaxCol, MinRow, MinCol _SetDimension( $oBook, $iR, $iC, $iC ); } #------------------------------------------------------------------------------ # _convDval (for Spreadsheet::ParseExcel) #------------------------------------------------------------------------------ sub _convDval { my ( $sWk ) = @_; return unpack( "d", ( $BIGENDIAN ) ? 
pack( "c8", reverse( unpack( "c8", $sWk ) ) ) : $sWk ); } #------------------------------------------------------------------------------ # _subRString (for Spreadsheet::ParseExcel) DK:P405 #------------------------------------------------------------------------------ sub _subRString { my ( $oBook, $bOp, $bLen, $sWk ) = @_; my ( $iR, $iC, $iF, $iL, $sTxt ); ( $iR, $iC, $iF, $iL ) = unpack( "v4", $sWk ); $sTxt = substr( $sWk, 8, $iL ); #Has STRUN if ( length( $sWk ) > ( 8 + $iL ) ) { _NewCell( $oBook, $iR, $iC, Kind => 'RString', Val => $sTxt, FormatNo => $iF, Format => $oBook->{Format}[$iF], Numeric => 0, Code => '_native_', #undef, Book => $oBook, Rich => substr( $sWk, ( 8 + $iL ) + 1 ), ); } else { _NewCell( $oBook, $iR, $iC, Kind => 'RString', Val => $sTxt, FormatNo => $iF, Format => $oBook->{Format}[$iF], Numeric => 0, Code => '_native_', Book => $oBook, ); } #2.MaxRow, MaxCol, MinRow, MinCol _SetDimension( $oBook, $iR, $iC, $iC ); } #------------------------------------------------------------------------------ # _subBoolErr (for Spreadsheet::ParseExcel) DK:P306 #------------------------------------------------------------------------------ sub _subBoolErr { my ( $oBook, $bOp, $bLen, $sWk ) = @_; my ( $iR, $iC, $iF ) = unpack( "v3", $sWk ); my ( $iVal, $iFlg ) = unpack( "cc", substr( $sWk, 6, 2 ) ); my $sTxt = DecodeBoolErr( $iVal, $iFlg ); _NewCell( $oBook, $iR, $iC, Kind => 'BoolError', Val => $sTxt, FormatNo => $iF, Format => $oBook->{Format}[$iF], Numeric => 0, Code => undef, Book => $oBook, ); #2.MaxRow, MaxCol, MinRow, MinCol _SetDimension( $oBook, $iR, $iC, $iC ); } ############################################################################### # # _subRK() # # Decode the RK BIFF record. # sub _subRK { my ( $workbook, $biff_number, $length, $data ) = @_; my ( $row, $col, $format_index, $rk_number ) = unpack( 'vvvV', $data ); my $number = _decode_rk_number( $rk_number ); _NewCell( $workbook, $row, $col, Kind => 'RK', Val => $number, FormatNo => $format_index, Format => $workbook->{Format}->[$format_index], Numeric => 1, Code => undef, Book => $workbook, ); # Store the max and min row/col values. _SetDimension( $workbook, $row, $col, $col ); } #------------------------------------------------------------------------------ # _subArray (for Spreadsheet::ParseExcel) DK:P297 #------------------------------------------------------------------------------ sub _subArray { my ( $oBook, $bOp, $bLen, $sWk ) = @_; my ( $iBR, $iER, $iBC, $iEC ) = unpack( "v2c2", $sWk ); } #------------------------------------------------------------------------------ # _subFormula (for Spreadsheet::ParseExcel) DK:P336 #------------------------------------------------------------------------------ sub _subFormula { my ( $oBook, $bOp, $bLen, $sWk ) = @_; my ( $iR, $iC, $iF ) = unpack( "v3", $sWk ); my ( $iFlg ) = unpack( "v", substr( $sWk, 12, 2 ) ); if ( $iFlg == 0xFFFF ) { my ( $iKind ) = unpack( "c", substr( $sWk, 6, 1 ) ); my ( $iVal ) = unpack( "c", substr( $sWk, 8, 1 ) ); if ( ( $iKind == 1 ) or ( $iKind == 2 ) ) { my $sTxt = ( $iKind == 1 ) ? 
DecodeBoolErr( $iVal, 0 ) : DecodeBoolErr( $iVal, 1 ); _NewCell( $oBook, $iR, $iC, Kind => 'Formula Bool', Val => $sTxt, FormatNo => $iF, Format => $oBook->{Format}[$iF], Numeric => 0, Code => undef, Book => $oBook, ); } else { # Result (Reserve Only) $oBook->{_PrevPos} = [ $iR, $iC, $iF ]; } } else { my $dVal = _convDval( substr( $sWk, 6, 8 ) ); _NewCell( $oBook, $iR, $iC, Kind => 'Formula Number', Val => $dVal, FormatNo => $iF, Format => $oBook->{Format}[$iF], Numeric => 1, Code => undef, Book => $oBook, ); } #2.MaxRow, MaxCol, MinRow, MinCol _SetDimension( $oBook, $iR, $iC, $iC ); } #------------------------------------------------------------------------------ # _subString (for Spreadsheet::ParseExcel) DK:P414 #------------------------------------------------------------------------------ sub _subString { my ( $oBook, $bOp, $bLen, $sWk ) = @_; #Position (not enough for ARRAY) my $iPos = $oBook->{_PrevPos}; return undef unless ( $iPos ); $oBook->{_PrevPos} = undef; my ( $iR, $iC, $iF ) = @$iPos; my ( $iLen, $sTxt, $sCode ); if ( $oBook->{BIFFVersion} == verBIFF8 ) { my ( $raBuff, $iLen ) = _convBIFF8String( $oBook, $sWk, 1 ); $sTxt = $raBuff->[0]; $sCode = ( $raBuff->[1] ) ? 'ucs2' : undef; } elsif ( $oBook->{BIFFVersion} == verBIFF5 ) { $sCode = '_native_'; $iLen = unpack( "v", $sWk ); $sTxt = substr( $sWk, 2, $iLen ); } else { $sCode = '_native_'; $iLen = unpack( "c", $sWk ); $sTxt = substr( $sWk, 1, $iLen ); } _NewCell( $oBook, $iR, $iC, Kind => 'String', Val => $sTxt, FormatNo => $iF, Format => $oBook->{Format}[$iF], Numeric => 0, Code => $sCode, Book => $oBook, ); #2.MaxRow, MaxCol, MinRow, MinCol _SetDimension( $oBook, $iR, $iC, $iC ); } #------------------------------------------------------------------------------ # _subLabel (for Spreadsheet::ParseExcel) DK:P344 #------------------------------------------------------------------------------ sub _subLabel { my ( $oBook, $bOp, $bLen, $sWk ) = @_; my ( $iR, $iC, $iF ) = unpack( "v3", $sWk ); my ( $sLbl, $sCode ); #BIFF8 if ( $oBook->{BIFFVersion} >= verBIFF8 ) { my ( $raBuff, $iLen, $iStPos, $iLenS ) = _convBIFF8String( $oBook, substr( $sWk, 6 ), 1 ); $sLbl = $raBuff->[0]; $sCode = ( $raBuff->[1] ) ? 'ucs2' : undef; } #Before BIFF8 else { $sLbl = substr( $sWk, 8 ); $sCode = '_native_'; } _NewCell( $oBook, $iR, $iC, Kind => 'Label', Val => $sLbl, FormatNo => $iF, Format => $oBook->{Format}[$iF], Numeric => 0, Code => $sCode, Book => $oBook, ); #2.MaxRow, MaxCol, MinRow, MinCol _SetDimension( $oBook, $iR, $iC, $iC ); } ############################################################################### # # _subMulRK() # # Decode the Multiple RK BIFF record. # sub _subMulRK { my ( $workbook, $biff_number, $length, $data ) = @_; # JMN: I don't know why this is here. return if $workbook->{SheetCount} <= 0; my ( $row, $first_col ) = unpack( "v2", $data ); my $last_col = unpack( "v", substr( $data, length( $data ) - 2, 2 ) ); # Iterate over the RK array and decode the data. my $pos = 4; for my $col ( $first_col .. $last_col ) { my $data = substr( $data, $pos, 6 ); my ( $format_index, $rk_number ) = unpack 'vV', $data; my $number = _decode_rk_number( $rk_number ); _NewCell( $workbook, $row, $col, Kind => 'MulRK', Val => $number, FormatNo => $format_index, Format => $workbook->{Format}->[$format_index], Numeric => 1, Code => undef, Book => $workbook, ); $pos += 6; } # Store the max and min row/col values. 
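# (A MULRK record always describes a single row, so one call covers the
# whole $first_col..$last_col span.)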
_SetDimension( $workbook, $row, $first_col, $last_col ); } #------------------------------------------------------------------------------ # _subMulBlank (for Spreadsheet::ParseExcel) DK:P349 #------------------------------------------------------------------------------ sub _subMulBlank { my ( $oBook, $bOp, $bLen, $sWk ) = @_; my ( $iR, $iSc ) = unpack( "v2", $sWk ); my $iEc = unpack( "v", substr( $sWk, length( $sWk ) - 2, 2 ) ); my $iPos = 4; for ( my $iC = $iSc ; $iC <= $iEc ; $iC++ ) { my $iF = unpack( 'v', substr( $sWk, $iPos, 2 ) ); _NewCell( $oBook, $iR, $iC, Kind => 'MulBlank', Val => '', FormatNo => $iF, Format => $oBook->{Format}[$iF], Numeric => 0, Code => undef, Book => $oBook, ); $iPos += 2; } #2.MaxRow, MaxCol, MinRow, MinCol _SetDimension( $oBook, $iR, $iSc, $iEc ); } #------------------------------------------------------------------------------ # _subLabelSST (for Spreadsheet::ParseExcel) DK: P345 #------------------------------------------------------------------------------ sub _subLabelSST { my ( $oBook, $bOp, $bLen, $sWk ) = @_; my ( $iR, $iC, $iF, $iIdx ) = unpack( 'v3V', $sWk ); _NewCell( $oBook, $iR, $iC, Kind => 'PackedIdx', Val => $oBook->{PkgStr}[$iIdx]->{Text}, FormatNo => $iF, Format => $oBook->{Format}[$iF], Numeric => 0, Code => ( $oBook->{PkgStr}[$iIdx]->{Unicode} ) ? 'ucs2' : undef, Book => $oBook, Rich => $oBook->{PkgStr}[$iIdx]->{Rich}, ); #2.MaxRow, MaxCol, MinRow, MinCol _SetDimension( $oBook, $iR, $iC, $iC ); } #------------------------------------------------------------------------------ # _subFlg1904 (for Spreadsheet::ParseExcel) DK:P296 #------------------------------------------------------------------------------ sub _subFlg1904 { my ( $oBook, $bOp, $bLen, $sWk ) = @_; $oBook->{Flg1904} = unpack( "v", $sWk ); } #------------------------------------------------------------------------------ # _subRow (for Spreadsheet::ParseExcel) DK:P403 #------------------------------------------------------------------------------ sub _subRow { my ( $oBook, $bOp, $bLen, $sWk ) = @_; return undef unless ( defined $oBook->{_CurSheet} ); #0. 
Get Worksheet info (MaxRow, MaxCol, MinRow, MinCol) my ( $iR, $iSc, $iEc, $iHght, $undef1, $undef2, $iGr, $iXf ) = unpack( "v8", $sWk ); $iEc--; if ( $iGr & 0x20 ) { $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{RowHidden}[$iR] = 1; } $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{RowHeight}[$iR] = $iHght / 20; #2.MaxRow, MaxCol, MinRow, MinCol _SetDimension( $oBook, $iR, $iSc, $iEc ); } #------------------------------------------------------------------------------ # _SetDimension (for Spreadsheet::ParseExcel) #------------------------------------------------------------------------------ sub _SetDimension { my ( $oBook, $iR, $iSc, $iEc ) = @_; return undef unless ( defined $oBook->{_CurSheet} ); #2.MaxRow, MaxCol, MinRow, MinCol #2.1 MinRow $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{MinRow} = $iR unless ( defined $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{MinRow} ) and ( $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{MinRow} <= $iR ); #2.2 MaxRow $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{MaxRow} = $iR unless ( defined $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{MaxRow} ) and ( $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{MaxRow} > $iR ); #2.3 MinCol $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{MinCol} = $iSc unless ( defined $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{MinCol} ) and ( $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{MinCol} <= $iSc ); #2.4 MaxCol $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{MaxCol} = $iEc unless ( defined $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{MaxCol} ) and ( $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{MaxCol} > $iEc ); } #------------------------------------------------------------------------------ # _subDefaultRowHeight (for Spreadsheet::ParseExcel) DK: P318 #------------------------------------------------------------------------------ sub _subDefaultRowHeight { my ( $oBook, $bOp, $bLen, $sWk ) = @_; return undef unless ( defined $oBook->{_CurSheet} ); #1. RowHeight my ( $iDum, $iHght ) = unpack( "v2", $sWk ); $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{DefRowHeight} = $iHght / 20; } #------------------------------------------------------------------------------ # _subStandardWidth(for Spreadsheet::ParseExcel) DK:P413 #------------------------------------------------------------------------------ sub _subStandardWidth { my ( $oBook, $bOp, $bLen, $sWk ) = @_; my $iW = unpack( "v", $sWk ); $oBook->{StandardWidth} = _convert_col_width( $oBook, $iW ); } ############################################################################### # # _subDefColWidth() # # Read the DEFCOLWIDTH Biff record. This gives the width in terms of chars # and is different from the width in the COLINFO record. # sub _subDefColWidth { my ( $self, $record, $length, $data ) = @_; my $width = unpack 'v', $data; # Adjustment for default Arial 10 width. $width = 8.43 if $width == 8; $self->{Worksheet}->[ $self->{_CurSheet} ]->{DefColWidth} = $width; } ############################################################################### # # _convert_col_width() # # Converts from the internal Excel column width units to user units seen in the # interface. It is first necessary to convert the internal width to pixels and # then to user units. The conversion is specific to a default font of Arial 10. # TODO, the conversion should be extended to other fonts and sizes. # sub _convert_col_width { my $self = shift; my $excel_width = shift; # Convert from Excel units to pixels (rounded up). my $pixels = int( 0.5 + $excel_width * 7 / 256 ); # Convert from pixels to user units. 
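# Worked example for the typical Arial 10 default column: an internal width
# of 2340 gives int( 0.5 + 2340 * 7 / 256 ) = 64 pixels, and
# ( 64 - 5 ) / 7 = 8.43 user units.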
# The conversion is different for columns <= 1 user unit (12 pixels). my $user_width; if ( $pixels <= 12 ) { $user_width = $pixels / 12; } else { $user_width = ( $pixels - 5 ) / 7; } # Round up to 2 decimal places. $user_width = int( $user_width * 100 + 0.5 ) / 100; return $user_width; } #------------------------------------------------------------------------------ # _subColInfo (for Spreadsheet::ParseExcel) DK:P309 #------------------------------------------------------------------------------ sub _subColInfo { my ( $oBook, $bOp, $bLen, $sWk ) = @_; return undef unless defined $oBook->{_CurSheet}; my ( $iSc, $iEc, $iW, $iXF, $iGr ) = unpack( "v5", $sWk ); for ( my $i = $iSc ; $i <= $iEc ; $i++ ) { $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{ColWidth}[$i] = _convert_col_width( $oBook, $iW ); $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{ColFmtNo}[$i] = $iXF; if ( $iGr & 0x01 ) { $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{ColHidden}[$i] = 1; } } } #------------------------------------------------------------------------------ # _subWindow1 Window information P 273 #------------------------------------------------------------------------------ sub _subWindow1 { my ( $workbook, $op, $len, $wk ) = @_; return if ( $workbook->{BIFFVersion} <= verBIFF4() ); my ( $hpos, $vpos, $width, $height, $options, $active, $firsttab, $numselected, $tabbarwidth ) = unpack( "v9", $wk ); $workbook->{ActiveSheet} = $active; } #------------------------------------------------------------------------------ # _subSheetLayout OpenOffice 5.96 (P207) #------------------------------------------------------------------------------ sub _subSheetLayout { my ( $workbook, $op, $len, $wk ) = @_; my @unused; ( my $rc, @unused[ 1 .. 10 ], @unused[ 11 .. 14 ], my $color, @unused[ 15, 16 ] ) = unpack( "vC10C4vC2", $wk ); return unless ( $rc == 0x0862 ); $workbook->{Worksheet}[ $workbook->{_CurSheet} ]->{TabColor} = $color; } #------------------------------------------------------------------------------ # _subHyperlink OpenOffice 5.96 (P182) # # Also see: http://msdn.microsoft.com/en-us/library/gg615407(v=office.14).aspx #------------------------------------------------------------------------------ # Helper: Extract a GID, returns as text string sub _getguid { my( $wk ) = @_; my( $text, $guidl, $guids1, $guids2, @guidb ); ( $guidl, $guids1, $guids2, @guidb[0..7] ) = unpack( 'Vv2C8', $wk ); $text = sprintf( '%08X-%04X-%04X-%02X%02X-%02X%02X%02X%02X%02X%02X', $guidl, $guids1, $guids2, @guidb); return $text; } # Helper: Extract a counted (16-bit) unicode string, returns string, # updates $offset # $zterm == 1 if string is null-terminated. 
# $bc if length is in bytes (not chars) sub _getustr { my( $wk, $offset, $zterm, $bc ) = @_; my $len = unpack( 'V', substr( $wk, $offset ) ); $offset += 4; if( $bc ) { $len /= 2; } $len -= $zterm; my $text = join( '', map { chr $_ } unpack( "v$len", substr( $wk, $offset ) ) ); $text =~ s/\0.*\z// if( $zterm ); $_[1] = ( $offset += ($len + $zterm) *2 ); return $text; } # HYPERLINK record sub _subHyperlink { my ( $workbook, $op, $len, $wk ) = @_; # REF my( $srow, $erow, $scol, $ecol ) = unpack( 'v4', $wk ); my $guid = _getguid( substr( $wk, 8 ) ); return unless( $guid eq '79EAC9D0-BAF9-11CE-8C82-00AA004BA90B' ); my( $stmvers, $flags ) = unpack( 'VV', substr( $wk, 24 ) ); return if( $flags & 0x60 || $stmvers != 2 ); my $offset = 32; my( $desc,$frame, $link, $mark ); if( ($flags & 0x14) == 0x14 ) { $desc = _getustr( $wk, $offset, 1, 0 ); } if( $flags & 0x80 ) { $frame = _getustr( $wk, $offset, 1, 0 ); } $link = ''; if( $flags & 0x100 ) { # UNC path $link = 'file:///' . _getustr( $wk, $offset, 1, 0 ); } elsif( $flags & 0x1 ) { # Has link (URI) $guid = _getguid( substr( $wk, $offset ) ); $offset += 16; if( $guid eq '79EAC9E0-BAF9-11CE-8C82-00AA004BA90B' ) { # URI $link = _getustr( $wk, $offset, 1, 1 ); } elsif( $guid eq '00000303-0000-0000-C000-000000000046' ) { # Local file $link = 'file:///'; # !($flags & 2) = 'relative path' if( !($flags & 0x2) ) { my $file = $workbook->{File}; if( defined $file && length $file ) { $link .= (fileparse($file))[1]; } else { $link .= '%REL%' } } my $dirn = unpack( 'v', substr( $wk, $offset ) ); $offset += 2; $link .= '..\\' x $dirn; my $namelen = unpack( 'V', substr( $wk, $offset ) ); $offset += 4; my $name = unpack( 'Z*', substr( $wk, $offset ) ); $offset += $namelen; $offset += 24; my $size = unpack( 'V', substr( $wk, $offset ) ); $offset += 4; if( $size ) { my $xlen = unpack( 'V', substr( $wk, $offset ) ) / 2; $name = join( '', map { chr $_} unpack( "v$xlen", substr( $wk, $offset+4+2) ) ); $offset += $size; } $link .= $name; } else { return; } } # Text mark (Fragment identifier) if( $flags & 0x8 ) { # Cellrefs contain reserved characters, so url-encode my $fragment = _getustr( $wk, $offset, 1 ); $fragment =~ s/([^\w.~-])/sprintf( '%%%02X', ord( $1 ) )/gems; $link .= '#' . 
$fragment; } # Update loop at end of parse() if this changes push @{ $workbook->{Worksheet}[ $workbook->{_CurSheet} ]->{HyperLinks} }, [ $desc, $link, $frame, $srow, $erow, $scol, $ecol ]; } #------------------------------------------------------------------------------ # _subSST (for Spreadsheet::ParseExcel) DK:P413 #------------------------------------------------------------------------------ sub _subSST { my ( $oBook, $bOp, $bLen, $sWk ) = @_; _subStrWk( $oBook, substr( $sWk, 8 ) ); } #------------------------------------------------------------------------------ # _subContinue (for Spreadsheet::ParseExcel) DK:P311 #------------------------------------------------------------------------------ sub _subContinue { my ( $oBook, $bOp, $bLen, $sWk ) = @_; #if(defined $self->{FuncTbl}->{$bOp}) { # $self->{FuncTbl}->{$PREFUNC}->($oBook, $bOp, $bLen, $sWk); #} _subStrWk( $oBook, $sWk, 1 ) if ( $PREFUNC == 0xFC ); } #------------------------------------------------------------------------------ # _subWriteAccess (for Spreadsheet::ParseExcel) DK:P451 #------------------------------------------------------------------------------ sub _subWriteAccess { my ( $oBook, $bOp, $bLen, $sWk ) = @_; return if ( defined $oBook->{_Author} ); #BIFF8 if ( $oBook->{BIFFVersion} >= verBIFF8 ) { $oBook->{Author} = _convBIFF8String( $oBook, $sWk ); } #Before BIFF8 else { my ( $iLen ) = unpack( "c", $sWk ); $oBook->{Author} = $oBook->{FmtClass}->TextFmt( substr( $sWk, 1, $iLen ), '_native_' ); } } #------------------------------------------------------------------------------ # _convBIFF8String (for Spreadsheet::ParseExcel) #------------------------------------------------------------------------------ sub _convBIFF8String { my ( $oBook, $sWk, $iCnvFlg ) = @_; my ( $iLen, $iFlg ) = unpack( "vc", $sWk ); my ( $iHigh, $iExt, $iRich ) = ( $iFlg & 0x01, $iFlg & 0x04, $iFlg & 0x08 ); my ( $iStPos, $iExtCnt, $iRichCnt, $sStr ); #2. Rich and Ext if ( $iRich && $iExt ) { $iStPos = 9; ( $iRichCnt, $iExtCnt ) = unpack( 'vV', substr( $sWk, 3, 6 ) ); } elsif ( $iRich ) { #Only Rich $iStPos = 5; $iRichCnt = unpack( 'v', substr( $sWk, 3, 2 ) ); $iExtCnt = 0; } elsif ( $iExt ) { #Only Ext $iStPos = 7; $iRichCnt = 0; $iExtCnt = unpack( 'V', substr( $sWk, 3, 4 ) ); } else { #Nothing Special $iStPos = 3; $iExtCnt = 0; $iRichCnt = 0; } #3.Get String if ( $iHigh ) { #Compressed $iLen *= 2; $sStr = substr( $sWk, $iStPos, $iLen ); _SwapForUnicode( \$sStr ); $sStr = $oBook->{FmtClass}->TextFmt( $sStr, 'ucs2' ) unless ( $iCnvFlg ); } else { #Not Compressed $sStr = substr( $sWk, $iStPos, $iLen ); $sStr = $oBook->{FmtClass}->TextFmt( $sStr, undef ) unless ( $iCnvFlg ); } #4. 
return if ( wantarray ) { #4.1 Get Rich and Ext if ( length( $sWk ) < $iStPos + $iLen + $iRichCnt * 4 + $iExtCnt ) { return ( [ undef, $iHigh, undef, undef ], $iStPos + $iLen + $iRichCnt * 4 + $iExtCnt, $iStPos, $iLen ); } else { return ( [ $sStr, $iHigh, substr( $sWk, $iStPos + $iLen, $iRichCnt * 4 ), substr( $sWk, $iStPos + $iLen + $iRichCnt * 4, $iExtCnt ) ], $iStPos + $iLen + $iRichCnt * 4 + $iExtCnt, $iStPos, $iLen ); } } else { return $sStr; } } #------------------------------------------------------------------------------ # _subXF (for Spreadsheet::ParseExcel) DK:P453 #------------------------------------------------------------------------------ sub _subXF { my ( $oBook, $bOp, $bLen, $sWk ) = @_; my ( $iFnt, $iIdx ); my ( $iLock, $iHidden, $iStyle, $i123, $iAlH, $iWrap, $iAlV, $iJustL, $iRotate, $iInd, $iShrink, $iMerge, $iReadDir, $iBdrD, $iBdrSL, $iBdrSR, $iBdrST, $iBdrSB, $iBdrSD, $iBdrCL, $iBdrCR, $iBdrCT, $iBdrCB, $iBdrCD, $iFillP, $iFillCF, $iFillCB ); if ( $oBook->{BIFFVersion} == verBIFF2 ) { die "Unsupported file format: Excel Version 2.0 (4.0 or later required)"; } elsif ( $oBook->{BIFFVersion} == verBIFF3 ) { die "Unsupported file format: Excel Version 3.0 (4.0 or later required)"; } elsif ( $oBook->{BIFFVersion} == verBIFF4 ) { # Minimal support for Excel 4. We just get the font and format indices # so that the cell data value can be formatted. ( $iFnt, $iIdx, ) = unpack( "CC", $sWk ); } elsif ( $oBook->{BIFFVersion} == verBIFF8 ) { my ( $iGen, $iAlign, $iGen2, $iBdr1, $iBdr2, $iBdr3, $iPtn ); ( $iFnt, $iIdx, $iGen, $iAlign, $iGen2, $iBdr1, $iBdr2, $iBdr3, $iPtn ) = unpack( "v7Vv", $sWk ); $iLock = ( $iGen & 0x01 ) ? 1 : 0; $iHidden = ( $iGen & 0x02 ) ? 1 : 0; $iStyle = ( $iGen & 0x04 ) ? 1 : 0; $i123 = ( $iGen & 0x08 ) ? 1 : 0; $iAlH = ( $iAlign & 0x07 ); $iWrap = ( $iAlign & 0x08 ) ? 1 : 0; $iAlV = ( $iAlign & 0x70 ) / 0x10; $iJustL = ( $iAlign & 0x80 ) ? 1 : 0; $iRotate = ( ( $iAlign & 0xFF00 ) / 0x100 ) & 0x00FF; $iRotate = 90 if ( $iRotate == 255 ); $iRotate = 90 - $iRotate if ( $iRotate > 90 ); $iInd = ( $iGen2 & 0x0F ); $iShrink = ( $iGen2 & 0x10 ) ? 1 : 0; $iMerge = ( $iGen2 & 0x20 ) ? 1 : 0; $iReadDir = ( ( $iGen2 & 0xC0 ) / 0x40 ) & 0x03; $iBdrSL = $iBdr1 & 0x0F; $iBdrSR = ( ( $iBdr1 & 0xF0 ) / 0x10 ) & 0x0F; $iBdrST = ( ( $iBdr1 & 0xF00 ) / 0x100 ) & 0x0F; $iBdrSB = ( ( $iBdr1 & 0xF000 ) / 0x1000 ) & 0x0F; $iBdrCL = ( ( $iBdr2 & 0x7F ) ) & 0x7F; $iBdrCR = ( ( $iBdr2 & 0x3F80 ) / 0x80 ) & 0x7F; $iBdrD = ( ( $iBdr2 & 0xC000 ) / 0x4000 ) & 0x3; $iBdrCT = ( ( $iBdr3 & 0x7F ) ) & 0x7F; $iBdrCB = ( ( $iBdr3 & 0x3F80 ) / 0x80 ) & 0x7F; $iBdrCD = ( ( $iBdr3 & 0x1FC000 ) / 0x4000 ) & 0x7F; $iBdrSD = ( ( $iBdr3 & 0x1E00000 ) / 0x200000 ) & 0xF; $iFillP = ( ( $iBdr3 & 0xFC000000 ) / 0x4000000 ) & 0x3F; $iFillCF = ( $iPtn & 0x7F ); $iFillCB = ( ( $iPtn & 0x3F80 ) / 0x80 ) & 0x7F; } else { my ( $iGen, $iAlign, $iPtn, $iPtn2, $iBdr1, $iBdr2 ); ( $iFnt, $iIdx, $iGen, $iAlign, $iPtn, $iPtn2, $iBdr1, $iBdr2 ) = unpack( "v8", $sWk ); $iLock = ( $iGen & 0x01 ) ? 1 : 0; $iHidden = ( $iGen & 0x02 ) ? 1 : 0; $iStyle = ( $iGen & 0x04 ) ? 1 : 0; $i123 = ( $iGen & 0x08 ) ? 1 : 0; $iAlH = ( $iAlign & 0x07 ); $iWrap = ( $iAlign & 0x08 ) ? 1 : 0; $iAlV = ( $iAlign & 0x70 ) / 0x10; $iJustL = ( $iAlign & 0x80 ) ? 
1 : 0; $iRotate = ( ( $iAlign & 0x300 ) / 0x100 ) & 0x3; $iFillCF = ( $iPtn & 0x7F ); $iFillCB = ( ( $iPtn & 0x1F80 ) / 0x80 ) & 0x7F; $iFillP = ( $iPtn2 & 0x3F ); $iBdrSB = ( ( $iPtn2 & 0x1C0 ) / 0x40 ) & 0x7; $iBdrCB = ( ( $iPtn2 & 0xFE00 ) / 0x200 ) & 0x7F; $iBdrST = ( $iBdr1 & 0x07 ); $iBdrSL = ( ( $iBdr1 & 0x38 ) / 0x8 ) & 0x07; $iBdrSR = ( ( $iBdr1 & 0x1C0 ) / 0x40 ) & 0x07; $iBdrCT = ( ( $iBdr1 & 0xFE00 ) / 0x200 ) & 0x7F; $iBdrCL = ( $iBdr2 & 0x7F ) & 0x7F; $iBdrCR = ( ( $iBdr2 & 0x3F80 ) / 0x80 ) & 0x7F; } push @{ $oBook->{Format} }, Spreadsheet::ParseExcel::Format->new( FontNo => $iFnt, Font => $oBook->{Font}[$iFnt], FmtIdx => $iIdx, Lock => $iLock, Hidden => $iHidden, Style => $iStyle, Key123 => $i123, AlignH => $iAlH, Wrap => $iWrap, AlignV => $iAlV, JustLast => $iJustL, Rotate => $iRotate, Indent => $iInd, Shrink => $iShrink, Merge => $iMerge, ReadDir => $iReadDir, BdrStyle => [ $iBdrSL, $iBdrSR, $iBdrST, $iBdrSB ], BdrColor => [ $iBdrCL, $iBdrCR, $iBdrCT, $iBdrCB ], BdrDiag => [ $iBdrD, $iBdrSD, $iBdrCD ], Fill => [ $iFillP, $iFillCF, $iFillCB ], ); } #------------------------------------------------------------------------------ # _subFormat (for Spreadsheet::ParseExcel) DK: P336 #------------------------------------------------------------------------------ sub _subFormat { my ( $oBook, $bOp, $bLen, $sWk ) = @_; my $sFmt; if ( $oBook->{BIFFVersion} <= verBIFF5 ) { $sFmt = substr( $sWk, 3, unpack( 'c', substr( $sWk, 2, 1 ) ) ); $sFmt = $oBook->{FmtClass}->TextFmt( $sFmt, '_native_' ); } else { $sFmt = _convBIFF8String( $oBook, substr( $sWk, 2 ) ); } my $format_index = unpack( 'v', substr( $sWk, 0, 2 ) ); # Excel 4 and earlier used an index of 0 to indicate that a built-in format # that was stored implicitly. if ( $oBook->{BIFFVersion} <= verBIFF4 && $format_index == 0 ) { $format_index = keys %{ $oBook->{FormatStr} }; } $oBook->{FormatStr}->{$format_index} = $sFmt; } #------------------------------------------------------------------------------ # _subPalette (for Spreadsheet::ParseExcel) DK: P393 #------------------------------------------------------------------------------ sub _subPalette { my ( $oBook, $bOp, $bLen, $sWk ) = @_; for ( my $i = 0 ; $i < unpack( 'v', $sWk ) ; $i++ ) { # push @aColor, unpack('H6', substr($sWk, $i*4+2)); $oBook->{aColor}[ $i + 8 ] = unpack( 'H6', substr( $sWk, $i * 4 + 2 ) ); } } #------------------------------------------------------------------------------ # _subFont (for Spreadsheet::ParseExcel) DK:P333 #------------------------------------------------------------------------------ sub _subFont { my ( $oBook, $bOp, $bLen, $sWk ) = @_; my ( $iHeight, $iAttr, $iCIdx, $iBold, $iSuper, $iUnderline, $sFntName ); my ( $bBold, $bItalic, $bUnderline, $bStrikeout ); if ( $oBook->{BIFFVersion} == verBIFF8 ) { ( $iHeight, $iAttr, $iCIdx, $iBold, $iSuper, $iUnderline ) = unpack( "v5c", $sWk ); my ( $iSize, $iHigh ) = unpack( 'cc', substr( $sWk, 14, 2 ) ); if ( $iHigh ) { $sFntName = substr( $sWk, 16, $iSize * 2 ); _SwapForUnicode( \$sFntName ); $sFntName = $oBook->{FmtClass}->TextFmt( $sFntName, 'ucs2' ); } else { $sFntName = substr( $sWk, 16, $iSize ); $sFntName = $oBook->{FmtClass}->TextFmt( $sFntName, '_native_' ); } $bBold = ( $iBold >= 0x2BC ) ? 1 : 0; $bItalic = ( $iAttr & 0x02 ) ? 1 : 0; $bStrikeout = ( $iAttr & 0x08 ) ? 1 : 0; $bUnderline = ( $iUnderline ) ? 
1 : 0; } elsif ( $oBook->{BIFFVersion} == verBIFF5 ) { ( $iHeight, $iAttr, $iCIdx, $iBold, $iSuper, $iUnderline ) = unpack( "v5c", $sWk ); $sFntName = $oBook->{FmtClass} ->TextFmt( substr( $sWk, 15, unpack( "c", substr( $sWk, 14, 1 ) ) ), '_native_' ); $bBold = ( $iBold >= 0x2BC ) ? 1 : 0; $bItalic = ( $iAttr & 0x02 ) ? 1 : 0; $bStrikeout = ( $iAttr & 0x08 ) ? 1 : 0; $bUnderline = ( $iUnderline ) ? 1 : 0; } else { ( $iHeight, $iAttr ) = unpack( "v2", $sWk ); $iCIdx = undef; $iSuper = 0; $bBold = ( $iAttr & 0x01 ) ? 1 : 0; $bItalic = ( $iAttr & 0x02 ) ? 1 : 0; $bUnderline = ( $iAttr & 0x04 ) ? 1 : 0; $bStrikeout = ( $iAttr & 0x08 ) ? 1 : 0; $sFntName = substr( $sWk, 5, unpack( "c", substr( $sWk, 4, 1 ) ) ); } push @{ $oBook->{Font} }, Spreadsheet::ParseExcel::Font->new( Height => $iHeight / 20.0, Attr => $iAttr, Color => $iCIdx, Super => $iSuper, UnderlineStyle => $iUnderline, Name => $sFntName, Bold => $bBold, Italic => $bItalic, Underline => $bUnderline, Strikeout => $bStrikeout, ); #Skip Font[4] push @{ $oBook->{Font} }, {} if ( scalar( @{ $oBook->{Font} } ) == 4 ); } #------------------------------------------------------------------------------ # _subBoundSheet (for Spreadsheet::ParseExcel): DK: P307 #------------------------------------------------------------------------------ sub _subBoundSheet { my ( $oBook, $bOp, $bLen, $sWk ) = @_; my ( $iPos, $iGr, $iKind ) = unpack( "Lc2", $sWk ); $iKind &= 0x0F; return if ( ( $iKind != 0x00 ) && ( $iKind != 0x01 ) ); if ( $oBook->{BIFFVersion} >= verBIFF8 ) { my ( $iSize, $iUni ) = unpack( "cc", substr( $sWk, 6, 2 ) ); my $sWsName = substr( $sWk, 8 ); if ( $iUni & 0x01 ) { _SwapForUnicode( \$sWsName ); $sWsName = $oBook->{FmtClass}->TextFmt( $sWsName, 'ucs2' ); } $oBook->{Worksheet}[ $oBook->{SheetCount} ] = Spreadsheet::ParseExcel::Worksheet->new( Name => $sWsName, Kind => $iKind, _Pos => $iPos, _Book => $oBook, _SheetNo => $oBook->{SheetCount}, SheetHidden => $iGr & 0x03 ); } else { $oBook->{Worksheet}[ $oBook->{SheetCount} ] = Spreadsheet::ParseExcel::Worksheet->new( Name => $oBook->{FmtClass}->TextFmt( substr( $sWk, 7 ), '_native_' ), Kind => $iKind, _Pos => $iPos, _Book => $oBook, _SheetNo => $oBook->{SheetCount}, SheetHidden => $iGr & 0x03 ); } $oBook->{SheetCount}++; } #------------------------------------------------------------------------------ # _subHeader (for Spreadsheet::ParseExcel) DK: P340 #------------------------------------------------------------------------------ sub _subHeader { my ( $oBook, $bOp, $bLen, $sWk ) = @_; return undef unless ( defined $oBook->{_CurSheet} ); my $sW; if ( !defined $sWk ) { $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{Header} = undef; return; } #BIFF8 if ( $oBook->{BIFFVersion} >= verBIFF8 ) { $sW = _convBIFF8String( $oBook, $sWk ); $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{Header} = ( $sW eq "\x00" ) ? undef : $sW; } #Before BIFF8 else { my ( $iLen ) = unpack( "c", $sWk ); $sW = $oBook->{FmtClass}->TextFmt( substr( $sWk, 1, $iLen ), '_native_' ); $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{Header} = ( $sW eq "\x00\x00\x00" ) ? 
undef : $sW; } } #------------------------------------------------------------------------------ # _subFooter (for Spreadsheet::ParseExcel) DK: P335 #------------------------------------------------------------------------------ sub _subFooter { my ( $oBook, $bOp, $bLen, $sWk ) = @_; return undef unless ( defined $oBook->{_CurSheet} ); my $sW; if ( !defined $sWk ) { $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{Footer} = undef; return; } #BIFF8 if ( $oBook->{BIFFVersion} >= verBIFF8 ) { $sW = _convBIFF8String( $oBook, $sWk ); $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{Footer} = ( $sW eq "\x00" ) ? undef : $sW; } #Before BIFF8 else { my ( $iLen ) = unpack( "c", $sWk ); $sW = $oBook->{FmtClass}->TextFmt( substr( $sWk, 1, $iLen ), '_native_' ); $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{Footer} = ( $sW eq "\x00\x00\x00" ) ? undef : $sW; } } #------------------------------------------------------------------------------ # _subHPageBreak (for Spreadsheet::ParseExcel) DK: P341 #------------------------------------------------------------------------------ sub _subHPageBreak { my ( $oBook, $bOp, $bLen, $sWk ) = @_; my @aBreak; my $iCnt = unpack( "v", $sWk ); return undef unless ( defined $oBook->{_CurSheet} ); #BIFF8 if ( $oBook->{BIFFVersion} >= verBIFF8 ) { for ( my $i = 0 ; $i < $iCnt ; $i++ ) { my ( $iRow, $iColB, $iColE ) = unpack( 'v3', substr( $sWk, 2 + $i * 6, 6 ) ); # push @aBreak, [$iRow, $iColB, $iColE]; push @aBreak, $iRow; } } #Before BIFF8 else { for ( my $i = 0 ; $i < $iCnt ; $i++ ) { my ( $iRow ) = unpack( 'v', substr( $sWk, 2 + $i * 2, 2 ) ); push @aBreak, $iRow; # push @aBreak, [$iRow, 0, 255]; } } @aBreak = sort { $a <=> $b } @aBreak; $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{HPageBreak} = \@aBreak; } #------------------------------------------------------------------------------ # _subVPageBreak (for Spreadsheet::ParseExcel) DK: P447 #------------------------------------------------------------------------------ sub _subVPageBreak { my ( $oBook, $bOp, $bLen, $sWk ) = @_; return undef unless ( defined $oBook->{_CurSheet} ); my @aBreak; my $iCnt = unpack( "v", $sWk ); #BIFF8 if ( $oBook->{BIFFVersion} >= verBIFF8 ) { for ( my $i = 0 ; $i < $iCnt ; $i++ ) { my ( $iCol, $iRowB, $iRowE ) = unpack( 'v3', substr( $sWk, 2 + $i * 6, 6 ) ); push @aBreak, $iCol; # push @aBreak, [$iCol, $iRowB, $iRowE]; } } #Before BIFF8 else { for ( my $i = 0 ; $i < $iCnt ; $i++ ) { my ( $iCol ) = unpack( 'v', substr( $sWk, 2 + $i * 2, 2 ) ); push @aBreak, $iCol; # push @aBreak, [$iCol, 0, 65535]; } } @aBreak = sort { $a <=> $b } @aBreak; $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{VPageBreak} = \@aBreak; } #------------------------------------------------------------------------------ # _subMargin (for Spreadsheet::ParseExcel) DK: P306, 345, 400, 440 #------------------------------------------------------------------------------ sub _subMargin { my ( $oBook, $bOp, $bLen, $sWk ) = @_; return undef unless ( defined $oBook->{_CurSheet} ); # The "Mergin" options are a workaround for a backward compatible typo. 
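# Both spellings are therefore populated below: the misspelled {*Mergin}
# keys are kept for existing callers and the corrected {*Margin} keys hold
# the same value.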
my $dWk = _convDval( substr( $sWk, 0, 8 ) ); if ( $bOp == 0x26 ) { $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{LeftMergin} = $dWk; $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{LeftMargin} = $dWk; } elsif ( $bOp == 0x27 ) { $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{RightMergin} = $dWk; $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{RightMargin} = $dWk; } elsif ( $bOp == 0x28 ) { $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{TopMergin} = $dWk; $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{TopMargin} = $dWk; } elsif ( $bOp == 0x29 ) { $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{BottomMergin} = $dWk; $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{BottomMargin} = $dWk; } } #------------------------------------------------------------------------------ # _subHcenter (for Spreadsheet::ParseExcel) DK: P340 #------------------------------------------------------------------------------ sub _subHcenter { my ( $oBook, $bOp, $bLen, $sWk ) = @_; return undef unless ( defined $oBook->{_CurSheet} ); my $iWk = unpack( "v", $sWk ); $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{HCenter} = $iWk; } #------------------------------------------------------------------------------ # _subVcenter (for Spreadsheet::ParseExcel) DK: P447 #------------------------------------------------------------------------------ sub _subVcenter { my ( $oBook, $bOp, $bLen, $sWk ) = @_; return undef unless ( defined $oBook->{_CurSheet} ); my $iWk = unpack( "v", $sWk ); $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{VCenter} = $iWk; } #------------------------------------------------------------------------------ # _subPrintGridlines (for Spreadsheet::ParseExcel) DK: P397 #------------------------------------------------------------------------------ sub _subPrintGridlines { my ( $oBook, $bOp, $bLen, $sWk ) = @_; return undef unless ( defined $oBook->{_CurSheet} ); my $iWk = unpack( "v", $sWk ); $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{PrintGrid} = $iWk; } #------------------------------------------------------------------------------ # _subPrintHeaders (for Spreadsheet::ParseExcel) DK: P397 #------------------------------------------------------------------------------ sub _subPrintHeaders { my ( $oBook, $bOp, $bLen, $sWk ) = @_; return undef unless ( defined $oBook->{_CurSheet} ); my $iWk = unpack( "v", $sWk ); $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{PrintHeaders} = $iWk; } #------------------------------------------------------------------------------ # _subSETUP (for Spreadsheet::ParseExcel) DK: P409 #------------------------------------------------------------------------------ sub _subSETUP { my ( $oBook, $bOp, $bLen, $sWk ) = @_; return undef unless ( defined $oBook->{_CurSheet} ); # Workaround for some apps and older Excels that don't write a # complete SETUP record. return undef if $bLen != 34; my $oWkS = $oBook->{Worksheet}[ $oBook->{_CurSheet} ]; my $iGrBit; ( $oWkS->{PaperSize}, $oWkS->{Scale}, $oWkS->{PageStart}, $oWkS->{FitWidth}, $oWkS->{FitHeight}, $iGrBit, $oWkS->{Res}, $oWkS->{VRes}, ) = unpack( 'v8', $sWk ); $oWkS->{HeaderMargin} = _convDval( substr( $sWk, 16, 8 ) ); $oWkS->{FooterMargin} = _convDval( substr( $sWk, 24, 8 ) ); $oWkS->{Copis} = unpack( 'v2', substr( $sWk, 32, 2 ) ); $oWkS->{LeftToRight} = ( ( $iGrBit & 0x01 ) ? 1 : 0 ); $oWkS->{Landscape} = ( ( $iGrBit & 0x02 ) ? 1 : 0 ); $oWkS->{NoPls} = ( ( $iGrBit & 0x04 ) ? 1 : 0 ); $oWkS->{NoColor} = ( ( $iGrBit & 0x08 ) ? 1 : 0 ); $oWkS->{Draft} = ( ( $iGrBit & 0x10 ) ? 1 : 0 ); $oWkS->{Notes} = ( ( $iGrBit & 0x20 ) ? 
1 : 0 ); $oWkS->{NoOrient} = ( ( $iGrBit & 0x40 ) ? 1 : 0 ); $oWkS->{UsePage} = ( ( $iGrBit & 0x80 ) ? 1 : 0 ); # The NoPls flag indicates that the values have not been taken from an # actual printer and thus may not be accurate. # Set default scale if NoPls otherwise it may be an invalid value of 0XFF. $oWkS->{Scale} = 100 if $oWkS->{NoPls}; # Workaround for a backward compatible typo. $oWkS->{HeaderMergin} = $oWkS->{HeaderMargin}; $oWkS->{FooterMergin} = $oWkS->{FooterMargin}; } #------------------------------------------------------------------------------ # _subName (for Spreadsheet::ParseExcel) DK: P350 #------------------------------------------------------------------------------ sub _subName { my ( $oBook, $bOp, $bLen, $sWk ) = @_; my ( $iGrBit, $cKey, $cCh, $iCce, $ixAls, $iTab, $cchCust, $cchDsc, $cchHep, $cchStatus ) = unpack( 'vc2v3c4', $sWk ); #Builtin Name + Length == 1 if ( ( $iGrBit & 0x20 ) && ( $cCh == 1 ) ) { #BIFF8 if ( $oBook->{BIFFVersion} >= verBIFF8 ) { my $iName = unpack( 'n', substr( $sWk, 14 ) ); my $iSheet = unpack( 'v', substr( $sWk, 8 ) ) - 1; # Workaround for mal-formed Excel workbooks where Print_Title is # set as Global (i.e. itab = 0). Note, this will have to be # treated differently when we get around to handling global names. return undef if $iSheet == -1; if ( $iName == 6 ) { #PrintArea my ( $iSheetW, $raArea ) = _ParseNameArea( substr( $sWk, 16 ) ); $oBook->{PrintArea}[$iSheet] = $raArea; } elsif ( $iName == 7 ) { #Title my ( $iSheetW, $raArea ) = _ParseNameArea( substr( $sWk, 16 ) ); my @aTtlR = (); my @aTtlC = (); foreach my $raI ( @$raArea ) { if ( $raI->[3] == 0xFF ) { #Row Title push @aTtlR, [ $raI->[0], $raI->[2] ]; } else { #Col Title push @aTtlC, [ $raI->[1], $raI->[3] ]; } } $oBook->{PrintTitle}[$iSheet] = { Row => \@aTtlR, Column => \@aTtlC }; } } else { my $iName = unpack( 'c', substr( $sWk, 14 ) ); if ( $iName == 6 ) { #PrintArea my ( $iSheet, $raArea ) = _ParseNameArea95( substr( $sWk, 15 ) ); $oBook->{PrintArea}[$iSheet] = $raArea; } elsif ( $iName == 7 ) { #Title my ( $iSheet, $raArea ) = _ParseNameArea95( substr( $sWk, 15 ) ); my @aTtlR = (); my @aTtlC = (); foreach my $raI ( @$raArea ) { if ( $raI->[3] == 0xFF ) { #Row Title push @aTtlR, [ $raI->[0], $raI->[2] ]; } else { #Col Title push @aTtlC, [ $raI->[1], $raI->[3] ]; } } $oBook->{PrintTitle}[$iSheet] = { Row => \@aTtlR, Column => \@aTtlC }; } } } } #------------------------------------------------------------------------------ # ParseNameArea (for Spreadsheet::ParseExcel) DK: 494 (ptgAread3d) #------------------------------------------------------------------------------ sub _ParseNameArea { my ( $sObj ) = @_; my ( $iOp ); my @aRes = (); $iOp = unpack( 'C', $sObj ); my $iSheet; if ( $iOp == 0x3b ) { my ( $iWkS, $iRs, $iRe, $iCs, $iCe ) = unpack( 'v5', substr( $sObj, 1 ) ); $iSheet = $iWkS; push @aRes, [ $iRs, $iCs, $iRe, $iCe ]; } elsif ( $iOp == 0x29 ) { my $iLen = unpack( 'v', substr( $sObj, 1, 2 ) ); my $iSt = 0; while ( $iSt < $iLen ) { my ( $iOpW, $iWkS, $iRs, $iRe, $iCs, $iCe ) = unpack( 'cv5', substr( $sObj, $iSt + 3, 11 ) ); if ( $iOpW == 0x3b ) { $iSheet = $iWkS; push @aRes, [ $iRs, $iCs, $iRe, $iCe ]; } if ( $iSt == 0 ) { $iSt += 11; } else { $iSt += 12; #Skip 1 byte; } } } return ( $iSheet, \@aRes ); } #------------------------------------------------------------------------------ # ParseNameArea95 (for Spreadsheet::ParseExcel) DK: 494 (ptgAread3d) #------------------------------------------------------------------------------ sub _ParseNameArea95 { my ( $sObj ) = @_; my 
( $iOp ); my @aRes = (); $iOp = unpack( 'C', $sObj ); my $iSheet; if ( $iOp == 0x3b ) { $iSheet = unpack( 'v', substr( $sObj, 11, 2 ) ); my ( $iRs, $iRe, $iCs, $iCe ) = unpack( 'v2C2', substr( $sObj, 15, 6 ) ); push @aRes, [ $iRs, $iCs, $iRe, $iCe ]; } elsif ( $iOp == 0x29 ) { my $iLen = unpack( 'v', substr( $sObj, 1, 2 ) ); my $iSt = 0; while ( $iSt < $iLen ) { my $iOpW = unpack( 'c', substr( $sObj, $iSt + 3, 6 ) ); $iSheet = unpack( 'v', substr( $sObj, $iSt + 14, 2 ) ); my ( $iRs, $iRe, $iCs, $iCe ) = unpack( 'v2C2', substr( $sObj, $iSt + 18, 6 ) ); push @aRes, [ $iRs, $iCs, $iRe, $iCe ] if ( $iOpW == 0x3b ); if ( $iSt == 0 ) { $iSt += 21; } else { $iSt += 22; #Skip 1 byte; } } } return ( $iSheet, \@aRes ); } #------------------------------------------------------------------------------ # _subBOOL (for Spreadsheet::ParseExcel) DK: P452 #------------------------------------------------------------------------------ sub _subWSBOOL { my ( $oBook, $bOp, $bLen, $sWk ) = @_; return undef unless ( defined $oBook->{_CurSheet} ); $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{PageFit} = ( ( unpack( 'v', $sWk ) & 0x100 ) ? 1 : 0 ); } #------------------------------------------------------------------------------ # _subMergeArea (for Spreadsheet::ParseExcel) DK: (Not) #------------------------------------------------------------------------------ sub _subMergeArea { my ( $oBook, $bOp, $bLen, $sWk ) = @_; return undef unless ( defined $oBook->{_CurSheet} ); my $iCnt = unpack( "v", $sWk ); my $oWkS = $oBook->{Worksheet}[ $oBook->{_CurSheet} ]; $oWkS->{MergedArea} = [] unless ( defined $oWkS->{MergedArea} ); for ( my $i = 0 ; $i < $iCnt ; $i++ ) { my ( $iRs, $iRe, $iCs, $iCe ) = unpack( 'v4', substr( $sWk, $i * 8 + 2, 8 ) ); for ( my $iR = $iRs ; $iR <= $iRe ; $iR++ ) { for ( my $iC = $iCs ; $iC <= $iCe ; $iC++ ) { $oWkS->{Cells}[$iR][$iC]->{Merged} = 1 if ( defined $oWkS->{Cells}[$iR][$iC] ); } } push @{ $oWkS->{MergedArea} }, [ $iRs, $iCs, $iRe, $iCe ]; } } #------------------------------------------------------------------------------ # DecodeBoolErr (for Spreadsheet::ParseExcel) DK: P306 #------------------------------------------------------------------------------ sub DecodeBoolErr { my ( $iVal, $iFlg ) = @_; if ( $iFlg ) { # ERROR if ( $iVal == 0x00 ) { return "#NULL!"; } elsif ( $iVal == 0x07 ) { return "#DIV/0!"; } elsif ( $iVal == 0x0F ) { return "#VALUE!"; } elsif ( $iVal == 0x17 ) { return "#REF!"; } elsif ( $iVal == 0x1D ) { return "#NAME?"; } elsif ( $iVal == 0x24 ) { return "#NUM!"; } elsif ( $iVal == 0x2A ) { return "#N/A!"; } else { return "#ERR"; } } else { return ( $iVal ) ? "TRUE" : "FALSE"; } } ############################################################################### # # _decode_rk_number() # # Convert an encoded RK number into a real number. The RK encoding is # explained in some detail in the MS docs. It is a way of storing applicable # ints and doubles in 32bits (30 data + 2 info bits) in order to save space. # sub _decode_rk_number { my $rk_number = shift; my $number; # Check the main RK type. if ( $rk_number & 0x02 ) { # RK Type 2 and 4, a packed integer. # Shift off the info bits. $number = $rk_number >> 2; # Convert from unsigned to signed if required. $number -= 0x40000000 if $number & 0x20000000; } else { # RK Type 1 and 3, a truncated IEEE Double. # Pack the RK number into the high 30 bits of an IEEE double. $number = pack "VV", 0x0000, $rk_number & 0xFFFFFFFC; # Reverse the packed IEEE double on big-endian machines. 
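# (The pack above placed the 30 RK data bits, with the two flag bits
# cleared, in the most significant half of the 64-bit value; the low 32
# bits are zero.)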
$number = reverse $number if $BIGENDIAN; # Unpack the number. $number = unpack "d", $number; } # RK Types 3 and 4 were multiplied by 100 prior to encoding. $number /= 100 if $rk_number & 0x01; return $number; } ############################################################################### # # _subStrWk() # # Extract the workbook strings from the SST (Shared String Table) record and # any following CONTINUE records. # # The workbook strings are initially contained in the SST block but may also # occupy one or more CONTINUE blocks. Reading the CONTINUE blocks is made a # little tricky by the fact that they can contain an additional initial byte # if a string is continued from a previous block. # # Parsing is further complicated by the fact that the continued section of the # string may have a different encoding (ASCII or UTF-8) from the previous # section. Excel does this to save space. # sub _subStrWk { my ( $self, $biff_data, $is_continue ) = @_; if ( $is_continue ) { # We are reading a CONTINUE record. if ( $self->{_buffer} eq '' ) { # A CONTINUE block with no previous SST. $self->{_buffer} .= $biff_data; } elsif ( !defined $self->{_string_continued} ) { # The CONTINUE block starts with a new (non-continued) string. # Strip the Grbit byte and store the string data. $self->{_buffer} .= substr $biff_data, 1; } else { # A CONTINUE block that starts with a continued string. # The first byte (Grbit) of the CONTINUE record indicates if (0) # the continued string section is single bytes or (1) double bytes. my $grbit = ord $biff_data; my ( $str_position, $str_length ) = @{ $self->{_previous_info} }; my $buff_length = length $self->{_buffer}; if ( $buff_length >= ( $str_position + $str_length ) ) { # Not in a string. $self->{_buffer} .= $biff_data; } elsif ( ( $self->{_string_continued} & 0x01 ) == ( $grbit & 0x01 ) ) { # Same encoding as the previous block of the string. $self->{_buffer} .= substr( $biff_data, 1 ); } else { # Different encoding to the previous block of the string. if ( $grbit & 0x01 ) { # Current block is UTF-16, previous was ASCII. my ( undef, $cch ) = unpack 'vc', $self->{_buffer}; substr( $self->{_buffer}, 2, 1 ) = pack( 'C', $cch | 0x01 ); # Convert the previous ASCII, single character, portion of # the string into a double character UTF-16 string by # inserting zero bytes. for ( my $i = ( $buff_length - $str_position ) ; $i >= 1 ; $i-- ) { substr( $self->{_buffer}, $str_position + $i, 0 ) = "\x00"; } } else { # Current block is ASCII, previous was UTF-16. # Convert the current ASCII, single character, portion of # the string into a double character UTF-16 string by # inserting null bytes. my $change_length = ( $str_position + $str_length ) - $buff_length; # Length of the current CONTINUE record data. my $biff_length = length $biff_data; # Restrict the portion to be changed to the current block # if the string extends over more than one block. if ( $change_length > ( $biff_length - 1 ) * 2 ) { $change_length = ( $biff_length - 1 ) * 2; } # Insert the null bytes. for ( my $i = ( $change_length / 2 ) ; $i >= 1 ; $i-- ) { substr( $biff_data, $i + 1, 0 ) = "\x00"; } } # Strip the Grbit byte and store the string data. $self->{_buffer} .= substr $biff_data, 1; } } } else { # Not a CONTINUE block therefore an SST block. $self->{_buffer} .= $biff_data; } # Reset the state variables. 
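# (They are set again in the loop below if the last string in the buffer
# spills over into the next CONTINUE record.)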
    $self->{_string_continued} = undef;
    $self->{_previous_info}    = undef;

    # Extract out any full strings from the current buffer leaving behind a
    # partial string that is continued into the next block, or an empty
    # buffer if no string is continued.
    while ( length $self->{_buffer} >= 4 ) {
        my ( $str_info, $length, $str_position, $str_length ) =
          _convBIFF8String( $self, $self->{_buffer}, 1 );

        if ( defined $str_info->[0] ) {
            push @{ $self->{PkgStr} },
              {
                Text    => $str_info->[0],
                Unicode => $str_info->[1],
                Rich    => $str_info->[2],
                Ext     => $str_info->[3],
              };
            $self->{_buffer} = substr( $self->{_buffer}, $length );
        }
        else {
            $self->{_string_continued} = $str_info->[1];
            $self->{_previous_info} = [ $str_position, $str_length ];
            last;
        }
    }
}

#------------------------------------------------------------------------------
# _SwapForUnicode (for Spreadsheet::ParseExcel)
#------------------------------------------------------------------------------
sub _SwapForUnicode {
    my ( $sObj ) = @_;

    # Swap each pair of bytes in place to convert the UTF-16 data read from
    # the file into the byte order expected by the 'ucs2' handling.
    for ( my $i = 0 ; $i < ( length( $$sObj ) - 1 ) ; $i += 2 ) {
        my $sIt = substr( $$sObj, $i, 1 );
        substr( $$sObj, $i,     1 ) = substr( $$sObj, $i + 1, 1 );
        substr( $$sObj, $i + 1, 1 ) = $sIt;
    }
}

#------------------------------------------------------------------------------
# _NewCell (for Spreadsheet::ParseExcel)
#------------------------------------------------------------------------------
sub _NewCell {
    my ( $oBook, $iR, $iC, %rhKey ) = @_;

    return undef unless ( defined $oBook->{_CurSheet} );

    my $FmtClass = $oBook->{FmtClass};
    $rhKey{Type} =
      $FmtClass->ChkType( $rhKey{Numeric}, $rhKey{Format}{FmtIdx} );
    my $FmtStr = $oBook->{FormatStr}{ $rhKey{Format}{FmtIdx} };

    # Set "Date" type if required for numbers in a MulRK BIFF block.
    if ( defined $FmtStr && $rhKey{Type} eq "Numeric" ) {

        # Match a range of possible date formats. Note: this isn't important
        # except for reporting. The number will still be converted to a date
        # by ExcelFmt() even if 'Type' isn't set to 'Date'.
        if ( $FmtStr =~ m{^[dmy][-\\/dmy]*$}i ) {
            $rhKey{Type} = "Date";
        }
    }

    my $oCell = Spreadsheet::ParseExcel::Cell->new(
        Val      => $rhKey{Val},
        FormatNo => $rhKey{FormatNo},
        Format   => $rhKey{Format},
        Code     => $rhKey{Code},
        Type     => $rhKey{Type},
    );
    $oCell->{_Kind}  = $rhKey{Kind};
    $oCell->{_Value} = $FmtClass->ValFmt( $oCell, $oBook );
    if ( $rhKey{Rich} ) {
        my @aRich = ();
        my $sRich = $rhKey{Rich};
        for ( my $iWk = 0 ; $iWk < length( $sRich ) ; $iWk += 4 ) {
            my ( $iPos, $iFnt ) = unpack( 'v2', substr( $sRich, $iWk ) );
            push @aRich, [ $iPos, $oBook->{Font}[$iFnt] ];
        }
        $oCell->{Rich} = \@aRich;
    }

    if ( defined $oBook->{CellHandler} ) {
        if ( defined $oBook->{Object} ) {
            no strict;
            ref( $oBook->{CellHandler} ) eq "CODE"
              ? $oBook->{CellHandler}->(
                $_Object, $oBook, $oBook->{_CurSheet}, $iR, $iC, $oCell )
              : $oBook->{CellHandler}->callback( $_Object, $oBook,
                $oBook->{_CurSheet}, $iR, $iC, $oCell );
        }
        else {
            $oBook->{CellHandler}->( $oBook, $oBook->{_CurSheet}, $iR, $iC,
                $oCell );
        }
    }
    unless ( $oBook->{NotSetCell} ) {
        $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{Cells}[$iR][$iC] = $oCell;
    }
    return $oCell;
}

#------------------------------------------------------------------------------
# ColorIdxToRGB (for Spreadsheet::ParseExcel)
#
# Returns for most recently opened book for compatibility, use
# Workbook::color_idx_to_rgb instead
#
#------------------------------------------------------------------------------
sub ColorIdxToRGB {
    my ( $sPkg, $iIdx ) = @_;

    unless ( defined $currentbook ) {
        return ( ( defined $aColor[$iIdx] ) ? $aColor[$iIdx] : $aColor[0] );
    }

    return $currentbook->color_idx_to_rgb( $iIdx );
}

###############################################################################
#
# error().
#
# Return an error string for a failed parse().
#
sub error {

    my $self = shift;

    my $parse_error = $self->{_error_status};

    if ( exists $error_strings{$parse_error} ) {
        return $error_strings{$parse_error};
    }
    else {
        return 'Unknown parse error';
    }
}

###############################################################################
#
# error_code().
# # Return an error code for a failed parse(). # sub error_code { my $self = shift; return $self->{_error_status}; } ############################################################################### # # Mapping between legacy method names and new names. # { no warnings; # Ignore warnings about variables used only once. *Parse = *parse; } 1; __END__ =head1 NAME Spreadsheet::ParseExcel - Read information from an Excel file. =head1 SYNOPSIS #!/usr/bin/perl -w use strict; use Spreadsheet::ParseExcel; my $parser = Spreadsheet::ParseExcel->new(); my $workbook = $parser->parse('Book1.xls'); if ( !defined $workbook ) { die $parser->error(), ".\n"; } for my $worksheet ( $workbook->worksheets() ) { my ( $row_min, $row_max ) = $worksheet->row_range(); my ( $col_min, $col_max ) = $worksheet->col_range(); for my $row ( $row_min .. $row_max ) { for my $col ( $col_min .. $col_max ) { my $cell = $worksheet->get_cell( $row, $col ); next unless $cell; print "Row, Col = ($row, $col)\n"; print "Value = ", $cell->value(), "\n"; print "Unformatted = ", $cell->unformatted(), "\n"; print "\n"; } } } =head1 DESCRIPTION The Spreadsheet::ParseExcel module can be used to read information from Excel 95-2003 binary files. The module cannot read files in the Excel 2007 Open XML XLSX format. See the L module instead. =head1 Parser =head2 new() The C method is used to create a new C parser object. my $parser = Spreadsheet::ParseExcel->new(); It is possible to pass a password to decrypt an encrypted file: $parser = Spreadsheet::ParseExcel->new( Password => 'secret' ); Only the default Excel encryption scheme is currently supported. See L. As an advanced feature it is also possible to pass a call-back handler to the parser to control the parsing of the spreadsheet. $parser = Spreadsheet::ParseExcel->new( CellHandler => \&cell_handler, NotSetCell => 1, ); The call-back can be used to ignore certain cells or to reduce memory usage. See the section L for more information. =head2 parse($filename, $formatter) The Parser C method returns a L object. my $parser = Spreadsheet::ParseExcel->new(); my $workbook = $parser->parse('Book1.xls'); If an error occurs C returns C. In general, programs should contain a test for failed parsing as follows: my $parser = Spreadsheet::ParseExcel->new(); my $workbook = $parser->parse('Book1.xls'); if ( !defined $workbook ) { die $parser->error(), ".\n"; } The C<$filename> parameter is generally the file to be parsed. However, it can also be a filehandle or a scalar reference. The optional C<$formatter> parameter can be an reference to a L to format the value of cells. This is useful for parsing workbooks with Unicode or Asian characters: my $parser = Spreadsheet::ParseExcel->new(); my $formatter = Spreadsheet::ParseExcel::FmtJapan->new(); my $workbook = $parser->parse( 'Book1.xls', $formatter ); The L formatter also supports Unicode. If you encounter any encoding problems with the default formatter try that instead. =head2 error() The Parser C method returns an error string if a C fails: my $parser = Spreadsheet::ParseExcel->new(); my $workbook = $parser->parse('Book1.xls'); if ( !defined $workbook ) { die $parser->error(), ".\n"; } If you wish to generate you own error string you can use the C method instead (see below). The C and C values are as follows: error() error_code() ======= ============ '' 0 'File not found' 1 'No Excel data found in file' 2 'File is encrypted' 3 The C method is explained below. 
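
For example, a caller might branch on the numeric code rather than on the
error string. A minimal sketch (the file name and password are placeholders)
that retries an encrypted workbook with a password:

    my $parser   = Spreadsheet::ParseExcel->new();
    my $workbook = $parser->parse('Book1.xls');

    if ( !defined $workbook ) {
        if ( $parser->error_code() == 3 ) {    # 'File is encrypted'
            $parser   = Spreadsheet::ParseExcel->new( Password => 'secret' );
            $workbook = $parser->parse('Book1.xls');
        }
        die $parser->error(), ".\n" if !defined $workbook;
    }
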
Spreadsheet::ParseExcel will try to decrypt an encrypted Excel file using the default password or a user supplied password passed to C, see above. If these fail the module will return the C<'File is encrypted'> error. Only the default Excel encryption scheme is currently supported, see L. =head2 error_code() The Parser C method returns an error code if a C fails: my $parser = Spreadsheet::ParseExcel->new(); my $workbook = $parser->parse('Book1.xls'); if ( !defined $workbook ) { die "Got error code ", $parser->error_code, ".\n"; } This can be useful if you wish to employ you own error strings or error handling methods. =head1 Workbook A C is created via the C C method: my $parser = Spreadsheet::ParseExcel->new(); my $workbook = $parser->parse('Book1.xls'); The main methods of the Workbook class are: $workbook->worksheets() $workbook->worksheet() $workbook->worksheet_count() $workbook->get_filename() These more commonly used methods of the Workbook class are outlined below. The other, less commonly used, methods are documented in L. =head2 worksheets() Returns an array of L objects. This was most commonly used to iterate over the worksheets in a workbook: for my $worksheet ( $workbook->worksheets() ) { ... } =head2 worksheet() The C method returns a single C object using either its name or index: $worksheet = $workbook->worksheet('Sheet1'); $worksheet = $workbook->worksheet(0); Returns C if the sheet name or index doesn't exist. =head2 worksheet_count() The C method returns the number of Worksheet objects in the Workbook. my $worksheet_count = $workbook->worksheet_count(); =head2 get_filename() The C method returns the name of the Excel file of C if the data was read from a filehandle rather than a file. my $filename = $workbook->get_filename(); =head2 Other Workbook Methods For full documentation of the methods available via a Workbook object see L. =head1 Worksheet The C class encapsulates the properties of an Excel worksheet. A Worksheet object is obtained via the L or L methods. for my $worksheet ( $workbook->worksheets() ) { ... } # Or: $worksheet = $workbook->worksheet('Sheet1'); $worksheet = $workbook->worksheet(0); The most commonly used methods of the Worksheet class are: $worksheet->get_cell() $worksheet->row_range() $worksheet->col_range() $worksheet->get_name() The Spreadsheet::ParseExcel::Worksheet class exposes a lot of methods but in general very few are required unless you are writing an advanced filter. The most commonly used methods are detailed below. The others are documented in L. =head2 get_cell($row, $col) Return the L object at row C<$row> and column C<$col> if it is defined. Otherwise returns undef. my $cell = $worksheet->get_cell($row, $col); =head2 row_range() Returns a two-element list C<($min, $max)> containing the minimum and maximum defined rows in the worksheet. If there is no row defined C<$max> is smaller than C<$min>. my ( $row_min, $row_max ) = $worksheet->row_range(); =head2 col_range() Returns a two-element list C<($min, $max)> containing the minimum and maximum of defined columns in the worksheet. If there is no column defined C<$max> is smaller than C<$min>. my ( $col_min, $col_max ) = $worksheet->col_range(); =head2 get_name() The C method returns the name of the worksheet, such as 'Sheet1'. my $name = $worksheet->get_name(); =head2 Other Worksheet Methods For other, less commonly used, Worksheet methods see L. =head1 Cell The C class has the following main methods. 
$cell->value() $cell->unformatted() =head2 value() The C method returns the formatted value of the cell. my $value = $cell->value(); Formatted in this sense refers to the numeric format of the cell value. For example a number such as 40177 might be formatted as 40,117, 40117.000 or even as the date 2009/12/30. If the cell doesn't contain a numeric format then the formatted and unformatted cell values are the same, see the C method below. For a defined C<$cell> the C method will always return a value. In the case of a cell with formatting but no numeric or string contents the method will return the empty string C<''>. =head2 unformatted() The C method returns the unformatted value of the cell. my $unformatted = $cell->unformatted(); Returns the cell value without a numeric format. See the C method above. =head2 Other Cell Methods For other, less commonly used, Worksheet methods see L. =head1 Format The C class has the following properties: =head2 Format properties $format->{Font} $format->{AlignH} $format->{AlignV} $format->{Indent} $format->{Wrap} $format->{Shrink} $format->{Rotate} $format->{JustLast} $format->{ReadDir} $format->{BdrStyle} $format->{BdrColor} $format->{BdrDiag} $format->{Fill} $format->{Lock} $format->{Hidden} $format->{Style} These properties are generally only of interest to advanced users. Casual users can skip this section. =head2 $format->{Font} Returns the L object for the Format. =head2 $format->{AlignH} Returns the horizontal alignment of the format where the value has the following meaning: 0 => No alignment 1 => Left 2 => Center 3 => Right 4 => Fill 5 => Justify 6 => Center across 7 => Distributed/Equal spaced =head2 $format->{AlignV} Returns the vertical alignment of the format where the value has the following meaning: 0 => Top 1 => Center 2 => Bottom 3 => Justify 4 => Distributed/Equal spaced =head2 $format->{Indent} Returns the indent level of the C horizontal alignment. =head2 $format->{Wrap} Returns true if textwrap is on. =head2 $format->{Shrink} Returns true if "Shrink to fit" is set for the format. =head2 $format->{Rotate} Returns the text rotation. In Excel97+, it returns the angle in degrees of the text rotation. In Excel95 or earlier it returns a value as follows: 0 => No rotation 1 => Top down 2 => 90 degrees anti-clockwise, 3 => 90 clockwise =head2 $format->{JustLast} Return true if the "justify last" property is set for the format. =head2 $format->{ReadDir} Returns the direction that the text is read from. =head2 $format->{BdrStyle} Returns an array ref of border styles as follows: [ $left, $right, $top, $bottom ] =head2 $format->{BdrColor} Returns an array ref of border color indexes as follows: [ $left, $right, $top, $bottom ] =head2 $format->{BdrDiag} Returns an array ref of diagonal border kind, style and color index as follows: [$kind, $style, $color ] Where kind is: 0 => None 1 => Right-Down 2 => Right-Up 3 => Both =head2 $format->{Fill} Returns an array ref of fill pattern and color indexes as follows: [ $pattern, $front_color, $back_color ] =head2 $format->{Lock} Returns true if the cell is locked. =head2 $format->{Hidden} Returns true if the cell is Hidden. =head2 $format->{Style} Returns true if the format is a Style format. =head1 Font I Format class has these properties: =head1 Font Properties $font->{Name} $font->{Bold} $font->{Italic} $font->{Height} $font->{Underline} $font->{UnderlineStyle} $font->{Color} $font->{Strikeout} $font->{Super} =head2 $font->{Name} Returns the name of the font, for example 'Arial'. 
=head2 $font->{Bold} Returns true if the font is bold. =head2 $font->{Italic} Returns true if the font is italic. =head2 $font->{Height} Returns the size (height) of the font. =head2 $font->{Underline} Returns true if the font in underlined. =head2 $font->{UnderlineStyle} Returns the style of an underlined font where the value has the following meaning: 0 => None 1 => Single 2 => Double 33 => Single accounting 34 => Double accounting =head2 $font->{Color} Returns the color index for the font. The mapping to an RGB color is defined by each workbook. The index can be converted to a RGB string using the C<$workbook->ColorIdxToRGB()> Parser method. (Older versions of C provided the C class method, which is deprecated.) =head2 $font->{Strikeout} Returns true if the font has the strikeout property set. =head2 $font->{Super} Returns one of the following values if the superscript or subscript property of the font is set: 0 => None 1 => Superscript 2 => Subscript =head1 Formatter Class Formatters can be passed to the C method to deal with Unicode or Asian formatting. Spreadsheet::ParseExcel includes 2 formatter classes. C and C. It is also possible to create a user defined formatting class. The formatter class C should provide the following functions: =head2 ChkType($self, $is_numeric, $format_index) Method to check the type of data in the cell. Should return C, C or C. It is passed the following parameters: =over =item $self A scalar reference to the Formatter object. =item $is_numeric If true, the value seems to be number. =item $format_index The index number for the cell Format object. =back =head2 TextFmt($self, $string_data, $string_encoding) Converts the string data in the cell into the correct encoding. It is passed the following parameters: =over =item $self A scalar reference to the Formatter object. =item $string_data The original string/text data. =item $string_encoding The character encoding of original string/text. =back =head2 ValFmt($self, $cell, $workbook) Convert the original unformatted cell value into the appropriate formatted value. For instance turn a number into a formatted date. It is passed the following parameters: =over =item $self A scalar reference to the Formatter object. =item $cell A scalar reference to the Cell object. =item $workbook A scalar reference to the Workbook object. =back =head2 FmtString($self, $cell, $workbook) Get the format string for the Cell. It is passed the following parameters: =over =item $self A scalar reference to the Formatter object. =item $cell A scalar reference to the Cell object. =item $workbook A scalar reference to the Workbook object. =back =head1 Reducing the memory usage of Spreadsheet::ParseExcel In some cases a C application may consume a lot of memory when processing a large Excel file and, as a result, may fail to complete. The following explains why this can occur and how to resolve it. C processes an Excel file in two stages. In the first stage it extracts the Excel binary stream from the OLE container file using C. In the second stage it parses the binary stream to read workbook, worksheet and cell data which it then stores in memory. The majority of the memory usage is required for storing cell data. The reason for this is that as the Excel file is parsed and each cell is encountered a cell handling function creates a relatively large nested cell object that contains the cell value and all of the data that relates to the cell formatting. 
For large files (a 10MB Excel file on a 256MB system) this overhead can cause the system to grind to a halt. However, in a lot of cases when an Excel file is being processed the only information that is required are the cell values. In these cases it is possible to avoid most of the memory overhead by specifying your own cell handling function and by telling Spreadsheet::ParseExcel not to store the parsed cell data. This is achieved by passing a cell handler function to C when creating the parse object. Here is an example. #!/usr/bin/perl -w use strict; use Spreadsheet::ParseExcel; my $parser = Spreadsheet::ParseExcel->new( CellHandler => \&cell_handler, NotSetCell => 1 ); my $workbook = $parser->parse('file.xls'); sub cell_handler { my $workbook = $_[0]; my $sheet_index = $_[1]; my $row = $_[2]; my $col = $_[3]; my $cell = $_[4]; # Do something useful with the formatted cell value print $cell->value(), "\n"; } The user specified cell handler is passed as a code reference to C along with the parameter C which tells Spreadsheet::ParseExcel not to store the parsed cell. Note, you don't have to iterate over the rows and columns, this happens automatically as part of the parsing. The cell handler is passed 5 arguments. The first, C<$workbook>, is a reference to the C object that represent the parsed workbook. This can be used to access any of the C methods, see L. The second C<$sheet_index> is the zero-based index of the worksheet being parsed. The third and fourth, C<$row> and C<$col>, are the zero-based row and column number of the cell. The fifth, C<$cell>, is a reference to the C object. This is used to extract the data from the cell. See L for more information. This technique can be useful if you are writing an Excel to database filter since you can put your DB calls in the cell handler. If you don't want all of the data in the spreadsheet you can add some control logic to the cell handler. For example we can extend the previous example so that it only prints the first 10 rows of the first two worksheets in the parsed workbook by adding some C statements to the cell handler: #!/usr/bin/perl -w use strict; use Spreadsheet::ParseExcel; my $parser = Spreadsheet::ParseExcel->new( CellHandler => \&cell_handler, NotSetCell => 1 ); my $workbook = $parser->parse('file.xls'); sub cell_handler { my $workbook = $_[0]; my $sheet_index = $_[1]; my $row = $_[2]; my $col = $_[3]; my $cell = $_[4]; # Skip some worksheets and rows (inefficiently). return if $sheet_index >= 3; return if $row >= 10; # Do something with the formatted cell value print $cell->value(), "\n"; } However, this still processes the entire workbook. If you wish to save some additional processing time you can abort the parsing after you have read the data that you want, using the workbook C method: #!/usr/bin/perl -w use strict; use Spreadsheet::ParseExcel; my $parser = Spreadsheet::ParseExcel->new( CellHandler => \&cell_handler, NotSetCell => 1 ); my $workbook = $parser->parse('file.xls'); sub cell_handler { my $workbook = $_[0]; my $sheet_index = $_[1]; my $row = $_[2]; my $col = $_[3]; my $cell = $_[4]; # Skip some worksheets and rows (more efficiently). if ( $sheet_index >= 1 and $row >= 10 ) { $workbook->ParseAbort(1); return; } # Do something with the formatted cell value print $cell->value(), "\n"; } =head1 Decryption If a workbook is "protected" then Excel will encrypt the file whether a password is supplied or not. 
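As a sketch of how such a file might be opened (the file name and password below are ours, not part of the distribution), the password is simply passed to new() as described earlier:

    my $parser   = Spreadsheet::ParseExcel->new( Password => 'secret' );
    my $workbook = $parser->parse('protected.xls');

    if ( !defined $workbook ) {
        die $parser->error(), ".\n";    # e.g. 'File is encrypted'
    }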
As of version 0.59 Spreadsheet::ParseExcel supports decrypting Excel workbooks using a default or user supplied password. However, only the following encryption scheme is supported: Office 97/2000 Compatible encryption The following encryption methods are not supported: Weak Encryption (XOR) RC4, Microsoft Base Cryptographic Provider v1.0 RC4, Microsoft Base DSS and Diffie-Hellman Cryptographic Provider RC4, Microsoft DH SChannel Cryptographic Provider RC4, Microsoft Enhanced Cryptographic Provider v1.0 RC4, Microsoft Enhanced DSS and Diffie-Hellman Cryptographic Provider RC4, Microsoft Enhanced RSA and AES Cryptographic Provider RC4, Microsoft RSA SChannel Cryptographic Provider RC4, Microsoft Strong Cryptographic Provider See the following for more information on Excel encryption: L. =head1 KNOWN PROBLEMS =over =item * Issues reported by users: L =item * This module cannot read the values of formulas from files created with Spreadsheet::WriteExcel unless the user specified the values when creating the file (which is generally not the case). The reason for this is that Spreadsheet::WriteExcel writes the formula but not the formula result since it isn't in a position to calculate arbitrary Excel formulas without access to Excel's formula engine. =item * If Excel has date fields where the specified format is equal to the system-default for the short-date locale, Excel does not store the format, but defaults to an internal format which is system dependent. In these cases ParseExcel uses the date format 'yyyy-mm-dd'. =back =head1 REPORTING A BUG Bugs can be reported via rt.cpan.org. See the following for instructions on bug reporting for Spreadsheet::ParseExcel L =head1 SEE ALSO =over =item * xls2csv by Ken Prows L. =item * xls2csv and xlscat by H.Merijn Brand (these utilities are part of Spreadsheet::Read, see below). =item * excel2txt by Ken Youens-Clark, L. This is an excellent example of an Excel filter using Spreadsheet::ParseExcel. It can produce CSV, Tab delimited, Html, XML and Yaml. =item * XLSperl by Jon Allen L. This application allows you to use Perl "one-liners" with Microsoft Excel files. =item * Spreadsheet::XLSX L by Dmitry Ovsyanko. A module with a similar interface to Spreadsheet::ParseExcel for parsing Excel 2007 XLSX OpenXML files. =item * Spreadsheet::Read L by H.Merijn Brand. A single interface for reading several different spreadsheet formats. =item * Spreadsheet::WriteExcel L. A perl module for creating new Excel files. =item * Spreadsheet::ParseExcel::SaveParser L. This is a combination of Spreadsheet::ParseExcel and Spreadsheet::WriteExcel and it allows you to "rewrite" an Excel file. See the following example L. It is part of the Spreadsheet::ParseExcel distro. =item * Text::CSV_XS L by H.Merijn Brand. A fast and rigorous module for reading and writing CSV data. Don't consider rolling your own CSV handling, use this module instead. =back =head1 MAILING LIST There is a Google group for discussing and asking questions about Spreadsheet::ParseExcel. This is a good place to search to see if your question has been asked before: L =head1 DONATIONS If you'd care to donate to the Spreadsheet::ParseExcel project, you can do so via PayPal: L =head1 TODO =over =item * The current maintenance work is directed towards making the documentation more useful, improving and simplifying the API, and improving the maintainability of the code base. After that new features will be added. =item * Fix open bugs and documentation for SaveParser. 
=item * Add Formula support, Hyperlink support, Named Range support. =item * Improve Spreadsheet::ParseExcel::SaveParser compatibility with Spreadsheet::WriteExcel. =item * Improve Unicode and other encoding support. This will probably require dropping support for perls prior to 5.8+. =back =head1 ACKNOWLEDGEMENTS From Kawai Takanori: First of all, I would like to acknowledge the following valuable programs and modules: XHTML, OLE::Storage and Spreadsheet::WriteExcel. In no particular order: Yamaji Haruna, Simamoto Takesi, Noguchi Harumi, Ikezawa Kazuhiro, Suwazono Shugo, Hirofumi Morisada, Michael Edwards, Kim Namusk, Slaven Rezic, Grant Stevens, H.Merijn Brand and many many people + Kawai Mikako. Alexey Mazurin added the decryption facility. =head1 DISCLAIMER OF WARRANTY Because this software is licensed free of charge, there is no warranty for the software, to the extent permitted by applicable law. Except when otherwise stated in writing the copyright holders and/or other parties provide the software "as is" without warranty of any kind, either expressed or implied, including, but not limited to, the implied warranties of merchantability and fitness for a particular purpose. The entire risk as to the quality and performance of the software is with you. Should the software prove defective, you assume the cost of all necessary servicing, repair, or correction. In no event unless required by applicable law or agreed to in writing will any copyright holder, or any other party who may modify and/or redistribute the software as permitted by the above licence, be liable to you for damages, including any general, special, incidental, or consequential damages arising out of the use or inability to use the software (including but not limited to loss of data or data being rendered inaccurate or losses sustained by you or third parties or a failure of the software to operate with any other software), even if such holder or other party has been advised of the possibility of such damages. =head1 LICENSE Either the Perl Artistic Licence L or the GPL L =head1 AUTHOR Current maintainer 0.60+: Douglas Wilson dougw@cpan.org Maintainer 0.40-0.59: John McNamara jmcnamara@cpan.org Maintainer 0.27-0.33: Gabor Szabo szabgab@cpan.org Original author: Kawai Takanori (Hippo2000) kwitknr@cpan.org =head1 COPYRIGHT Copyright (c) 2014 Douglas Wilson Copyright (c) 2009-2013 John McNamara Copyright (c) 2006-2008 Gabor Szabo Copyright (c) 2000-2006 Kawai Takanori All rights reserved. This is free software. You may distribute under the terms of either the GNU General Public License or the Artistic License. =cut gdata/inst/perl/Spreadsheet/README-XLS0000755000175100001440000000113413003720416017060 0ustar hornikusersSpreadsheet-XLSC version 0.01 ============================= Spreadsheet::XLSX - Perl extension for reading MS Excel 2007 files; INSTALLATION To install this module type the following: perl Makefile.PL make make test make install DEPENDENCIES This module requires these other modules and libraries: Archive::Zip COPYRIGHT AND LICENCE Copyright (C) 2008 by Dmitry Ovsyanko This library is free software; you can redistribute it and/or modify it under the same terms as Perl itself, either Perl version 5.8.8 or, at your option, any later version of Perl 5 you may have available. 
gdata/inst/perl/Archive/0000755000175100001440000000000013003720416014644 5ustar hornikusersgdata/inst/perl/Archive/README-Archive-Zip0000644000175100001440000013411113003720416017644 0ustar hornikusersNAME Archive::Zip - Provide an interface to ZIP archive files. SYNOPSIS # Create a Zip file use Archive::Zip qw( :ERROR_CODES :CONSTANTS ); my $zip = Archive::Zip->new(); # Add a directory my $dir_member = $zip->addDirectory( 'dirname/' ); # Add a file from a string with compression my $string_member = $zip->addString( 'This is a test', 'stringMember.txt' ); $string_member->desiredCompressionMethod( COMPRESSION_DEFLATED ); # Add a file from disk my $file_member = $zip->addFile( 'xyz.pl', 'AnotherName.pl' ); # Save the Zip file unless ( $zip->writeToFileNamed('someZip.zip') == AZ_OK ) { die 'write error'; } # Read a Zip file my $somezip = Archive::Zip->new(); unless ( $somezip->read( 'someZip.zip' ) == AZ_OK ) { die 'read error'; } # Change the compression type for a file in the Zip my $member = $somezip->memberNamed( 'stringMember.txt' ); $member->desiredCompressionMethod( COMPRESSION_STORED ); unless ( $zip->writeToFileNamed( 'someOtherZip.zip' ) == AZ_OK ) { die 'write error'; } DESCRIPTION The Archive::Zip module allows a Perl program to create, manipulate, read, and write Zip archive files. Zip archives can be created, or you can read from existing zip files. Once created, they can be written to files, streams, or strings. Members can be added, removed, extracted, replaced, rearranged, and enumerated. They can also be renamed or have their dates, comments, or other attributes queried or modified. Their data can be compressed or uncompressed as needed. Members can be created from members in existing Zip files, or from existing directories, files, or strings. This module uses the Compress::Raw::Zlib library to read and write the compressed streams inside the files. One can use Archive::Zip::MemberRead to read the zip file archive members as if they were files. File Naming Regardless of what your local file system uses for file naming, names in a Zip file are in Unix format (*forward* slashes (/) separating directory names, etc.). "Archive::Zip" tries to be consistent with file naming conventions, and will translate back and forth between native and Zip file names. However, it can't guess which format names are in. So two rules control what kind of file name you must pass various routines: Names of files are in local format. "File::Spec" and "File::Basename" are used for various file operations. When you're referring to a file on your system, use its file naming conventions. Names of archive members are in Unix format. This applies to every method that refers to an archive member, or provides a name for new archive members. The "extract()" methods that can take one or two names will convert from local to zip names if you call them with a single name. Archive::Zip Object Model Overview Archive::Zip::Archive objects are what you ordinarily deal with. These maintain the structure of a zip file, without necessarily holding data. When a zip is read from a disk file, the (possibly compressed) data still lives in the file, not in memory. Archive members hold information about the individual members, but not (usually) the actual member data. When the zip is written to a (different) file, the member data is compressed or copied as needed. It is possible to make archive members whose data is held in a string in memory, but this is not done when a zip file is read. 
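    As an illustration of this model, the following sketch (the file and
    member names are ours) reads an existing archive, adds one in-memory
    string member, and writes the result to a different file; the data of
    the pre-existing members is only copied, and compressed if required,
    when the archive is written out:

        use Archive::Zip qw( :ERROR_CODES );

        my $zip = Archive::Zip->new();
        $zip->read('source.zip') == AZ_OK
            or die 'read error';    # member data still lives in source.zip here

        $zip->addString( 'notes held in memory', 'NOTES.txt' );

        $zip->writeToFileNamed('copy.zip') == AZ_OK
            or die 'write error';   # data is copied/compressed at this point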
Directory members don't have any data. Inheritance Exporter Archive::Zip Common base class, has defs. Archive::Zip::Archive A Zip archive. Archive::Zip::Member Abstract superclass for all members. Archive::Zip::StringMember Member made from a string Archive::Zip::FileMember Member made from an external file Archive::Zip::ZipFileMember Member that lives in a zip file Archive::Zip::NewFileMember Member whose data is in a file Archive::Zip::DirectoryMember Member that is a directory EXPORTS :CONSTANTS Exports the following constants: FA_MSDOS FA_UNIX GPBF_ENCRYPTED_MASK GPBF_DEFLATING_COMPRESSION_MASK GPBF_HAS_DATA_DESCRIPTOR_MASK COMPRESSION_STORED COMPRESSION_DEFLATED IFA_TEXT_FILE_MASK IFA_TEXT_FILE IFA_BINARY_FILE COMPRESSION_LEVEL_NONE COMPRESSION_LEVEL_DEFAULT COMPRESSION_LEVEL_FASTEST COMPRESSION_LEVEL_BEST_COMPRESSION :MISC_CONSTANTS Exports the following constants (only necessary for extending the module): FA_AMIGA FA_VAX_VMS FA_VM_CMS FA_ATARI_ST FA_OS2_HPFS FA_MACINTOSH FA_Z_SYSTEM FA_CPM FA_WINDOWS_NTFS GPBF_IMPLODING_8K_SLIDING_DICTIONARY_MASK GPBF_IMPLODING_3_SHANNON_FANO_TREES_MASK GPBF_IS_COMPRESSED_PATCHED_DATA_MASK COMPRESSION_SHRUNK DEFLATING_COMPRESSION_NORMAL DEFLATING_COMPRESSION_MAXIMUM DEFLATING_COMPRESSION_FAST DEFLATING_COMPRESSION_SUPER_FAST COMPRESSION_REDUCED_1 COMPRESSION_REDUCED_2 COMPRESSION_REDUCED_3 COMPRESSION_REDUCED_4 COMPRESSION_IMPLODED COMPRESSION_TOKENIZED COMPRESSION_DEFLATED_ENHANCED COMPRESSION_PKWARE_DATA_COMPRESSION_LIBRARY_IMPLODED :ERROR_CODES Explained below. Returned from most methods. AZ_OK AZ_STREAM_END AZ_ERROR AZ_FORMAT_ERROR AZ_IO_ERROR ERROR CODES Many of the methods in Archive::Zip return error codes. These are implemented as inline subroutines, using the "use constant" pragma. They can be imported into your namespace using the ":ERROR_CODES" tag: use Archive::Zip qw( :ERROR_CODES ); ... unless ( $zip->read( 'myfile.zip' ) == AZ_OK ) { die "whoops!"; } AZ_OK (0) Everything is fine. AZ_STREAM_END (1) The read stream (or central directory) ended normally. AZ_ERROR (2) There was some generic kind of error. AZ_FORMAT_ERROR (3) There is a format error in a ZIP file being read. AZ_IO_ERROR (4) There was an IO error. Compression Archive::Zip allows each member of a ZIP file to be compressed (using the Deflate algorithm) or uncompressed. Other compression algorithms that some versions of ZIP have been able to produce are not supported. Each member has two compression methods: the one it's stored as (this is always COMPRESSION_STORED for string and external file members), and the one you desire for the member in the zip file. These can be different, of course, so you can make a zip member that is not compressed out of one that is, and vice versa. You can inquire about the current compression and set the desired compression method: my $member = $zip->memberNamed( 'xyz.txt' ); $member->compressionMethod(); # return current compression # set to read uncompressed $member->desiredCompressionMethod( COMPRESSION_STORED ); # set to read compressed $member->desiredCompressionMethod( COMPRESSION_DEFLATED ); There are two different compression methods: COMPRESSION_STORED File is stored (no compression) COMPRESSION_DEFLATED File is Deflated Compression Levels If a member's desiredCompressionMethod is COMPRESSION_DEFLATED, you can choose different compression levels. This choice may affect the speed of compression and decompression, as well as the size of the compressed member data. 
$member->desiredCompressionLevel( 9 ); The levels given can be: 0 or COMPRESSION_LEVEL_NONE This is the same as saying $member->desiredCompressionMethod( COMPRESSION_STORED ); 1 .. 9 1 gives the best speed and worst compression, and 9 gives the best compression and worst speed. COMPRESSION_LEVEL_FASTEST This is a synonym for level 1. COMPRESSION_LEVEL_BEST_COMPRESSION This is a synonym for level 9. COMPRESSION_LEVEL_DEFAULT This gives a good compromise between speed and compression, and is currently equivalent to 6 (this is in the zlib code). This is the level that will be used if not specified. Archive::Zip Methods The Archive::Zip class (and its invisible subclass Archive::Zip::Archive) implement generic zip file functionality. Creating a new Archive::Zip object actually makes an Archive::Zip::Archive object, but you don't have to worry about this unless you're subclassing. Constructor new( [$fileName] ) Make a new, empty zip archive. my $zip = Archive::Zip->new(); If an additional argument is passed, new() will call read() to read the contents of an archive: my $zip = Archive::Zip->new( 'xyz.zip' ); If a filename argument is passed and the read fails for any reason, new will return undef. For this reason, it may be better to call read separately. Zip Archive Utility Methods These Archive::Zip methods may be called as functions or as object methods. Do not call them as class methods: $zip = Archive::Zip->new(); $crc = Archive::Zip::computeCRC32( 'ghijkl' ); # OK $crc = $zip->computeCRC32( 'ghijkl' ); # also OK $crc = Archive::Zip->computeCRC32( 'ghijkl' ); # NOT OK Archive::Zip::computeCRC32( $string [, $crc] ) This is a utility function that uses the Compress::Raw::Zlib CRC routine to compute a CRC-32. You can get the CRC of a string: $crc = Archive::Zip::computeCRC32( $string ); Or you can compute the running CRC: $crc = 0; $crc = Archive::Zip::computeCRC32( 'abcdef', $crc ); $crc = Archive::Zip::computeCRC32( 'ghijkl', $crc ); Archive::Zip::setChunkSize( $number ) Report or change chunk size used for reading and writing. This can make big differences in dealing with large files. Currently, this defaults to 32K. This also changes the chunk size used for Compress::Raw::Zlib. You must call setChunkSize() before reading or writing. This is not exportable, so you must call it like: Archive::Zip::setChunkSize( 4096 ); or as a method on a zip (though this is a global setting). Returns old chunk size. Archive::Zip::chunkSize() Returns the current chunk size: my $chunkSize = Archive::Zip::chunkSize(); Archive::Zip::setErrorHandler( \&subroutine ) Change the subroutine called with error strings. This defaults to \&Carp::carp, but you may want to change it to get the error strings. This is not exportable, so you must call it like: Archive::Zip::setErrorHandler( \&myErrorHandler ); If myErrorHandler is undef, resets handler to default. Returns old error handler. Note that if you call Carp::carp or a similar routine or if you're chaining to the default error handler from your error handler, you may want to increment the number of caller levels that are skipped (do not just set it to a number): $Carp::CarpLevel++; Archive::Zip::tempFile( [$tmpdir] ) Create a uniquely named temp file. It will be returned open for read/write. If $tmpdir is given, it is used as the name of a directory to create the file in. If not given, creates the file using "File::Spec::tmpdir()". Generally, you can override this choice using the $ENV{TMPDIR} environment variable. 
But see the File::Spec documentation for your system. Note that on many systems, if you're running in taint mode, then you must make sure that $ENV{TMPDIR} is untainted for it to be used. Will *NOT* create $tmpdir if it doesn't exist (this is a change from prior versions!). Returns file handle and name: my ($fh, $name) = Archive::Zip::tempFile(); my ($fh, $name) = Archive::Zip::tempFile('myTempDir'); my $fh = Archive::Zip::tempFile(); # if you don't need the name Zip Archive Accessors members() Return a copy of the members array my @members = $zip->members(); numberOfMembers() Return the number of members I have memberNames() Return a list of the (internal) file names of the zip members memberNamed( $string ) Return ref to member whose filename equals given filename or undef. $string must be in Zip (Unix) filename format. membersMatching( $regex ) Return array of members whose filenames match given regular expression in list context. Returns number of matching members in scalar context. my @textFileMembers = $zip->membersMatching( '.*\.txt' ); # or my $numberOfTextFiles = $zip->membersMatching( '.*\.txt' ); diskNumber() Return the disk that I start on. Not used for writing zips, but might be interesting if you read a zip in. This should be 0, as Archive::Zip does not handle multi-volume archives. diskNumberWithStartOfCentralDirectory() Return the disk number that holds the beginning of the central directory. Not used for writing zips, but might be interesting if you read a zip in. This should be 0, as Archive::Zip does not handle multi-volume archives. numberOfCentralDirectoriesOnThisDisk() Return the number of CD structures in the zipfile last read in. Not used for writing zips, but might be interesting if you read a zip in. numberOfCentralDirectories() Return the number of CD structures in the zipfile last read in. Not used for writing zips, but might be interesting if you read a zip in. centralDirectorySize() Returns central directory size, as read from an external zip file. Not used for writing zips, but might be interesting if you read a zip in. centralDirectoryOffsetWRTStartingDiskNumber() Returns the offset into the zip file where the CD begins. Not used for writing zips, but might be interesting if you read a zip in. zipfileComment( [$string] ) Get or set the zipfile comment. Returns the old comment. print $zip->zipfileComment(); $zip->zipfileComment( 'New Comment' ); eocdOffset() Returns the (unexpected) number of bytes between where the EOCD was found and where it expected to be. This is normally 0, but would be positive if something (a virus, perhaps) had added bytes somewhere before the EOCD. Not used for writing zips, but might be interesting if you read a zip in. Here is an example of how you can diagnose this: my $zip = Archive::Zip->new('somefile.zip'); if ($zip->eocdOffset()) { warn "A virus has added ", $zip->eocdOffset, " bytes of garbage\n"; } The "eocdOffset()" is used to adjust the starting position of member headers, if necessary. fileName() Returns the name of the file last read from. If nothing has been read yet, returns an empty string; if read from a file handle, returns the handle in string form. Zip Archive Member Operations Various operations on a zip file modify members. When a member is passed as an argument, you can either use a reference to the member itself, or the name of a member. Of course, using the name requires that names be unique within a zip (this is not enforced). 
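    For example (the member name below is ours), both forms address the
    same member:

        my $member = $zip->memberNamed( 'docs/readme.txt' );

        $zip->extractMember( $member );              # by member reference
        $zip->extractMember( 'docs/readme.txt' );    # by its Zip (Unix) name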
removeMember( $memberOrName ) Remove and return the given member, or match its name and remove it. Returns undef if member or name doesn't exist in this Zip. No-op if member does not belong to this zip. replaceMember( $memberOrName, $newMember ) Remove and return the given member, or match its name and remove it. Replace with new member. Returns undef if member or name doesn't exist in this Zip, or if $newMember is undefined. It is an (undiagnosed) error to provide a $newMember that is a member of the zip being modified. my $member1 = $zip->removeMember( 'xyz' ); my $member2 = $zip->replaceMember( 'abc', $member1 ); # now, $member2 (named 'abc') is not in $zip, # and $member1 (named 'xyz') is, having taken $member2's place. extractMember( $memberOrName [, $extractedName ] ) Extract the given member, or match its name and extract it. Returns undef if member doesn't exist in this Zip. If optional second arg is given, use it as the name of the extracted member. Otherwise, the internal filename of the member is used as the name of the extracted file or directory. If you pass $extractedName, it should be in the local file system's format. All necessary directories will be created. Returns "AZ_OK" on success. extractMemberWithoutPaths( $memberOrName [, $extractedName ] ) Extract the given member, or match its name and extract it. Does not use path information (extracts into the current directory). Returns undef if member doesn't exist in this Zip. If optional second arg is given, use it as the name of the extracted member (its paths will be deleted too). Otherwise, the internal filename of the member (minus paths) is used as the name of the extracted file or directory. Returns "AZ_OK" on success. addMember( $member ) Append a member (possibly from another zip file) to the zip file. Returns the new member. Generally, you will use addFile(), addDirectory(), addFileOrDirectory(), addString(), or read() to add members. # Move member named 'abc' to end of zip: my $member = $zip->removeMember( 'abc' ); $zip->addMember( $member ); updateMember( $memberOrName, $fileName ) Update a single member from the file or directory named $fileName. Returns the (possibly added or updated) member, if any; "undef" on errors. The comparison is based on "lastModTime()" and (in the case of a non-directory) the size of the file. addFile( $fileName [, $newName ] ) Append a member whose data comes from an external file, returning the member or undef. The member will have its file name set to the name of the external file, and its desiredCompressionMethod set to COMPRESSION_DEFLATED. The file attributes and last modification time will be set from the file. If the name given does not represent a readable plain file or symbolic link, undef will be returned. $fileName must be in the format required for the local file system. The optional $newName argument sets the internal file name to something different than the given $fileName. $newName, if given, must be in Zip name format (i.e. Unix). The text mode bit will be set if the contents appears to be text (as returned by the "-T" perl operator). *NOTE* that you shouldn't (generally) use absolute path names in zip member names, as this will cause problems with some zip tools as well as introduce a security hole and make the zip harder to use. addDirectory( $directoryName [, $fileName ] ) Append a member created from the given directory name. The directory name does not have to name an existing directory. 
If the named directory exists, the file modification time and permissions are set from the existing directory, otherwise they are set to now and permissive default permissions. $directoryName must be in local file system format. The optional second argument sets the name of the archive member (which defaults to $directoryName). If given, it must be in Zip (Unix) format. Returns the new member. addFileOrDirectory( $name [, $newName ] ) Append a member from the file or directory named $name. If $newName is given, use it for the name of the new member. Will add or remove trailing slashes from $newName as needed. $name must be in local file system format. The optional second argument sets the name of the archive member (which defaults to $name). If given, it must be in Zip (Unix) format. addString( $stringOrStringRef, $name ) Append a member created from the given string or string reference. The name is given by the second argument. Returns the new member. The last modification time will be set to now, and the file attributes will be set to permissive defaults. my $member = $zip->addString( 'This is a test', 'test.txt' ); contents( $memberOrMemberName [, $newContents ] ) Returns the uncompressed data for a particular member, or undef. print "xyz.txt contains " . $zip->contents( 'xyz.txt' ); Also can change the contents of a member: $zip->contents( 'xyz.txt', 'This is the new contents' ); If called expecting an array as the return value, it will include the status as the second value in the array. ($content, $status) = $zip->contents( 'xyz.txt'); Zip Archive I/O operations A Zip archive can be written to a file or file handle, or read from one. writeToFileNamed( $fileName ) Write a zip archive to named file. Returns "AZ_OK" on success. my $status = $zip->writeToFileNamed( 'xx.zip' ); die "error somewhere" if $status != AZ_OK; Note that if you use the same name as an existing zip file that you read in, you will clobber ZipFileMembers. So instead, write to a different file name, then delete the original. If you use the "overwrite()" or "overwriteAs()" methods, you can re-write the original zip in this way. $fileName should be a valid file name on your system. writeToFileHandle( $fileHandle [, $seekable] ) Write a zip archive to a file handle. Return AZ_OK on success. The optional second arg tells whether or not to try to seek backwards to re-write headers. If not provided, it is set if the Perl "-f" test returns true. This could fail on some operating systems, though. my $fh = IO::File->new( 'someFile.zip', 'w' ); unless ( $zip->writeToFileHandle( $fh ) == AZ_OK ) { # error handling } If you pass a file handle that is not seekable (like if you're writing to a pipe or a socket), pass a false second argument: my $fh = IO::File->new( '| cat > somefile.zip', 'w' ); $zip->writeToFileHandle( $fh, 0 ); # fh is not seekable If this method fails during the write of a member, that member and all following it will return false from "wasWritten()". See writeCentralDirectory() for a way to deal with this. If you want, you can write data to the file handle before passing it to writeToFileHandle(); this could be used (for instance) for making self-extracting archives. However, this only works reliably when writing to a real file (as opposed to STDOUT or some other possible non-file). See examples/selfex.pl for how to write a self-extracting archive. writeCentralDirectory( $fileHandle [, $offset ] ) Writes the central directory structure to the given file handle. Returns AZ_OK on success. 
If given an $offset, will seek to that point before writing. This can be used for recovery in cases where writeToFileHandle or writeToFileNamed returns an IO error because of running out of space on the destination file. You can truncate the zip by seeking backwards and then writing the directory: my $fh = IO::File->new( 'someFile.zip', 'w' ); my $retval = $zip->writeToFileHandle( $fh ); if ( $retval == AZ_IO_ERROR ) { my @unwritten = grep { not $_->wasWritten() } $zip->members(); if (@unwritten) { $zip->removeMember( $member ) foreach my $member ( @unwritten ); $zip->writeCentralDirectory( $fh, $unwritten[0]->writeLocalHeaderRelativeOffset()); } } overwriteAs( $newName ) Write the zip to the specified file, as safely as possible. This is done by first writing to a temp file, then renaming the original if it exists, then renaming the temp file, then deleting the renamed original if it exists. Returns AZ_OK if successful. overwrite() Write back to the original zip file. See overwriteAs() above. If the zip was not ever read from a file, this generates an error. read( $fileName ) Read zipfile headers from a zip file, appending new members. Returns "AZ_OK" or error code. my $zipFile = Archive::Zip->new(); my $status = $zipFile->read( '/some/FileName.zip' ); readFromFileHandle( $fileHandle, $filename ) Read zipfile headers from an already-opened file handle, appending new members. Does not close the file handle. Returns "AZ_OK" or error code. Note that this requires a seekable file handle; reading from a stream is not yet supported. my $fh = IO::File->new( '/some/FileName.zip', 'r' ); my $zip1 = Archive::Zip->new(); my $status = $zip1->readFromFileHandle( $fh ); my $zip2 = Archive::Zip->new(); $status = $zip2->readFromFileHandle( $fh ); Zip Archive Tree operations These used to be in Archive::Zip::Tree but got moved into Archive::Zip. They enable operation on an entire tree of members or files. A usage example: use Archive::Zip; my $zip = Archive::Zip->new(); # add all readable files and directories below . as xyz/* $zip->addTree( '.', 'xyz' ); # add all readable plain files below /abc as def/* $zip->addTree( '/abc', 'def', sub { -f && -r } ); # add all .c files below /tmp as stuff/* $zip->addTreeMatching( '/tmp', 'stuff', '\.c$' ); # add all .o files below /tmp as stuff/* if they aren't writable $zip->addTreeMatching( '/tmp', 'stuff', '\.o$', sub { ! -w } ); # add all .so files below /tmp that are smaller than 200 bytes as stuff/* $zip->addTreeMatching( '/tmp', 'stuff', '\.o$', sub { -s < 200 } ); # and write them into a file $zip->writeToFileNamed('xxx.zip'); # now extract the same files into /tmpx $zip->extractTree( 'stuff', '/tmpx' ); $zip->addTree( $root, $dest [,$pred] ) -- Add tree of files to a zip $root is the root of the tree of files and directories to be added. It is a valid directory name on your system. $dest is the name for the root in the zip file (undef or blank means to use relative pathnames). It is a valid ZIP directory name (that is, it uses forward slashes (/) for separating directory components). $pred is an optional subroutine reference to select files: it is passed the name of the prospective file or directory using $_, and if it returns true, the file or directory will be included. The default is to add all readable files and directories. 
For instance, using my $pred = sub { /\.txt/ }; $zip->addTree( '.', '', $pred ); will add all the .txt files in and below the current directory, using relative names, and making the names identical in the zipfile: original name zip member name ./xyz xyz ./a/ a/ ./a/b a/b To translate absolute to relative pathnames, just pass them in: $zip->addTree( '/c/d', 'a' ); original name zip member name /c/d/xyz a/xyz /c/d/a/ a/a/ /c/d/a/b a/a/b Returns AZ_OK on success. Note that this will not follow symbolic links to directories. Note also that this does not check for the validity of filenames. Note that you generally *don't* want to make zip archive member names absolute. $zip->addTreeMatching( $root, $dest, $pattern [,$pred] ) $root is the root of the tree of files and directories to be added $dest is the name for the root in the zip file (undef means to use relative pathnames) $pattern is a (non-anchored) regular expression for filenames to match $pred is an optional subroutine reference to select files: it is passed the name of the prospective file or directory in $_, and if it returns true, the file or directory will be included. The default is to add all readable files and directories. To add all files in and below the current dirctory whose names end in ".pl", and make them extract into a subdirectory named "xyz", do this: $zip->addTreeMatching( '.', 'xyz', '\.pl$' ) To add all *writable* files in and below the dirctory named "/abc" whose names end in ".pl", and make them extract into a subdirectory named "xyz", do this: $zip->addTreeMatching( '/abc', 'xyz', '\.pl$', sub { -w } ) Returns AZ_OK on success. Note that this will not follow symbolic links to directories. $zip->updateTree( $root, [ $dest, [ $pred [, $mirror]]] ); Update a zip file from a directory tree. "updateTree()" takes the same arguments as "addTree()", but first checks to see whether the file or directory already exists in the zip file, and whether it has been changed. If the fourth argument $mirror is true, then delete all my members if corresponding files weren't found. Returns an error code or AZ_OK if all is well. $zip->extractTree() $zip->extractTree( $root ) $zip->extractTree( $root, $dest ) $zip->extractTree( $root, $dest, $volume ) If you don't give any arguments at all, will extract all the files in the zip with their original names. If you supply one argument for $root, "extractTree" will extract all the members whose names start with $root into the current directory, stripping off $root first. $root is in Zip (Unix) format. For instance, $zip->extractTree( 'a' ); when applied to a zip containing the files: a/x a/b/c ax/d/e d/e will extract: a/x as ./x a/b/c as ./b/c If you give two arguments, "extractTree" extracts all the members whose names start with $root. It will translate $root into $dest to construct the destination file name. $root and $dest are in Zip (Unix) format. For instance, $zip->extractTree( 'a', 'd/e' ); when applied to a zip containing the files: a/x a/b/c ax/d/e d/e will extract: a/x to d/e/x a/b/c to d/e/b/c and ignore ax/d/e and d/e If you give three arguments, "extractTree" extracts all the members whose names start with $root. It will translate $root into $dest to construct the destination file name, and then it will convert to local file system format, using $volume as the name of the destination volume. $root and $dest are in Zip (Unix) format. $volume is in local file system format. 
For instance, under Windows, $zip->extractTree( 'a', 'd/e', 'f:' ); when applied to a zip containing the files: a/x a/b/c ax/d/e d/e will extract: a/x to f:d/e/x a/b/c to f:d/e/b/c and ignore ax/d/e and d/e If you want absolute paths (the prior example used paths relative to the current directory on the destination volume, you can specify these in $dest: $zip->extractTree( 'a', '/d/e', 'f:' ); when applied to a zip containing the files: a/x a/b/c ax/d/e d/e will extract: a/x to f:\d\e\x a/b/c to f:\d\e\b\c and ignore ax/d/e and d/e Returns an error code or AZ_OK if everything worked OK. MEMBER OPERATIONS Member Class Methods Several constructors allow you to construct members without adding them to a zip archive. These work the same as the addFile(), addDirectory(), and addString() zip instance methods described above, but they don't add the new members to a zip. Archive::Zip::Member->newFromString( $stringOrStringRef [, $fileName] ) Construct a new member from the given string. Returns undef on error. my $member = Archive::Zip::Member->newFromString( 'This is a test', 'xyz.txt' ); newFromFile( $fileName ) Construct a new member from the given file. Returns undef on error. my $member = Archive::Zip::Member->newFromFile( 'xyz.txt' ); newDirectoryNamed( $directoryName [, $zipname ] ) Construct a new member from the given directory. $directoryName must be a valid name on your file system; it doesn't have to exist. If given, $zipname will be the name of the zip member; it must be a valid Zip (Unix) name. If not given, it will be converted from $directoryName. Returns undef on error. my $member = Archive::Zip::Member->newDirectoryNamed( 'CVS/' ); Member Simple accessors These methods get (and/or set) member attribute values. versionMadeBy() Gets the field from the member header. fileAttributeFormat( [$format] ) Gets or sets the field from the member header. These are "FA_*" values. versionNeededToExtract() Gets the field from the member header. bitFlag() Gets the general purpose bit field from the member header. This is where the "GPBF_*" bits live. compressionMethod() Returns the member compression method. This is the method that is currently being used to compress the member data. This will be COMPRESSION_STORED for added string or file members, or any of the "COMPRESSION_*" values for members from a zip file. However, this module can only handle members whose data is in COMPRESSION_STORED or COMPRESSION_DEFLATED format. desiredCompressionMethod( [$method] ) Get or set the member's "desiredCompressionMethod". This is the compression method that will be used when the member is written. Returns prior desiredCompressionMethod. Only COMPRESSION_DEFLATED or COMPRESSION_STORED are valid arguments. Changing to COMPRESSION_STORED will change the member desiredCompressionLevel to 0; changing to COMPRESSION_DEFLATED will change the member desiredCompressionLevel to COMPRESSION_LEVEL_DEFAULT. desiredCompressionLevel( [$method] ) Get or set the member's desiredCompressionLevel This is the method that will be used to write. Returns prior desiredCompressionLevel. Valid arguments are 0 through 9, COMPRESSION_LEVEL_NONE, COMPRESSION_LEVEL_DEFAULT, COMPRESSION_LEVEL_BEST_COMPRESSION, and COMPRESSION_LEVEL_FASTEST. 0 or COMPRESSION_LEVEL_NONE will change the desiredCompressionMethod to COMPRESSION_STORED. All other arguments will change the desiredCompressionMethod to COMPRESSION_DEFLATED. externalFileName() Return the member's external file name, if any, or undef. 
fileName() Get or set the member's internal filename. Returns the (possibly new) filename. Names will have backslashes converted to forward slashes, and will have multiple consecutive slashes converted to single ones. lastModFileDateTime() Return the member's last modification date/time stamp in MS-DOS format. lastModTime() Return the member's last modification date/time stamp, converted to unix localtime format. print "Mod Time: " . scalar( localtime( $member->lastModTime() ) ); setLastModFileDateTimeFromUnix() Set the member's lastModFileDateTime from the given unix time. $member->setLastModFileDateTimeFromUnix( time() ); internalFileAttributes() Return the internal file attributes field from the zip header. This is only set for members read from a zip file. externalFileAttributes() Return member attributes as read from the ZIP file. Note that these are NOT UNIX! unixFileAttributes( [$newAttributes] ) Get or set the member's file attributes using UNIX file attributes. Returns old attributes. my $oldAttribs = $member->unixFileAttributes( 0666 ); Note that the return value has more than just the file permissions, so you will have to mask off the lowest bits for comparisions. localExtraField( [$newField] ) Gets or sets the extra field that was read from the local header. This is not set for a member from a zip file until after the member has been written out. The extra field must be in the proper format. cdExtraField( [$newField] ) Gets or sets the extra field that was read from the central directory header. The extra field must be in the proper format. extraFields() Return both local and CD extra fields, concatenated. fileComment( [$newComment] ) Get or set the member's file comment. hasDataDescriptor() Get or set the data descriptor flag. If this is set, the local header will not necessarily have the correct data sizes. Instead, a small structure will be stored at the end of the member data with these values. This should be transparent in normal operation. crc32() Return the CRC-32 value for this member. This will not be set for members that were constructed from strings or external files until after the member has been written. crc32String() Return the CRC-32 value for this member as an 8 character printable hex string. This will not be set for members that were constructed from strings or external files until after the member has been written. compressedSize() Return the compressed size for this member. This will not be set for members that were constructed from strings or external files until after the member has been written. uncompressedSize() Return the uncompressed size for this member. isEncrypted() Return true if this member is encrypted. The Archive::Zip module does not currently create or extract encrypted members. isTextFile( [$flag] ) Returns true if I am a text file. Also can set the status if given an argument (then returns old state). Note that this module does not currently do anything with this flag upon extraction or storage. That is, bytes are stored in native format whether or not they came from a text file. isBinaryFile() Returns true if I am a binary file. Also can set the status if given an argument (then returns old state). Note that this module does not currently do anything with this flag upon extraction or storage. That is, bytes are stored in native format whether or not they came from a text file. extractToFileNamed( $fileName ) Extract me to a file with the given name. The file will be created with default modes. Directories will be created as needed. 
The $fileName argument should be a valid file name on your file system. Returns AZ_OK on success. isDirectory() Returns true if I am a directory. writeLocalHeaderRelativeOffset() Returns the file offset in bytes the last time I was written. wasWritten() Returns true if I was successfully written. Reset at the beginning of a write attempt. Low-level member data reading It is possible to use lower-level routines to access member data streams, rather than the extract* methods and contents(). For instance, here is how to print the uncompressed contents of a member in chunks using these methods: my ( $member, $status, $bufferRef ); $member = $zip->memberNamed( 'xyz.txt' ); $member->desiredCompressionMethod( COMPRESSION_STORED ); $status = $member->rewindData(); die "error $status" unless $status == AZ_OK; while ( ! $member->readIsDone() ) { ( $bufferRef, $status ) = $member->readChunk(); die "error $status" if $status != AZ_OK && $status != AZ_STREAM_END; # do something with $bufferRef: print $$bufferRef; } $member->endRead(); readChunk( [$chunkSize] ) This reads the next chunk of given size from the member's data stream and compresses or uncompresses it as necessary, returning a reference to the bytes read and a status. If size argument is not given, defaults to global set by Archive::Zip::setChunkSize. Status is AZ_OK on success until the last chunk, where it returns AZ_STREAM_END. Returns "( \$bytes, $status)". my ( $outRef, $status ) = $self->readChunk(); print $$outRef if $status != AZ_OK && $status != AZ_STREAM_END; rewindData() Rewind data and set up for reading data streams or writing zip files. Can take options for "inflateInit()" or "deflateInit()", but this isn't likely to be necessary. Subclass overrides should call this method. Returns "AZ_OK" on success. endRead() Reset the read variables and free the inflater or deflater. Must be called to close files, etc. Returns AZ_OK on success. readIsDone() Return true if the read has run out of data or errored out. contents() Return the entire uncompressed member data or undef in scalar context. When called in array context, returns "( $string, $status )"; status will be AZ_OK on success: my $string = $member->contents(); # or my ( $string, $status ) = $member->contents(); die "error $status" unless $status == AZ_OK; Can also be used to set the contents of a member (this may change the class of the member): $member->contents( "this is my new contents" ); extractToFileHandle( $fh ) Extract (and uncompress, if necessary) the member's contents to the given file handle. Return AZ_OK on success. Archive::Zip::FileMember methods The Archive::Zip::FileMember class extends Archive::Zip::Member. It is the base class for both ZipFileMember and NewFileMember classes. This class adds an "externalFileName" and an "fh" member to keep track of the external file. externalFileName() Return the member's external filename. fh() Return the member's read file handle. Automatically opens file if necessary. Archive::Zip::ZipFileMember methods The Archive::Zip::ZipFileMember class represents members that have been read from external zip files. diskNumberStart() Returns the disk number that the member's local header resides in. Should be 0. localHeaderRelativeOffset() Returns the offset into the zip file where the member's local header is. dataOffset() Returns the offset from the beginning of the zip file to the member's data. 
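    A small sketch that prints these offsets for every member of an archive
    read from disk (the file name is ours):

        my $zip = Archive::Zip->new('existing.zip')
            or die 'read error';

        for my $member ( $zip->members() ) {
            next unless $member->isa('Archive::Zip::ZipFileMember');
            printf "%s: local header at %d, data at %d\n",
                $member->fileName(),
                $member->localHeaderRelativeOffset(),
                $member->dataOffset();
        }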
REQUIRED MODULES Archive::Zip requires several other modules: Carp Compress::Raw::Zlib Cwd File::Basename File::Copy File::Find File::Path File::Spec IO::File IO::Seekable Time::Local BUGS AND CAVEATS When not to use Archive::Zip If you are just going to be extracting zips (and/or other archives) you are recommended to look at using Archive::Extract instead, as it is much easier to use and factors out archive-specific functionality. Try to avoid IO::Scalar One of the most common ways to use Archive::Zip is to generate Zip files in-memory. Most people have use IO::Scalar for this purpose. Unfortunately, as of 1.11 this module no longer works with IO::Scalar as it incorrectly implements seeking. Anybody using IO::Scalar should consider porting to IO::String, which is smaller, lighter, and is implemented to be perfectly compatible with regular seekable filehandles. Support for IO::Scalar most likely will not be restored in the future, as IO::Scalar itself cannot change the way it is implemented due to back-compatibility issues. TO DO * auto-choosing storing vs compression * extra field hooks (see notes.txt) * check for dups on addition/renaming? * Text file extraction (line end translation) * Reading zip files from non-seekable inputs (Perhaps by proxying through IO::String?) * separate unused constants into separate module * cookbook style docs * Handle tainted paths correctly * Work on better compatability with other IO:: modules SUPPORT Bugs should be reported via the CPAN bug tracker For other issues contact the maintainer AUTHOR Adam Kennedy Previously maintained by Steve Peters . File attributes code by Maurice Aubrey . Originally by Ned Konz . COPYRIGHT Some parts copyright 2006 - 2009 Adam Kennedy. Some parts copyright 2005 Steve Peters. Original work copyright 2000 - 2004 Ned Konz. This program is free software; you can redistribute it and/or modify it under the same terms as Perl itself. SEE ALSO Look at Archive::Zip::MemberRead which is a wrapper that allows one to read Zip archive members as if they were files. Compress::Raw::Zlib, Archive::Tar, Archive::Extract There is a Japanese translation of this document at that was done by DEQ . Thanks! gdata/inst/perl/Archive/Zip.pm0000644000175100001440000016612313003720416015755 0ustar hornikuserspackage Archive::Zip; use strict; BEGIN { require 5.003_96; } use UNIVERSAL (); use Carp (); use Cwd (); use IO::File (); use IO::Seekable (); use Compress::Raw::Zlib (); use File::Spec (); use File::Temp (); use FileHandle (); use vars qw( $VERSION @ISA ); BEGIN { $VERSION = '1.30'; require Exporter; @ISA = qw( Exporter ); } use vars qw( $ChunkSize $ErrorHandler ); BEGIN { # This is the size we'll try to read, write, and (de)compress. # You could set it to something different if you had lots of memory # and needed more speed. $ChunkSize ||= 32768; $ErrorHandler = \&Carp::carp; } # BEGIN block is necessary here so that other modules can use the constants. 
use vars qw( @EXPORT_OK %EXPORT_TAGS ); BEGIN { @EXPORT_OK = ('computeCRC32'); %EXPORT_TAGS = ( CONSTANTS => [ qw( FA_MSDOS FA_UNIX GPBF_ENCRYPTED_MASK GPBF_DEFLATING_COMPRESSION_MASK GPBF_HAS_DATA_DESCRIPTOR_MASK COMPRESSION_STORED COMPRESSION_DEFLATED COMPRESSION_LEVEL_NONE COMPRESSION_LEVEL_DEFAULT COMPRESSION_LEVEL_FASTEST COMPRESSION_LEVEL_BEST_COMPRESSION IFA_TEXT_FILE_MASK IFA_TEXT_FILE IFA_BINARY_FILE ) ], MISC_CONSTANTS => [ qw( FA_AMIGA FA_VAX_VMS FA_VM_CMS FA_ATARI_ST FA_OS2_HPFS FA_MACINTOSH FA_Z_SYSTEM FA_CPM FA_TOPS20 FA_WINDOWS_NTFS FA_QDOS FA_ACORN FA_VFAT FA_MVS FA_BEOS FA_TANDEM FA_THEOS GPBF_IMPLODING_8K_SLIDING_DICTIONARY_MASK GPBF_IMPLODING_3_SHANNON_FANO_TREES_MASK GPBF_IS_COMPRESSED_PATCHED_DATA_MASK COMPRESSION_SHRUNK DEFLATING_COMPRESSION_NORMAL DEFLATING_COMPRESSION_MAXIMUM DEFLATING_COMPRESSION_FAST DEFLATING_COMPRESSION_SUPER_FAST COMPRESSION_REDUCED_1 COMPRESSION_REDUCED_2 COMPRESSION_REDUCED_3 COMPRESSION_REDUCED_4 COMPRESSION_IMPLODED COMPRESSION_TOKENIZED COMPRESSION_DEFLATED_ENHANCED COMPRESSION_PKWARE_DATA_COMPRESSION_LIBRARY_IMPLODED ) ], ERROR_CODES => [ qw( AZ_OK AZ_STREAM_END AZ_ERROR AZ_FORMAT_ERROR AZ_IO_ERROR ) ], # For Internal Use Only PKZIP_CONSTANTS => [ qw( SIGNATURE_FORMAT SIGNATURE_LENGTH LOCAL_FILE_HEADER_SIGNATURE LOCAL_FILE_HEADER_FORMAT LOCAL_FILE_HEADER_LENGTH CENTRAL_DIRECTORY_FILE_HEADER_SIGNATURE DATA_DESCRIPTOR_FORMAT DATA_DESCRIPTOR_LENGTH DATA_DESCRIPTOR_SIGNATURE DATA_DESCRIPTOR_FORMAT_NO_SIG DATA_DESCRIPTOR_LENGTH_NO_SIG CENTRAL_DIRECTORY_FILE_HEADER_FORMAT CENTRAL_DIRECTORY_FILE_HEADER_LENGTH END_OF_CENTRAL_DIRECTORY_SIGNATURE END_OF_CENTRAL_DIRECTORY_SIGNATURE_STRING END_OF_CENTRAL_DIRECTORY_FORMAT END_OF_CENTRAL_DIRECTORY_LENGTH ) ], # For Internal Use Only UTILITY_METHODS => [ qw( _error _printError _ioError _formatError _subclassResponsibility _binmode _isSeekable _newFileHandle _readSignature _asZipDirName ) ], ); # Add all the constant names and error code names to @EXPORT_OK Exporter::export_ok_tags( qw( CONSTANTS ERROR_CODES PKZIP_CONSTANTS UTILITY_METHODS MISC_CONSTANTS ) ); } # Error codes use constant AZ_OK => 0; use constant AZ_STREAM_END => 1; use constant AZ_ERROR => 2; use constant AZ_FORMAT_ERROR => 3; use constant AZ_IO_ERROR => 4; # File types # Values of Archive::Zip::Member->fileAttributeFormat() use constant FA_MSDOS => 0; use constant FA_AMIGA => 1; use constant FA_VAX_VMS => 2; use constant FA_UNIX => 3; use constant FA_VM_CMS => 4; use constant FA_ATARI_ST => 5; use constant FA_OS2_HPFS => 6; use constant FA_MACINTOSH => 7; use constant FA_Z_SYSTEM => 8; use constant FA_CPM => 9; use constant FA_TOPS20 => 10; use constant FA_WINDOWS_NTFS => 11; use constant FA_QDOS => 12; use constant FA_ACORN => 13; use constant FA_VFAT => 14; use constant FA_MVS => 15; use constant FA_BEOS => 16; use constant FA_TANDEM => 17; use constant FA_THEOS => 18; # general-purpose bit flag masks # Found in Archive::Zip::Member->bitFlag() use constant GPBF_ENCRYPTED_MASK => 1 << 0; use constant GPBF_DEFLATING_COMPRESSION_MASK => 3 << 1; use constant GPBF_HAS_DATA_DESCRIPTOR_MASK => 1 << 3; # deflating compression types, if compressionMethod == COMPRESSION_DEFLATED # ( Archive::Zip::Member->bitFlag() & GPBF_DEFLATING_COMPRESSION_MASK ) use constant DEFLATING_COMPRESSION_NORMAL => 0 << 1; use constant DEFLATING_COMPRESSION_MAXIMUM => 1 << 1; use constant DEFLATING_COMPRESSION_FAST => 2 << 1; use constant DEFLATING_COMPRESSION_SUPER_FAST => 3 << 1; # compression method # these two are the only ones supported in this module use 
constant COMPRESSION_STORED => 0; # file is stored (no compression) use constant COMPRESSION_DEFLATED => 8; # file is Deflated use constant COMPRESSION_LEVEL_NONE => 0; use constant COMPRESSION_LEVEL_DEFAULT => -1; use constant COMPRESSION_LEVEL_FASTEST => 1; use constant COMPRESSION_LEVEL_BEST_COMPRESSION => 9; # internal file attribute bits # Found in Archive::Zip::Member::internalFileAttributes() use constant IFA_TEXT_FILE_MASK => 1; use constant IFA_TEXT_FILE => 1; use constant IFA_BINARY_FILE => 0; # PKZIP file format miscellaneous constants (for internal use only) use constant SIGNATURE_FORMAT => "V"; use constant SIGNATURE_LENGTH => 4; # these lengths are without the signature. use constant LOCAL_FILE_HEADER_SIGNATURE => 0x04034b50; use constant LOCAL_FILE_HEADER_FORMAT => "v3 V4 v2"; use constant LOCAL_FILE_HEADER_LENGTH => 26; # PKZIP docs don't mention the signature, but Info-Zip writes it. use constant DATA_DESCRIPTOR_SIGNATURE => 0x08074b50; use constant DATA_DESCRIPTOR_FORMAT => "V3"; use constant DATA_DESCRIPTOR_LENGTH => 12; # but the signature is apparently optional. use constant DATA_DESCRIPTOR_FORMAT_NO_SIG => "V2"; use constant DATA_DESCRIPTOR_LENGTH_NO_SIG => 8; use constant CENTRAL_DIRECTORY_FILE_HEADER_SIGNATURE => 0x02014b50; use constant CENTRAL_DIRECTORY_FILE_HEADER_FORMAT => "C2 v3 V4 v5 V2"; use constant CENTRAL_DIRECTORY_FILE_HEADER_LENGTH => 42; use constant END_OF_CENTRAL_DIRECTORY_SIGNATURE => 0x06054b50; use constant END_OF_CENTRAL_DIRECTORY_SIGNATURE_STRING => pack( "V", END_OF_CENTRAL_DIRECTORY_SIGNATURE ); use constant END_OF_CENTRAL_DIRECTORY_FORMAT => "v4 V2 v"; use constant END_OF_CENTRAL_DIRECTORY_LENGTH => 18; use constant GPBF_IMPLODING_8K_SLIDING_DICTIONARY_MASK => 1 << 1; use constant GPBF_IMPLODING_3_SHANNON_FANO_TREES_MASK => 1 << 2; use constant GPBF_IS_COMPRESSED_PATCHED_DATA_MASK => 1 << 5; # the rest of these are not supported in this module use constant COMPRESSION_SHRUNK => 1; # file is Shrunk use constant COMPRESSION_REDUCED_1 => 2; # file is Reduced CF=1 use constant COMPRESSION_REDUCED_2 => 3; # file is Reduced CF=2 use constant COMPRESSION_REDUCED_3 => 4; # file is Reduced CF=3 use constant COMPRESSION_REDUCED_4 => 5; # file is Reduced CF=4 use constant COMPRESSION_IMPLODED => 6; # file is Imploded use constant COMPRESSION_TOKENIZED => 7; # reserved for Tokenizing compr. use constant COMPRESSION_DEFLATED_ENHANCED => 9; # reserved for enh. Deflating use constant COMPRESSION_PKWARE_DATA_COMPRESSION_LIBRARY_IMPLODED => 10; # Load the various required classes require Archive::Zip::Archive; require Archive::Zip::Member; require Archive::Zip::FileMember; require Archive::Zip::DirectoryMember; require Archive::Zip::ZipFileMember; require Archive::Zip::NewFileMember; require Archive::Zip::StringMember; use constant ZIPARCHIVECLASS => 'Archive::Zip::Archive'; use constant ZIPMEMBERCLASS => 'Archive::Zip::Member'; # Convenience functions sub _ISA ($$) { # Can't rely on Scalar::Util, so use the next best way local $@; !! eval { ref $_[0] and $_[0]->isa($_[1]) }; } sub _CAN ($$) { local $@; !! 
eval { ref $_[0] and $_[0]->can($_[1]) }; } ##################################################################### # Methods sub new { my $class = shift; return $class->ZIPARCHIVECLASS->new(@_); } sub computeCRC32 { my ( $data, $crc ); if ( ref( $_[0] ) eq 'HASH' ) { $data = $_[0]->{string}; $crc = $_[0]->{checksum}; } else { $data = shift; $data = shift if ref($data); $crc = shift; } return Compress::Raw::Zlib::crc32( $data, $crc ); } # Report or change chunk size used for reading and writing. # Also sets Zlib's default buffer size (eventually). sub setChunkSize { shift if ref( $_[0] ) eq 'Archive::Zip::Archive'; my $chunkSize = ( ref( $_[0] ) eq 'HASH' ) ? shift->{chunkSize} : shift; my $oldChunkSize = $Archive::Zip::ChunkSize; $Archive::Zip::ChunkSize = $chunkSize if ($chunkSize); return $oldChunkSize; } sub chunkSize { return $Archive::Zip::ChunkSize; } sub setErrorHandler { my $errorHandler = ( ref( $_[0] ) eq 'HASH' ) ? shift->{subroutine} : shift; $errorHandler = \&Carp::carp unless defined($errorHandler); my $oldErrorHandler = $Archive::Zip::ErrorHandler; $Archive::Zip::ErrorHandler = $errorHandler; return $oldErrorHandler; } ###################################################################### # Private utility functions (not methods). sub _printError { my $string = join ( ' ', @_, "\n" ); my $oldCarpLevel = $Carp::CarpLevel; $Carp::CarpLevel += 2; &{$ErrorHandler} ($string); $Carp::CarpLevel = $oldCarpLevel; } # This is called on format errors. sub _formatError { shift if ref( $_[0] ); _printError( 'format error:', @_ ); return AZ_FORMAT_ERROR; } # This is called on IO errors. sub _ioError { shift if ref( $_[0] ); _printError( 'IO error:', @_, ':', $! ); return AZ_IO_ERROR; } # This is called on generic errors. sub _error { shift if ref( $_[0] ); _printError( 'error:', @_ ); return AZ_ERROR; } # Called when a subclass should have implemented # something but didn't sub _subclassResponsibility { Carp::croak("subclass Responsibility\n"); } # Try to set the given file handle or object into binary mode. sub _binmode { my $fh = shift; return _CAN( $fh, 'binmode' ) ? $fh->binmode() : binmode($fh); } # Attempt to guess whether file handle is seekable. # Because of problems with Windows, this only returns true when # the file handle is a real file. sub _isSeekable { my $fh = shift; return 0 unless ref $fh; if ( _ISA($fh, 'IO::Scalar') ) { # IO::Scalar objects are brokenly-seekable return 0; } if ( _ISA($fh, 'IO::String') ) { return 1; } if ( _ISA($fh, 'IO::Seekable') ) { # Unfortunately, some things like FileHandle objects # return true for Seekable, but AREN'T!!!!! if ( _ISA($fh, 'FileHandle') ) { return 0; } else { return 1; } } if ( _CAN($fh, 'stat') ) { return -f $fh; } return ( _CAN($fh, 'seek') and _CAN($fh, 'tell') ) ? 1 : 0; } # Print to the filehandle, while making sure the pesky Perl special global # variables don't interfere. sub _print { my ($self, $fh, @data) = @_; local $\; return $fh->print(@data); } # Return an opened IO::Handle # my ( $status, fh ) = _newFileHandle( 'fileName', 'w' ); # Can take a filename, file handle, or ref to GLOB # Or, if given something that is a ref but not an IO::Handle, # passes back the same thing. 
sub _newFileHandle { my $fd = shift; my $status = 1; my $handle; if ( ref($fd) ) { if ( _ISA($fd, 'IO::Scalar') or _ISA($fd, 'IO::String') ) { $handle = $fd; } elsif ( _ISA($fd, 'IO::Handle') or ref($fd) eq 'GLOB' ) { $handle = IO::File->new; $status = $handle->fdopen( $fd, @_ ); } else { $handle = $fd; } } else { $handle = IO::File->new; $status = $handle->open( $fd, @_ ); } return ( $status, $handle ); } # Returns next signature from given file handle, leaves # file handle positioned afterwards. # In list context, returns ($status, $signature) # ( $status, $signature) = _readSignature( $fh, $fileName ); sub _readSignature { my $fh = shift; my $fileName = shift; my $expectedSignature = shift; # optional my $signatureData; my $bytesRead = $fh->read( $signatureData, SIGNATURE_LENGTH ); if ( $bytesRead != SIGNATURE_LENGTH ) { return _ioError("reading header signature"); } my $signature = unpack( SIGNATURE_FORMAT, $signatureData ); my $status = AZ_OK; # compare with expected signature, if any, or any known signature. if ( ( defined($expectedSignature) && $signature != $expectedSignature ) || ( !defined($expectedSignature) && $signature != CENTRAL_DIRECTORY_FILE_HEADER_SIGNATURE && $signature != LOCAL_FILE_HEADER_SIGNATURE && $signature != END_OF_CENTRAL_DIRECTORY_SIGNATURE && $signature != DATA_DESCRIPTOR_SIGNATURE ) ) { my $errmsg = sprintf( "bad signature: 0x%08x", $signature ); if ( _isSeekable($fh) ) { $errmsg .= sprintf( " at offset %d", $fh->tell() - SIGNATURE_LENGTH ); } $status = _formatError("$errmsg in file $fileName"); } return ( $status, $signature ); } # Utility method to make and open a temp file. # Will create $temp_dir if it doesn't exist. # Returns file handle and name: # # my ($fh, $name) = Archive::Zip::tempFile(); # my ($fh, $name) = Archive::Zip::tempFile('mytempdir'); # sub tempFile { my $dir = ( ref( $_[0] ) eq 'HASH' ) ? shift->{tempDir} : shift; my ( $fh, $filename ) = File::Temp::tempfile( SUFFIX => '.zip', UNLINK => 0, # we will delete it! $dir ? ( DIR => $dir ) : () ); return ( undef, undef ) unless $fh; my ( $status, $newfh ) = _newFileHandle( $fh, 'w+' ); return ( $newfh, $filename ); } # Return the normalized directory name as used in a zip file (path # separators become slashes, etc.). # Will translate internal slashes in path components (i.e. on Macs) to # underscores. Discards volume names. # When $forceDir is set, returns paths with trailing slashes (or arrays # with trailing blank members). # # If third argument is a reference, returns volume information there. # # input output # . ('.') '.' # ./a ('a') a # ./a/b ('a','b') a/b # ./a/b/ ('a','b') a/b # a/b/ ('a','b') a/b # /a/b/ ('','a','b') /a/b # c:\a\b\c.doc ('','a','b','c.doc') /a/b/c.doc # on Windoze # "i/o maps:whatever" ('i_o maps', 'whatever') "i_o maps/whatever" # on Macs sub _asZipDirName { my $name = shift; my $forceDir = shift; my $volReturn = shift; my ( $volume, $directories, $file ) = File::Spec->splitpath( File::Spec->canonpath($name), $forceDir ); $$volReturn = $volume if ( ref($volReturn) ); my @dirs = map { $_ =~ s{/}{_}g; $_ } File::Spec->splitdir($directories); if ( @dirs > 0 ) { pop (@dirs) unless $dirs[-1] } # remove empty component push ( @dirs, defined($file) ? $file : '' ); #return wantarray ? @dirs : join ( '/', @dirs ); return join ( '/', @dirs ); } # Return an absolute local name for a zip name. # Assume a directory if zip name has trailing slash. # Takes an optional volume name in FS format (like 'a:'). 
# sub _asLocalName { my $name = shift; # zip format my $volume = shift; $volume = '' unless defined($volume); # local FS format my @paths = split ( /\//, $name ); my $filename = pop (@paths); $filename = '' unless defined($filename); my $localDirs = @paths ? File::Spec->catdir(@paths) : ''; my $localName = File::Spec->catpath( $volume, $localDirs, $filename ); unless ( $volume ) { $localName = File::Spec->rel2abs( $localName, Cwd::getcwd() ); } return $localName; } 1; __END__ =pod =head1 NAME Archive::Zip - Provide an interface to ZIP archive files. =head1 SYNOPSIS # Create a Zip file use Archive::Zip qw( :ERROR_CODES :CONSTANTS ); my $zip = Archive::Zip->new(); # Add a directory my $dir_member = $zip->addDirectory( 'dirname/' ); # Add a file from a string with compression my $string_member = $zip->addString( 'This is a test', 'stringMember.txt' ); $string_member->desiredCompressionMethod( COMPRESSION_DEFLATED ); # Add a file from disk my $file_member = $zip->addFile( 'xyz.pl', 'AnotherName.pl' ); # Save the Zip file unless ( $zip->writeToFileNamed('someZip.zip') == AZ_OK ) { die 'write error'; } # Read a Zip file my $somezip = Archive::Zip->new(); unless ( $somezip->read( 'someZip.zip' ) == AZ_OK ) { die 'read error'; } # Change the compression type for a file in the Zip my $member = $somezip->memberNamed( 'stringMember.txt' ); $member->desiredCompressionMethod( COMPRESSION_STORED ); unless ( $zip->writeToFileNamed( 'someOtherZip.zip' ) == AZ_OK ) { die 'write error'; } =head1 DESCRIPTION The Archive::Zip module allows a Perl program to create, manipulate, read, and write Zip archive files. Zip archives can be created, or you can read from existing zip files. Once created, they can be written to files, streams, or strings. Members can be added, removed, extracted, replaced, rearranged, and enumerated. They can also be renamed or have their dates, comments, or other attributes queried or modified. Their data can be compressed or uncompressed as needed. Members can be created from members in existing Zip files, or from existing directories, files, or strings. This module uses the L library to read and write the compressed streams inside the files. One can use L to read the zip file archive members as if they were files. =head2 File Naming Regardless of what your local file system uses for file naming, names in a Zip file are in Unix format (I slashes (/) separating directory names, etc.). C tries to be consistent with file naming conventions, and will translate back and forth between native and Zip file names. However, it can't guess which format names are in. So two rules control what kind of file name you must pass various routines: =over 4 =item Names of files are in local format. C and C are used for various file operations. When you're referring to a file on your system, use its file naming conventions. =item Names of archive members are in Unix format. This applies to every method that refers to an archive member, or provides a name for new archive members. The C methods that can take one or two names will convert from local to zip names if you call them with a single name. =back =head2 Archive::Zip Object Model =head2 Overview Archive::Zip::Archive objects are what you ordinarily deal with. These maintain the structure of a zip file, without necessarily holding data. When a zip is read from a disk file, the (possibly compressed) data still lives in the file, not in memory. Archive members hold information about the individual members, but not (usually) the actual member data. 
When the zip is written to a (different) file, the member data is compressed or copied as needed. It is possible to make archive members whose data is held in a string in memory, but this is not done when a zip file is read. Directory members don't have any data. =head2 Inheritance Exporter Archive::Zip Common base class, has defs. Archive::Zip::Archive A Zip archive. Archive::Zip::Member Abstract superclass for all members. Archive::Zip::StringMember Member made from a string Archive::Zip::FileMember Member made from an external file Archive::Zip::ZipFileMember Member that lives in a zip file Archive::Zip::NewFileMember Member whose data is in a file Archive::Zip::DirectoryMember Member that is a directory =head1 EXPORTS =over 4 =item :CONSTANTS Exports the following constants: FA_MSDOS FA_UNIX GPBF_ENCRYPTED_MASK GPBF_DEFLATING_COMPRESSION_MASK GPBF_HAS_DATA_DESCRIPTOR_MASK COMPRESSION_STORED COMPRESSION_DEFLATED IFA_TEXT_FILE_MASK IFA_TEXT_FILE IFA_BINARY_FILE COMPRESSION_LEVEL_NONE COMPRESSION_LEVEL_DEFAULT COMPRESSION_LEVEL_FASTEST COMPRESSION_LEVEL_BEST_COMPRESSION =item :MISC_CONSTANTS Exports the following constants (only necessary for extending the module): FA_AMIGA FA_VAX_VMS FA_VM_CMS FA_ATARI_ST FA_OS2_HPFS FA_MACINTOSH FA_Z_SYSTEM FA_CPM FA_WINDOWS_NTFS GPBF_IMPLODING_8K_SLIDING_DICTIONARY_MASK GPBF_IMPLODING_3_SHANNON_FANO_TREES_MASK GPBF_IS_COMPRESSED_PATCHED_DATA_MASK COMPRESSION_SHRUNK DEFLATING_COMPRESSION_NORMAL DEFLATING_COMPRESSION_MAXIMUM DEFLATING_COMPRESSION_FAST DEFLATING_COMPRESSION_SUPER_FAST COMPRESSION_REDUCED_1 COMPRESSION_REDUCED_2 COMPRESSION_REDUCED_3 COMPRESSION_REDUCED_4 COMPRESSION_IMPLODED COMPRESSION_TOKENIZED COMPRESSION_DEFLATED_ENHANCED COMPRESSION_PKWARE_DATA_COMPRESSION_LIBRARY_IMPLODED =item :ERROR_CODES Explained below. Returned from most methods. AZ_OK AZ_STREAM_END AZ_ERROR AZ_FORMAT_ERROR AZ_IO_ERROR =back =head1 ERROR CODES Many of the methods in Archive::Zip return error codes. These are implemented as inline subroutines, using the C pragma. They can be imported into your namespace using the C<:ERROR_CODES> tag: use Archive::Zip qw( :ERROR_CODES ); ... unless ( $zip->read( 'myfile.zip' ) == AZ_OK ) { die "whoops!"; } =over 4 =item AZ_OK (0) Everything is fine. =item AZ_STREAM_END (1) The read stream (or central directory) ended normally. =item AZ_ERROR (2) There was some generic kind of error. =item AZ_FORMAT_ERROR (3) There is a format error in a ZIP file being read. =item AZ_IO_ERROR (4) There was an IO error. =back =head2 Compression Archive::Zip allows each member of a ZIP file to be compressed (using the Deflate algorithm) or uncompressed. Other compression algorithms that some versions of ZIP have been able to produce are not supported. Each member has two compression methods: the one it's stored as (this is always COMPRESSION_STORED for string and external file members), and the one you desire for the member in the zip file. These can be different, of course, so you can make a zip member that is not compressed out of one that is, and vice versa. 
You can inquire about the current compression and set the desired compression method: my $member = $zip->memberNamed( 'xyz.txt' ); $member->compressionMethod(); # return current compression # set to read uncompressed $member->desiredCompressionMethod( COMPRESSION_STORED ); # set to read compressed $member->desiredCompressionMethod( COMPRESSION_DEFLATED ); There are two different compression methods: =over 4 =item COMPRESSION_STORED File is stored (no compression) =item COMPRESSION_DEFLATED File is Deflated =back =head2 Compression Levels If a member's desiredCompressionMethod is COMPRESSION_DEFLATED, you can choose different compression levels. This choice may affect the speed of compression and decompression, as well as the size of the compressed member data. $member->desiredCompressionLevel( 9 ); The levels given can be: =over 4 =item 0 or COMPRESSION_LEVEL_NONE This is the same as saying $member->desiredCompressionMethod( COMPRESSION_STORED ); =item 1 .. 9 1 gives the best speed and worst compression, and 9 gives the best compression and worst speed. =item COMPRESSION_LEVEL_FASTEST This is a synonym for level 1. =item COMPRESSION_LEVEL_BEST_COMPRESSION This is a synonym for level 9. =item COMPRESSION_LEVEL_DEFAULT This gives a good compromise between speed and compression, and is currently equivalent to 6 (this is in the zlib code). This is the level that will be used if not specified. =back =head1 Archive::Zip Methods The Archive::Zip class (and its invisible subclass Archive::Zip::Archive) implement generic zip file functionality. Creating a new Archive::Zip object actually makes an Archive::Zip::Archive object, but you don't have to worry about this unless you're subclassing. =head2 Constructor =over 4 =item new( [$fileName] ) Make a new, empty zip archive. my $zip = Archive::Zip->new(); If an additional argument is passed, new() will call read() to read the contents of an archive: my $zip = Archive::Zip->new( 'xyz.zip' ); If a filename argument is passed and the read fails for any reason, new will return undef. For this reason, it may be better to call read separately. =back =head2 Zip Archive Utility Methods These Archive::Zip methods may be called as functions or as object methods. Do not call them as class methods: $zip = Archive::Zip->new(); $crc = Archive::Zip::computeCRC32( 'ghijkl' ); # OK $crc = $zip->computeCRC32( 'ghijkl' ); # also OK $crc = Archive::Zip->computeCRC32( 'ghijkl' ); # NOT OK =over 4 =item Archive::Zip::computeCRC32( $string [, $crc] ) This is a utility function that uses the Compress::Raw::Zlib CRC routine to compute a CRC-32. You can get the CRC of a string: $crc = Archive::Zip::computeCRC32( $string ); Or you can compute the running CRC: $crc = 0; $crc = Archive::Zip::computeCRC32( 'abcdef', $crc ); $crc = Archive::Zip::computeCRC32( 'ghijkl', $crc ); =item Archive::Zip::setChunkSize( $number ) Report or change chunk size used for reading and writing. This can make big differences in dealing with large files. Currently, this defaults to 32K. This also changes the chunk size used for Compress::Raw::Zlib. You must call setChunkSize() before reading or writing. This is not exportable, so you must call it like: Archive::Zip::setChunkSize( 4096 ); or as a method on a zip (though this is a global setting). Returns old chunk size. =item Archive::Zip::chunkSize() Returns the current chunk size: my $chunkSize = Archive::Zip::chunkSize(); =item Archive::Zip::setErrorHandler( \&subroutine ) Change the subroutine called with error strings. 
This defaults to \&Carp::carp, but you may want to change it to get the error strings. This is not exportable, so you must call it like: Archive::Zip::setErrorHandler( \&myErrorHandler ); If myErrorHandler is undef, resets handler to default. Returns old error handler. Note that if you call Carp::carp or a similar routine or if you're chaining to the default error handler from your error handler, you may want to increment the number of caller levels that are skipped (do not just set it to a number): $Carp::CarpLevel++; =item Archive::Zip::tempFile( [$tmpdir] ) Create a uniquely named temp file. It will be returned open for read/write. If C<$tmpdir> is given, it is used as the name of a directory to create the file in. If not given, creates the file using C. Generally, you can override this choice using the $ENV{TMPDIR} environment variable. But see the L documentation for your system. Note that on many systems, if you're running in taint mode, then you must make sure that C<$ENV{TMPDIR}> is untainted for it to be used. Will I create C<$tmpdir> if it doesn't exist (this is a change from prior versions!). Returns file handle and name: my ($fh, $name) = Archive::Zip::tempFile(); my ($fh, $name) = Archive::Zip::tempFile('myTempDir'); my $fh = Archive::Zip::tempFile(); # if you don't need the name =back =head2 Zip Archive Accessors =over 4 =item members() Return a copy of the members array my @members = $zip->members(); =item numberOfMembers() Return the number of members I have =item memberNames() Return a list of the (internal) file names of the zip members =item memberNamed( $string ) Return ref to member whose filename equals given filename or undef. C<$string> must be in Zip (Unix) filename format. =item membersMatching( $regex ) Return array of members whose filenames match given regular expression in list context. Returns number of matching members in scalar context. my @textFileMembers = $zip->membersMatching( '.*\.txt' ); # or my $numberOfTextFiles = $zip->membersMatching( '.*\.txt' ); =item diskNumber() Return the disk that I start on. Not used for writing zips, but might be interesting if you read a zip in. This should be 0, as Archive::Zip does not handle multi-volume archives. =item diskNumberWithStartOfCentralDirectory() Return the disk number that holds the beginning of the central directory. Not used for writing zips, but might be interesting if you read a zip in. This should be 0, as Archive::Zip does not handle multi-volume archives. =item numberOfCentralDirectoriesOnThisDisk() Return the number of CD structures in the zipfile last read in. Not used for writing zips, but might be interesting if you read a zip in. =item numberOfCentralDirectories() Return the number of CD structures in the zipfile last read in. Not used for writing zips, but might be interesting if you read a zip in. =item centralDirectorySize() Returns central directory size, as read from an external zip file. Not used for writing zips, but might be interesting if you read a zip in. =item centralDirectoryOffsetWRTStartingDiskNumber() Returns the offset into the zip file where the CD begins. Not used for writing zips, but might be interesting if you read a zip in. =item zipfileComment( [$string] ) Get or set the zipfile comment. Returns the old comment. print $zip->zipfileComment(); $zip->zipfileComment( 'New Comment' ); =item eocdOffset() Returns the (unexpected) number of bytes between where the EOCD was found and where it expected to be. 
This is normally 0, but would be positive if something (a virus, perhaps) had added bytes somewhere before the EOCD. Not used for writing zips, but might be interesting if you read a zip in. Here is an example of how you can diagnose this: my $zip = Archive::Zip->new('somefile.zip'); if ($zip->eocdOffset()) { warn "A virus has added ", $zip->eocdOffset, " bytes of garbage\n"; } The C is used to adjust the starting position of member headers, if necessary. =item fileName() Returns the name of the file last read from. If nothing has been read yet, returns an empty string; if read from a file handle, returns the handle in string form. =back =head2 Zip Archive Member Operations Various operations on a zip file modify members. When a member is passed as an argument, you can either use a reference to the member itself, or the name of a member. Of course, using the name requires that names be unique within a zip (this is not enforced). =over 4 =item removeMember( $memberOrName ) Remove and return the given member, or match its name and remove it. Returns undef if member or name doesn't exist in this Zip. No-op if member does not belong to this zip. =item replaceMember( $memberOrName, $newMember ) Remove and return the given member, or match its name and remove it. Replace with new member. Returns undef if member or name doesn't exist in this Zip, or if C<$newMember> is undefined. It is an (undiagnosed) error to provide a C<$newMember> that is a member of the zip being modified. my $member1 = $zip->removeMember( 'xyz' ); my $member2 = $zip->replaceMember( 'abc', $member1 ); # now, $member2 (named 'abc') is not in $zip, # and $member1 (named 'xyz') is, having taken $member2's place. =item extractMember( $memberOrName [, $extractedName ] ) Extract the given member, or match its name and extract it. Returns undef if member doesn't exist in this Zip. If optional second arg is given, use it as the name of the extracted member. Otherwise, the internal filename of the member is used as the name of the extracted file or directory. If you pass C<$extractedName>, it should be in the local file system's format. All necessary directories will be created. Returns C on success. =item extractMemberWithoutPaths( $memberOrName [, $extractedName ] ) Extract the given member, or match its name and extract it. Does not use path information (extracts into the current directory). Returns undef if member doesn't exist in this Zip. If optional second arg is given, use it as the name of the extracted member (its paths will be deleted too). Otherwise, the internal filename of the member (minus paths) is used as the name of the extracted file or directory. Returns C on success. =item addMember( $member ) Append a member (possibly from another zip file) to the zip file. Returns the new member. Generally, you will use addFile(), addDirectory(), addFileOrDirectory(), addString(), or read() to add members. # Move member named 'abc' to end of zip: my $member = $zip->removeMember( 'abc' ); $zip->addMember( $member ); =item updateMember( $memberOrName, $fileName ) Update a single member from the file or directory named C<$fileName>. Returns the (possibly added or updated) member, if any; C on errors. The comparison is based on C and (in the case of a non-directory) the size of the file. =item addFile( $fileName [, $newName ] ) Append a member whose data comes from an external file, returning the member or undef. 
The member will have its file name set to the name of the external file, and its desiredCompressionMethod set to COMPRESSION_DEFLATED. The file attributes and last modification time will be set from the file. If the name given does not represent a readable plain file or symbolic link, undef will be returned. C<$fileName> must be in the format required for the local file system. The optional C<$newName> argument sets the internal file name to something different than the given $fileName. C<$newName>, if given, must be in Zip name format (i.e. Unix). The text mode bit will be set if the contents appears to be text (as returned by the C<-T> perl operator). I that you shouldn't (generally) use absolute path names in zip member names, as this will cause problems with some zip tools as well as introduce a security hole and make the zip harder to use. =item addDirectory( $directoryName [, $fileName ] ) Append a member created from the given directory name. The directory name does not have to name an existing directory. If the named directory exists, the file modification time and permissions are set from the existing directory, otherwise they are set to now and permissive default permissions. C<$directoryName> must be in local file system format. The optional second argument sets the name of the archive member (which defaults to C<$directoryName>). If given, it must be in Zip (Unix) format. Returns the new member. =item addFileOrDirectory( $name [, $newName ] ) Append a member from the file or directory named $name. If $newName is given, use it for the name of the new member. Will add or remove trailing slashes from $newName as needed. C<$name> must be in local file system format. The optional second argument sets the name of the archive member (which defaults to C<$name>). If given, it must be in Zip (Unix) format. =item addString( $stringOrStringRef, $name ) Append a member created from the given string or string reference. The name is given by the second argument. Returns the new member. The last modification time will be set to now, and the file attributes will be set to permissive defaults. my $member = $zip->addString( 'This is a test', 'test.txt' ); =item contents( $memberOrMemberName [, $newContents ] ) Returns the uncompressed data for a particular member, or undef. print "xyz.txt contains " . $zip->contents( 'xyz.txt' ); Also can change the contents of a member: $zip->contents( 'xyz.txt', 'This is the new contents' ); If called expecting an array as the return value, it will include the status as the second value in the array. ($content, $status) = $zip->contents( 'xyz.txt'); =back =head2 Zip Archive I/O operations A Zip archive can be written to a file or file handle, or read from one. =over 4 =item writeToFileNamed( $fileName ) Write a zip archive to named file. Returns C on success. my $status = $zip->writeToFileNamed( 'xx.zip' ); die "error somewhere" if $status != AZ_OK; Note that if you use the same name as an existing zip file that you read in, you will clobber ZipFileMembers. So instead, write to a different file name, then delete the original. If you use the C or C methods, you can re-write the original zip in this way. C<$fileName> should be a valid file name on your system. =item writeToFileHandle( $fileHandle [, $seekable] ) Write a zip archive to a file handle. Return AZ_OK on success. The optional second arg tells whether or not to try to seek backwards to re-write headers. If not provided, it is set if the Perl C<-f> test returns true. 
This could fail on some operating systems, though. my $fh = IO::File->new( 'someFile.zip', 'w' ); unless ( $zip->writeToFileHandle( $fh ) == AZ_OK ) { # error handling } If you pass a file handle that is not seekable (like if you're writing to a pipe or a socket), pass a false second argument: my $fh = IO::File->new( '| cat > somefile.zip', 'w' ); $zip->writeToFileHandle( $fh, 0 ); # fh is not seekable If this method fails during the write of a member, that member and all following it will return false from C. See writeCentralDirectory() for a way to deal with this. If you want, you can write data to the file handle before passing it to writeToFileHandle(); this could be used (for instance) for making self-extracting archives. However, this only works reliably when writing to a real file (as opposed to STDOUT or some other possible non-file). See examples/selfex.pl for how to write a self-extracting archive. =item writeCentralDirectory( $fileHandle [, $offset ] ) Writes the central directory structure to the given file handle. Returns AZ_OK on success. If given an $offset, will seek to that point before writing. This can be used for recovery in cases where writeToFileHandle or writeToFileNamed returns an IO error because of running out of space on the destination file. You can truncate the zip by seeking backwards and then writing the directory: my $fh = IO::File->new( 'someFile.zip', 'w' ); my $retval = $zip->writeToFileHandle( $fh ); if ( $retval == AZ_IO_ERROR ) { my @unwritten = grep { not $_->wasWritten() } $zip->members(); if (@unwritten) { $zip->removeMember( $member ) foreach my $member ( @unwritten ); $zip->writeCentralDirectory( $fh, $unwritten[0]->writeLocalHeaderRelativeOffset()); } } =item overwriteAs( $newName ) Write the zip to the specified file, as safely as possible. This is done by first writing to a temp file, then renaming the original if it exists, then renaming the temp file, then deleting the renamed original if it exists. Returns AZ_OK if successful. =item overwrite() Write back to the original zip file. See overwriteAs() above. If the zip was not ever read from a file, this generates an error. =item read( $fileName ) Read zipfile headers from a zip file, appending new members. Returns C or error code. my $zipFile = Archive::Zip->new(); my $status = $zipFile->read( '/some/FileName.zip' ); =item readFromFileHandle( $fileHandle, $filename ) Read zipfile headers from an already-opened file handle, appending new members. Does not close the file handle. Returns C or error code. Note that this requires a seekable file handle; reading from a stream is not yet supported. my $fh = IO::File->new( '/some/FileName.zip', 'r' ); my $zip1 = Archive::Zip->new(); my $status = $zip1->readFromFileHandle( $fh ); my $zip2 = Archive::Zip->new(); $status = $zip2->readFromFileHandle( $fh ); =back =head2 Zip Archive Tree operations These used to be in Archive::Zip::Tree but got moved into Archive::Zip. They enable operation on an entire tree of members or files. A usage example: use Archive::Zip; my $zip = Archive::Zip->new(); # add all readable files and directories below . as xyz/* $zip->addTree( '.', 'xyz' ); # add all readable plain files below /abc as def/* $zip->addTree( '/abc', 'def', sub { -f && -r } ); # add all .c files below /tmp as stuff/* $zip->addTreeMatching( '/tmp', 'stuff', '\.c$' ); # add all .o files below /tmp as stuff/* if they aren't writable $zip->addTreeMatching( '/tmp', 'stuff', '\.o$', sub { ! 
-w } ); # add all .so files below /tmp that are smaller than 200 bytes as stuff/* $zip->addTreeMatching( '/tmp', 'stuff', '\.o$', sub { -s < 200 } ); # and write them into a file $zip->writeToFileNamed('xxx.zip'); # now extract the same files into /tmpx $zip->extractTree( 'stuff', '/tmpx' ); =over 4 =item $zip->addTree( $root, $dest [,$pred] ) -- Add tree of files to a zip C<$root> is the root of the tree of files and directories to be added. It is a valid directory name on your system. C<$dest> is the name for the root in the zip file (undef or blank means to use relative pathnames). It is a valid ZIP directory name (that is, it uses forward slashes (/) for separating directory components). C<$pred> is an optional subroutine reference to select files: it is passed the name of the prospective file or directory using C<$_>, and if it returns true, the file or directory will be included. The default is to add all readable files and directories. For instance, using my $pred = sub { /\.txt/ }; $zip->addTree( '.', '', $pred ); will add all the .txt files in and below the current directory, using relative names, and making the names identical in the zipfile: original name zip member name ./xyz xyz ./a/ a/ ./a/b a/b To translate absolute to relative pathnames, just pass them in: $zip->addTree( '/c/d', 'a' ); original name zip member name /c/d/xyz a/xyz /c/d/a/ a/a/ /c/d/a/b a/a/b Returns AZ_OK on success. Note that this will not follow symbolic links to directories. Note also that this does not check for the validity of filenames. Note that you generally I want to make zip archive member names absolute. =item $zip->addTreeMatching( $root, $dest, $pattern [,$pred] ) $root is the root of the tree of files and directories to be added $dest is the name for the root in the zip file (undef means to use relative pathnames) $pattern is a (non-anchored) regular expression for filenames to match $pred is an optional subroutine reference to select files: it is passed the name of the prospective file or directory in C<$_>, and if it returns true, the file or directory will be included. The default is to add all readable files and directories. To add all files in and below the current dirctory whose names end in C<.pl>, and make them extract into a subdirectory named C, do this: $zip->addTreeMatching( '.', 'xyz', '\.pl$' ) To add all I files in and below the dirctory named C whose names end in C<.pl>, and make them extract into a subdirectory named C, do this: $zip->addTreeMatching( '/abc', 'xyz', '\.pl$', sub { -w } ) Returns AZ_OK on success. Note that this will not follow symbolic links to directories. =item $zip->updateTree( $root, [ $dest, [ $pred [, $mirror]]] ); Update a zip file from a directory tree. C takes the same arguments as C, but first checks to see whether the file or directory already exists in the zip file, and whether it has been changed. If the fourth argument C<$mirror> is true, then delete all my members if corresponding files weren't found. Returns an error code or AZ_OK if all is well. =item $zip->extractTree() =item $zip->extractTree( $root ) =item $zip->extractTree( $root, $dest ) =item $zip->extractTree( $root, $dest, $volume ) If you don't give any arguments at all, will extract all the files in the zip with their original names. If you supply one argument for C<$root>, C will extract all the members whose names start with C<$root> into the current directory, stripping off C<$root> first. C<$root> is in Zip (Unix) format. 
For instance, $zip->extractTree( 'a' ); when applied to a zip containing the files: a/x a/b/c ax/d/e d/e will extract: a/x as ./x a/b/c as ./b/c If you give two arguments, C extracts all the members whose names start with C<$root>. It will translate C<$root> into C<$dest> to construct the destination file name. C<$root> and C<$dest> are in Zip (Unix) format. For instance, $zip->extractTree( 'a', 'd/e' ); when applied to a zip containing the files: a/x a/b/c ax/d/e d/e will extract: a/x to d/e/x a/b/c to d/e/b/c and ignore ax/d/e and d/e If you give three arguments, C extracts all the members whose names start with C<$root>. It will translate C<$root> into C<$dest> to construct the destination file name, and then it will convert to local file system format, using C<$volume> as the name of the destination volume. C<$root> and C<$dest> are in Zip (Unix) format. C<$volume> is in local file system format. For instance, under Windows, $zip->extractTree( 'a', 'd/e', 'f:' ); when applied to a zip containing the files: a/x a/b/c ax/d/e d/e will extract: a/x to f:d/e/x a/b/c to f:d/e/b/c and ignore ax/d/e and d/e If you want absolute paths (the prior example used paths relative to the current directory on the destination volume, you can specify these in C<$dest>: $zip->extractTree( 'a', '/d/e', 'f:' ); when applied to a zip containing the files: a/x a/b/c ax/d/e d/e will extract: a/x to f:\d\e\x a/b/c to f:\d\e\b\c and ignore ax/d/e and d/e Returns an error code or AZ_OK if everything worked OK. =back =head1 MEMBER OPERATIONS =head2 Member Class Methods Several constructors allow you to construct members without adding them to a zip archive. These work the same as the addFile(), addDirectory(), and addString() zip instance methods described above, but they don't add the new members to a zip. =over 4 =item Archive::Zip::Member->newFromString( $stringOrStringRef [, $fileName] ) Construct a new member from the given string. Returns undef on error. my $member = Archive::Zip::Member->newFromString( 'This is a test', 'xyz.txt' ); =item newFromFile( $fileName ) Construct a new member from the given file. Returns undef on error. my $member = Archive::Zip::Member->newFromFile( 'xyz.txt' ); =item newDirectoryNamed( $directoryName [, $zipname ] ) Construct a new member from the given directory. C<$directoryName> must be a valid name on your file system; it doesn't have to exist. If given, C<$zipname> will be the name of the zip member; it must be a valid Zip (Unix) name. If not given, it will be converted from C<$directoryName>. Returns undef on error. my $member = Archive::Zip::Member->newDirectoryNamed( 'CVS/' ); =back =head2 Member Simple accessors These methods get (and/or set) member attribute values. =over 4 =item versionMadeBy() Gets the field from the member header. =item fileAttributeFormat( [$format] ) Gets or sets the field from the member header. These are C values. =item versionNeededToExtract() Gets the field from the member header. =item bitFlag() Gets the general purpose bit field from the member header. This is where the C bits live. =item compressionMethod() Returns the member compression method. This is the method that is currently being used to compress the member data. This will be COMPRESSION_STORED for added string or file members, or any of the C values for members from a zip file. However, this module can only handle members whose data is in COMPRESSION_STORED or COMPRESSION_DEFLATED format. =item desiredCompressionMethod( [$method] ) Get or set the member's C. 
This is the compression method that will be used when the member is written. Returns prior desiredCompressionMethod. Only COMPRESSION_DEFLATED or COMPRESSION_STORED are valid arguments. Changing to COMPRESSION_STORED will change the member desiredCompressionLevel to 0; changing to COMPRESSION_DEFLATED will change the member desiredCompressionLevel to COMPRESSION_LEVEL_DEFAULT. =item desiredCompressionLevel( [$method] ) Get or set the member's desiredCompressionLevel This is the method that will be used to write. Returns prior desiredCompressionLevel. Valid arguments are 0 through 9, COMPRESSION_LEVEL_NONE, COMPRESSION_LEVEL_DEFAULT, COMPRESSION_LEVEL_BEST_COMPRESSION, and COMPRESSION_LEVEL_FASTEST. 0 or COMPRESSION_LEVEL_NONE will change the desiredCompressionMethod to COMPRESSION_STORED. All other arguments will change the desiredCompressionMethod to COMPRESSION_DEFLATED. =item externalFileName() Return the member's external file name, if any, or undef. =item fileName() Get or set the member's internal filename. Returns the (possibly new) filename. Names will have backslashes converted to forward slashes, and will have multiple consecutive slashes converted to single ones. =item lastModFileDateTime() Return the member's last modification date/time stamp in MS-DOS format. =item lastModTime() Return the member's last modification date/time stamp, converted to unix localtime format. print "Mod Time: " . scalar( localtime( $member->lastModTime() ) ); =item setLastModFileDateTimeFromUnix() Set the member's lastModFileDateTime from the given unix time. $member->setLastModFileDateTimeFromUnix( time() ); =item internalFileAttributes() Return the internal file attributes field from the zip header. This is only set for members read from a zip file. =item externalFileAttributes() Return member attributes as read from the ZIP file. Note that these are NOT UNIX! =item unixFileAttributes( [$newAttributes] ) Get or set the member's file attributes using UNIX file attributes. Returns old attributes. my $oldAttribs = $member->unixFileAttributes( 0666 ); Note that the return value has more than just the file permissions, so you will have to mask off the lowest bits for comparisions. =item localExtraField( [$newField] ) Gets or sets the extra field that was read from the local header. This is not set for a member from a zip file until after the member has been written out. The extra field must be in the proper format. =item cdExtraField( [$newField] ) Gets or sets the extra field that was read from the central directory header. The extra field must be in the proper format. =item extraFields() Return both local and CD extra fields, concatenated. =item fileComment( [$newComment] ) Get or set the member's file comment. =item hasDataDescriptor() Get or set the data descriptor flag. If this is set, the local header will not necessarily have the correct data sizes. Instead, a small structure will be stored at the end of the member data with these values. This should be transparent in normal operation. =item crc32() Return the CRC-32 value for this member. This will not be set for members that were constructed from strings or external files until after the member has been written. =item crc32String() Return the CRC-32 value for this member as an 8 character printable hex string. This will not be set for members that were constructed from strings or external files until after the member has been written. =item compressedSize() Return the compressed size for this member. 
This will not be set for members that were constructed from strings or external files until after the member has been written. =item uncompressedSize() Return the uncompressed size for this member. =item isEncrypted() Return true if this member is encrypted. The Archive::Zip module does not currently create or extract encrypted members. =item isTextFile( [$flag] ) Returns true if I am a text file. Also can set the status if given an argument (then returns old state). Note that this module does not currently do anything with this flag upon extraction or storage. That is, bytes are stored in native format whether or not they came from a text file. =item isBinaryFile() Returns true if I am a binary file. Also can set the status if given an argument (then returns old state). Note that this module does not currently do anything with this flag upon extraction or storage. That is, bytes are stored in native format whether or not they came from a text file. =item extractToFileNamed( $fileName ) Extract me to a file with the given name. The file will be created with default modes. Directories will be created as needed. The C<$fileName> argument should be a valid file name on your file system. Returns AZ_OK on success. =item isDirectory() Returns true if I am a directory. =item writeLocalHeaderRelativeOffset() Returns the file offset in bytes the last time I was written. =item wasWritten() Returns true if I was successfully written. Reset at the beginning of a write attempt. =back =head2 Low-level member data reading It is possible to use lower-level routines to access member data streams, rather than the extract* methods and contents(). For instance, here is how to print the uncompressed contents of a member in chunks using these methods: my ( $member, $status, $bufferRef ); $member = $zip->memberNamed( 'xyz.txt' ); $member->desiredCompressionMethod( COMPRESSION_STORED ); $status = $member->rewindData(); die "error $status" unless $status == AZ_OK; while ( ! $member->readIsDone() ) { ( $bufferRef, $status ) = $member->readChunk(); die "error $status" if $status != AZ_OK && $status != AZ_STREAM_END; # do something with $bufferRef: print $$bufferRef; } $member->endRead(); =over 4 =item readChunk( [$chunkSize] ) This reads the next chunk of given size from the member's data stream and compresses or uncompresses it as necessary, returning a reference to the bytes read and a status. If size argument is not given, defaults to global set by Archive::Zip::setChunkSize. Status is AZ_OK on success until the last chunk, where it returns AZ_STREAM_END. Returns C<( \$bytes, $status)>. my ( $outRef, $status ) = $self->readChunk(); print $$outRef if $status != AZ_OK && $status != AZ_STREAM_END; =item rewindData() Rewind data and set up for reading data streams or writing zip files. Can take options for C or C, but this isn't likely to be necessary. Subclass overrides should call this method. Returns C on success. =item endRead() Reset the read variables and free the inflater or deflater. Must be called to close files, etc. Returns AZ_OK on success. =item readIsDone() Return true if the read has run out of data or errored out. =item contents() Return the entire uncompressed member data or undef in scalar context. 
When called in array context, returns C<( $string, $status )>; status will be AZ_OK on success: my $string = $member->contents(); # or my ( $string, $status ) = $member->contents(); die "error $status" unless $status == AZ_OK; Can also be used to set the contents of a member (this may change the class of the member): $member->contents( "this is my new contents" ); =item extractToFileHandle( $fh ) Extract (and uncompress, if necessary) the member's contents to the given file handle. Return AZ_OK on success. =back =head1 Archive::Zip::FileMember methods The Archive::Zip::FileMember class extends Archive::Zip::Member. It is the base class for both ZipFileMember and NewFileMember classes. This class adds an C and an C member to keep track of the external file. =over 4 =item externalFileName() Return the member's external filename. =item fh() Return the member's read file handle. Automatically opens file if necessary. =back =head1 Archive::Zip::ZipFileMember methods The Archive::Zip::ZipFileMember class represents members that have been read from external zip files. =over 4 =item diskNumberStart() Returns the disk number that the member's local header resides in. Should be 0. =item localHeaderRelativeOffset() Returns the offset into the zip file where the member's local header is. =item dataOffset() Returns the offset from the beginning of the zip file to the member's data. =back =head1 REQUIRED MODULES L requires several other modules: L L L L L L L L L L L =head1 BUGS AND CAVEATS =head2 When not to use Archive::Zip If you are just going to be extracting zips (and/or other archives) you are recommended to look at using L instead, as it is much easier to use and factors out archive-specific functionality. =head2 Try to avoid IO::Scalar One of the most common ways to use Archive::Zip is to generate Zip files in-memory. Most people have use L for this purpose. Unfortunately, as of 1.11 this module no longer works with L as it incorrectly implements seeking. Anybody using L should consider porting to L, which is smaller, lighter, and is implemented to be perfectly compatible with regular seekable filehandles. Support for L most likely will B be restored in the future, as L itself cannot change the way it is implemented due to back-compatibility issues. =head1 TO DO * auto-choosing storing vs compression * extra field hooks (see notes.txt) * check for dups on addition/renaming? * Text file extraction (line end translation) * Reading zip files from non-seekable inputs (Perhaps by proxying through IO::String?) * separate unused constants into separate module * cookbook style docs * Handle tainted paths correctly * Work on better compatability with other IO:: modules =head1 SUPPORT Bugs should be reported via the CPAN bug tracker L For other issues contact the maintainer =head1 AUTHOR Adam Kennedy Eadamk@cpan.orgE Previously maintained by Steve Peters Esteve@fisharerojo.orgE. File attributes code by Maurice Aubrey Emaurice@lovelyfilth.comE. Originally by Ned Konz Enedkonz@cpan.orgE. =head1 COPYRIGHT Some parts copyright 2006 - 2009 Adam Kennedy. Some parts copyright 2005 Steve Peters. Original work copyright 2000 - 2004 Ned Konz. This program is free software; you can redistribute it and/or modify it under the same terms as Perl itself. =head1 SEE ALSO Look at L which is a wrapper that allows one to read Zip archive members as if they were files. L, L, L There is a Japanese translation of this document at L that was done by DEQ Edeq@oct.zaq.ne.jpE . Thanks! 
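As a small illustrative sketch of the Archive::Zip::MemberRead wrapper mentioned at the start of this section (the archive name somefile.zip and the member name data.txt are placeholders; the calls follow the interface documented in that module), a member can be read line by line much as if it were an ordinary file handle:

    use Archive::Zip;
    use Archive::Zip::MemberRead;

    # Open an existing archive; new() returns undef if the read fails.
    my $zip = Archive::Zip->new( 'somefile.zip' )
        or die 'unable to read somefile.zip';

    # Wrap one member in a file-handle-like reader and print its lines.
    my $fh = Archive::Zip::MemberRead->new( $zip, 'data.txt' );
    while ( defined( my $line = $fh->getline() ) ) {
        print "read: $line\n";
    }
    $fh->close();
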
=cut gdata/inst/perl/Archive/Zip/0000755000175100001440000000000013115545572015422 5ustar hornikusersgdata/inst/perl/Archive/Zip/Archive.pm0000644000175100001440000007132313003720416017333 0ustar hornikuserspackage Archive::Zip::Archive; # Represents a generic ZIP archive use strict; use File::Path; use File::Find (); use File::Spec (); use File::Copy (); use File::Basename; use Cwd; use vars qw( $VERSION @ISA ); BEGIN { $VERSION = '1.30'; @ISA = qw( Archive::Zip ); } use Archive::Zip qw( :CONSTANTS :ERROR_CODES :PKZIP_CONSTANTS :UTILITY_METHODS ); # Note that this returns undef on read errors, else new zip object. sub new { my $class = shift; my $self = bless( { 'diskNumber' => 0, 'diskNumberWithStartOfCentralDirectory' => 0, 'numberOfCentralDirectoriesOnThisDisk' => 0, # shld be # of members 'numberOfCentralDirectories' => 0, # shld be # of members 'centralDirectorySize' => 0, # must re-compute on write 'centralDirectoryOffsetWRTStartingDiskNumber' => 0, # must re-compute 'writeEOCDOffset' => 0, 'writeCentralDirectoryOffset' => 0, 'zipfileComment' => '', 'eocdOffset' => 0, 'fileName' => '' }, $class ); $self->{'members'} = []; my $fileName = ( ref( $_[0] ) eq 'HASH' ) ? shift->{filename} : shift; if ($fileName) { my $status = $self->read($fileName); return $status == AZ_OK ? $self : undef; } return $self; } sub storeSymbolicLink { my $self = shift; $self->{'storeSymbolicLink'} = shift; } sub members { @{ shift->{'members'} }; } sub numberOfMembers { scalar( shift->members() ); } sub memberNames { my $self = shift; return map { $_->fileName() } $self->members(); } # return ref to member with given name or undef sub memberNamed { my $self = shift; my $fileName = ( ref( $_[0] ) eq 'HASH' ) ? shift->{zipName} : shift; foreach my $member ( $self->members() ) { return $member if $member->fileName() eq $fileName; } return undef; } sub membersMatching { my $self = shift; my $pattern = ( ref( $_[0] ) eq 'HASH' ) ? shift->{regex} : shift; return grep { $_->fileName() =~ /$pattern/ } $self->members(); } sub diskNumber { shift->{'diskNumber'}; } sub diskNumberWithStartOfCentralDirectory { shift->{'diskNumberWithStartOfCentralDirectory'}; } sub numberOfCentralDirectoriesOnThisDisk { shift->{'numberOfCentralDirectoriesOnThisDisk'}; } sub numberOfCentralDirectories { shift->{'numberOfCentralDirectories'}; } sub centralDirectorySize { shift->{'centralDirectorySize'}; } sub centralDirectoryOffsetWRTStartingDiskNumber { shift->{'centralDirectoryOffsetWRTStartingDiskNumber'}; } sub zipfileComment { my $self = shift; my $comment = $self->{'zipfileComment'}; if (@_) { my $new_comment = ( ref( $_[0] ) eq 'HASH' ) ? shift->{comment} : shift; $self->{'zipfileComment'} = pack( 'C0a*', $new_comment ); # avoid unicode } return $comment; } sub eocdOffset { shift->{'eocdOffset'}; } # Return the name of the file last read. sub fileName { shift->{'fileName'}; } sub removeMember { my $self = shift; my $member = ( ref( $_[0] ) eq 'HASH' ) ? 
shift->{memberOrZipName} : shift; $member = $self->memberNamed($member) unless ref($member); return undef unless $member; my @newMembers = grep { $_ != $member } $self->members(); $self->{'members'} = \@newMembers; return $member; } sub replaceMember { my $self = shift; my ( $oldMember, $newMember ); if ( ref( $_[0] ) eq 'HASH' ) { $oldMember = $_[0]->{memberOrZipName}; $newMember = $_[0]->{newMember}; } else { ( $oldMember, $newMember ) = @_; } $oldMember = $self->memberNamed($oldMember) unless ref($oldMember); return undef unless $oldMember; return undef unless $newMember; my @newMembers = map { ( $_ == $oldMember ) ? $newMember : $_ } $self->members(); $self->{'members'} = \@newMembers; return $oldMember; } sub extractMember { my $self = shift; my ( $member, $name ); if ( ref( $_[0] ) eq 'HASH' ) { $member = $_[0]->{memberOrZipName}; $name = $_[0]->{name}; } else { ( $member, $name ) = @_; } $member = $self->memberNamed($member) unless ref($member); return _error('member not found') unless $member; my $originalSize = $member->compressedSize(); my ( $volumeName, $dirName, $fileName ); if ( defined($name) ) { ( $volumeName, $dirName, $fileName ) = File::Spec->splitpath($name); $dirName = File::Spec->catpath( $volumeName, $dirName, '' ); } else { $name = $member->fileName(); ( $dirName = $name ) =~ s{[^/]*$}{}; $dirName = Archive::Zip::_asLocalName($dirName); $name = Archive::Zip::_asLocalName($name); } if ( $dirName && !-d $dirName ) { mkpath($dirName); return _ioError("can't create dir $dirName") if ( !-d $dirName ); } my $rc = $member->extractToFileNamed( $name, @_ ); # TODO refactor this fix into extractToFileNamed() $member->{'compressedSize'} = $originalSize; return $rc; } sub extractMemberWithoutPaths { my $self = shift; my ( $member, $name ); if ( ref( $_[0] ) eq 'HASH' ) { $member = $_[0]->{memberOrZipName}; $name = $_[0]->{name}; } else { ( $member, $name ) = @_; } $member = $self->memberNamed($member) unless ref($member); return _error('member not found') unless $member; my $originalSize = $member->compressedSize(); return AZ_OK if $member->isDirectory(); unless ($name) { $name = $member->fileName(); $name =~ s{.*/}{}; # strip off directories, if any $name = Archive::Zip::_asLocalName($name); } my $rc = $member->extractToFileNamed( $name, @_ ); $member->{'compressedSize'} = $originalSize; return $rc; } sub addMember { my $self = shift; my $newMember = ( ref( $_[0] ) eq 'HASH' ) ? 
shift->{member} : shift; push( @{ $self->{'members'} }, $newMember ) if $newMember; return $newMember; } sub addFile { my $self = shift; my ( $fileName, $newName, $compressionLevel ); if ( ref( $_[0] ) eq 'HASH' ) { $fileName = $_[0]->{filename}; $newName = $_[0]->{zipName}; $compressionLevel = $_[0]->{compressionLevel}; } else { ( $fileName, $newName, $compressionLevel ) = @_; } my $newMember = $self->ZIPMEMBERCLASS->newFromFile( $fileName, $newName ); $newMember->desiredCompressionLevel($compressionLevel); if ( $self->{'storeSymbolicLink'} && -l $fileName ) { my $newMember = $self->ZIPMEMBERCLASS->newFromString(readlink $fileName, $newName); # For symbolic links, External File Attribute is set to 0xA1FF0000 by Info-ZIP $newMember->{'externalFileAttributes'} = 0xA1FF0000; $self->addMember($newMember); } else { $self->addMember($newMember); } return $newMember; } sub addString { my $self = shift; my ( $stringOrStringRef, $name, $compressionLevel ); if ( ref( $_[0] ) eq 'HASH' ) { $stringOrStringRef = $_[0]->{string}; $name = $_[0]->{zipName}; $compressionLevel = $_[0]->{compressionLevel}; } else { ( $stringOrStringRef, $name, $compressionLevel ) = @_;; } my $newMember = $self->ZIPMEMBERCLASS->newFromString( $stringOrStringRef, $name ); $newMember->desiredCompressionLevel($compressionLevel); return $self->addMember($newMember); } sub addDirectory { my $self = shift; my ( $name, $newName ); if ( ref( $_[0] ) eq 'HASH' ) { $name = $_[0]->{directoryName}; $newName = $_[0]->{zipName}; } else { ( $name, $newName ) = @_; } my $newMember = $self->ZIPMEMBERCLASS->newDirectoryNamed( $name, $newName ); if ( $self->{'storeSymbolicLink'} && -l $name ) { my $link = readlink $name; ( $newName =~ s{/$}{} ) if $newName; # Strip trailing / my $newMember = $self->ZIPMEMBERCLASS->newFromString($link, $newName); # For symbolic links, External File Attribute is set to 0xA1FF0000 by Info-ZIP $newMember->{'externalFileAttributes'} = 0xA1FF0000; $self->addMember($newMember); } else { $self->addMember($newMember); } return $newMember; } # add either a file or a directory. sub addFileOrDirectory { my $self = shift; my ( $name, $newName, $compressionLevel ); if ( ref( $_[0] ) eq 'HASH' ) { $name = $_[0]->{name}; $newName = $_[0]->{zipName}; $compressionLevel = $_[0]->{compressionLevel}; } else { ( $name, $newName, $compressionLevel ) = @_; } $name =~ s{/$}{}; if ( $newName ) { $newName =~ s{/$}{}; } else { $newName = $name; } if ( -f $name ) { return $self->addFile( $name, $newName, $compressionLevel ); } elsif ( -d $name ) { return $self->addDirectory( $name, $newName ); } else { return _error("$name is neither a file nor a directory"); } } sub contents { my $self = shift; my ( $member, $newContents ); if ( ref( $_[0] ) eq 'HASH' ) { $member = $_[0]->{memberOrZipName}; $newContents = $_[0]->{contents}; } else { ( $member, $newContents ) = @_; } return _error('No member name given') unless $member; $member = $self->memberNamed($member) unless ref($member); return undef unless $member; return $member->contents($newContents); } sub writeToFileNamed { my $self = shift; my $fileName = ( ref( $_[0] ) eq 'HASH' ) ? shift->{filename} : shift; # local FS format foreach my $member ( $self->members() ) { if ( $member->_usesFileNamed($fileName) ) { return _error( "$fileName is needed by member " . $member->fileName() . "; consider using overwrite() or overwriteAs() instead." 
); } } my ( $status, $fh ) = _newFileHandle( $fileName, 'w' ); return _ioError("Can't open $fileName for write") unless $status; my $retval = $self->writeToFileHandle( $fh, 1 ); $fh->close(); $fh = undef; return $retval; } # It is possible to write data to the FH before calling this, # perhaps to make a self-extracting archive. sub writeToFileHandle { my $self = shift; my ( $fh, $fhIsSeekable ); if ( ref( $_[0] ) eq 'HASH' ) { $fh = $_[0]->{fileHandle}; $fhIsSeekable = exists( $_[0]->{seek} ) ? $_[0]->{seek} : _isSeekable($fh); } else { $fh = shift; $fhIsSeekable = @_ ? shift : _isSeekable($fh); } return _error('No filehandle given') unless $fh; return _ioError('filehandle not open') unless $fh->opened(); _binmode($fh); # Find out where the current position is. my $offset = $fhIsSeekable ? $fh->tell() : 0; $offset = 0 if $offset < 0; foreach my $member ( $self->members() ) { my $retval = $member->_writeToFileHandle( $fh, $fhIsSeekable, $offset ); $member->endRead(); return $retval if $retval != AZ_OK; $offset += $member->_localHeaderSize() + $member->_writeOffset(); $offset += $member->hasDataDescriptor() ? DATA_DESCRIPTOR_LENGTH + SIGNATURE_LENGTH : 0; # changed this so it reflects the last successful position $self->{'writeCentralDirectoryOffset'} = $offset; } return $self->writeCentralDirectory($fh); } # Write zip back to the original file, # as safely as possible. # Returns AZ_OK if successful. sub overwrite { my $self = shift; return $self->overwriteAs( $self->{'fileName'} ); } # Write zip to the specified file, # as safely as possible. # Returns AZ_OK if successful. sub overwriteAs { my $self = shift; my $zipName = ( ref( $_[0] ) eq 'HASH' ) ? $_[0]->{filename} : shift; return _error("no filename in overwriteAs()") unless defined($zipName); my ( $fh, $tempName ) = Archive::Zip::tempFile(); return _error( "Can't open temp file", $! 
) unless $fh; ( my $backupName = $zipName ) =~ s{(\.[^.]*)?$}{.zbk}; my $status = $self->writeToFileHandle($fh); $fh->close(); $fh = undef; if ( $status != AZ_OK ) { unlink($tempName); _printError("Can't write to $tempName"); return $status; } my $err; # rename the zip if ( -f $zipName && !rename( $zipName, $backupName ) ) { $err = $!; unlink($tempName); return _error( "Can't rename $zipName as $backupName", $err ); } # move the temp to the original name (possibly copying) unless ( File::Copy::move( $tempName, $zipName ) ) { $err = $!; rename( $backupName, $zipName ); unlink($tempName); return _error( "Can't move $tempName to $zipName", $err ); } # unlink the backup if ( -f $backupName && !unlink($backupName) ) { $err = $!; return _error( "Can't unlink $backupName", $err ); } return AZ_OK; } # Used only during writing sub _writeCentralDirectoryOffset { shift->{'writeCentralDirectoryOffset'}; } sub _writeEOCDOffset { shift->{'writeEOCDOffset'}; } # Expects to have _writeEOCDOffset() set sub _writeEndOfCentralDirectory { my ( $self, $fh ) = @_; $self->_print($fh, END_OF_CENTRAL_DIRECTORY_SIGNATURE_STRING) or return _ioError('writing EOCD Signature'); my $zipfileCommentLength = length( $self->zipfileComment() ); my $header = pack( END_OF_CENTRAL_DIRECTORY_FORMAT, 0, # {'diskNumber'}, 0, # {'diskNumberWithStartOfCentralDirectory'}, $self->numberOfMembers(), # {'numberOfCentralDirectoriesOnThisDisk'}, $self->numberOfMembers(), # {'numberOfCentralDirectories'}, $self->_writeEOCDOffset() - $self->_writeCentralDirectoryOffset(), $self->_writeCentralDirectoryOffset(), $zipfileCommentLength ); $self->_print($fh, $header) or return _ioError('writing EOCD header'); if ($zipfileCommentLength) { $self->_print($fh, $self->zipfileComment() ) or return _ioError('writing zipfile comment'); } return AZ_OK; } # $offset can be specified to truncate a zip file. sub writeCentralDirectory { my $self = shift; my ( $fh, $offset ); if ( ref( $_[0] ) eq 'HASH' ) { $fh = $_[0]->{fileHandle}; $offset = $_[0]->{offset}; } else { ( $fh, $offset ) = @_; } if ( defined($offset) ) { $self->{'writeCentralDirectoryOffset'} = $offset; $fh->seek( $offset, IO::Seekable::SEEK_SET ) or return _ioError('seeking to write central directory'); } else { $offset = $self->_writeCentralDirectoryOffset(); } foreach my $member ( $self->members() ) { my $status = $member->_writeCentralDirectoryFileHeader($fh); return $status if $status != AZ_OK; $offset += $member->_centralDirectoryHeaderSize(); $self->{'writeEOCDOffset'} = $offset; } return $self->_writeEndOfCentralDirectory($fh); } sub read { my $self = shift; my $fileName = ( ref( $_[0] ) eq 'HASH' ) ? shift->{filename} : shift; return _error('No filename given') unless $fileName; my ( $status, $fh ) = _newFileHandle( $fileName, 'r' ); return _ioError("opening $fileName for read") unless $status; $status = $self->readFromFileHandle( $fh, $fileName ); return $status if $status != AZ_OK; $fh->close(); $self->{'fileName'} = $fileName; return AZ_OK; } sub readFromFileHandle { my $self = shift; my ( $fh, $fileName ); if ( ref( $_[0] ) eq 'HASH' ) { $fh = $_[0]->{fileHandle}; $fileName = $_[0]->{filename}; } else { ( $fh, $fileName ) = @_; } $fileName = $fh unless defined($fileName); return _error('No filehandle given') unless $fh; return _ioError('filehandle not open') unless $fh->opened(); _binmode($fh); $self->{'fileName'} = "$fh"; # TODO: how to support non-seekable zips? 
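# Reading a zip requires random access: the End Of Central Directory record
# is located by seeking to the end of the file and scanning backwards, so
# handles that cannot seek (pipes, sockets) are rejected here.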
return _error('file not seekable') unless _isSeekable($fh); $fh->seek( 0, 0 ); # rewind the file my $status = $self->_findEndOfCentralDirectory($fh); return $status if $status != AZ_OK; my $eocdPosition = $fh->tell(); $status = $self->_readEndOfCentralDirectory($fh); return $status if $status != AZ_OK; $fh->seek( $eocdPosition - $self->centralDirectorySize(), IO::Seekable::SEEK_SET ) or return _ioError("Can't seek $fileName"); # Try to detect garbage at beginning of archives # This should be 0 $self->{'eocdOffset'} = $eocdPosition - $self->centralDirectorySize() # here - $self->centralDirectoryOffsetWRTStartingDiskNumber(); for ( ; ; ) { my $newMember = $self->ZIPMEMBERCLASS->_newFromZipFile( $fh, $fileName, $self->eocdOffset() ); my $signature; ( $status, $signature ) = _readSignature( $fh, $fileName ); return $status if $status != AZ_OK; last if $signature == END_OF_CENTRAL_DIRECTORY_SIGNATURE; $status = $newMember->_readCentralDirectoryFileHeader(); return $status if $status != AZ_OK; $status = $newMember->endRead(); return $status if $status != AZ_OK; $newMember->_becomeDirectoryIfNecessary(); push( @{ $self->{'members'} }, $newMember ); } return AZ_OK; } # Read EOCD, starting from position before signature. # Return AZ_OK on success. sub _readEndOfCentralDirectory { my $self = shift; my $fh = shift; # Skip past signature $fh->seek( SIGNATURE_LENGTH, IO::Seekable::SEEK_CUR ) or return _ioError("Can't seek past EOCD signature"); my $header = ''; my $bytesRead = $fh->read( $header, END_OF_CENTRAL_DIRECTORY_LENGTH ); if ( $bytesRead != END_OF_CENTRAL_DIRECTORY_LENGTH ) { return _ioError("reading end of central directory"); } my $zipfileCommentLength; ( $self->{'diskNumber'}, $self->{'diskNumberWithStartOfCentralDirectory'}, $self->{'numberOfCentralDirectoriesOnThisDisk'}, $self->{'numberOfCentralDirectories'}, $self->{'centralDirectorySize'}, $self->{'centralDirectoryOffsetWRTStartingDiskNumber'}, $zipfileCommentLength ) = unpack( END_OF_CENTRAL_DIRECTORY_FORMAT, $header ); if ($zipfileCommentLength) { my $zipfileComment = ''; $bytesRead = $fh->read( $zipfileComment, $zipfileCommentLength ); if ( $bytesRead != $zipfileCommentLength ) { return _ioError("reading zipfile comment"); } $self->{'zipfileComment'} = $zipfileComment; } return AZ_OK; } # Seek in my file to the end, then read backwards until we find the # signature of the central directory record. Leave the file positioned right # before the signature. Returns AZ_OK if success. sub _findEndOfCentralDirectory { my $self = shift; my $fh = shift; my $data = ''; $fh->seek( 0, IO::Seekable::SEEK_END ) or return _ioError("seeking to end"); my $fileLength = $fh->tell(); if ( $fileLength < END_OF_CENTRAL_DIRECTORY_LENGTH + 4 ) { return _formatError("file is too short"); } my $seekOffset = 0; my $pos = -1; for ( ; ; ) { $seekOffset += 512; $seekOffset = $fileLength if ( $seekOffset > $fileLength ); $fh->seek( -$seekOffset, IO::Seekable::SEEK_END ) or return _ioError("seek failed"); my $bytesRead = $fh->read( $data, $seekOffset ); if ( $bytesRead != $seekOffset ) { return _ioError("read failed"); } $pos = rindex( $data, END_OF_CENTRAL_DIRECTORY_SIGNATURE_STRING ); last if ( $pos >= 0 or $seekOffset == $fileLength or $seekOffset >= $Archive::Zip::ChunkSize ); } if ( $pos >= 0 ) { $fh->seek( $pos - $seekOffset, IO::Seekable::SEEK_CUR ) or return _ioError("seeking to EOCD"); return AZ_OK; } else { return _formatError("can't find EOCD signature"); } } # Used to avoid taint problems when chdir'ing. 
# Not intended to increase security in any way; just intended to shut up the -T # complaints. If your Cwd module is giving you unreliable returns from cwd() # you have bigger problems than this. sub _untaintDir { my $dir = shift; $dir =~ m/\A(.+)\z/s; return $1; } sub addTree { my $self = shift; my ( $root, $dest, $pred, $compressionLevel ); if ( ref( $_[0] ) eq 'HASH' ) { $root = $_[0]->{root}; $dest = $_[0]->{zipName}; $pred = $_[0]->{select}; $compressionLevel = $_[0]->{compressionLevel}; } else { ( $root, $dest, $pred, $compressionLevel ) = @_; } return _error("root arg missing in call to addTree()") unless defined($root); $dest = '' unless defined($dest); $pred = sub { -r } unless defined($pred); my @files; my $startDir = _untaintDir( cwd() ); return _error( 'undef returned by _untaintDir on cwd ', cwd() ) unless $startDir; # This avoids chdir'ing in Find, in a way compatible with older # versions of File::Find. my $wanted = sub { local $main::_ = $File::Find::name; my $dir = _untaintDir($File::Find::dir); chdir($startDir); push( @files, $File::Find::name ) if (&$pred); chdir($dir); }; File::Find::find( $wanted, $root ); my $rootZipName = _asZipDirName( $root, 1 ); # with trailing slash my $pattern = $rootZipName eq './' ? '^' : "^\Q$rootZipName\E"; $dest = _asZipDirName( $dest, 1 ); # with trailing slash foreach my $fileName (@files) { my $isDir = -d $fileName; # normalize, remove leading ./ my $archiveName = _asZipDirName( $fileName, $isDir ); if ( $archiveName eq $rootZipName ) { $archiveName = $dest } else { $archiveName =~ s{$pattern}{$dest} } next if $archiveName =~ m{^\.?/?$}; # skip current dir my $member = $isDir ? $self->addDirectory( $fileName, $archiveName ) : $self->addFile( $fileName, $archiveName ); $member->desiredCompressionLevel($compressionLevel); return _error("add $fileName failed in addTree()") if !$member; } return AZ_OK; } sub addTreeMatching { my $self = shift; my ( $root, $dest, $pattern, $pred, $compressionLevel ); if ( ref( $_[0] ) eq 'HASH' ) { $root = $_[0]->{root}; $dest = $_[0]->{zipName}; $pattern = $_[0]->{pattern}; $pred = $_[0]->{select}; $compressionLevel = $_[0]->{compressionLevel}; } else { ( $root, $dest, $pattern, $pred, $compressionLevel ) = @_; } return _error("root arg missing in call to addTreeMatching()") unless defined($root); $dest = '' unless defined($dest); return _error("pattern missing in call to addTreeMatching()") unless defined($pattern); my $matcher = $pred ? sub { m{$pattern} && &$pred } : sub { m{$pattern} && -r }; return $self->addTree( $root, $dest, $matcher, $compressionLevel ); } # $zip->extractTree( $root, $dest [, $volume] ); # # $root and $dest are Unix-style. # $volume is in local FS format. # sub extractTree { my $self = shift; my ( $root, $dest, $volume ); if ( ref( $_[0] ) eq 'HASH' ) { $root = $_[0]->{root}; $dest = $_[0]->{zipName}; $volume = $_[0]->{volume}; } else { ( $root, $dest, $volume ) = @_; } $root = '' unless defined($root); $dest = './' unless defined($dest); my $pattern = "^\Q$root"; my @members = $self->membersMatching($pattern); foreach my $member (@members) { my $fileName = $member->fileName(); # in Unix format $fileName =~ s{$pattern}{$dest}; # in Unix format # convert to platform format: $fileName = Archive::Zip::_asLocalName( $fileName, $volume ); my $status = $member->extractToFileNamed($fileName); return $status if $status != AZ_OK; } return AZ_OK; } # $zip->updateMember( $memberOrName, $fileName ); # Returns (possibly updated) member, if any; undef on errors. 
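# The existing member is kept when its last-modification time, its directory
# flag and (for plain files) its uncompressed size all match the file on
# disk; otherwise a fresh member is built from the file and either swapped in
# with replaceMember() or added if no member of that name existed.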
sub updateMember { my $self = shift; my ( $oldMember, $fileName ); if ( ref( $_[0] ) eq 'HASH' ) { $oldMember = $_[0]->{memberOrZipName}; $fileName = $_[0]->{name}; } else { ( $oldMember, $fileName ) = @_; } if ( !defined($fileName) ) { _error("updateMember(): missing fileName argument"); return undef; } my @newStat = stat($fileName); if ( !@newStat ) { _ioError("Can't stat $fileName"); return undef; } my $isDir = -d _; my $memberName; if ( ref($oldMember) ) { $memberName = $oldMember->fileName(); } else { $oldMember = $self->memberNamed( $memberName = $oldMember ) || $self->memberNamed( $memberName = _asZipDirName( $oldMember, $isDir ) ); } unless ( defined($oldMember) && $oldMember->lastModTime() == $newStat[9] && $oldMember->isDirectory() == $isDir && ( $isDir || ( $oldMember->uncompressedSize() == $newStat[7] ) ) ) { # create the new member my $newMember = $isDir ? $self->ZIPMEMBERCLASS->newDirectoryNamed( $fileName, $memberName ) : $self->ZIPMEMBERCLASS->newFromFile( $fileName, $memberName ); unless ( defined($newMember) ) { _error("creation of member $fileName failed in updateMember()"); return undef; } # replace old member or append new one if ( defined($oldMember) ) { $self->replaceMember( $oldMember, $newMember ); } else { $self->addMember($newMember); } return $newMember; } return $oldMember; } # $zip->updateTree( $root, [ $dest, [ $pred [, $mirror]]] ); # # This takes the same arguments as addTree, but first checks to see # whether the file or directory already exists in the zip file. # # If the fourth argument $mirror is true, then delete all my members # if corresponding files weren't found. sub updateTree { my $self = shift; my ( $root, $dest, $pred, $mirror, $compressionLevel ); if ( ref( $_[0] ) eq 'HASH' ) { $root = $_[0]->{root}; $dest = $_[0]->{zipName}; $pred = $_[0]->{select}; $mirror = $_[0]->{mirror}; $compressionLevel = $_[0]->{compressionLevel}; } else { ( $root, $dest, $pred, $mirror, $compressionLevel ) = @_; } return _error("root arg missing in call to updateTree()") unless defined($root); $dest = '' unless defined($dest); $pred = sub { -r } unless defined($pred); $dest = _asZipDirName( $dest, 1 ); my $rootZipName = _asZipDirName( $root, 1 ); # with trailing slash my $pattern = $rootZipName eq './' ? '^' : "^\Q$rootZipName\E"; my @files; my $startDir = _untaintDir( cwd() ); return _error( 'undef returned by _untaintDir on cwd ', cwd() ) unless $startDir; # This avoids chdir'ing in Find, in a way compatible with older # versions of File::Find. my $wanted = sub { local $main::_ = $File::Find::name; my $dir = _untaintDir($File::Find::dir); chdir($startDir); push( @files, $File::Find::name ) if (&$pred); chdir($dir); }; File::Find::find( $wanted, $root ); # Now @files has all the files that I could potentially be adding to # the zip. Only add the ones that are necessary. # For each file (updated or not), add its member name to @done. my %done; foreach my $fileName (@files) { my @newStat = stat($fileName); my $isDir = -d _; # normalize, remove leading ./ my $memberName = _asZipDirName( $fileName, $isDir ); if ( $memberName eq $rootZipName ) { $memberName = $dest } else { $memberName =~ s{$pattern}{$dest} } next if $memberName =~ m{^\.?/?$}; # skip current dir $done{$memberName} = 1; my $changedMember = $self->updateMember( $memberName, $fileName ); $changedMember->desiredCompressionLevel($compressionLevel); return _error("updateTree failed to update $fileName") unless ref($changedMember); } # @done now has the archive names corresponding to all the found files. 
# If we're mirroring, delete all those members that aren't in @done. if ($mirror) { foreach my $member ( $self->members() ) { $self->removeMember($member) unless $done{ $member->fileName() }; } } return AZ_OK; } 1; gdata/inst/perl/Archive/Zip/BufferedFileHandle.pm0000644000175100001440000000533613003720416021411 0ustar hornikuserspackage Archive::Zip::BufferedFileHandle; # File handle that uses a string internally and can seek # This is given as a demo for getting a zip file written # to a string. # I probably should just use IO::Scalar instead. # Ned Konz, March 2000 use strict; use IO::File; use Carp; use vars qw{$VERSION}; BEGIN { $VERSION = '1.30'; $VERSION = eval $VERSION; } sub new { my $class = shift || __PACKAGE__; $class = ref($class) || $class; my $self = bless( { content => '', position => 0, size => 0 }, $class ); return $self; } # Utility method to read entire file sub readFromFile { my $self = shift; my $fileName = shift; my $fh = IO::File->new( $fileName, "r" ); CORE::binmode($fh); if ( !$fh ) { Carp::carp("Can't open $fileName: $!\n"); return undef; } local $/ = undef; $self->{content} = <$fh>; $self->{size} = length( $self->{content} ); return $self; } sub contents { my $self = shift; if (@_) { $self->{content} = shift; $self->{size} = length( $self->{content} ); } return $self->{content}; } sub binmode { 1 } sub close { 1 } sub opened { 1 } sub eof { my $self = shift; return $self->{position} >= $self->{size}; } sub seek { my $self = shift; my $pos = shift; my $whence = shift; # SEEK_SET if ( $whence == 0 ) { $self->{position} = $pos; } # SEEK_CUR elsif ( $whence == 1 ) { $self->{position} += $pos; } # SEEK_END elsif ( $whence == 2 ) { $self->{position} = $self->{size} + $pos; } else { return 0; } return 1; } sub tell { return shift->{position}; } # Copy my data to given buffer sub read { my $self = shift; my $buf = \( $_[0] ); shift; my $len = shift; my $offset = shift || 0; $$buf = '' if not defined($$buf); my $bytesRead = ( $self->{position} + $len > $self->{size} ) ? ( $self->{size} - $self->{position} ) : $len; substr( $$buf, $offset, $bytesRead ) = substr( $self->{content}, $self->{position}, $bytesRead ); $self->{position} += $bytesRead; return $bytesRead; } # Copy given buffer to me sub write { my $self = shift; my $buf = \( $_[0] ); shift; my $len = shift; my $offset = shift || 0; $$buf = '' if not defined($$buf); my $bufLen = length($$buf); my $bytesWritten = ( $offset + $len > $bufLen ) ? 
$bufLen - $offset : $len; substr( $self->{content}, $self->{position}, $bytesWritten ) = substr( $$buf, $offset, $bytesWritten ); $self->{size} = length( $self->{content} ); return $bytesWritten; } sub clearerr() { 1 } 1; gdata/inst/perl/Archive/Zip/ZipFileMember.pm0000644000175100001440000003233313003720416020442 0ustar hornikuserspackage Archive::Zip::ZipFileMember; use strict; use vars qw( $VERSION @ISA ); BEGIN { $VERSION = '1.30'; @ISA = qw ( Archive::Zip::FileMember ); } use Archive::Zip qw( :CONSTANTS :ERROR_CODES :PKZIP_CONSTANTS :UTILITY_METHODS ); # Create a new Archive::Zip::ZipFileMember # given a filename and optional open file handle # sub _newFromZipFile { my $class = shift; my $fh = shift; my $externalFileName = shift; my $possibleEocdOffset = shift; # normally 0 my $self = $class->new( 'crc32' => 0, 'diskNumberStart' => 0, 'localHeaderRelativeOffset' => 0, 'dataOffset' => 0, # localHeaderRelativeOffset + header length @_ ); $self->{'externalFileName'} = $externalFileName; $self->{'fh'} = $fh; $self->{'possibleEocdOffset'} = $possibleEocdOffset; return $self; } sub isDirectory { my $self = shift; return ( substr( $self->fileName, -1, 1 ) eq '/' and $self->uncompressedSize == 0 ); } # Seek to the beginning of the local header, just past the signature. # Verify that the local header signature is in fact correct. # Update the localHeaderRelativeOffset if necessary by adding the possibleEocdOffset. # Returns status. sub _seekToLocalHeader { my $self = shift; my $where = shift; # optional my $previousWhere = shift; # optional $where = $self->localHeaderRelativeOffset() unless defined($where); # avoid loop on certain corrupt files (from Julian Field) return _formatError("corrupt zip file") if defined($previousWhere) && $where == $previousWhere; my $status; my $signature; $status = $self->fh()->seek( $where, IO::Seekable::SEEK_SET ); return _ioError("seeking to local header") unless $status; ( $status, $signature ) = _readSignature( $self->fh(), $self->externalFileName(), LOCAL_FILE_HEADER_SIGNATURE ); return $status if $status == AZ_IO_ERROR; # retry with EOCD offset if any was given. if ( $status == AZ_FORMAT_ERROR && $self->{'possibleEocdOffset'} ) { $status = $self->_seekToLocalHeader( $self->localHeaderRelativeOffset() + $self->{'possibleEocdOffset'}, $where ); if ( $status == AZ_OK ) { $self->{'localHeaderRelativeOffset'} += $self->{'possibleEocdOffset'}; $self->{'possibleEocdOffset'} = 0; } } return $status; } # Because I'm going to delete the file handle, read the local file # header if the file handle is seekable. If it isn't, I assume that # I've already read the local header. # Return ( $status, $self ) sub _become { my $self = shift; my $newClass = shift; return $self if ref($self) eq $newClass; my $status = AZ_OK; if ( _isSeekable( $self->fh() ) ) { my $here = $self->fh()->tell(); $status = $self->_seekToLocalHeader(); $status = $self->_readLocalFileHeader() if $status == AZ_OK; $self->fh()->seek( $here, IO::Seekable::SEEK_SET ); return $status unless $status == AZ_OK; } delete( $self->{'eocdCrc32'} ); delete( $self->{'diskNumberStart'} ); delete( $self->{'localHeaderRelativeOffset'} ); delete( $self->{'dataOffset'} ); return $self->SUPER::_become($newClass); } sub diskNumberStart { shift->{'diskNumberStart'}; } sub localHeaderRelativeOffset { shift->{'localHeaderRelativeOffset'}; } sub dataOffset { shift->{'dataOffset'}; } # Skip local file header, updating only extra field stuff. # Assumes that fh is positioned before signature. 
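# The extra field stored in the local header may differ in length from the
# copy recorded in the central directory, so the header is parsed here to
# find the true data offset; when the data-descriptor flag is set, the
# descriptor is also read and checked against the central-directory values.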
sub _skipLocalFileHeader { my $self = shift; my $header; my $bytesRead = $self->fh()->read( $header, LOCAL_FILE_HEADER_LENGTH ); if ( $bytesRead != LOCAL_FILE_HEADER_LENGTH ) { return _ioError("reading local file header"); } my $fileNameLength; my $extraFieldLength; my $bitFlag; ( undef, # $self->{'versionNeededToExtract'}, $bitFlag, undef, # $self->{'compressionMethod'}, undef, # $self->{'lastModFileDateTime'}, undef, # $crc32, undef, # $compressedSize, undef, # $uncompressedSize, $fileNameLength, $extraFieldLength ) = unpack( LOCAL_FILE_HEADER_FORMAT, $header ); if ($fileNameLength) { $self->fh()->seek( $fileNameLength, IO::Seekable::SEEK_CUR ) or return _ioError("skipping local file name"); } if ($extraFieldLength) { $bytesRead = $self->fh()->read( $self->{'localExtraField'}, $extraFieldLength ); if ( $bytesRead != $extraFieldLength ) { return _ioError("reading local extra field"); } } $self->{'dataOffset'} = $self->fh()->tell(); if ( $bitFlag & GPBF_HAS_DATA_DESCRIPTOR_MASK ) { # Read the crc32, compressedSize, and uncompressedSize from the # extended data descriptor, which directly follows the compressed data. # # Skip over the compressed file data (assumes that EOCD compressedSize # was correct) $self->fh()->seek( $self->{'compressedSize'}, IO::Seekable::SEEK_CUR ) or return _ioError("seeking to extended local header"); # these values should be set correctly from before. my $oldCrc32 = $self->{'eocdCrc32'}; my $oldCompressedSize = $self->{'compressedSize'}; my $oldUncompressedSize = $self->{'uncompressedSize'}; my $status = $self->_readDataDescriptor(); return $status unless $status == AZ_OK; return _formatError( "CRC or size mismatch while skipping data descriptor") if ( $oldCrc32 != $self->{'crc32'} || $oldUncompressedSize != $self->{'uncompressedSize'} ); } return AZ_OK; } # Read from a local file header into myself. Returns AZ_OK if successful. # Assumes that fh is positioned after signature. # Note that crc32, compressedSize, and uncompressedSize will be 0 if # GPBF_HAS_DATA_DESCRIPTOR_MASK is set in the bitFlag. sub _readLocalFileHeader { my $self = shift; my $header; my $bytesRead = $self->fh()->read( $header, LOCAL_FILE_HEADER_LENGTH ); if ( $bytesRead != LOCAL_FILE_HEADER_LENGTH ) { return _ioError("reading local file header"); } my $fileNameLength; my $crc32; my $compressedSize; my $uncompressedSize; my $extraFieldLength; ( $self->{'versionNeededToExtract'}, $self->{'bitFlag'}, $self->{'compressionMethod'}, $self->{'lastModFileDateTime'}, $crc32, $compressedSize, $uncompressedSize, $fileNameLength, $extraFieldLength ) = unpack( LOCAL_FILE_HEADER_FORMAT, $header ); if ($fileNameLength) { my $fileName; $bytesRead = $self->fh()->read( $fileName, $fileNameLength ); if ( $bytesRead != $fileNameLength ) { return _ioError("reading local file name"); } $self->fileName($fileName); } if ($extraFieldLength) { $bytesRead = $self->fh()->read( $self->{'localExtraField'}, $extraFieldLength ); if ( $bytesRead != $extraFieldLength ) { return _ioError("reading local extra field"); } } $self->{'dataOffset'} = $self->fh()->tell(); if ( $self->hasDataDescriptor() ) { # Read the crc32, compressedSize, and uncompressedSize from the # extended data descriptor. 
# Skip over the compressed file data (assumes that EOCD compressedSize # was correct) $self->fh()->seek( $self->{'compressedSize'}, IO::Seekable::SEEK_CUR ) or return _ioError("seeking to extended local header"); my $status = $self->_readDataDescriptor(); return $status unless $status == AZ_OK; } else { return _formatError( "CRC or size mismatch after reading data descriptor") if ( $self->{'crc32'} != $crc32 || $self->{'uncompressedSize'} != $uncompressedSize ); } return AZ_OK; } # This will read the data descriptor, which is after the end of compressed file # data in members that that have GPBF_HAS_DATA_DESCRIPTOR_MASK set in their # bitFlag. # The only reliable way to find these is to rely on the EOCD compressedSize. # Assumes that file is positioned immediately after the compressed data. # Returns status; sets crc32, compressedSize, and uncompressedSize. sub _readDataDescriptor { my $self = shift; my $signatureData; my $header; my $crc32; my $compressedSize; my $uncompressedSize; my $bytesRead = $self->fh()->read( $signatureData, SIGNATURE_LENGTH ); return _ioError("reading header signature") if $bytesRead != SIGNATURE_LENGTH; my $signature = unpack( SIGNATURE_FORMAT, $signatureData ); # unfortunately, the signature appears to be optional. if ( $signature == DATA_DESCRIPTOR_SIGNATURE && ( $signature != $self->{'crc32'} ) ) { $bytesRead = $self->fh()->read( $header, DATA_DESCRIPTOR_LENGTH ); return _ioError("reading data descriptor") if $bytesRead != DATA_DESCRIPTOR_LENGTH; ( $crc32, $compressedSize, $uncompressedSize ) = unpack( DATA_DESCRIPTOR_FORMAT, $header ); } else { $bytesRead = $self->fh()->read( $header, DATA_DESCRIPTOR_LENGTH_NO_SIG ); return _ioError("reading data descriptor") if $bytesRead != DATA_DESCRIPTOR_LENGTH_NO_SIG; $crc32 = $signature; ( $compressedSize, $uncompressedSize ) = unpack( DATA_DESCRIPTOR_FORMAT_NO_SIG, $header ); } $self->{'eocdCrc32'} = $self->{'crc32'} unless defined( $self->{'eocdCrc32'} ); $self->{'crc32'} = $crc32; $self->{'compressedSize'} = $compressedSize; $self->{'uncompressedSize'} = $uncompressedSize; return AZ_OK; } # Read a Central Directory header. Return AZ_OK on success. # Assumes that fh is positioned right after the signature. 
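# The CRC read here is also cached as 'eocdCrc32' so that members whose
# local header defers its CRC and sizes to a trailing data descriptor can
# still be cross-checked later.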
sub _readCentralDirectoryFileHeader { my $self = shift; my $fh = $self->fh(); my $header = ''; my $bytesRead = $fh->read( $header, CENTRAL_DIRECTORY_FILE_HEADER_LENGTH ); if ( $bytesRead != CENTRAL_DIRECTORY_FILE_HEADER_LENGTH ) { return _ioError("reading central dir header"); } my ( $fileNameLength, $extraFieldLength, $fileCommentLength ); ( $self->{'versionMadeBy'}, $self->{'fileAttributeFormat'}, $self->{'versionNeededToExtract'}, $self->{'bitFlag'}, $self->{'compressionMethod'}, $self->{'lastModFileDateTime'}, $self->{'crc32'}, $self->{'compressedSize'}, $self->{'uncompressedSize'}, $fileNameLength, $extraFieldLength, $fileCommentLength, $self->{'diskNumberStart'}, $self->{'internalFileAttributes'}, $self->{'externalFileAttributes'}, $self->{'localHeaderRelativeOffset'} ) = unpack( CENTRAL_DIRECTORY_FILE_HEADER_FORMAT, $header ); $self->{'eocdCrc32'} = $self->{'crc32'}; if ($fileNameLength) { $bytesRead = $fh->read( $self->{'fileName'}, $fileNameLength ); if ( $bytesRead != $fileNameLength ) { _ioError("reading central dir filename"); } } if ($extraFieldLength) { $bytesRead = $fh->read( $self->{'cdExtraField'}, $extraFieldLength ); if ( $bytesRead != $extraFieldLength ) { return _ioError("reading central dir extra field"); } } if ($fileCommentLength) { $bytesRead = $fh->read( $self->{'fileComment'}, $fileCommentLength ); if ( $bytesRead != $fileCommentLength ) { return _ioError("reading central dir file comment"); } } # NK 10/21/04: added to avoid problems with manipulated headers if ( $self->{'uncompressedSize'} != $self->{'compressedSize'} and $self->{'compressionMethod'} == COMPRESSION_STORED ) { $self->{'uncompressedSize'} = $self->{'compressedSize'}; } $self->desiredCompressionMethod( $self->compressionMethod() ); return AZ_OK; } sub rewindData { my $self = shift; my $status = $self->SUPER::rewindData(@_); return $status unless $status == AZ_OK; return AZ_IO_ERROR unless $self->fh(); $self->fh()->clearerr(); # Seek to local file header. # The only reason that I'm doing this this way is that the extraField # length seems to be different between the CD header and the LF header. $status = $self->_seekToLocalHeader(); return $status unless $status == AZ_OK; # skip local file header $status = $self->_skipLocalFileHeader(); return $status unless $status == AZ_OK; # Seek to beginning of file data $self->fh()->seek( $self->dataOffset(), IO::Seekable::SEEK_SET ) or return _ioError("seeking to beginning of file data"); return AZ_OK; } # Return bytes read. Note that first parameter is a ref to a buffer. # my $data; # my ( $bytesRead, $status) = $self->readRawChunk( \$data, $chunkSize ); sub _readRawChunk { my ( $self, $dataRef, $chunkSize ) = @_; return ( 0, AZ_OK ) unless $chunkSize; my $bytesRead = $self->fh()->read( $$dataRef, $chunkSize ) or return ( 0, _ioError("reading data") ); return ( $bytesRead, AZ_OK ); } 1; gdata/inst/perl/Archive/Zip/NewFileMember.pm0000644000175100001440000000424513003720416020432 0ustar hornikuserspackage Archive::Zip::NewFileMember; use strict; use vars qw( $VERSION @ISA ); BEGIN { $VERSION = '1.30'; @ISA = qw ( Archive::Zip::FileMember ); } use Archive::Zip qw( :CONSTANTS :ERROR_CODES :UTILITY_METHODS ); # Given a file name, set up for eventual writing. 
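# Returns undef unless the path names a readable plain file. The new member
# starts out COMPRESSION_STORED with both sizes set to the on-disk size;
# deflation is requested only when there is actually data to compress.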
sub _newFromFileNamed { my $class = shift; my $fileName = shift; # local FS format my $newName = shift; $newName = _asZipDirName($fileName) unless defined($newName); return undef unless ( stat($fileName) && -r _ && !-d _ ); my $self = $class->new(@_); $self->{'fileName'} = $newName; $self->{'externalFileName'} = $fileName; $self->{'compressionMethod'} = COMPRESSION_STORED; my @stat = stat(_); $self->{'compressedSize'} = $self->{'uncompressedSize'} = $stat[7]; $self->desiredCompressionMethod( ( $self->compressedSize() > 0 ) ? COMPRESSION_DEFLATED : COMPRESSION_STORED ); $self->unixFileAttributes( $stat[2] ); $self->setLastModFileDateTimeFromUnix( $stat[9] ); $self->isTextFile( -T _ ); return $self; } sub rewindData { my $self = shift; my $status = $self->SUPER::rewindData(@_); return $status unless $status == AZ_OK; return AZ_IO_ERROR unless $self->fh(); $self->fh()->clearerr(); $self->fh()->seek( 0, IO::Seekable::SEEK_SET ) or return _ioError( "rewinding", $self->externalFileName() ); return AZ_OK; } # Return bytes read. Note that first parameter is a ref to a buffer. # my $data; # my ( $bytesRead, $status) = $self->readRawChunk( \$data, $chunkSize ); sub _readRawChunk { my ( $self, $dataRef, $chunkSize ) = @_; return ( 0, AZ_OK ) unless $chunkSize; my $bytesRead = $self->fh()->read( $$dataRef, $chunkSize ) or return ( 0, _ioError("reading data") ); return ( $bytesRead, AZ_OK ); } # If I already exist, extraction is a no-op. sub extractToFileNamed { my $self = shift; my $name = shift; # local FS name if ( File::Spec->rel2abs($name) eq File::Spec->rel2abs( $self->externalFileName() ) and -r $name ) { return AZ_OK; } else { return $self->SUPER::extractToFileNamed( $name, @_ ); } } 1; gdata/inst/perl/Archive/Zip/FileMember.pm0000644000175100001440000000251013003720416017751 0ustar hornikuserspackage Archive::Zip::FileMember; use strict; use vars qw( $VERSION @ISA ); BEGIN { $VERSION = '1.30'; @ISA = qw ( Archive::Zip::Member ); } use Archive::Zip qw( :UTILITY_METHODS ); sub externalFileName { shift->{'externalFileName'}; } # Return true if I depend on the named file sub _usesFileNamed { my $self = shift; my $fileName = shift; my $xfn = $self->externalFileName(); return undef if ref($xfn); return $xfn eq $fileName; } sub fh { my $self = shift; $self->_openFile() if !defined( $self->{'fh'} ) || !$self->{'fh'}->opened(); return $self->{'fh'}; } # opens my file handle from my file name sub _openFile { my $self = shift; my ( $status, $fh ) = _newFileHandle( $self->externalFileName(), 'r' ); if ( !$status ) { _ioError( "Can't open", $self->externalFileName() ); return undef; } $self->{'fh'} = $fh; _binmode($fh); return $fh; } # Make sure I close my file handle sub endRead { my $self = shift; undef $self->{'fh'}; # _closeFile(); return $self->SUPER::endRead(@_); } sub _become { my $self = shift; my $newClass = shift; return $self if ref($self) eq $newClass; delete( $self->{'externalFileName'} ); delete( $self->{'fh'} ); return $self->SUPER::_become($newClass); } 1; gdata/inst/perl/Archive/Zip/StringMember.pm0000644000175100001440000000331013003720416020337 0ustar hornikuserspackage Archive::Zip::StringMember; use strict; use vars qw( $VERSION @ISA ); BEGIN { $VERSION = '1.30'; @ISA = qw( Archive::Zip::Member ); } use Archive::Zip qw( :CONSTANTS :ERROR_CODES ); # Create a new string member. Default is COMPRESSION_STORED. # Can take a ref to a string as well. 
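# Since there is no underlying file to copy attributes from, the
# last-modification time is set to the current time and the default file
# permissions are applied.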
sub _newFromString { my $class = shift; my $string = shift; my $name = shift; my $self = $class->new(@_); $self->contents($string); $self->fileName($name) if defined($name); # Set the file date to now $self->setLastModFileDateTimeFromUnix( time() ); $self->unixFileAttributes( $self->DEFAULT_FILE_PERMISSIONS ); return $self; } sub _become { my $self = shift; my $newClass = shift; return $self if ref($self) eq $newClass; delete( $self->{'contents'} ); return $self->SUPER::_become($newClass); } # Get or set my contents. Note that we do not call the superclass # version of this, because it calls us. sub contents { my $self = shift; my $string = shift; if ( defined($string) ) { $self->{'contents'} = pack( 'C0a*', ( ref($string) eq 'SCALAR' ) ? $$string : $string ); $self->{'uncompressedSize'} = $self->{'compressedSize'} = length( $self->{'contents'} ); $self->{'compressionMethod'} = COMPRESSION_STORED; } return $self->{'contents'}; } # Return bytes read. Note that first parameter is a ref to a buffer. # my $data; # my ( $bytesRead, $status) = $self->readRawChunk( \$data, $chunkSize ); sub _readRawChunk { my ( $self, $dataRef, $chunkSize ) = @_; $$dataRef = substr( $self->contents(), $self->_readOffset(), $chunkSize ); return ( length($$dataRef), AZ_OK ); } 1; gdata/inst/perl/Archive/Zip/FAQ.pod0000644000175100001440000003033613003720416016526 0ustar hornikusers =head1 NAME Archive::Zip::FAQ - Answers to a few frequently asked questions about Archive::Zip =head1 DESCRIPTION It seems that I keep answering the same questions over and over again. I assume that this is because my documentation is deficient, rather than that people don't read the documentation. So this FAQ is an attempt to cut down on the number of personal answers I have to give. At least I can now say "You I read the FAQ, right?". The questions are not in any particular order. The answers assume the current version of Archive::Zip; some of the answers depend on newly added/fixed functionality. =head1 Install problems on RedHat 8 or 9 with Perl 5.8.0 B Archive::Zip won't install on my RedHat 9 system! It's broke! B This has become something of a FAQ. Basically, RedHat broke some versions of Perl by setting LANG to UTF8. They apparently have a fixed version out as an update. You might try running CPAN or creating your Makefile after exporting the LANG environment variable as C L =head1 Why is my zip file so big? B My zip file is actually bigger than what I stored in it! Why? B Some things to make sure of: =over 4 =item Make sure that you are requesting COMPRESSION_DEFLATED if you are storing strings. $member->desiredCompressionMethod( COMPRESSION_DEFLATED ); =item Don't make lots of little files if you can help it. Since zip computes the compression tables for each member, small members without much entropy won't compress well. Instead, if you've got lots of repeated strings in your data, try to combine them into one big member. =item Make sure that you are requesting COMPRESSION_STORED if you are storing things that are already compressed. If you're storing a .zip, .jpg, .mp3, or other compressed file in a zip, then don't compress them again. They'll get bigger. =back =head1 Sample code? B Can you send me code to do (whatever)? B Have you looked in the C directory yet? 
It contains: =over 4 =item examples/calcSizes.pl -- How to find out how big a Zip file will be before writing it =item examples/copy.pl -- Copies one Zip file to another =item examples/extract.pl -- extract file(s) from a Zip =item examples/mailZip.pl -- make and mail a zip file =item examples/mfh.pl -- demo for use of MockFileHandle =item examples/readScalar.pl -- shows how to use IO::Scalar as the source of a Zip read =item examples/selfex.pl -- a brief example of a self-extracting Zip =item examples/unzipAll.pl -- uses Archive::Zip::Tree to unzip an entire Zip =item examples/updateZip.pl -- shows how to read/modify/write a Zip =item examples/updateTree.pl -- shows how to update a Zip in place =item examples/writeScalar.pl -- shows how to use IO::Scalar as the destination of a Zip write =item examples/writeScalar2.pl -- shows how to use IO::String as the destination of a Zip write =item examples/zip.pl -- Constructs a Zip file =item examples/zipcheck.pl -- One way to check a Zip file for validity =item examples/zipinfo.pl -- Prints out information about a Zip archive file =item examples/zipGrep.pl -- Searches for text in Zip files =item examples/ziptest.pl -- Lists a Zip file and checks member CRCs =item examples/ziprecent.pl -- Puts recent files into a zipfile =item examples/ziptest.pl -- Another way to check a Zip file for validity =back =head1 Can't Read/modify/write same Zip file B Why can't I open a Zip file, add a member, and write it back? I get an error message when I try. B Because Archive::Zip doesn't (and can't, generally) read file contents into memory, the original Zip file is required to stay around until the writing of the new file is completed. The best way to do this is to write the Zip to a temporary file and then rename the temporary file to have the old name (possibly after deleting the old one). Archive::Zip v1.02 added the archive methods C and C to do this simply and carefully. See C for an example of this technique. =head1 File creation time not set B Upon extracting files, I see that their modification (and access) times are set to the time in the Zip archive. However, their creation time is not set to the same time. Why? B Mostly because Perl doesn't give cross-platform access to I. Indeed, many systems (like Unix) don't support such a concept. However, if yours does, you can easily set it. Get the modification time from the member using C. =head1 Can't use Archive::Zip on gzip files B Can I use Archive::Zip to extract Unix gzip files? B No. There is a distinction between Unix gzip files, and Zip archives that also can use the gzip compression. Depending on the format of the gzip file, you can use L, or L to decompress it (and de-archive it in the case of Tar files). You can unzip PKZIP/WinZip/etc/ archives using Archive::Zip (that's what it's for) as long as any compressed members are compressed using Deflate compression. =head1 Add a directory/tree to a Zip B How can I add a directory (or tree) full of files to a Zip? B You can use the Archive::Zip::addTree*() methods: use Archive::Zip; my $zip = Archive::Zip->new(); # add all readable files and directories below . as xyz/* $zip->addTree( '.', 'xyz' ); # add all readable plain files below /abc as def/* $zip->addTree( '/abc', 'def', sub { -f && -r } ); # add all .c files below /tmp as stuff/* $zip->addTreeMatching( '/tmp', 'stuff', '\.c$' ); # add all .o files below /tmp as stuff/* if they aren't writable $zip->addTreeMatching( '/tmp', 'stuff', '\.o$', sub { ! 
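A minimal sketch along those lines (the file name below is a placeholder)
simply checks the status returned by C<read()>:

    use Archive::Zip qw( :ERROR_CODES );

    my $zip    = Archive::Zip->new();
    my $status = $zip->read('suspect.zip');
    warn "not a readable Zip archive (status $status)\n"
        unless $status == AZ_OK;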
-w } ); # add all .so files below /tmp that are smaller than 200 bytes as stuff/* $zip->addTreeMatching( '/tmp', 'stuff', '\.o$', sub { -s < 200 } ); # and write them into a file $zip->writeToFileNamed('xxx.zip'); =head1 Extract a directory/tree B How can I extract some (or all) files from a Zip into a different directory? B You can use the Archive::Zip::extractTree() method: ??? || # now extract the same files into /tmpx $zip->extractTree( 'stuff', '/tmpx' ); =head1 Update a directory/tree B How can I update a Zip from a directory tree, adding or replacing only the newer files? B You can use the Archive::Zip::updateTree() method that was added in version 1.09. =head1 Zip times might be off by 1 second B It bothers me greatly that my file times are wrong by one second about half the time. Why don't you do something about it? B Get over it. This is a result of the Zip format storing times in DOS format, which has a resolution of only two seconds. =head1 Zip times don't include time zone information B My file times don't respect time zones. What gives? B If this is important to you, please submit patches to read the various Extra Fields that encode times with time zones. I'm just using the DOS Date/Time, which doesn't have a time zone. =head1 How do I make a self-extracting Zip B I want to make a self-extracting Zip file. Can I do this? B Yes. You can write a self-extracting archive stub (that is, a version of unzip) to the output filehandle that you pass to writeToFileHandle(). See examples/selfex.pl for how to write a self-extracting archive. However, you should understand that this will only work on one kind of platform (the one for which the stub was compiled). =head1 How can I deal with Zips with prepended garbage (i.e. from Sircam) B How can I tell if a Zip has been damaged by adding garbage to the beginning or inside the file? B I added code for this for the Amavis virus scanner. You can query archives for their 'eocdOffset' property, which should be 0: if ($zip->eocdOffset > 0) { warn($zip->eocdOffset . " bytes of garbage at beginning or within Zip") } When members are extracted, this offset will be used to adjust the start of the member if necessary. =head1 Can't extract Shrunk files B I'm trying to extract a file out of a Zip produced by PKZIP, and keep getting this error message: error: Unsupported compression combination: read 6, write 0 B You can't uncompress this archive member. Archive::Zip only supports uncompressed members, and compressed members that are compressed using the compression supported by Compress::Raw::Zlib. That means only Deflated and Stored members. Your file is compressed using the Shrink format, which isn't supported by Compress::Raw::Zlib. You could, perhaps, use a command-line UnZip program (like the Info-Zip one) to extract this. =head1 Can't do decryption B How do I decrypt encrypted Zip members? B With some other program or library. Archive::Zip doesn't support decryption, and probably never will (unless I write it). =head1 How to test file integrity? B How can Archive::Zip can test the validity of a Zip file? B If you try to decompress the file, the gzip streams will report errors if you have garbage. Most of the time. If you try to open the file and a central directory structure can't be found, an error will be reported. When a file is being read, if we can't find a proper PK.. signature in the right places we report a format error. 
If there is added garbage at the beginning of a Zip file (as inserted by some viruses), you can find out about it, but Archive::Zip will ignore it, and you can still use the archive. When it gets written back out the added stuff will be gone. There are two ready-to-use utilities in the examples directory that can be used to test file integrity, or that you can use as examples for your own code: =over 4 =item examples/zipcheck.pl shows how to use an attempted extraction to test a file. =item examples/ziptest.pl shows how to test CRCs in a file. =back =head1 Duplicate files in Zip? B Archive::Zip let me put the same file in my Zip twice! Why don't you prevent this? B As far as I can tell, this is not disallowed by the Zip spec. If you think it's a bad idea, check for it yourself: $zip->addFile($someFile, $someName) unless $zip->memberNamed($someName); I can even imagine cases where this might be useful (for instance, multiple versions of files). =head1 File ownership/permissions/ACLS/etc B Why doesn't Archive::Zip deal with file ownership, ACLs, etc.? B There is no standard way to represent these in the Zip file format. If you want to send me code to properly handle the various extra fields that have been used to represent these through the years, I'll look at it. =head1 I can't compile but ActiveState only has an old version of Archive::Zip B I've only installed modules using ActiveState's PPM program and repository. But they have a much older version of Archive::Zip than is in CPAN. Will you send me a newer PPM? B Probably not, unless I get lots of extra time. But there's no reason you can't install the version from CPAN. Archive::Zip is pure Perl, so all you need is NMAKE, which you can get for free from Microsoft (see the FAQ in the ActiveState documentation for details on how to install CPAN modules). =head1 My JPEGs (or MP3's) don't compress when I put them into Zips! B How come my JPEGs and MP3's don't compress much when I put them into Zips? B Because they're already compressed. =head1 Under Windows, things lock up/get damaged B I'm using Windows. When I try to use Archive::Zip, my machine locks up/makes funny sounds/displays a BSOD/corrupts data. How can I fix this? B First, try the newest version of Compress::Raw::Zlib. I know of Windows-related problems prior to v1.14 of that library. If that doesn't get rid of the problem, fix your computer or get rid of Windows. =head1 Zip contents in a scalar B I want to read a Zip file from (or write one to) a scalar variable instead of a file. How can I do this? B Use C and the C and C methods. See C and C. =head1 Reading from streams B How do I read from a stream (like for the Info-Zip C program)? B This isn't currently supported, though writing to a stream is. gdata/inst/perl/Archive/Zip/Tree.pm0000644000175100001440000000147413003720416016651 0ustar hornikuserspackage Archive::Zip::Tree; use strict; use vars qw{$VERSION}; BEGIN { $VERSION = '1.30'; } use Archive::Zip; warn( "Archive::Zip::Tree is deprecated; its methods have been moved into Archive::Zip." ) if $^W; 1; __END__ =head1 NAME Archive::Zip::Tree - (DEPRECATED) methods for adding/extracting trees using Archive::Zip =head1 SYNOPSIS =head1 DESCRIPTION This module is deprecated, because all its methods were moved into the main Archive::Zip module. It is included in the distribution merely to avoid breaking old code. See L. =head1 AUTHOR Ned Konz, perl@bike-nomad.com =head1 COPYRIGHT Copyright (c) 2000-2002 Ned Konz. All rights reserved. 
This program is free software; you can redistribute it and/or modify it under the same terms as Perl itself. =head1 SEE ALSO L =cut gdata/inst/perl/Archive/Zip/MemberRead.pm0000644000175100001440000001636713003720416017764 0ustar hornikuserspackage Archive::Zip::MemberRead; =head1 NAME Archive::Zip::MemberRead - A wrapper that lets you read Zip archive members as if they were files. =cut =head1 SYNOPSIS use Archive::Zip; use Archive::Zip::MemberRead; $zip = Archive::Zip->new("file.zip"); $fh = Archive::Zip::MemberRead->new($zip, "subdir/abc.txt"); while (defined($line = $fh->getline())) { print $fh->input_line_number . "#: $line\n"; } $read = $fh->read($buffer, 32*1024); print "Read $read bytes as :$buffer:\n"; =head1 DESCRIPTION The Archive::Zip::MemberRead module lets you read Zip archive member data just like you read data from files. =head1 METHODS =over 4 =cut use strict; use Archive::Zip qw( :ERROR_CODES :CONSTANTS ); use vars qw{$VERSION}; my $nl; BEGIN { $VERSION = '1.30'; $VERSION = eval $VERSION; # Requirement for newline conversion. Should check for e.g., DOS and OS/2 as well, but am too lazy. $nl = $^O eq 'MSWin32' ? "\r\n" : "\n"; } =item Archive::Zip::Member::readFileHandle() You can get a C from an archive member by calling C: my $member = $zip->memberNamed('abc/def.c'); my $fh = $member->readFileHandle(); while (defined($line = $fh->getline())) { # ... } $fh->close(); =cut sub Archive::Zip::Member::readFileHandle { return Archive::Zip::MemberRead->new( shift() ); } =item Archive::Zip::MemberRead->new($zip, $fileName) =item Archive::Zip::MemberRead->new($zip, $member) =item Archive::Zip::MemberRead->new($member) Construct a new Archive::Zip::MemberRead on the specified member. my $fh = Archive::Zip::MemberRead->new($zip, 'fred.c') =cut sub new { my ( $class, $zip, $file ) = @_; my ( $self, $member ); if ( $zip && $file ) # zip and filename, or zip and member { $member = ref($file) ? $file : $zip->memberNamed($file); } elsif ( $zip && !$file && ref($zip) ) # just member { $member = $zip; } else { die( 'Archive::Zip::MemberRead::new needs a zip and filename, zip and member, or member' ); } $self = {}; bless( $self, $class ); $self->set_member($member); return $self; } sub set_member { my ( $self, $member ) = @_; $self->{member} = $member; $self->set_compression(COMPRESSION_STORED); $self->rewind(); } sub set_compression { my ( $self, $compression ) = @_; $self->{member}->desiredCompressionMethod($compression) if $self->{member}; } =item setLineEnd(expr) Set the line end character to use. This is set to \n by default except on Windows systems where it is set to \r\n. You will only need to set this on systems which are not Windows or Unix based and require a line end diffrent from \n. This is a class method so call as C->C =cut sub setLineEnd { shift; $nl = shift; } =item rewind() Rewinds an C so that you can read from it again starting at the beginning. =cut sub rewind { my $self = shift; $self->_reset_vars(); $self->{member}->rewindData() if $self->{member}; } sub _reset_vars { my $self = shift; $self->{line_no} = 0; $self->{at_end} = 0; delete $self->{buffer}; } =item input_record_separator(expr) If the argumnet is given, input_record_separator for this instance is set to it. The current setting (which may be the global $/) is always returned. =cut sub input_record_separator { my $self = shift; if (@_) { $self->{sep} = shift; $self->{sep_re} = _sep_as_re($self->{sep}); # Cache the RE as an optimization } return exists $self->{sep} ? 
$self->{sep} : $/; } # Return the input_record_separator in use as an RE fragment # Note that if we have a per-instance input_record_separator # we can just return the already converted value. Otherwise, # the conversion must be done on $/ every time since we cannot # know whether it has changed or not. sub _sep_re { my $self = shift; # Important to phrase this way: sep's value may be undef. return exists $self->{sep} ? $self->{sep_re} : _sep_as_re($/); } # Convert the input record separator into an RE and return it. sub _sep_as_re { my $sep = shift; if (defined $sep) { if ($sep eq '') { return "(?:$nl){2,}"; } else { $sep =~ s/\n/$nl/og; return quotemeta $sep; } } else { return undef; } } =item input_line_number() Returns the current line number, but only if you're using C. Using C will not update the line number. =cut sub input_line_number { my $self = shift; return $self->{line_no}; } =item close() Closes the given file handle. =cut sub close { my $self = shift; $self->_reset_vars(); $self->{member}->endRead(); } =item buffer_size([ $size ]) Gets or sets the buffer size used for reads. Default is the chunk size used by Archive::Zip. =cut sub buffer_size { my ( $self, $size ) = @_; if ( !$size ) { return $self->{chunkSize} || Archive::Zip::chunkSize(); } else { $self->{chunkSize} = $size; } } =item getline() Returns the next line from the currently open member. Makes sense only for text files. A read error is considered fatal enough to die. Returns undef on eof. All subsequent calls would return undef, unless a rewind() is called. Note: The line returned has the input_record_separator (default: newline) removed. =cut sub getline { my $self = shift; my $size = $self->buffer_size(); my $sep = $self->_sep_re(); for (;;) { if ( $sep && defined($self->{buffer}) && $self->{buffer} =~ s/^(.*?)$sep//s ) { $self->{line_no}++; return $1; } elsif ($self->{at_end}) { $self->{line_no}++ if $self->{buffer}; return delete $self->{buffer}; } my ($temp,$status) = $self->{member}->readChunk($size); if ($status != AZ_OK && $status != AZ_STREAM_END) { die "ERROR: Error reading chunk from archive - $status"; } $self->{at_end} = $status == AZ_STREAM_END; $self->{buffer} .= $$temp; } } =item read($buffer, $num_bytes_to_read) Simulates a normal C system call. Returns the no. of bytes read. C on error, 0 on eof, I: $fh = Archive::Zip::MemberRead->new($zip, "sreeji/secrets.bin"); while (1) { $read = $fh->read($buffer, 1024); die "FATAL ERROR reading my secrets !\n" if (!defined($read)); last if (!$read); # Do processing. .... } =cut # # All these $_ are required to emulate read(). # sub read { my $self = $_[0]; my $size = $_[2]; my ( $temp, $status, $ret ); ( $temp, $status ) = $self->{member}->readChunk($size); if ( $status != AZ_OK && $status != AZ_STREAM_END ) { $_[1] = undef; $ret = undef; } else { $_[1] = $$temp; $ret = length($$temp); } return $ret; } 1; =back =head1 AUTHOR Sreeji K. Das, See L by Ned Konz without which this module does not make any sense! Minor mods by Ned Konz. =head1 COPYRIGHT Copyright 2002 Sreeji K. Das. This program is free software; you can redistribute it and/or modify it under the same terms as Perl itself. 
=cut gdata/inst/perl/Archive/Zip/DirectoryMember.pm0000644000175100001440000000371713003720416021050 0ustar hornikuserspackage Archive::Zip::DirectoryMember; use strict; use File::Path; use vars qw( $VERSION @ISA ); BEGIN { $VERSION = '1.30'; @ISA = qw( Archive::Zip::Member ); } use Archive::Zip qw( :ERROR_CODES :UTILITY_METHODS ); sub _newNamed { my $class = shift; my $fileName = shift; # FS name my $newName = shift; # Zip name $newName = _asZipDirName($fileName) unless $newName; my $self = $class->new(@_); $self->{'externalFileName'} = $fileName; $self->fileName($newName); if ( -e $fileName ) { # -e does NOT do a full stat, so we need to do one now if ( -d _ ) { my @stat = stat(_); $self->unixFileAttributes( $stat[2] ); my $mod_t = $stat[9]; if ( $^O eq 'MSWin32' and !$mod_t ) { $mod_t = time(); } $self->setLastModFileDateTimeFromUnix($mod_t); } else { # hmm.. trying to add a non-directory? _error( $fileName, ' exists but is not a directory' ); return undef; } } else { $self->unixFileAttributes( $self->DEFAULT_DIRECTORY_PERMISSIONS ); $self->setLastModFileDateTimeFromUnix( time() ); } return $self; } sub externalFileName { shift->{'externalFileName'}; } sub isDirectory { return 1; } sub extractToFileNamed { my $self = shift; my $name = shift; # local FS name my $attribs = $self->unixFileAttributes() & 07777; mkpath( $name, 0, $attribs ); # croaks on error utime( $self->lastModTime(), $self->lastModTime(), $name ); return AZ_OK; } sub fileName { my $self = shift; my $newName = shift; $newName =~ s{/?$}{/} if defined($newName); return $self->SUPER::fileName($newName); } # So people don't get too confused. This way it looks like the problem # is in their code... sub contents { return wantarray ? ( undef, AZ_OK ) : undef; } 1; gdata/inst/perl/Archive/Zip/MockFileHandle.pm0000644000175100001440000000247113003720416020555 0ustar hornikuserspackage Archive::Zip::MockFileHandle; # Output file handle that calls a custom write routine # Ned Konz, March 2000 # This is provided to help with writing zip files # when you have to process them a chunk at a time. use strict; use vars qw{$VERSION}; BEGIN { $VERSION = '1.30'; $VERSION = eval $VERSION; } sub new { my $class = shift || __PACKAGE__; $class = ref($class) || $class; my $self = bless( { 'position' => 0, 'size' => 0 }, $class ); return $self; } sub eof { my $self = shift; return $self->{'position'} >= $self->{'size'}; } # Copy given buffer to me sub print { my $self = shift; my $bytes = join( '', @_ ); my $bytesWritten = $self->writeHook($bytes); if ( $self->{'position'} + $bytesWritten > $self->{'size'} ) { $self->{'size'} = $self->{'position'} + $bytesWritten; } $self->{'position'} += $bytesWritten; return $bytesWritten; } # Called on each write. # Override in subclasses. # Return number of bytes written (0 on error). sub writeHook { my $self = shift; my $bytes = shift; return length($bytes); } sub binmode { 1 } sub close { 1 } sub clearerr { 1 } # I'm write-only! 
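# Illustrative sketch: one way a caller might use this class, by subclassing
# it and overriding writeHook() to stream the archive bytes elsewhere a chunk
# at a time.  The package and helper names below are hypothetical, not part
# of Archive::Zip:
#
#   package My::CallbackHandle;
#   use vars qw( @ISA );
#   @ISA = qw( Archive::Zip::MockFileHandle );
#
#   sub writeHook {                    # called once per print()
#       my ( $self, $bytes ) = @_;
#       My::App::sendChunk($bytes);    # hypothetical destination
#       return length($bytes);         # report every byte as written
#   }
#
#   # ... then, e.g.:  $zip->writeToFileHandle( My::CallbackHandle->new() );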
sub read { 0 } sub tell { return shift->{'position'} } sub opened { 1 } 1; gdata/inst/perl/Archive/Zip/Member.pm0000644000175100001440000007742213003720416017167 0ustar hornikuserspackage Archive::Zip::Member; # A generic membet of an archive use strict; use vars qw( $VERSION @ISA ); BEGIN { $VERSION = '1.30'; @ISA = qw( Archive::Zip ); } use Archive::Zip qw( :CONSTANTS :MISC_CONSTANTS :ERROR_CODES :PKZIP_CONSTANTS :UTILITY_METHODS ); use Time::Local (); use Compress::Raw::Zlib qw( Z_OK Z_STREAM_END MAX_WBITS ); use File::Path; use File::Basename; use constant ZIPFILEMEMBERCLASS => 'Archive::Zip::ZipFileMember'; use constant NEWFILEMEMBERCLASS => 'Archive::Zip::NewFileMember'; use constant STRINGMEMBERCLASS => 'Archive::Zip::StringMember'; use constant DIRECTORYMEMBERCLASS => 'Archive::Zip::DirectoryMember'; # Unix perms for default creation of files/dirs. use constant DEFAULT_DIRECTORY_PERMISSIONS => 040755; use constant DEFAULT_FILE_PERMISSIONS => 0100666; use constant DIRECTORY_ATTRIB => 040000; use constant FILE_ATTRIB => 0100000; # Returns self if successful, else undef # Assumes that fh is positioned at beginning of central directory file header. # Leaves fh positioned immediately after file header or EOCD signature. sub _newFromZipFile { my $class = shift; my $self = $class->ZIPFILEMEMBERCLASS->_newFromZipFile(@_); return $self; } sub newFromString { my $class = shift; my ( $stringOrStringRef, $fileName ); if ( ref( $_[0] ) eq 'HASH' ) { $stringOrStringRef = $_[0]->{string}; $fileName = $_[0]->{zipName}; } else { ( $stringOrStringRef, $fileName ) = @_; } my $self = $class->STRINGMEMBERCLASS->_newFromString( $stringOrStringRef, $fileName ); return $self; } sub newFromFile { my $class = shift; my ( $fileName, $zipName ); if ( ref( $_[0] ) eq 'HASH' ) { $fileName = $_[0]->{fileName}; $zipName = $_[0]->{zipName}; } else { ( $fileName, $zipName ) = @_; } my $self = $class->NEWFILEMEMBERCLASS->_newFromFileNamed( $fileName, $zipName ); return $self; } sub newDirectoryNamed { my $class = shift; my ( $directoryName, $newName ); if ( ref( $_[0] ) eq 'HASH' ) { $directoryName = $_[0]->{directoryName}; $newName = $_[0]->{zipName}; } else { ( $directoryName, $newName ) = @_; } my $self = $class->DIRECTORYMEMBERCLASS->_newNamed( $directoryName, $newName ); return $self; } sub new { my $class = shift; my $self = { 'lastModFileDateTime' => 0, 'fileAttributeFormat' => FA_UNIX, 'versionMadeBy' => 20, 'versionNeededToExtract' => 20, 'bitFlag' => 0, 'compressionMethod' => COMPRESSION_STORED, 'desiredCompressionMethod' => COMPRESSION_STORED, 'desiredCompressionLevel' => COMPRESSION_LEVEL_NONE, 'internalFileAttributes' => 0, 'externalFileAttributes' => 0, # set later 'fileName' => '', 'cdExtraField' => '', 'localExtraField' => '', 'fileComment' => '', 'crc32' => 0, 'compressedSize' => 0, 'uncompressedSize' => 0, 'isSymbolicLink' => 0, @_ }; bless( $self, $class ); $self->unixFileAttributes( $self->DEFAULT_FILE_PERMISSIONS ); return $self; } sub _becomeDirectoryIfNecessary { my $self = shift; $self->_become(DIRECTORYMEMBERCLASS) if $self->isDirectory(); return $self; } # Morph into given class (do whatever cleanup I need to do) sub _become { return bless( $_[0], $_[1] ); } sub versionMadeBy { shift->{'versionMadeBy'}; } sub fileAttributeFormat { my $self = shift; if (@_) { $self->{fileAttributeFormat} = ( ref( $_[0] ) eq 'HASH' ) ? 
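          # the setter accepts either a hash ref of the form { format => FA_UNIX }
          # or a plain scalar value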
$_[0]->{format} : $_[0]; } else { return $self->{fileAttributeFormat}; } } sub versionNeededToExtract { shift->{'versionNeededToExtract'}; } sub bitFlag { my $self = shift; # Set General Purpose Bit Flags according to the desiredCompressionLevel setting if ( $self->desiredCompressionLevel == 1 || $self->desiredCompressionLevel == 2 ) { $self->{'bitFlag'} = DEFLATING_COMPRESSION_FAST; } elsif ( $self->desiredCompressionLevel == 3 || $self->desiredCompressionLevel == 4 || $self->desiredCompressionLevel == 5 || $self->desiredCompressionLevel == 6 || $self->desiredCompressionLevel == 7 ) { $self->{'bitFlag'} = DEFLATING_COMPRESSION_NORMAL; } elsif ( $self->desiredCompressionLevel == 8 || $self->desiredCompressionLevel == 9 ) { $self->{'bitFlag'} = DEFLATING_COMPRESSION_MAXIMUM; } $self->{'bitFlag'}; } sub compressionMethod { shift->{'compressionMethod'}; } sub desiredCompressionMethod { my $self = shift; my $newDesiredCompressionMethod = ( ref( $_[0] ) eq 'HASH' ) ? shift->{compressionMethod} : shift; my $oldDesiredCompressionMethod = $self->{'desiredCompressionMethod'}; if ( defined($newDesiredCompressionMethod) ) { $self->{'desiredCompressionMethod'} = $newDesiredCompressionMethod; if ( $newDesiredCompressionMethod == COMPRESSION_STORED ) { $self->{'desiredCompressionLevel'} = 0; $self->{'bitFlag'} &= ~GPBF_HAS_DATA_DESCRIPTOR_MASK; } elsif ( $oldDesiredCompressionMethod == COMPRESSION_STORED ) { $self->{'desiredCompressionLevel'} = COMPRESSION_LEVEL_DEFAULT; } } return $oldDesiredCompressionMethod; } sub desiredCompressionLevel { my $self = shift; my $newDesiredCompressionLevel = ( ref( $_[0] ) eq 'HASH' ) ? shift->{compressionLevel} : shift; my $oldDesiredCompressionLevel = $self->{'desiredCompressionLevel'}; if ( defined($newDesiredCompressionLevel) ) { $self->{'desiredCompressionLevel'} = $newDesiredCompressionLevel; $self->{'desiredCompressionMethod'} = ( $newDesiredCompressionLevel ? COMPRESSION_DEFLATED : COMPRESSION_STORED ); } return $oldDesiredCompressionLevel; } sub fileName { my $self = shift; my $newName = shift; if ($newName) { $newName =~ s{[\\/]+}{/}g; # deal with dos/windoze problems $self->{'fileName'} = $newName; } return $self->{'fileName'}; } sub lastModFileDateTime { my $modTime = shift->{'lastModFileDateTime'}; $modTime =~ m/^(\d+)$/; # untaint return $1; } sub lastModTime { my $self = shift; return _dosToUnixTime( $self->lastModFileDateTime() ); } sub setLastModFileDateTimeFromUnix { my $self = shift; my $time_t = shift; $self->{'lastModFileDateTime'} = _unixToDosTime($time_t); } sub internalFileAttributes { shift->{'internalFileAttributes'}; } sub externalFileAttributes { shift->{'externalFileAttributes'}; } # Convert UNIX permissions into proper value for zip file # Usable as a function or a method sub _mapPermissionsFromUnix { my $self = shift; my $mode = shift; my $attribs = $mode << 16; # Microsoft Windows Explorer needs this bit set for directories if ( $mode & DIRECTORY_ATTRIB ) { $attribs |= 16; } return $attribs; # TODO: map more MS-DOS perms } # Convert ZIP permissions into Unix ones # # This was taken from Info-ZIP group's portable UnZip # zipfile-extraction program, version 5.50. # http://www.info-zip.org/pub/infozip/ # # See the mapattr() function in unix/unix.c # See the attribute format constants in unzpriv.h # # XXX Note that there's one situation that isn't implemented # yet that depends on the "extra field." 
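#
# Worked example (for illustration only): a regular file with Unix mode
# 0100644 maps to external attributes 0100644 << 16 == 0x81A40000, while a
# directory with mode 040755 maps to (040755 << 16) | 0x10 == 0x41ED0010,
# the extra 0x10 being the MS-DOS "directory" bit that Windows Explorer
# expects.  For FA_UNIX-style archives, _mapPermissionsToUnix() below simply
# recovers the mode from the top 16 bits of externalFileAttributes.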
sub _mapPermissionsToUnix { my $self = shift; my $format = $self->{'fileAttributeFormat'}; my $attribs = $self->{'externalFileAttributes'}; my $mode = 0; if ( $format == FA_AMIGA ) { $attribs = $attribs >> 17 & 7; # Amiga RWE bits $mode = $attribs << 6 | $attribs << 3 | $attribs; return $mode; } if ( $format == FA_THEOS ) { $attribs &= 0xF1FFFFFF; if ( ( $attribs & 0xF0000000 ) != 0x40000000 ) { $attribs &= 0x01FFFFFF; # not a dir, mask all ftype bits } else { $attribs &= 0x41FFFFFF; # leave directory bit as set } } if ( $format == FA_UNIX || $format == FA_VAX_VMS || $format == FA_ACORN || $format == FA_ATARI_ST || $format == FA_BEOS || $format == FA_QDOS || $format == FA_TANDEM ) { $mode = $attribs >> 16; return $mode if $mode != 0 or not $self->localExtraField; # warn("local extra field is: ", $self->localExtraField, "\n"); # XXX This condition is not implemented # I'm just including the comments from the info-zip section for now. # Some (non-Info-ZIP) implementations of Zip for Unix and # VMS (and probably others ??) leave 0 in the upper 16-bit # part of the external_file_attributes field. Instead, they # store file permission attributes in some extra field. # As a work-around, we search for the presence of one of # these extra fields and fall back to the MSDOS compatible # part of external_file_attributes if one of the known # e.f. types has been detected. # Later, we might implement extraction of the permission # bits from the VMS extra field. But for now, the work-around # should be sufficient to provide "readable" extracted files. # (For ASI Unix e.f., an experimental remap from the e.f. # mode value IS already provided!) } # PKWARE's PKZip for Unix marks entries as FA_MSDOS, but stores the # Unix attributes in the upper 16 bits of the external attributes # field, just like Info-ZIP's Zip for Unix. We try to use that # value, after a check for consistency with the MSDOS attribute # bits (see below). if ( $format == FA_MSDOS ) { $mode = $attribs >> 16; } # FA_MSDOS, FA_OS2_HPFS, FA_WINDOWS_NTFS, FA_MACINTOSH, FA_TOPS20 $attribs = !( $attribs & 1 ) << 1 | ( $attribs & 0x10 ) >> 4; # keep previous $mode setting when its "owner" # part appears to be consistent with DOS attribute flags! return $mode if ( $mode & 0700 ) == ( 0400 | $attribs << 6 ); $mode = 0444 | $attribs << 6 | $attribs << 3 | $attribs; return $mode; } sub unixFileAttributes { my $self = shift; my $oldPerms = $self->_mapPermissionsToUnix; my $perms; if ( @_ ) { $perms = ( ref( $_[0] ) eq 'HASH' ) ? $_[0]->{attributes} : $_[0]; if ( $self->isDirectory ) { $perms &= ~FILE_ATTRIB; $perms |= DIRECTORY_ATTRIB; } else { $perms &= ~DIRECTORY_ATTRIB; $perms |= FILE_ATTRIB; } $self->{externalFileAttributes} = $self->_mapPermissionsFromUnix($perms); } return $oldPerms; } sub localExtraField { my $self = shift; if (@_) { $self->{localExtraField} = ( ref( $_[0] ) eq 'HASH' ) ? $_[0]->{field} : $_[0]; } else { return $self->{localExtraField}; } } sub cdExtraField { my $self = shift; if (@_) { $self->{cdExtraField} = ( ref( $_[0] ) eq 'HASH' ) ? $_[0]->{field} : $_[0]; } else { return $self->{cdExtraField}; } } sub extraFields { my $self = shift; return $self->localExtraField() . $self->cdExtraField(); } sub fileComment { my $self = shift; if (@_) { $self->{fileComment} = ( ref( $_[0] ) eq 'HASH' ) ? 
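        # accepts { comment => $text } or a plain scalar; pack 'C0a*' stores
        # the comment as bytes (cf. the "in case of Unicode" note in contents())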
pack( 'C0a*', $_[0]->{comment} ) : pack( 'C0a*', $_[0] ); } else { return $self->{fileComment}; } } sub hasDataDescriptor { my $self = shift; if (@_) { my $shouldHave = shift; if ($shouldHave) { $self->{'bitFlag'} |= GPBF_HAS_DATA_DESCRIPTOR_MASK; } else { $self->{'bitFlag'} &= ~GPBF_HAS_DATA_DESCRIPTOR_MASK; } } return $self->{'bitFlag'} & GPBF_HAS_DATA_DESCRIPTOR_MASK; } sub crc32 { shift->{'crc32'}; } sub crc32String { sprintf( "%08x", shift->{'crc32'} ); } sub compressedSize { shift->{'compressedSize'}; } sub uncompressedSize { shift->{'uncompressedSize'}; } sub isEncrypted { shift->bitFlag() & GPBF_ENCRYPTED_MASK; } sub isTextFile { my $self = shift; my $bit = $self->internalFileAttributes() & IFA_TEXT_FILE_MASK; if (@_) { my $flag = ( ref( $_[0] ) eq 'HASH' ) ? shift->{flag} : shift; $self->{'internalFileAttributes'} &= ~IFA_TEXT_FILE_MASK; $self->{'internalFileAttributes'} |= ( $flag ? IFA_TEXT_FILE: IFA_BINARY_FILE ); } return $bit == IFA_TEXT_FILE; } sub isBinaryFile { my $self = shift; my $bit = $self->internalFileAttributes() & IFA_TEXT_FILE_MASK; if (@_) { my $flag = shift; $self->{'internalFileAttributes'} &= ~IFA_TEXT_FILE_MASK; $self->{'internalFileAttributes'} |= ( $flag ? IFA_BINARY_FILE: IFA_TEXT_FILE ); } return $bit == IFA_BINARY_FILE; } sub extractToFileNamed { my $self = shift; # local FS name my $name = ( ref( $_[0] ) eq 'HASH' ) ? $_[0]->{name} : $_[0]; $self->{'isSymbolicLink'} = 0; # Check if the file / directory is a symbolic link or not if ( $self->{'externalFileAttributes'} == 0xA1FF0000 ) { $self->{'isSymbolicLink'} = 1; $self->{'newName'} = $name; my ( $status, $fh ) = _newFileHandle( $name, 'r' ); my $retval = $self->extractToFileHandle($fh); $fh->close(); } else { #return _writeSymbolicLink($self, $name) if $self->isSymbolicLink(); return _error("encryption unsupported") if $self->isEncrypted(); mkpath( dirname($name) ); # croaks on error my ( $status, $fh ) = _newFileHandle( $name, 'w' ); return _ioError("Can't open file $name for write") unless $status; my $retval = $self->extractToFileHandle($fh); $fh->close(); chmod ($self->unixFileAttributes(), $name) or return _error("Can't chmod() ${name}: $!"); utime( $self->lastModTime(), $self->lastModTime(), $name ); return $retval; } } sub _writeSymbolicLink { my $self = shift; my $name = shift; my $chunkSize = $Archive::Zip::ChunkSize; #my ( $outRef, undef ) = $self->readChunk($chunkSize); my $fh; my $retval = $self->extractToFileHandle($fh); my ( $outRef, undef ) = $self->readChunk(100); } sub isSymbolicLink { my $self = shift; if ( $self->{'externalFileAttributes'} == 0xA1FF0000 ) { $self->{'isSymbolicLink'} = 1; } else { return 0; } 1; } sub isDirectory { return 0; } sub externalFileName { return undef; } # The following are used when copying data sub _writeOffset { shift->{'writeOffset'}; } sub _readOffset { shift->{'readOffset'}; } sub writeLocalHeaderRelativeOffset { shift->{'writeLocalHeaderRelativeOffset'}; } sub wasWritten { shift->{'wasWritten'} } sub _dataEnded { shift->{'dataEnded'}; } sub _readDataRemaining { shift->{'readDataRemaining'}; } sub _inflater { shift->{'inflater'}; } sub _deflater { shift->{'deflater'}; } # Return the total size of my local header sub _localHeaderSize { my $self = shift; return SIGNATURE_LENGTH + LOCAL_FILE_HEADER_LENGTH + length( $self->fileName() ) + length( $self->localExtraField() ); } # Return the total size of my CD header sub _centralDirectoryHeaderSize { my $self = shift; return SIGNATURE_LENGTH + CENTRAL_DIRECTORY_FILE_HEADER_LENGTH + length( $self->fileName() 
) + length( $self->cdExtraField() ) + length( $self->fileComment() ); } # DOS date/time format # 0-4 (5) Second divided by 2 # 5-10 (6) Minute (0-59) # 11-15 (5) Hour (0-23 on a 24-hour clock) # 16-20 (5) Day of the month (1-31) # 21-24 (4) Month (1 = January, 2 = February, etc.) # 25-31 (7) Year offset from 1980 (add 1980 to get actual year) # Convert DOS date/time format to unix time_t format # NOT AN OBJECT METHOD! sub _dosToUnixTime { my $dt = shift; return time() unless defined($dt); my $year = ( ( $dt >> 25 ) & 0x7f ) + 80; my $mon = ( ( $dt >> 21 ) & 0x0f ) - 1; my $mday = ( ( $dt >> 16 ) & 0x1f ); my $hour = ( ( $dt >> 11 ) & 0x1f ); my $min = ( ( $dt >> 5 ) & 0x3f ); my $sec = ( ( $dt << 1 ) & 0x3e ); # catch errors my $time_t = eval { Time::Local::timelocal( $sec, $min, $hour, $mday, $mon, $year ); }; return time() if ($@); return $time_t; } # Note, this isn't exactly UTC 1980, it's 1980 + 12 hours and 1 # minute so that nothing timezoney can muck us up. my $safe_epoch = 315576060; # convert a unix time to DOS date/time # NOT AN OBJECT METHOD! sub _unixToDosTime { my $time_t = shift; unless ($time_t) { _error("Tried to add member with zero or undef value for time"); $time_t = $safe_epoch; } if ( $time_t < $safe_epoch ) { _ioError("Unsupported date before 1980 encountered, moving to 1980"); $time_t = $safe_epoch; } my ( $sec, $min, $hour, $mday, $mon, $year ) = localtime($time_t); my $dt = 0; $dt += ( $sec >> 1 ); $dt += ( $min << 5 ); $dt += ( $hour << 11 ); $dt += ( $mday << 16 ); $dt += ( ( $mon + 1 ) << 21 ); $dt += ( ( $year - 80 ) << 25 ); return $dt; } # Write my local header to a file handle. # Stores the offset to the start of the header in my # writeLocalHeaderRelativeOffset member. # Returns AZ_OK on success. sub _writeLocalFileHeader { my $self = shift; my $fh = shift; my $signatureData = pack( SIGNATURE_FORMAT, LOCAL_FILE_HEADER_SIGNATURE ); $self->_print($fh, $signatureData) or return _ioError("writing local header signature"); my $header = pack( LOCAL_FILE_HEADER_FORMAT, $self->versionNeededToExtract(), $self->bitFlag(), $self->desiredCompressionMethod(), $self->lastModFileDateTime(), $self->crc32(), $self->compressedSize(), # may need to be re-written later $self->uncompressedSize(), length( $self->fileName() ), length( $self->localExtraField() ) ); $self->_print($fh, $header) or return _ioError("writing local header"); # Check for a valid filename or a filename equal to a literal `0' if ( $self->fileName() || $self->fileName eq '0' ) { $self->_print($fh, $self->fileName() ) or return _ioError("writing local header filename"); } if ( $self->localExtraField() ) { $self->_print($fh, $self->localExtraField() ) or return _ioError("writing local extra field"); } return AZ_OK; } sub _writeCentralDirectoryFileHeader { my $self = shift; my $fh = shift; my $sigData = pack( SIGNATURE_FORMAT, CENTRAL_DIRECTORY_FILE_HEADER_SIGNATURE ); $self->_print($fh, $sigData) or return _ioError("writing central directory header signature"); my $fileNameLength = length( $self->fileName() ); my $extraFieldLength = length( $self->cdExtraField() ); my $fileCommentLength = length( $self->fileComment() ); my $header = pack( CENTRAL_DIRECTORY_FILE_HEADER_FORMAT, $self->versionMadeBy(), $self->fileAttributeFormat(), $self->versionNeededToExtract(), $self->bitFlag(), $self->desiredCompressionMethod(), $self->lastModFileDateTime(), $self->crc32(), # these three fields should have been updated $self->_writeOffset(), # by writing the data stream out $self->uncompressedSize(), # $fileNameLength, 
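        # lengths of the variable-length fields (name, extra field, comment);
        # their bytes are written out right after this fixed-size header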
$extraFieldLength, $fileCommentLength, 0, # {'diskNumberStart'}, $self->internalFileAttributes(), $self->externalFileAttributes(), $self->writeLocalHeaderRelativeOffset() ); $self->_print($fh, $header) or return _ioError("writing central directory header"); if ($fileNameLength) { $self->_print($fh, $self->fileName() ) or return _ioError("writing central directory header signature"); } if ($extraFieldLength) { $self->_print($fh, $self->cdExtraField() ) or return _ioError("writing central directory extra field"); } if ($fileCommentLength) { $self->_print($fh, $self->fileComment() ) or return _ioError("writing central directory file comment"); } return AZ_OK; } # This writes a data descriptor to the given file handle. # Assumes that crc32, writeOffset, and uncompressedSize are # set correctly (they should be after a write). # Further, the local file header should have the # GPBF_HAS_DATA_DESCRIPTOR_MASK bit set. sub _writeDataDescriptor { my $self = shift; my $fh = shift; my $header = pack( SIGNATURE_FORMAT . DATA_DESCRIPTOR_FORMAT, DATA_DESCRIPTOR_SIGNATURE, $self->crc32(), $self->_writeOffset(), # compressed size $self->uncompressedSize() ); $self->_print($fh, $header) or return _ioError("writing data descriptor"); return AZ_OK; } # Re-writes the local file header with new crc32 and compressedSize fields. # To be called after writing the data stream. # Assumes that filename and extraField sizes didn't change since last written. sub _refreshLocalFileHeader { my $self = shift; my $fh = shift; my $here = $fh->tell(); $fh->seek( $self->writeLocalHeaderRelativeOffset() + SIGNATURE_LENGTH, IO::Seekable::SEEK_SET ) or return _ioError("seeking to rewrite local header"); my $header = pack( LOCAL_FILE_HEADER_FORMAT, $self->versionNeededToExtract(), $self->bitFlag(), $self->desiredCompressionMethod(), $self->lastModFileDateTime(), $self->crc32(), $self->_writeOffset(), # compressed size $self->uncompressedSize(), length( $self->fileName() ), length( $self->localExtraField() ) ); $self->_print($fh, $header) or return _ioError("re-writing local header"); $fh->seek( $here, IO::Seekable::SEEK_SET ) or return _ioError("seeking after rewrite of local header"); return AZ_OK; } sub readChunk { my $self = shift; my $chunkSize = ( ref( $_[0] ) eq 'HASH' ) ? $_[0]->{chunkSize} : $_[0]; if ( $self->readIsDone() ) { $self->endRead(); my $dummy = ''; return ( \$dummy, AZ_STREAM_END ); } $chunkSize = $Archive::Zip::ChunkSize if not defined($chunkSize); $chunkSize = $self->_readDataRemaining() if $chunkSize > $self->_readDataRemaining(); my $buffer = ''; my $outputRef; my ( $bytesRead, $status ) = $self->_readRawChunk( \$buffer, $chunkSize ); return ( \$buffer, $status ) unless $status == AZ_OK; $self->{'readDataRemaining'} -= $bytesRead; $self->{'readOffset'} += $bytesRead; if ( $self->compressionMethod() == COMPRESSION_STORED ) { $self->{'crc32'} = $self->computeCRC32( $buffer, $self->{'crc32'} ); } ( $outputRef, $status ) = &{ $self->{'chunkHandler'} }( $self, \$buffer ); $self->{'writeOffset'} += length($$outputRef); $self->endRead() if $self->readIsDone(); return ( $outputRef, $status ); } # Read the next raw chunk of my data. Subclasses MUST implement. # my ( $bytesRead, $status) = $self->_readRawChunk( \$buffer, $chunkSize ); sub _readRawChunk { my $self = shift; return $self->_subclassResponsibility(); } # A place holder to catch rewindData errors if someone ignores # the error code. 
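# For illustration, a hypothetical caller of the chunked-read interface is
# expected to check that return status, along these lines (AZ_OK and
# AZ_STREAM_END come from Archive::Zip's :ERROR_CODES tag):
#
#   $member->rewindData() == AZ_OK or die "rewindData failed";
#   until ( $member->readIsDone() ) {
#       my ( $chunkRef, $status ) = $member->readChunk();
#       die "readChunk failed: $status"
#         if $status != AZ_OK && $status != AZ_STREAM_END;
#       # ... process $$chunkRef here ...
#       last if $status == AZ_STREAM_END;
#   }
#   $member->endRead();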
sub _noChunk { my $self = shift; return ( \undef, _error("trying to copy chunk when init failed") ); } # Basically a no-op so that I can have a consistent interface. # ( $outputRef, $status) = $self->_copyChunk( \$buffer ); sub _copyChunk { my ( $self, $dataRef ) = @_; return ( $dataRef, AZ_OK ); } # ( $outputRef, $status) = $self->_deflateChunk( \$buffer ); sub _deflateChunk { my ( $self, $buffer ) = @_; my ( $status ) = $self->_deflater()->deflate( $buffer, my $out ); if ( $self->_readDataRemaining() == 0 ) { my $extraOutput; ( $status ) = $self->_deflater()->flush($extraOutput); $out .= $extraOutput; $self->endRead(); return ( \$out, AZ_STREAM_END ); } elsif ( $status == Z_OK ) { return ( \$out, AZ_OK ); } else { $self->endRead(); my $retval = _error( 'deflate error', $status ); my $dummy = ''; return ( \$dummy, $retval ); } } # ( $outputRef, $status) = $self->_inflateChunk( \$buffer ); sub _inflateChunk { my ( $self, $buffer ) = @_; my ( $status ) = $self->_inflater()->inflate( $buffer, my $out ); my $retval; $self->endRead() unless $status == Z_OK; if ( $status == Z_OK || $status == Z_STREAM_END ) { $retval = ( $status == Z_STREAM_END ) ? AZ_STREAM_END: AZ_OK; return ( \$out, $retval ); } else { $retval = _error( 'inflate error', $status ); my $dummy = ''; return ( \$dummy, $retval ); } } sub rewindData { my $self = shift; my $status; # set to trap init errors $self->{'chunkHandler'} = $self->can('_noChunk'); # Work around WinZip bug with 0-length DEFLATED files $self->desiredCompressionMethod(COMPRESSION_STORED) if $self->uncompressedSize() == 0; # assume that we're going to read the whole file, and compute the CRC anew. $self->{'crc32'} = 0 if ( $self->compressionMethod() == COMPRESSION_STORED ); # These are the only combinations of methods we deal with right now. if ( $self->compressionMethod() == COMPRESSION_STORED and $self->desiredCompressionMethod() == COMPRESSION_DEFLATED ) { ( $self->{'deflater'}, $status ) = Compress::Raw::Zlib::Deflate->new( '-Level' => $self->desiredCompressionLevel(), '-WindowBits' => -MAX_WBITS(), # necessary magic '-Bufsize' => $Archive::Zip::ChunkSize, @_ ); # pass additional options return _error( 'deflateInit error:', $status ) unless $status == Z_OK; $self->{'chunkHandler'} = $self->can('_deflateChunk'); } elsif ( $self->compressionMethod() == COMPRESSION_DEFLATED and $self->desiredCompressionMethod() == COMPRESSION_STORED ) { ( $self->{'inflater'}, $status ) = Compress::Raw::Zlib::Inflate->new( '-WindowBits' => -MAX_WBITS(), # necessary magic '-Bufsize' => $Archive::Zip::ChunkSize, @_ ); # pass additional options return _error( 'inflateInit error:', $status ) unless $status == Z_OK; $self->{'chunkHandler'} = $self->can('_inflateChunk'); } elsif ( $self->compressionMethod() == $self->desiredCompressionMethod() ) { $self->{'chunkHandler'} = $self->can('_copyChunk'); } else { return _error( sprintf( "Unsupported compression combination: read %d, write %d", $self->compressionMethod(), $self->desiredCompressionMethod() ) ); } $self->{'readDataRemaining'} = ( $self->compressionMethod() == COMPRESSION_STORED ) ? 
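      # bytes still to be read from the source: stored data is read at its
      # uncompressed size, deflated data at its compressed size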
$self->uncompressedSize() : $self->compressedSize(); $self->{'dataEnded'} = 0; $self->{'readOffset'} = 0; return AZ_OK; } sub endRead { my $self = shift; delete $self->{'inflater'}; delete $self->{'deflater'}; $self->{'dataEnded'} = 1; $self->{'readDataRemaining'} = 0; return AZ_OK; } sub readIsDone { my $self = shift; return ( $self->_dataEnded() or !$self->_readDataRemaining() ); } sub contents { my $self = shift; my $newContents = shift; if ( defined($newContents) ) { # change our type and call the subclass contents method. $self->_become(STRINGMEMBERCLASS); return $self->contents( pack( 'C0a*', $newContents ) ) ; # in case of Unicode } else { my $oldCompression = $self->desiredCompressionMethod(COMPRESSION_STORED); my $status = $self->rewindData(@_); if ( $status != AZ_OK ) { $self->endRead(); return $status; } my $retval = ''; while ( $status == AZ_OK ) { my $ref; ( $ref, $status ) = $self->readChunk( $self->_readDataRemaining() ); # did we get it in one chunk? if ( length($$ref) == $self->uncompressedSize() ) { $retval = $$ref; } else { $retval .= $$ref } } $self->desiredCompressionMethod($oldCompression); $self->endRead(); $status = AZ_OK if $status == AZ_STREAM_END; $retval = undef unless $status == AZ_OK; return wantarray ? ( $retval, $status ) : $retval; } } sub extractToFileHandle { my $self = shift; return _error("encryption unsupported") if $self->isEncrypted(); my $fh = ( ref( $_[0] ) eq 'HASH' ) ? shift->{fileHandle} : shift; _binmode($fh); my $oldCompression = $self->desiredCompressionMethod(COMPRESSION_STORED); my $status = $self->rewindData(@_); $status = $self->_writeData($fh) if $status == AZ_OK; $self->desiredCompressionMethod($oldCompression); $self->endRead(); return $status; } # write local header and data stream to file handle sub _writeToFileHandle { my $self = shift; my $fh = shift; my $fhIsSeekable = shift; my $offset = shift; return _error("no member name given for $self") if $self->fileName() eq ''; $self->{'writeLocalHeaderRelativeOffset'} = $offset; $self->{'wasWritten'} = 0; # Determine if I need to write a data descriptor # I need to do this if I can't refresh the header # and I don't know compressed size or crc32 fields. my $headerFieldsUnknown = ( ( $self->uncompressedSize() > 0 ) and ($self->compressionMethod() == COMPRESSION_STORED or $self->desiredCompressionMethod() == COMPRESSION_DEFLATED ) ); my $shouldWriteDataDescriptor = ( $headerFieldsUnknown and not $fhIsSeekable ); $self->hasDataDescriptor(1) if ($shouldWriteDataDescriptor); $self->{'writeOffset'} = 0; my $status = $self->rewindData(); ( $status = $self->_writeLocalFileHeader($fh) ) if $status == AZ_OK; ( $status = $self->_writeData($fh) ) if $status == AZ_OK; if ( $status == AZ_OK ) { $self->{'wasWritten'} = 1; if ( $self->hasDataDescriptor() ) { $status = $self->_writeDataDescriptor($fh); } elsif ($headerFieldsUnknown) { $status = $self->_refreshLocalFileHeader($fh); } } return $status; } # Copy my (possibly compressed) data to given file handle. 
# Returns C on success sub _writeData { my $self = shift; my $writeFh = shift; # If symbolic link, just create one if the operating system is Linux, Unix, BSD or VMS # TODO: Add checks for other operating systems if ( $self->{'isSymbolicLink'} == 1 && $^O eq 'linux' ) { my $chunkSize = $Archive::Zip::ChunkSize; my ( $outRef, $status ) = $self->readChunk($chunkSize); symlink $$outRef, $self->{'newName'}; } else { return AZ_OK if ( $self->uncompressedSize() == 0 ); my $status; my $chunkSize = $Archive::Zip::ChunkSize; while ( $self->_readDataRemaining() > 0 ) { my $outRef; ( $outRef, $status ) = $self->readChunk($chunkSize); return $status if ( $status != AZ_OK and $status != AZ_STREAM_END ); if ( length($$outRef) > 0 ) { $self->_print($writeFh, $$outRef) or return _ioError("write error during copy"); } last if $status == AZ_STREAM_END; } $self->{'compressedSize'} = $self->_writeOffset(); } return AZ_OK; } # Return true if I depend on the named file sub _usesFileNamed { return 0; } 1; gdata/inst/perl/supportedFormats.pl0000644000175100001440000000106013003720416017176 0ustar hornikusers#!/usr/bin/perl BEGIN { use File::Basename; # Add current path to perl library search path use lib dirname($0); } require 'module_tools.pl'; my( $HAS_Spreadsheet_ParseExcel, $HAS_Compress_Raw_Zlib, $HAS_Spreadsheet_ParseXLSX) = check_modules(0); $XLS_Support = $HAS_Spreadsheet_ParseExcel; $XLSX_Support = $HAS_Spreadsheet_ParseExcel && $HAS_Compress_Raw_Zlib && $HAS_Spreadsheet_ParseXLSX; printf "Supported formats: "; printf "XLS " if ( $XLS_Support ); printf "XLSX" if ( $XLSX_Support ); printf "\n"; gdata/inst/perl/Graphics/0000755000175100001440000000000013003720416015023 5ustar hornikusersgdata/inst/perl/Graphics/ColorUtils.pm0000644000175100001440000016307013003720416017467 0ustar hornikusers package Graphics::ColorUtils; use 5.008003; use strict; use warnings; use Carp; require Exporter; our @ISA = qw(Exporter); our %EXPORT_TAGS = ( 'gradients' => [ qw( gradient grad2rgb available_gradients register_gradient) ], 'names' => [ qw( name2rgb available_names register_name set_default_namespace get_default_namespace ) ], 'all' => [ qw( rgb2yiq yiq2rgb rgb2cmy cmy2rgb rgb2hls hls2rgb rgb2hsv hsv2rgb gradient grad2rgb available_gradients register_gradient name2rgb available_names register_name set_default_namespace get_default_namespace ) ], ); our @EXPORT_OK = ( @{ $EXPORT_TAGS{'all'} } ); our @EXPORT = qw( rgb2yiq yiq2rgb rgb2cmy cmy2rgb rgb2hls hls2rgb rgb2hsv hsv2rgb ); our $VERSION = '0.17'; # ================================================== # ++++++++++++++++++++++++++++++++++++++++++++++++++ # ================================================== # ================================================== # Utility # Takes a (r,g,b) triple of numbers (possibly floats) and returns # - a string like '#33FF21' in scalar context # - a triple of corresponding integers in array context sub _fmt { return wantarray ? 
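    # list context: truncate each channel to an integer triple;
    # scalar context: format as an HTML-style hex string such as '#33ff21'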
map { int } @_ : sprintf( "#%02x%02x%02x", @_ ); } # ================================================== # YIQ sub rgb2yiq { # $r, $g, $b : 0..255 my ( $r, $g, $b ) = map { $_/255.0 } @_; # Scale RGB to 0..1 my $y = 0.299*$r + 0.587*$g + 0.114*$b; my $i = 0.596*$r - 0.275*$g - 0.321*$b; my $q = 0.212*$r - 0.523*$g + 0.311*$b; return ( $y, $i, $q ); } sub yiq2rgb { # $y, $i, $q : 0..1 my ( $y, $i, $q ) = @_; my $r = 255.0*( $y + 0.956*$i + 0.621*$q ); my $g = 255.0*( $y - 0.272*$i - 0.647*$q ); my $b = 255.0*( $y - 1.105*$i + 1.705*$q ); return _fmt( $r, $g, $b ); } # ================================================== # CMY sub rgb2cmy { # $r, $g, $b : 0..255 my ( $r, $g, $b ) = map { $_/255.0 } @_; # Scale RGB to 0..1 return ( 1.0 - $r, 1.0 - $g, 1.0 - $b ); } sub cmy2rgb { # $c, $m, $y : 0..1 my ( $c, $m, $y ) = @_; return _fmt( 255*(1.0-$c), 255*(1.0-$m), 255*(1.0-$y) ); } # ================================================== # HLS # Foley, van Dam, et al: # Computer Grapics-Principles and Practice (1990) p595f sub rgb2hls { # $r, $g, $b : 0..255 # Note special name '$bb' to avoid conflict with ($a,$b) in sort() my ( $r, $g, $bb ) = map { $_/255.0 } @_; # Scale RGB to 0..1 my ( $minc, $maxc ) = ( sort { $a <=> $b } ( $r, $g, $bb ) )[0,2]; my $m = $minc + $maxc; # "Mean" if( $maxc == $minc ) { return ( 0, 0.5*$m, 0 ); } # Achromatic case my $d = $maxc - $minc; # "Delta" my $s = ( $m <= 1.0 ) ? $d/$m : $d/(2.0-$m ); # Saturation my $h = 0; # Hue if( $r == $maxc ) { $h = ( $g-$bb )/$d; } elsif( $g == $maxc ) { $h = 2 + ( $bb-$r )/$d; } elsif( $bb == $maxc ) { $h = 4 + ( $r-$g )/$d; } else { # Never get here! croak "Internal Error: Unexpected value ,$maxc, in Graphics::ColorUtils::rgb2hls( $r, $g, $bb )"; } $h *= 60; # Convert to degrees if( $h < 0 ) { $h += 360; } # Ensure positive hue return ( $h, 0.5*$m, $s ); } sub hls2rgb { # $h: 0..360 (red=0->yellow->green=120->cyan->blue=240->magenta steps of 60) # $l, $s : 0..1 (inclusive) my ( $h, $l, $s ) = @_; if( $s == 0.0 ) { return _fmt(255*$l, 255*$l, 255*$l); } # achromatic (grey) # This is the INCORRECT line as it is in the book quoted above: # my $m2 = ( $l <= 0.5 ) ? ($l*($l+$s)) : ($l - $l*$s + $s); # This is the CORRECT line: (first alternative: 1 vs $l) my $m2 = ( $l <= 0.5 ) ? ($l*(1+$s)) : ($l - $l*$s + $s); my $m1 = 2.0*$l - $m2; my $r = 255 * _value( $m1, $m2, $h + 120 ); my $g = 255 * _value( $m1, $m2, $h ); my $b = 255 * _value( $m1, $m2, $h - 120 ); return _fmt( $r, $g, $b ); } sub _value { my ( $n1, $n2, $hue ) = @_; if( $hue > 360 ) { $hue -= 360; } elsif( $hue < 0 ) { $hue += 360; } if( $hue < 60 ) { return $n1 + $hue * ( $n2-$n1 )/60.0; } elsif( $hue < 180 ) { return $n2; } elsif( $hue < 240 ) { return $n1 + ( 240-$hue ) * ( $n2-$n1 )/60.0; } else { return $n1; } } # ================================================== # HSV # Foley, van Dam, et al: # Computer Grapics-Principles and Practice (1990) p592f sub rgb2hsv { # $r, $g, $b : 0..25 # Note special name '$bb' to avoid conflict with ($a,$b) in sort() my ( $r, $g, $bb ) = map { $_/255.0 } @_; # Scale RGB to 0..1 my ( $minc, $maxc ) = ( sort { $a <=> $b } ( $r, $g, $bb ) )[0,2]; my $v = $maxc; # Value my $d = $maxc - $minc; # "Delta" my $s = ( $maxc == 0 ) ? 0 : $d/$maxc; # No saturation if R=G=B=0 if( $s == 0 ) { return ( 0, 0, $v ); } # Achromatic case my $h = 0; # Hue if( $r == $maxc ) { $h = ( $g-$bb )/$d; } elsif( $g == $maxc ) { $h = 2 + ( $bb-$r )/$d; } elsif( $bb == $maxc ) { $h = 4 + ( $r-$g )/$d; } else { # Never get here! 
croak "Internal Error: Unexpected value ,$maxc, in Graphics::ColorUtils::rgb2hsv( $r, $g, $bb )"; } $h *= 60; # Convert to degrees if( $h < 0 ) { $h += 360; } # Ensure positive hue return ( $h, $s, $v ); } sub hsv2rgb { # $h: 0..360 (red=0->yellow->green=120->cyan->blue=240->magenta steps of 60) # (tolerates larger values of $h by reducing them to the standard circle) # $s, $v : 0..1 (inclusive) my ( $h, $s, $v ) = @_; $v *= 255; if( $s == 0 ) { return _fmt( $v, $v, $v ); } # achromatic (grey) my $i = int( $h/60 ); # sector 0 to 5 my $f = ($h/60) - $i; # fractional part of h/60 my $p = $v * ( 1 - $s ); my $q = $v * ( 1 - $s * $f ); my $t = $v * ( 1 - $s * ( 1 - $f ) ); $i %= 6; # tolerate values of $h larger than 360 if( $i==0 ) { return _fmt( $v, $t, $p ); } elsif( $i==1 ) { return _fmt( $q, $v, $p ); } elsif( $i==2 ) { return _fmt( $p, $v, $t ); } elsif( $i==3 ) { return _fmt( $p, $q, $v ); } elsif( $i==4 ) { return _fmt( $t, $p, $v ); } elsif( $i==5 ) { return _fmt( $v, $p, $q ); } else { # Never get here! croak "Internal Error: Unexpected value ,$i, in Graphics::ColorUtils::hsv2rgb( $h, $s, $v )"; } } # ================================================== # Gradients # Gradients grey, heat, map, and rainbow have been inspired by similar # ideas in Yorick. # For Yorick, cf http://yorick.sourceforge.net # and also http://www.maumae.net/yorick/doc/index.php # as well as http://www.mhatt.aps.anl.gov/dohn/software/yorick/ BEGIN { my %_gradients = ( 'grey' => [ [ 0, 0, 0],[ 1, 1, 1],[ 2, 2, 2],[ 3, 3, 3],[ 4, 4, 4], [ 5, 5, 5],[ 6, 6, 6],[ 7, 7, 7],[ 9, 9, 9],[ 10, 10, 10], [ 11, 11, 11],[ 12, 12, 12],[ 13, 13, 13],[ 14, 14, 14],[ 15, 15, 15], [ 16, 16, 16],[ 17, 17, 17],[ 18, 18, 18],[ 19, 19, 19],[ 20, 20, 20], [ 21, 21, 21],[ 22, 22, 22],[ 23, 23, 23],[ 25, 25, 25],[ 26, 26, 26], [ 27, 27, 27],[ 28, 28, 28],[ 29, 29, 29],[ 30, 30, 30],[ 31, 31, 31], [ 32, 32, 32],[ 33, 33, 33],[ 34, 34, 34],[ 35, 35, 35],[ 36, 36, 36], [ 37, 37, 37],[ 38, 38, 38],[ 39, 39, 39],[ 41, 41, 41],[ 42, 42, 42], [ 43, 43, 43],[ 44, 44, 44],[ 45, 45, 45],[ 46, 46, 46],[ 47, 47, 47], [ 48, 48, 48],[ 49, 49, 49],[ 50, 50, 50],[ 51, 51, 51],[ 52, 52, 52], [ 53, 53, 53],[ 54, 54, 54],[ 55, 55, 55],[ 57, 57, 57],[ 58, 58, 58], [ 59, 59, 59],[ 60, 60, 60],[ 61, 61, 61],[ 62, 62, 62],[ 63, 63, 63], [ 64, 64, 64],[ 65, 65, 65],[ 66, 66, 66],[ 67, 67, 67],[ 68, 68, 68], [ 69, 69, 69],[ 70, 70, 70],[ 71, 71, 71],[ 73, 73, 73],[ 74, 74, 74], [ 75, 75, 75],[ 76, 76, 76],[ 77, 77, 77],[ 78, 78, 78],[ 79, 79, 79], [ 80, 80, 80],[ 81, 81, 81],[ 82, 82, 82],[ 83, 83, 83],[ 84, 84, 84], [ 85, 85, 85],[ 86, 86, 86],[ 87, 87, 87],[ 89, 89, 89],[ 90, 90, 90], [ 91, 91, 91],[ 92, 92, 92],[ 93, 93, 93],[ 94, 94, 94],[ 95, 95, 95], [ 96, 96, 96],[ 97, 97, 97],[ 98, 98, 98],[ 99, 99, 99],[100,100,100], [101,101,101],[102,102,102],[103,103,103],[105,105,105],[106,106,106], [107,107,107],[108,108,108],[109,109,109],[110,110,110],[111,111,111], [112,112,112],[113,113,113],[114,114,114],[115,115,115],[116,116,116], [117,117,117],[118,118,118],[119,119,119],[121,121,121],[122,122,122], [123,123,123],[124,124,124],[125,125,125],[126,126,126],[127,127,127], [128,128,128],[129,129,129],[130,130,130],[131,131,131],[132,132,132], [133,133,133],[134,134,134],[135,135,135],[137,137,137],[138,138,138], [139,139,139],[140,140,140],[141,141,141],[142,142,142],[143,143,143], [144,144,144],[145,145,145],[146,146,146],[147,147,147],[148,148,148], [149,149,149],[150,150,150],[151,151,151],[153,153,153],[154,154,154], 
[155,155,155],[156,156,156],[157,157,157],[158,158,158],[159,159,159], [160,160,160],[161,161,161],[162,162,162],[163,163,163],[164,164,164], [165,165,165],[166,166,166],[167,167,167],[169,169,169],[170,170,170], [171,171,171],[172,172,172],[173,173,173],[174,174,174],[175,175,175], [176,176,176],[177,177,177],[178,178,178],[179,179,179],[180,180,180], [181,181,181],[182,182,182],[183,183,183],[185,185,185],[186,186,186], [187,187,187],[188,188,188],[189,189,189],[190,190,190],[191,191,191], [192,192,192],[193,193,193],[194,194,194],[195,195,195],[196,196,196], [197,197,197],[198,198,198],[199,199,199],[201,201,201],[202,202,202], [203,203,203],[204,204,204],[205,205,205],[206,206,206],[207,207,207], [208,208,208],[209,209,209],[210,210,210],[211,211,211],[212,212,212], [213,213,213],[214,214,214],[215,215,215],[217,217,217],[218,218,218], [219,219,219],[220,220,220],[221,221,221],[222,222,222],[223,223,223], [224,224,224],[225,225,225],[226,226,226],[227,227,227],[228,228,228], [229,229,229],[230,230,230],[231,231,231],[233,233,233],[234,234,234], [235,235,235],[236,236,236],[237,237,237],[238,238,238],[239,239,239], [240,240,240],[241,241,241],[242,242,242],[243,243,243],[244,244,244], [245,245,245],[246,246,246],[247,247,247],[249,249,249],[250,250,250], [251,251,251],[252,252,252],[253,253,253],[254,254,254],[255,255,255] ], 'heat' => [ [ 0, 0, 0],[ 1, 0, 0],[ 2, 0, 0],[ 4, 0, 0],[ 5, 0, 0], [ 7, 0, 0],[ 8, 0, 0],[ 10, 0, 0],[ 11, 0, 0],[ 13, 0, 0], [ 15, 0, 0],[ 17, 0, 0],[ 18, 0, 0],[ 20, 0, 0],[ 21, 0, 0], [ 23, 0, 0],[ 24, 0, 0],[ 26, 0, 0],[ 27, 0, 0],[ 28, 0, 0], [ 30, 0, 0],[ 31, 0, 0],[ 33, 0, 0],[ 34, 0, 0],[ 36, 0, 0], [ 37, 0, 0],[ 39, 0, 0],[ 40, 0, 0],[ 42, 0, 0],[ 43, 0, 0], [ 46, 0, 0],[ 47, 0, 0],[ 49, 0, 0],[ 50, 0, 0],[ 52, 0, 0], [ 53, 0, 0],[ 55, 0, 0],[ 56, 0, 0],[ 57, 0, 0],[ 59, 0, 0], [ 60, 0, 0],[ 62, 0, 0],[ 63, 0, 0],[ 65, 0, 0],[ 66, 0, 0], [ 68, 0, 0],[ 69, 0, 0],[ 70, 0, 0],[ 72, 0, 0],[ 73, 0, 0], [ 76, 0, 0],[ 78, 0, 0],[ 79, 0, 0],[ 81, 0, 0],[ 82, 0, 0], [ 84, 0, 0],[ 85, 0, 0],[ 86, 0, 0],[ 88, 0, 0],[ 89, 0, 0], [ 92, 0, 0],[ 94, 0, 0],[ 95, 0, 0],[ 97, 0, 0],[ 98, 0, 0], [ 99, 0, 0],[101, 0, 0],[102, 0, 0],[104, 0, 0],[105, 0, 0], [108, 0, 0],[110, 0, 0],[111, 0, 0],[113, 0, 0],[114, 0, 0], [115, 0, 0],[117, 0, 0],[118, 0, 0],[120, 0, 0],[121, 0, 0], [123, 0, 0],[124, 0, 0],[126, 0, 0],[127, 0, 0],[128, 0, 0], [130, 0, 0],[131, 0, 0],[133, 0, 0],[134, 0, 0],[136, 0, 0], [139, 0, 0],[140, 0, 0],[141, 0, 0],[143, 0, 0],[144, 0, 0], [146, 0, 0],[147, 0, 0],[149, 0, 0],[150, 0, 0],[152, 0, 0], [153, 0, 0],[155, 0, 0],[156, 0, 0],[157, 0, 0],[159, 0, 0], [160, 0, 0],[162, 0, 0],[163, 0, 0],[165, 0, 0],[166, 0, 0], [169, 0, 0],[170, 0, 0],[172, 0, 0],[173, 0, 0],[175, 1, 0], [176, 3, 0],[178, 5, 0],[179, 7, 0],[181, 9, 0],[182, 11, 0], [185, 15, 0],[186, 17, 0],[188, 18, 0],[189, 20, 0],[191, 22, 0], [192, 24, 0],[194, 26, 0],[195, 28, 0],[197, 30, 0],[198, 32, 0], [201, 35, 0],[202, 37, 0],[204, 39, 0],[205, 41, 0],[207, 43, 0], [208, 45, 0],[210, 47, 0],[211, 49, 0],[212, 51, 0],[214, 52, 0], [215, 54, 0],[217, 56, 0],[218, 58, 0],[220, 60, 0],[221, 62, 0], [223, 64, 0],[224, 66, 0],[226, 68, 0],[227, 69, 0],[228, 71, 0], [231, 75, 0],[233, 77, 0],[234, 79, 0],[236, 81, 0],[237, 83, 0], [239, 85, 0],[240, 86, 0],[241, 88, 0],[243, 90, 0],[244, 92, 0], [246, 94, 0],[247, 96, 0],[249, 98, 0],[250,100, 0],[252,102, 0], [253,103, 0],[255,105, 0],[255,107, 0],[255,109, 0],[255,111, 0], [255,115, 0],[255,117, 0],[255,119, 0],[255,120, 0],[255,122, 0], 
[255,124, 0],[255,126, 0],[255,128, 0],[255,130, 0],[255,132, 0], [255,136, 7],[255,137, 11],[255,139, 15],[255,141, 19],[255,143, 23], [255,145, 27],[255,147, 31],[255,149, 35],[255,151, 39],[255,153, 43], [255,156, 51],[255,158, 54],[255,160, 58],[255,162, 62],[255,164, 66], [255,166, 70],[255,168, 74],[255,170, 78],[255,171, 82],[255,173, 86], [255,175, 90],[255,177, 94],[255,179, 98],[255,181,102],[255,183,105], [255,185,109],[255,187,113],[255,188,117],[255,190,121],[255,192,125], [255,196,133],[255,198,137],[255,200,141],[255,202,145],[255,204,149], [255,205,153],[255,207,156],[255,209,160],[255,211,164],[255,213,168], [255,215,172],[255,217,176],[255,219,180],[255,221,184],[255,222,188], [255,224,192],[255,226,196],[255,228,200],[255,230,204],[255,232,207], [255,236,215],[255,238,219],[255,239,223],[255,241,227],[255,243,231], [255,245,235],[255,247,239],[255,249,243],[255,251,247],[255,253,251] ], 'map' => [ [ 0, 0, 0],[ 0, 0, 46],[ 0, 0, 58],[ 0, 0, 69],[ 0, 0, 81], [ 0, 0, 92],[ 0, 0,104],[ 0, 0,116],[ 0, 3,116],[ 1, 6,116], [ 2, 8,116],[ 2, 11,116],[ 3, 13,117],[ 4, 16,117],[ 5, 18,117], [ 5, 21,117],[ 6, 23,117],[ 7, 26,118],[ 8, 28,118],[ 8, 31,118], [ 9, 33,118],[ 10, 36,118],[ 11, 38,119],[ 11, 41,119],[ 12, 43,119], [ 13, 45,119],[ 14, 48,119],[ 15, 50,120],[ 15, 52,120],[ 16, 55,120], [ 17, 57,120],[ 18, 59,120],[ 18, 61,121],[ 19, 64,121],[ 20, 66,121], [ 21, 68,121],[ 22, 70,121],[ 22, 72,122],[ 23, 74,122],[ 24, 77,122], [ 25, 79,122],[ 26, 81,122],[ 26, 83,123],[ 27, 85,123],[ 28, 87,123], [ 29, 89,123],[ 30, 91,123],[ 31, 93,124],[ 31, 95,124],[ 32, 97,124], [ 33, 99,124],[ 34,100,124],[ 35,102,125],[ 36,104,125],[ 36,106,125], [ 37,108,125],[ 38,109,125],[ 39,111,126],[ 40,113,126],[ 41,115,126], [ 41,116,126],[ 42,118,126],[ 43,120,127],[ 44,121,127],[ 45,123,127], [ 46,125,127],[ 47,126,127],[ 48,128,128],[ 48,128,126],[ 48,129,125], [ 49,129,124],[ 49,130,123],[ 50,131,122],[ 50,131,120],[ 51,132,119], [ 51,133,118],[ 52,133,117],[ 52,134,115],[ 53,134,114],[ 53,135,113], [ 54,136,111],[ 54,136,110],[ 55,137,109],[ 55,138,108],[ 56,138,106], [ 56,139,105],[ 57,140,104],[ 57,140,102],[ 58,141,101],[ 58,141,100], [ 59,142, 98],[ 59,143, 97],[ 60,143, 96],[ 61,144, 94],[ 61,145, 93], [ 62,145, 92],[ 62,146, 90],[ 63,146, 89],[ 63,147, 88],[ 64,148, 86], [ 64,148, 85],[ 65,149, 84],[ 65,150, 82],[ 66,150, 81],[ 67,151, 80], [ 67,151, 78],[ 68,152, 77],[ 68,153, 76],[ 69,153, 74],[ 69,154, 73], [ 70,155, 71],[ 71,155, 70],[ 73,156, 71],[ 76,156, 72],[ 78,157, 72], [ 81,158, 73],[ 83,158, 73],[ 86,159, 74],[ 88,160, 75],[ 91,160, 75], [ 94,161, 76],[ 96,161, 76],[ 99,162, 77],[101,163, 77],[104,163, 78], [106,164, 79],[109,165, 79],[111,165, 80],[114,166, 80],[117,166, 81], [119,167, 82],[121,168, 82],[122,168, 82],[124,168, 83],[126,169, 83], [128,169, 83],[129,170, 84],[131,170, 84],[133,171, 84],[135,171, 85], [136,172, 85],[138,172, 85],[140,172, 86],[141,173, 86],[143,173, 86], [145,174, 87],[147,174, 87],[149,175, 87],[150,175, 88],[152,175, 88], [154,176, 88],[156,176, 89],[157,177, 89],[159,177, 89],[161,178, 90], [163,178, 90],[165,179, 90],[166,179, 91],[168,179, 91],[170,180, 91], [172,180, 92],[174,181, 92],[175,181, 92],[177,182, 93],[179,182, 93], [181,183, 93],[183,183, 94],[183,182, 94],[184,181, 94],[184,181, 95], [185,180, 95],[185,179, 95],[186,178, 96],[186,177, 96],[187,176, 97], [187,175, 97],[187,174, 97],[188,173, 98],[188,172, 98],[189,171, 98], [189,170, 99],[190,169, 99],[190,168, 99],[190,167,100],[191,166,100], 
[191,165,100],[192,164,101],[192,163,101],[193,163,104],[195,164,106], [196,164,108],[197,165,111],[198,165,113],[199,166,116],[201,167,118], [202,167,121],[203,168,123],[204,169,126],[205,170,129],[207,171,131], [208,172,134],[209,173,137],[210,174,139],[211,175,142],[213,176,145], [214,177,148],[215,178,150],[216,179,153],[217,181,156],[219,182,159], [220,184,162],[221,185,165],[222,187,168],[223,188,170],[225,190,173], [226,192,176],[227,194,179],[228,196,182],[229,198,185],[231,200,189], [232,202,192],[233,204,195],[234,206,198],[235,208,201],[237,211,204], [238,213,207],[239,215,211],[240,218,214],[241,221,217],[243,223,220], [244,226,224],[245,229,227],[246,232,230],[247,235,234],[249,238,237], [250,241,241],[251,244,244],[252,248,248],[253,251,251],[255,255,255] ], 'rainbow' => [ [255, 0, 42],[255, 0, 36],[255, 0, 31],[255, 0, 26],[255, 0, 20], [255, 0, 15],[255, 0, 10],[255, 0, 4],[255, 5, 0],[255, 11, 0], [255, 16, 0],[255, 22, 0],[255, 27, 0],[255, 32, 0],[255, 38, 0], [255, 43, 0],[255, 48, 0],[255, 54, 0],[255, 59, 0],[255, 65, 0], [255, 70, 0],[255, 75, 0],[255, 81, 0],[255, 91, 0],[255, 97, 0], [255,102, 0],[255,108, 0],[255,113, 0],[255,118, 0],[255,124, 0], [255,129, 0],[255,135, 0],[255,140, 0],[255,145, 0],[255,151, 0], [255,156, 0],[255,161, 0],[255,167, 0],[255,178, 0],[255,183, 0], [255,188, 0],[255,194, 0],[255,199, 0],[255,204, 0],[255,210, 0], [255,215, 0],[255,221, 0],[255,226, 0],[255,231, 0],[255,237, 0], [255,242, 0],[255,247, 0],[255,253, 0],[245,255, 0],[240,255, 0], [235,255, 0],[229,255, 0],[224,255, 0],[219,255, 0],[213,255, 0], [208,255, 0],[202,255, 0],[197,255, 0],[192,255, 0],[186,255, 0], [181,255, 0],[175,255, 0],[170,255, 0],[159,255, 0],[154,255, 0], [149,255, 0],[143,255, 0],[138,255, 0],[132,255, 0],[127,255, 0], [122,255, 0],[116,255, 0],[111,255, 0],[106,255, 0],[100,255, 0], [ 95,255, 0],[ 89,255, 0],[ 84,255, 0],[ 73,255, 0],[ 68,255, 0], [ 63,255, 0],[ 57,255, 0],[ 52,255, 0],[ 46,255, 0],[ 41,255, 0], [ 36,255, 0],[ 30,255, 0],[ 25,255, 0],[ 19,255, 0],[ 14,255, 0], [ 9,255, 0],[ 3,255, 0],[ 0,255, 1],[ 0,255, 12],[ 0,255, 17], [ 0,255, 23],[ 0,255, 28],[ 0,255, 33],[ 0,255, 39],[ 0,255, 44], [ 0,255, 49],[ 0,255, 55],[ 0,255, 60],[ 0,255, 66],[ 0,255, 71], [ 0,255, 76],[ 0,255, 82],[ 0,255, 87],[ 0,255, 98],[ 0,255,103], [ 0,255,109],[ 0,255,114],[ 0,255,119],[ 0,255,125],[ 0,255,130], [ 0,255,135],[ 0,255,141],[ 0,255,146],[ 0,255,152],[ 0,255,157], [ 0,255,162],[ 0,255,168],[ 0,255,173],[ 0,255,184],[ 0,255,189], [ 0,255,195],[ 0,255,200],[ 0,255,205],[ 0,255,211],[ 0,255,216], [ 0,255,222],[ 0,255,227],[ 0,255,232],[ 0,255,238],[ 0,255,243], [ 0,255,248],[ 0,255,254],[ 0,250,255],[ 0,239,255],[ 0,234,255], [ 0,228,255],[ 0,223,255],[ 0,218,255],[ 0,212,255],[ 0,207,255], [ 0,201,255],[ 0,196,255],[ 0,191,255],[ 0,185,255],[ 0,180,255], [ 0,174,255],[ 0,169,255],[ 0,164,255],[ 0,153,255],[ 0,148,255], [ 0,142,255],[ 0,137,255],[ 0,131,255],[ 0,126,255],[ 0,121,255], [ 0,115,255],[ 0,110,255],[ 0,105,255],[ 0, 99,255],[ 0, 94,255], [ 0, 88,255],[ 0, 83,255],[ 0, 78,255],[ 0, 67,255],[ 0, 62,255], [ 0, 56,255],[ 0, 51,255],[ 0, 45,255],[ 0, 40,255],[ 0, 35,255], [ 0, 29,255],[ 0, 24,255],[ 0, 18,255],[ 0, 13,255],[ 0, 8,255], [ 0, 2,255],[ 2, 0,255],[ 7, 0,255],[ 18, 0,255],[ 24, 0,255], [ 29, 0,255],[ 34, 0,255],[ 40, 0,255],[ 45, 0,255],[ 50, 0,255], [ 56, 0,255],[ 61, 0,255],[ 67, 0,255],[ 72, 0,255],[ 77, 0,255], [ 83, 0,255],[ 88, 0,255],[ 93, 0,255],[104, 0,255],[110, 0,255], [115, 0,255],[120, 0,255],[126, 0,255],[131, 0,255],[136, 
0,255], [142, 0,255],[147, 0,255],[153, 0,255],[158, 0,255],[163, 0,255], [169, 0,255],[174, 0,255],[180, 0,255],[190, 0,255],[196, 0,255], [201, 0,255],[206, 0,255],[212, 0,255],[217, 0,255],[223, 0,255], [228, 0,255],[233, 0,255],[239, 0,255],[244, 0,255],[249, 0,255], [255, 0,254],[255, 0,249],[255, 0,243],[255, 0,233],[255, 0,227], [255, 0,222],[255, 0,217],[255, 0,211],[255, 0,206],[255, 0,201] ] ); # Returns a hash: gradient-name => color-count sub available_gradients { return map { $_, scalar( @{ $_gradients{$_} } ) } keys %_gradients; } # Returns array-ref of rgb-triples, undef if gradient-name not found sub gradient { my ( $name ) = @_; unless( exists $_gradients{ $name } ) { return; } return $_gradients{$name}; } # Returns the color corresponding to the position in the gradient given by f. # Returns undef when gradient not found or f outside valid range. sub grad2rgb { my ( $name, $frac ) = @_; unless( exists $_gradients{ $name } ) { return; } if( $frac < 0.0 || $frac >= 1.0 ) { return; } my $idx = int( $frac * scalar( @{$_gradients{$name}} ) ); return _fmt( @{ $_gradients{$name}[$idx] } ); } # Expects a gradient and and array-ref to an array of rgb triples. # If the name already exists, the function returns the old array; undef otherws sub register_gradient { my ( $name, $array_ref ) = @_; if( exists $_gradients{ $name } ) { my $old = $_gradients{ $name }; $_gradients{ $name } = $array_ref; return $old; } $_gradients{ $name } = $array_ref; return undef; } } # end BEGIN (Gradients) # ================================================== # Names BEGIN { my $_default_namespace = 'x11'; my %_colors = ( 'www:aqua' => [ 0,255,255],'www:black' => [ 0, 0, 0], 'www:blue' => [ 0, 0,255],'www:fuchsia' => [255, 0,255], 'www:gray' => [190,190,190],'www:green' => [ 0,128, 0], 'www:lime' => [ 0,255, 0],'www:maroon' => [128, 0, 0], 'www:navy' => [ 0, 0,128],'www:olive' => [128,128, 0], 'www:purple' => [128, 0,128],'www:red' => [255, 0, 0], 'www:silver' => [192,192,192],'www:teal' => [ 0,128,128], 'www:white' => [255,255,255],'www:yellow' => [255,255, 0], 'www:orange' => [255,165, 0], 'svg:palevioletred' => [219,112,147],'svg:mediumslateblue' => [123,104,238], 'svg:gold' => [255,215,0],'svg:gainsboro' => [220,220,220], 'svg:yellow' => [255,255,0],'svg:limegreen' => [50,205,50], 'svg:lightgoldenrodyellow' => [250,250,210],'svg:lavenderblush' => [255,240,245], 'svg:darkmagenta' => [139,0,139],'svg:darkgrey' => [169,169,169], 'svg:blanchedalmond' => [255,235,205],'svg:ghostwhite' => [248,248,255], 'svg:floralwhite' => [255,250,240],'svg:coral' => [255,127,80], 'svg:honeydew' => [240,255,240],'svg:mistyrose' => [255,228,225], 'svg:slateblue' => [106,90,205],'svg:goldenrod' => [218,165,32], 'svg:darkcyan' => [0,139,139],'svg:moccasin' => [255,228,181], 'svg:mediumvioletred' => [199,21,133],'svg:maroon' => [128,0,0], 'svg:lightpink' => [255,182,193],'svg:lightsalmon' => [255,160,122], 'svg:paleturquoise' => [175,238,238],'svg:darksalmon' => [233,150,122], 'svg:yellowgreen' => [154,205,50],'svg:mediumturquoise' => [72,209,204], 'svg:chartreuse' => [127,255,0],'svg:peru' => [205,133,63], 'svg:palegoldenrod' => [238,232,170],'svg:red' => [255,0,0], 'svg:lavender' => [230,230,250],'svg:lightseagreen' => [32,178,170], 'svg:powderblue' => [176,224,230],'svg:orchid' => [218,112,214], 'svg:cornsilk' => [255,248,220],'svg:seagreen' => [46,139,87], 'svg:royalblue' => [65,105,225],'svg:ivory' => [255,255,240], 'svg:tan' => [210,180,140],'svg:linen' => [250,240,230], 'svg:darkorchid' => [153,50,204],'svg:tomato' => 
[255,99,71], 'svg:lightcyan' => [224,255,255],'svg:darkolivegreen' => [85,107,47], 'svg:sienna' => [160,82,45],'svg:lightsteelblue' => [176,196,222], 'svg:indigo' => [75,0,130],'svg:peachpuff' => [255,218,185], 'svg:lime' => [0,255,0],'svg:mediumspringgreen' => [0,250,154], 'svg:silver' => [192,192,192],'svg:saddlebrown' => [139,69,19], 'svg:lightyellow' => [255,255,224],'svg:grey' => [128,128,128], 'svg:thistle' => [216,191,216],'svg:deepskyblue' => [0,191,255], 'svg:lightgreen' => [144,238,144],'svg:blueviolet' => [138,43,226], 'svg:aqua' => [0,255,255],'svg:cyan' => [0,255,255], 'svg:papayawhip' => [255,239,213],'svg:deeppink' => [255,20,147], 'svg:firebrick' => [178,34,34],'svg:navy' => [0,0,128], 'svg:hotpink' => [255,105,180],'svg:pink' => [255,192,203], 'svg:darkturquoise' => [0,206,209],'svg:navajowhite' => [255,222,173], 'svg:lightslategrey' => [119,136,153],'svg:lawngreen' => [124,252,0], 'svg:lightcoral' => [240,128,128],'svg:palegreen' => [152,251,152], 'svg:dodgerblue' => [30,144,255],'svg:greenyellow' => [173,255,47], 'svg:lightskyblue' => [135,206,250],'svg:brown' => [165,42,42], 'svg:dimgrey' => [105,105,105],'svg:aquamarine' => [127,255,212], 'svg:darkseagreen' => [143,188,143],'svg:fuchsia' => [255,0,255], 'svg:magenta' => [255,0,255],'svg:chocolate' => [210,105,30], 'svg:mediumseagreen' => [60,179,113],'svg:cadetblue' => [95,158,160], 'svg:purple' => [128,0,128],'svg:turquoise' => [64,224,208], 'svg:darkkhaki' => [189,183,107],'svg:antiquewhite' => [250,235,215], 'svg:skyblue' => [135,206,235],'svg:sandybrown' => [244,164,96], 'svg:mediumblue' => [0,0,205],'svg:steelblue' => [70,130,180], 'svg:indianred' => [205,92,92],'svg:khaki' => [240,230,140], 'svg:lightblue' => [173,216,230],'svg:green' => [0,128,0], 'svg:olive' => [128,128,0],'svg:mediumorchid' => [186,85,211], 'svg:blue' => [0,0,255],'svg:snow' => [255,250,250], 'svg:rosybrown' => [188,143,143],'svg:orange' => [255,165,0], 'svg:slategrey' => [112,128,144],'svg:darkorange' => [255,140,0], 'svg:violet' => [238,130,238],'svg:darkslategrey' => [47,79,79], 'svg:whitesmoke' => [245,245,245],'svg:burlywood' => [222,184,135], 'svg:darkgreen' => [0,100,0],'svg:lemonchiffon' => [255,250,205], 'svg:midnightblue' => [25,25,112],'svg:mintcream' => [245,255,250], 'svg:oldlace' => [253,245,230],'svg:black' => [0,0,0], 'svg:bisque' => [255,228,196],'svg:mediumaquamarine' => [102,205,170], 'svg:olivedrab' => [107,142,35],'svg:salmon' => [250,128,114], 'svg:teal' => [0,128,128],'svg:seashell' => [255,245,238], 'svg:springgreen' => [0,255,127],'svg:plum' => [221,160,221], 'svg:darkviolet' => [148,0,211],'svg:wheat' => [245,222,179], 'svg:mediumpurple' => [147,112,219],'svg:cornflowerblue' => [100,149,237], 'svg:forestgreen' => [34,139,34],'svg:darkgoldenrod' => [184,134,11], 'svg:aliceblue' => [240,248,255],'svg:white' => [255,255,255], 'svg:darkblue' => [0,0,139],'svg:azure' => [240,255,255], 'svg:darkred' => [139,0,0],'svg:orangered' => [255,69,0], 'svg:darkslateblue' => [72,61,139],'svg:crimson' => [220,20,60], 'svg:lightgrey' => [211,211,211],'svg:beige' => [245,245,220], 'x11:deepskyblue3' => [0,154,205],'x11:gold' => [255,215,0], 'x11:gold1' => [255,215,0],'x11:mediumpurple3' => [137,104,205], 'x11:royalblue3' => [58,95,205],'x11:lightgoldenrodyellow' => [250,250,210], 'x11:lavenderblush' => [255,240,245],'x11:lavenderblush1' => [255,240,245], 'x11:pink1' => [255,181,197],'x11:green3' => [0,205,0], 'x11:lightsteelblue1' => [202,225,255],'x11:blanchedalmond' => [255,235,205], 'x11:salmon1' => [255,140,105],'x11:ghostwhite' => 
[248,248,255], 'x11:floralwhite' => [255,250,240],'x11:dodgerblue4' => [16,78,139], 'x11:grey43' => [110,110,110],'x11:indianred4' => [139,58,58], 'x11:mistyrose1' => [255,228,225],'x11:mistyrose' => [255,228,225], 'x11:dodgerblue2' => [28,134,238],'x11:grey37' => [94,94,94], 'x11:grey9' => [23,23,23],'x11:purple4' => [85,26,139], 'x11:orchid2' => [238,122,233],'x11:cornsilk3' => [205,200,177], 'x11:goldenrod' => [218,165,32],'x11:hotpink4' => [139,58,98], 'x11:lightpink' => [255,182,193],'x11:coral2' => [238,106,80], 'x11:cyan2' => [0,238,238],'x11:grey87' => [222,222,222], 'x11:grey91' => [232,232,232],'x11:violetred4' => [139,34,82], 'x11:violetred2' => [238,58,140],'x11:indianred2' => [238,99,99], 'x11:lightyellow3' => [205,205,180],'x11:darkolivegreen2' => [188,238,104], 'x11:magenta3' => [205,0,205],'x11:grey64' => [163,163,163], 'x11:honeydew3' => [193,205,193],'x11:lightsalmon3' => [205,129,98], 'x11:springgreen4' => [0,139,69],'x11:grey57' => [145,145,145], 'x11:grey50' => [127,127,127],'x11:grey66' => [168,168,168], 'x11:antiquewhite1' => [255,239,219],'x11:paleturquoise' => [175,238,238], 'x11:navajowhite2' => [238,207,161],'x11:lightpink3' => [205,140,149], 'x11:darksalmon' => [233,150,122],'x11:grey52' => [133,133,133], 'x11:slategrey3' => [159,182,205],'x11:darkseagreen4' => [105,139,105], 'x11:chartreuse' => [127,255,0],'x11:chartreuse1' => [127,255,0], 'x11:grey42' => [107,107,107],'x11:peru' => [205,133,63], 'x11:tan3' => [205,133,63],'x11:grey19' => [48,48,48], 'x11:palegreen3' => [124,205,124],'x11:lavender' => [230,230,250], 'x11:red3' => [205,0,0],'x11:orchid' => [218,112,214], 'x11:powderblue' => [176,224,230],'x11:grey35' => [89,89,89], 'x11:plum4' => [139,102,139],'x11:cornsilk' => [255,248,220], 'x11:cornsilk1' => [255,248,220],'x11:royalblue' => [65,105,225], 'x11:darkgoldenrod2' => [238,173,14],'x11:lightpink4' => [139,95,101], 'x11:springgreen2' => [0,238,118],'x11:tan' => [210,180,140], 'x11:lightslateblue' => [132,112,255],'x11:darkorchid' => [153,50,204], 'x11:orangered2' => [238,64,0],'x11:palevioletred1' => [255,130,171], 'x11:grey63' => [161,161,161],'x11:maroon2' => [238,48,167], 'x11:blue2' => [0,0,238],'x11:turquoise4' => [0,134,139], 'x11:lightcyan1' => [224,255,255],'x11:lightcyan' => [224,255,255], 'x11:springgreen3' => [0,205,102],'x11:darkorchid4' => [104,34,139], 'x11:sienna' => [160,82,45],'x11:goldenrod2' => [238,180,34], 'x11:lightgoldenrod3' => [205,190,112],'x11:green' => [0,255,0], 'x11:green1' => [0,255,0],'x11:peachpuff1' => [255,218,185], 'x11:peachpuff' => [255,218,185],'x11:yellow3' => [205,205,0], 'x11:mediumspringgreen' => [0,250,154],'x11:cadetblue3' => [122,197,205], 'x11:royalblue1' => [72,118,255],'x11:deepskyblue1' => [0,191,255], 'x11:deepskyblue' => [0,191,255],'x11:firebrick1' => [255,48,48], 'x11:grey80' => [204,204,204],'x11:grey28' => [71,71,71], 'x11:palegreen2' => [144,238,144],'x11:lightgreen' => [144,238,144], 'x11:blueviolet' => [138,43,226],'x11:deeppink1' => [255,20,147], 'x11:deeppink' => [255,20,147],'x11:deeppink2' => [238,18,137], 'x11:lightskyblue2' => [164,211,238],'x11:grey77' => [196,196,196], 'x11:grey72' => [184,184,184],'x11:tomato2' => [238,92,66], 'x11:steelblue2' => [92,172,238],'x11:hotpink' => [255,105,180], 'x11:slateblue4' => [71,60,139],'x11:pink' => [255,192,203], 'x11:darkturquoise' => [0,206,209],'x11:antiquewhite3' => [205,192,176], 'x11:grey32' => [82,82,82],'x11:lightyellow2' => [238,238,209], 'x11:olivedrab4' => [105,139,34],'x11:lightblue4' => [104,131,139], 'x11:royalblue2' => 
[67,110,238],'x11:navajowhite1' => [255,222,173], 'x11:navajowhite' => [255,222,173],'x11:lightgoldenrod' => [238,221,130], 'x11:grey85' => [217,217,217],'x11:maroon4' => [139,28,98], 'x11:grey90' => [229,229,229],'x11:grey17' => [43,43,43], 'x11:seashell4' => [139,134,130],'x11:greenyellow' => [173,255,47], 'x11:dodgerblue1' => [30,144,255],'x11:dodgerblue' => [30,144,255], 'x11:grey89' => [227,227,227],'x11:brown2' => [238,59,59], 'x11:paleturquoise2' => [174,238,238],'x11:lightskyblue' => [135,206,250], 'x11:salmon4' => [139,76,57],'x11:chocolate3' => [205,102,29], 'x11:grey70' => [179,179,179],'x11:grey25' => [64,64,64], 'x11:darkolivegreen4' => [110,139,61],'x11:mediumorchid2' => [209,95,238], 'x11:brown' => [165,42,42],'x11:grey67' => [171,171,171], 'x11:grey41' => [105,105,105],'x11:dimgrey' => [105,105,105], 'x11:grey60' => [153,153,153],'x11:indianred3' => [205,85,85], 'x11:chocolate' => [210,105,30],'x11:darkslategrey1' => [151,255,255], 'x11:grey2' => [5,5,5],'x11:firebrick3' => [205,38,38], 'x11:snow4' => [139,137,137],'x11:mediumseagreen' => [60,179,113], 'x11:darkorchid1' => [191,62,255],'x11:pink3' => [205,145,158], 'x11:violetred1' => [255,62,150],'x11:grey83' => [212,212,212], 'x11:olivedrab1' => [192,255,62],'x11:darkkhaki' => [189,183,107], 'x11:deepskyblue4' => [0,104,139],'x11:darkorchid2' => [178,58,238], 'x11:skyblue' => [135,206,235],'x11:mediumorchid3' => [180,82,205], 'x11:rosybrown4' => [139,105,105],'x11:grey16' => [41,41,41], 'x11:yellow4' => [139,139,0],'x11:maroon' => [176,48,96], 'x11:turquoise2' => [0,229,238],'x11:mistyrose2' => [238,213,210], 'x11:blue3' => [0,0,205],'x11:mediumblue' => [0,0,205], 'x11:grey4' => [10,10,10],'x11:pink2' => [238,169,184], 'x11:chocolate2' => [238,118,33],'x11:lightyellow4' => [139,139,122], 'x11:grey99' => [252,252,252],'x11:red2' => [238,0,0], 'x11:tan4' => [139,90,43],'x11:yellow2' => [238,238,0], 'x11:grey12' => [31,31,31],'x11:deeppink4' => [139,10,80], 'x11:lightsalmon4' => [139,87,66],'x11:lightcyan4' => [122,139,139], 'x11:snow1' => [255,250,250],'x11:snow' => [255,250,250], 'x11:brown4' => [139,35,35],'x11:darkseagreen2' => [180,238,180], 'x11:lightsteelblue2' => [188,210,238],'x11:rosybrown' => [188,143,143], 'x11:maroon1' => [255,52,179],'x11:slategrey' => [112,128,144], 'x11:orange' => [255,165,0],'x11:orange1' => [255,165,0], 'x11:orangered3' => [205,55,0],'x11:plum3' => [205,150,205], 'x11:turquoise3' => [0,197,205],'x11:pink4' => [139,99,108], 'x11:violet' => [238,130,238],'x11:grey96' => [245,245,245], 'x11:whitesmoke' => [245,245,245],'x11:lightgoldenrod1' => [255,236,139], 'x11:darkorange1' => [255,127,0],'x11:seashell2' => [238,229,222], 'x11:midnightblue' => [25,25,112],'x11:grey27' => [69,69,69], 'x11:mediumpurple2' => [159,121,238],'x11:bisque4' => [139,125,107], 'x11:black' => [0,0,0],'x11:grey0' => [0,0,0], 'x11:lavenderblush4' => [139,131,134],'x11:bisque1' => [255,228,196], 'x11:bisque' => [255,228,196],'x11:mediumaquamarine' => [102,205,170], 'x11:aquamarine3' => [102,205,170],'x11:goldenrod1' => [255,193,37], 'x11:green4' => [0,139,0],'x11:bisque3' => [205,183,158], 'x11:salmon' => [250,128,114],'x11:grey1' => [3,3,3], 'x11:purple3' => [125,38,205],'x11:khaki4' => [139,134,78], 'x11:grey' => [190,190,190],'x11:cadetblue4' => [83,134,139], 'x11:cadetblue1' => [152,245,255],'x11:hotpink3' => [205,96,144], 'x11:antiquewhite2' => [238,223,204],'x11:darkorange4' => [139,69,0], 'x11:cornsilk2' => [238,232,205],'x11:grey93' => [237,237,237], 'x11:thistle3' => [205,181,205],'x11:plum2' => [238,174,238], 
'x11:burlywood2' => [238,197,145],'x11:skyblue4' => [74,112,139], 'x11:peachpuff2' => [238,203,173],'x11:grey62' => [158,158,158], 'x11:paleturquoise3' => [150,205,205],'x11:lightblue1' => [191,239,255], 'x11:mediumpurple' => [147,112,219],'x11:peachpuff3' => [205,175,149], 'x11:grey49' => [125,125,125],'x11:grey3' => [8,8,8], 'x11:steelblue1' => [99,184,255],'x11:grey73' => [186,186,186], 'x11:grey44' => [112,112,112],'x11:palevioletred4' => [139,71,93], 'x11:khaki2' => [238,230,133],'x11:gold3' => [205,173,0], 'x11:grey47' => [120,120,120],'x11:aliceblue' => [240,248,255], 'x11:grey58' => [148,148,148],'x11:darkslategrey4' => [82,139,139], 'x11:mediumorchid4' => [122,55,139],'x11:thistle1' => [255,225,255], 'x11:mistyrose4' => [139,125,123],'x11:orchid1' => [255,131,250], 'x11:hotpink2' => [238,106,167],'x11:azure' => [240,255,255], 'x11:azure1' => [240,255,255],'x11:darkred' => [139,0,0], 'x11:red4' => [139,0,0],'x11:chartreuse2' => [118,238,0], 'x11:slateblue1' => [131,111,255],'x11:grey15' => [38,38,38], 'x11:grey71' => [181,181,181],'x11:darkslategrey2' => [141,238,238], 'x11:snow3' => [205,201,201],'x11:bisque2' => [238,213,183], 'x11:darkslateblue' => [72,61,139],'x11:coral4' => [139,62,47], 'x11:grey69' => [176,176,176],'x11:burlywood4' => [139,115,85], 'x11:coral3' => [205,91,69],'x11:purple' => [160,32,240], 'x11:grey36' => [92,92,92],'x11:grey94' => [240,240,240], 'x11:palevioletred2' => [238,121,159],'x11:grey46' => [117,117,117], 'x11:palevioletred' => [219,112,147],'x11:mediumslateblue' => [123,104,238], 'x11:seagreen1' => [84,255,159],'x11:gainsboro' => [220,220,220], 'x11:yellow1' => [255,255,0],'x11:yellow' => [255,255,0], 'x11:limegreen' => [50,205,50],'x11:darkgrey' => [169,169,169], 'x11:darkmagenta' => [139,0,139],'x11:magenta4' => [139,0,139], 'x11:grey59' => [150,150,150],'x11:firebrick2' => [238,44,44], 'x11:coral' => [255,127,80],'x11:honeydew' => [240,255,240], 'x11:honeydew1' => [240,255,240],'x11:grey86' => [219,219,219], 'x11:grey13' => [33,33,33],'x11:purple1' => [155,48,255], 'x11:grey82' => [209,209,209],'x11:grey65' => [166,166,166], 'x11:grey97' => [247,247,247],'x11:azure4' => [131,139,139], 'x11:darkslategrey3' => [121,205,205],'x11:lightcyan3' => [180,205,205], 'x11:aquamarine2' => [118,238,198],'x11:grey92' => [235,235,235], 'x11:slateblue' => [106,90,205],'x11:darkcyan' => [0,139,139], 'x11:cyan4' => [0,139,139],'x11:chartreuse3' => [102,205,0], 'x11:moccasin' => [255,228,181],'x11:mediumvioletred' => [199,21,133], 'x11:tomato3' => [205,79,57],'x11:grey31' => [79,79,79], 'x11:sienna2' => [238,121,66],'x11:grey98' => [250,250,250], 'x11:gold4' => [139,117,0],'x11:slateblue3' => [105,89,205], 'x11:grey14' => [36,36,36],'x11:honeydew4' => [131,139,131], 'x11:grey61' => [156,156,156],'x11:violetred3' => [205,50,120], 'x11:grey39' => [99,99,99],'x11:aquamarine4' => [69,139,116], 'x11:darkgoldenrod4' => [139,101,8],'x11:mediumpurple1' => [171,130,255], 'x11:lightsalmon1' => [255,160,122],'x11:lightsalmon' => [255,160,122], 'x11:darkolivegreen3' => [162,205,90],'x11:grey10' => [26,26,26], 'x11:khaki3' => [205,198,115],'x11:navajowhite3' => [205,179,139], 'x11:lightpink1' => [255,174,185],'x11:grey81' => [207,207,207], 'x11:grey45' => [115,115,115],'x11:wheat3' => [205,186,150], 'x11:steelblue4' => [54,100,139],'x11:grey48' => [122,122,122], 'x11:olivedrab3' => [154,205,50],'x11:yellowgreen' => [154,205,50], 'x11:mediumturquoise' => [72,209,204],'x11:palegoldenrod' => [238,232,170], 'x11:ivory2' => [238,238,224],'x11:darkolivegreen1' => [202,255,112], 
'x11:red1' => [255,0,0],'x11:red' => [255,0,0], 'x11:lemonchiffon4' => [139,137,112],'x11:lightseagreen' => [32,178,170], 'x11:seagreen4' => [46,139,87],'x11:seagreen' => [46,139,87], 'x11:ivory' => [255,255,240],'x11:ivory1' => [255,255,240], 'x11:linen' => [250,240,230],'x11:grey34' => [87,87,87], 'x11:thistle2' => [238,210,238],'x11:tomato' => [255,99,71], 'x11:tomato1' => [255,99,71],'x11:slategrey1' => [198,226,255], 'x11:orchid3' => [205,105,201],'x11:lightcyan2' => [209,238,238], 'x11:grey54' => [138,138,138],'x11:darkolivegreen' => [85,107,47], 'x11:lightsteelblue' => [176,196,222],'x11:grey33' => [84,84,84], 'x11:chocolate4' => [139,69,19],'x11:saddlebrown' => [139,69,19], 'x11:orange3' => [205,133,0],'x11:lightyellow' => [255,255,224], 'x11:lightyellow1' => [255,255,224],'x11:grey75' => [191,191,191], 'x11:khaki1' => [255,246,143],'x11:thistle' => [216,191,216], 'x11:grey79' => [201,201,201],'x11:plum1' => [255,187,255], 'x11:paleturquoise4' => [102,139,139],'x11:cyan1' => [0,255,255], 'x11:cyan' => [0,255,255],'x11:maroon3' => [205,41,144], 'x11:papayawhip' => [255,239,213],'x11:seagreen3' => [67,205,128], 'x11:lightgoldenrod4' => [139,129,76],'x11:lightskyblue1' => [176,226,255], 'x11:firebrick' => [178,34,34],'x11:grey30' => [77,77,77], 'x11:grey26' => [66,66,66],'x11:antiquewhite4' => [139,131,120], 'x11:navyblue' => [0,0,128],'x11:navy' => [0,0,128], 'x11:grey7' => [18,18,18],'x11:grey5' => [13,13,13], 'x11:grey29' => [74,74,74],'x11:turquoise1' => [0,245,255], 'x11:darkgoldenrod3' => [205,149,12],'x11:goldenrod4' => [139,105,20], 'x11:palevioletred3' => [205,104,137],'x11:lightslategrey' => [119,136,153], 'x11:snow2' => [238,233,233],'x11:grey24' => [61,61,61], 'x11:slategrey4' => [108,123,139],'x11:grey55' => [140,140,140], 'x11:seashell3' => [205,197,191],'x11:deeppink3' => [205,16,118], 'x11:lawngreen' => [124,252,0],'x11:darkorchid3' => [154,50,205], 'x11:lightcoral' => [240,128,128],'x11:palegreen' => [152,251,152], 'x11:grey56' => [143,143,143],'x11:grey23' => [59,59,59], 'x11:grey74' => [189,189,189],'x11:azure2' => [224,238,238], 'x11:darkseagreen3' => [155,205,155],'x11:grey20' => [51,51,51], 'x11:cadetblue2' => [142,229,238],'x11:grey84' => [214,214,214], 'x11:cornsilk4' => [139,136,120],'x11:grey38' => [97,97,97], 'x11:magenta1' => [255,0,255],'x11:magenta' => [255,0,255], 'x11:darkseagreen' => [143,188,143],'x11:aquamarine1' => [127,255,212], 'x11:aquamarine' => [127,255,212],'x11:lightblue3' => [154,192,205], 'x11:olivedrab2' => [179,238,58],'x11:grey40' => [102,102,102], 'x11:peachpuff4' => [139,119,101],'x11:paleturquoise1' => [187,255,255], 'x11:darkseagreen1' => [193,255,193],'x11:darkorange3' => [205,102,0], 'x11:brown3' => [205,51,51],'x11:grey51' => [130,130,130], 'x11:mediumpurple4' => [93,71,139],'x11:lightpink2' => [238,162,173], 'x11:cadetblue' => [95,158,160],'x11:lemonchiffon2' => [238,233,191], 'x11:green2' => [0,238,0],'x11:azure3' => [193,205,205], 'x11:turquoise' => [64,224,208],'x11:brown1' => [255,64,64], 'x11:lightsteelblue4' => [110,123,139],'x11:orange2' => [238,154,0], 'x11:antiquewhite' => [250,235,215],'x11:wheat2' => [238,216,174], 'x11:rosybrown2' => [238,180,180],'x11:lightsteelblue3' => [162,181,205], 'x11:grey78' => [199,199,199],'x11:grey21' => [54,54,54], 'x11:sandybrown' => [244,164,96],'x11:lavenderblush2' => [238,224,229], 'x11:steelblue' => [70,130,180],'x11:grey95' => [242,242,242], 'x11:indianred' => [205,92,92],'x11:skyblue1' => [135,206,255], 'x11:khaki' => [240,230,140],'x11:orchid4' => [139,71,137], 'x11:chocolate1' => 
[255,127,36],'x11:goldenrod3' => [205,155,29], 'x11:sienna4' => [139,71,38],'x11:lightblue' => [173,216,230], 'x11:grey88' => [224,224,224],'x11:palegreen4' => [84,139,84], 'x11:mediumorchid' => [186,85,211],'x11:blue' => [0,0,255], 'x11:blue1' => [0,0,255],'x11:dodgerblue3' => [24,116,205], 'x11:indianred1' => [255,106,106],'x11:cyan3' => [0,205,205], 'x11:tan1' => [255,165,79],'x11:darkorange' => [255,140,0], 'x11:skyblue2' => [126,192,238],'x11:coral1' => [255,114,86], 'x11:darkslategrey' => [47,79,79],'x11:burlywood' => [222,184,135], 'x11:sienna3' => [205,104,57],'x11:darkgreen' => [0,100,0], 'x11:mistyrose3' => [205,183,181],'x11:grey68' => [173,173,173], 'x11:grey53' => [135,135,135],'x11:lemonchiffon' => [255,250,205], 'x11:lemonchiffon1' => [255,250,205],'x11:palegreen1' => [154,255,154], 'x11:grey76' => [194,194,194],'x11:steelblue3' => [79,148,205], 'x11:grey11' => [28,28,28],'x11:oldlace' => [253,245,230], 'x11:mintcream' => [245,255,250],'x11:firebrick4' => [139,26,26], 'x11:lemonchiffon3' => [205,201,165],'x11:olivedrab' => [107,142,35], 'x11:honeydew2' => [224,238,224],'x11:deepskyblue2' => [0,178,238], 'x11:slateblue2' => [122,103,238],'x11:slategrey2' => [185,211,238], 'x11:seagreen2' => [78,238,148],'x11:salmon2' => [238,130,98], 'x11:ivory3' => [205,205,193],'x11:mediumorchid1' => [224,102,255], 'x11:tan2' => [238,154,73],'x11:springgreen' => [0,255,127], 'x11:springgreen1' => [0,255,127],'x11:seashell1' => [255,245,238], 'x11:seashell' => [255,245,238],'x11:skyblue3' => [108,166,205], 'x11:chartreuse4' => [69,139,0],'x11:burlywood3' => [205,170,125], 'x11:plum' => [221,160,221],'x11:ivory4' => [139,139,131], 'x11:darkviolet' => [148,0,211],'x11:lightblue2' => [178,223,238], 'x11:wheat' => [245,222,179],'x11:darkgoldenrod1' => [255,185,15], 'x11:cornflowerblue' => [100,149,237],'x11:purple2' => [145,44,238], 'x11:grey6' => [15,15,15],'x11:magenta2' => [238,0,238], 'x11:sienna1' => [255,130,71],'x11:darkgoldenrod' => [184,134,11], 'x11:forestgreen' => [34,139,34],'x11:navajowhite4' => [139,121,94], 'x11:royalblue4' => [39,64,139],'x11:wheat1' => [255,231,186], 'x11:lightskyblue4' => [96,123,139],'x11:grey18' => [46,46,46], 'x11:orangered4' => [139,37,0],'x11:salmon3' => [205,112,84], 'x11:white' => [255,255,255],'x11:grey100' => [255,255,255], 'x11:orange4' => [139,90,0],'x11:wheat4' => [139,126,102], 'x11:rosybrown1' => [255,193,193],'x11:grey8' => [20,20,20], 'x11:lightgoldenrod2' => [238,220,130],'x11:lightskyblue3' => [141,182,205], 'x11:violetred' => [208,32,144],'x11:blue4' => [0,0,139], 'x11:darkblue' => [0,0,139],'x11:lavenderblush3' => [205,193,197], 'x11:thistle4' => [139,123,139],'x11:hotpink1' => [255,110,180], 'x11:darkorange2' => [238,118,0],'x11:lightsalmon2' => [238,149,114], 'x11:orangered1' => [255,69,0],'x11:orangered' => [255,69,0], 'x11:burlywood1' => [255,211,155],'x11:lightgrey' => [211,211,211], 'x11:grey22' => [56,56,56],'x11:tomato4' => [139,54,38], 'x11:rosybrown3' => [205,155,155],'x11:gold2' => [238,201,0], 'x11:beige' => [245,245,220] ); # Returns a hash-ref: color-name => RGB triple sub available_names { return \%_colors; } # Returns the RGB triple for a name, undef if name not found # The name is normalized before lookup is attempted. Normalization consists # of: lowercasing and elimination of whitespace. Also, "gray" is replaced # with "grey". # If the name is prefixed with a namespace (separated by colon ':'), # only this namespace is searched. 
If no namespace is specified, then # the lookup occurs first in the global namespace, then in the default # namespace. sub name2rgb { my ( $name ) = @_; my ( $ns, $core, $norm ) = _normalize_name( $name ); # If explicit namespace: if( $ns ne '' ) { if( exists $_colors{ $norm } ) { return _fmt( @{ $_colors{ $norm } } ); } else { return; # Do not search further if explicit namespace is given } } # No explicit namespace if( exists $_colors{ $core } ) { return _fmt( @{ $_colors{ $core } } ); # global namespace } # No namespace, but ':' prefix: search global ONLY, but not default ns if( $core ne $norm ) { return; } $norm = get_default_namespace() . ':' . $core; if( exists $_colors{ $norm } ) { return _fmt( @{ $_colors{ $norm } } ); # default namespace } # Not found return; } # Takes a name and an RGB triple. Registers the triple for the given name. # The name will be normalized (lowercased, whitespace eliminated, 'gray' # replaced by 'grey') before assignment is made. # If the name is not prefixed by a namespace, the color will be entered # into the global namespace. # Returns the old value for the name, if the name already exists. sub register_name { my ( $name, $r, $g, $b ) = @_; my ( $ns, $core, $norm ) = _normalize_name( $name ); # If no explicit ns is given, lookup and replace for $core, which is # guaranteed not preceeded by ':'. Otherwise, use fully qualified name. my $crr = ( $ns eq '' ) ? $core : $norm; if( exists $_colors{ $crr } ) { my $old = $_colors{ $crr }; $_colors{ $crr } = [ $r, $g, $b ]; return _fmt( @$old ); } $_colors{ $crr } = [ $r, $g, $b ]; return; } sub _normalize_name { my ( $name ) = @_; $name = lc( $name ); # Lowercase $name =~ s/\s//g; # Eliminate whitespace $name =~ s/gray/grey/; # gray -> grey my ( $ns, $core ) = ( '', $name ); if( $name =~ /:/ ) { ( $ns, $core ) = split ':', $name; } return ( $ns, $core, $name ); } # Sets the default namespace. Returns the previous value. # Giving an empty string as argument makes the global namespace the default. # Note that the global namespace is initially EMPTY! sub set_default_namespace { my $old = $_default_namespace; $_default_namespace = $_[0]; return $old; } sub get_default_namespace { return $_default_namespace; } } # end BEGIN (Names) 1; __END__ # ================================================== # ++++++++++++++++++++++++++++++++++++++++++++++++++ # ================================================== =head1 NAME Graphics::ColorUtils - Easy-to-use color space conversions and more. 
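As a quick, concrete taste of the interface before the formal synopsis, here is a minimal sketch that converts an HSV triple to an HTML-style hex string and looks up a named color; the specific input values below are arbitrary and chosen only for illustration.

  use Graphics::ColorUtils;               # conversion functions are exported by default
  use Graphics::ColorUtils qw( :names );  # adds name2rgb() and friends

  # Scalar context yields a "#rrggbb" string, list context an RGB triple (0..255).
  my $hex           = hsv2rgb( 270, 0.5, 0.3 );   # a dark purple, e.g. for CSS
  my ( $r, $g, $b ) = hsv2rgb( 270, 0.5, 0.3 );

  # Name lookup is normalized (lowercased, whitespace stripped, "gray" -> "grey")
  # and falls back to the default namespace, which is 'x11' on startup.
  my @gold = name2rgb( 'Gold' );          # ( 255, 215, 0 ), found as 'x11:gold'
  my $navy = name2rgb( 'www:navy' );      # '#000080', explicit namespace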
=head1 SYNOPSIS use Graphics::ColorUtils; ( $y, $i, $q ) = rgb2yiq( $r, $g, $b ); ( $r, $g, $b ) = yiq2rgb( $y, $i, $q ); $hex_string = yiq2rgb( $y, $i, $q ); ( $c, $m, $y ) = rgb2cmy( $r, $g, $b ); ( $r, $g, $b ) = cmy2rgb( $c, $m, $y ); $hex_string = cmy2rgb( $c, $m, $y ); ( $h, $l, $s ) = rgb2hls( $r, $g, $b ); ( $r, $g, $b ) = hls2rgb( $h, $l, $s ); $hex_string = hls2rgb( $h, $l, $s ); ( $h, $s, $v ) = rgb2hsv( $r, $g, $b ); ( $r, $g, $b ) = hsv2rgb( $h, $s, $v ); $hex_string = hsv2rgb( $h, $s, $v ); # ----- use Graphics::ColorUtils qw( :gradients ); ( $r, $g, $b ) = grad2rgb( $name, $f ); # where 0.0 <= $f < 1.0 $hex_string = grad2rgb( $name, $f ); %color_count_for_gradient_name = available_gradients(); $array_ref_of_rgb_triples = gradient( $name ); $array_ref_old_grad = register_gradient( $name, $array_ref_of_rgb_triples ); # ----- use Graphics::ColorUtils qw( :names ); ( $r, $g, $b ) = name2rgb( $name ); $hex_string = name2rgb( $name ); $hash_ref_rgb_triples_for_name = available_names(); ( $old_r, $old_g, $old_b ) = register_name( $name, $r, $g, $b ); $old_hex_string = register_name( $name, $r, $g, $b ); $default_ns = get_default_namespace(); $old_ns = set_default_namespace( $new_ns ); =head1 DESCRIPTION This modules provides some utility functions to handle colors and color space conversions. The interface has been kept simple, so that most functions can be called "inline" when making calls to graphics libraries such as GD, Tk, or when generating HTML/CSS. (E.g. for GD: C<$c = $img-EcolorAllocate( hsv2rgb( 270, 0.5, 0.3 ) );>.) Features: =over 4 =item Color Space Conversions Color space conversions, in particular between the "intuitive" color spaces HSV (Hue/Saturation/Value) and HLS (Hue/Lightness/Saturation) to and from RGB (Red/Green/Blue). =item Color Lookup Color lookup by name for three standard sets of colors: WWW/CSS, SVG, and X11. =item Color Gradients Management of color gradients, which can be indexed by a floating point number in the range 0..1. (Mostly intended for false-color data visualization.) =back =head1 CONVENTIONS Legal values: Y, I, Q: 0..1 C, M, Y: 0..1 R, G, B: 0..255 (may be float on input, guaranteed int on output) H: 0..360 (red=0->yellow->green=120->cyan->blue=240->magenta steps of 60) S, V: 0..1 L, S: 0..1 All C<...2rgb> functions return a three-element array in list context, and a string formatted according to C<"#%02x%02x%02x"> (e.g. C<'#ff3a18'>) in scalar context. =head1 METHODS =head2 Color Space Conversions =over 4 =item YIQ C and C =item CMY C and C =item HSV C and C =item HLS C and C =back All these methods take a triple of values and return a triple of converted values. However, B the C<...2rgb> methods return a string formatted according to C<"#%02x%02x%02x"> (e.g. C<'#ff3a18'>). This format is appropriate e.g. for calls to Tk routines: C<$mw-Ewidget( -color => hls2rgb( 180, 0.2, 0.1 ) );>, etc. =head2 Color Names Names can be arbitrary strings. If names contain a colon (C<':'>), the part of the name before the colon is considered a "namespace" specification. Namespaces allow to have multiple color values corresponding to the same name and to control the priority in which those values will be retrieved. =over 4 =item C Returns a triple C<( $r, $g, $b )> in list context or a a hex-string in scalar context if the name has been found, C otherwise. The name is normalized before lookup is attempted. Normalization consists of: lowercasing and elimination of whitespace. Also, "gray" is replaced with "grey". 
If the name is prefixed with a namespace (separated by colon a C<':'>), only this namespace is searched. If no namespace is specified, then the lookup occurs first in the global namespace, then in the default namespace. =item C Returns a reference to a hash, the keys of which are the color names, and the values are references to three-element arrays of RGB values. =item C Takes a name and an RGB triple. Stores the triple for the given name. The name will be normalized (lowercased, whitespace eliminated, 'gray' replaced by 'grey') before assignment is made. If the name is not prefixed by a namespace, the color will be entered into the global namespace. Returns the old value for the name, if the name already exists, C otherwise. =item C Returns the current value of the default namespace. Note that the empty string C<''> corresponds to the I namespace. =item C Sets the default namespace. Returns the previous value. Giving an empty string as argument makes the global namespace the default. Note that the global namespace is initially I. (On startup, the default namespace is C<'x11'>.) =back =head2 Color Gradients =over 4 =item C Given the name of a gradient and a floating point number between 0 and 1, returns the color (as RGB triple or formatted hex-string) corresponding to the position in the gradient given by C<$f>. Returns C when gradient not found or C<$f> outside valid range. =item C Returns a hash, the keys of which are the names of the known gradients and the values being the number of colors in the corresponding gradient. =item C Given the name of a gradient, returns a reference to an array of RGB triples or C if the gradient is not found. =item C Takes the name of a (possibly new) gradient and a reference to an array of RGB triples. Stores the array as gradient for that name. If the gradient name already existed, returns a reference to the old array, C otherwise. =back An introduction, together with a large number of sample gradients can be found at Paul Bourke's webpage: http://local.wasp.uwa.edu.au/~pbourke/texture_colour/colourramp/ =head1 EXPORT Exports by default: rgb2yiq(), yiq2rgb() rgb2cmy(), cmy2rgb() rgb2hls(), hls2rgb() rgb2hsv(), hsv2rgb() Using the export tag C<:names>, exports the following additional methods: name2rgb() available_names() register_name() set_default_namespace() get_default_namespace() Using the export tag C<:gradients>, exports the following additional methods: gradient() grad2rgb() available_gradients() register_gradient() =head1 BUGS =over 4 =item Input parameter validation Most methods do I explicitly validate that their arguments lie in the valid range. =item Multiple namespaces Names containing multiple colons may not be handled correctly. =item Hue wrap-around While hue should be restricted to 0..360, both C and C tolerate "moderate" violation of this constraint (up to +/- 359). =back =head1 TODO =over 4 =item Perl Versions This module has only been explicitly tested with Perl 5.8, but nothing (should) prevent it from running fine with other versions of Perl. =item Additional color space conversions For instance to and from XYZ, CIE, Luv; I. =item Additional pre-defined gradients Suggestions welcome! =back =head1 SEE ALSO =head2 Related Modules =over 4 =item Color::Rgb Lookup of color values for names. Similar to the "names" methods in this module. Requires F. =item Graphics::ColorNames Lookup of color values for names. Similar to the "names" methods in this module. Does I require F. 
Comes with several sets of predefined color names (similar to this module). =item Graphics::ColorObject Color space conversions, including conversions to and from XYZ and Luv. Object-oriented interface requires instantiation of a "color-object" for each color, which can then provide a representation of itself in all color spaces. =item Color::Scheme Generates pleasant color schemes (sets of colors). =back =head2 Standard Color Sets =over 4 =item WWW/CSS The 16 (or 17, including "orange") colors defined by the W3: http://www.w3.org/TR/css3-color =item SVG The 138 unique named colors (140 normalized unique names) defined for SVG by the W3: http://www.w3.org/TR/SVG/types.html#ColorKeywords =item X11 The 502 unique named colors (549 normalized unique names) defined by the X11 libraries in /usr/lib/X11/rgb.txt on an X11 system =back =head2 Websites =over 4 =item * Poynton's Color FAQ: http://www.poynton.com/ColorFAQ.html =item * Paper on Color Conversion Algorithms: http://www.poynton.com/PDFs/coloureq.pdf =item * Paul Bourke's Webpage with many relevant details: http://local.wasp.uwa.edu.au/~pbourke/texture_colour/ =back =head2 Books =over 4 =item * B by James D. Foley, Andries van Dam, Steven K. Feiner, John F. Hughes (Second Edition in C, 1990, mult. print runs) I> =item * B by James D. Foley, Andries van Dam, Steven K. Feiner, John F. Hughes, Richard L. Phillips (1990, mult. print runs) I =item * B by Donald Hearn and M. Pauline Baker (2nd ed, 1997) I =back =begin IMPLEMENTATION_NOTE There were two intents that drove part of the design: - I wanted to avoid dependency on other modules as much as possible. (This is a small module, it should not have all kinds of requirements on its installation environment.) - Including the VALUES for the color names and gradients in the source file itself is certainly a somewhat contentious decision. Here is the rationale: By embedding the data directly, we avoid the issue of files missing at run-time and the required error detection and recovery code. The impact on loading the module (as compared to requiring the data files) should be minimal - the same amount of data gets read one way or the other. - And obviously I did not want to rely on the file rgb.txt to be there. That's fine for Unix, not ok elsewhere. =end IMPLEMENTATION_NOTE =head1 AUTHOR Philipp K. Janert, Ejanert at ieee dot org E, http://www.beyondcode.org =head1 COPYRIGHT AND LICENSE Copyright (C) 2006 by Philipp K. Janert This library is free software; you can redistribute it and/or modify it under the same terms as Perl itself, either Perl version 5.8.3 or, at your option, any later version of Perl 5 you may have available. 
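To close, a minimal sketch of the gradient interface described above, as one might use it for false-color output. The gradient name C<demo_ramp> and its three colors are invented for illustration; real gradients normally contain many more steps.

  use Graphics::ColorUtils qw( :gradients );

  # Register a tiny custom ramp: blue -> green -> red.
  my $ramp = [ [ 0, 0, 255 ], [ 0, 255, 0 ], [ 255, 0, 0 ] ];
  register_gradient( 'demo_ramp', $ramp );

  my %sizes = available_gradients();          # now includes demo_ramp => 3
  for my $f ( 0.0, 0.4, 0.9 ) {               # valid range: 0.0 <= $f < 1.0
      my $hex = grad2rgb( 'demo_ramp', $f );  # '#0000ff', '#00ff00', '#ff0000'
      print "$f -> $hex\n";
  }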
=cut gdata/inst/perl/VERSIONS0000644000175100001440000000016213003720416014455 0ustar hornikusersSpreadSheet::ParseExcel 0.56 Archive::Zip 1.30 IO::Stringy 2.110 OLE::Storage_Lite 0.19 Compress::Raw::Zlib 2.024 gdata/inst/perl/install_modules.pl0000644000175100001440000000065413003720416017023 0ustar hornikusers#!/usr/bin/perl BEGIN { use File::Basename; # Add current path to perl library search path use lib dirname($0); } require 'module_tools.pl'; my( $HAS_Spreadsheet_ParseExcel, $HAS_Compress_Raw_Zlib, $HAS_Spreadsheet_ParseXLSX); # check if we need to do anything ($HAS_Spreadsheet_ParseExcel, $HAS_Compress_Raw_Zlib, $HAS_Spreadsheet_ParseXLSX) = check_modules(0); install_modules() unless $HAS_Compress_Raw_Zlib; gdata/inst/perl/Crypt/0000755000175100001440000000000013003720416014364 5ustar hornikusersgdata/inst/perl/Crypt/RC4.pm0000644000175100001440000001144113003720416015313 0ustar hornikusers#--------------------------------------------------------------------# # Crypt::RC4 # Date Written: 07-Jun-2000 04:15:55 PM # Last Modified: 13-Dec-2001 03:33:49 PM # Author: Kurt Kincaid (sifukurt@yahoo.com) # Copyright (c) 2001, Kurt Kincaid # All Rights Reserved. # # This is free software and may be modified and/or # redistributed under the same terms as Perl itself. #--------------------------------------------------------------------# package Crypt::RC4; use strict; use vars qw( $VERSION @ISA @EXPORT $MAX_CHUNK_SIZE ); $MAX_CHUNK_SIZE = 1024 unless $MAX_CHUNK_SIZE; require Exporter; @ISA = qw(Exporter); @EXPORT = qw(RC4); $VERSION = '2.02'; sub new { my ( $class, $key ) = @_; my $self = bless {}, $class; $self->{state} = Setup( $key ); $self->{x} = 0; $self->{y} = 0; $self; } sub RC4 { my $self; my( @state, $x, $y ); if ( ref $_[0] ) { $self = shift; @state = @{ $self->{state} }; $x = $self->{x}; $y = $self->{y}; } else { @state = Setup( shift ); $x = $y = 0; } my $message = shift; my $num_pieces = do { my $num = length($message) / $MAX_CHUNK_SIZE; my $int = int $num; $int == $num ? $int : $int+1; }; for my $piece ( 0..$num_pieces - 1 ) { my @message = unpack "C*", substr($message, $piece * $MAX_CHUNK_SIZE, $MAX_CHUNK_SIZE); for ( @message ) { $x = 0 if ++$x > 255; $y -= 256 if ($y += $state[$x]) > 255; @state[$x, $y] = @state[$y, $x]; $_ ^= $state[( $state[$x] + $state[$y] ) % 256]; } substr($message, $piece * $MAX_CHUNK_SIZE, $MAX_CHUNK_SIZE) = pack "C*", @message; } if ($self) { $self->{state} = \@state; $self->{x} = $x; $self->{y} = $y; } $message; } sub Setup { my @k = unpack( 'C*', shift ); my @state = 0..255; my $y = 0; for my $x (0..255) { $y = ( $k[$x % @k] + $state[$x] + $y ) % 256; @state[$x, $y] = @state[$y, $x]; } wantarray ? @state : \@state; } 1; __END__ =head1 NAME Crypt::RC4 - Perl implementation of the RC4 encryption algorithm =head1 SYNOPSIS # Functional Style use Crypt::RC4; $encrypted = RC4( $passphrase, $plaintext ); $decrypt = RC4( $passphrase, $encrypted ); # OO Style use Crypt::RC4; $ref = Crypt::RC4->new( $passphrase ); $encrypted = $ref->RC4( $plaintext ); $ref2 = Crypt::RC4->new( $passphrase ); $decrypted = $ref2->RC4( $encrypted ); # process an entire file, one line at a time # (Warning: Encrypted file leaks line lengths.) $ref3 = Crypt::RC4->new( $passphrase ); while () { chomp; print $ref3->RC4($_), "\n"; } =head1 DESCRIPTION A simple implementation of the RC4 algorithm, developed by RSA Security, Inc. Here is the description from RSA's website: RC4 is a stream cipher designed by Rivest for RSA Data Security (now RSA Security). 
It is a variable key-size stream cipher with byte-oriented operations. The algorithm is based on the use of a random permutation. Analysis shows that the period of the cipher is overwhelmingly likely to be greater than 10^100. Eight to sixteen machine operations are required per output byte, and the cipher can be expected to run very quickly in software. Independent analysts have scrutinized the algorithm and it is considered secure. Based substantially on the "RC4 in 3 lines of perl" found at http://www.cypherspace.org. A major bug in v1.0 was fixed by David Hook (dgh@wumpus.com.au). Thanks, David. =head1 AUTHOR Kurt Kincaid (sifukurt@yahoo.com) Ronald Rivest for RSA Security, Inc. =head1 BUGS Disclaimer: Strictly speaking, this module uses the "alleged" RC4 algorithm. The Algorithm known as "RC4" is a trademark of RSA Security Inc., and this document makes no claims one way or another that this is the correct algorithm, and further, makes no claims about the quality of the source code nor any licensing requirements for commercial use. There's nothing preventing you from using this module in an insecure way which leaks information. For example, encrypting multiple messages with the same passphrase may allow an attacker to decode all of them with little effort, even though they'll appear to be secured. If serious crypto is your goal, be careful. Be very careful. It's a pure-Perl implementation, so that rating of "Eight to sixteen machine operations" is good for nothing but a good laugh. If encryption and decryption are a bottleneck for you, please re-write this module to use native code wherever practical. =head1 LICENSE This is free software and may be modified and/or redistributed under the same terms as Perl itself. =head1 SEE ALSO L, L, L, L, L =cut gdata/inst/NEWS0000644000175100001440000004510313003720417013024 0ustar hornikusersChanges in 2.17.0 (2015-07-02) ------------------------------ New features: - Add new argument 'byrow' to upperTriangle(), lowerTriangle(), upperTriangle<-(), and lowerTriangle<-() to specify by-row rather than by-column order. This makes it simpler to copy values between the lower and upper triangular areas, e.g. to construct a symmetric matrix. Other changes: - Add inline comments to tests to alert reviewers of expected diffs on systems lacking the libraries for read.xls() to support XLSX formatted files. Changes in 2.16.1 (2015-04-28) ----------------------------- Bug fixes: - mapLevels() no longer generates warnings about conversion of lists to vectors. Other changes: - Requirement for Perl version 5.10.0 or later is specified in the package DESCRIPTION. - first() and last() are now simply wrappers for calls to 'head(x, n=1)' and 'tail(x, n=1)', respectively. Changes in 2.16.0 (2015-04-25) ------------------------------ New features: - New functions first() and last() to return the first or last element of an object. - New functions left() and right() to return the leftmost or rightmost n (default to 6) columns of a matrix or dataframe. - New 'scientific' argument to write.fwf(). Set 'scientific=FALSE' to prevent numeric columns from being displayed using scientific notation.
- The 'standard' argument to humanReadable() now accepts three values, 'SI' for base 1000 ('MB'), 'IEC' for base 1024 ('MiB'), and 'Unix' for base 1024 and single-character units ('M') - object.size() now returns objects with S3 class 'object_sizes' (note the final 's') to avoid conflicts with methods in utils for class 'object_size' provided by package 'utils' which can only handle a scalar size. - New 'units' argument to humanReadable()--and hence to print.object_sizes() and format.object_sizes()--that permits specifying the unit to use for all values. Use 'bytes' to display all values with the unit 'bytes', use 'auto' (or leave it missing) to automatically select the best unit, and use a unit from the selected standard to use that unit (i.e. 'MiB'). - The default arguments to humanReadable() have changed. The defaults are now 'width=NULL' and 'digits=1', so that the default behavior is now to show digit after the decimal for all values. Bug fixes: - reorder.factor() was ignoring the argument 'X' unless 'FUN' was supplied, making it incompatible with the behavior of stats:::reorder.default(). This has been corrected, so that calling reorder on a factor with arguments 'X' and/or 'FUN' should now return the same results whether gdata is loaded or not. (Reported by Sam Hunter.) - write.fwf() now properly supports matrix objects, including matrix objects without column names. (Reported by Carl Witthoft.) Other changes: - Replaced deprecated PERL function POSIX::isdigit in xls2csv.pl (which is used by read.xls() ) with an equivalent regular expression. (Reported by both Charles Plessy, Gerrit-jan Schutten, and Paul Johnson. Charles also provided a patch to correct the issue.) - aggregate.table(), which has been defunct gdata 2.13.3 (2014-04-04) has now been completely removed. Changes in 2.14.0 (2014-08-27) ------------------------------ Bug Fixes: - read.xls() can now properly process XLSX files with up to 16385 columns (the maximum generated by Microsoft Excel). - read.xls() now properly handles XLS/XLSX files that use 1904-01-01 as the reference value for dates instead of 1900-01-01 (the default for MS-Excel files created on the Mac). Other changes: - Updated perl libraries and code underlying read.xls() to the latest version, including switching from Spreadsheet::XLSX to Spreadsheet::ParseXLSX. Changes in 2.13.3 (2014-04-04) ------------------------------ Bug Fixes: - Unit tests were incorrectly checking for equality of optional POSIXlt components. (Bug reported by Brian Ripley). Other Changes: - 'aggregate.table' is now defunct. See '?gdata-defunct' for details. - Unit tests and vignettes now follow R standard practice. - Minor changes to clean up R CMD check warnings. Changes in 2.13.2 (2013-06-28) ------------------------------ Enhancements: - Simplify ll() by converting a passed list to an environment, avoiding the need for special casing and the use of attach/detach. - Working of deprecation warning message in aggregate.table clarified. Changes in 2.13.1 (2013-03-24) ------------------------------ Enhancements: - Replaced calls to depreciated function ".path.package" with the new public function "path.package". Changes in 2.13.0 (2012-09-20) ----------------------------- New features: - New 'duplicated2' function which returns TRUE for *all* elements that are duplicated, including the first, contributed by Liviu Andronic. This differs from 'duplicated', which only returns the second and following (second-to last and previous when 'fromLast=TRUE') duplicate elements. 
- New 'ans' functon to return the value of the last evaluated top-level function (a convenience function for accessing .Last.value), contributed by Liviu Andonic. Bug Fixes: - On windows, warning messages printed to stdout by perl were being included in the return value from 'system', resulting in errors in 'sheetCount' and 'sheetNames'. Corrected. - The 'MedUnits' column names 'SIUnits' and 'ConventionalUnits' were reversed and misspelled. Changes in 2.12.0 (2012-09-12) ------------------------------ Other Changes: - 'stats::aggregate' was made into a generic on 27-Jan-2010, so that attempting to call 'aggregate' on a 'table' object will now incorrectly call 'aggregate.table'. Since 'aggregate.table' can be replaced by a call to tapply using two index vectors, e.g. aggregate.table(x, by1=a, by2=b, mean) can be replaced by tapply(x, INDEX=list(a, b), FUN=mean), the 'aggregate.table' function will now display a warning that it is depreciated and recommending the equivalent call to tapply. It will be removed entirely in a future version of gdata. Changes in 2.11.1 (2012-08-22) ------------------------------ Enhancements: - read.xls() now supports fileEncoding argument to allow non-ascii encoded data to be handled. See the manual page for an example. Bug Fixes: - The perl script utilized by read.xls() was incorrectly appending a space character at the end of each line, causing problems with character and NA entries in the final column. Changes in 2.11.0 (2012-06-18) ------------------------------ New Features: - read.xls() and supporting functions now allow blank lines to be preserved, rather than skipped, by supplying the argument "blank.lines.skip=FALSE". The underlying perl function has been extended to suppor this via an optional "-s" argument which, when present, *preserves* blank lines during the conversion. (The default behavior remains unchanged.) Other Changes: - Add SystemRequirements field specifying that perl is necessary for gdata to function fully. Changes in 2.10.6 (2012-06-12) ------------------------------ Bug fixes: - gdata::nobs.default() needs to handle logical vectors in addition to numeric vectors. Changes in 2.10.{3,4,5} (2012-06-08) ------------------------------------ Bug fixes: - Mark example for installXLSsupport() as dontrun so R CMD check won't fail on systems where PERL is not fully functional. - Correct name of installXLSsupport() in tests/test.read.xls.R. Other Changes: - Add dependency on R 2.13.0, since that is when stats::nobs appeared. Changes in 2.10.2 (2012-06-06) --------------------------------------- Bug fixes: - Fix issues in nobs.default identified in testing with the gmodels package. Changes in 2.10.1 (2012-06-06) ------------------------------ Bug fixes: - Undo removal of 'nobs' and 'nobs.lm'. Instead define aliases for 'nobs' and 'nobs.lm' to support backward compatibility for packages depending on gdata. Changes in 2.10.0 (2012-06-05) ------------------------------ New features: - New ls.funs() function to list all objects of class function in the specified environment. - New startsWith() function to determine if a string "starts with" the specified characters. Enhancements: - Add 'na.strings' argument to read.xls() to convert Excel's '#DIV/0!' to NA. Bug fixes: - Correct various R CMD check warnings Other changes: - Base S3 method for nobs() and nobs.lm() method removed since these are now provided in the stats package. 
Changes in 2.9.0 (2011-09-30) ----------------------------- New features: - Add centerText() function to center text strings for a specified width. - Add case() function, a vectorized variant of the base::switch() function, which is useful for converting numeric codes into factors. Enhancements: - Minor improvements to xls2csv() man page. CHANGES IN 2.8.1 (2011-04-15) ----------------------------- Enhancements: - nPairs() gains a summary method that shows how many times each variable is known, while the other variable of a pair is not Bug fixes: - Fix errors on windows when R or Perl install path includes spaces by properly quoting the path. CHANGES IN 2.8.1 (2010-11-12) ----------------------------- Enhancements: - Minor improvement to Args(), read.xls() man page. Bug fixes: - Modify write.fwf() to capture and pass on additional arguments for write.table(). This resolves a bug reported by Jan Wijffels. - Modify xls2sep.R to avoid use of file.access() which is unreliable on Windows network shares. CHANGES IN 2.8.0 (2010-04-03) ----------------------------- Enhancements: - When loaded, gtools (via an .onAttach() function) now checks: 1) if perl is available 2) whether the perl libraries for XLS support are available 3) whether the perl libraries for XLSX support are available If perl is not available, an appropriate warning message is displayed. If necessary perl libraries are not available, a warning message is displayed, as is a message suggesting the user run the (new) installXLSXsupport() function to attempt to install the necessary perl libraries. - The function installXLSXsupport() has been provided to install the binary perl modules that read.xls needs to support Excel 2007+ 'XLSX' files. CHANGES IN 2.7.3 (2010-04-02) ----------------------------- Enhancements: - New xlsFormats() command to determine which Excel formats are supported (XLS, XLSX). Bug Fixes: - No longer attempt to install perl modules Compress::Raw::Zlib and Spreadsheet::XLSX at build/compile time. This should resolve recent build issues, particularly on Windows. - All perl code can now operate (but generate warnings) when perl modules Compress::Raw::Zlib and Spreadsheet::XLSX when are not installed. - Also update Greg's email address. CHANGES IN 2.7.1 (2010-02-19) ----------------------------- Enhancements: - on Windows attempts to locate ActiveState perl if perl= not specified and Rtools perl would have otherwise been used in read.xls and other perl dependent functions. CHANGES IN 2.7.0 (2010-01-25) ----------------------------- Bug Fixes: - Fix building of Perl libraries on Win32 CHANGES IN 2.7.0 (2010-01-25) ----------------------------- Enhancements: - read.xls() now supports Excel 2007 'xlsx' files. - read.xls() now allows specification of worksheet by name - read.xls() now supports ftp URLs. - Improved ll() so user can limit output to specified classes New Functions: - sheetCount() and sheetNames() to determine the number and names of worksheets in an Excel file, respectively. Bug Fixes: - Fix formatting warning in frameApply(). - Resolve crash of "ll(.GlobalEnv)" - CHANGES IN 2.6.1 (2009-07-15) ----------------------------- Bug Fixes - Modify unit tests to avoid issues related to time zones. 
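Note: the XLS support described in the 2.7.x and 2.8.0 entries above is provided by Perl modules bundled under inst/perl (listed in the VERSIONS file). The following minimal sketch shows how the bundled Spreadsheet::ParseExcel module walks a workbook's sheets and cells; the file name is hypothetical, and the code illustrates that module's documented interface rather than copying gdata's own conversion scripts.

    #!/usr/bin/perl
    use strict;
    use warnings;
    use Spreadsheet::ParseExcel;

    my $parser   = Spreadsheet::ParseExcel->new();
    my $workbook = $parser->parse('example.xls');       # hypothetical file name
    die $parser->error(), "\n" unless defined $workbook;

    for my $sheet ( $workbook->worksheets() ) {         # enumeration similar to sheetNames()
        print "Sheet: ", $sheet->get_name(), "\n";
        my ( $row_min, $row_max ) = $sheet->row_range();
        my ( $col_min, $col_max ) = $sheet->col_range();
        for my $row ( $row_min .. $row_max ) {
            for my $col ( $col_min .. $col_max ) {
                my $cell = $sheet->get_cell( $row, $col );
                print $cell->value(), "\t" if defined $cell;
            }
            print "\n";
        }
    }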
CHANGES IN 2.6.0 (2009-07-15) ----------------------------- Bug Fixes - Correct minor typos & issues in man pages for write.fwf(), resample() (Greg Warnes) - Correct calculation of object sizes in env() and ll() (Gregor Gorjanc) New Features - Add support for using tab for field separator during translation from xls format in read.xls (Greg Warnes) - Enhanced function object.size that returns the size of multiple objects. There is also a handy print method that can print size of an object in "human readable" format when options(humanReadable=TRUE) or print(object.size(x), humanReadable=TRUE). (Gregor Gorjanc) - New function wideByFactor that reshapes given dataset by a given factor - it creates a "multivariate" data.frame. (Gregor Gorjanc) - New function nPairs that gives the number of variable pairs in a data.frame or a matrix. (Gregor Gorjanc) - New functions getYear, getMonth, getDay, getHour, getMin, and getSec for extracting the date/time parts from objects of a date/time class. (Gregor Gorjanc) - New function bindData that binds two data frames into a multivariate data frame in a different way than merge. (Gregor Gorjanc) Other Changes - Correct Greg's email address CHANGES IN 2.5.0 ---------------- - New function .runRUnitTestsGdata that enables run of all RUnit tests during the R CMD check as well as directly from within R. - Enhanced function object.size that returns the size of multiple objects. There is also a handy print method that can print size of an object in "human readable" format when options(humanReadable=TRUE) or print(x, humanReadable=TRUE). - New function bindData that binds two data frames into a multivariate data frame in a different way than merge. - New function wideByFactor that reshapes given dataset by a given factor - it creates a "multivariate" data.frame. - New functions getYear, getMonth, getDay, getHour, getMin, and getSec for extracting the date/time parts from objects of a date/time class. - New function nPairs that gives the number of variable pairs in a data.frame or a matrix. - New function trimSum that sums trimmed values. - New function cbindX that can bind objects with different number of rows. - write.fwf gains the width argument. The value for unknown can increase or decrease the width of the columns. Additional tests and documentation fixes. CHANGES IN 2.4.2 (2008-05-11) ----------------------------- - Enhancements and bug fixes for read.xls() and xls2csv(): - More informative log messages when verbose=TRUE - File paths containing spaces or other non-traditional characters are now properly handled - Better error messages, particularly when perl fails to generate an output .csv file. - The 'shortcut' character "~" (meaning user's home directory) is now properly handled in file paths. - XLS files created by OpenOffice are now properly handled. Thanks to Robert Burns for pointing out the patch (http://rt.cpan.org/Public/Bug/Display.html?id=7206) CHANGES IN 2.4.1 (2008-03-24) ----------------------------- - Update perl libraries needed by xls2csv() and read.xls() to latest available versions on CRAN. - Add read.xls() to exported function list - Correct iris.xls example file. It didn't contain the complete & properly formatted iris data set. Fixed. - Fix typo in win32 example for read.xls() CHANGES IN 2.4.0 (2008-01-30) ----------------------------- - The keep() function now includes an 'all' argument to specify how objects with names starting with '.' are handled. 
- keep() now shows an informative warning message when a requested object does not exist - New vignette "Mapping Levels of a Factor" describing the use of mapLevels(). - New vignette "Working with Unknown Values" describing the use of isUnknown() and unknownToNA(). - Several enhancements to read.xls() (thanks to Gabor Grothendieck): - New function xls2csv(), which handles converting an xls file to a csv file and returns a connection to the temporary csv file - xls2csv() and read.xls() both allow a file or a url to be specified - read.xls() has a new 'pattern' argument which, if supplied, will ignore everything prior to the first line in th csv file that matches the pattern. This is typically used if there are a variable number of comment lines prior to the header in which case one can specify one of the column headings as the pattern. read.xls should be compatible with the old read.xls. - Minor fixes to drop.levels(), is.what(). - Implementation of unit tests for most functions. CHANGES IN 2.3.1 (2006-10-29) ----------------------------- - Arguments as well as their position of reorder.factor have been changed to conform with reorder.factor method in stats package, due to collision bug. Argument 'make.ordered' is now 'order' and old argument 'order' is now 'new.order'! Therefore, you have to implicitly specify new.order i.e. reorder(trt, new.order=c("PLACEBO", "300 MG", "600 MG", "1200 MG")) - trim() gains ... argument. - Added "unknown" methods for matrices. - Added c() method for factors based on mapLevels() functions. - Added write.fwf, which writes file in *F*ixed *W*idth *F*ormat. CHANGES FROM 2.1.X to 2.3.0 (2006-09-19) --------------------------------------- - Added mapLevels(), which produces a map with information on levels and/or internal integer codes. Contributed by Gregor Gorjanc. - Extended dropLevels() to work on the factors contained in a data frame, as well as individual factors. - Add unknown(), which changes given unknown value to NA and vice versa. Contributed by Gregor Gorjanc. - Extended trim() to handle a variety of data types data.frames, lists, factors, etc. Code changes contributed by Gregor Gorjanc. - Added resample() command that acts like sample() except that it _always_ samples from the arguments provided, even if only a single argument is present. This differs from sample() which behaves differently in this case. - Updated my email address. CHANGES IN GDATA 2.1.2 ----------------------- - Fixed bug in interleave.R - option to covert 1-column matrices to vector (based on Andrew Burgess's suggestion) - Updated Greg and Jim's email adresses - ll.R: Suppressed warning message in attach() call. - frameApply.Rd, reorder.Rd: Remove explicit loading of gtools in examples, so that failure to import functions from gtools gets properly caught by running the examples. - upperTriangle.R, man/upperTriangle.Rd: Add functions for extracting and modifying the upper and lower trianglular components of matrices. - is.what.R: Replaced the "not.using" vector with a more robust try(get(test)) to find out whether a particular is.* function returns a logical of length one. 
- DESCRIPTION: Added Suggests field - Updated the example in frameApply CHANGES IN GDATA 2.0.8 ----------------------- - Added DESCRIPTION and removed DESCRIPTION.in - Updated ll.Rd documentation - Fixed bug in Args.R, is.what.R, ll.R gdata/inst/bin/0000755000175100001440000000000013003720417013072 5ustar hornikusersgdata/inst/bin/xls2csv.bat0000644000175100001440000000011113003720417015157 0ustar hornikusersREM @echo off SET PERLPATH= %~dp0\..\perl\ perl %PERLPATH%\xls2csv.pl %* gdata/inst/bin/xls2csv0000755000175100001440000000011613003720417014422 0ustar hornikusers#!/bin/sh PERLPATH="`dirname ${0}`/../perl/" perl "${PERLPATH}/xls2csv.pl" $* gdata/inst/ChangeLog0000644000175100001440000012401513115345675014114 0ustar hornikusers2017-06-05 warnes * [r2154] DESCRIPTION: Fix type in DESCRIPTION date field. * [r2153] .Rbuildignore: Specify which file patterns to ignore when building R package file. * [r2152] DESCRIPTION, tests/test.humanReadable.Rout.save, tests/test.read.xls.Rout.save, tests/test.reorder.factor.Rout.save, tests/tests.write.fwf.Rout.save, vignettes/mapLevels.Rnw: Update package version and stored test output. * [r2151] inst/doc/Rnews.sty: Remove obsolete Rnews.sty file from inst/doc. * [r2150] R/startsWith.R: gdata::startsWith() now uses base::startsWith() to do the actual comparison, after hanlding ignore.case and trim arguments. * [r2149] man/trim.Rd: Add reference to 'new' base function 'trimws'. * [r2148] NAMESPACE, R/update.data.frame.R, R/update.list.R, man/update.list.Rd: Drop 'update.data.frame' until there is time to work on it. 2016-08-12 warnes * [r2130] NAMESPACE: Add mv to exported namespace 2016-05-31 warnes * [r2128] R/humanReadable.R: Fix typo that forced users of humanReadable() to provide two elements to the 'justify' argument. The correction allows a single value to be provided which will be expanded to two internally. 2016-02-05 warnes * [r2077] man/update.list.Rd: Add documentation for update() data.frame method. * [r2076] R/mv.R, man/mv.Rd: Add mv() function to rename an object. 2016-02-03 warnes * [r2075] NAMESPACE: - Add update() methods list and data.frame - Add 'first<-' and 'last<-' assignment methods * [r2074] R/update.data.frame.R, R/update.list.R, man/update.list.Rd: Add update() methods for lists and data frames * [r2073] R/first.R, man/first.Rd: Add assignment versions of first() and last() * [r2072] R/rename.vars.R: Improve logging and error reporting for remove.vars() 2015-10-15 warnes * [r2068] R/installXLSXsupport.R: Remove unused call to tempdir(). 2015-07-22 warnes * [r2062] DESCRIPTION, NAMESPACE, tests/test.humanReadable.Rout.save, tests/test.read.xls.R, tests/test.read.xls.Rout.save, tests/test.reorder.factor.Rout.save, tests/tests.write.fwf.Rout.save: Renamed 'test' directory to 'tests', commented out tests for lme4 which has a changed API 2015-07-03 warnes * [r2056] DESCRIPTION, inst/ChangeLog, inst/NEWS: Update for gdata 2.17.0 2015-06-29 warnes * [r2055] inst/ChangeLog: Update ChangeLog * [r2054] tests/test.humanReadable.Rout.save, tests/test.read.xls.R, tests/test.read.xls.Rout.save, tests/test.reorder.factor.Rout.save, tests/tests.write.fwf.Rout.save: Add note for R CMD check to help reviewers not freak out when diffs occur because of absence of a PERL library needed to support XLSX files. * [r2053] R/upperTriangle.R, man/upperTriangle.Rd: Add 'byrow' argument to lowerTriangle()/upperTriangle() functions. 
2015-05-02 warnes * [r2018] Rename 'trunk' to 'pkg' for compatibility with R-forge 2015-04-29 warnes * [r1993] Update ChangeLog and NEWS again. * [r1992] Apparentely read.csv() needs different combination of "fileEncoding=`latin1`" and "encoding=`latin1`" on unix and windows platforms. * [r1991] In mapLevels(), use sapply() instead of lapply() to avoid warning message. * [r1990] Displaying all the latin1 characters for diff isn't reliable across platforms. Simply summarize the latin1 data instead. * [r1989] Display read latin1 data so that diff can catch changes. 2015-04-28 warnes * [r1988] Update ChangeLog for gdata 2.16.1 * [r1987] Update NEWS for gdata 2.16.1 * [r1986] Remove no-longer defined methods. * [r1985] Summary: Minor formatting changes, use rnorm() for X in example, and use set.seed() for consistent results. * [r1984] Summary: Replace unicode single-quote characters with ASCII ones. * [r1983] Summary: Call base::sort instead of sort, which has been redefined by arguments. * [r1982] Update NEWS and ChangeLog. * [r1981] Bump version number. * [r1980] Remove CVS header tag. * [r1979] Update version requirement for R (>= 2.3.0) and perl (5.10.0). * [r1978] - first() and last() are now simply wrappers to utils::head() and utils::tail() with a default 'n=1' instead of 'n=6'. - Move code for left() and right() into a separate file. * [r1977] If arguments 'X' or 'FUN' is supplied to reorder.factor(), mimic the behavior of stats::reorder.default() rather than trying to call it via NextMethod. 2015-04-25 warnes * [r1974] List needs a conjuction * [r1973] Fix spelling errors & typos * [r1972] Fix typographical errors * [r1971] Update NEWS and ChangeLog (again) * [r1970] Remove aggregate.table() entirely * [r1969] 'test.humanReadable.R' needed set.seed() to make the results consistent. * [r1968] Update .save files * [r1967] Missed on commit. * [r1966] Modfy write.fwf() to properly handle matrix argument, avoiding conversion to dataframe unless rownames=TRUE. Add corresponding unit tests. * [r1965] Installing PERL modules was failing. Adding CPAN configuration option fixed the problem. * [r1964] Error message about executable name was missing one alternative * [r1963] Better describe gdata contents * [r1962] is.* and as.* aren't generics * [r1961] Add 'justify' argument to print and format object_sizes methods * [r1960] Add 'justify' argument to print and format object_sizes methods * [r1959] Remove stray call to 'browser' * [r1958] Update DESCRIPTION, ChangeLog, and NEWS * [r1957] Complete work on object.size(), object_sizes methods, and humanReadable. * [r1956] Add error message if Excel file format is too old 2015-04-23 warnes * [r1953] Update NEWS and ChangeLog * [r1952] - write.fwf() now properly supports matrix objects, including matrix objects wihtout column names. (Reported by Carl Witthoft.) * [r1951] Remove 'use POSIX' from xls2csv.pl since it is no longer needed * [r1939] Update NEWS and ChangeLog * [r1938] reorder.factor() now hands off processing to stats:::reorder.default() when either 'X' or 'FUN' is specified. 2015-04-22 warnes * [r1937] Update NEWS and ChangeLog for changes to humanReadable() * [r1936] Fix 'units' argument of humanReadable() * [r1935] Update object.size() man page to reflect change in class of return value from 'object_size' to 'object_sizes' * [r1934] Update NEWS and ChangeLog for gdata 2.16.0 * [r1933] Modify gdaata:object.size to generate S3 objects of class 'object_sizes' (note the final 's') to avoid conflicts with methods in utils for object_size. 
* [r1932] Correct behavior of reorder.factor() when argument 'X' is supplied by delgating to stats:::reorder.default() 2015-04-14 warnes * [r1929] Update ChangeLog * [r1928] Remove editorializing * [r1927] Update NEWS and ChangeLog for gdata 2.15.0 * [r1926] Add 'scientific' argument to write.fwf to allow control of whether numeric values can be displated using scientific notation. * [r1925] Replace depricated PERL function POSIX::isnumeric with equivalent regexp * [r1924] Add gdata ChangeLog to SVN 2015-04-10 warnes * [r1922] Update files for gdata 2.15.0 2015-04-08 warnes * [r1919] Move first/last/left/right to from gtools to gdata 2014-08-28 warnes * [r1883] Everything works now! * [r1882] Suppress annoying warnings in Spreadsheet::ParseXLS::FmtDefalt. * [r1881] Add tests and corresponding test files for 1900 and 1904 based XLX/XLSX files * [r1880] Complete transition from Spreadsheet::XLSX to Spreadsheet::ParseXLSX * [r1879] Handle Excel files created on the Mac, where by default Excel uses 1904-01-01 as the baseline for dates, rather than the usual 1900-01-01. * [r1878] Remove dotfiles * [r1877] Update for release * [r1876] Add test for handling fo very wide xls and xlsx files. * [r1875] Add test for handling fo very wide xls and xlsx files. * [r1874] Modify code to use latest version of Spreadsheet::ParseExcel and to replace Spreadsheet::XLSX woth Spreadsheet::ParseXLSX * [r1873] Update Spreadsheet::ParseExcel, add Spreadsheet:ParseXLSX, add dependencies 2014-04-05 warnes * [r1801] Apply same changes to NAToUnknown that were previously applied to unknownToNA for POSIXlt. * [r1800] Update NEWS with latest changes * [r1799] Call stats::nobs instead of stats:::nobs.default within gdata::nobs.default. This avoids R CMD check warning. * [r1798] Don't compare optional POSIXlt field. Explicitly compare POSIXlt, with special handling of '-1' unknown value. * [r1797] Don't use gdata::: prefix to access gdata function * [r1796] Fix syntax error in DESCRIPTION file. * [r1795] Package name needs to be defined outside of if test. * [r1794] Style file needed * [r1793] The issue Brian pointed out was an error in the isUnknown() code, not an error in the unit tests! * [r1792] Apply changes Brian recommned to NAtoUnknown as well as unknownToNA. * [r1791] Update NEWS file * [r1790] Don't need latex .dtx source file * [r1789] Move vignettes from inst/doc/ to vignettes/ * [r1788] Change 'aggregate.table' from deprecated to defunct. * [r1787] Complete changes so that the unit tests are run as part of R CMD check * [r1786] Update NEWS for gdata 2.13.4 * [r1785] Update NAMESPACE file to remove deleted function * [r1784] Move unit test files back to inst/unitTests. Fix up runRUnitTests.R to work properly in the new location * [r1783] - For unit tests, don't check for equality of optional POSIXlt components. (Bug reported by Brian Ripley). * [r1782] Move unit test code into the (now) standard location 2014-03-19 arnima * [r1777] change warning message to R standards 2013-12-18 arnima * [r1758] Retain original list order unless sort=FALSE; also stop if unnamed list 2013-12-16 warnes * [r1757] Trim will now remove all types of leading/trailing whitespace by using the [:blank:] character class. 2013-06-29 warnes * [r1692] Update NEWS for second try for gdata 2.13.2 * [r1691] Simplify ll() by stuffing list arguments into an environment, avoiding the need to use attach/detach. 
2013-06-28 warnes * [r1685] Update NEWS for gdata 2.13.2 * [r1684] Minor update to tests/*.Rout.save * [r1683] Add on.exit() handler to ensure a matching detach occurs when attach is used in ll() * [r1682] Update for gdata 2.13.2 * [r1681] Improve deprecated message 2013-03-24 warnes * [r1645] Update test files for code changes * [r1644] Fix formatting in NEWS * [r1643] Replaced calls to depreciated function ".path.package" with the new public function "path.package". 2013-01-14 warnes * [r1639] Replace (obsolete) '.path.package' with 'find.package' function. 2012-09-20 warnes * [r1622] Correct .Rd file errors detected by 'R CMD check'. * [r1621] Add duplicated() and ans() to the NAMESPACE. * [r1620] Update for gdata 2.13.0. * [r1619] Fix typographic error. * [r1618] Add 'ans()' and 'duplicated()' contributed by Liviu Andronic. 2012-09-19 warnes * [r1617] Correct column names. Unit columns were reversed and misspelled. * [r1616] Add ignore.stderr to system command in sheetCmd() to prevent stderr messages from being included in the captured output from the perl script. 2012-09-12 warnes * [r1606] Update for gdata 2.12.0 * [r1605] 'stats::aggregate' was made into a generic on 27-Jan-2010, so that attempting to call 'aggregate' on a 'table' object will now incorrectly call 'aggregate.table'. Since 'aggregate.table' can be replaced by a call to tapply using two index vectors, e.g. aggregate.table(x, by1=a, by2=b, mean) can be replaced by tapply(x, INDEX=list(a, b), FUN=mean), the 'aggregate.table' function will now display a warning that it is depreciated and recommending the equivalent call to tapply. It will be removed entirely in a future version of gdata. * [r1604] Don't ignore .Rnw files, but do ignore .svn files. 2012-09-11 warnes * [r1603] Clarify workding of DROP argument to interleave(). * [r1602] Replace call to aggregate.table() with equivalent tapply() call since aggregate.table() is being depreciated. 2012-08-22 warnes * [r1601] Update DESCRIPTION and NEWS for gdate 2.11.1. * [r1600] Add example for read.xls() that shows how to use the fileEncoding argument to read in latin-1 encoded data. * [r1599] Add XLSX test for latin-1 characters, and look for them in their new location in inst/xls/. * [r1598] add XLSX version of latin-1.xls * [r1597] Add test file and code to ensure that read.xls() can properly handle files with alternative encodings. latin-1.xls contains each of the non-ascii latin-1 special characters in both the column headings and the body of the file. * [r1596] Change code to have R read the csv/tab data from the file rather than from the connetion we made, so that file encodings can be properly handled. * [r1595] Always close the connection. 2012-08-13 warnes * [r1594] Remove trailing space from output line. 2012-06-18 warnes * [r1567] Update NEWS for 2.11.0 release. * [r1566] Bump version number and add SystemRequirements for perl. * [r1565] read.xls() and supporting functions now allow blank lines to be preserved, rather than skipped, by supplying the argument "blank.lines.skip=FALSE". The underlying perl function has been extended to suppor this via an optional "-s" argument which, when present, *preserves* blank lines during the conversion. 2012-06-13 warnes * [r1564] - nobs.default needs to handle logical vectors in addition to numeric vectors. - update DESCRIPTION and NEWS for 2.10.6. * [r1563] nobs.default needs to handle logical as well as numeric vectors. 
2012-06-08 warnes * [r1562] Update DESCRIPTION and tests * [r1561] fix incorrect function name * [r1560] Mark example for installXLSXsupport() to not be executed durin R CMD check. * [r1559] stats:::nobs.default and stats::nobs.lm require R > 2.13.0, so add this as a dependency. 2012-06-06 warnes * [r1552] Update for release 2.10.2 * [r1551] Fix bugs in nobs.default. * [r1550] Update to reflect warning on startup that 'nobs' hides 'stats::nobs'. * [r1549] Remove stray non-ASCII characters. * [r1548] The nobs() dispatch method must be defined in the gdata namespace to pick up the definition of gdata::nobs.default. * [r1547] Update DESCRIPTION and NEWS for 2.10.1 release. * [r1546] Define aliases for 'nobs' and 'nobs.lm' to support backward compatibility for packages depending on gdata. * [r1545] Update DESCRIPTION and NEWS for 2.10.0 release * [r1544] - Add manual page and NAMESPACE entry for startsWith(). - Add 'ignore.case' argument to startsWith(). * [r1543] Update to match new code. * [r1542] Replace non-ASCII characters. * [r1541] Add na.strings to read.xls call to convert "#DIV/0!" to NA. 2012-06-05 warnes * [r1540] Remove nobs method dispatch and lm methods since these are now provided by the stats package. * [r1539] Spell out arguments to ls() to avoid R CMD check warnings. * [r1538] Add .Rinstignore file to omit latex style and source files from distributed inst/doc directory. * [r1537] - Add NULL definition of MedUnits to avoid R CMD check warning. - Specify local environment when calling data() so that MedUnits gets defined in the function's environment rather than the global environment. * [r1536] Fix error in ls.funs() that occurs when there are no objects in the environment. * [r1535] Avoid warning by calling utils::object.size rather than Internal(object.size(x)) 2012-05-31 warnes * [r1534] - Remove dispatch function 'nobs' and method 'nobs.lm' since these are now provided by the R 'stats' package. 2012-05-04 warnes * [r1532] Update for next release * [r1531] Add ls.funs() to show functions defined in the specified environment. * [r1530] Fix enumerate syntax. 2012-04-03 warnes * [r1522] Add startsWith() function. 2011-10-05 warnes * [r1516] Fix typo 2011-09-30 warnes * [r1515] Update DESCRIPTION and README for 2.9.0 release. * [r1514] Update DESCRIPTION and README for 2.9.0 release. 2011-09-20 warnes * [r1508] Improve xls2csv() man page * [r1507] Add case() function, a vector equivalent of the switch() function * [r1506] Add case() function, a vector equivalent of the switch() function 2011-09-02 warnes * [r1500] Add 'centerText' function to center text strings for a specified width. * [r1499] Add 'centerText' function to center text strings for a specified width. 2011-04-16 warnes * [r1469] Update for release 2.8.2 2011-04-15 warnes * [r1468] Fix errors on windows when R or Perl install path includes spaces by properly quoting the path. * [r1467] Fix error in xlsFormat() on windows when R or Perl install path includes spaces by quoting the path. 2011-01-15 ggorjan * [r1465] Adding summary method for nPairs 2010-11-12 warnes * [r1462] Update NEWS for gdata 2.8.1 * [r1461] Update DEScription file for 2.8.1 release * [r1460] Update test output to match latest code * [r1459] Modify write.fwf() to capture and pass on additional arguments for write.table(). This resolves a bug reported by Jan Wijffels. 2010-11-01 arnima * [r1453] Minor improvement in Args.Rd help page 2010-10-19 warnes * [r1452] Avoid use of file.access() which is unreliable on Windows network shares. 
2010-07-08 ggrothendieck2 * [r1448] findPerl call added to xls2sep 2010-07-07 ggrothendieck2 * [r1447] small improvements to read.xls.Rd 2010-05-03 warnes * [r1439] Rename installXLSXModules() to installXLSXsupport() and provide documentation for it. * [r1438] Update news for gdata 2.8.0 * [r1437] Add .onAttach function to check & inform user if perl is available, to check whether XLS and XLSX formats are avaiable, and to run the (new) installXLSXModules() functon to attempt to install the necessar libraries if not. Added installXLSXModules() function. 2010-05-02 warnes * [r1436] Correct error in xlsFormat example * [r1435] Update perl code to work (but generate warnings) when Zlib or SpreadSheet::XLXS is not instaled. Also update Greg's email address 2010-02-21 ggrothendieck2 * [r1423] isOpen problems fixed (isOpen must have changed in R since this worked in earlier versions). Also nba.xls link in read.xls.Rd disappeared. Replaced with similar link. 2010-02-20 ggrothendieck2 * [r1422] improved INSTALL file 2010-02-19 ggrothendieck2 * [r1421] added findPerl to locate ActiveState Perl on Windows if perl= not specified and Rtools perl would have otherwise been used. Also added INSTALL file. 2010-01-28 warnes * [r1419] Update for release 2.7.1 * [r1418] xls2sep(): Show output of perl call when verbose=T * [r1417] More Win32 fixes * [r1416] More work on Win32 building * [r1415] Support building Compress::Raw::Zlib perl package under windows. 2010-01-26 warnes * [r1413] Fix typos * [r1412] Show more details in sheetCount() when verbose=TRUE 2010-01-24 warnes * [r1411] Replace two calls to 'dQuote', to 'dQuote.ascii' * [r1408] Remove auto-generated pdf files from svn * [r1407] create 'distclean' to remove perl binary dir, currently mac-only * [r1406] Make read.xls() and xls2sep() quieter when verbose=FALSE * [r1405] Add tests for read.xls, sheetCount, and sheetNames * [r1404] Modify makefile to 1) clean up after build, 2) make tar non-verbose * [r1403] Close connections when done. * [r1402] Fix typo * [r1401] Fix R CMD CHECK errors * [r1400] Use the original gz file for Compress::Raw::Zlib to avoid issues with 'non-platform-independent' filename error in R CMD CHECK * [r1399] Rename files to remove R CMD check error * [r1398] Update for 2.7.0 release * [r1397] Add new functions to NAMESPACE * [r1396] Add Compress::Raw::Zlib code * [r1395] Add/Update documentation * [r1394] Minor formatting change * [r1393] Add additional example files * [r1392] Combine sheetCount.pl and sheetNames.pl and modify to support Excel 2007 'xlsx' format * [r1391] Complete changes to handle Excel 2007 'xlsx' files * [r1390] Add additional Perl modules to support Excel 2007 'xlsx' files 2010-01-24 ggrothendieck2 * [r1389] added sheetNames.Rd (documenting sheetNames/sheetCount) and updated NAMESPACE file. * [r1388] fixed spacing problem in NEWS 2010-01-23 warnes * [r1387] Check if parsing the xls file succeeds... Current code doesn't handle new XML-based format * [r1386] Remove perl 'Spreadsheet:XLSX' module since it depends on Compress-Raw-Zlib, which probably won't be available on most machines, and I don't have time to figure out how to get R to build it properly when gdata is installed. * [r1385] Add perl 'Spreadsheet:XLSX' module to support new Excel XML format files * [r1384] Add xls2tsv() convenience wrapper to xls2sep() * [r1383] Update to match new xls2csv.pl code, allow specification of sheets by name, support CSV and TAB delimited files using the same code, other minor changes. 
* [r1382] Add sheetNames() function to extract the names from XLS files * [r1381] Fix xls2csv.bat * [r1380] If only one sheet is present in the file, don't insert the sheet name into the filename * [r1379] Add additional test/example Excel files * [r1378] Modify xls2csv.pl script to: - Use tab-delimiter and .tsv or .tab extension if called with the name xls2tsv.pl or xls2tab.pl, respectively. This allows a single source file and two symlinks to be used intstead of maintaining several almost-identical files. - Allow selection of sheets by name - Provide better error checking - Other code improvements * [r1377] Add perl scripts to extract worksheet names and sheet count from Excel files 2010-01-22 warnes * [r1376] Upgrade Perl OLE::StorageLight module to version 0.19 * [r1375] Upgrade perl Spreadsheet::ParseExcel to version 0.56 * [r1374] Add complete list of contributors 2010-01-22 arnima * [r1373] Minor improvement in help page * [r1371] Many small improvements to documentation of Arni's five functions 2010-01-22 warnes * [r1370] - Move xls2csv(), xls2tab(), xls2sep() to a separate file - Move qQuote.ascii to a separate file - Bug Fix: xls2csv(), xls2tab() failed to pass the provided 'perl' parameter to xls2sep() - New Feature: xls2sep() (and hence xls2csv, xls2tab, and read.xls) now supports ftp URLs. 2009-12-06 arnima * [r1369] Minor improvements of Args(). * [r1368] Improved ll() so user can limit output to specified classes 2009-11-16 arnima * [r1366] ll(.GlobalEnv) does not crash anymore 2009-08-20 warnes * [r1357] Replace \ldots with \dots to make the new R CMD CHECK happy. 2009-08-19 warnes * [r1355] Update for 2.6.1 release * [r1354] Modify unit tests to avoid issues related to zime zones. 2009-08-05 warnes * [r1353] Update vignettes for 2.6.0 release * [r1352] Fix formatting warning in frameApply man page 2009-07-16 ggorjan * [r1350] Reverting recent change and clarifying the meaning. 2009-07-16 warnes * [r1349] Add contents of \value section for resample() man page * [r1348] Update test output to remove R CMD check warning * [r1347] Update ChangeLog and NEWS for gdata 2.6.0 release * [r1346] Update DESCRIPTION file for gdata 2.6.0 * [r1345] Correct Greg's email address * [r1344] Correct minor typos in write.fwf() man page * [r1343] Correct page for resample() * [r1342] Add support for using tab for field separator during translation from xls format in read.xls 2009-04-19 arnima * [r1314] Changed object.size(object) to unclass(object.size(object)). 2008-12-31 ggorjan * [r1312] Documenting changes and exporting the functions. * [r1311] Enhanced function object.size that returns the size of multiple objects. There is also a handy print method that can print size of an object in "human readable" format when options(humanReadable=TRUE) or print(object.size(x), humanReadable=TRUE). * [r1310] New function wideByFactor that reshapes given dataset by a given factor - it creates a "multivariate" data.frame. * [r1309] New function nPairs that gives the number of variable pairs in a data.frame or a matrix. * [r1308] New functions getYear, getMonth, getDay, getHour, getMin, and getSec for extracting the date/time parts from objects of a date/time class. * [r1307] New function bindData that binds two data frames into a multivariate data frame in a different way than merge. * [r1306] New function .runRUnitTestsGdata that enables run of all RUnit tests during the R CMD check as well as directly from within R. 
2008-12-20 ggorjan * [r1305] * [r1304] To remove some output in the R CMD check 2008-08-05 ggorjan * [r1300] - Increased version to 2.5.0 - New function cbindX that can bind objects with different number of rows. - write.fwf gains width argument. Unknown values can increase or decrease the width of the columns. Additional tests and documentation fixes. 2008-06-30 arnima * [r1299] Simplified default 'unit' argument from c("KB","MB","bytes") to "KB". 2008-05-13 warnes * [r1270] Update NEWS file for 2.4.2 * [r1269] Use path.expand() to give proper full path to xls file to be translated by read.xls() * [r1268] Modifed read.xls() failed to return the converted data... fixed. * [r1267] Correct broken patch for open-office support * [r1266] For read.xls() and xls2csv(): - Implement more informative log messages when verbose=TRUE - Quote temporary file name to avoid errors when calling perl to do the work. - Add better error messages, particularly when perl fails to generate an output .csv file. Update version number in DESCRIPTION. 2008-05-12 warnes * [r1265] Patch to correct issue with OpenOffice-created XLS files. Thanks to Robert Burns for pointing out the patch at http://rt.cpan.org/Public/Bug/Display.html?id=7206 2008-03-25 warnes * [r1250] Update for version 2.4.1 * [r1249] Example iris.xls file didn't complete & properly formatted iris data set. Fixed. * [r1248] Update perl modules to latest versions 2008-03-24 warnes * [r1247] Fix typo in win32 example for read.xls() 2008-03-11 warnes * [r1246] Add xls2csv to exported function list 2008-01-30 warnes * [r1241] Update DESCRIPTION and NEWS for release 2.4.0 2008-01-29 arnima * [r1240] Added argument 'all'. * [r1239] Added argument 'all'. 2007-10-22 warnes * [r1196] Clarify GPL version 2007-09-10 ggorjan * [r1169] removed unmatched brace * [r1168] adding alias 2007-09-06 ggorjan * [r1162] keyword 2007-08-21 ggorjan * [r1154] package help page * [r1153] move * [r1152] move 2007-08-20 ggorjan * [r1151] clean * [r1150] a real vignette * [r1149] a real vignette * [r1148] additional keyword for searchig 2007-08-17 ggorjan * [r1147] keyword 2007-07-22 arnima * [r1103] Reverted back to as.character(substitute(x)), so user can run keep(x), keep("x"), Args(x), and Args("x"). 2007-07-21 arnima * [r1102] Changed as.character(substitute()) to deparse(substitute()), following help(substitute) recommendation. * [r1101] Changed as.character(substitute()) to deparse(substitute()), following help(substitute) recommendation. 
2007-07-10 warnes * [r1099] Update read.xls() code and docs with enhacements by Gabor Grothendieck 2007-06-06 ggorjan * [r1097] last edits from newsletter * [r1096] drop levels as suggested by Brian Ripley * [r1095] better integration of unit tests * [r1094] making codetools happy 2007-01-28 arnima * [r1042] Throw warnings rather than errors 2007-01-27 arnima * [r1041] Meaningful error message is given when requested object does not exist * [r1040] is.* tests that return NA are not reported is.what recursion is avoided 2006-11-30 ggorjan * [r1035] minor commet to the code * [r1034] description of mapLevels methods * [r1033] description of unknown methods 2006-11-16 ggorjan * [r1013] seems that c.factor was not a good idea and there were better examples posted on r-devel list 2006-11-14 ggorjan * [r1012] Removed executable property 2006-11-10 ggorjan * [r1004] just formatting 2006-11-02 ggorjan * [r1002] typos 2006-10-30 ggorjan * [r1001] some more examples for use of read.fwf after write.fwf * [r1000] ignore for report files * [r999] Id tag from source * [r998] removing unused import * [r997] Id tag * [r996] write.fwf * [r995] Id tag * [r994] added unit tests for reorder.factor * [r993] mapply keeps names in R 2.4; POSIX unit tests solved; $ should work now 2006-10-29 ggorjan * [r992] fixed problem in tests; added unknown methods and tests for matrices * [r991] sort is generic now; mapply keeps names in R 2.4.0; some codetools suggestions fixed * [r990] sort is generic from R 2.4.0 * [r989] trim() gains ... argument; version bump * [r988] Fixed collision bug with stats version of reorder.factor 2006-10-27 warnes * [r987] Add c() method for factor objects, submitted by Gregor Gorjanc 2006-09-19 warnes * [r986] Update NEWS file for 2.3.0 release * [r985] Explicitly set the local in runit.trim.R to one where leading spaces affect sort order so that the unit test works properly. 2006-09-18 warnes * [r984] Update Rnews.sty to the latest version * [r983] Integrate fixes for trim() from Gregor and myself. * [r982] Remove unneeded files. 2006-09-13 warnes * [r981] Add unknown() and unit test files * [r980] More fixes from Gregor Gorjanc * [r979] Add mapLevels functions from Gregor Gorjanc, along with associated unit tests. 2006-08-03 warnes * [r978] Add Gregor Gorjanc's mapFactor() and combineLevels() functions. 2006-08-02 warnes * [r977] Update my email address * [r976] Remove MedUnits.rda to convert to binary format * [r975] Remove MedUnits.rda to convert to binary format * [r974] Update version number * [r973] Integrate changes suggested by Gregor Gorjanc 2006-03-14 nj7w * [r940] Fixed R CMD check errors and added trim.default to NAMESPACE 2006-03-13 nj7w * [r939] Added trim.character and trim.factor as per Gregor's suggestions 2006-01-03 warnes * [r839] Add resample() function, which generates a random sample or permutation from the elements of the supplied vector, even if the vector has length 1. This avoide the problems caused by base::sample()'s special case for vectors of length 1, where it attempts to sample from 1:x. 2005-12-13 nj7w * [r806] Updated news and removed changelog 2005-12-12 nj7w * [r798] Updated version number for CRAN release 2005-12-08 warnes * [r789] Andrew Burgess reported that interleave() converts 1-column matrixes to vectors and provided a patch. A slight modification of his patch has been applied. There is now a 'drop' argument, which controls whether 'unnecessary' dimensions are dropped. The default is FALSE. 
2005-12-04 warnes * [r779] Andrew Burgess reported that interleave() converts 1-column matrixes to vectors and provided a patch. A slight modification of his patch has been applied. There is now a 'drop' argument, which controls whether 'unnecessary' dimensions are dropped. The default is FALSE. 2005-12-01 nj7w * [r775] Updated Greg's email address * [r774] Updated Jim's email address 2005-11-21 arnima * [r744] Suppressed warning message in attach() call. 2005-10-27 warnes * [r716] Bump version number again to show that I fixed a bug. * [r715] Update version number * [r714] Remove explicit loading of gtools in examples, so that failure to import functions from gtools gets properly caught by running the examples. * [r713] Add missing close-bracket * [r712] Add upperTriangle and friends * [r711] Add functions for extracting, modifying upper and lower trianglular components of matrices. 2005-10-19 arnima * [r695] Replaced the "not.using" vector with a more robust try(get(test)) to find out whether a particular is.* function returns a logical of length one. 2005-09-12 nj7w * [r671] Updated Greg's email 2005-09-06 nj7w * [r661] Added library(gtools) in the example * [r660] Removed gtools dependency from NAMESPACE, as it was being used only in an example, and was giving warning * [r659] Added Suggests field 2005-09-02 nj7w * [r658] Updated the example in frameApply * [r656] Added NEWS * [r654] ChangeLog 2005-08-31 nj7w * [r644] Added DESCRIPTION file * [r643] removed DESCRIPTION.in 2005-07-20 nj7w * [r631] updated documentation * [r630] ## Args() was using a different search path from args(), e.g. rep <- function(local) return(NULL) args() Args() ## Fixed * [r629] ## is.what() was giving needless warnings for functions, e.g. is.what(plot) ## Fixed * [r628] ## ll() was crashing if argument was a list of length zero, e.g. x <- list() ll(x) ## Fixed, and added sort.elements (see new help page) 2005-06-09 nj7w * [r625] Updating the version number, and various help files to synchronize splitting of gregmisc bundle in 4 individual components. 2005-06-07 nj7w * [r622] Reverting to the previous version of drop.levels.R by replacing sapply(...) with as.data.frame(lapply(...)) because sapply has the undesirable effect of converting the object to a matrix, which in turn coerces the factors to numeric. 2005-05-13 nj7w * [r621] 1) Using dQuote.ascii function in read.xls as the new version of dQuote doesn't work proprly with UTF-8 locale. 2) Modified CrossTable.Rd usage in gmodels 3) Modified heatmap.2 usage in gplots. 2005-04-02 warnes * [r600] Move drop.levels() from gtools to gdata. * [r598] Move frameApply() to gdata package. 2005-03-31 warnes * [r586] Comment out example to avoid R CMD check warnings 2005-03-22 warnes * [r578] Fixes to pass `R CMD check'. * [r577] Integrated fixes from Arni. * [r576] Improve documentation of 'perl' argument and give examples. 2005-03-09 warnes * [r573] - Add ConvertMedUnits() plus documentation - Add documentation for MedUnits data set. * [r572] Update MedUnits data file. * [r571] Don't need both .Rda and .tab forms of the data. * [r570] Add MedUnits data set, which provides conversions between American 'Conventional' and Standard Intertional (SI) medical units. 2005-03-01 warnes * [r566] - Remove 'elem' call from ll example. - Add note to 'elem' man page that it is depreciated and 'll' should be used instead. 2005-02-26 nj7w * [r565] *** empty log message *** 2005-02-25 warnes * [r564] Remove ll methods since the base function now handles lists and data frames. 
* [r563] Integrate changes submitted by Arni Magnusson 2005-01-31 warnes * [r529] Add ability to specify the perl executable and path. 2005-01-28 warnes * [r526] Add dependency on stats. 2005-01-12 warnes * [r515] Add dependency on R 1.9.0+ to prevent poeple from installing on old versions of R which don't support namespaces. 2004-12-27 warnes * [r509] Update usage to match code. * [r508] Replace 'F' with 'FALSE'. 2004-10-12 warneg * [r465] Add unmatrix() function 2004-09-27 warneg * [r461] Updated to pass R CMD check. 2004-09-03 warneg * [r455] added to cvs. * [r454] Checkin xls2csv.pl. Should have been in long ago, must have been an oversight * [r451] Need to look for files using the new package name. * [r449] Need to use the new package name when looking for iris.xls. * [r448] Add ll.list to the to the list of functions described * [r447] Add ll and friends to the namespace * [r446] initial bundle checkin 2004-09-02 warneg * [r442] Initial revision 2004-08-27 warnes * [r441] Fixed bug in mixedsort, and modified reorder.factor to use mixedsort. 2004-07-29 warnes * [r427] Add perl modules to CVS. 2004-07-27 warnes * [r425] Fix typos/spelling. * [r424] Add note that Perl is required for read.xls to work properly. 2004-07-16 warnes * [r420] Remove the temporary csv file if reading it in fails. 2004-06-22 warnes * [r377] Add S3 methods for data frames and lists. 2004-06-08 warnes * [r371] Moved from gregmisc/src/. * [r370] Remove the files in src, instead provide "pre-installed" perl packages in inst/perl. 2004-06-05 warnes * [r365] Fix typo. * [r364] Fix Unix makefile so that it works when invoked directly. * [r363] Fixes for Windows * [r362] Minor enhancment to read.xls example. * [r361] - Merge Makefile.win into Makefile. Makefile.win now just redirects to Makefile. - Update xls2csv.bat and xls2csv shell script to correctly obtain thier installion path and infer the location of the perl code and libraries. - The xls2csv.pl script now assumes that the libraries it needs are installed into the same directory where it is. 2004-06-04 warnes * [r360] More changes, indended to improve installation reliabilty and to make Makefile and Makefile.win as similar as possible. 2004-05-27 warnes * [r358] Clean should remove scripts from source directory. * [r357] Moved to xls2csv.pl.in. * [r354] More fixes. * [r353] Fix missing brace. * [r352] Add explicit package name to see also links. * [r351] More xls2csv perl module support changes. * [r350] More changes to fix local installation of perl modules. 2004-05-26 warnes * [r345] Escape underscores in email addresses so Latex is happy. 2004-05-25 warnes * [r339] More changes to xls2csv code. * [r337] Add Args() function contributed by Arni Magnusson . * [r335] - Change to call perl directly rather than depending on the installed shell script. This should make the code more portable to MS-Windows systes. - Add additional commants.. * [r332] Makefile now modifies xls2csv.bat xls2csv.pl and xls2csv to contain an explicit path to the perl script/libraries. * [r330] R CMD build calls the clean target to purge build files from the source tree when packaging. To get use this behavior correctly, I've renamed the clean target to cleanup and distclean target to clean. * [r329] Add read.xls(), a function to read Microsoft Excel files by translating them to csv files via the xls2csv.pl script. * [r326] More fixes. Seems to work now. 2004-05-24 warnes * [r325] Add files to enable inclusion and installation of xls2csv.pl as part of the package. 
2004-04-01 warnes * [r312] Add function remove.vars(). 2004-03-26 warnes * [r307] Contents of package 'mva' moveed to 'stats'. * [r298] - Fix is.what() for use under R 1.9.0 - is.what() now uses is.* functions found in any attached frame 2004-01-21 warnes * [r282] - Add ... argument to match generic provided in mva. 2004-01-19 warnes * [r275] - Integrated (partial) patch submitted by Arni Magnusson to clarify help text. - Modifed code to use match.arg(). 2003-12-15 warnes * [r271] - Applied patch from Arni that fixed a bug that caused env() to crash if any environment was completely empty 2003-12-03 warnes * [r253] - match function argument defaults with 'usage' 2003-12-02 warnes * [r249] Add one argument, to match code. 2003-12-01 warnes * [r244] - Apply changes submitted by Arni Magnusson 2003-11-19 warnes * [r229] Changes to pass R CMD check. 2003-11-18 warnes * [r224] - Convert from MS-Dos to Unix line endings. - Reformat to 80 columns. 2003-11-17 warnes * [r223] Replace 'T' with 'TRUE' to remove R CMD check error. * [r222] Fix syntax error. 2003-11-10 warnes * [r220] - Add files contributed by Arni Magnusson . As well as some of my own. 2003-06-07 warnes * [r198] - Fixed error in examples. Had sqrt(var(x)/(n-1)) for the standard error of the mean instead of sqrt(var(x)/n). 2003-05-23 warnes * [r197] - Fixed typos * [r196] - library() backported from 1.7-devel. This version of the function adds the "pos=" argument to specify where in the search path the library should be placed. - updated .First.lib to use library(...pos=3) for MASS to avoid the 'genotype' data set in MASS from masking the genotype funciton in genetics when it loads gregmisc - Added logit() inv.logit() matchcols() function and corresponding docs 2003-05-20 warnes * [r195] - Omit NULL variables. * [r194] - Added function trim() and assocated docs. 2003-04-22 warnes * [r188] - The mva package (which is part of recommended) now provides a generic 'reorder' function. Consequently, the 'reorder' function here has been renamed to 'reorder.factor'. - Removed check of whether the argument is a factor object. 2003-03-03 warnes * [r165] - Updated to match reorder.Rd which was exetended to handle factor label names in addition to numeric indices. * [r164] - Added handling of factor level names in addition to numeric indexes. 2002-09-23 warnes * [r118] Added inst/doc directory and contents to CVS. * [r117] - Modified all files to include CVS Id and Log tags. 2002-08-01 warnes * [r112] Added reorder() function to reorder the levels of a factor. 2002-04-09 warneg * [r109] Checkin for version 0.5.3 * [r108] - Properly handle case when some or all arguments are vectors. 2002-03-26 warneg * [r104] - Changed methods to include '...' to match the generic. - Updated for version 0.5.1 * [r102] Added ... to methods. * [r101] Updated to add ... parameter to function calls. * [r98] Initial checkin. * [r95] - Added CVS tags 2002-02-21 warneg * [r87] - Fixed bug where row and column labels didn't always correspond to the contents. This only occured when a factor was used for by1 or by2 and the factors levels weren't in the default sort order. 2002-02-20 warneg * [r86] New function. * [r85] Initial checkin. * [r84] Initial checkin. * [r83] Noted that specialized methods exist. * [r82] Incorrectly had contents of nobs.R here instead of help text. Corrected. * [r81] Minor changes, typo and formatting fixes. * [r79] - initial checkin. 2001-12-12 warneg * [r53] Added omitted documentaton for 'info' parameter. 
Changed example code not to use 'Orthodont' data set so that the nlme package is not required. 2001-12-08 warneg * [r47] Changed 'T' to 'TRUE' in parameter list. 2001-12-07 warneg * [r45] - Fixed see also link. Mis-typed 'data.frame' as 'dataframe'. * [r44] Added attribution. * [r43] Added proper attribution to Don MacQueen. * [r39] Initial checkin. Unfortunately, I've lost the email of the person who sent this to me. I'll credit him/her when I find out who it was! * [r38] Initial checkin 2001-12-05 warneg * [r34] - Renamed 'concat' function to 'combine' to avoid name conflict with an existing S-Plus function. * [r32] - Changed function name 'concat' to 'combine' and renamed concat.Rd to combine.Rd gdata/inst/doc/0000755000175100001440000000000013115346316013075 5ustar hornikusersgdata/inst/doc/unknown.Rnw0000644000175100001440000002336513115346316015275 0ustar hornikusers %\VignetteIndexEntry{Working with Unknown Values} %\VignettePackage{gdata} %\VignetteKeywords{unknown, missing, manip} \documentclass[a4paper]{report} \usepackage{Rnews} \usepackage[round]{natbib} \bibliographystyle{abbrvnat} \usepackage{Sweave} \SweaveOpts{strip.white=all, keep.source=TRUE} \begin{document} \begin{article} \title{Working with Unknown Values} \subtitle{The \pkg{gdata} package} \author{by Gregor Gorjanc} \maketitle This vignette has been published as \cite{Gorjanc}. \section{Introduction} Unknown or missing values can be represented in various ways. For example SAS uses \code{.}~(dot), while \R{} uses \code{NA}, which we can read as Not Available. When we import data into \R{}, say via \code{read.table} or its derivatives, conversion of blank fields to \code{NA} (according to \code{read.table} help) is done for \code{logical}, \code{integer}, \code{numeric} and \code{complex} classes. Additionally, the \code{na.strings} argument can be used to specify values that should also be converted to \code{NA}. Inversely, there is an argument \code{na} in \code{write.table} and its derivatives to define value that will replace \code{NA} in exported data. There are also other ways to import/export data into \R{} as described in the {\emph R Data Import/Export} manual \citep{RImportExportManual}. However, all approaches lack the possibility to define unknown value(s) for some particular column. It is possible that an unknown value in one column is a valid value in another column. For example, I have seen many datasets where values such as 0, -9, 999 and specific dates are used as column specific unknown values. This note describes a set of functions in package \pkg{gdata}\footnote{ package version 2.3.1} \citep{WarnesGdata}: \code{isUnknown}, \code{unknownToNA} and \code{NAToUnknown}, which can help with testing for unknown values and conversions between unknown values and \code{NA}. All three functions are generic (S3) and were tested (at the time of writing) to work with: \code{integer}, \code{numeric}, \code{character}, \code{factor}, \code{Date}, \code{POSIXct}, \code{POSIXlt}, \code{list}, \code{data.frame} and \code{matrix} classes. \section{Description with examples} The following examples show simple usage of these functions on \code{numeric} and \code{factor} classes, where value \code{0} (beside \code{NA}) should be treated as an unknown value: <>= library("gdata") xNum <- c(0, 6, 0, 7, 8, 9, NA) isUnknown(x=xNum) @ The default unknown value in \code{isUnknown} is \code{NA}, which means that output is the same as \code{is.na} --- at least for atomic classes. 
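For comparison, \code{is.na} gives the same result for this vector
(this short chunk simply reuses \code{xNum} from above):
<<>>=
is.na(xNum)
@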
However, we can pass the argument \code{unknown} to define which values should be treated as unknown: <>= isUnknown(x=xNum, unknown=0) @ This skipped \code{NA}, but we can get the expected answer after appropriately adding \code{NA} into the argument \code{unknown}: <>= isUnknown(x=xNum, unknown=c(0, NA)) @ Now, we can change all unknown values to \code{NA} with \code{unknownToNA}. There is clearly no need to add \code{NA} here. This step is very handy after importing data from an external source, where many different unknown values might be used. Argument \code{warning=TRUE} can be used, if there is a need to be warned about ``original'' \code{NA}s: <>= (xNum2 <- unknownToNA(x=xNum, unknown=0)) @ Prior to export from \R{}, we might want to change unknown values (\code{NA} in \R{}) to some other value. Function \code{NAToUnknown} can be used for this: <>= NAToUnknown(x=xNum2, unknown=999) @ Converting \code{NA} to a value that already exists in \code{x} issues an error, but \code{force=TRUE} can be used to overcome this if needed. But be warned that there is no way back from this step: <>= NAToUnknown(x=xNum2, unknown=7, force=TRUE) @ Examples below show all peculiarities with class \code{factor}. \code{unknownToNA} removes \code{unknown} value from levels and inversely \code{NAToUnknown} adds it with a warning. Additionally, \code{"NA"} is properly distinguished from \code{NA}. It can also be seen that the argument \code{unknown} in functions \code{isUnknown} and \code{unknownToNA} need not match the class of \code{x} (otherwise factor should be used) as the test is internally done with \code{\%in\%}, which nicely resolves coercing issues. <>= (xFac <- factor(c(0, "BA", "RA", "BA", NA, "NA"))) isUnknown(x=xFac) isUnknown(x=xFac, unknown=0) isUnknown(x=xFac, unknown=c(0, NA)) isUnknown(x=xFac, unknown=c(0, "NA")) isUnknown(x=xFac, unknown=c(0, "NA", NA)) (xFac <- unknownToNA(x=xFac, unknown=0)) (xFac <- NAToUnknown(x=xFac, unknown=0)) @ These two examples with classes \code{numeric} and \code{factor} are fairly simple and we could get the same results with one or two lines of \R{} code. The real benefit of the set of functions presented here is in \code{list} and \code{data.frame} methods, where \code{data.frame} methods are merely wrappers for \code{list} methods. We need additional flexibility for \code{list}/\code{data.frame} methods, due to possibly having multiple unknown values that can be different among \code{list} components or \code{data.frame} columns. For these two methods, the argument \code{unknown} can be either a \code{vector} or \code{list}, both possibly named. Of course, greater flexibility (defining multiple unknown values per component/column) can be achieved with a \code{list}. When a \code{vector}/\code{list} object passed to the argument \code{unknown} is not named, the first value/component of a \code{vector}/\code{list} matches the first component/column of a \code{list}/\code{data.frame}. This can be quite error prone, especially with \code{vectors}. Therefore, I encourage the use of a \code{list}. In case \code{vector}/\code{list} passed to argument \code{unknown} is named, names are matched to names of \code{list} or \code{data.frame}. If lengths of \code{unknown} and \code{list} or \code{data.frame} do not match, recycling occurs. The example below illustrates the application of the described functions to a list which is composed of previously defined and modified numeric (\code{xNum}) and factor (\code{xFac}) classes. 
First, function \code{isUnknown} is used with \code{0} as an unknown value. Note that we get \code{FALSE} for \code{NA}s as has been the case in the first example. <>= (xList <- list(a=xNum, b=xFac)) isUnknown(x=xList, unknown=0) @ We need to add \code{NA} as an unknown value. However, we do not get the expected result this way! <>= isUnknown(x=xList, unknown=c(0, NA)) @ This is due to matching of values in the argument \code{unknown} and components in a \code{list}; i.e., \code{0} is used for component \code{a} and \code{NA} for component \code{b}. Therefore, it is less error prone and more flexible to pass a \code{list} (preferably a named list) to the argument \code{unknown}, as shown below. <>= (xList1 <- unknownToNA(x=xList, unknown=list(b=c(0, "NA"), a=0))) @ Changing \code{NA}s to some other value (only one per component/column) can be accomplished as follows: <>= NAToUnknown(x=xList1, unknown=list(b="no", a=0)) @ A named component \code{.default} of a \code{list} passed to argument \code{unknown} has a special meaning as it will match a component/column with that name and any other not defined in \code{unknown}. As such it is very useful if the number of components/columns with the same unknown value(s) is large. Consider a wide \code{data.frame} named \code{df}. Now \code{.default} can be used to define unknown value for several columns: <>= df <- data.frame(col1=c(0, 1, 999, 2), col2=c("a", "b", "c", "unknown"), col3=c(0, 1, 2, 3), col4=c(0, 1, 2, 2)) @ <>= tmp <- list(.default=0, col1=999, col2="unknown") (df2 <- unknownToNA(x=df, unknown=tmp)) @ If there is a need to work only on some components/columns you can of course ``skip'' columns with standard \R{} mechanisms, i.e., by subsetting \code{list} or \code{data.frame} objects: <>= df2 <- df cols <- c("col1", "col2") tmp <- list(col1=999, col2="unknown") df2[, cols] <- unknownToNA(x=df[, cols], unknown=tmp) df2 @ \section{Summary} Functions \code{isUnknown}, \code{unknownToNA} and \code{NAToUnknown} provide a useful interface to work with various representations of unknown/missing values. Their use is meant primarily for shaping the data after importing to or before exporting from \R{}. I welcome any comments or suggestions. % \bibliography{refs} \begin{thebibliography}{1} \providecommand{\natexlab}[1]{#1} \providecommand{\url}[1]{\texttt{#1}} \expandafter\ifx\csname urlstyle\endcsname\relax \providecommand{\doi}[1]{doi: #1}\else \providecommand{\doi}{doi: \begingroup \urlstyle{rm}\Url}\fi \bibitem[Gorjanc(2007)]{Gorjanc} G.~Gorjanc. \newblock Working with unknown values: the gdata package. \newblock \emph{R News}, 7\penalty0 (1):\penalty0 24--26, 2007. \newblock URL \url{http://CRAN.R-project.org/doc/Rnews/Rnews_2007-1.pdf}. \bibitem[{R Development Core Team}(2006)]{RImportExportManual} {R Development Core Team}. \newblock \emph{R Data Import/Export}, 2006. \newblock URL \url{http://cran.r-project.org/manuals.html}. \newblock ISBN 3-900051-10-0. \bibitem[Warnes (2006)]{WarnesGdata} G.~R. Warnes. \newblock \emph{gdata: Various R programming tools for data manipulation}, 2006. \newblock URL \url{http://cran.r-project.org/src/contrib/Descriptions/gdata.html}. \newblock R package version 2.3.1. Includes R source code and/or documentation contributed by Ben Bolker, Gregor Gorjanc and Thomas Lumley. 
\end{thebibliography} \address{Gregor Gorjanc\\ University of Ljubljana, Slovenia\\ \email{gregor.gorjanc@bfro.uni-lj.si}} \end{article} \end{document} gdata/inst/doc/unknown.pdf0000644000175100001440000032227213115346316015277 0ustar hornikusers%PDF-1.5 % 28 0 obj << /Length 4211 /Filter /FlateDecode >> stream x;s㶱d3 !@ :Mg.]rm9$@I%d] ٲs8 .$|W_n3L.&΄4N^.'L?t߿)L?z?tR~o}տ_IX% L꜂IHr5Y^[2Y³N|rG&Y.E[O>WII&*61RÈٴ~ׄ]adן:De=H&3 'i˛^^b.]5z7^~Z}W^idӭ^mu.w;F`,6y]7nRD'*I,R.3f|J-~6bW5#Ȧ"Q)8KMmy 4hCM[w)8kjR yknma4ӻgZgw2jSx| ]"-b'ͅVeÍ3jA*RWZmYI]QԹڼ"@wZUK9\p_f<X{LiCShn]u[E--\+XIzS= s9vznHIbZB&-gInzT6YԻ"bpZ0udq t ?)λ,y-l/Z {nip)x8u\']1rB1 }ӟ~~rE] -@N,ScL,6IY>ݔ1Y edSO7SQ6|ez kdP , A!vS.s\*ljGG~CAm@wSؑ鴽iE6Zl!Zo 'uM;(P)P6 i Ap#R9;<]:6mOtvZFI@ !`CJ3:K/Se0b`4 n4 U԰s>P[)=Ye'Fd#=#sw}sPF\}K*~2/ww 9yʼd37tjC܎Ϯ5, Azq1|gS;䒟xo?pV<8s!6~miʇŗu3GSu6EclhئN;vwĊ4||?pG%I$6ue$#ݐ'O 6sbY Л:ƫM^+ut9یd{?reqŰ9> us8w˦mER[4|%E%!ɩibI C-+ɧ(˞Ό,gم6&EY *2FXŒVFM&=Oq|[p ZkjC<2|v_)sNci X+"c>m0"&tg] kuFP3tuC3`0z0qBID2)CЉ0cD3D;5CCf{UN h=>LA?0uDsS^TuY*`m׬(G%7Gr3V~\Q 'G)fhE>,F*ʆ1&eUGp@Пr33GBPy`c;I?ˠU%kR@4!XpxN)+мz,6A3~AX".l"fe˲mwAY PI*0qrv,f^TQ-".0a7P򪹍g0v>G7sr't`K&f0J -ktu]AC4].r_BP[\2Վ 2u j|j8 ɹU`~ &LNPF(aX1:80]b57sP}&P#sE+vr (];f H)> stream xْ}Q[Gq*ɑlʑql?$DLk\JصTPsb}u+GHDHLk1Rfqg|]~{u9~x}5^?{嘇#o}ﯾz_sᷟxy}#1?ǟ ƾ,hkfFaY߽|J .R^vɽŖ|[[ɗ>-_7a׀MegS4*~i_\9|o{G~k;N;N,BOi=+o9<¿eiueOn9A16@5 r ܱ*"zi TLJ!{m=&[?ٛ01T׻ߨ ㎜3--')|w\ZB]W;5"~G{[}1p]ȣpsv_qHr*Uۂ*duLKl6?]&e ݻP3KE=`bP!gBI>s,'QLVkA@7;ab887O#luCxSڎiYڮŠ,Uj8I[e00ayJ]ŚJYfyjs*ȴ cDɒ&N|좪'xE;@L{#xOK깵7I*EZYi܁F}}^cbi4۸߻B8T"Y¨l[2LPJ EUJE1+s=w7W!@;p,||هdžgDvަk;u^] 8)"` 0btPH&R <׾H5XKf @A!dˬH]{,=s,vu1< uA$4fRz>8n&WZ$wY~CpvRk0@;N*4ɩkR,#]-y 'vtq~|IXS˫CuS"_ȸ(*/U*2VP Њ**hEL10٬# y7gna#C,S?\LAY5s=X/ye`">'xHL{;>2EƊ ) CA@qf$`n^/ [KFм-5KiFFgTGr%Beb9t2]drAßI ~#łfv͒[*; ۀX9rZ.}ؘ70OH(X3%S[3-.,S#"@I9>N0X'alŒFCݝ Xw8F%#hYf}LsPSҜ¦K2NA_$>.5N@ĵ XR\nj *fI#I`O/i+&a>ֳYZNل\&7:Uy X&T,yE6]PgfMk']VlJ g^sB-Y#4 :Ҡ:8~ԕg~8pVע@]\ldZslcŭS$T 'q]c`s8L끳P44D[o;ᨙUY ktu!)直R%9@m)?B]R;Pos tLĔ[$c5Lb3+jigln> CaA0gM#;:^wܗ8d^ВiPTBȺA oO;3O |XxGs*7FI4h+[ƶ[g(gHMb7z<^|" +9sCDMr3( >zf(KTQ8`+T" u}]lǵs`;w`] ֺ֌q`DB\\"omTH6'ytMd2܃5Ѥ^]R *ƂCHI6Z e¿s}igNIu7hz!ntȴ͹ۯ83sqô +ʅBYx5SvC^jrNNS“ V6L5;(RcD6Ӱ2/=u]℄r73/"$s?t&O,V) ͇Ӥ#9e 8kfhd$)$#!d7<Ǒy(8Z~`.': Iɠ'*?1{KWsaog.UW;=ZpB>MJ+kj"D!RyH3lG!F29엸C'T#B'pdL[``BPRƆPy?q?,ƌo&#Ĭ6[.Z@IG@&Fb76u'=jS pJX#_tjןL"Y=/irc,#w#d]j y9E#QYM:㪎dZOUJ0qw|.AX x,"'"}Ye63)W>+I[qH p * Dl wsAó ؃-_,>S`^d);@_jj5HÆ^R%f1i|s#P"vV ,1tn endstream endobj 53 0 obj << /Length 1872 /Filter /FlateDecode >> stream xXYsD~ϯP\&s-pPZ(Ym%:{zfZd+a jL__::g|(8KwH(y^ۗݟg8q8틫WW]_~?rqmÀЈ;IquVCPso( GFD s}rzGɸħ3mg-czv0@TPBGbh>ɩ1FC?yAĘA1H 9~ oQB^M] 5P.~/p˃~LS c>}'^k+ Q"%s<_:75y$N*o^%8~3Y' ''{'>ƽ>-1+څp%pZ<nM <4ktjei+l_շvT%JTICCqvUmsTyWp"PuvĥTk\IEse2Y"M+;El2k9,n336ݲI/Yp^(rCzNz*,/= Z i 遀s7i6/p^OlG8L 9:9`c}f0PoC3A%c2?u ?&pYž$)ώH$z"BI"!2I]5C58+۴^ljM!m!`F6.kĖ;o3^U@lMqlWpzy[Vy5Nf.λ!6jW:A2\JHqfaZκ;{ A=dWWqۥx Y֛ +_Pqu;\etJA]"$TS 5\EW07ƣG.t8 :f;( hI`VOԽxmqGxRAIޥuaLaڙu-[ыMe'*PkЫ*4^œ%O .:[v _/R[:K6LS6Х Ŷ*b, ^uE>cT@yn(#5'|SfF!k<[Qί.-x8nyRtSf>C\< y7ט!VTϜH_Bavh}ue endstream endobj 66 0 obj << /Length1 2174 /Length2 13065 /Length3 0 /Length 14368 /Filter /FlateDecode >> stream xڍveT[k.Ŋ+R]RJqR(݊{gwwd$9gkP3;]XqEu 6 `ّih4]m44Z`gk{و;Ab`sqظx@;? 9@ `vAwprrd:3z_Q;rA2lf`W AՑÃdl)Dv]`sJ ;?ͱ 4]V;Xz lqr7;  eG 03 ?;_ 33;G% PR`qtel] w-bW *?9[;XwȠ%.ȿ듰vA&;x[1wsdմvrJ "B#@ /`N$^࿔lŐ>|V~`  puv[ `nm 0[Z#-8[{?3_ͪ'-.O<>@3; @* /WY{ B?5tzRr w  \_d&)7[ۿ,YzBa7W:(:@jbEʺ k!jo 63 'bk)kO_4[{l*.ֿ3t]3\'.vCV鿳Jڛ99v.n YNs_|;B\ȿ *[7A|V*A, ^?qX5 H>AAl 2Ck ?r\'$-FxY! @X ?.,!X(-? HV^VKDf/Ϳ d2rCJ}.@ƳeͿ&qv/5f?jH0GoVG&;)'dn+_SdH!50. 
_ɼj0MOE19>-D:/.m~1SNP*`Hjnҙ.j-3uF^X6`յJsԮn+rF ƌ)_a@ɜPW˖w){~vo?d:"h}əd0a-fl>.E/.uh1̅忨 6hqNwE3XFT_r!i X4gzUو+9v[&w s(> 9psH!Ԡ3w(TKP{2@(7Fus \#5˯1XYAc,s%<~ChEB %Y =jUP00kJ5 ea0+֨dW3W|clUf`JZytQhUE^_!¿+Ήf&.As:TwS!iB;Q-JK-g|/Sʟ*Wv6a\Ȉ,ԉ$$&Oc8Κv`QJZ޾ $_Ia4=$Wv7jIa{B ?!dd w?7 endstream endobj 85 0 obj << /Author()/Title()/Subject()/Creator(LaTeX with hyperref package)/Producer(pdfTeX-1.40.17)/Keywords() /CreationDate (D:20170605172741-04'00') /ModDate (D:20170605172741-04'00') /Trapped /False /PTEX.Fullbanner (This is pdfTeX, Version 3.14159265-2.6-1.40.17 (TeX Live 2016) kpathsea version 6.2.2) >> endobj 2 0 obj << /Type /ObjStm /N 74 /First 564 /Length 2968 /Filter /FlateDecode >> stream xZ[s6~ׯcĕ`'YۉcoI&0-3Dr_H%nn:6ElrH찟~k;JO2 Ddd [PqP/mDurg*E lutL' _ ox~= QM}!ӫ=5S/ "֚$EEBT1V VI0J9q{Ӭ4*U4u_TeQ]F޺蓪"jP"|򫺑RQKV=3>+f9/y9:ڏ*O5(|~+^:+ox  ;,0akUѫPvE,R[?8x_@!g^%o^&GXO&YuM=JKnS^Ѥ:lJ=Ke筆3DaYW5e(t?:}"FEy_NF&P視 XwPO%:kNH]}/w1܀$[B$38.ś/VqZN-c :K n'aV86Kv' 2͖įA׭P^<ȎY8}N)Yn9tF|x_>-t7&n6[P"ʣGGϮrl͚PJJj5΅BɪN߮Y5!" @NEO{HՊmH\O$+yOwS*wvim_grTƘ@qqØin1A: čVʨbj !.Clw8o$:hYsYb\7&yF+q2Rr.np\z]>_[1jguwf6f+fJ@-â2 `'Y{#$F{]˚>h7_+F֘a/oH8F!KB 4L}\Kj 5۬ƪ{vd\өAtBs Wm zO6?z}6M -àbٯ@!Lxwq͗j_uwU # ղcc3Y ݋@f7 mmW%@˻ꆣ >xTwϚjv5 *}|0}jVDqu7Z(V^V듘v }7wuv7K5[քB#xɮ%ڰf>[ߣO:EfНI~Fm(8 I9\lijaNuv8Ο+|>g }Iٲ endstream endobj 86 0 obj << /Type /XRef /Index [0 87] /Size 87 /W [1 3 1] /Root 84 0 R /Info 85 0 R /ID [<68B9B586BDA34601CB2114117A596956> <68B9B586BDA34601CB2114117A596956>] /Length 215 /Filter /FlateDecode >> stream x%9RBQEs " `mHj`1@'4 /ɪ:u$)YG ܂AspC0 iAWQ$aR‚I)` 0 g0 u8S8y ؄-X),+ ۰dgC; ǰp%SekU1}J/^U֟W . ?RH endstream endobj startxref 107240 %%EOF gdata/inst/doc/unknown.R0000644000175100001440000000627313115346316014727 0ustar hornikusers### R code from vignette source 'unknown.Rnw' ################################################### ### code chunk number 1: ex01 ################################################### library("gdata") xNum <- c(0, 6, 0, 7, 8, 9, NA) isUnknown(x=xNum) ################################################### ### code chunk number 2: ex02 ################################################### isUnknown(x=xNum, unknown=0) ################################################### ### code chunk number 3: ex03 ################################################### isUnknown(x=xNum, unknown=c(0, NA)) ################################################### ### code chunk number 4: ex04 ################################################### (xNum2 <- unknownToNA(x=xNum, unknown=0)) ################################################### ### code chunk number 5: ex05 ################################################### NAToUnknown(x=xNum2, unknown=999) ################################################### ### code chunk number 6: ex06 ################################################### NAToUnknown(x=xNum2, unknown=7, force=TRUE) ################################################### ### code chunk number 7: ex07 ################################################### (xFac <- factor(c(0, "BA", "RA", "BA", NA, "NA"))) isUnknown(x=xFac) isUnknown(x=xFac, unknown=0) isUnknown(x=xFac, unknown=c(0, NA)) isUnknown(x=xFac, unknown=c(0, "NA")) isUnknown(x=xFac, unknown=c(0, "NA", NA)) (xFac <- unknownToNA(x=xFac, unknown=0)) (xFac <- NAToUnknown(x=xFac, unknown=0)) ################################################### ### code chunk number 8: ex08 ################################################### (xList <- list(a=xNum, b=xFac)) isUnknown(x=xList, unknown=0) ################################################### ### code chunk number 9: ex09 
###################################################
isUnknown(x=xList, unknown=c(0, NA))


###################################################
### code chunk number 10: ex10
###################################################
(xList1 <- unknownToNA(x=xList, unknown=list(b=c(0, "NA"), a=0)))


###################################################
### code chunk number 11: ex11
###################################################
NAToUnknown(x=xList1, unknown=list(b="no", a=0))


###################################################
### code chunk number 12: ex12
###################################################
df <- data.frame(col1=c(0, 1, 999, 2),
                 col2=c("a", "b", "c", "unknown"),
                 col3=c(0, 1, 2, 3),
                 col4=c(0, 1, 2, 2))


###################################################
### code chunk number 13: ex13
###################################################
tmp <- list(.default=0, col1=999, col2="unknown")
(df2 <- unknownToNA(x=df, unknown=tmp))


###################################################
### code chunk number 14: ex14
###################################################
df2 <- df
cols <- c("col1", "col2")
tmp <- list(col1=999, col2="unknown")
df2[, cols] <- unknownToNA(x=df[, cols], unknown=tmp)
df2
gdata/inst/doc/mapLevels.Rnw0000644000175100001440000002020513115346316015514 0ustar hornikusers
%\VignetteIndexEntry{Mapping levels of a factor}
%\VignettePackage{gdata}
%\VignetteKeywords{levels, factor, manip}

\documentclass[a4paper]{report}
\usepackage{Rnews}
\usepackage[round]{natbib}
\bibliographystyle{abbrvnat}
\usepackage{Sweave}
\SweaveOpts{strip.white=all, keep.source=TRUE}

\begin{document}
\SweaveOpts{concordance=TRUE}

\begin{article}

\title{Mapping levels of a factor}
\subtitle{The \pkg{gdata} package}
\author{by Gregor Gorjanc}

\maketitle

\section{Introduction}

Factors use the levels attribute to store the mapping between internal
integer codes and character values, i.e. levels. The first level is mapped to
internal integer code 1, and so on. Although some users do not like factors,
they are more efficient in terms of storage than character vectors.
Additionally, there are many functions in base \R{} that provide added value
for factors. Sometimes users need to work with the internal integer codes and
map them back to a factor, especially when interfacing external programs. The
mapping information is also of interest if there are many factors that should
have the same set of levels. This note describes the \code{mapLevels}
function, a utility function for mapping the levels of a factor in the
\pkg{gdata} package\footnote{from version 2.3.1} \citep{WarnesGdata}.

\section{Description with examples}

Function \code{mapLevels()} is an (S3) generic function and works on the
\code{factor} and \code{character} atomic classes. It also works on
\code{list} and \code{data.frame} objects that contain these atomic classes.
Function \code{mapLevels} produces a so-called ``map'' with names and values.
Names are levels, while values can be internal integer codes or (possibly
other) levels; this will be clarified later on. The class of this ``map'' is
\code{levelsMap} if \code{x} in \code{mapLevels()} was atomic, and
\code{listLevelsMap} otherwise, i.e. for the \code{list} and \code{data.frame}
classes. The following example shows the creation and printout of such a
``map''.
<<>>=
library(gdata)
(fac <- factor(c("B", "A", "Z", "D")))
(map <- mapLevels(x=fac))
@

If we have to work with the internal integer codes, we can transform the
factor to integer and still get ``back the original factor'' by using the
``map'' as an argument to the \code{mapLevels<-} function, as shown below.
\code{mapLevels<-} is also an (S3) generic function and works on the same
classes as \code{mapLevels}, plus the \code{integer} atomic class.

<<>>=
(int <- as.integer(fac))
mapLevels(x=int) <- map
int
identical(fac, int)
@

Internally, a ``map'' (class \code{levelsMap}) is a \code{list} (see below),
but its print method unlists it for ease of inspection. The ``map'' from the
example has all components of length 1. This is not mandatory, as
\code{mapLevels<-} is only a wrapper around the workhorse function
\code{levels<-}, and the latter can accept a \code{list} with components of
various lengths.

<<>>=
str(map)
@

Although not of primary importance, this ``map'' can also be used to remap
factor levels, as shown below. Components ``later'' in the map take over the
``previous'' ones. Since this is not optimal, I would rather recommend other
approaches for ``remapping'' the levels of a \code{factor}, say \code{recode}
in the \pkg{car} package \citep{FoxCar}.

<<>>=
map[[2]] <- as.integer(c(1, 2))
map
int <- as.integer(fac)
mapLevels(x=int) <- map
int
@

Up to now, the examples have shown a ``map'' with internal integer codes for
values and levels for names. I call this an integer ``map''. A character
``map'', on the other hand, uses levels for values and (possibly other)
levels for names. This feature is a bit odd at first sight, but it can be
used to easily unify levels and internal integer codes across several
factors. Imagine you have a factor that is for some reason split into two
factors \code{f1} and \code{f2}, and that neither factor has all the levels.
This is not an uncommon situation.

<<>>=
(f1 <- factor(c("A", "D", "C")))
(f2 <- factor(c("B", "D", "C")))
@

If we work with these factors, we need to be careful, as they do not have the
same set of levels. This can be solved by appropriately specifying the
\code{levels} argument when the factors are created, i.e.
\code{levels=c("A", "B", "C", "D")}, or with proper use of the
\code{levels<-} function. I say proper, as it is very tempting to use:

<<>>=
fTest <- f1
levels(fTest) <- c("A", "B", "C", "D")
fTest
@

The above example extends the set of levels, but it also changes the levels
of the 2nd and 3rd elements of \code{fTest}! Proper use of \code{levels<-}
(as shown on the \code{levels} help page) would be:

<<>>=
fTest <- f1
levels(fTest) <- list(A="A", B="B", C="C", D="D")
fTest
@

Function \code{mapLevels} with a character ``map'' can help us in such
scenarios to unify levels and internal integer codes. Again, the workhorse
behind this process is the \code{levels<-} function from base \R{}! The
function \code{mapLevels<-} just controls the assignment of an (integer or
character) ``map'' to \code{x}. Levels in \code{x} that match the ``map''
values (internal integer codes or levels) are changed to the ``map'' names
(possibly other levels), as shown on the \code{levels} help page. Levels that
do not match are converted to \code{NA}. An integer ``map'' can be applied to
an \code{integer} or a \code{factor}, while a character ``map'' can be
applied to a \code{character} or a \code{factor}. The result of
\code{mapLevels<-} is always a \code{factor} with possibly ``remapped''
levels.
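As a small additional illustration (added here for clarity and not part of
the original example set), the following chunk sketches how a level that does
not appear in the ``map'' is converted to \code{NA}; the factor \code{fx} is
a hypothetical object introduced only for this purpose.

<<>>=
fx <- factor(c("A", "X"))
mapLevels(x=fx) <- mapLevels(x=f1, codes=FALSE)
fx
@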
To get one joint character ``map'' for several factors, we need to put the
factors in a \code{list} or \code{data.frame} and use the arguments
\code{codes=FALSE} and \code{combine=TRUE}. Such a map can then be used to
unify the levels and internal integer codes.

<<>>=
(bigMap <- mapLevels(x=list(f1, f2), codes=FALSE, combine=TRUE))
mapLevels(f1) <- bigMap
mapLevels(f2) <- bigMap
f1
f2
cbind(as.character(f1), as.integer(f1),
      as.character(f2), as.integer(f2))
@

If we do not specify \code{combine=TRUE} (which is the default behaviour) and
\code{x} is a \code{list} or \code{data.frame}, \code{mapLevels} returns a
``map'' of class \code{listLevelsMap}. This is internally a \code{list} of
``maps'' (\code{levelsMap} objects). Both \code{listLevelsMap} and
\code{levelsMap} objects can be passed to \code{mapLevels<-} for a
\code{list}/\code{data.frame}. Recycling occurs when the length of the
\code{listLevelsMap} is not the same as the number of components/columns of
the \code{list}/\code{data.frame}.

Additional convenience methods are also implemented to ease the work with
``maps'' (a short illustration follows the list):
\begin{itemize}
\item \code{is.levelsMap}, \code{is.listLevelsMap}, \code{as.levelsMap} and
  \code{as.listLevelsMap} for testing and coercion of user-defined ``maps'',
\item \code{"["} for subsetting,
\item \code{c} for combining \code{levelsMap} or \code{listLevelsMap}
  objects; the argument \code{recursive=TRUE} can be used to coerce a
  \code{listLevelsMap} to a \code{levelsMap}, for example
  \code{c(llm1, llm2, recursive=TRUE)}, and
\item \code{unique} and \code{sort} for \code{levelsMap}.
\end{itemize}
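As a minimal sketch (again an addition for illustration, not part of the
original example set), the following chunk combines, deduplicates, sorts and
subsets character ``maps''; the factors \code{ga} and \code{gb} and the maps
derived from them are hypothetical objects used only here.

<<>>=
ga <- factor(c("X", "Y"))
gb <- factor(c("Y", "Z"))
mapA <- mapLevels(x=ga, codes=FALSE)
mapB <- mapLevels(x=gb, codes=FALSE)
(mapAB <- sort(unique(c(mapA, mapB))))
is.levelsMap(mapAB)
mapAB[c("X", "Z")]
@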
\section{Summary}

Functions \code{mapLevels} and \code{mapLevels<-} can help users to map
internal integer codes to factor levels and unify levels as well as internal
integer codes among several factors. I welcome any comments or suggestions.

% \bibliography{refs}

\begin{thebibliography}{1}
\providecommand{\natexlab}[1]{#1}
\providecommand{\url}[1]{\texttt{#1}}
\expandafter\ifx\csname urlstyle\endcsname\relax
  \providecommand{\doi}[1]{doi: #1}\else
  \providecommand{\doi}{doi: \begingroup \urlstyle{rm}\Url}\fi

\bibitem[Fox(2006)]{FoxCar}
J.~Fox.
\newblock \emph{car: Companion to Applied Regression}, 2006.
\newblock URL \url{http://socserv.socsci.mcmaster.ca/jfox/}.
\newblock R package version 1.1-1.

\bibitem[Warnes(2006)]{WarnesGdata}
G.~R. Warnes.
\newblock \emph{gdata: Various R programming tools for data manipulation},
  2006.
\newblock URL
  \url{http://cran.r-project.org/src/contrib/Descriptions/gdata.html}.
\newblock R package version 2.3.1. Includes R source code and/or
  documentation contributed by Ben Bolker, Gregor Gorjanc and Thomas Lumley.

\end{thebibliography}

\address{Gregor Gorjanc\\
University of Ljubljana, Slovenia\\
\email{gregor.gorjanc@bfro.uni-lj.si}}

\end{article}

\end{document}
gdata/inst/doc/gregmisc.pdf0000644000175100001440000034353513003720417015377 0ustar hornikusers
0dag[F"*5$a`_-rڳi| \rQ-o&%ΚMsŰD!TwP/@"٣دkq J,{zKHJUK0==-9  S)̰J$ɳAHk59vYgl^"#[j"=p2bfҰ`ixG;TduřP !\$u y2X`n^u.i5{P㺪SĤ*7m waD3%r庯kIYߝ#^(PH/&:=۾ZvK# x P FƲ(7]: ׊覱cSTs ϵ4oUyB;CI8s6|wDYz!Æ ,9-ݎs0L]_~悈CM͇pLK!EJlS78ćNHj.i_[}};۶F:Nd9:m (PU1x+Ԋe_;AD*F޵Q6RD0pծ)+xJ@\$DY) I;%O 0B6[A =Ddl>?zJ׌ORCDr\.etgR~G..XHgv%7Vܤf{,N&>Δ7͛O~!.b g;2>@ Q?C\9#[v9 Qދ/XzF%LCKN0QE;,tw |ilU [a^ŇduKc&QD#I Zb>lE8btE5Wm塂?؇T bÇ;4tִs1lYL>a>cro8P茛Ӝa~-q"Lk[:0(ع;S/fƋ"@%'PHZWba<#eԟktJrRY6bOX3EM5ڏΥ/g2h A9]}|Z╟>W/%_,ەvb/lh&) (JNn' %8P$ R>dmKB H+JJt8ɯY )%[z6N~Ӎb1Q|ۚNo7HszB)mbYN6?R$X଍8b &/mDJ 4<?*Jh<Dl77җ4M_;;BY)ϽT 9Ӭ fk65"a *1Ɩo~LnQq'b.~ >aNBI/."&* Wsqltn?1E&lHG5V E(iЁ*nDWՂ͡3CEQ?^dn9 {ju>XޖkАI_G~|OP: N)dZl-ecJkl=szƾ7ygGbVO)v>,JziMc2L ;F aዒ.`^`kϒޠ%ױsn$ӭ ^3bDhD_- Ř9"w(`]/P]498h9B 6m%8xSH.M+̄Ow3V=cGdi0rҢ3Nyv.0xEHъEo=]ہulIGX ɿѼ+U.$`|å͘c Sն,|2),8^J09|@u~28q;D/x32zי f=I+EuJS6&duQ PGVTBoi,rhnh{M#j=}x%#wΗxZu;Fkw*lOҴ9">@5:a\#DFİd3mJfa6yq Fw&5/Lc-$30azt!m u0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 cleartomark endstream endobj 57 0 obj 19638 endobj 58 0 obj 1642 endobj 59 0 obj 17464 endobj 60 0 obj 532 endobj 61 0 obj /REMBJF+URWPalladioL-Ital endobj 62 0 obj << /Ascent 722 /CapHeight 693 /Descent -261 /FontName 61 0 R /ItalicAngle -9 /StemV 78 /XHeight 482 /FontBBox [ -170 -305 1010 941 ] /Flags 4 /CharSet (/fi/period/D/G/P/R/W/a/b/c/d/e/g/h/l/m/n/o/p/r/s/t/v/y/z) /FontFile 56 0 R >> endobj 24 0 obj << /Type /Font /Subtype /Type1 /Encoding 46 0 R /FirstChar 1 /LastChar 255 /Widths 63 0 R /BaseFont 69 0 R /FontDescriptor 70 0 R >> endobj 63 0 obj [ 333 611 611 167 333 611 333 333 333 0 333 606 0 667 500 333 333 0 0 0 0 0 0 0 0 0 0 0 0 333 227 250 278 402 500 500 889 833 278 333 333 444 606 250 333 250 296 500 500 500 500 500 500 500 500 500 500 250 250 606 606 606 444 747 778 667 722 833 611 556 833 833 389 389 778 611 1000 833 833 611 833 722 611 667 778 778 1000 667 667 667 333 606 333 606 500 278 500 611 444 611 500 389 556 611 333 333 611 333 889 611 556 611 611 389 444 333 611 556 833 500 556 500 310 606 310 606 0 0 0 333 500 500 1000 500 500 333 1000 611 389 1000 0 0 0 0 0 0 500 500 606 500 1000 333 998 444 389 833 0 0 667 0 278 500 500 500 500 606 500 333 747 438 500 606 333 747 333 400 606 300 300 333 611 641 250 333 300 488 500 750 750 750 444 778 778 778 778 778 778 1000 722 611 611 611 611 389 389 389 389 833 833 833 833 833 833 833 606 833 778 778 778 778 667 611 611 500 500 500 500 500 500 778 444 500 500 500 500 333 333 333 333 556 611 556 556 556 556 556 606 556 611 611 611 611 556 611 556 ] endobj 64 0 obj << /Length 65 0 R /Length1 66 0 R /Length2 67 0 R /Length3 68 0 R >> stream %!PS-AdobeFont-1.0: URWPalladioL-Bold 1.05 %%CreationDate: Wed Dec 22 1999 % Copyright (URW)++,Copyright 1999 by (URW)++ Design & Development % (URW)++,Copyright 1999 by (URW)++ Design & Development % See the file PUBLIC (Aladdin Free Public License) for license conditions. 
% As a special exception, permission is granted to include this font % program in a Postscript or PDF file that consists of a document that % contains text to be displayed or printed using this font, regardless % of the conditions or license applying to the document itself. 12 dict begin /FontInfo 10 dict dup begin /version (1.05) readonly def /Notice ((URW)++,Copyright 1999 by (URW)++ Design & Development. See the file PUBLIC (Aladdin Free Public License) for license conditions. As a special exception, permission is granted to include this font program in a Postscript or PDF file that consists of a document that contains text to be displayed or printed using this font, regardless of the conditions or license applying to the document itself.) readonly def /Copyright (Copyright (URW)++,Copyright 1999 by (URW)++ Design & Development) readonly def /FullName (URW Palladio L Bold) readonly def /FamilyName (URW Palladio L) readonly def /Weight (Bold) readonly def /ItalicAngle 0.0 def /isFixedPitch false def /UnderlinePosition -100 def /UnderlineThickness 50 def end readonly def /FontName /XHKPLQ+URWPalladioL-Bold def /PaintType 0 def /WMode 0 def /FontBBox {-152 -301 1000 935} readonly def /FontType 1 def /FontMatrix [0.001 0.0 0.0 0.001 0.0 0.0] readonly def /Encoding StandardEncoding def /UniqueID 5021142 def currentdict end currentfile eexec oc;jtD[1ƅpTo9`.:ypJ*l'e}#)&7+/^ W{LZ60VQR^λ3r)#v$p~c&'ſ+ %;v &q?ZUE+dc Glt哏>wj,VRp^6>Z'h"%"{=p?u͓3(͝VN:hAWwwg`r==(+2$Q]qh1S=l-oȪkR܃_\n`ߟq+oEi蘯`T&1Sb; ml8gO<}y FGBу0vIox--X$D_٘Еc;ASxd_-o@#R!T3N_&Q7Lf yw_ tڙhQoZ%ߟ^R󑙙9u99 naJOIYuʐR%W/ڶWv#d\uD˜{l_1(QX`nA9>z<5g^ȯ n.T3a`@CPJ@ߥ-4%)hs.xI /M%`9 w0Jf1Rj^By3-Ƴ[Xv7q KuSv+ ]E)އ| dvȤ(w7xz%e4A|FczhV oP6PPx`:b1`}hFDa骫?4%P,G͊~/+DE?;FĄb7_ uhg18!î1aD̢Ť|޸&Xvm7ߝh@lydwj鼪mQb,m@MKporoo0+DUfqϷ6NAy-v%R둘qϴL|y'q s,b|PqjUבJ [ibh ι`2,6 }/NU-6K\v/J,`)vr6U&E=~™" O)luW&=oO&* FTbPbAAP,$'@Y R[D34%~Z419AgC nQ-M!A GA<8jaw] s:+kES = iEx5]|.2 x$,21׍Oێ^[|ᛩ=L6==G/OĖOo:<C=r{Fbz{`eX .!p7Eïi=P3 "+k+j'*<ߖCxyYWܴxBеJ0;$)fi^[JơF?o:dg )nG8%R pqDNa_]'1oS ("Ei&:w?1NKwU|_VրA4DCjM6)[ FxЧI(dO5#- ٠MʖфGJ=nLzkpA֨$a)Lz}~tiY}k#(s_mi3KgͫjUƺ듉F*_fi&`!PK_tE[w0V$ $|j3/wy "xxmX q5φHDs~N%f4Y3<~=?>GjVI 5<;zˊvCYka~Jk(֖;T#mUvjDՖ8|+n=WvtcϾiwU[]'1OD[`.Up\זhxQy~`09MU!x&׎')z)a Om^$q'uLeG'7ǮyQ׌푉Znr8 ε;:R9ZH5/B)58~'>zF")l6uTZ}oF̴8N#/iVWv[v@nhHp-`ޣvr@qUP_i yh,urzy U 4(~3" ӯg-U@:vfӗuS I͗Mݡ3Yp~IɌ"ymj .7)}*~z#M9W?1 ] 'Pȫ%6ESl B q ~JRKliGDivkiǵ1=t%z̳lkH\ƑJg؉UHEIa`3Z `Ox ߂r~ KrkžeٍȆŬ^T)Mdvκ^6>4xóԐ95ce9{6ř%1d|"TLXF_yj~<hF#+5f_'+*LwmCUǃ̺sӧ:< ֬&Q0uim=E#$ؕSrcn,$_paz<[ǟTP\ubu`V0}nqu{Mo MVi7,<;6/yLAzFܓ׹V7ڛl9_V/rO-k mBpĔ ҝ5dqe.nҕ}f0ȭf1}^MhT *Aw'CYّ;OZf͓I106r #Q=Q9.m ~BWoب.eB!0P'z)Ȕmq!~O\AgZ'bE&1$..a.oCE7 yJfK!5{rq`9kf쩱mo(LH%FB?$D+?-;y\3 K{G,/@Ez~> ۓ~P}sͪoaAx~h x<[w)4Ba:S+(t^xyH $롗ǃ. xRr>auOf A˧⺬22LFE[`re{: >‡KU, >Nj[PB0)quc3qiFm#8qBp=cfd[of3ma D&7Ӻ)QeO?!Z{} ~HaDlc}5%Tol'0$'<$R1 ̺`8\<{z~C=0xgwoV3ʞ3.OC[ls R"?U˴ANhQGmD2etąe8[I*}ҫlt h\v>OS?jv[lZ `Y+v;RL'<=*7_4MEs>7s֓\7?W5kN!qJYŘ okC-}c4gˈf?K89@`lJ[BmKǎ^#u~y.?ء<ɾQ_ vm؟JtZtxAO:YEdemE2_sWY~q.Z>LJzWNE!E.#0vrjIdljmd%~Ko; R S tK"?65k2BTTK8"5hFTRZow!/l 3K˪f蒲g"u'ԭ>r 4t;h. 
ȇ8xY֥(Tс!nK%z && !*sIa?;䐛E++5-~{ȉu^gəѳ3Z;fg!e':vm dF\w3hŭRQXY֣J`֟JrfGv ݜlZVIёPs%Sꍽqؽ~x)0>gI2kc7┘藸S#h}gWJOo ot 'Gh_,b8t02]^:tKq+i7\S`֧wZeE09FO!za9 `mVE̕"؇#>GI7vEaJ_xxձk0H#s ~' 2Kh䫽ku2Iƙ* _pb/^8Ԯ 2癩k=V?\ⱌ rbL)u9 0-Eϴ(ն2m0>S$7^RhgtXKEc|& BJWg7qރ[A}9K1Ĉ'vhXzs/i|B_%wh {ncEڈgڣ#Sƞ`YZo z襪ߠAq24_G9u\8jH0}L0+|1Ka:N:XTwS17{[>8qЎY/nR*Edl#gtrHJԠ{RׁZRBxSr]>`(cXq`OэpoSֵ5S|hJBvS'YuqK }rmdOJ"uٖInkPӳ6k|_1(x㻤7T\GvJ2 ocP}59\k-ˡ>ɷ U2~zfe'65 /'?1a1|&N߲̍ Cǟf4y_HA_‘Vz} PaYRPۑ08(2Պ*EW-b;z4s3 CP1W!^ӜN?H^p%-8H5ck3/wy l[XԤ79l*Tj6.ɊH;,WyXs{(P%{]3cĈm'%pB>`w_vYGzEwcfP3`(* {nx-؄;a~ P}`OC`(Lo_/M]YԿA]DCZ&GGes.8L'5*Qi6{km!. p/1 UQ%~82-ŏ9"Ϳqhqb P9ܬ0EZ,QAɎ/C>Hr /IJ2^eP/UV2IԩU |]SoչZ=' OnO>|G86!y[# C# Yv'qk甥{Eem`t-J!>xk%;HWӏpEmc;;98j!'F3ħS!> 院n=elRl%饕T@ ր O$FϩB +ڃGFmG2_7Dj_"\h^@fk9fՄe@Y?N=LUT6V}j-c|Ɍi걧wj`&/A4rq30F#IP/DrG Z6H^yN]HCZvRa2=jzҔq~X^2&  V;E=эH,p`XKL\,b1.3\%"nѤ`D-ƹ'w /z;P4ڬJ.E;7,1Jzco0YHIA -pltWMV>G!ZA-â* H`n!M3'E {Đ2sL |~v4i7e_KǜIsF QY~cRfj8(ŵBK-u! Dps!ʓ$ٜiol|BoFr祔 P|(0\>W:9ְ{Bo' S (>j>an*g |o*Mk%KGmk6W[|ԡŐdt9suiMWGe`m~#N1Av>&Pg_k_JMD|\7C> "v!m Q OPgUb ,9#NЀzޛtGLduh=o93BNB. F٪c|V"N{4feJ Pݲ u>Y4RSIW#D]G8+᷉:KU.!6 ~Fp؛u~`p#wHjך(qq,m p ",6D\=Ӗ%`⃞5w(P$:#l 4U{=9Gv^Nȶ#1'et5M".__t{hFK%=̚(q͛Z!0]TUvP϶4cM EiTegƷ:oZHG:׫Y"_u&DvP81yx8K[ L0ߜHԷ0¼pV;~cMjJpT.!ǘVrl~+~/i>Z^Ƌ<噌'_wJ:c:8[qE۸NS \8xt,ŷޕ K7+L yN8Cs* ITut팡!44QͅX[2#u..<2V>!:"@|R_ 8e8E 9$(5\}O&O+cŘǼ?=,1 KCm\IX>J#MKcƻl| HCFeqz-ZU49)ˡ0%xEtF%0S8\J󄸑'äh,`k%_8?p;[5uonI@ߣֽqr❸/4w$Gi$& b`S!%7 X.:n5+# ͘WfO5WЄ>gyfa>~l*^,oQ<R.7(9wv"mur9B+A 8Hؼ3) 1e0o&*kQZPɷ3FNWVn-{G!IfYW{ G㏘|Xi.~eUQwr=_CejOdv3dNl.GB?~XmAOFLRXf?zRùIcrs8\)`ZEKfMړl#@fl˭ffy]$IH삘({k輀]Rc98uON9O@tlFU[V\牋3tJ5 ;H&/GK(ʯtft1bMT]^ z.rPY 0  $ˁNDʥb3$Lyߖ:gkcs[yZs)QvY89W1<)_9y`8B:(B{Q\oz`<>j3%7yg^gDz,E2_ꍢ'XjeIߖܺT8kmG+\EW욡Q.4Q~ʲA±(ӹףO2[ OQ^h6N uK1 :kj}XvUo#U3ًVn5^gvY{'4CHe;c=e|sb;RR5qEx&)PӦיuj 0sm*k["V9OX'-1<0Mͮl~7Úoqa>_swx[^^*OvqTqI&ia]yI>ʪ)&D;ks 2$·Kք7m%U@ԁ!;` ,l@IaGtKZ눘$ּy:cq484SE333\\msMLX+2H?̠HS뚟I&ڂbGN\W&r5RN^Ϣ٥XNtUq YGYT]=_lFB~.$Z aזK B-}x@ WKpmrtE_tqߙx֓SV:b[$o\ " f34N zԨ\yM\#Z:X pztw=Ќx\VK{8fFBM$:~ p0R,pVY=6tgPA)j~&l1ҥ?5#0kGfٙ ΕܢO^VgJvhqke!uOɹnv1^NØzg'S;;efE՘qPې hn+%+ w3)Yf W Vv ^ke7%QQ9=.M|s Ÿ0QFoA4LP"ɋJћ/n 61@'$-*3G7l {$n)GK{ԕeqOz5_ۯ2nPetCdV0'arR,`O#C91DkIUȞ|m^w'x{v?GEm鷿TX\!lD՟;R7dk>"t+g`t PCyte]dq";NV2\I萐GqlP]'-Jr7A^Tgp"&Sv!ia)&B7 \z3Ⰾ+x4 [g !I0i7܆ ÞugSPvx@ WIhN%OؔUAPr ς ŘuY*I}®wݗAO0e$r: եtV8 0X5h5l{S.J-Eɇ;Cm^79Y;PmH&hkʃ $̓$\Y\eZ\b:T9/(r,8~$= Q ݔ;!#Ʌpݙ J֐dˑ'NO%X6nj-w]o8ڈǂ7yROF˩{<ڵe~2."a]hDG$bCbZB  n&#'e'pʃrA. 
L䵟ޒ-ɄEV%Ӝtw8kf!Ϲ.;TRà{9?Ee̎>ʫߋtmNۛ1N3B߽݋ ]; m/ξE^ L\9a6-ܝ<0KL&JH@%Dy*.ºYu1ؠbDܖn 9}ͦW!=ߪu$( 3hT һ $D !36fLQ0f b`*Xs @==r~ ϟLwF/nGn?(|\ qҍPaJكFj>M"sTb])D1gc~%qM^wT.ͯ`jE`jf}T&\ʀNqZ%yVh抈yP.V}] C271!m l :jJ/ ݀Vofya1 R㧶B &N/[Uޤu# f,7/ı@/ccn_EF^TFX6JrI j䭣"#r|wޣ8_RW#;:6>0ձ#өaZޖS\Z~"_v:37󶕾;ͫO%+q:JYo=#S7 .< W(0ͳ(>^9L*NmGD8>̽嵹~8?P]Ƹ%\{>]MV,gMI!G`mpX?w'ʵcv=KZ5+a uz W[Oͻ(DSZ orSI~voH O"!Zt*Ecӽ}oJ\\.1AyTGyZL\;yD33(ލ.WdsHuV ]K *W9Kޫ5d]{b:{_` ÈF29̪C~3ťYVۈu>+a5wꐨqtYV+4lCHK%l]s-rl'=2hXR@'Me']vc*քs9jPXҹ9sf ʢd,cx~̙JԽe"X0#Ar\ϫ|6 ?WT)Ͳ.,̈́ kTng*N4E&xB /ɼ%O'ehf$IIȪZyDQx\ 7TQʵ>L<9UgΦG^᪛&g9<P<ٷ QКVgQs$MښK,|6GroF6duW%Xѧ<'[+2㑀u=%W{ ʜ鞫Rk^?$bWah.cstn/,s?W+c^LPܬqBj5#BXEPUosOu NHY*!i/e6- `U^sriA;2q9gvHᙈ\Yo.8ƪfR?G9j4& T }hE" UgI@JX"1I6[ps鵈 L)/U\XUZՍz7 K{.<5 Ltpwrܮ), \EȰyꣁȶ)^]sn7VK\%4ȣ"+|?=a}U$ INO{g?a[YHB$.ozR5˿7adQbJ<( պvU @0h[98JA^Xj!x]jErlH1H& osvD ԰{Us']Ԭelٚ)53"~QuH|)ǔYFf`dg5\CiA/C-iE% "YO\(Y3Qt5ʢ\7fBwXK Òm|4ish%,ۄ@)>/a 3L4ʵ]{r Dfb&WG 4]kckdx\I?~H`ӄp ߷YJ` 'D!wZ@~n|ԈpǛ]IV- Y>[*C:Sx.LG2{ Cd(*[hzEgDJ/J%| bӌ \}qg|Wk5UG%nP~w{K5dmtHmZ R%{.ā*|TB9L% ,YcS%i!'0OJ6Vv }JD3o{kb@P%{|<\P+ՉT*%*DjJgR_x j._IgYLMNH=|!0l}*!YAB L``uё|<-ŸXCEd0.NR#5C I /"Hȯ氼["m(>T*8AF f1PQ2MR0LdTA! .Pݜc}Wl#p[$FYx(Af3{ 2? R> endobj 23 0 obj << /Type /Font /Subtype /Type1 /Encoding 46 0 R /FirstChar 1 /LastChar 255 /Widths 71 0 R /BaseFont 77 0 R /FontDescriptor 78 0 R >> endobj 71 0 obj [ 250 605 608 167 380 611 291 313 333 0 333 606 0 667 500 333 287 0 0 0 0 0 0 0 0 0 0 0 0 333 208 250 278 371 500 500 840 778 278 333 333 389 606 250 333 250 606 500 500 500 500 500 500 500 500 500 500 250 250 606 606 606 444 747 778 611 709 774 611 556 763 832 337 333 726 611 946 831 786 604 786 668 525 613 778 722 1000 667 667 667 333 606 333 606 500 278 500 553 444 611 479 333 556 582 291 234 556 291 883 582 546 601 560 395 424 326 603 565 834 516 556 500 333 606 333 606 0 0 0 278 500 500 1000 500 500 333 1144 525 331 998 0 0 0 0 0 0 500 500 606 500 1000 333 979 424 331 827 0 0 667 0 278 500 500 500 500 606 500 333 747 333 500 606 333 747 333 400 606 300 300 333 603 628 250 333 300 333 500 750 750 750 444 778 778 778 778 778 778 944 709 611 611 611 611 337 337 337 337 774 831 786 786 786 786 786 606 833 778 778 778 778 667 604 556 500 500 500 500 500 500 758 444 479 479 479 479 287 287 287 287 546 582 546 546 546 546 546 606 556 603 603 603 603 556 601 556 ] endobj 72 0 obj << /Length 73 0 R /Length1 74 0 R /Length2 75 0 R /Length3 76 0 R >> stream %!PS-AdobeFont-1.0: URWPalladioL-Roma 1.05 %%CreationDate: Wed Dec 22 1999 % Copyright (URW)++,Copyright 1999 by (URW)++ Design & Development % (URW)++,Copyright 1999 by (URW)++ Design & Development % See the file PUBLIC (Aladdin Free Public License) for license conditions. % As a special exception, permission is granted to include this font % program in a Postscript or PDF file that consists of a document that % contains text to be displayed or printed using this font, regardless % of the conditions or license applying to the document itself. 12 dict begin /FontInfo 10 dict dup begin /version (1.05) readonly def /Notice ((URW)++,Copyright 1999 by (URW)++ Design & Development. See the file PUBLIC (Aladdin Free Public License) for license conditions. As a special exception, permission is granted to include this font program in a Postscript or PDF file that consists of a document that contains text to be displayed or printed using this font, regardless of the conditions or license applying to the document itself.) 
readonly def /Copyright (Copyright (URW)++,Copyright 1999 by (URW)++ Design & Development) readonly def /FullName (URW Palladio L Roman) readonly def /FamilyName (URW Palladio L) readonly def /Weight (Roman) readonly def /ItalicAngle -9.5 def /isFixedPitch false def /UnderlinePosition -100 def /UnderlineThickness 50 def end readonly def /FontName /RIOVQB+URWPalladioL-Roma-Slant_167 def /PaintType 0 def /WMode 0 def /FontBBox {-166 -283 1021 943} readonly def /FontType 1 def /FontMatrix [0.001 0 0.000167 0.001 0 0 ] readonly def /Encoding StandardEncoding def currentdict end currentfile eexec oc;jtD[1ƅpTo9`.:ypJ*l'e}#)&7+/^ W{LZ60VQR^λ3r)#v$p~c&'ſ+ %;a~!ty`rcv5b/Wz#g|f SSC3R>Nn'Р(ead/Bҡ nKs w8H3ETzXHW}EpsgcxHSAB]& *P>9ݚ.TZw^q$S QgXuV,В"%_ /w!jTaJ@Pq[vM~ZIf 1N46{A";hdg ol^kiEbЯ'AvYqe}nPA 5]X]a-]3P|pyjEKuOL8W繢a}ݪ' ( 6֘sXUb;Q5ۗkj1t3Me:Ŕ/md<ݔ+ WZ| SYڸyz#d馊>RD}~>"t?۠}=@®φJJw FjZ+zDM[{Ix`&WA 窦ZS_ѥw9O1] UONq6NLCsSywNj-t͍DǞte@`lic?=Ks]9Ai>1mS擈c5 =>n~ĝx!WDXӳfu!H3U'`:;)F-T 4FO7 &͋X%Oup? pz¸M2(*ZrBYvT27Ujlyњ`VOevY R(~[JYi*?=fTT7Kh*0WD~6j1QZ{td>Wv 1[ rbK=륄fX.`Ň.(r ޝ %=Lc ܪ Pw 'h\efҀZB] gLҿ) {jֶnE%r0G>"֌ V]٦(rS 3 s᫒KTc.ɲUptr2I@z`#UZ6T,i([@s%1V1Ƃ٣p"c?k(΋]L4-uNq{ꣷ]{`\K‡\wX"ש;Q'P"(887BAYW,g|V.L7|ς}=2mU.|_;<;9P ,}?7 d7wt3(FW@2>U-ҟ!ώ^Qѱm2@b,Lqz6f&]Ym3HC:VQja />}9L*'󯙵>Gga`&pxepA~&/[Xs\. `%k#esE_Z_?2IۨayDc~{+t`Jͽ4mډ<<pJJ?}̉.EN&Nf'w5^M1jjCyX_iScLw'rib2;:plQ#ȓ>|+cPOy 5C_F囹IX:I+BGp՝BZ4+b!t9bBux ~?>;O@ ˫gIaNLN.-s\ёn<W۷.X[O2MZ=o,4Ř+ÜG#3nlI6"WP%Iү̚x(p{5@a@d*$$ֹL򾿑'іt vښ=mfn4g*WsIPwLߍֲ(89&]z݉qRI}HƇ@p?{U ~sg,Cnkcf N'_~l∣ /VVFa p&+V֪o=h@[v`rj@dyB@nE[OH鬧Mq&={/ȏY`Yo"]Jpsh ʕ}PT0y(Tyı ZLǹ,=;gkIK:I"~5u)~toE$! Pbϓw`b&yօ7yk. X5YTk"ϛ '+ń둜4r !EZY0ΚXhp /sʆ9--FUe˗ɒM _N=TӃ1066ե?ě{?rڀsAtP /CrathՐ?K@6@y8P}N_4Φ>k3O`ԜBg@{c ?D *L:7LZ g܁ d 6*9!1LK?M&B3tT $guE4F?%(b,l>AM1̖\uV+3<P ъJ x7 $9Q|t96>3*:]ϧf8sZzd%$ ˴^u,qY,i,Il㾅I *"0Go5u^RyiuLЌ]1E:g*Xf(l^v6 (~ #6<Xg5n/z865;y^@DީPwNlG%5AF,S?$d|A*- ;*%ԆG8v~@4c 0Wۄ6װWGg4IxmO|>ml+NŽ.}X6SeR8LԱZzoKBo4!V#, \!{{8S) x7rf{;@(0/%%=tvmo7!5uv}2z:PLUϤ9;P>Y_R)(CB~4Xzwc?FhaUtN Q B `Dp#C&h/QQ_F}ϴ%:+8B\~ "&O + 1u(Qu %xBn;I{$럗+z=`@4ĩN/}>v]0O&@3zsQVN[PBO8&F )K4qoڭlYd愐g=: 0~鶪=/Agl^%2''.b\*]jRLf'ה{vwn2cvf !/D z̮-s->PѰ5:hBc݋#G,lCuՉ Xv7p1ѓ"ioEoweRYG}"'ySQ>-OxN gY_rM4J@*< ׺C߱hDYH1y)z<bKabEvƍ?Te>9=-i?/9G6c~eK3""&3 ("̓9Qt,{)+c.He*2`2B,lkN Sxrr_o vZ8֙u{Cjr%"(px](kv}?ԊC[! 
97Ie֌,ƻWZqƍVw4˹|GO zR L6{=0r>y-i DUo1@/s#V+>edrueHpp7j]ߵFح[GLr\/R1A7ci;cB Nen wZ :3pVFQKWymV`dyq'yLV]KjegeB'2kzi&IT5oj!YYLa\JbKv+fIA3G`%R׋p]nNބ\g2ޫXnrNɦ8AWeԵM#@"Q#q=fܭ+C# .s46Ij^l_39 {PL ٦rH}s亻72- [>:q $I|y.>D&ECoHԎO@ĐqrB%Aq x]2y*./095PFWCRXPz 3^A6%=lJ#`LiƤkm͉0`ѯ]:[AuL\pV s9:fA^w2N -?+NJPx71>GiVb`D׎ς_R05;-K@<`:r eC]f*OP:P\?ﴡo}JطV]@ɦ"oEWcwv]T WɅPT'P*懜  cnfe*Dx}iST.ir0[ _F3MP~bb"l细< @ BW IUê7!k'Љȝ Yi'*c$coiFlQڌ*ip\*[H[c4]X)jơ/[ǾBQ\gur۫Xdz ۉ Gi%Y'C(^c8Pˁ*j4)~[ldlGv׉02u,.q I,# ³vLw۵UhB)赀 &ިt4 8|~ K B-~,S'L] #s\TNsŊFDQ<]]zPBb GzTTD '(iKs|eXm~{%.,f)}<R)}LwCZn#x""C8f[#oD#B/0ƷIY~&?$,>yewzp& 嘎DvI56Bv..c*q-z 웰j!oCuq+b^dt'SW9 19Q3p Ҩe6T!&C[>4)KzZ*Gj/c~b,{` {B+g|+ң (vi<HR9 I c 3_7}9"nZm)\GUdT$+@H`·ClGvar6VIAyИηZ=m)-0`qd6[cNMܥe iosΟ2r,V7 'R|G '>2Ӧ;m5S˃dÉJ40/0,XGJq:w8~AQDc>݋ZJ45KO'xFv #]T2簚nONS'iV*]\(2i)4k)BwxVϝdaL mɢĶӇx@OsV͢/e[[:n7m1E!DzJZT!}3G?ͽIq _?q"ޤQ WH ׋bylghiYM3;/|.k+z]EDk߸ ;%i˔~'cGWy ?>/Ro&ÍE5ᙕG38ZbC2ʁ9 ,][\xYY}<ʙiW]2uZo^K!i['U,Z+g˛v'#mi`- aMO^\ .Q ٮi}vRY %V @M&m{ǶSIߋ$$: ٩XD{*n@q-Ęzb"4\@>=/YR gY֒DEz';}\$7cԒ^()@x.Q)H\v)^B nX6SWq9Mdj0XFަ'yڨ= kW+h5wAl>dKiՕzsԫA$Ņ[ՏQ07f왈a`G&ΐ G{sjVTt=erjabCYʽkE* ;qԀ b6gS6FX$?'>"W#`Af/c̚rW <yi Mc_(/8gpO;2 +qVSV]Z͘;rT&Rma|gN\R+{xNOǩWXmBDKnԜkix56r(:$,6rZIvgW }oiZ_8RxɷT5z!b ◺Tu`cL38ǩJ\NI`Rmȅ(+.s)>Ek=Nt; ţpp|DnCa9:vh}u,ldii!pԣn4|9/~H`5CFy DŽe;hBm&v[K[v^3HqYtI^:Q.pg@:p/ܷn~!*%| qYh'Kڗ; ĵu Pgs3I;®nIĠp7L_ߛeoDZ>u~(96ieqzla~,b1'؏#Ou#of՟ab$"}3RUifG=dA=!d07a|2*ZF+>*4uo)Qz썣PQCIL(r#>=tk ehHnq|3B!bh#Ͻ-h@*A FCtzqIsfuу0 ,S\9>&2M)u>9Fz%T5j{jK!, FƣTGHCi^}IfzA<5Mq/[/D`l@8Pmh+!n?r? c{>2 &̷otTP _\hwv߷w+ z(e(xV*(Ŕt& >M@4l H˪ߎ:"1gJNUz;|Oi*qg0rՙʀI:b@WR r-#= 5 })I3I+ꂧج? Y jX7; h`GD(g(В{Q}|QVK Sr ߵ|R%b)E t`p|;vr{_ӨLf;I`|c2l7 4E)8VCq9цj9T<1fh#3l.RsyҟP$Nx油YVjufbˣhdSvpahWx(Et\I-~_x؀'>WxԄ12e\fx=PG5*M*)Z}gʥ뜲jOs1[ps6>MԆRɈz ;1:%|#W%X068.A_}Oh>j  w>ޠƮ 2u~/0+dF}L[Ȧ@ !''Qɑ q$i V`0.YR_gBW2]8Lƃ_K̏唍:AtGR~oMЮbyNǡpg{`|"me&Fk&A`/6?!oJVf !qta_\nHۅܣMnL}&x,3T8?$9( U|RovcIP;6Dzc렼$Nf`6 Z.8:|-k6+Jo_RXF'*!^Ah_CC [.`Ga3CE:8 2Z^ɮ1IiJˬcz%i4<=7A NG$ـЇSo4qil ܮiP%o>-]? mUKڠzĄMؿ3& S' GO]hcs yu&:G)+$PkXa٥oWb@HΤdRBZ8핣 }R~3ig}.7'e>S2*ɀIg?: Q+DtƒBw~@?}OYC\ Ox:Ba‘1Z=}22o٤*YuzΞ"F5u_s*j^/hMHȔɈiWuʧSi\'5ԯ1Wm:VJgq{jt4D67w>?p|a87|SNijyͫmy亍ŀZU778\(Q Cz|_rDv-֝1y!6Z9|ёY:Ӽqٌ}r1Ɵ%` |Ӑ\CK=6> X-hKz==6qj=3bU36;5Y2xZiLfdP_Zw}XהmX lnN:AdqDhQP6 KuLDqBQpDOZ95F֥ F8~0 O0MC%NH'zUL`&ȗꯑ;2 Toᶺ0._ ^>Y%%rBm%Q4K&Z7J`ؙ8Qk[=Q y+SH$V^=Ȓa(Dw5uHnxl^1?DdÄؒh{$6-9AS#u/ޯf@<^x])8^ai<95  .}wHlbSGka DK O .(Pܮ^r\s똁4y6*@U39кm TQiMBز9f8Sβft\gJ0M^Ɯpc|Y5IHSD[gnd $[|uZC\U|3Kz?ɚEvp㕤8 ]\u~`rn2Ty>YLN;? WΦ 8RqXé(Z,1(h>VlfV(; _p!~a CݜQW]s{]o֭W|@GI"q7}owQߑN8%CM^ s&!3: ԗja1O'ㇵtcoU6)J*|ٹIDP疗YP zU(ۇ9xr UgaR_o;̋A {$AdTMp5.iNt+ hQ=j½͘$]]r!r DDmhGs}N_}J] E$%N~zj[77>HOZp<_L;jvZ$* VbpK+e.p!t`7 ~aO _fV@=z"x#$޺!YMLU76R*42$O2a&n `, KBmDdXpaĘ8(@TT<1U[t_3";4m%rN)Ӝg#=sT+k̞4{⿆ Rs*CVE~T-v.-Rxr-UOCs[}# oX擡X0&}7/uʪ&26!8qc&_5Ċx_ʒW#jz,D#<-d[OW Q(Æ wڡ8M&к) ڝNͼߊ.>;7~WW?C QL(,M݉;84=YٞDByS6usl}'{+zbK-H٥Ƶs1ubڲA1g>݈*zj?5%!$ u 5|}$RιA1֬OO\Ȝ> ^;#㊲:~/ŧ8(y`+v *0d+(:X=+ lj[cU,Hfפ7FElӔ{Ӭ7N{U!C˨*1Hj,ߕ(wWlJuƮb-p@9Yx;M-+Livr$c*>6pno.y0C^A3ns=*8p p`L.r2k=*Mϸ)Q3N1tr1Ċk 7 rr= 1yϟ9xk9O4ڑ3~?Y_8W_ux-Q/@2pP7a;yih0Y^?D.}AٲS.^XJRJJL3a"g#Nx*::pT)!pbnF`ZOPt6Rƨ( e5\_|+Va1p jijykY2Kp7ĩѯ;q{|3f$>7ZHtQLdN| I . \*Rx XGzSx=Iv6vŴ>L"s c͘c粽 '>"+C8`Wnֵ`;۬'׎c^`a.9 2%шKÉZ1;1!+6?ȁtP}Lm Yj|V̀YtWA^x1o=)P8&zplwg窯`5O/,^T1|Ue4dq'%ʞ Ѫ xmMMF 6oڷ]63 G]ze\Z{< 0ŊZhu@xgC.4:!HvFzvmҗүE-ʄܞ \RB.[(ڰ( .*sh sP}9Q%J8a$Nb]i7RJpPؿq^ĹJnC(",[ȩٴ K#iT;9.! u[?  
OYb0D>qT@O]_6lqas:=# E(K}$z[^v8 ԰XdOD+ϤS7<Ѭͯ \7TP"bd͑`Mnүay}z;$X4>il\dREAffjIn|w: ZZ~ĦX)q rۖq;/.AW0Zxd Ht.=!v1QHPֺ[vjew `BO0Y#i};!WzTJ!8S6ШY#NPm$"dh]% %qJU#}f鹤RmͯvVba@4sGrIS^c)n2^,5*\ljJ힭HQB:KבW k @YK.f+Xu#\Np"Le~ Z3f _4tgވ:Ik]|??`़0ƆgݥuȞӂԫBp5~;#FN/vd)rv6776#ږ,.v|g[?sDNΟ]( Mɩ$GǟDr 8Sp=|!idƹ.S_&@̝qͨՂA(HnAy/*NԌ 'TJQ\+TE:Zzӡ삂y^%#FT-a Q d{k7֧vi)4w5Z{9A?lAE.+H{սPܾiXEYS=\mM8IEUx@E 5\'D)0 p&=b7Z[k1<=3ztɞ ܓ|J GUsanx~\ԵѤ3aY#ޤQ>6mV%¿y7cs@oKF,vi3h!#zrBu{RT |_hHvy C4 ƾtiYD@K|0 xh-d㋥ťiJgM"1?B1rvqђ(n!ݱ9 ?WFe|Δ@L#wa9D176ncZ%HfK]*G]clq Y: |b1Z4 x0u'UhpZuhDܹyC^`"14P%,ZF^/T|Xĭ;;= QiM J$  B)pI Ыg/^C`z}Б/Pbh엩 !6d́(j9 NKeeT2?J,+-|d1^fNuAD v)V7d(/7pyӱ|91~.AC;]w* ..4-|ʕU)ku5B3 t=/|\z?S2*ɀK2 zEb˰\΄QĬ5N؟FjMZWёiѢ4<݊Ft}sk}Q7!v!bQFCK 4Ƶ-~~hZ|T #XC% 5Nt@ ĩl`n>4t|Nr 9IjPU6Rojz$XsEw3IYiwKv@Z(w0`QCh)]&#!]pUx5{tYo5M8dS+"$O܅Q^$B$=]6l!v%3 ~զ}8+)hPL҄4D&MQGQ}$I`R⬈\x|7F*hQkܛn_ux Cxvs(R3O^V [:&BC*L"s·J~ P &rDGX :gljl67NXO7+:e+||t>ڊ\g«XR38#wZOc~q)* e*u-*r\>%!VD+6~)Nf#6.JAmxU1ZjY$ v۾5xy]=?YVJ͆7,ݞdN 5x$Xˀ|I{+cP=y!,C8l!4!iq>? (q[ ]i s4N@^,AI#w#*A}(o5VdKY}^&:uP9ȸ\JZ͡h@^on-d`C`bN? [ff ߰VH~ X ~i)̹u~% H[46-D] KTf: P%O36~m:X6ֲ}lu|~Y&T4&$_nP(4W4Py6'J#uS](xK7o5_Y>=TKMcfd~A )A4.K뗤hQ6UOkju9Kt=X{ivJ8EХa{f mWF9Hmc./?=i}jx3jy8ط 0Vr[,#<M*Gy500`ţXײUcd %"a7n/. hd+ӷQj^o@0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 cleartomark endstream endobj 73 0 obj 22071 endobj 74 0 obj 1627 endobj 75 0 obj 19912 endobj 76 0 obj 532 endobj 77 0 obj /RIOVQB+URWPalladioL-Roma-Slant_167 endobj 78 0 obj << /Ascent 715 /CapHeight 680 /Descent -282 /FontName 77 0 R /ItalicAngle -9 /StemV 84 /XHeight 469 /FontBBox [ -166 -283 1021 943 ] /Flags 4 /CharSet (/colon/A/C/E/F/G/H/I/K/M/N/O/P/R/S/T/V/Y) /FontFile 72 0 R >> endobj 29 0 obj << /Type /Pages /Count 1 /Kids [9 0 R] >> endobj 79 0 obj << /Type /Outlines /First 7 0 R /Last 7 0 R /Count 1 >> endobj 7 0 obj << /Title 8 0 R /A 6 0 R /Parent 79 0 R >> endobj 80 0 obj << /Names [(Doc-Start) 11 0 R (Item.1) 13 0 R (Item.2) 14 0 R (Item.3) 15 0 R (Item.4) 16 0 R (Item.5) 17 0 R (Item.6) 18 0 R (chapter*.1) 5 0 R (chapter.1) 12 0 R (page.1) 19 0 R] /Limits [(Doc-Start) (page.1)] >> endobj 81 0 obj << /Kids [80 0 R] >> endobj 82 0 obj << /Dests 81 0 R >> endobj 83 0 obj << /Type /Catalog /Pages 29 0 R /Outlines 79 0 R /Names 82 0 R /PageMode /UseOutlines /URI << /Base () >> /ViewerPreferences << >> /OpenAction 10 0 R >> endobj 84 0 obj << /Author () /Title () /Subject () /Creator (LaTeX with hyperref package) /Producer (pdfTeX13.d) /Keywords () /Creator (TeX) /Producer (pdfTeX-0.13d) /CreationDate (D:20020325161700) >> endobj xref 0 85 0000000000 65535 f 0000003244 00000 n 0000004562 00000 n 0000004649 00000 n 0000004749 00000 n 0000004990 00000 n 0000000009 00000 n 0000114054 00000 n 0000000055 00000 n 0000003129 00000 n 0000000117 00000 n 0000004932 00000 n 0000005047 00000 n 0000005104 00000 n 0000005162 00000 n 0000005220 00000 n 0000005278 00000 n 0000005337 00000 n 0000005396 00000 n 0000004874 00000 n 0000005455 00000 n 0000000166 00000 n 0000003108 00000 n 0000090243 00000 n 0000066128 00000 n 0000044873 00000 n 0000016512 00000 n 0000008578 00000 n 
0000005597 00000 n 0000113924 00000 n 0000005729 00000 n 0000006272 00000 n 0000008272 00000 n 0000008293 00000 n 0000008313 00000 n 0000008333 00000 n 0000008353 00000 n 0000008384 00000 n 0000008710 00000 n 0000009242 00000 n 0000014396 00000 n 0000014417 00000 n 0000014437 00000 n 0000014458 00000 n 0000014478 00000 n 0000014509 00000 n 0000014724 00000 n 0000016661 00000 n 0000017653 00000 n 0000044376 00000 n 0000044398 00000 n 0000044419 00000 n 0000044441 00000 n 0000044461 00000 n 0000044503 00000 n 0000045022 00000 n 0000046015 00000 n 0000065755 00000 n 0000065777 00000 n 0000065798 00000 n 0000065820 00000 n 0000065840 00000 n 0000065882 00000 n 0000066277 00000 n 0000067272 00000 n 0000089857 00000 n 0000089879 00000 n 0000089900 00000 n 0000089922 00000 n 0000089942 00000 n 0000089984 00000 n 0000090392 00000 n 0000091384 00000 n 0000113557 00000 n 0000113579 00000 n 0000113600 00000 n 0000113622 00000 n 0000113642 00000 n 0000113694 00000 n 0000113982 00000 n 0000114112 00000 n 0000114343 00000 n 0000114380 00000 n 0000114416 00000 n 0000114588 00000 n trailer << /Size 85 /Root 83 0 R /Info 84 0 R >> startxref 114793 %%EOF gdata/inst/doc/mapLevels.R0000644000175100001440000000346013115346316015153 0ustar hornikusers### R code from vignette source 'mapLevels.Rnw' ################################################### ### code chunk number 1: ex01 ################################################### library(gdata) (fac <- factor(c("B", "A", "Z", "D"))) (map <- mapLevels(x=fac)) ################################################### ### code chunk number 2: ex02 ################################################### (int <- as.integer(fac)) mapLevels(x=int) <- map int identical(fac, int) ################################################### ### code chunk number 3: ex03 ################################################### str(map) ################################################### ### code chunk number 4: ex04 ################################################### map[[2]] <- as.integer(c(1, 2)) map int <- as.integer(fac) mapLevels(x=int) <- map int ################################################### ### code chunk number 5: ex05 ################################################### (f1 <- factor(c("A", "D", "C"))) (f2 <- factor(c("B", "D", "C"))) ################################################### ### code chunk number 6: ex06 ################################################### fTest <- f1 levels(fTest) <- c("A", "B", "C", "D") fTest ################################################### ### code chunk number 7: ex07 ################################################### fTest <- f1 levels(fTest) <- list(A="A", B="B", C="C", D="D") fTest ################################################### ### code chunk number 8: ex08 ################################################### (bigMap <- mapLevels(x=list(f1, f2), codes=FALSE, combine=TRUE)) mapLevels(f1) <- bigMap mapLevels(f2) <- bigMap f1 f2 cbind(as.character(f1), as.integer(f1), as.character(f2), as.integer(f2)) gdata/inst/doc/mapLevels.pdf0000644000175100001440000030575713115346316015541 0ustar hornikusers%PDF-1.5 % 20 0 obj << /Length 282 >> stream concordance:mapLevels.tex:mapLevels.Rnw:1 51 1 1 2 1 0 1 1 6 0 1 1 7 0 1 2 6 1 1 2 6 0 2 1 6 0 1 1 6 0 1 2 7 1 1 2 12 0 1 2 6 1 1 2 1 0 1 1 6 0 3 1 7 0 1 2 9 1 1 2 7 0 1 1 7 0 1 2 6 1 1 2 1 0 2 1 7 0 1 2 4 1 1 2 1 0 1 2 1 0 1 1 7 0 1 2 17 1 1 4 9 0 3 1 6 0 1 1 6 0 1 2 10 0 1 2 68 1 endstream endobj 27 0 obj << /Length 3580 /Filter /FlateDecode >> stream xَ6XAkE&Lf& 
f:@&yPj[;oc&A`ijXl5f\[Ͳ03̮ogqZe$Ԥ}˟~˹Qݛ?%o/R?_~ś^,VaIۄQ*gߣ};8KgjTifx&I&i"pnL|+/jS˜| F23] nZUaӾڮ;| UO^-Ʌ c`8EW֕m@[=~, m艀ȼf_]IKdtY6I?O <ݶ'J;E 芦7 kʢ^-ʫŭ&_}st/N|a"mF-G턫- i U*U3$#\D2x~v# ڷժlj 6凂!f\EiЭ| pP+n[EYTvزm} *8i^1>RN1D7K!D{_Wyu"oJ{&omӟev h\^rk#^JOYîtwp]-Z X2n> n Ѧ!pՠ8#M^r vW,J:jxXŦ hOnY|Id<]; hy|C0l) >zJl' H1bٳtR6',^wo$NA5h梖Y'[VX둰J2Maprv}w ;[nk=^eLb7|"6YؔV MsōM z$% j@J([eBi!ֱnIAdB2C#, qL$z6r-ѿ/3 S(R)t!cR]d5k:ᝥ t7&6EE}%JBOgAGqx"KDfﻘ~UTES.2wYZ>"8T3IߖSMkH _Y<>Kz+8Y*]pmo&)LcO[$d۶`Wݿr*{Y) ^J$"tC,AvI1 ene[" I(T?>y 'N}ڠ+},؂i稊 g݅c{$&̵z jVUt\IB1o:6-_N0~hŝ O^W_Xaq {S{3Ƚ& jtlNPEm[ްPsY fJ8٦DHMN{88SxlmL7>ܫWe=P>)}<jlx:E3(o3BazyTZR72M/ʑ j<˘ B% @: t鈷%h,͹0̓cD=,Ą\r/蚏Cʑ60TU`]u} V˂ [lbnh$1CάIOL(񝯀|wQVбEڞ'?37)^wF)SkN Aik𽇖w/׾s;ws>wz1i!X>䜞I{9& 哣`)5+Yp(@r beQƺ*d'X3־tM^r7  -ڎ%]+iX: P7媤cM=%J:OLD[TZ7"o9Xi[\h.Gaͱ#ST$+-@[Udh$s'CӤ#`;f>!w+b';{RZ U3P>'G fmԻ.ozl舜ZvIG''h!(>>{A8r5`DpDzPְcٚ[ ' NH8ʼJMᤦ^1OY6?e<; r~*[+zF;Xڑ9J{s/~ ߛtxWV롇'M2U $β2pozNYg $X˹/FTak(\fUF+7dI] nn]/WumJۀC@,rlR},wOL~/--*k҉J 0fQowu|&GcZe O9KiO؂sկk-;wcM>2o QCt{J sDgq'9e8-iGfΕq6( ٕEG!0-.ԲCN 4ݎׁ&]`(ϥБZFo$t>iܚ/tx ^pc/n8K>^~=Kz ˢFiʛ: 8KMYaȁK|A$2ysϕjQ\6hG[|R GRɉYpƴY/Rb[YQ>"K&GW*֬u}]:bUߑxkNZd9  #)%2o D #fpol&6e3 -Ar82)$]%l~\ܐOabyiXBM"Tr^xn'֖`a-"ڒ|z[-獄>vK|F/9+U-jaF2r0_3^mUYs͟yƃJN D>C?D&ZzvMݦt*"t÷)%q(Rhjшq!4vX3^2(38݁({YG=68a@ZfgXQL7-}QXPC/3i/rV% ujP&:yٕ@yǡJ0k4 !VBkѴ{,3ŻWI&ioɦ endstream endobj 46 0 obj << /Length 3425 /Filter /FlateDecode >> stream xksO/L\.It|%R*(],&KX,P4EN/N~Q’XģH+$bdlllG?}:fӉ0*2g?`z3|G5N8v_/?y{q oYdh>5Ďnݛ둲D;М).h8h!ǿD::98A uo  m?6ݿ̷c9   [G=džptX45oyDkƷ+3j"cpH5LT;6΃TS- 80R]4ρd|Aˣm,w ˒`t++drz[܇߻2B.]M6]uह ``~7*xl#5ӜW_ w1^g9<%,zݲ鉣IRe,4,~G)FܴVhP@Z&J1qe=Ȅ2d_fZn:1L=" ࢘5.}>E33=ٛ2N[;ivd[.[ovy4*]~hMot 1.yHL Cj8<=<3!^Gו=ڼϮ Tʚq%]oV`峒N#b[҈H.i*a<]u3j% ϨVXyFe>tHg 7t4mHT_|uLpX)P„@;X%>afb\\9$A,Kaj J:#C-q[W3^_ n})z۰9o=kz:z詚7GyG}yZo%!ڻ}x$/Ccf~Qleq _t"݅.1,!4Z-ӅM<. wv}_G]ҚwJ&B@LB琧%5k^f*RƖ>+' u갆 >h |V8{޿f,ː,P3TyAt1Vk *u.>}E=<ǣ}< Gۚ}ۗ;Z}Z;B!L8|G9Ҳ\^rNHkFp\+ms8vEf:(gk$# LvSŶ'.˕Pv:a-0aHw4:M/n>Vd:!vNUq pk+k)RS;QA#nl]čڄD8( )1)gQ"+~p69`kUSqȞOZT뻇c2QDFHknC7i&ʙ5JW%unKeԱ2ӦJ} m!XE~ Vh4aV~DGܯPKv]7/b>tG atuU:byN&,)A$pO2 Lfަ*3L.N1F5R  'zN+ oo&gY2FWE)af#@Ђt@#j- 1%_1n9hp[ÙJJ֎BGj,3\܅ggSeѽn6=SY+x坏?mYs&"z ʓ5 룞Oy`XXHyvcKxu9\YX;z{bǏq].%=8Q mZh%?OxCWM0*Rūv1Z'0cgz}I&W5#^D(H!J.F#h(9cI@8 N p2׍(CE?%qbœ>ўt֜jbi' 3aړ ;ÓUcݡ[ H!H9R?:5liA)Ƽ7>EԿ{}z{8|8 qEyS \:Tr"eH;*- ޒh#nW tL@e&Oe gfؾ:A3㮻[6ݹ 2ۗ<284̰%\>"nUZ u! `QLrB3aݡɣ0b3~ ˪S6E,~J.&F*˯1K$@3ƕ|af4[V,"$@nh]Lt+I{]r7aW.n]gMcFi8׹vBz@&tΖx+d.YJ,fXgK!8n!8;$d0ͺw2׹ BV],puJu7Ͷ*Tӂtqhe'?FuO!nNseJwUkL נL/?`'8ӗ8htyb]Vҝ6BE;-j_]Grp>G7~"WfW]xp,r(`j:nlQy6홶9W<|5n'tӍ/i&\0ͼ7,Dz6 LKwc Z(yPiBO@N Ϥym峹%&pX >SEFU:Lq\z Ex0%'L8˸um[xhC݅1oi*IWmT#anϨPK}_| 䛺TKuvivVmH|^cj…McudZxU$q :0E55F/)*+q endstream endobj 50 0 obj << /Length 889 /Filter /FlateDecode >> stream xڍUݓ6_G9t)qڸws3mN&0B;N&;+_.j[ݾ4T+@"I$N4MTBv9wooH(*_[ f(KɚO ':!M cMޯ\#4J,_qʘWT2eW3'r)#cg~Ok3_5m ULgiD¼9meZg=˹t"wۢ|xE[E| ߀k&

wԓv]?":2D";ԙ<"p?#襩g;F:!N_,b@J8c yɅ+/R(DE> stream xڍveTٶ-Na-S8- 5 {s;}{?`Zs/_Pj0[́2 'wfv6&;FǃD tu9 # 4sۤT%େ#+`c"U eik Pb9ݐh$A>6L@gA`e8@jkaP2s:3Z94@@w A'd,brgxٺԁn@WO%we3G?ͱ 4mlviܽ\>d t4*@Ɋp9;_,,@fN>N+[ @EF۝ `dh74u03* #0,\mXl~w; xN GGl]@^N~V,=Yl]_6= bJb hJF~(BRQ u<>J,Optǫ9{L3 ,d!Zy}O8 晓5g;;nJA^dMSJ3g3AdSF5/])Fː~]νQaE h_ AtCVtÈ"[dV#1Fdz0(irfԮ+,*mob?G {Ą^clz+fS<;zQ>ɊZx6UqvT{M3yjQ-I(V+44~ YEvJepwT'q@2^޳U!B/Y*k]eބyE\c3z?OY|p Z=J|PJb!4tpQ%r(^՚\t|g Yc6Ki-{e6ܥGoǖ}_.J]DF&|%3{О}ڸ zi`sC$:G9[F`/fz˘ 9XZ6̓A(aȑqk;yTFÉ$mϰIZ<TM :dܾW&%+~aD7eEUhBp_b03&]-D\ =nO;a/'\,e5ߏ,g>'{&K[" bu}(jF" 4as3 lyxs爥!2tV=JR%@(O1f[]D͊^U4pRAbЯl/!Oʒ1tsss|B.g3bg $l4YۻpMJDrzɏxگi/H8d/0qLiAُؓE{6Te#LqM1cE394Sf/_i<8Qp`L0؂Pd "pqҖ$ F P kuٙJuUɑEIe ,pʺ=MΓQŹYϋ;blr^cO**h;f:Լax]: >^p(Ut*cZߖ#ܨ' WZ/xj=f/SyG~[s}OY$XEjL(0,yL\տ"FN_G□ֵHok]>m S'1/DR Ж|`QճCri|BCx[Taud*v6's2GNK. NFʆk8)x#'YwF3{hw7XdP^!D%h=7GԮ';ǒag~㾫TE{]6|_qiQYLybg!nd [(|3G'8S|@RK{q|=r[j)bZjH cud.k[ٺ>%ErĚXn+J~]:kZ $$J!ٲ1(EYoHT\ҳxcb6ܾ)ڢBH^ c~"4P7yi^̿n<4L~±/,y 1l*'蜚uHPD'XRyΎ i7;LJVIF|bcPy֌?i/%rά7:z<*8n)g[+~.?'S\%<.S<E<+7SZ]]z!{0;KQ zH)00T&W_:^H²g6TdDž`'aϱ,ۛLQc ^x3>B qG̰K4c O:qOS[м{ jQ/krvd_oXYMkzJtG WXOm"e66g3\l^g'jHܞtag'~30ixCϾk $,>hcǼ0S[jedgC5f8ʦ[^6"eBJ(8n3Iiٜ^A}w#~'ߩ`=F_qʖ4}CqsC%~s!O[ԻId59όoZ@QkFW->0Lو ׍}TAc,psNC}&+!W6x|3z4>h˶:+zj"91+vilB>Nyu|an=W(| 3L^yyBT?3CzQX P*=#uc-a NάJYo.|Y(Ow@}1%XJE_E_Sx7ZPkB>OtP-bt)v#DaҘa*kiqX{ ]A3]k>G ׸_1ėϞIX2cHaVn|0;߉d8 AqX 8 Q%2c|Oa=4ھs c$}K2A*ROX\G.aqGذ{sKbeuֱhDH' K̻֞@"NAy8P^cI5%q2/S^U up)!15Q~|2yiPCGb<(h]VL'Y|؇MnG挊- LҤ wK ǴNfU{~^:^6BMIl/4AFPH6]Ո8#͕=b; vLD)8sAgu,s &C_k;[BLVMOJ;f&ns$dDOÂz+髁ya}Z 8YZFג@$L|PZSw.a36v㲳[RDѽ(255pM5 ]ʚ ˜5jjgj~teNVvng=z zWͅ.(73}B<~y7|{ۅMx6Ol8ȏ,;ZQt-#w Pn6G'DD|Т3b&_Ŭ[;ayF61jtP~`*]a:cx[LG_jQ?`>gX<6_(3(/c.92[{ޥښ!sK0#ܥ|%x@TjiS[uduX쯤F8E̊",zaGta[ ^.*[9a6zT^jv=5`9 @yn]֕^(/Mg6w\o|Gu ןoʌ8ZYHP"S=߅lu<0dܗ>}ttջtّ2[(^̮ dM>7]1xq^џpgCϹsirA6)nQ:*> +e{ YGpNQbN֒m_iCf2hXEB?cC{CgRpK=F̰?0l[uWXuy',vO4~py#El+eJK>̏4ήThSB{ YSoJ>Oj9s\N_4̯k_!HWYU5KsY4'ޤ-\a,_r~"irSPfZB4n.9@/ kl3<I(lsL.hU@jYio< 4ఠiGf^_VUv)=Sg4:201Km{'sWbNM%aVJ UC6λQf,]"CTnF9k *9 .M܌5XG5[d7s>店4<\&2٭LZ@ -"tb}ۥo([~&y/U5U؅G*7x^hYf=UQ 3"%J4=֛5I{1:ΞFFn&6n9PQtu]5&̩ˍx2?`Za!18՚*Rj j~'uZ]+lGbuldpf]6(f;hoBL< kgzٸ88w;sRw|-貛k6l i G bF$E.f& Q8$_ }'7Ib~} x!)pRAٵ۫FA3ݹˀYNa$<@aC3'RTe"vIjk*8T*'Q7׵D-]½|fEh<][̑|}+p8VBm>3bJ5{QϓPF$$򋂒B5@q^-b~mך2ܝC6@AF#Ǘ@)B|֛~쒫C;]ߑ9A?ͷקbw۟hCA7n)n6k p ݮƊ6w5 ѲK;[Y؇ƭ&jn$E2u~⫗6sl3r2 7XͥO\-elF 'n*)Wu2L'5}پe6Rdgiu'ʼnQ2Wj '^lp~y㾕e.̈́߸6Xww2 7!8/-Z {#?༚bCQ-.VSFG~VSy-}2e+ˠ!euFko@X}2Itui4Y44]T2hQa|RM(v"(u"VfݴUc&eH>Q+|nT 0 b xjimVQKS ͚ C:P얇6}.*S(NtYU XˤgR><:\usV~AޡZ_*,BQXk/7_e|GG|;pΑj ;mbN~+n1 YQwou3$8_؃\jVd6> gEKY! f'!T<^k!LwҖ}UH#6yqm˅E/a4;t֊g %CN`Rܻ( 1M3"I֜3k] P'E@Wq>|H!H2+RBJr6"g% M*<=o2ΦYIwi)`r0H\kc@{3Nj@Yo(17#@vp>Ğ/H|Ճ/\UTrT2/fڱ|zs4ZƹØ\~L+=etqJ剮s] d\_Щ״gjzRLUrglBO>8B!tc)=z0]ZJ ,ݑ+2Sٟn5x\iywzXJȭmk{w_A-MY$͹k"DW|z~bqčZAroB-v_Ɲk' ^K{ކkm#ϿwPAŷ䜰tv]vwMhTE~6E\^,s'gTCc9z&MsdSGG_'3d=T.T#rK6K T:n;xDn[v;kqR ߱VhImw}TZ% fqMcqfe=^Y @]2d"a_'q[,`b#JХHEV G(zQ{"D%8>o)X/dX1*k{ tO 8^{ )Vlh플eBtUU C"auQד;s킘|R#J^S-Uvj~ա:ew젤x{o iM箟&EJNHG_k}rs! q1|z|Y7?[ gθ:l`zKyhf1-r>~)T})P%6wg񽜘NtÌAJ㐢Qj!W)6N#5jB by=[QI$^4ps4"F!QqfT/=Z3WX'pՅ`l[׼krG?FE}]T?j"ҁhNk;*/&g?+'Ac꣖>9YO jl} X|gI'{ц->gZID\)EiÝ,m,kKDդKxj`n9nb!ӬGo$zm##j Eثb0w)a 3?;SDU`A0~Z>Oy5Jr<-[כd^5&4l k%u^ Vv\)0ᕹ 9iff%ؔʲ}EOw*/MhA#ċKƏ9m] '!22,òY_8,@fjZc}^Cxb¾";MU$ φ28%FųvQfZiǤdS/:.Hj)aʣ*(\7V{5ɫ,ˀQy(_pN1-3"w_|Yl1gSlG`݊JuIJ-dt5aebUN+ ^/ye7 O]\Z$<qIY3316jZ"vg*F]EgRZFjcSБ)/>ڗd𩂂pw!"ˡ;KpdBT>bHNjrAdnr_E vFJd[` k0\̸r0"%ϹHMxE4WJֆ6wL~:! 
{8gT4>S`csN_;Rraq U^V .{"lohU:{E%v/w_݄\#ި2rVM%c<vٲZ.ԩ>틔HC[;HzF #j[ֈE3kӜ们t6"5ekq ?XuK7 hv(0Qo9 :q`B_NԾxmJlޚ|'☔N[G;q}3̣XHzh.u|&JڡOP|7$q blczf"6/-/ ,zpWᖶ,R(uωCfPC=Ul˄PBrHEjyYkJH|3j[DXIQIUVžݝzD2Z}Mr"ӯ+sǽ„ @J Ğ@yW!nwI{nV#ij+ Aު櫟D]J3<{(24bLW!ߟvU]85BR75?l4v7@.}^t<|a{6TD"Y)ߠ=:BRsT7?4x9ZL'I4qP]" d}⛎іj-ml9v)dȟ-An16 /m{_Jd\?ތ1-˒L +e |jU Q @l昞;i>${_J17̲'Bv&K4}C"g2 ܕ"쪱,|A:-o>,yQi͛oJr+ B o'? T#,fŠP/]X[wpVDt xo'&U.-- nF؞Ɲ$P>N}Vn94ZNy{A~zzs)" u2 Q*nO=?sKPZMF5"k6ǵq'CVF<\3/A<P?N,z;xl֣IYchPˠ$6k zvMTDSؓqdP )p9N\JM{mƸa:'4;ed'qE^5|2c(tê~P (:$FX2Lj)̃҃)J endstream endobj 63 0 obj << /Length1 2241 /Length2 15353 /Length3 0 /Length 16699 /Filter /FlateDecode >> stream xڍP u ww=-H$,,+9aV{P3MAR`W&6fV~+J9 qgM&t}3T;l6n~6~VV;++ 9@ v Q-\+֌;@lmt(]@òvu5B X:xxx0]ΖBtkW+2U2@ hwiHT +k)@gM`gmrpysqs09޲eʎ +ˀؘvhfw:xY;X,@e)fWOWF/C MU*w}.f֎..vW6K:A.Hv݋߇kpv0 s7GMk'7ĿmDHd W+++/;yY@/[ ~>`G[ ?k ?$;` v@M ~;gkO>X7÷ 3;y1YUDe]bb`O'oYS ٷ.'AXJ෹h+aߌu/#)7;2m6no;~k *̭V vm'\X=v]YLlG\foHft0d\3 mع>loh{,`7[u~ 3_G`K/ `x,/EHX( [?-/;EA?荧[>?--&]l} tb{C@3[+`+5Uo_ lvC`;;?,X 9-,d ? ?ghrś?[Yskߚ`8,vDZd}{~cG9O9-st|;]?½o;]Ahۛb{+O5o#bt[?Y߮.W+gП6nos=fno}ž-/& 2CZ^} m%`ڟNcYvnw{DOt%2܃+I{+Bs\ђ8Amzii w`TICW+S*ɍMݽGgmX¾A%<OAsT9pL$Xsfr'_b8m>{ktP@bMS-}\J 9O;I__A yOzү%H1{_đmCJ@`(.ҙ\=^`{VVGͤ"Zغs"ooM7j^3YO2s74$” tw 0i DLʨ YAGb,}<415Tfw>]"e9 OX*;|'Da6~QkE6\>K_ԥug sK]S|RVAbIf:\6:nedm_[Wdۊ8pAds^韡zf~aowI].[{~2|I%@% %,#Ep7oN?Y=/tǘ3O),@PxnMuj-򚆻nr:rTb)PsmJ.-Ur2xq@u Kp*SNJuOn(Q6'(z6KC|N&N*U41F歌˟Aђ4-Cw'\Rf6;p fGl(1ǎ8Mse #jcP-򿭝D'\=XXQu:? X_)|E({Q?PeJ^NԲONtIU͗gcZ~3 N;+ʅC>]~IGTJ  z]b/:7Đ8>%1 ,߄H )Wjqј;1b('Uo#*)9I_;6V/(&Cut[>ϙ@C1ÃbmttGkg>S.$&L~XXX%F3+ŴO+l:Dmɵ`~r PgY.{ + wVyJToH+wx~d"j6>Y>IF^mr@P>n~O*Kl唰IJ tRX)t31@6Ńȧڠs q`i~3E;IB-gI vҐG2b(ܫ7Kc Mee%ʼnBޘ­ظ=XBMJ ;-:wXG#k!g~BzCU>xݚz| s߷8VLN,h;qa|d]:WnCbҦ=?pN 0nWyc"K:;w1|R'rX=+#V&K5x'瓼7:…k~G~J5z Cg:['ed)6A F ɭϤA<;fK„FLj2 ğ(`Awi/:ԉj}Ȇ% '}0(WVtc8,b@ _Ix-:lܔ8~OAl6;<܇ϔ ȪZ7}F| 7#p3Idh 8+sRge,nt#pZU ڇ믚q ;_R~B3&S g$5˝뻶*=ycC 6{f'v06fs>{-\a(Nwf.h"0iE(qmw~V/bݏhYg6n⼊EuH&{vGl,ʦ~n{KyfBWSq;݇{ޙi|e8%%]2L93-?REY61)4BG!܀r!coOq3CnrW$5E@) CJlFH3jEWC9% @ߏ6> ܥ7KR۬5)K<`3pM2g^FkhM9Ŀ? 
+g@z*r#ո~Hā-nuhz75)댟d>b9šY5mf<W}H"fȸa<=Jrd'/ѓyR5ȵ6i7WAS.ҢJSvo]j]f֝uu?ÙS`~X7$?˴xL¬qj6׹Y?"-KR~R0H}I$0k^e+v gz%zVxr/ 9?zÏ.e*y_䡲XYv k'ۋyՂ(}Gnsz>tqN=.M.!AiY3[S %$qo] PThRYƯ%g,yGbg;AԒpݵ=48ӖV"tM6AK 1N7 9-D]k-^AS"i_‰r% [F7r' $5L0319 8Վ)Hp kO.Pb;q}hBOdE"&x̕{:iG **>QS( {/)pXͯ3$E=)t3VZ*; *NG(9N% =@C7;$.-@?a1#JJ8a4ovWd Ψ ]`gc7tr(88aLDfh`WACj]e[oz\jiɒٚ_4c`V]R[5 #,~C$ch~Ra<֩Ird@X/ zV qR!<<aX4G/ u:uը+@%}!`sbҼyGiZw{%v-IK6OBLPեlU'%sn>:0sdEIaozROUuD#P'|g(I]aUWz!m C.`:T !TxI aW퇫L -ɚ-j{)2fpMfg]@,5 YK(+VWoNOvho)p7l(v7bh|bM|Ɲ,GW7n7;HW4J0;lJ2FbO3gk>7b NL9~㦖J[wq)@q- A[f)mWW_r g(1G׽&1DqߔL-lCٜ3>r&xKmr҆_R'I.|LM]l߉,{'W#byP0g}ecuh ̣n\?yʝlXs}RrJ0%?}L8f!0 3}ĂAeO69wUYPBskIV1uLO(oD,q vgj+϶<29H{0 t~-I1T]xUOZ3Xj{"U@'G5j d@$Gnvd* e;.aP(m@Qwh4?~ :'CTku;curlGRv`,iPpǼ%5Gb|oZ0e7H삖APt^M8//\%4J芑T F(BfK/l'S3·nhчCM˳Z ?30Z}4-ț?XIy(3mX(kKfY}R`WiVf_R+uPyϰ|rst8mq\/i(\3#xB6TPwx*j-6/LOh`j{DbHf= W) !!JF)%)h#w#8u_7pi5JSLoĞ&^uX~Zȑ׃to8$<;$e:==I5p~ƬP"hklVLCp|>#+SDsٷwpX+Bh-P~RI!}Ef<#o^\ԣXhWt+܄(ECN 0"|E|'y`eM2LoAba00IBh|;I,ϫ`x0j [M#2c㍡ȉwL59˜Ep)IK CePN *@lr-<#vģa* Ѷv\<4Iչ.Z@;4POV ?xC ==&O~rD= mT9q$2Off.uQi!ZO}KcF8:8,F6;\0ԑC[8e3}) >%0вi=hvF*;zPU[0b[$/fJ)"?*{:{Sg!pr2[r@dؤx3<3g}l)֤.uu/T [~hՍ1c|^cl$(1qRN1;8GXD1 YW]1Uݏ!YE[o3Wz6Ya!x-yoJ:ב&wrᆴ%.c*c6滰Qmmx#zݺ߻ $  `~:{¸0ɿ@8P(_0R C~]%56=:RC%ұ,~)2i s(b7fAԶLWTfŕaf&cyyY9 T齛Rl^K{[^a׏굞%sj:9<%z zNqfhQ7(بW-7<צ<kGȽ*SxdU&7WOxAãXKr2mHԣ qBfhZ]TQו 65}i4td 6 )wI%ϭWWi8.&Z~om4橓q' κ`>Z-:,;1@R]\pS%*)|(F1,U_oZTH5aeci6^:mKP:Kt#} K6Lli F|KGܚ~=8%)"9V9kE>?Sʾ#}TP.{# E2{-V 2qTx=YM]5Np=dsʋrn܁dӑFCFpy163,kRP1ms8էx.XQ+.FuQy>Fd+XV|z;Ju=v,`L# Y",s-Ye< Zpǘ$*-"c6opI#Y]Cxjڔ/ 5秕p WDUVBN!Хl8.0.1/442}o`f1Jgњ^.wJ Li A9e14\y~2^SAUQ}waN&$?|dl/ &#S![.izy'jW*z䜦#uq%*[ai` U7uS ᐂ}(RPܗ0S$V)PPuJuaPBה,Uєd*X0i9#F㽡h;_qVb$q3=Tn~2qJsFX ^ulCF'Ksk6*/]$%*=X15aZ;06SʦuԨw=ZjʋtMnb# 8*lY1/p;pTO^\l85M x v(e+:0gN0ohyjh-ER(by!kk5hNRt:$wNrCuya_y/Nv܂]{p@y"dh=FEŞLv;2$:ք E Cho8Egs 6eK;`bmKd%Dq^C1'đ,9gzxS7&lzAUdV?oDVKoY qHc I̡J ]8@M+X˃e>IuA_0FNyQh!afl"t7g,tV+.m`r-OCYe<\ݚ6sޥY3`]"PU6BYQƚ,&7*#En`z@Zl$yχc:lamvF┊vlHN^P!ٺȍCr u]]aR)g3\P]!\ @U|y*[:Ǭjv`)kzARO<༧Y7\ |Z!D\T -AFj;,zQU|t2 9ӌ0&I1WPȞ=TD۪: m>v lݭHK--lOS0jq Lֳ` 'uwF)/v)G xD%QxXwHo'/t)vf!g3UpKk| տTfz)\Wiɔ {Gհ[0؝%,S"*mT$6W_?[jK)Ǖ7QmZ N>]a\b[w+aAIaI`v*H,`J yMTHL`Fax|f,XF1eTշ\jM Oh',E5p#C] Z=E[a5oVy5UyRr 2K%-V62ɠ_{ˀ:*VN[#%wtIԲo^XE(#33s\R.!v멐}G[_wGnsV߅Bz'",&\5$iȇh!Y%z] :5,D q33̲%en=lsrԕF<ɐ8i:L9$Jxp?3-fb˪ciqbdʮNT.z. !c+IML*X)A1D-EtA* <2QW>4\*J lCx=@ϗ>Ĉa,+DoTh"X^W=STʭto40~!va1VإIp72 a+$=lKX 5ȨXsUb~4u++* B+w:@}^ǟ s"]ěƆ3;Wzbh>詅c_8{tznH |bk e2 wLتvՙslya~J 'H ĒA.)GuǪ^Q.eS$b;K$XV9~1dhX$6uo3+󄸆l=`j < ~ӀhV;%C*Nˤ` jgL!K%X4tms~L'&',m6+:2I1{O?ZXj[7d/ qя"q )hnn!2.=&[#8xn~ -])>5gq-t(  5]o.!h> 뿻i[uk Vf26-@t,TKav%09„2wAшThHBѐ7oAHn!8peFY  J-5X+ZE Y\p+uG2(aG덴`R lRc-˾n)JRz +WHY]DqX̖M;55g|iHq5fx4l/]:5s s2&G֣Xm Xe 9EYȗf'v{PwWU_,f\Ab}YH)E F]xD*M% 3Kґ]mrP9J0KS?O\AMNj蘥n³R ;7]rYKxr`ї^ \ P:2)NWTZZ=1?0ؓVϨ JP m 6 u5SluL R~d;i"bwnHKEZ҇-u9 =}($Wh98mdl5?Of[T&dz^$ Ein͢S(ӓՈJHuf~VW_`Z.k~]7Oi PE|tonfo-m:.*g/aq(\vࣨ[7GOZ=!y>5r*¾́H朂*#%ܔ83vzU-3Su°hZy}f#o0t1-$%!ۦ;@Pmڹ Cx#pmZ9G|HG#=$-W6yvXBxݥ-LyQPNU4o]7$&QSw>EQl@_aM N5[~`U|^;x ;% a>J }vZ Zw}\#4 F:G*WSL7K\b᫄н5 2r*\їU1DkqqGmVŪ6.B3fj66-mɍnygZ?\$VktOXpx@(r]r9d[ԙoǘi\Ω#MY f?y9[/tv9s`u Ժ r٠r iAmg8|'"ω>xN?t|F\m6G?y zS|ep~PqBpV!muKt4wU\D/i]Gv!Ę a$RuؑD e5_9w;#N[.řoK rG, $M͆V\Hw_. 
q|S-'G"dT= -0!\J4WN@?R'; 9g_#״scCؽ`3x֒>G+d/d;bއW"][E}ưк=}[%XH4c:[E'nOE{&24OssX>>?-JMɻk+/m57qPu/`a^n}:MĿ)Z)^k՘>3IMבV+ d%!ʸ(ycT4e+M5\5+ A`c ?Ȅkn$ %uhL4,KCE`p0^oZ EwYfQִY+A @QMGZsrӪ0&Ab =49>z{l7>l>H|s>Z}.Q)򴌰h$Iѭdm2 NSkC?IlfBu\*?6$=>ي5PS%1rL(ɧ啸[O*/>麶ip:-Q}G$u}@b C.4DSҁ ,Z!DCh5\|fGe[zGY]zsGbopr@nIhʲi PC=G%E]l in ]&FiA"_n60& N?z1 ?2ؒi,fԥ7ivMD ad>'Kh=sk8Q!\k:ZⅬxo)_P헸tn4I ɂ͉XN.+IWư{T-Q1ΘV(Vsck)W6 uLkGiY endstream endobj 65 0 obj << /Length1 1614 /Length2 13484 /Length3 0 /Length 14314 /Filter /FlateDecode >> stream xڭzep\͒b[L33[-33X,Y h133Zۙx;vGGJ8y2OսQMA ljo sf`adh*Z1ۘ> 3 4Mnnnx @ACGG_!c|d:Y(?\6@;DU lYJZ jIu$2(X,MvN@=`okNXN## nt8ANNK'9cK;ӿ>frD~>읜L@΀Jbla`oijo|0^g#K;'3o-c .NvŀLmNN0_}l<ɶ'?9X:;mYX?j86gU,8g@  #S{;) I$Me9$D?q]pQ0w @%Zxw Ecvb032h$a4Ut6|L)dciPAXͧfaibmwrL@gVTߨ)}(Aw"oo(""/V3ǁTF KwG߿Z߽ldgm}ωh?lt h`olݹ'gpLLr0ġA0߿ھ/-|&qcuOvۆ+xGCFӓNEȤ_T3rNnJYc{LYEp {yOOI~kZ j#zm)eU@t>]78 ^#$gCM+G|;1 HcȜ#-lM~Aj2f/SN֞,"fwAa"wRB@:Sй[L:iR*/if* |\J$t(7(,*b?lčwM~ˍ!zEG^{SŸ\3|H*ħ :r=$wykiMvTFVݵ%DS0#A՜1Re 0囯۰J~&aJ5h6"k*Y'+3r+iM=M@F)0{dD3iBnnX  ghzzi3twDbOotJ'V93,Br4aނa GrCC@;(/6U 4Қ;Mb7Ja2 7Έ*|f gHP0ah_5*ʥnxҎU%sVAqO<::(/Q_P1Btݦm%v(T9e:Adb [UƬJGPןȪ> J aֲP0t ,ыI|P"(4Emԓo-&U7d6L9[8vS2Ɗ=f@D.!2MBB\j,jNO$Ɨo:Akc!/E'Rh(n:NF!V<B ¤b9U{f+ y?@ mY36|`.|?]2=~IJ}&zh)OSB)şDAahxҳtc J^qS/Kүj9# {5\/xR*_dTio\!&(`1-IyrIM kVGǾUѤ_ʄS=w=ia2A6wRH0v畡oc%UMs]6Ee)uM18:*5R*r󅎡VM¦Xt긷wv*]p>uS`Vi#) wfCf> MP;7[MsV9FhG1fhpjhBHA%:0 0*-q݇O!k#1v {(s0`#-eR^h7"k4?׋ſgb3 NB !^wd"e:^Xi)x)Z)k{Ɋ `g2aG86em=Oߥ#s 7/f7A&*(RԳH]F"+S^㳂6Ce1J\]{^96Ho\\9 ب0s!?RWI~.5 v@)SYX|2Ƽ[U Hr</9Ե\Je1jUۮ,-,֏}&9 ]>&ضWR1)*I7R80" 5"x;n\u GlK j. &::2'M=nUր8Y#B;&~:p1YLG6}@m9pg2"?΀iJyV y_bQG>3,owRi5 ޔ!_ܻizڄ9"Ε;"f&U-: -޻ƭ]A(6~ bIIdݨBS찠>[6sxU0\oGuMRMV.몇zsNcS$d G{c6sd]7N>tԒaB: 0f#]'v_ɢy+4 JJgW?s6uIPo&F_}^hS"j_`Iֶ(j,d.уJ DTl9;|BwW-p$!Ƭe"4Sq J * -k3;;F쭄"R\i7g]{:W0ps:feBrztNZ ޾Քf]xOa&Huڛ9ܗKC2D%o¼hc|+_YXkPh`:UĈ (H߱x냧Ul#ACL="1~'\Y"[_-wkHpɼ$ .}dI#b{⒧= ܺ% [XUY$nBTRRM֞-Bf3μ#}OPb2'SK@aBV5ETr tљV2Uy+wZUPdVa+EMnlI|BjD)2CWBr'^(%if+]Gb[au;'OӮ!6~ ux~䘗6~ 8 b [\[a Ϗ^8W$aKc6ǧ`6=hUi4 ʞ? &w᢭KzdSC@=/pY ֟ jrkT9b.))녋"I,viMjüR,FtXV%/u[+DfH]=GӚ(!N#$*Z|<-#2:?Vj9U YDԶ(lBd bj,P9~\mVFph7L^*zKmw=-[HSbn)S.14TwƅInAn'Qx6DAQ& Ld`:ǚ? _m|YZ!Wh rDVvK9KHKL3b>3Thx[1ZO R2|Oޔsh>n R]rS9 &uwDwa^y?t6‡U9_gcU݈e(wcqQǢ(hzԃdfVfC|qr'ٽ;-}j|J,& +ʎg 1/2{rI~8m{iF45 q0xA+ߵ[1e4sz ŔPbP,iBNe?Z®SUm=Qx4Rn7|J5m#5~NHH8ɂ9s*5 ztgUŁI6,\,C@p $2"ĎZ]ud& <9) Mi;D)a3qцPSMJ˓ؾiTBN &bQjYYu$u %n s*];3CVG~rgԾYBe6JUBpyŎQB;Q8uK03D3Z5(L 3zI[ Qvl3 ȳ}8mapcVj6Y '+8'lFT6wٿuTnOqK$|(edCI,-Fuwy8vb'ڀQJ2+z&GsplPYo#s,߶6~%zoOTtJv+P#ioAm;" Gfl7ţE|S){P|jн*Qp+YKǎI~]>r:Kл*SRŔ*`i_ؾꗗg!Nѫ{DFΐ;~ )aw|d /ү)""F !~=JF^nCO/!+?wc#Q=! 
s$k{ *vAeǙ{7w *('Imз6 zŠi{boCt6=ōp%OP1:[}͢0\O>`O](P!hhN<3Ƥ ;TIH[p=,> r2 kZij) ֘0,fo_i.i+G mp@IEL=gQg̛jY> nE#8M׼w'Ǻ1jcezi?鑂 !t I]$ wUTz2杻<\ড়Г>!ڒ2FeO5C P2.v5 :-(Jt+9ꏰ"g9b⦒ghܽyqӸ Ÿ@BkUȵ1beCzh63 jG=y9p led~q*EAՙfxQ%>ov \~&td n @p#<uQh)7~|M 2}5UutX !1or>8ωվa X꧗ b/ɠռtnZ#uTSARޭs9^ag,-[Aa(e(x挖ЂJ++:5Աc_鋒r746/Ӈ_X~F䍞8Quɻr4hRLd7c{Jܯ<i>ɴYFt)UrJ /f=^ո3E &g7XCpO '8|Hp,h{ ^LRuzI-iKl_FFնF 7ԈH2G.Q*0s.zpEoPxD-O;l[ñ*`?„ ѿyV'rf3]b=mPUB`/ +Δ_W& ><1 LFv2RIr3GT%4&@ZcpBiͷRml#tE"eZ`R  9mv$.]7J#_|䩠 JN2;ghpTUBRQA(2BI] |'^1#*[Fpsg1›h@6/4eDf|"|ٍNqiGI]Rfgl8E >Tj^]ܸo<8O"2U4q=ESO_]_=ODIcA؁p^؃NۿzD`dZRn酼+ AjU>$?G;;KeB{5Jz_C 5ZܴCr7#ne أjI ήOtHG8Ubה 0zOFSA%Og'QoV$9Aڷ5 v*ltE[ָuyo:*6CL{&xPÀV*qOsj4$^hsW__@#oI~?i4b\R#y{6QD}nv{EUE`:jm!~[e迣(KTNWJ=d-4?pI5UDo>iMԖD@r~lGq&ܹيARzּ)^dJI.A[}BEF򑕦3Q̣_ [27A-qb͍Lwi?y @蹁~<~]@TV?),n4>cLN7ko9\ú3@@fѧ=Pbq[s!o a9{)_ lK:2&/i6QbQ| %i/赺Y711Ƴ7J&S`8Q-zWxgC<}Z&wKHxAak2q!rJ,SeC-̴A2Hњ!N+aK:\C@E7Dr-K?my4,lUQ{3?_97O=_[1CXwqUXPpь'뽫-Zꋡz8b~}xH5߄UX:V&$v~zP@GGV RΔ*^oo~khb Pꎋ[78- #1".C'>WIegd}k[JwI-ǁ?;^"pOdnʢFb&1|NwSЌ~\.GaOL3!KU(<{Z1 @ZBGz> |6ɱ[3a5Ke]UyoBYUknmStU_)_Bز+%Zs%er~h)ZH׬7@UP0[)eVG짠fab7œ~A^4ןð2zo Q#` 4׭LBDӏm,D!hIeW!?"] ~Ch!~g"wkp9t/ ktB^ьc]ƽO@!܅"4@9s7\@dT{Tn֭DÄjgM'nl S*$.^0"5hfyl݉cX1f35{ pz2JʱGОtxksj@̆ZF$v^č%@j0U&]Zk g0$/]Ѱz`A|`I)X!L?Z9eď shLu'F{sszp~/W_%YneAOz;YJo)[1Cȇq̾ % Y~HK/xY=-pRYK$*S.. raO*?>4+'2 ITMT7~Ḁs(c1x&p 10WbteL\\֌+_:B'>'92ajM]X_l"2oѐVsbP>UuyKIz ̻6NӨn$mguTC+`}ѧZ0@ǀS$\~..Pu osIi}{EXr1d3NA!y !"s h T⠕K?5 endstream endobj 67 0 obj << /Length1 1616 /Length2 18911 /Length3 0 /Length 19746 /Filter /FlateDecode >> stream xڬct[%۶TPmv*mNŶUmsn߾=n3Ƴ4\kɉMbv L\DJ ֆ&v2Jv6Dl0ŽCg ;[Cg:H`L3''' 93_ jZZBd񟑿NfD\v6[ׅ9@$,))'ND%.J$8Z)Y[XlDvDaٚX3_,A'"C"'{21=?8Y[C_Bv3l)99;;Z; "< d7Ldg7埑u6u"r;@dbdom_0{Gi8Yؚ:"G5/__s/[{[mo`6ea񷧱f0I[S;"fg93IZ{La$S$"y5_.ߡ\ mC$C#ceXX{{:?Ha$ BL Lpp(X8Zӿ~U[-௞T-lY<&_e(,,#O)Y/14As'gfg'`&de?tlYCgG w"c31;o0v&egC[: 8:;w=;fmΘ;2-3ݹ3wxRDt8ľQ_ZgGmC4Wҙh/5U>)u_ΟzQ^׋2`ZLjJz%ӝ,PhdOƩ ]HM@(ug'Oc#Copisbɹ 1}ψ= \^=HͶMAwBa&cX2l( fl'Yy>`Vکz45lڍCETf^#^P=gzh^mq},=VBYpN~Z>FᒏOC[Tw^=ar 4Hn+b٘F-KlI~<qI7JFлkuc`<4Bw =46䗅RhtE떶*H|qaZgDPЮON!XٽHB!Fbx̳8bV3W fSj1 /jETHnכ=y' uc{)th{ցp_5ǎRzJw֪׊D^;9("M"0r'Ӆ^%;+Ӥ٧9TBcB` M]^C@۟/k;UJot6ְ\dCnOn|K ģ=2I΍0ݐJWwMEnsۘGQD3X\d|@?;k=P<QZ%&ם!KMOW*mL֢ ,ۜn%a>P.4ΤBKh@ UKo jpWN4p*U瑔PWzعYa \A9*]ߺxWODxE TNZ,=%|qr?:=,2EռHZyr\CS̘ xe?ã~B ٴ~Nul 1 @K_ :Oxn9_'ʁJ2s+pэž_- S\a"| &[@_M'Y޿^n3BE^b1ID,ntDƝD<ض`F>R4/ghA=JUǍOwߙӞ6 aE)N|B( eէ~YFWFFd~=$+@W2K*9Qg_&6pɺO?Ԗ~2Zjx4wP?r! 
O^dC㾨@Vl8} z>BN0l>z#ґc[o@f#~1&gO|W(GXO<ȾLsiʉa–^x,FNLwFLj}#t]@یt>.lrCquv@)XكVF6w4ڢϣY=+fkd hjk|P:k?y{S a!G'dlXAe,:3r u6y0Z B+#KqRRd vVunB!(& { |]!LyEp\,!݉^ _t:U" l:t5kBO:؍J )JU옒ГIqbHt|j'˜ zDԺFS#v ~hb0gK vtBBʎ E@o%h¥iDE_; wjShe:ӡGCOGT7ߞbFWuưyO -im&DL J 5oF?Y᝴~TϏaF BqgsYjx(}ʒ~;đpXb!C\#L N3B}vX%_hRr"!3m+0y$:Xx*0<Q?[㎊q2=$nܙ_"M[uUK`uO0_w˪y05]ە#\[`ҬjpgodMR9Vgn:lLK1BT*)|DXZm^L5 "iiS^5]8yZo5 u^rU'y(9ru#Qj;m1\!#Bi􅌷Acy.o /QZt^Д0  keo0[ΖG5I 6OE٦p2j EhՈA&vlغ5񖒸I`<'Vet2tYKd0oݥcg%~+L);\qTI%C M3mIp|0Y܇XrƇ(@6Ou-Vř4Co IZke#f$5$}$sС"7soJh̜U_ѓ':F,dg.|OL1 e䷼B"౏84 >lPii?~9.r{Ч^˦MeBNXxGP@I V%_jC}y);&y+ܞ!On:fM$ŵ bӎ2}&<ukSMZnvY[;!pP`At^9:g7NrQ摄=ݵ3T?j|,ʝMΖv1S5DƝ!ұ17Ϟha$vJR-u92QD 1.kW"\yy .d2 84A`NF a)m9nɼҔ5A# EҐ}ڬY+ݻk(4U6m- -:@G]V3 k'@F^2]Ŏ5DuS!܁ɢΰɡ`b|bU4mtv=B7azhDBd/#F#Κpyy_u}hl>6# :N"F"iA*ޜ8&;|Ҁ_jvGAj2ݛl1Jy{h:6\-A4'Gwzpw#(Pˆ >+ڬesVFwG1iTVo VI[H2rIR5}m:++d,hvkd2E;2OWTzt@C G@-rEZ]@#V;8>Yv8k`𺝶n"6svVmzyB=4)F:Vs4Qf[ӷ=`wv~2:'w*4Kc67cwɞ@Ut{F7V559~5+GFj)Y#lh dD Kn{ ?x˞ӳb FT.7-Ԓ(-=kvT/lBEժ0F^ayO+,!)BIOǷr!5*j9 ˫sOIj+ww}V"'c+fx//dnڞ ] 1,h q%f.D!t؃AojD bnBzUU#\U!|o@t"Q_B$P4j#^,<t2 gK6}گO+&9{ߣrgAu-5uL݉W#VkOјAFݚM'tgQgW7WXzSJ'ADX8Y 5fSEIob3Ki._{]e|j<5 qu} Ħx% k's 6km7S |:^nYG]'0 g bi<9]ԖDM O<|lfרVhrq3G $5`0~ŧv>lƢG`o׏^1 @ϞQC#xH.uyARJ^,XvrtҒ ǷiLg{JbHXh.!wx< Zڊ_pMbaNm*8=JBGP%%HnNQ FKhBL9 V! 4U";%8/Pֳ[HqX]g匳"X0+/wG;9bIq9N@Idf Yf|~ZCxrF͹\v,FNa͡v4 "$@Xaq%%hV}< (;6"Tj1fO}sv*ziޒZDҮ[)h X}|v5x,L8ZR0tΒ#F"s ~-ҢhCDd;+Y>AhCp (76Nb˨T/~,垓--]P>X#.&ɚN\ =z,Q[gK^cQkS=rfe K4AI10*bbS޴`W;[l`7|/2`{1e; P'{{s/c4$2,pbIx-:e`COn-UJ+ -vV֑~n? -E zbpISrg{iLN}vt;>/(aѼ83) ubR &Q̪2b!/ra߽5Dѣ`?-#ؑ/9.)$1a(s8@Lv 6ӔXmH~"0'>ݯ&픅7̛0}" g 2Z}1\ *BNtVD\nV|&h4yMtlsAy ,Wm@avI Co Seb'N84k#UhD[zi35ieyxVۈyo6H h' c‚KEQ< ~_"} RyLѯc_E@5$.r)٤+}O0= I$ fAݕ/Xݺ!Nlzw,n͍ Y#L9d9f YWo .#nlm}0d+>ZL+C)Dlk0XA}τcňa JN%H`tD+]ZPl԰s2&ȭء>"H?R'@aUT21XGQtEwdu~g}GZktN50Ȣo#c5&?p7 Q`2j/UDBJAE9О'Yaj fB:K>F}}~3Sqv Ms)ɣo9kxp@xnĕĕ^dfM  Ǐz"#F,=Dz}/0xl0px&52'\1=Q * w>ծAf棒V`'% D'rMC&H]}Gö[a!sԝU7wP-d&<}l$Uh;Gv%q7$xb5q};O}2؁J˾ĩS@.,,=˽/֘I͆8Gxq%jaxϰfKThZRRcj$lw$'4歾n48]QӼwp2!S 8c[,wLBi+1 _zܸZA:]O ikH˴<{-caMKXXE ٠f}%TdKۆ {kw̧7Gٹ|;,ˤNO?E@Yr~,r#?Pm\EF톻E]x{YFdc::mLv'VtHqV*Hn'+;zIia$}rؿ=$ G`f؍N,Y]7GKL>pyxB{?*X|l+"8x[n!NBԐOg2XgdWK91 ɼcj蕲΋&U+GYguA]tH0]~rC aH_C)N68< * g+SZFXbPhW 4 tgē(7JD-y)lDd*Z?N]+T3ףC\ng=[)VI"V)}H} >S|;WjILj ~Olcp@jpatb" .#[V,V}~kK5xZ8Ҹot`J~'dtXmw7Gh8JW=YE D@nlCP#4?MNy\\yI޽X睿`$q&QS"Zu8E>X<[26$xE$Ub?\Ba.ǯqoSZŐQ P>^Q(U 2`5W.C%d_QB]2s5ҙF3d3{(M&5"RUmsko~)F5 s ɲ }?ڷ4}9s,d袈vj8iAf0CY/4$i.|Dm h c>@`ZbeBS-}r> C:5c_o^o (`_lh-b7MKwVfSiV4\t`5t`vRZrIH<9$p vȇ>n"Gz{G!j.QT׎A2|4"edG:b v yG6qoc\,'Hd:k%smxIྉ !?YmvvĸZ"e–}vat/y)h.ȇm {n<͵%{'5a;8 ,?h/z~NQ48͔VG 4 ΆpZ-qp}o/yb4S#ݲ(;r&b4](4߫"ƌ8ҋcPY?W _GhsN8Sjw&ayqyXUx&jܰt-5 ad)&A ]x7Uϗ+\Ak#PA|fq㺍>L^HCNPnTZ}1#{~E/8iTދN,~|d J)ŒKK~_ѿc񸔂w}< CI^ס5]#\3UE@X]ÛU(/7(aCDa ~g D<}v$Z7,:01YkS4qmdcо3A81FQomC-¾  ;B'Yu|v&Z~Y~C(5]3b}ё1c5LĨaZP_Q1]Ai; kuMrԙo)>? (卻KqЉ̧͈1ȁN}B^Dej$h`'?*#F981IR{[:rC%ϩX`ئ!w_NG~t:jQPz fU' <} RU|n̚rUg6M-u2VfYq^k>3?u|<ƌ4zosށX Z 3XHX?&oMǵh+A?uK~ 4&pSr1?>=!Knr?CD.. 5F( Wr34\~hG=UW=ai3!s>AZX0e i>2~e*uxR,z^ 9Lgd#ffBm0V/y)UjcWjY* 6㥣{+}@03t/N N($<Cաcމ6[UA\fr|7f GTA}hlmzCc~Hc~LDROҘR@]F3XnI 텵]6_7eh_X`qD)׌5եv V֦1SooЛ6>NQy ?RG,vZ1W59;^1Q*A%ߺtMβL}#C'ڒ.ȫ2ocѐo|]Y6᧵I6Џ@+܉6G _ '}&}vW"m_T1owhrc(R*+#SP%et6Z1mP`VPrFsopt0wj>i3^kAd=,zǴT3^Y͹x[ilR4R3"E5VYFc(uNaɋpJSaAOIȡYqA']%yx%mbڥY ,Nh_T}V8ElCyn Py(}[5DcW_hWM5CPW'ڿBU'vTr23*ON;$LK,I*Q6w/U*E]+^a]X 2Qn o30tM:MNѭb[؁>=*#T58C[PfC7DmGyX|6? 
b{Yt +˰>)y=ͺ{by=dPʝ+ naD1~4XT^kh3dtV2>g` \ P<0;#d'F jSپuK-p?{W!CjO4G~fMer {5-)Bj L mη"QnYEG):Q@MtG]ѷ$[ QTQZu{o~ZW;DZZQ޹9؃żk7-x* :^j0LOf;ѨX*kBX[m 90g= |K}FQR~8hby8>7GWޘxΆUhQFR+ `/Rwh8lbz"$aAjDžpV6 K[y| m\7nM1#cZ&+6΀-&*-XNqw,佬?h^?]IDYG`Ĝ}(?xQ&!P^hJ2NnLjԌ eI'|6#|T7xB&yB;`ZԶ 8Ns!Ѭ d?̆ lRcQ6)iM^e_]K*` .AW8{<*w;{3Cᛗ5!%U$ Y\=*b$ܪngPwSo'IE;8z b+^Zz1( Š0ź7䨍s/@XM0Ӊ/>|An4!V!hjpfw{>w0V5:i'JRݟ2)-ħH㨬N㨾S^&erALT$EY?i.bW}(˕Uq.4W *Oq3yׂ/Կz;Sa#\)w*HR:WWpfںb2pqwpb2Ѱ|M prz_VSꊻ0:O[|j0ibjbL1#jRռ[wJ\ذF˞0Boϐzf8Y'@rdn>_B|eE@FK#N BRt du#M ïf!4p_ςquF^ vJPTlʁo7w=h+ZYiɠ,l{};凾cfDX)^7 k9`Q zLJ*Cj2;q6ey@ 붻E`׸N)Hd9γXւ. ƐwP (~%ʳe $kl@b<$C+27@'PpŘ0c% _:ɬ̲}D_%н zhUL}oѣz[HD ZE,uw| 7pfDJ4YoIO4 ;uK|oJ&4Tm&3,4k?D8(`2gr6nr ž@ W_x-_mQބ3O8g<)%|;ԸXaJYͭ^P `[79؈zߙoy:'v8Uß.|̼Ka9Ch&B)|(*W'=49xkP Ai\=S?@toC xzRMF:ʁmēN 6'/Fϱ"ɌJ3Ydbn_"k'K5S3yYk:ϩ(%MH q^Vѥ{;jqipCVmNDٓقLC¸by6:>~9>պ`x*|"Rh[ޔ?dmjt!Y_%~ЛC; P IӶ?w'V^ɦ1 T)jP9,Q>AK-Pvb[DBR%N[+m;gn g?gF-RvP<ۙjuF>A$ iGT[2^3:$X}clA8/,q W{Gyl%&U:N}J%r|겙i܋D7$[uOxז.6:?I-a"& ׋a8'&1C0mlGYF^{`ε ( )&VJFr&-hRwv"/5u,tc(zuх@!,l3:tHs;d l[ȣ[ȴVn脜 atv +IחG#y9#0Aa 9+o#;62*ÁuچS?YRd^T!^HN(뮈n>q&4J]a=zR-zOO":%` ̵J vzgh33_~*AM P>L[v1`(r@E( Tfr1(;@ SkoBg~gS&[ ,ap7:s"+:=Q7hq5Ʌn(#\U4)3h287P h@&{qKſ%] H"8-+Wϡ{n0i>y(W1tsA`Y̸]@N02e.Hd,}S62ojkQĖ%a "G&vӫKd~1 $h[HkRbn{Mrԓ% eP<ϨmlNY Ļaim)>I4J1q,]E@Am[0Rfɩ:?2ujahdX1I*Qꛖ)ڝ {}W:v|8iRҔ06L}x%UZdgPQkhCLt}9?A?.ͭ\ϕQG_'|\Hz%4lzw[#"Ygd)@s 7ߨ`Q@Ȟ=-QaTGaHYa~]aن`jvh4~M >J&7-d ڪ ,;u uHW9YB)dheۑB?i^rw0@Y(m.Y@tAi R27o69 2Uhp͛&iy&K<*(1r $1vőg8 9gƶaoڢ 9QQLŭ~b2")` l.M`a9_8Tp.s+iim"APhc#P9x0e׷=& y>&HC~ Û{ΰ-Evk ܋6sJRbU/CR|bN7n 6-9zF3.yϚZ&= DS2J X̉6sZcF|xiuWӘ" Y.ӎؾy8j*Z ҄ߚ@EzqjroVfyJփ iD$&;P:֢)v(%w+[ڷ 1\KBc_]; g8vtX FIw(m#,taWp3 BM2WDo@谹ӻu9K5_nP8Q4N.UgDZ<3z M CMC"Ů2mޑoVX՜s*vhCU1gߐC}Fڌ| r.s+ZLs#m-_2tf YK\ N3:]6x\INIK5K>f6VW cTr0Ԑb<ϡkxLF~lmLFA$ox<,׏2щ3sΏ]&C/s(G6 , R$mB秚_ %j|)8_(6=U}]8 endstream endobj 69 0 obj << /Length1 1630 /Length2 10669 /Length3 0 /Length 11507 /Filter /FlateDecode >> stream xڭyUX]5 ݂Cpww( w%Hkpw !{z^{-ksS*̢Sޅ! j; Z tC%. ~6 2ppO' _?.S `K{`wy_@+l +*I褕4 {jj 6(@ z ``7i͙K8;a 3 r;;~K' \ ^ rpzؽbd*gg3' 5uX]vū9OKa4 l pye l_ WL'%Jg:_:8z 5]A,9\^s[Y"{[O9U @S?'@yoF_SK*@ zO F: Q{W9XxR` `}_vM{s-_0a63C {UgSR`o*~s . O1 `|>.N!_D:+]}666W?lr07I{3Qwڛ? `3W'Wz^<@f? 
fi.#=]!4 *!i|&OU!,c-vvNI|w`д0}FK?Ҏ>QXfQU3*yB ktB<xVKugZ׆Y]]xxD{C}G_|61cN/";x6tӘwk>aÄmj™JݴdytS3aHÚG!,w:XrV}+y'u>/S;;Faf2#NF]tRf̐ʉ0JKĘ{t{Q #yؐz8LqjNkDe9M4Vw<5m+r{"* bn ^v>'Sr"QL-Zk` -_7M(}; o'{hhB =%[gEP" Cp[tI*7j!s +m"g߶x\,xAbH5z$xpd%e f6HÙCD;='m9R#MK[3Uﮆunelzd {C}YB'Ɩwk9tb5!#4$c1yܛ ¾^ B׽)|x:ZA.BJzX{; 8`њR҄*y)D|I5tt{ۿ ѣi*„iѿNTn;ȓRWIŽo.΍LM`>z&:[P=ƽQdYF@W{qfk7Rmz0G(-Z[0ߎ(Z-qwTj8Һ`NkpW<>+\>#`w{{mU5aB_5) mJ4 OTΊֻ:|jHcSUVgDߢ4@1~9e۽X݂0#QcE*DbU7`rKy=UK| n6 nL>)~W>ͽN:nAFAtG;{MVmS['6_}C4ݦT79ڦSvIK{tW|Ou7Od}VSɺv _R+rψjׯ>pf+&jň1a>Gݞ ?(F<^/d˾#L$Zcމsd+Ѓ)/$RCL+UtӲg S ˹N64d`NeJ.#8:db9br*失0jyLM S,ș<|Ԩy<"p?zH@%1z$)t^q4m|.޽eM|xR $A9?6,+v^> lJ؄r}f&|dg#3o+^D~N\?GSGOM ˑaN|#_R|!Nm[GJa% v>h9̥ 7/sVf>tZ1RЦ'Zw?2|Px:}a<ۥ jUFC[!1_8b,tepI ^AJ'Ê:NiGh4ͤ_|c1 ~I-@LCwtfJR-WZ&hf qÀ}'@1V=吹Frq= KT.A"ů~sR~kgLsRoFUA\l^~No%-_-<ȓϲ14wծlla&2Rv!-zTk]A/U?ΥSh8ڬWb 5O}٠%m<#Kn m>gME7ry,S z,6$p4:3ܚ$s$eSnAVQˡ5}ߍR?3Al՗kZIPdElq֛xw]?$qp*+Oxz.zn$t4{IlfhhR |_ [Eu&G\sN^wN{Fx`yax2 S0թ fa<>3U.:N_KK~,Z Wl0'_HQy %C!GŕH6< oKZD-@[ey)H(Pƈ'y\HMA)Ny\/n?.<|)AZw|p44]\:4>pVH#Cr7a3QodGe7t[J{gـZjW_t n!.41CrVQUBZ>L8= vϭAvknq9|2q2>2P#(>s/9MfFYj K*h XE]%}!ɵ.aBlYg~.]9]Գē5>cWO*3,BY,V< =ImZC)hFJ}zV56{iV[ۛlP% $|Nݦ< sRhb%&gu"Q<3-L@G%Ex6Qɔ_EW0tׄ+T҆+sK'5zJOo51xd̟ {5O6m5Us+^췿z]?'C`N +aK0{}EɉUF+#iLx!KHIdHķ9K2ҏn Bk9Ɩ< )Q\<&^ (v%UN]R<^Y TO;7O7b1"[HωiK[ބ+aIe%ŋ; h)-gf`#pU;^nGDQܥZ?/ywGUYZ$mIIN.ܑ(kw}P{Ҵ;ǃ {EGC X-l^7[$Ŋ-_K~Nc5ЎMC+ܩc2^[C63DhOO󢢱@ߓ'tw-;%сZL| nYۏT/i{iF$wSקgEZ+Y̚),02rǀ<+&{^?ϓXvci hyP24fs fS1?<9thq2yr +{iK-!ۼ^?$}]?H gZVZeXM ?yƻ]gD< R>ꏛ)30s?d$|V{fPG+mC֌\/aVڭZ~\r-!ٴ(J+'4s}N7kZUb4彉a&4XeCY +(6^u"~Њ5X)b`&55YW")&)YASe8@S w6$MqZ3}YtըZ߷s_W7㻜/q-7.]dfto܃ق,~K'<],I}yݵ=9Yn\j8$DԦP=)}WN8u:1CC 0AM:3я&!-+'' .TE=I'Td@K|Us@>C 8b-'HT! :˩$i3=R9E9>>U>f74^"w{{4rιvKM2r7Cpjazo6"}zu* g:-\$~QiPR0R*ӎ;xA)}ލoX-)t^N4Db98Wq-h͙c.Lvt({h -jj>3/ xzyvl^sj6O{X{ڧCx:M Mc_d"Z]ɪ-7#BV53mt*Aq[mPd|A%6;Rb{&̷U-'&zn#9 &$ #j՘"v01Fa%m?@q4(-}K`%d*ͩ. Ck!ɑ20{v(z#`0/Xs 41"NP$a{Cks VB{4jeU~9OrKm)CSڰ C#V)vŏ?n;-y; ~%wSb6"}WRӼXyZ~aq+ bt5i؏mK]zCP`v.]n"<ח0ם o>sm RN*Bm>Cj:~Gh:|80[Oh~=K;iOV)#֙c/AOTbSz] !AZ'p9dE(Bۚv-LR4E-cVZ_鹈6D[ş1 ٴзlcozّԶNaLRlXmAjc=z=PEͼ]e6_o9[Od\ 䫲=)kaڱë+8dKXWɸ6Yi fviT/\[ԍyf(##u(Bir ٿ2Hz i@>!Դl/HN~ }kb$t K)3Kb{OH`<:oPDcn 2 }ϛ:YQe^#tVtt~{0?a0u#żǃ 8&ձ(Elhd9o#ŝBJd%[/[ASΣ4JYQʭ9[4iZ*,wxQ{a #DQ7vV2j(IE;LCSe'eF H+}6͘/aN'U׽(䱆FPTi~(WIjwtGzYa1u2]~25|zRNWӂiw 뿂O@: oiܰ~K?A7:jJծ2'oh0%P?4@~r;q3!LKȵ$X9EMv&꭯2X{c)T6k H 40$+J;!G H-Ty XQɱ#圫&)(\FV&c'C$~AI47M h~uѠb,T&'!KC TD)ڟʟ!:J )czWsnh1;z7?< 坕đi,**]{(zކ %Z2n84HjJĻڴͭ+=i 1tb aR4\&1q=F~sbz;&A1Vߪq|!i&D}H0. )dBiwmw=s4ŭaU=r9/Mop]cP<;򍰹8 7J;}8o??b!t'O`u>Zx79DxKmUH!?y48 *gD}ZN*"ٔ o.X̟24|7~+pZnjk]OX0PRbU'd`Ay]s`̦&*E+4Dz~FpZ5h|T1j+e~hk#Ii0d >y#bѤ[jk\\KucmѬذGO]*NEicm!|-ǕF"cXVwyENO/4DwT ot+=dZUZ&|A%Oz{װ!1ݤ|*5Z7X3:ЖZ.O XTx֟&ǭ?/%}B ♟rDN,VsOx6=HPL|؏LGAo\9Oe- RS{gh ض'Gi:^e5Mmpqv-z32elءEi6ϱuxC?3)k!?@V8' .Lr 6U(!3D*QXfCxkr9&='Hgl>x5`JM#$*]"p u'*'>zHm}K^N9}D=fSo\ǿ P3%l0"fL]j0/U؛q(ʟ+[/ޖS|W^b֥GUeX2}`2fImD|Zuo漏7kC0ggy Vܑ_doͿ!_K R M٤VO.z sA(Cm4O'G,Ga `F+I8˘roR\q<}aG`2j-'0?POŚ59U';PZA=JƧ1#S'QbVNF{1>Czn|V73RÑ⑼ڟڱLm!75ݬj[~肢xDEGXU`5T9+8;ָֺ千֟ n+扚vZhUװ.kmWǬ[KRZ5ꠀcs!R/0}9jG/#+MԱtV{-w|([>nނktݖ<yNu> 3ȸoI:A9i ]G]qB(˾88?7YpQwYCU3ҀZU)yÅ\Hr[UJfAe!qȈ!mˆiB_11LZHޑ Q E͍)"gKnBN灥PAsLɀrű_EB[۸T /"<Սuj:~7kD ebI"xqq~ټ:S+^uyP [ŹnZOCim0I1_Ҧsqm:>Ib1q(&L>h HcA S9 ,bN?vF{*Tȶ*>q&Me+BҾW,FV'oq5Iya*R}QK endstream endobj 71 0 obj << /Length1 1620 /Length2 11407 /Length3 0 /Length 12232 /Filter /FlateDecode >> stream xڭweT\ݖ-!8!(VxwwNp@pwwusk5̽J]M$qs t4-*lPs;Nd;@d͡ a \BBBtGOg /_. ".`k dh@_)ہZ jX@jFy5<r~mB  ` tXjͅK`pqa  /r~.kgsuPsW9:;zؿbd.P3 xͪ.+:6пr_aի^i^Q9<,.v枯_XV3Jt'h0dgŎ}m rU!V.-]_{sK'dʡ}M `'+?5<;= ? K O`;*=@Rdjg? 
֯ v{,P uVu g;0qqrmB>? 9uTեYPmOG&Su_,o6.N'{=xB\>EƿV5:=윜\?`׎тC,_7ί}_yK@i*1YÞ.. oy~J+&=w1t&s|hpV[Xv9LaEz̪#|4)z|C>|rGO{RCІ][pHs7<8yؽMʒB'bN9*i|Y |Fwp@ N }(S>0"?Zkzr+q*Wth@¼.AS 1NѮ^^Td2wвI%A LtvehWo40/ /ݤ;"Sh\]H΋P#K(f~xߔy{宧3@s>wwK[?cD3`INű|jn>0G:Dgac>N@ slvz##Lm+;'Vnq*?RU(aeO Hq,U*z_0{Y'-:#V|mwxi/T#3==1A8OI'%ybVDgQGVؚLn |<&09(̬4Pk"Ō.ګ>㠄NVȧEZa)f$o~1>_H,Xk/FX.ib 곖ET&F</Ab8IӗS9_YS4?>?hk隃 ,^"VĥoN3 vGB~}~Ժ?u@pKPÞ8M~r*!lYw4C!M6/` Dk^S9>n:6$Kl؀wCsZ&_ Ӳѣ7'92\s _]^Q lvxʪj ن?XSqcRŜx]jJSZP71dWM1w)xP4T=^CS1Tf(n&ᙾ2a "%"yIjB5)!)5vcZ6懲YĨ)ƀ{`Yi"`©/x˙Q[ӊycsUO@*^8#n ^]u.V/fr^KZ]}f/ݸ?-05NóvBڦ#Y gPu宷P}~C%C2W$FyuN] E^z##:x]z:>.DI߱pҘk)i0vKrJ+驌WKK:Z= T-gР'8'Y:We7 _'SF/u>y C?,<6*wGçA0*"@<3"Ka TK*MɶoSσ?fܚ{g9 a<ףX9pd-ۍ&5Q~)Rٛ81$8._m335' }BD3x0@-?~.#8k0 J.ZVm0:;>ֳ< t""?r4]< }"&ώѿ{6GA&jj] ph|d}p4rR$ "(T ?{vk~#tDK gwj=Pn-"9A<6|i@w+$' ~d}+YTvn}4~V'SÖ]CGUmpqK*I/n$q,\B؍ '@ .tsLT]0)/*q07V1DzSgi ?j?SrZjԎ_c*n3J oґrn֍ig,B&갔^دzPNZ q81'ju8nr˄& RBn[鹈,HZ =IlAP 9;^{mLŦؖ; pI4:Dc~m2"!$ZxCV:=̅_"UvڻA޲\_al)ǩ2X{Li‘+ԢUP?{?^J3x }س\ q)U*Sp]ӍO,!]}.#²P/&DGbuI]&$ͪj16F|KSSfZ4O;lvXL"nq՛㷲7aY`ŃhdnD^TAiNo?` ˄25䗕Xmx7B_Lc)Mu ̟ݐ\>q!։+QIجK_&x1б%7XԊe,~hZ /fb4 NI9xK3,\BB|~c7t 噟tTф%|)mfihVOΏmoћ!G~@nbMa2o,\pbA/x˛sb#^0:f>r]+?kȸ{&_~g O_{ C-i*ļFAx)T? NN?Tf1fg׬#|9]? QVN &ͣ|ȄEj1o>6:C!m?_Q5T-MʅWq`FoLgG*kaB+4sYr.l?AĆָxllQѻ%a+dCI\7N Fg y0_ Zݒ_(W3[<"eI/* .YD׉OxRcf]Y8U|>qX*EQؠY4+x'M˒͎qt.z&R@ƍ 2LnX NIa_vPƒɷ &EN? ~٢M RELeP]?3@UO!:s@bJ2tc->ZOH(9WOBu3pYdJ*1 D{t*7x_\qj 񲬧2ltd h+J }?@ϫݟ:մb!w,HjY?]x6sSbedz}Q"j[>Sį 2QA ]W?\.g[b _zE#coN5HY|̸cAw gU/TN"06Z/uQg>VM~Xlzr,-5  eݘ.&N5HOQ]/Sj~F`:}o_tce]d=K9n.`i%ȯ'[34CW:Gqc]oa $C|nU[zԾ m)vjؐVl^zeadwL+(f8Cy2ǯJbl79ӁD_<48͏M fdviXPJNZ@_4AUʩq?Ny.lJh &QXn6L]%ӎD]1 hxO=! Jtb~"ON(칧PRv]#Ѓu_t*jy?Q)bb(8o)*;a?18.JG/h{a1EzkjO#E؞0I&j%j5¹!8d:0]+LjO>do9`NMѲԀԶsyaJ`0/ݳW|@@sSL{ۊ[wr!ҎE R)%@%Kr.۽ 9:^d|ۆ{_C%ocH+w=)bS)%Ԝh[Yͮ7@O%',VCnJ%XAԛaZi(OYq1&27}}!ny#AJ/1m֦bq=00|Ycb^Q eGD)V`O]7u'E#m؇ .~گ9. nvZ4A|^`"2|lcWRH/J6g[ԝM?3[{3h1.}־[0«#G*S$LF( E:QfMhq0[d'3 &|& Ա!ҞN ӘH=pn5*? jH+b`MY2I' :U?Ul!Ԃ 2]D3|&hO#'z$Q_E/VE:r'?S! J0lt4%<7gMY˝),V*{M)BN[.]fet]; SbcQ-a%7ȉޫpI);tɊk&cAAl VA j*n`>;8?Rjb;O7%8+yZd[(DЏۉBi23!a|W!)C^3j h8v_ֵbe$d@y1Mϑ!N i2efBP_~>*J~<.n.sA G]B%FR)鄘ޏ $>J:%lx ~,Nc"OLB ɕ.K|=×byiDG409:Qz >Qz~ͼŒU"$EHGQ p H-RFд[f%"Q\C<Һ wإ 8` !m.T!6ͫgE1kKQeyU^qcC+CsrPw A@Jȉ8šed=}FfO2KO0kϒr/7]<> ̍K+L܊v7tØO'}ъ'5TRYdjçF^,G+sQx{5[J CD!%4B򡉱KN{XSZפ d#^l6;A{D㉱=?#Q9 acޖSkٽc4dɝ}Ķb@ }gVnbTy~_%uw>yK F1v&NHtBX-0sϏEEd7tXQ \.?$R? WlG0HPsC+blL> >꽗7M;ٞtvҫab[)4M{U[1,"7r.Xֵ clSg>/7w<:S+fTzݳ,q]*Xg!w9b>om1u#{JB)mqY<4=Jn[n8{GD"{H|5urN{=[E„9qH}38+7\'Λ,럪fi2-2:55J,6Gã=F =w"JLs #=Wux?۵Xj3x8S1Y} w@U]hөW/#$İ+UR^.3ٯwx:1rmR\ vԃnɁ/>i< <]xu -{.5csA/% w)[NW6*#}kHVfx&'/c|-Kݘb2{3pr'cH>1M噞sGLt[b2|oOa mv.a"nTm3v8 }Do8(Ԍo[nIX휴S:[cQwI2u ;-uI9y Wy&Q0IyηS:·1}M=QVX6SIܒy"Z)b4p! 
gdata/tests/0000755000175100001440000000000013115331046012507 5ustar hornikusersgdata/tests/runRUnitTests.R0000644000175100001440000000706013003720415015444 0ustar hornikusers### runRUnitTests.R ###------------------------------------------------------------------------ ### What: Run RUnit tests (the core)- R code ### $Id$ ### Time-stamp: <2008-12-30 12:52:51 ggorjan> ###------------------------------------------------------------------------ ## The setup seems to be quite messy, but it is so to enable use of this in ## several ways as shown bellow. ## "R CMD check" way should be the most authoritative way to run the RUnit ## tests for a developer. RUnit tests are issued during R CMD check of the ## package due to example section of .runRUnitTests() function. If any test ## fails (failure) or if there are any R errors during RUnit testing, R CMD ## check fails.
These are variable values specific for this way: ## - .path DEVEL/PATH/PKG.Rcheck/PKG/unitTests ## - .way function ## ".runRUnitTests()" way from within R after library(PKG) is handy for ## package useRs, since it enables useRs to be sure that all tests pass for ## their installation. This is just a convenient wrapper function to run ## the RUnit testing suite. These are variable values specific for this ## way: ## - .path INSTALL/PATH/PKG/unitTests ## - .way function ## "Shell" way is another possibility mainly for a developer in order to ## skip possibly lengthy R CMD check and perform just RUnit testing with an ## installed version of a pcakage. These are variable values specific for ## this way: ## - .path DEVEL/PATH/PKG/inst/unitTests ## - .way shell ## ## Rscript runRUnitTests.R ## R CMD BATCH runRUnitTests.R ## make ## make all PKG <- 'gdata' if(require("RUnit", quietly=TRUE)) { path <- normalizePath("unitTests") cat("\nRunning unit tests\n") print(list(pkg=PKG, getwd=getwd(), pathToUnitTests=path)) library(package=PKG, character.only=TRUE) testFileRegexp <- "^runit.+\\.[rR]$" ## Debugging echo cat("\nRunning RUnit tests\n") print(list(pkg=PKG, getwd=getwd(), pathToRUnitTests=path)) ## Define tests testSuite <- defineTestSuite(name=paste(PKG, "RUnit testing"), dirs=path, testFileRegexp=testFileRegexp ) ## Run tests <- runTestSuite(testSuite) if(file.access(path, 02) != 0) { ## cannot write to path -> use writable one tdir <- tempfile(paste(PKG, "RUnitTests", sep="_")) dir.create(tdir) pathReport <- file.path(tdir, "report") } else { pathReport <- file.path(path, "report") } ## Print results: printTextProtocol(tests) printTextProtocol(tests, fileName=paste(pathReport, ".txt", sep="")) ## Print HTML Version of results: printHTMLProtocol(tests, fileName=paste(pathReport, ".html", sep="")) cat("\nRUnit reports also written to\n", pathReport, ".(txt|html)\n\n", sep="") ## Return stop() to cause R CMD check stop in case of ## - failures i.e. FALSE to RUnit tests or ## - errors i.e. R errors tmp <- getErrors(tests) if(tmp$nFail > 0 || tmp$nErr > 0) { stop(paste("\n\nRUnit testing failed:\n", " - #test failures: ", tmp$nFail, "\n", " - #R errors: ", tmp$nErr, "\n\n", sep="") ) } } else { cat("R package 'RUnit' cannot be loaded - no unit tests run\n", "for package", PKG,"\n") } ###------------------------------------------------------------------------ ### runRUnitTests.R ends here gdata/tests/test.read.xls.Rout.save0000644000175100001440000013401313115331034017014 0ustar hornikusers R version 3.3.2 (2016-10-31) -- "Sincere Pumpkin Patch" Copyright (C) 2016 The R Foundation for Statistical Computing Platform: x86_64-apple-darwin13.4.0 (64-bit) R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. Type 'license()' or 'licence()' for distribution details. R is a collaborative project with many contributors. Type 'contributors()' for more information and 'citation()' on how to cite R or R packages in publications. Type 'demo()' for some demos, 'help()' for on-line help, or 'help.start()' for an HTML browser interface to help. Type 'q()' to quit R. > library(gdata) gdata: read.xls support for 'XLS' (Excel 97-2004) files ENABLED. gdata: read.xls support for 'XLSX' (Excel 2007+) files ENABLED. Attaching package: 'gdata' The following object is masked from 'package:stats': nobs The following object is masked from 'package:utils': object.size The following object is masked from 'package:base': startsWith > > if ( ! 
'XLSX' %in% xlsFormats() ) + { + try( installXLSXsupport() ) + } > > # iris.xls is included in the gregmisc package for use as an example > xlsfile <- file.path(path.package('gdata'),'xls','iris.xls') > > iris.1 <- read.xls(xlsfile) # defaults to csv format > iris.1 Sepal.Length Sepal.Width Petal.Length Petal.Width Species 1 5.1 3.5 1.4 0.2 setosa 2 4.9 3.0 1.4 0.2 setosa 3 4.7 3.2 1.3 0.2 setosa 4 4.6 3.1 1.5 0.2 setosa 5 5.0 3.6 1.4 0.2 setosa 6 5.4 3.9 1.7 0.4 setosa 7 4.6 3.4 1.4 0.3 setosa 8 5.0 3.4 1.5 0.2 setosa 9 4.4 2.9 1.4 0.2 setosa 10 4.9 3.1 1.5 0.1 setosa 11 5.4 3.7 1.5 0.2 setosa 12 4.8 3.4 1.6 0.2 setosa 13 4.8 3.0 1.4 0.1 setosa 14 4.3 3.0 1.1 0.1 setosa 15 5.8 4.0 1.2 0.2 setosa 16 5.7 4.4 1.5 0.4 setosa 17 5.4 3.9 1.3 0.4 setosa 18 5.1 3.5 1.4 0.3 setosa 19 5.7 3.8 1.7 0.3 setosa 20 5.1 3.8 1.5 0.3 setosa 21 5.4 3.4 1.7 0.2 setosa 22 5.1 3.7 1.5 0.4 setosa 23 4.6 3.6 1.0 0.2 setosa 24 5.1 3.3 1.7 0.5 setosa 25 4.8 3.4 1.9 0.2 setosa 26 5.0 3.0 1.6 0.2 setosa 27 5.0 3.4 1.6 0.4 setosa 28 5.2 3.5 1.5 0.2 setosa 29 5.2 3.4 1.4 0.2 setosa 30 4.7 3.2 1.6 0.2 setosa 31 4.8 3.1 1.6 0.2 setosa 32 5.4 3.4 1.5 0.4 setosa 33 5.2 4.1 1.5 0.1 setosa 34 5.5 4.2 1.4 0.2 setosa 35 4.9 3.1 1.5 0.2 setosa 36 5.0 3.2 1.2 0.2 setosa 37 5.5 3.5 1.3 0.2 setosa 38 4.9 3.6 1.4 0.1 setosa 39 4.4 3.0 1.3 0.2 setosa 40 5.1 3.4 1.5 0.2 setosa 41 5.0 3.5 1.3 0.3 setosa 42 4.5 2.3 1.3 0.3 setosa 43 4.4 3.2 1.3 0.2 setosa 44 5.0 3.5 1.6 0.6 setosa 45 5.1 3.8 1.9 0.4 setosa 46 4.8 3.0 1.4 0.3 setosa 47 5.1 3.8 1.6 0.2 setosa 48 4.6 3.2 1.4 0.2 setosa 49 5.3 3.7 1.5 0.2 setosa 50 5.0 3.3 1.4 0.2 setosa 51 7.0 3.2 4.7 1.4 versicolor 52 6.4 3.2 4.5 1.5 versicolor 53 6.9 3.1 4.9 1.5 versicolor 54 5.5 2.3 4.0 1.3 versicolor 55 6.5 2.8 4.6 1.5 versicolor 56 5.7 2.8 4.5 1.3 versicolor 57 6.3 3.3 4.7 1.6 versicolor 58 4.9 2.4 3.3 1.0 versicolor 59 6.6 2.9 4.6 1.3 versicolor 60 5.2 2.7 3.9 1.4 versicolor 61 5.0 2.0 3.5 1.0 versicolor 62 5.9 3.0 4.2 1.5 versicolor 63 6.0 2.2 4.0 1.0 versicolor 64 6.1 2.9 4.7 1.4 versicolor 65 5.6 2.9 3.6 1.3 versicolor 66 6.7 3.1 4.4 1.4 versicolor 67 5.6 3.0 4.5 1.5 versicolor 68 5.8 2.7 4.1 1.0 versicolor 69 6.2 2.2 4.5 1.5 versicolor 70 5.6 2.5 3.9 1.1 versicolor 71 5.9 3.2 4.8 1.8 versicolor 72 6.1 2.8 4.0 1.3 versicolor 73 6.3 2.5 4.9 1.5 versicolor 74 6.1 2.8 4.7 1.2 versicolor 75 6.4 2.9 4.3 1.3 versicolor 76 6.6 3.0 4.4 1.4 versicolor 77 6.8 2.8 4.8 1.4 versicolor 78 6.7 3.0 5.0 1.7 versicolor 79 6.0 2.9 4.5 1.5 versicolor 80 5.7 2.6 3.5 1.0 versicolor 81 5.5 2.4 3.8 1.1 versicolor 82 5.5 2.4 3.7 1.0 versicolor 83 5.8 2.7 3.9 1.2 versicolor 84 6.0 2.7 5.1 1.6 versicolor 85 5.4 3.0 4.5 1.5 versicolor 86 6.0 3.4 4.5 1.6 versicolor 87 6.7 3.1 4.7 1.5 versicolor 88 6.3 2.3 4.4 1.3 versicolor 89 5.6 3.0 4.1 1.3 versicolor 90 5.5 2.5 4.0 1.3 versicolor 91 5.5 2.6 4.4 1.2 versicolor 92 6.1 3.0 4.6 1.4 versicolor 93 5.8 2.6 4.0 1.2 versicolor 94 5.0 2.3 3.3 1.0 versicolor 95 5.6 2.7 4.2 1.3 versicolor 96 5.7 3.0 4.2 1.2 versicolor 97 5.7 2.9 4.2 1.3 versicolor 98 6.2 2.9 4.3 1.3 versicolor 99 5.1 2.5 3.0 1.1 versicolor 100 5.7 2.8 4.1 1.3 versicolor 101 6.3 3.3 6.0 2.5 virginica 102 5.8 2.7 5.1 1.9 virginica 103 7.1 3.0 5.9 2.1 virginica 104 6.3 2.9 5.6 1.8 virginica 105 6.5 3.0 5.8 2.2 virginica 106 7.6 3.0 6.6 2.1 virginica 107 4.9 2.5 4.5 1.7 virginica 108 7.3 2.9 6.3 1.8 virginica 109 6.7 2.5 5.8 1.8 virginica 110 7.2 3.6 6.1 2.5 virginica 111 6.5 3.2 5.1 2.0 virginica 112 6.4 2.7 5.3 1.9 virginica 113 6.8 3.0 5.5 2.1 virginica 114 5.7 2.5 5.0 2.0 virginica 115 5.8 2.8 5.1 
2.4 virginica 116 6.4 3.2 5.3 2.3 virginica 117 6.5 3.0 5.5 1.8 virginica 118 7.7 3.8 6.7 2.2 virginica 119 7.7 2.6 6.9 2.3 virginica 120 6.0 2.2 5.0 1.5 virginica 121 6.9 3.2 5.7 2.3 virginica 122 5.6 2.8 4.9 2.0 virginica 123 7.7 2.8 6.7 2.0 virginica 124 6.3 2.7 4.9 1.8 virginica 125 6.7 3.3 5.7 2.1 virginica 126 7.2 3.2 6.0 1.8 virginica 127 6.2 2.8 4.8 1.8 virginica 128 6.1 3.0 4.9 1.8 virginica 129 6.4 2.8 5.6 2.1 virginica 130 7.2 3.0 5.8 1.6 virginica 131 7.4 2.8 6.1 1.9 virginica 132 7.9 3.8 6.4 2.0 virginica 133 6.4 2.8 5.6 2.2 virginica 134 6.3 2.8 5.1 1.5 virginica 135 6.1 2.6 5.6 1.4 virginica 136 7.7 3.0 6.1 2.3 virginica 137 6.3 3.4 5.6 2.4 virginica 138 6.4 3.1 5.5 1.8 virginica 139 6.0 3.0 4.8 1.8 virginica 140 6.9 3.1 5.4 2.1 virginica 141 6.7 3.1 5.6 2.4 virginica 142 6.9 3.1 5.1 2.3 virginica 143 5.8 2.7 5.1 1.9 virginica 144 6.8 3.2 5.9 2.3 virginica 145 6.7 3.3 5.7 2.5 virginica 146 6.7 3.0 5.2 2.3 virginica 147 6.3 2.5 5.0 1.9 virginica 148 6.5 3.0 5.2 2.0 virginica 149 6.2 3.4 5.4 2.3 virginica 150 5.9 3.0 5.1 1.8 virginica > > iris.2 <- read.xls(xlsfile,method="csv") # specify csv format > iris.2 Sepal.Length Sepal.Width Petal.Length Petal.Width Species 1 5.1 3.5 1.4 0.2 setosa 2 4.9 3.0 1.4 0.2 setosa 3 4.7 3.2 1.3 0.2 setosa 4 4.6 3.1 1.5 0.2 setosa 5 5.0 3.6 1.4 0.2 setosa 6 5.4 3.9 1.7 0.4 setosa 7 4.6 3.4 1.4 0.3 setosa 8 5.0 3.4 1.5 0.2 setosa 9 4.4 2.9 1.4 0.2 setosa 10 4.9 3.1 1.5 0.1 setosa 11 5.4 3.7 1.5 0.2 setosa 12 4.8 3.4 1.6 0.2 setosa 13 4.8 3.0 1.4 0.1 setosa 14 4.3 3.0 1.1 0.1 setosa 15 5.8 4.0 1.2 0.2 setosa 16 5.7 4.4 1.5 0.4 setosa 17 5.4 3.9 1.3 0.4 setosa 18 5.1 3.5 1.4 0.3 setosa 19 5.7 3.8 1.7 0.3 setosa 20 5.1 3.8 1.5 0.3 setosa 21 5.4 3.4 1.7 0.2 setosa 22 5.1 3.7 1.5 0.4 setosa 23 4.6 3.6 1.0 0.2 setosa 24 5.1 3.3 1.7 0.5 setosa 25 4.8 3.4 1.9 0.2 setosa 26 5.0 3.0 1.6 0.2 setosa 27 5.0 3.4 1.6 0.4 setosa 28 5.2 3.5 1.5 0.2 setosa 29 5.2 3.4 1.4 0.2 setosa 30 4.7 3.2 1.6 0.2 setosa 31 4.8 3.1 1.6 0.2 setosa 32 5.4 3.4 1.5 0.4 setosa 33 5.2 4.1 1.5 0.1 setosa 34 5.5 4.2 1.4 0.2 setosa 35 4.9 3.1 1.5 0.2 setosa 36 5.0 3.2 1.2 0.2 setosa 37 5.5 3.5 1.3 0.2 setosa 38 4.9 3.6 1.4 0.1 setosa 39 4.4 3.0 1.3 0.2 setosa 40 5.1 3.4 1.5 0.2 setosa 41 5.0 3.5 1.3 0.3 setosa 42 4.5 2.3 1.3 0.3 setosa 43 4.4 3.2 1.3 0.2 setosa 44 5.0 3.5 1.6 0.6 setosa 45 5.1 3.8 1.9 0.4 setosa 46 4.8 3.0 1.4 0.3 setosa 47 5.1 3.8 1.6 0.2 setosa 48 4.6 3.2 1.4 0.2 setosa 49 5.3 3.7 1.5 0.2 setosa 50 5.0 3.3 1.4 0.2 setosa 51 7.0 3.2 4.7 1.4 versicolor 52 6.4 3.2 4.5 1.5 versicolor 53 6.9 3.1 4.9 1.5 versicolor 54 5.5 2.3 4.0 1.3 versicolor 55 6.5 2.8 4.6 1.5 versicolor 56 5.7 2.8 4.5 1.3 versicolor 57 6.3 3.3 4.7 1.6 versicolor 58 4.9 2.4 3.3 1.0 versicolor 59 6.6 2.9 4.6 1.3 versicolor 60 5.2 2.7 3.9 1.4 versicolor 61 5.0 2.0 3.5 1.0 versicolor 62 5.9 3.0 4.2 1.5 versicolor 63 6.0 2.2 4.0 1.0 versicolor 64 6.1 2.9 4.7 1.4 versicolor 65 5.6 2.9 3.6 1.3 versicolor 66 6.7 3.1 4.4 1.4 versicolor 67 5.6 3.0 4.5 1.5 versicolor 68 5.8 2.7 4.1 1.0 versicolor 69 6.2 2.2 4.5 1.5 versicolor 70 5.6 2.5 3.9 1.1 versicolor 71 5.9 3.2 4.8 1.8 versicolor 72 6.1 2.8 4.0 1.3 versicolor 73 6.3 2.5 4.9 1.5 versicolor 74 6.1 2.8 4.7 1.2 versicolor 75 6.4 2.9 4.3 1.3 versicolor 76 6.6 3.0 4.4 1.4 versicolor 77 6.8 2.8 4.8 1.4 versicolor 78 6.7 3.0 5.0 1.7 versicolor 79 6.0 2.9 4.5 1.5 versicolor 80 5.7 2.6 3.5 1.0 versicolor 81 5.5 2.4 3.8 1.1 versicolor 82 5.5 2.4 3.7 1.0 versicolor 83 5.8 2.7 3.9 1.2 versicolor 84 6.0 2.7 5.1 1.6 versicolor 85 5.4 3.0 4.5 1.5 versicolor 86 6.0 3.4 4.5 
1.6 versicolor 87 6.7 3.1 4.7 1.5 versicolor 88 6.3 2.3 4.4 1.3 versicolor 89 5.6 3.0 4.1 1.3 versicolor 90 5.5 2.5 4.0 1.3 versicolor 91 5.5 2.6 4.4 1.2 versicolor 92 6.1 3.0 4.6 1.4 versicolor 93 5.8 2.6 4.0 1.2 versicolor 94 5.0 2.3 3.3 1.0 versicolor 95 5.6 2.7 4.2 1.3 versicolor 96 5.7 3.0 4.2 1.2 versicolor 97 5.7 2.9 4.2 1.3 versicolor 98 6.2 2.9 4.3 1.3 versicolor 99 5.1 2.5 3.0 1.1 versicolor 100 5.7 2.8 4.1 1.3 versicolor 101 6.3 3.3 6.0 2.5 virginica 102 5.8 2.7 5.1 1.9 virginica 103 7.1 3.0 5.9 2.1 virginica 104 6.3 2.9 5.6 1.8 virginica 105 6.5 3.0 5.8 2.2 virginica 106 7.6 3.0 6.6 2.1 virginica 107 4.9 2.5 4.5 1.7 virginica 108 7.3 2.9 6.3 1.8 virginica 109 6.7 2.5 5.8 1.8 virginica 110 7.2 3.6 6.1 2.5 virginica 111 6.5 3.2 5.1 2.0 virginica 112 6.4 2.7 5.3 1.9 virginica 113 6.8 3.0 5.5 2.1 virginica 114 5.7 2.5 5.0 2.0 virginica 115 5.8 2.8 5.1 2.4 virginica 116 6.4 3.2 5.3 2.3 virginica 117 6.5 3.0 5.5 1.8 virginica 118 7.7 3.8 6.7 2.2 virginica 119 7.7 2.6 6.9 2.3 virginica 120 6.0 2.2 5.0 1.5 virginica 121 6.9 3.2 5.7 2.3 virginica 122 5.6 2.8 4.9 2.0 virginica 123 7.7 2.8 6.7 2.0 virginica 124 6.3 2.7 4.9 1.8 virginica 125 6.7 3.3 5.7 2.1 virginica 126 7.2 3.2 6.0 1.8 virginica 127 6.2 2.8 4.8 1.8 virginica 128 6.1 3.0 4.9 1.8 virginica 129 6.4 2.8 5.6 2.1 virginica 130 7.2 3.0 5.8 1.6 virginica 131 7.4 2.8 6.1 1.9 virginica 132 7.9 3.8 6.4 2.0 virginica 133 6.4 2.8 5.6 2.2 virginica 134 6.3 2.8 5.1 1.5 virginica 135 6.1 2.6 5.6 1.4 virginica 136 7.7 3.0 6.1 2.3 virginica 137 6.3 3.4 5.6 2.4 virginica 138 6.4 3.1 5.5 1.8 virginica 139 6.0 3.0 4.8 1.8 virginica 140 6.9 3.1 5.4 2.1 virginica 141 6.7 3.1 5.6 2.4 virginica 142 6.9 3.1 5.1 2.3 virginica 143 5.8 2.7 5.1 1.9 virginica 144 6.8 3.2 5.9 2.3 virginica 145 6.7 3.3 5.7 2.5 virginica 146 6.7 3.0 5.2 2.3 virginica 147 6.3 2.5 5.0 1.9 virginica 148 6.5 3.0 5.2 2.0 virginica 149 6.2 3.4 5.4 2.3 virginica 150 5.9 3.0 5.1 1.8 virginica > > iris.3 <- read.xls(xlsfile,method="tab") # specify tab format > iris.3 Sepal.Length Sepal.Width Petal.Length Petal.Width Species 1 5.1 3.5 1.4 0.2 setosa 2 4.9 3.0 1.4 0.2 setosa 3 4.7 3.2 1.3 0.2 setosa 4 4.6 3.1 1.5 0.2 setosa 5 5.0 3.6 1.4 0.2 setosa 6 5.4 3.9 1.7 0.4 setosa 7 4.6 3.4 1.4 0.3 setosa 8 5.0 3.4 1.5 0.2 setosa 9 4.4 2.9 1.4 0.2 setosa 10 4.9 3.1 1.5 0.1 setosa 11 5.4 3.7 1.5 0.2 setosa 12 4.8 3.4 1.6 0.2 setosa 13 4.8 3.0 1.4 0.1 setosa 14 4.3 3.0 1.1 0.1 setosa 15 5.8 4.0 1.2 0.2 setosa 16 5.7 4.4 1.5 0.4 setosa 17 5.4 3.9 1.3 0.4 setosa 18 5.1 3.5 1.4 0.3 setosa 19 5.7 3.8 1.7 0.3 setosa 20 5.1 3.8 1.5 0.3 setosa 21 5.4 3.4 1.7 0.2 setosa 22 5.1 3.7 1.5 0.4 setosa 23 4.6 3.6 1.0 0.2 setosa 24 5.1 3.3 1.7 0.5 setosa 25 4.8 3.4 1.9 0.2 setosa 26 5.0 3.0 1.6 0.2 setosa 27 5.0 3.4 1.6 0.4 setosa 28 5.2 3.5 1.5 0.2 setosa 29 5.2 3.4 1.4 0.2 setosa 30 4.7 3.2 1.6 0.2 setosa 31 4.8 3.1 1.6 0.2 setosa 32 5.4 3.4 1.5 0.4 setosa 33 5.2 4.1 1.5 0.1 setosa 34 5.5 4.2 1.4 0.2 setosa 35 4.9 3.1 1.5 0.2 setosa 36 5.0 3.2 1.2 0.2 setosa 37 5.5 3.5 1.3 0.2 setosa 38 4.9 3.6 1.4 0.1 setosa 39 4.4 3.0 1.3 0.2 setosa 40 5.1 3.4 1.5 0.2 setosa 41 5.0 3.5 1.3 0.3 setosa 42 4.5 2.3 1.3 0.3 setosa 43 4.4 3.2 1.3 0.2 setosa 44 5.0 3.5 1.6 0.6 setosa 45 5.1 3.8 1.9 0.4 setosa 46 4.8 3.0 1.4 0.3 setosa 47 5.1 3.8 1.6 0.2 setosa 48 4.6 3.2 1.4 0.2 setosa 49 5.3 3.7 1.5 0.2 setosa 50 5.0 3.3 1.4 0.2 setosa 51 7.0 3.2 4.7 1.4 versicolor 52 6.4 3.2 4.5 1.5 versicolor 53 6.9 3.1 4.9 1.5 versicolor 54 5.5 2.3 4.0 1.3 versicolor 55 6.5 2.8 4.6 1.5 versicolor 56 5.7 2.8 4.5 1.3 versicolor 57 6.3 3.3 4.7 
1.6 versicolor 58 4.9 2.4 3.3 1.0 versicolor 59 6.6 2.9 4.6 1.3 versicolor 60 5.2 2.7 3.9 1.4 versicolor 61 5.0 2.0 3.5 1.0 versicolor 62 5.9 3.0 4.2 1.5 versicolor 63 6.0 2.2 4.0 1.0 versicolor 64 6.1 2.9 4.7 1.4 versicolor 65 5.6 2.9 3.6 1.3 versicolor 66 6.7 3.1 4.4 1.4 versicolor 67 5.6 3.0 4.5 1.5 versicolor 68 5.8 2.7 4.1 1.0 versicolor 69 6.2 2.2 4.5 1.5 versicolor 70 5.6 2.5 3.9 1.1 versicolor 71 5.9 3.2 4.8 1.8 versicolor 72 6.1 2.8 4.0 1.3 versicolor 73 6.3 2.5 4.9 1.5 versicolor 74 6.1 2.8 4.7 1.2 versicolor 75 6.4 2.9 4.3 1.3 versicolor 76 6.6 3.0 4.4 1.4 versicolor 77 6.8 2.8 4.8 1.4 versicolor 78 6.7 3.0 5.0 1.7 versicolor 79 6.0 2.9 4.5 1.5 versicolor 80 5.7 2.6 3.5 1.0 versicolor 81 5.5 2.4 3.8 1.1 versicolor 82 5.5 2.4 3.7 1.0 versicolor 83 5.8 2.7 3.9 1.2 versicolor 84 6.0 2.7 5.1 1.6 versicolor 85 5.4 3.0 4.5 1.5 versicolor 86 6.0 3.4 4.5 1.6 versicolor 87 6.7 3.1 4.7 1.5 versicolor 88 6.3 2.3 4.4 1.3 versicolor 89 5.6 3.0 4.1 1.3 versicolor 90 5.5 2.5 4.0 1.3 versicolor 91 5.5 2.6 4.4 1.2 versicolor 92 6.1 3.0 4.6 1.4 versicolor 93 5.8 2.6 4.0 1.2 versicolor 94 5.0 2.3 3.3 1.0 versicolor 95 5.6 2.7 4.2 1.3 versicolor 96 5.7 3.0 4.2 1.2 versicolor 97 5.7 2.9 4.2 1.3 versicolor 98 6.2 2.9 4.3 1.3 versicolor 99 5.1 2.5 3.0 1.1 versicolor 100 5.7 2.8 4.1 1.3 versicolor 101 6.3 3.3 6.0 2.5 virginica 102 5.8 2.7 5.1 1.9 virginica 103 7.1 3.0 5.9 2.1 virginica 104 6.3 2.9 5.6 1.8 virginica 105 6.5 3.0 5.8 2.2 virginica 106 7.6 3.0 6.6 2.1 virginica 107 4.9 2.5 4.5 1.7 virginica 108 7.3 2.9 6.3 1.8 virginica 109 6.7 2.5 5.8 1.8 virginica 110 7.2 3.6 6.1 2.5 virginica 111 6.5 3.2 5.1 2.0 virginica 112 6.4 2.7 5.3 1.9 virginica 113 6.8 3.0 5.5 2.1 virginica 114 5.7 2.5 5.0 2.0 virginica 115 5.8 2.8 5.1 2.4 virginica 116 6.4 3.2 5.3 2.3 virginica 117 6.5 3.0 5.5 1.8 virginica 118 7.7 3.8 6.7 2.2 virginica 119 7.7 2.6 6.9 2.3 virginica 120 6.0 2.2 5.0 1.5 virginica 121 6.9 3.2 5.7 2.3 virginica 122 5.6 2.8 4.9 2.0 virginica 123 7.7 2.8 6.7 2.0 virginica 124 6.3 2.7 4.9 1.8 virginica 125 6.7 3.3 5.7 2.1 virginica 126 7.2 3.2 6.0 1.8 virginica 127 6.2 2.8 4.8 1.8 virginica 128 6.1 3.0 4.9 1.8 virginica 129 6.4 2.8 5.6 2.1 virginica 130 7.2 3.0 5.8 1.6 virginica 131 7.4 2.8 6.1 1.9 virginica 132 7.9 3.8 6.4 2.0 virginica 133 6.4 2.8 5.6 2.2 virginica 134 6.3 2.8 5.1 1.5 virginica 135 6.1 2.6 5.6 1.4 virginica 136 7.7 3.0 6.1 2.3 virginica 137 6.3 3.4 5.6 2.4 virginica 138 6.4 3.1 5.5 1.8 virginica 139 6.0 3.0 4.8 1.8 virginica 140 6.9 3.1 5.4 2.1 virginica 141 6.7 3.1 5.6 2.4 virginica 142 6.9 3.1 5.1 2.3 virginica 143 5.8 2.7 5.1 1.9 virginica 144 6.8 3.2 5.9 2.3 virginica 145 6.7 3.3 5.7 2.5 virginica 146 6.7 3.0 5.2 2.3 virginica 147 6.3 2.5 5.0 1.9 virginica 148 6.5 3.0 5.2 2.0 virginica 149 6.2 3.4 5.4 2.3 virginica 150 5.9 3.0 5.1 1.8 virginica > > stopifnot(all.equal(iris.1, iris.2)) > stopifnot(all.equal(iris.1, iris.3)) > > exampleFile <- file.path(path.package('gdata'),'xls', + 'ExampleExcelFile.xls') > > exampleFileX <- file.path(path.package('gdata'),'xls', + 'ExampleExcelFile.xlsx') > > # see the number and names of sheets: > sheetCount(exampleFile) [1] 4 > > if(! 
'XLSX' %in% xlsFormats() ) + { + cat("************************************************************\n") + cat("** DIFF IN THIS SECTION IS EXPECTED BECAUSE PERL PACKAGES **\n") + cat("** FOR SUPPORTING XLSX ARE NOT INSTALLED **\n") + cat("************************************************************\n") + } else { + sheetCount(exampleFileX) + } [1] 4 > > > sheetNames(exampleFile) [1] "Sheet First" "Sheet Second" [3] "Sheet with a very long name!" "Sheet with initial text" > > if(! 'XLSX' %in% xlsFormats() ) + { + cat("************************************************************\n") + cat("** DIFF IN THIS SECTION IS EXPECTED BECAUSE PERL PACKAGES **\n") + cat("** FOR SUPPORTING XLSX ARE NOT INSTALLED **\n") + cat("************************************************************\n") + } else { + sheetNames(exampleFileX) + } [1] "Sheet First" "Sheet Second" [3] "Sheet with a very long name!" "Sheet with initial text" > > > example.1 <- read.xls(exampleFile, sheet=1) # default is first worksheet > example.1 A B C 1 1 1 1 2 2 4 8 3 3 9 27 4 4 16 64 5 5 25 125 6 6 36 216 7 7 49 343 > > example.2 <- read.xls(exampleFile, sheet=2) # second worksheet by number > example.2 X D E. F G Factor 1 FirstRow 1 NA NA NA Red 2 SecondRow 2 1 NA NA Green 3 ThirdRow 3 2 1 NA Red 4 FourthRow 4 3 2 1 Black > > example.3 <- read.xls(exampleFile, sheet=3, header=FALSE) # third worksheet by number > example.3 V1 V2 V3 V4 V5 V6 V7 1 1 2001-01-01 01:01:00.00 0.2058182 NA A 2001-01-01 01:01:01.01 2 2 2002-02-02 02:02:00.00 0.2910708 NA B 2002-02-02 02:02:02.02 3 3 2003-03-03 03:03:00.00 0.3564875 -0.84147098 C 2003-03-03 03:03:03.03 4 4 2004-04-04 04:04:00.00 0.4116363 0.70807342 2004-04-04 04:04:04.04 5 5 2005-05-05 05:05:00.00 0.4602234 0.50136797 A 2005-05-05 05:05:05.05 6 6 2006-06-06 06:06:00.00 NA 0.25136984 B 2006-06-06 06:06:06.06 7 7 2007-07-07 07:07:00.00 0.5445436 0.06318679 B 2007-07-07 07:07:07.07 8 8 2008-08-08 08:08:00.00 0.5821416 NA C 2008-08-08 08:08:08.08 9 9 2009-09-09 09:09:00.00 0.6174545 0.00000000 A 2009-09-09 09:09:09.09 10 10 2010-10-10 10:10:00.00 0.6508541 0.00000000 A 2010-10-10 10:10:10.10 V8 1 36892.04 2 37289.08 3 37683.13 4 38081.17 5 38477.21 6 38874.25 7 39270.30 8 39668.34 9 40065.38 10 40461.42 > > example.4 <- read.xls(exampleFile, sheet=4, header=FALSE) # fourth worksheet by number > example.4 V1 1 This line contains text that would need to be skipped to get to the data 2 3 4 5 6 7 V2 V3 V4 V5 V6 V7 1 2 This line too! 3 D E F G Factor 4 FirstRow 1 Red 5 SecondRow 2 1 Green 6 ThirdRow 3 2 1 Red 7 FourthRow 4 3 2 1 Black > > if(! 'XLSX' %in% xlsFormats() ) + { + cat("************************************************************\n") + cat("** DIFF IN THIS SECTION IS EXPECTED BECAUSE PERL PACKAGES **\n") + cat("** FOR SUPPORTING XLSX ARE NOT INSTALLED **\n") + cat("************************************************************\n") + } else { + example.x.1 <- read.xls(exampleFileX, sheet=1) # default is first worksheet + print(example.x.1) + + example.x.2 <- read.xls(exampleFileX, sheet=2) # second worksheet by number + print(example.x.2) + + example.x.3 <- read.xls(exampleFileX, sheet=3, header=FALSE) # third worksheet by number + print(example.x.3) + + example.x.4 <- read.xls(exampleFileX, sheet=4, header=FALSE) # fourth worksheet by number + print(example.x.4) + + data <- read.xls(exampleFileX, sheet="Sheet Second") # and by name + print(data) + + # load the third worksheet, skipping the first two non-data lines... 
+ data <- read.xls(exampleFileX, sheet="Sheet with initial text", skip=2) + print(data) + } A B C 1 1 1 1 2 2 4 8 3 3 9 27 4 4 16 64 5 5 25 125 6 6 36 216 7 7 49 343 X D E. F G Factor 1 FirstRow 1 NA NA NA Red 2 SecondRow 2 1 NA NA Green 3 ThirdRow 3 2 1 NA Red 4 FourthRow 4 3 2 1 Black V1 V2 V3 V4 V5 V6 V7 1 1 2001-01-01 01:01:00.00 0.2058182 NA A 2001-01-01 01:01:01.01 2 2 2002-02-02 02:02:00.00 0.2910708 NA B 2002-02-02 02:02:02.02 3 3 2003-03-03 03:03:00.00 0.3564875 -0.84147098 C 2003-03-03 03:03:03.03 4 4 2004-04-04 04:04:00.00 0.4116363 0.70807342 2004-04-04 04:04:04.04 5 5 2005-05-05 05:05:00.00 0.4602234 0.50136797 A 2005-05-05 05:05:05.05 6 6 2006-06-06 06:06:00.00 NA 0.25136984 B 2006-06-06 06:06:06.06 7 7 2007-07-07 07:07:00.00 0.5445436 0.06318679 B 2007-07-07 07:07:07.07 8 8 2008-08-08 08:08:00.00 0.5821416 NA C 2008-08-08 08:08:08.08 9 9 2009-09-09 09:09:00.00 0.6174545 0.00000000 A 2009-09-09 09:09:09.09 10 10 2010-10-10 10:10:00.00 0.6508541 0.00000000 A 2010-10-10 10:10:10.10 V8 1 36892.04 2 37289.08 3 37683.13 4 38081.17 5 38477.21 6 38874.25 7 39270.30 8 39668.34 9 40065.38 10 40461.42 V1 1 This line contains text that would need to be skipped to get to the data 2 3 4 5 6 7 V2 V3 V4 V5 V6 V7 1 2 This line too! 3 D E F G Factor 4 FirstRow 1 Red 5 SecondRow 2 1 Green 6 ThirdRow 3 2 1 Red 7 FourthRow 4 3 2 1 Black X D E. F G Factor 1 FirstRow 1 NA NA NA Red 2 SecondRow 2 1 NA NA Green 3 ThirdRow 3 2 1 NA Red 4 FourthRow 4 3 2 1 Black X X.1 D E. F G Factor 1 NA FirstRow 1 NA NA NA Red 2 NA SecondRow 2 1 NA NA Green 3 NA ThirdRow 3 2 1 NA Red 4 NA FourthRow 4 3 2 1 Black > > ## Check handling of skip.blank.lines=FALSE > > example.skip <- read.xls(exampleFile, sheet=2, blank.lines.skip=FALSE) > example.skip X D E. F G Factor 1 FirstRow 1 NA NA NA Red 2 SecondRow 2 1 NA NA Green 3 NA NA NA NA 4 ThirdRow 3 2 1 NA Red 5 FourthRow 4 3 2 1 Black > > if(! 'XLSX' %in% xlsFormats() ) + { + cat("************************************************************\n") + cat("** DIFF IN THIS SECTION IS EXPECTED BECAUSE PERL PACKAGES **\n") + cat("** FOR SUPPORTING XLSX ARE NOT INSTALLED **\n") + cat("************************************************************\n") + } else { + example.x.skip <- read.xls(exampleFileX, sheet=2, blank.lines.skip=FALSE) + example.x.skip + } X D E. F G Factor 1 FirstRow 1 NA NA NA Red 2 SecondRow 2 1 NA NA Green 3 NA NA NA NA 4 ThirdRow 3 2 1 NA Red 5 FourthRow 4 3 2 1 Black > > > > ## Check handing of fileEncoding for latin-1 characters > > latin1File <- file.path(path.package('gdata'),'xls', 'latin-1.xls' ) > latin1FileX <- file.path(path.package('gdata'),'xls', 'latin-1.xlsx') > > if(.Platform$OS.type=="unix") + { + example.latin1 <- read.xls(latin1File, + fileEncoding='latin1', + encoding='latin1', + stringsAsFactors=FALSE) + } else { + example.latin1 <- read.xls(latin1File, + #fileEncoding='latin1', + encoding='latin1', + stringsAsFactors=FALSE) + } > > if(! 
'XLSX' %in% xlsFormats() ) + { + cat("************************************************************\n") + cat("** DIFF IN THIS SECTION IS EXPECTED BECAUSE PERL PACKAGES **\n") + cat("** FOR SUPPORTING XLSX ARE NOT INSTALLED **\n") + cat("************************************************************\n") + } else { + if(.Platform$OS.type=="unix") + { + example.latin1.x <- read.xls(latin1FileX, + fileEncoding='latin1', + encoding='latin1', + stringsAsFactors=FALSE) + } else { + example.latin1.x <- read.xls(latin1FileX, + #fileEncoding='latin1', + encoding='latin1', + stringsAsFactors=FALSE) + } + } > > > ## Check handling of very wide file > > wideFile <- file.path(path.package('gdata'),'xls', 'wide.xls' ) > wideFileX <- file.path(path.package('gdata'),'xls', 'wide.xlsx') > > example.wide <- read.xls(wideFile) > stopifnot(dim(example.wide)==c(0,256)) > > if( !'XLSX' %in% xlsFormats() ) + { + cat("************************************************************\n") + cat("** DIFF IN THIS SECTION IS EXPECTED BECAUSE PERL PACKAGES **\n") + cat("** FOR SUPPORTING XLSX ARE NOT INSTALLED **\n") + cat("************************************************************\n") + } else { + example.wide.x <- read.xls(wideFileX) + stopifnot(dim(example.wide.x)==c(0,16384)) + } > > ## Check handling of files with dates calulcated relative to > ## 1900-01-01 and 1904-01-01 > > file.1900 <- file.path(path.package('gdata'),'xls', 'ExampleExcelFile_1900.xls' ) > file.1904 <- file.path(path.package('gdata'),'xls', 'ExampleExcelFile_1904.xls' ) > fileX.1900 <- file.path(path.package('gdata'),'xls', 'ExampleExcelFile_1900.xlsx') > fileX.1904 <- file.path(path.package('gdata'),'xls', 'ExampleExcelFile_1904.xlsx') > > example.1900 <- read.xls(file.1900, sheet=3, header=FALSE) > example.1900 V1 V2 V3 V4 V5 V6 V7 1 1 2001-01-01 01:01:00.00 0.2058182 NA A 2001-01-01 01:01:01.01 2 2 2002-02-02 02:02:00.00 0.2910708 NA B 2002-02-02 02:02:02.02 3 3 2003-03-03 03:03:00.00 0.3564875 -0.84147098 C 2003-03-03 03:03:03.03 4 4 2004-04-04 04:04:00.00 0.4116363 0.70807342 2004-04-04 04:04:04.04 5 5 2005-05-05 05:05:00.00 0.4602234 0.50136797 A 2005-05-05 05:05:05.05 6 6 2006-06-06 06:06:00.00 NA 0.25136984 B 2006-06-06 06:06:06.06 7 7 2007-07-07 07:07:00.00 0.5445436 0.06318679 B 2007-07-07 07:07:07.07 8 8 2008-08-08 08:08:00.00 0.5821416 NA C 2008-08-08 08:08:08.08 9 9 2009-09-09 09:09:00.00 0.6174545 0.00000000 A 2009-09-09 09:09:09.09 10 10 2010-10-10 10:10:00.00 0.6508541 0.00000000 A 2010-10-10 10:10:10.10 V8 1 36892.04 2 37289.08 3 37683.13 4 38081.17 5 38477.21 6 38874.25 7 39270.30 8 39668.34 9 40065.38 10 40461.42 > > example.1904 <- read.xls(file.1904, sheet=3, header=FALSE) > example.1904 V1 V2 V3 V4 V5 V6 V7 1 1 2001-01-01 01:01:00.00 0.2058182 NA A 2001-01-01 01:01:01.01 2 2 2002-02-02 02:02:00.00 0.2910708 NA B 2002-02-02 02:02:02.02 3 3 2003-03-03 03:03:00.00 0.3564875 -0.84147098 C 2003-03-03 03:03:03.03 4 4 2004-04-04 04:04:00.00 0.4116363 0.70807342 2004-04-04 04:04:04.04 5 5 2005-05-05 05:05:00.00 0.4602234 0.50136797 A 2005-05-05 05:05:05.05 6 6 2006-06-06 06:06:00.00 NA 0.25136984 B 2006-06-06 06:06:06.06 7 7 2007-07-07 07:07:00.00 0.5445436 0.06318679 B 2007-07-07 07:07:07.07 8 8 2008-08-08 08:08:00.00 0.5821416 NA C 2008-08-08 08:08:08.08 9 9 2009-09-09 09:09:00.00 0.6174545 0.00000000 A 2009-09-09 09:09:09.09 10 10 2010-10-10 10:10:00.00 0.6508541 0.00000000 A 2010-10-10 10:10:10.10 V8 1 35430.04 2 35827.08 3 36221.13 4 36619.17 5 37015.21 6 37412.25 7 37808.30 8 38206.34 9 38603.38 10 38999.42 > > exampleX.1900 <- 
read.xls(file.1900, sheet=3, header=FALSE) > exampleX.1900 V1 V2 V3 V4 V5 V6 V7 1 1 2001-01-01 01:01:00.00 0.2058182 NA A 2001-01-01 01:01:01.01 2 2 2002-02-02 02:02:00.00 0.2910708 NA B 2002-02-02 02:02:02.02 3 3 2003-03-03 03:03:00.00 0.3564875 -0.84147098 C 2003-03-03 03:03:03.03 4 4 2004-04-04 04:04:00.00 0.4116363 0.70807342 2004-04-04 04:04:04.04 5 5 2005-05-05 05:05:00.00 0.4602234 0.50136797 A 2005-05-05 05:05:05.05 6 6 2006-06-06 06:06:00.00 NA 0.25136984 B 2006-06-06 06:06:06.06 7 7 2007-07-07 07:07:00.00 0.5445436 0.06318679 B 2007-07-07 07:07:07.07 8 8 2008-08-08 08:08:00.00 0.5821416 NA C 2008-08-08 08:08:08.08 9 9 2009-09-09 09:09:00.00 0.6174545 0.00000000 A 2009-09-09 09:09:09.09 10 10 2010-10-10 10:10:00.00 0.6508541 0.00000000 A 2010-10-10 10:10:10.10 V8 1 36892.04 2 37289.08 3 37683.13 4 38081.17 5 38477.21 6 38874.25 7 39270.30 8 39668.34 9 40065.38 10 40461.42 > > exampleX.1904 <- read.xls(file.1904, sheet=3, header=FALSE) > exampleX.1904 V1 V2 V3 V4 V5 V6 V7 1 1 2001-01-01 01:01:00.00 0.2058182 NA A 2001-01-01 01:01:01.01 2 2 2002-02-02 02:02:00.00 0.2910708 NA B 2002-02-02 02:02:02.02 3 3 2003-03-03 03:03:00.00 0.3564875 -0.84147098 C 2003-03-03 03:03:03.03 4 4 2004-04-04 04:04:00.00 0.4116363 0.70807342 2004-04-04 04:04:04.04 5 5 2005-05-05 05:05:00.00 0.4602234 0.50136797 A 2005-05-05 05:05:05.05 6 6 2006-06-06 06:06:00.00 NA 0.25136984 B 2006-06-06 06:06:06.06 7 7 2007-07-07 07:07:00.00 0.5445436 0.06318679 B 2007-07-07 07:07:07.07 8 8 2008-08-08 08:08:00.00 0.5821416 NA C 2008-08-08 08:08:08.08 9 9 2009-09-09 09:09:00.00 0.6174545 0.00000000 A 2009-09-09 09:09:09.09 10 10 2010-10-10 10:10:00.00 0.6508541 0.00000000 A 2010-10-10 10:10:10.10 V8 1 35430.04 2 35827.08 3 36221.13 4 36619.17 5 37015.21 6 37412.25 7 37808.30 8 38206.34 9 38603.38 10 38999.42 > > # all colmns should be identical > stopifnot( na.omit(example.1900 == exampleX.1900) ) > stopifnot( na.omit(example.1904 == exampleX.1904) ) > > # column 8 will differ by 1462 due to different date baselines (1900 vs 1904) > stopifnot( na.omit(example.1900 [,-8] == example.1904 [,-8]) ) > stopifnot( na.omit(exampleX.1900[,-8] == exampleX.1904[,-8]) ) > > stopifnot( na.omit(example.1900 [,8] - example.1904 [,8]) == 1462 ) > stopifnot( na.omit(exampleX.1900[,8] - exampleX.1904[,8]) == 1462 ) > > proc.time() user system elapsed 10.455 0.880 12.534 gdata/tests/tests.write.fwf.R0000644000175100001440000000424113003720415015705 0ustar hornikusers### tests.write.fwf.R ###------------------------------------------------------------------------ ### What: Tests for write.fwf ### $Id: tests.write.fwf.R 1300 2008-08-05 11:47:18Z ggorjan $ ### Time-stamp: <2008-08-05 12:25:05 ggorjan> ###------------------------------------------------------------------------ library(gdata) ## --- Test data --- num <- round(c(733070.345678, 1214213.78765456, 553823.798765678, 1085022.8876545678, 571063.88765456, 606718.3876545678, 1053686.6, 971024.187656, 631193.398765456, 879431.1), digits=3) testData <- data.frame(num1=c(1:10, NA), num2=c(NA, seq(from=1, to=5.5, by=0.5)), num3=c(NA, num), int1=c(as.integer(1:4), NA, as.integer(4:9)), fac1=factor(c(NA, letters[1:9], "hjh")), fac2=factor(c(letters[6:15], NA)), cha1=c(letters[17:26], NA), cha2=c(NA, "longer", letters[25:17]), stringsAsFactors=FALSE) levels(testData$fac1) <- c(levels(testData$fac1), "unusedLevel") testData$Date <- as.Date("1900-1-1") testData$Date[2] <- NA testData$POSIXt <- as.POSIXct(strptime("1900-1-1 01:01:01", format="%Y-%m-%d %H:%M:%S")) testData$POSIXt[5] <- NA ## --- 
Tests --- ## Default write.fwf(testData) ## NA should be - write.fwf(x=testData, na="-") ## NA should be -NA- write.fwf(x=testData, na="-NA-") ## Some other separator than space write.fwf(testData[, 1:4], sep="-mySep-") ## Justify right for character columns write.fwf(testData, justify="right") ## Justify right for character columns, track na values write.fwf(testData, justify="right", na="!") ## With quotes write.fwf(testData, quote=TRUE) ## With rownames write.fwf(testData, rownames=TRUE) ## Without colnames write.fwf(testData, colnames=FALSE) ## Without colnames, with rownames write.fwf(testData, colnames=FALSE, rownames=TRUE) ## With rownames and colnames and rowCol write.fwf(testData, rownames=TRUE, rowCol="HI!") ## formatInfo is tested with unit tests ###------------------------------------------------------------------------ ### tests.write.fwf.R ends gdata/tests/test.read.xls.R0000644000175100001440000001554313003720415015336 0ustar hornikuserslibrary(gdata) if ( ! 'XLSX' %in% xlsFormats() ) { try( installXLSXsupport() ) } # iris.xls is included in the gregmisc package for use as an example xlsfile <- file.path(path.package('gdata'),'xls','iris.xls') iris.1 <- read.xls(xlsfile) # defaults to csv format iris.1 iris.2 <- read.xls(xlsfile,method="csv") # specify csv format iris.2 iris.3 <- read.xls(xlsfile,method="tab") # specify tab format iris.3 stopifnot(all.equal(iris.1, iris.2)) stopifnot(all.equal(iris.1, iris.3)) exampleFile <- file.path(path.package('gdata'),'xls', 'ExampleExcelFile.xls') exampleFileX <- file.path(path.package('gdata'),'xls', 'ExampleExcelFile.xlsx') # see the number and names of sheets: sheetCount(exampleFile) if(! 'XLSX' %in% xlsFormats() ) { cat("************************************************************\n") cat("** DIFF IN THIS SECTION IS EXPECTED BECAUSE PERL PACKAGES **\n") cat("** FOR SUPPORTING XLSX ARE NOT INSTALLED **\n") cat("************************************************************\n") } else { sheetCount(exampleFileX) } sheetNames(exampleFile) if(! 'XLSX' %in% xlsFormats() ) { cat("************************************************************\n") cat("** DIFF IN THIS SECTION IS EXPECTED BECAUSE PERL PACKAGES **\n") cat("** FOR SUPPORTING XLSX ARE NOT INSTALLED **\n") cat("************************************************************\n") } else { sheetNames(exampleFileX) } example.1 <- read.xls(exampleFile, sheet=1) # default is first worksheet example.1 example.2 <- read.xls(exampleFile, sheet=2) # second worksheet by number example.2 example.3 <- read.xls(exampleFile, sheet=3, header=FALSE) # third worksheet by number example.3 example.4 <- read.xls(exampleFile, sheet=4, header=FALSE) # fourth worksheet by number example.4 if(! 
'XLSX' %in% xlsFormats() ) { cat("************************************************************\n") cat("** DIFF IN THIS SECTION IS EXPECTED BECAUSE PERL PACKAGES **\n") cat("** FOR SUPPORTING XLSX ARE NOT INSTALLED **\n") cat("************************************************************\n") } else { example.x.1 <- read.xls(exampleFileX, sheet=1) # default is first worksheet print(example.x.1) example.x.2 <- read.xls(exampleFileX, sheet=2) # second worksheet by number print(example.x.2) example.x.3 <- read.xls(exampleFileX, sheet=3, header=FALSE) # third worksheet by number print(example.x.3) example.x.4 <- read.xls(exampleFileX, sheet=4, header=FALSE) # fourth worksheet by number print(example.x.4) data <- read.xls(exampleFileX, sheet="Sheet Second") # and by name print(data) # load the third worksheet, skipping the first two non-data lines... data <- read.xls(exampleFileX, sheet="Sheet with initial text", skip=2) print(data) } ## Check handling of skip.blank.lines=FALSE example.skip <- read.xls(exampleFile, sheet=2, blank.lines.skip=FALSE) example.skip if(! 'XLSX' %in% xlsFormats() ) { cat("************************************************************\n") cat("** DIFF IN THIS SECTION IS EXPECTED BECAUSE PERL PACKAGES **\n") cat("** FOR SUPPORTING XLSX ARE NOT INSTALLED **\n") cat("************************************************************\n") } else { example.x.skip <- read.xls(exampleFileX, sheet=2, blank.lines.skip=FALSE) example.x.skip } ## Check handing of fileEncoding for latin-1 characters latin1File <- file.path(path.package('gdata'),'xls', 'latin-1.xls' ) latin1FileX <- file.path(path.package('gdata'),'xls', 'latin-1.xlsx') if(.Platform$OS.type=="unix") { example.latin1 <- read.xls(latin1File, fileEncoding='latin1', encoding='latin1', stringsAsFactors=FALSE) } else { example.latin1 <- read.xls(latin1File, #fileEncoding='latin1', encoding='latin1', stringsAsFactors=FALSE) } if(! 
'XLSX' %in% xlsFormats() ) { cat("************************************************************\n") cat("** DIFF IN THIS SECTION IS EXPECTED BECAUSE PERL PACKAGES **\n") cat("** FOR SUPPORTING XLSX ARE NOT INSTALLED **\n") cat("************************************************************\n") } else { if(.Platform$OS.type=="unix") { example.latin1.x <- read.xls(latin1FileX, fileEncoding='latin1', encoding='latin1', stringsAsFactors=FALSE) } else { example.latin1.x <- read.xls(latin1FileX, #fileEncoding='latin1', encoding='latin1', stringsAsFactors=FALSE) } } ## Check handling of very wide file wideFile <- file.path(path.package('gdata'),'xls', 'wide.xls' ) wideFileX <- file.path(path.package('gdata'),'xls', 'wide.xlsx') example.wide <- read.xls(wideFile) stopifnot(dim(example.wide)==c(0,256)) if( !'XLSX' %in% xlsFormats() ) { cat("************************************************************\n") cat("** DIFF IN THIS SECTION IS EXPECTED BECAUSE PERL PACKAGES **\n") cat("** FOR SUPPORTING XLSX ARE NOT INSTALLED **\n") cat("************************************************************\n") } else { example.wide.x <- read.xls(wideFileX) stopifnot(dim(example.wide.x)==c(0,16384)) } ## Check handling of files with dates calulcated relative to ## 1900-01-01 and 1904-01-01 file.1900 <- file.path(path.package('gdata'),'xls', 'ExampleExcelFile_1900.xls' ) file.1904 <- file.path(path.package('gdata'),'xls', 'ExampleExcelFile_1904.xls' ) fileX.1900 <- file.path(path.package('gdata'),'xls', 'ExampleExcelFile_1900.xlsx') fileX.1904 <- file.path(path.package('gdata'),'xls', 'ExampleExcelFile_1904.xlsx') example.1900 <- read.xls(file.1900, sheet=3, header=FALSE) example.1900 example.1904 <- read.xls(file.1904, sheet=3, header=FALSE) example.1904 exampleX.1900 <- read.xls(file.1900, sheet=3, header=FALSE) exampleX.1900 exampleX.1904 <- read.xls(file.1904, sheet=3, header=FALSE) exampleX.1904 # all colmns should be identical stopifnot( na.omit(example.1900 == exampleX.1900) ) stopifnot( na.omit(example.1904 == exampleX.1904) ) # column 8 will differ by 1462 due to different date baselines (1900 vs 1904) stopifnot( na.omit(example.1900 [,-8] == example.1904 [,-8]) ) stopifnot( na.omit(exampleX.1900[,-8] == exampleX.1904[,-8]) ) stopifnot( na.omit(example.1900 [,8] - example.1904 [,8]) == 1462 ) stopifnot( na.omit(exampleX.1900[,8] - exampleX.1904[,8]) == 1462 ) gdata/tests/test.humanReadable.R0000644000175100001440000001071513003720415016342 0ustar hornikuserslibrary(gdata) options(humanReadable=FALSE) set.seed(123456) baseSI <- 10 powerSI <- seq(from=0, to=27, by=3) SI0 <- (baseSI)^powerSI k <- length(SI0) - 1 SI1 <- SI0 - SI0 / c(2, runif(n=k, min=1.01, max=5.99)) SI2 <- SI0 + SI0 / c(2, runif(n=k, min=1.01, max=5.99)) baseIEC <- 2 powerIEC <- seq(from=0, to=90, by=10) IEC0 <- (baseIEC)^powerIEC IEC1 <- IEC0 - IEC0 / c(2, runif(n=k, min=1.01, max=5.99)) IEC2 <- IEC0 + IEC0 / c(2, runif(n=k, min=1.01, max=5.99)) # Auto units, specify width cbind(humanReadable(x=SI2, standard="SI", width=7), humanReadable(x=SI2, standard="SI", width=5), humanReadable(x=SI2, standard="SI", width=3), humanReadable(x=IEC2, standard="IEC", width=7), humanReadable(x=IEC2, standard="IEC", width=5), humanReadable(x=IEC2, standard="IEC", width=3), humanReadable(x=IEC2, standard="Unix", width=7), humanReadable(x=IEC2, standard="Unix", width=5), humanReadable(x=IEC2, standard="Unix", width=3)) # Auto units, specify digits cbind(humanReadable(x=SI2, standard="SI", width=NULL, digits=7), humanReadable(x=SI2, standard="SI", width=NULL, 
digits=3), humanReadable(x=SI2, standard="SI", width=NULL, digits=2), humanReadable(x=SI2, standard="SI", width=NULL, digits=1), humanReadable(x=IEC2, standard="IEC", width=NULL, digits=7), humanReadable(x=IEC2, standard="IEC", width=NULL, digits=3), humanReadable(x=IEC2, standard="IEC", width=NULL, digits=2), humanReadable(x=IEC2, standard="IEC", width=NULL, digits=1), humanReadable(x=IEC2, standard="Unix", width=NULL, digits=7), humanReadable(x=IEC2, standard="Unix", width=NULL, digits=3), humanReadable(x=IEC2, standard="Unix", width=NULL, digits=2), humanReadable(x=IEC2, standard="Unix", width=NULL, digits=1)) # Single unit, specify width cbind(humanReadable(x=SI1, units="GB", standard="SI", width=7), humanReadable(x=SI1, units="GB", standard="SI", width=5), humanReadable(x=SI1, units="GB", standard="SI", width=3), humanReadable(x=IEC1, units="GiB", standard="IEC", width=7), humanReadable(x=IEC1, units="GiB", standard="IEC", width=5), humanReadable(x=IEC1, units="GiB", standard="IEC", width=3), humanReadable(x=IEC1, units="G", standard="Unix", width=7), humanReadable(x=IEC1, units="G", standard="Unix", width=5), humanReadable(x=IEC1, units="G", standard="Unix", width=3) ) # Single unit, specify digits cbind(humanReadable(x=SI1, units="GB", standard="SI", width=NULL, digits=7), humanReadable(x=SI1, units="GB", standard="SI", width=NULL, digits=3), humanReadable(x=SI1, units="GB", standard="SI", width=NULL, digits=2), humanReadable(x=SI1, units="GB", standard="SI", width=NULL, digits=1), humanReadable(x=IEC1, units="GiB", standard="IEC", width=NULL, digits=7), humanReadable(x=IEC1, units="GiB", standard="IEC", width=NULL, digits=3), humanReadable(x=IEC1, units="GiB", standard="IEC", width=NULL, digits=2), humanReadable(x=IEC1, units="GiB", standard="IEC", width=NULL, digits=1), humanReadable(x=IEC1, units="G", standard="Unix", width=NULL, digits=7), humanReadable(x=IEC1, units="G", standard="Unix", width=NULL, digits=3), humanReadable(x=IEC1, units="G", standard="Unix", width=NULL, digits=2), humanReadable(x=IEC1, units="G", standard="Unix", width=NULL, digits=1) ) stopifnot( is.object_sizes(as.object_sizes( 2^(1:30) ) ) ) stopifnot( format(as.object_sizes(124)) == "124 bytes") stopifnot( format(as.object_sizes(124e8), units="auto") == "11.5 GiB") stopifnot( format(as.object_sizes(124e8), humanReadable=TRUE) == "11.5 GiB") stopifnot( format(as.object_sizes(124e8), units="bytes") == "1.24e+10 bytes") tools::assertError( as.object_sizes(-1) ) tools::assertError( as.object_sizes('a') ) tools::assertError( as.object_sizes(list()) ) tools::assertError( as.object_sizes(NULL) ) tools::assertError( as.object_sizes(0+1i) ) stopifnot( format(as.object_sizes(1e40) ) == "1e+40 bytes" ) stopifnot( format(as.object_sizes(1e40), units="auto" ) == "8.271806e+15 YiB") stopifnot( format(as.object_sizes(1e40), units="bytes") == "1e+40 bytes" ) stopifnot( format(as.object_sizes(1e40), humanReadable=TRUE) == "8.271806e+15 YiB") stopifnot( format(as.object_sizes(1e40), humanReadable=FALSE) == "1e+40 bytes") options(humanReadable=TRUE) stopifnot( format(as.object_sizes(1e40) ) == "8.271806e+15 YiB") options(humanReadable=FALSE) gdata/tests/test.reorder.factor.R0000644000175100001440000000030113003720415016517 0ustar hornikusers## Test results before and after loading gdata m <- factor(c('a','b','c')) ( m1 <- reorder(m, X=c(3, 2, 1)) ) library(gdata) ( m2 <- reorder(m, X=c(3, 2, 1)) ) stopifnot(identical(m1,m2)) gdata/tests/test.write.fwf.eol.R0000644000175100001440000000025213003720415016276 0ustar 
hornikuserslibrary(gdata) saveto <- tempfile(pattern = "test.txt", tmpdir = tempdir()) write.fwf(x = data.frame(a=1:length(LETTERS), b=LETTERS), file=saveto, eol="\r\n") gdata/tests/test.humanReadable.Rout.save0000644000175100001440000003017713115331034020032 0ustar hornikusers R version 3.3.2 (2016-10-31) -- "Sincere Pumpkin Patch" Copyright (C) 2016 The R Foundation for Statistical Computing Platform: x86_64-apple-darwin13.4.0 (64-bit) R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. Type 'license()' or 'licence()' for distribution details. R is a collaborative project with many contributors. Type 'contributors()' for more information and 'citation()' on how to cite R or R packages in publications. Type 'demo()' for some demos, 'help()' for on-line help, or 'help.start()' for an HTML browser interface to help. Type 'q()' to quit R. > library(gdata) gdata: read.xls support for 'XLS' (Excel 97-2004) files ENABLED. gdata: read.xls support for 'XLSX' (Excel 2007+) files ENABLED. Attaching package: 'gdata' The following object is masked from 'package:stats': nobs The following object is masked from 'package:utils': object.size The following object is masked from 'package:base': startsWith > > options(humanReadable=FALSE) > > set.seed(123456) > > baseSI <- 10 > powerSI <- seq(from=0, to=27, by=3) > SI0 <- (baseSI)^powerSI > k <- length(SI0) - 1 > SI1 <- SI0 - SI0 / c(2, runif(n=k, min=1.01, max=5.99)) > SI2 <- SI0 + SI0 / c(2, runif(n=k, min=1.01, max=5.99)) > > baseIEC <- 2 > powerIEC <- seq(from=0, to=90, by=10) > IEC0 <- (baseIEC)^powerIEC > IEC1 <- IEC0 - IEC0 / c(2, runif(n=k, min=1.01, max=5.99)) > IEC2 <- IEC0 + IEC0 / c(2, runif(n=k, min=1.01, max=5.99)) > > # Auto units, specify width > cbind(humanReadable(x=SI2, standard="SI", width=7), + humanReadable(x=SI2, standard="SI", width=5), + humanReadable(x=SI2, standard="SI", width=3), + humanReadable(x=IEC2, standard="IEC", width=7), + humanReadable(x=IEC2, standard="IEC", width=5), + humanReadable(x=IEC2, standard="IEC", width=3), + humanReadable(x=IEC2, standard="Unix", width=7), + humanReadable(x=IEC2, standard="Unix", width=5), + humanReadable(x=IEC2, standard="Unix", width=3)) [,1] [,2] [,3] [,4] [,5] [,6] [1,] " 2 B " " 2 B " " 2 B " " 2 B " " 2 B " " 2 B " [2,] "1.54215 kB" "1.542 kB" " 1.5 kB" "1.18582 KiB" "1.186 KiB" " 1.2 KiB" [3,] "1.20064 MB" "1.201 MB" " 1.2 MB" "1.19003 MiB" " 1.19 MiB" " 1.2 MiB" [4,] "1.25207 GB" "1.252 GB" " 1.3 GB" "1.54448 GiB" "1.544 GiB" " 1.5 GiB" [5,] "1.18121 TB" "1.181 TB" " 1.2 TB" "1.27667 TiB" "1.277 TiB" " 1.3 TiB" [6,] " 1.1853 PB" "1.185 PB" " 1.2 PB" "1.18733 PiB" "1.187 PiB" " 1.2 PiB" [7,] " 1.1678 EB" "1.168 EB" " 1.2 EB" "1.46271 EiB" "1.463 EiB" " 1.5 EiB" [8,] "1.18275 ZB" "1.183 ZB" " 1.2 ZB" "1.62382 ZiB" "1.624 ZiB" " 1.6 ZiB" [9,] "1.18568 YB" "1.186 YB" " 1.2 YB" "1.19557 YiB" "1.196 YiB" " 1.2 YiB" [10,] "1501.49 YB" " 1501 YB" "1501 YB" "1750.35 YiB" " 1750 YiB" "1750 YiB" [,7] [,8] [,9] [1,] " 2 B" " 2 B" " 2 B" [2,] "1.18582 K" "1.186 K" " 1.2 K" [3,] "1.19003 M" " 1.19 M" " 1.2 M" [4,] "1.54448 G" "1.544 G" " 1.5 G" [5,] "1.27667 T" "1.277 T" " 1.3 T" [6,] "1.18733 P" "1.187 P" " 1.2 P" [7,] "1.46271 E" "1.463 E" " 1.5 E" [8,] "1.62382 Z" "1.624 Z" " 1.6 Z" [9,] "1.19557 Y" "1.196 Y" " 1.2 Y" [10,] "1750.35 Y" " 1750 Y" "1750 Y" > > # Auto units, specify digits > cbind(humanReadable(x=SI2, standard="SI", width=NULL, digits=7), + humanReadable(x=SI2, standard="SI", width=NULL, digits=3), + 
humanReadable(x=SI2, standard="SI", width=NULL, digits=2), + humanReadable(x=SI2, standard="SI", width=NULL, digits=1), + humanReadable(x=IEC2, standard="IEC", width=NULL, digits=7), + humanReadable(x=IEC2, standard="IEC", width=NULL, digits=3), + humanReadable(x=IEC2, standard="IEC", width=NULL, digits=2), + humanReadable(x=IEC2, standard="IEC", width=NULL, digits=1), + humanReadable(x=IEC2, standard="Unix", width=NULL, digits=7), + humanReadable(x=IEC2, standard="Unix", width=NULL, digits=3), + humanReadable(x=IEC2, standard="Unix", width=NULL, digits=2), + humanReadable(x=IEC2, standard="Unix", width=NULL, digits=1)) [,1] [,2] [,3] [,4] [1,] " 1.5000000 B " " 1.500 B " " 1.50 B " " 1.5 B " [2,] " 1.5421535 kB" " 1.542 kB" " 1.54 kB" " 1.5 kB" [3,] " 1.2006426 MB" " 1.201 MB" " 1.20 MB" " 1.2 MB" [4,] " 1.2520737 GB" " 1.252 GB" " 1.25 GB" " 1.3 GB" [5,] " 1.1812105 TB" " 1.181 TB" " 1.18 TB" " 1.2 TB" [6,] " 1.1853010 PB" " 1.185 PB" " 1.19 PB" " 1.2 PB" [7,] " 1.1678048 EB" " 1.168 EB" " 1.17 EB" " 1.2 EB" [8,] " 1.1827531 ZB" " 1.183 ZB" " 1.18 ZB" " 1.2 ZB" [9,] " 1.1856788 YB" " 1.186 YB" " 1.19 YB" " 1.2 YB" [10,] "1501.4852409 YB" "1501.485 YB" "1501.49 YB" "1501.5 YB" [,5] [,6] [,7] [,8] [1,] " 1.5000000 B " " 1.500 B " " 1.50 B " " 1.5 B " [2,] " 1.1858248 KiB" " 1.186 KiB" " 1.19 KiB" " 1.2 KiB" [3,] " 1.1900302 MiB" " 1.190 MiB" " 1.19 MiB" " 1.2 MiB" [4,] " 1.5444791 GiB" " 1.544 GiB" " 1.54 GiB" " 1.5 GiB" [5,] " 1.2766723 TiB" " 1.277 TiB" " 1.28 TiB" " 1.3 TiB" [6,] " 1.1873270 PiB" " 1.187 PiB" " 1.19 PiB" " 1.2 PiB" [7,] " 1.4627144 EiB" " 1.463 EiB" " 1.46 EiB" " 1.5 EiB" [8,] " 1.6238214 ZiB" " 1.624 ZiB" " 1.62 ZiB" " 1.6 ZiB" [9,] " 1.1955693 YiB" " 1.196 YiB" " 1.20 YiB" " 1.2 YiB" [10,] "1750.3547972 YiB" "1750.355 YiB" "1750.35 YiB" "1750.4 YiB" [,9] [,10] [,11] [,12] [1,] " 1.5000000 B" " 1.500 B" " 1.50 B" " 1.5 B" [2,] " 1.1858248 K" " 1.186 K" " 1.19 K" " 1.2 K" [3,] " 1.1900302 M" " 1.190 M" " 1.19 M" " 1.2 M" [4,] " 1.5444791 G" " 1.544 G" " 1.54 G" " 1.5 G" [5,] " 1.2766723 T" " 1.277 T" " 1.28 T" " 1.3 T" [6,] " 1.1873270 P" " 1.187 P" " 1.19 P" " 1.2 P" [7,] " 1.4627144 E" " 1.463 E" " 1.46 E" " 1.5 E" [8,] " 1.6238214 Z" " 1.624 Z" " 1.62 Z" " 1.6 Z" [9,] " 1.1955693 Y" " 1.196 Y" " 1.20 Y" " 1.2 Y" [10,] "1750.3547972 Y" "1750.355 Y" "1750.35 Y" "1750.4 Y" > > # Single unit, specify width > cbind(humanReadable(x=SI1, units="GB", standard="SI", width=7), + humanReadable(x=SI1, units="GB", standard="SI", width=5), + humanReadable(x=SI1, units="GB", standard="SI", width=3), + humanReadable(x=IEC1, units="GiB", standard="IEC", width=7), + humanReadable(x=IEC1, units="GiB", standard="IEC", width=5), + humanReadable(x=IEC1, units="GiB", standard="IEC", width=3), + humanReadable(x=IEC1, units="G", standard="Unix", width=7), + humanReadable(x=IEC1, units="G", standard="Unix", width=5), + humanReadable(x=IEC1, units="G", standard="Unix", width=3) + ) [,1] [,2] [,3] [,4] [,5] [,6] [1,] "5e-10 GB" "5e-10 GB" "5e-10 GB" "5e-10 GiB" "5e-10 GiB" "5e-10 GiB" [2,] "8e-07 GB" "8e-07 GB" "8e-07 GB" "6e-07 GiB" "6e-07 GiB" "6e-07 GiB" [3,] "8e-04 GB" "8e-04 GB" "8e-04 GB" "8e-04 GiB" "8e-04 GiB" "8e-04 GiB" [4,] "7e-01 GB" "7e-01 GB" "7e-01 GB" "4e-01 GiB" "4e-01 GiB" "4e-01 GiB" [5,] "6e+02 GB" "6e+02 GB" "6e+02 GB" "3e+02 GiB" "3e+02 GiB" "3e+02 GiB" [6,] "6e+05 GB" "6e+05 GB" "6e+05 GB" "4e+05 GiB" "4e+05 GiB" "4e+05 GiB" [7,] "5e+08 GB" "5e+08 GB" "5e+08 GB" "5e+08 GiB" "5e+08 GiB" "5e+08 GiB" [8,] "7e+11 GB" "7e+11 GB" "7e+11 GB" "8e+11 GiB" "8e+11 GiB" "8e+11 GiB" 
[9,] "3e+14 GB" "3e+14 GB" "3e+14 GB" "9e+14 GiB" "9e+14 GiB" "9e+14 GiB" [10,] "8e+17 GB" "8e+17 GB" "8e+17 GB" "9e+17 GiB" "9e+17 GiB" "9e+17 GiB" [,7] [,8] [,9] [1,] "5e-10 G" "5e-10 G" "5e-10 G" [2,] "6e-07 G" "6e-07 G" "6e-07 G" [3,] "8e-04 G" "8e-04 G" "8e-04 G" [4,] "4e-01 G" "4e-01 G" "4e-01 G" [5,] "3e+02 G" "3e+02 G" "3e+02 G" [6,] "4e+05 G" "4e+05 G" "4e+05 G" [7,] "5e+08 G" "5e+08 G" "5e+08 G" [8,] "8e+11 G" "8e+11 G" "8e+11 G" [9,] "9e+14 G" "9e+14 G" "9e+14 G" [10,] "9e+17 G" "9e+17 G" "9e+17 G" > > # Single unit, specify digits > cbind(humanReadable(x=SI1, units="GB", standard="SI", width=NULL, digits=7), + humanReadable(x=SI1, units="GB", standard="SI", width=NULL, digits=3), + humanReadable(x=SI1, units="GB", standard="SI", width=NULL, digits=2), + humanReadable(x=SI1, units="GB", standard="SI", width=NULL, digits=1), + humanReadable(x=IEC1, units="GiB", standard="IEC", width=NULL, digits=7), + humanReadable(x=IEC1, units="GiB", standard="IEC", width=NULL, digits=3), + humanReadable(x=IEC1, units="GiB", standard="IEC", width=NULL, digits=2), + humanReadable(x=IEC1, units="GiB", standard="IEC", width=NULL, digits=1), + humanReadable(x=IEC1, units="G", standard="Unix", width=NULL, digits=7), + humanReadable(x=IEC1, units="G", standard="Unix", width=NULL, digits=3), + humanReadable(x=IEC1, units="G", standard="Unix", width=NULL, digits=2), + humanReadable(x=IEC1, units="G", standard="Unix", width=NULL, digits=1) + ) [,1] [,2] [,3] [,4] [1,] "5.000000e-10 GB" "5.00e-10 GB" "5.0e-10 GB" "5e-10 GB" [2,] "7.993163e-07 GB" "7.99e-07 GB" "8.0e-07 GB" "8e-07 GB" [3,] "7.900375e-04 GB" "7.90e-04 GB" "7.9e-04 GB" "8e-04 GB" [4,] "6.619855e-01 GB" "6.62e-01 GB" "6.6e-01 GB" "7e-01 GB" [5,] "6.311259e+02 GB" "6.31e+02 GB" "6.3e+02 GB" "6e+02 GB" [6,] "6.440324e+05 GB" "6.44e+05 GB" "6.4e+05 GB" "6e+05 GB" [7,] "4.994386e+08 GB" "4.99e+08 GB" "5.0e+08 GB" "5e+08 GB" [8,] "7.277869e+11 GB" "7.28e+11 GB" "7.3e+11 GB" "7e+11 GB" [9,] "3.291745e+14 GB" "3.29e+14 GB" "3.3e+14 GB" "3e+14 GB" [10,] "8.313511e+17 GB" "8.31e+17 GB" "8.3e+17 GB" "8e+17 GB" [,5] [,6] [,7] [,8] [1,] "4.656613e-10 GiB" "4.66e-10 GiB" "4.7e-10 GiB" "5e-10 GiB" [2,] "5.975956e-07 GiB" "5.98e-07 GiB" "6.0e-07 GiB" "6e-07 GiB" [3,] "7.764672e-04 GiB" "7.76e-04 GiB" "7.8e-04 GiB" "8e-04 GiB" [4,] "4.459146e-01 GiB" "4.46e-01 GiB" "4.5e-01 GiB" "4e-01 GiB" [5,] "2.985889e+02 GiB" "2.99e+02 GiB" "3.0e+02 GiB" "3e+02 GiB" [6,] "4.209112e+05 GiB" "4.21e+05 GiB" "4.2e+05 GiB" "4e+05 GiB" [7,] "4.983449e+08 GiB" "4.98e+08 GiB" "5.0e+08 GiB" "5e+08 GiB" [8,] "7.751081e+11 GiB" "7.75e+11 GiB" "7.8e+11 GiB" "8e+11 GiB" [9,] "8.756173e+14 GiB" "8.76e+14 GiB" "8.8e+14 GiB" "9e+14 GiB" [10,] "9.390947e+17 GiB" "9.39e+17 GiB" "9.4e+17 GiB" "9e+17 GiB" [,9] [,10] [,11] [,12] [1,] "4.656613e-10 G" "4.66e-10 G" "4.7e-10 G" "5e-10 G" [2,] "5.975956e-07 G" "5.98e-07 G" "6.0e-07 G" "6e-07 G" [3,] "7.764672e-04 G" "7.76e-04 G" "7.8e-04 G" "8e-04 G" [4,] "4.459146e-01 G" "4.46e-01 G" "4.5e-01 G" "4e-01 G" [5,] "2.985889e+02 G" "2.99e+02 G" "3.0e+02 G" "3e+02 G" [6,] "4.209112e+05 G" "4.21e+05 G" "4.2e+05 G" "4e+05 G" [7,] "4.983449e+08 G" "4.98e+08 G" "5.0e+08 G" "5e+08 G" [8,] "7.751081e+11 G" "7.75e+11 G" "7.8e+11 G" "8e+11 G" [9,] "8.756173e+14 G" "8.76e+14 G" "8.8e+14 G" "9e+14 G" [10,] "9.390947e+17 G" "9.39e+17 G" "9.4e+17 G" "9e+17 G" > > > stopifnot( is.object_sizes(as.object_sizes( 2^(1:30) ) ) ) > stopifnot( format(as.object_sizes(124)) == "124 bytes") > stopifnot( format(as.object_sizes(124e8), units="auto") == "11.5 GiB") > 
stopifnot( format(as.object_sizes(124e8), humanReadable=TRUE) == "11.5 GiB") > stopifnot( format(as.object_sizes(124e8), units="bytes") == "1.24e+10 bytes") > > tools::assertError( as.object_sizes(-1) ) > tools::assertError( as.object_sizes('a') ) > tools::assertError( as.object_sizes(list()) ) > tools::assertError( as.object_sizes(NULL) ) > tools::assertError( as.object_sizes(0+1i) ) > > stopifnot( format(as.object_sizes(1e40) ) == "1e+40 bytes" ) > stopifnot( format(as.object_sizes(1e40), units="auto" ) == "8.271806e+15 YiB") > stopifnot( format(as.object_sizes(1e40), units="bytes") == "1e+40 bytes" ) > stopifnot( format(as.object_sizes(1e40), humanReadable=TRUE) == "8.271806e+15 YiB") > stopifnot( format(as.object_sizes(1e40), humanReadable=FALSE) == "1e+40 bytes") > > options(humanReadable=TRUE) > stopifnot( format(as.object_sizes(1e40) ) == "8.271806e+15 YiB") > options(humanReadable=FALSE) > > proc.time() user system elapsed 0.301 0.044 0.361 gdata/tests/test.reorder.factor.Rout.save0000644000175100001440000000256313115331034020217 0ustar hornikusers R version 3.3.2 (2016-10-31) -- "Sincere Pumpkin Patch" Copyright (C) 2016 The R Foundation for Statistical Computing Platform: x86_64-apple-darwin13.4.0 (64-bit) R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. Type 'license()' or 'licence()' for distribution details. R is a collaborative project with many contributors. Type 'contributors()' for more information and 'citation()' on how to cite R or R packages in publications. Type 'demo()' for some demos, 'help()' for on-line help, or 'help.start()' for an HTML browser interface to help. Type 'q()' to quit R. > ## Test results before and after loading gdata > > m <- factor(c('a','b','c')) > > ( m1 <- reorder(m, X=c(3, 2, 1)) ) [1] a b c attr(,"scores") a b c 3 2 1 Levels: c b a > > library(gdata) gdata: read.xls support for 'XLS' (Excel 97-2004) files ENABLED. gdata: read.xls support for 'XLSX' (Excel 2007+) files ENABLED. Attaching package: 'gdata' The following object is masked from 'package:stats': nobs The following object is masked from 'package:utils': object.size The following object is masked from 'package:base': startsWith > > ( m2 <- reorder(m, X=c(3, 2, 1)) ) [1] a b c attr(,"scores") a b c 3 2 1 Levels: c b a > > stopifnot(identical(m1,m2)) > > proc.time() user system elapsed 0.262 0.043 0.321 gdata/tests/tests.write.fwf.Rout.save0000644000175100001440000002721413115331034017376 0ustar hornikusers R version 3.3.2 (2016-10-31) -- "Sincere Pumpkin Patch" Copyright (C) 2016 The R Foundation for Statistical Computing Platform: x86_64-apple-darwin13.4.0 (64-bit) R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. Type 'license()' or 'licence()' for distribution details. R is a collaborative project with many contributors. Type 'contributors()' for more information and 'citation()' on how to cite R or R packages in publications. Type 'demo()' for some demos, 'help()' for on-line help, or 'help.start()' for an HTML browser interface to help. Type 'q()' to quit R. 
> ### tests.write.fwf.R > ###------------------------------------------------------------------------ > ### What: Tests for write.fwf > ### $Id: tests.write.fwf.R 1300 2008-08-05 11:47:18Z ggorjan $ > ### Time-stamp: <2008-08-05 12:25:05 ggorjan> > ###------------------------------------------------------------------------ > > library(gdata) gdata: read.xls support for 'XLS' (Excel 97-2004) files ENABLED. gdata: read.xls support for 'XLSX' (Excel 2007+) files ENABLED. Attaching package: 'gdata' The following object is masked from 'package:stats': nobs The following object is masked from 'package:utils': object.size The following object is masked from 'package:base': startsWith > > ## --- Test data --- > > num <- round(c(733070.345678, 1214213.78765456, 553823.798765678, + 1085022.8876545678, 571063.88765456, 606718.3876545678, + 1053686.6, 971024.187656, 631193.398765456, 879431.1), + digits=3) > > testData <- data.frame(num1=c(1:10, NA), + num2=c(NA, seq(from=1, to=5.5, by=0.5)), + num3=c(NA, num), + int1=c(as.integer(1:4), NA, as.integer(4:9)), + fac1=factor(c(NA, letters[1:9], "hjh")), + fac2=factor(c(letters[6:15], NA)), + cha1=c(letters[17:26], NA), + cha2=c(NA, "longer", letters[25:17]), + stringsAsFactors=FALSE) > levels(testData$fac1) <- c(levels(testData$fac1), "unusedLevel") > testData$Date <- as.Date("1900-1-1") > testData$Date[2] <- NA > testData$POSIXt <- as.POSIXct(strptime("1900-1-1 01:01:01", format="%Y-%m-%d %H:%M:%S")) > testData$POSIXt[5] <- NA > > ## --- Tests --- > > ## Default > write.fwf(testData) num1 num2 num3 int1 fac1 fac2 cha1 cha2 Date POSIXt 1 1 f q 1900-01-01 1900-01-01 01:01:01 2 1.0 733070.3 2 a g r longer 1900-01-01 01:01:01 3 1.5 1214213.8 3 b h s y 1900-01-01 1900-01-01 01:01:01 4 2.0 553823.8 4 c i t x 1900-01-01 1900-01-01 01:01:01 5 2.5 1085022.9 d j u w 1900-01-01 6 3.0 571063.9 4 e k v v 1900-01-01 1900-01-01 01:01:01 7 3.5 606718.4 5 f l w u 1900-01-01 1900-01-01 01:01:01 8 4.0 1053686.6 6 g m x t 1900-01-01 1900-01-01 01:01:01 9 4.5 971024.2 7 h n y s 1900-01-01 1900-01-01 01:01:01 10 5.0 631193.4 8 i o z r 1900-01-01 1900-01-01 01:01:01 5.5 879431.1 9 hjh q 1900-01-01 1900-01-01 01:01:01 > > ## NA should be - > write.fwf(x=testData, na="-") num1 num2 num3 int1 fac1 fac2 cha1 cha2 Date POSIXt 1 - - 1 - f q - 1900-01-01 1900-01-01 01:01:01 2 1.0 733070.3 2 a g r longer - 1900-01-01 01:01:01 3 1.5 1214213.8 3 b h s y 1900-01-01 1900-01-01 01:01:01 4 2.0 553823.8 4 c i t x 1900-01-01 1900-01-01 01:01:01 5 2.5 1085022.9 - d j u w 1900-01-01 - 6 3.0 571063.9 4 e k v v 1900-01-01 1900-01-01 01:01:01 7 3.5 606718.4 5 f l w u 1900-01-01 1900-01-01 01:01:01 8 4.0 1053686.6 6 g m x t 1900-01-01 1900-01-01 01:01:01 9 4.5 971024.2 7 h n y s 1900-01-01 1900-01-01 01:01:01 10 5.0 631193.4 8 i o z r 1900-01-01 1900-01-01 01:01:01 - 5.5 879431.1 9 hjh - - q 1900-01-01 1900-01-01 01:01:01 > ## NA should be -NA- > write.fwf(x=testData, na="-NA-") num1 num2 num3 int1 fac1 fac2 cha1 cha2 Date POSIXt 1 -NA- -NA- 1 -NA- f q -NA- 1900-01-01 1900-01-01 01:01:01 2 1.0 733070.3 2 a g r longer -NA- 1900-01-01 01:01:01 3 1.5 1214213.8 3 b h s y 1900-01-01 1900-01-01 01:01:01 4 2.0 553823.8 4 c i t x 1900-01-01 1900-01-01 01:01:01 5 2.5 1085022.9 -NA- d j u w 1900-01-01 -NA- 6 3.0 571063.9 4 e k v v 1900-01-01 1900-01-01 01:01:01 7 3.5 606718.4 5 f l w u 1900-01-01 1900-01-01 01:01:01 8 4.0 1053686.6 6 g m x t 1900-01-01 1900-01-01 01:01:01 9 4.5 971024.2 7 h n y s 1900-01-01 1900-01-01 01:01:01 10 5.0 631193.4 8 i o z r 1900-01-01 1900-01-01 01:01:01 -NA- 5.5 879431.1 9 hjh 
-NA- -NA- q 1900-01-01 1900-01-01 01:01:01 > > ## Some other separator than space > write.fwf(testData[, 1:4], sep="-mySep-") num1-mySep-num2-mySep-num3-mySep-int1 1-mySep- -mySep- -mySep-1 2-mySep-1.0-mySep- 733070.3-mySep-2 3-mySep-1.5-mySep-1214213.8-mySep-3 4-mySep-2.0-mySep- 553823.8-mySep-4 5-mySep-2.5-mySep-1085022.9-mySep- 6-mySep-3.0-mySep- 571063.9-mySep-4 7-mySep-3.5-mySep- 606718.4-mySep-5 8-mySep-4.0-mySep-1053686.6-mySep-6 9-mySep-4.5-mySep- 971024.2-mySep-7 10-mySep-5.0-mySep- 631193.4-mySep-8 -mySep-5.5-mySep- 879431.1-mySep-9 > > ## Justify right for character columns > write.fwf(testData, justify="right") num1 num2 num3 int1 fac1 fac2 cha1 cha2 Date POSIXt 1 1 f q 1900-01-01 1900-01-01 01:01:01 2 1.0 733070.3 2 a g r longer 1900-01-01 01:01:01 3 1.5 1214213.8 3 b h s y 1900-01-01 1900-01-01 01:01:01 4 2.0 553823.8 4 c i t x 1900-01-01 1900-01-01 01:01:01 5 2.5 1085022.9 d j u w 1900-01-01 6 3.0 571063.9 4 e k v v 1900-01-01 1900-01-01 01:01:01 7 3.5 606718.4 5 f l w u 1900-01-01 1900-01-01 01:01:01 8 4.0 1053686.6 6 g m x t 1900-01-01 1900-01-01 01:01:01 9 4.5 971024.2 7 h n y s 1900-01-01 1900-01-01 01:01:01 10 5.0 631193.4 8 i o z r 1900-01-01 1900-01-01 01:01:01 5.5 879431.1 9 hjh q 1900-01-01 1900-01-01 01:01:01 > > ## Justify right for character columns, track na values > write.fwf(testData, justify="right", na="!") num1 num2 num3 int1 fac1 fac2 cha1 cha2 Date POSIXt 1 ! ! 1 ! f q ! 1900-01-01 1900-01-01 01:01:01 2 1.0 733070.3 2 a g r longer ! 1900-01-01 01:01:01 3 1.5 1214213.8 3 b h s y 1900-01-01 1900-01-01 01:01:01 4 2.0 553823.8 4 c i t x 1900-01-01 1900-01-01 01:01:01 5 2.5 1085022.9 ! d j u w 1900-01-01 ! 6 3.0 571063.9 4 e k v v 1900-01-01 1900-01-01 01:01:01 7 3.5 606718.4 5 f l w u 1900-01-01 1900-01-01 01:01:01 8 4.0 1053686.6 6 g m x t 1900-01-01 1900-01-01 01:01:01 9 4.5 971024.2 7 h n y s 1900-01-01 1900-01-01 01:01:01 10 5.0 631193.4 8 i o z r 1900-01-01 1900-01-01 01:01:01 ! 5.5 879431.1 9 hjh ! ! 
q 1900-01-01 1900-01-01 01:01:01 > > ## With quotes > write.fwf(testData, quote=TRUE) "num1" "num2" "num3" "int1" "fac1" "fac2" "cha1" "cha2" "Date" "POSIXt" " 1" " " " " "1" " " "f" "q" " " "1900-01-01" "1900-01-01 01:01:01" " 2" "1.0" " 733070.3" "2" "a " "g" "r" "longer" " " "1900-01-01 01:01:01" " 3" "1.5" "1214213.8" "3" "b " "h" "s" "y " "1900-01-01" "1900-01-01 01:01:01" " 4" "2.0" " 553823.8" "4" "c " "i" "t" "x " "1900-01-01" "1900-01-01 01:01:01" " 5" "2.5" "1085022.9" " " "d " "j" "u" "w " "1900-01-01" " " " 6" "3.0" " 571063.9" "4" "e " "k" "v" "v " "1900-01-01" "1900-01-01 01:01:01" " 7" "3.5" " 606718.4" "5" "f " "l" "w" "u " "1900-01-01" "1900-01-01 01:01:01" " 8" "4.0" "1053686.6" "6" "g " "m" "x" "t " "1900-01-01" "1900-01-01 01:01:01" " 9" "4.5" " 971024.2" "7" "h " "n" "y" "s " "1900-01-01" "1900-01-01 01:01:01" "10" "5.0" " 631193.4" "8" "i " "o" "z" "r " "1900-01-01" "1900-01-01 01:01:01" " " "5.5" " 879431.1" "9" "hjh" " " " " "q " "1900-01-01" "1900-01-01 01:01:01" > > ## With rownames > write.fwf(testData, rownames=TRUE) num1 num2 num3 int1 fac1 fac2 cha1 cha2 Date POSIXt 1 1 1 f q 1900-01-01 1900-01-01 01:01:01 2 2 1.0 733070.3 2 a g r longer 1900-01-01 01:01:01 3 3 1.5 1214213.8 3 b h s y 1900-01-01 1900-01-01 01:01:01 4 4 2.0 553823.8 4 c i t x 1900-01-01 1900-01-01 01:01:01 5 5 2.5 1085022.9 d j u w 1900-01-01 6 6 3.0 571063.9 4 e k v v 1900-01-01 1900-01-01 01:01:01 7 7 3.5 606718.4 5 f l w u 1900-01-01 1900-01-01 01:01:01 8 8 4.0 1053686.6 6 g m x t 1900-01-01 1900-01-01 01:01:01 9 9 4.5 971024.2 7 h n y s 1900-01-01 1900-01-01 01:01:01 10 10 5.0 631193.4 8 i o z r 1900-01-01 1900-01-01 01:01:01 11 5.5 879431.1 9 hjh q 1900-01-01 1900-01-01 01:01:01 > > ## Without colnames > write.fwf(testData, colnames=FALSE) 1 1 f q 1900-01-01 1900-01-01 01:01:01 2 1.0 733070.3 2 a g r longer 1900-01-01 01:01:01 3 1.5 1214213.8 3 b h s y 1900-01-01 1900-01-01 01:01:01 4 2.0 553823.8 4 c i t x 1900-01-01 1900-01-01 01:01:01 5 2.5 1085022.9 d j u w 1900-01-01 6 3.0 571063.9 4 e k v v 1900-01-01 1900-01-01 01:01:01 7 3.5 606718.4 5 f l w u 1900-01-01 1900-01-01 01:01:01 8 4.0 1053686.6 6 g m x t 1900-01-01 1900-01-01 01:01:01 9 4.5 971024.2 7 h n y s 1900-01-01 1900-01-01 01:01:01 10 5.0 631193.4 8 i o z r 1900-01-01 1900-01-01 01:01:01 5.5 879431.1 9 hjh q 1900-01-01 1900-01-01 01:01:01 > > ## Without colnames, with rownames > write.fwf(testData, colnames=FALSE, rownames=TRUE) 1 1 1 f q 1900-01-01 1900-01-01 01:01:01 2 2 1.0 733070.3 2 a g r longer 1900-01-01 01:01:01 3 3 1.5 1214213.8 3 b h s y 1900-01-01 1900-01-01 01:01:01 4 4 2.0 553823.8 4 c i t x 1900-01-01 1900-01-01 01:01:01 5 5 2.5 1085022.9 d j u w 1900-01-01 6 6 3.0 571063.9 4 e k v v 1900-01-01 1900-01-01 01:01:01 7 7 3.5 606718.4 5 f l w u 1900-01-01 1900-01-01 01:01:01 8 8 4.0 1053686.6 6 g m x t 1900-01-01 1900-01-01 01:01:01 9 9 4.5 971024.2 7 h n y s 1900-01-01 1900-01-01 01:01:01 10 10 5.0 631193.4 8 i o z r 1900-01-01 1900-01-01 01:01:01 11 5.5 879431.1 9 hjh q 1900-01-01 1900-01-01 01:01:01 > > ## With rownames and colnames and rowCol > write.fwf(testData, rownames=TRUE, rowCol="HI!") HI! 
num1 num2 num3 int1 fac1 fac2 cha1 cha2 Date POSIXt 1 1 1 f q 1900-01-01 1900-01-01 01:01:01 2 2 1.0 733070.3 2 a g r longer 1900-01-01 01:01:01 3 3 1.5 1214213.8 3 b h s y 1900-01-01 1900-01-01 01:01:01 4 4 2.0 553823.8 4 c i t x 1900-01-01 1900-01-01 01:01:01 5 5 2.5 1085022.9 d j u w 1900-01-01 6 6 3.0 571063.9 4 e k v v 1900-01-01 1900-01-01 01:01:01 7 7 3.5 606718.4 5 f l w u 1900-01-01 1900-01-01 01:01:01 8 8 4.0 1053686.6 6 g m x t 1900-01-01 1900-01-01 01:01:01 9 9 4.5 971024.2 7 h n y s 1900-01-01 1900-01-01 01:01:01 10 10 5.0 631193.4 8 i o z r 1900-01-01 1900-01-01 01:01:01 11 5.5 879431.1 9 hjh q 1900-01-01 1900-01-01 01:01:01 > > ## formatInfo is tested with unit tests > > ###------------------------------------------------------------------------ > ### tests.write.fwf.R ends > > proc.time() user system elapsed 0.333 0.044 0.392 gdata/tests/unitTests/0000755000175100001440000000000013003720415014507 5ustar hornikusersgdata/tests/unitTests/Makefile0000644000175100001440000000060613115346316016161 0ustar hornikusersTOP=../.. PKG=${shell cd ${TOP};pwd} SUITE=runRUnitTests.R R=R test: # Run unit tests ${R} --vanilla --slave < ${SUITE} all: inst test echo: # Echo env. variables @echo "Package folder: ${PKG}" @echo "R binary: ${R}" help: # Help @echo -e '\nTarget: Dependency # Description'; \ echo '=================================================='; \ egrep '^[[:alnum:].+_()%]*:' ./Makefile gdata/tests/unitTests/runit.unknown.R0000644000175100001440000005625713003720415017510 0ustar hornikusers### runit.unknown.R ###------------------------------------------------------------------------ ### What: Tests for Change given unknown value to NA and vice versa methods ### $Id: runit.unknown.R 1801 2014-04-05 21:08:41Z warnes $ ### Time-stamp: <2006-10-30 17:46:21 ggorjan> ###------------------------------------------------------------------------ ### {{{ --- Test setup --- library("RUnit") library("gdata") ### {{{ --- Vectors --- intUnk <- 9999 xInt <- as.integer(c(NA, 1:2, NA, 5, 6, 7, 8, 9)) xIntUnk <- as.integer(c(intUnk, 1:2, intUnk, 5, 6, 7, 8, 9)) xIntUnkTest <- xIntUnk %in% intUnk numUnk <- 0 xNum <- c(9999, NA, 1.5, NA, 5, 6, 7, 8, 9) xNumUnk <- c(9999, 0, 1.5, 0, 5, 6, 7, 8, 9) xNumUnkTest <- xNumUnk %in% numUnk chaUnk <- "notAvail" chaUnk1 <- "-" xCha <- c("A", "B", NA, "C", NA, "-", "7", "8", "9") xChaUnk <- c("A", "B", chaUnk, "C", chaUnk, "-", "7", "8", "9") xChaUnk1 <- c("A", "B", chaUnk1, "C", chaUnk1, "-", "7", "8", "9") xChaUnkTest <- xChaUnk %in% chaUnk xChaUnk1Test <- xChaUnk %in% chaUnk1 facUnk <- "notAvail" facUnk1 <- "NA" xFac <- factor(c("A", "0", 0, "NA", "NA", intUnk, numUnk, "-", NA)) xFacUnk <- factor(c("A", "0", 0, "NA", "NA", intUnk, numUnk, "-", facUnk)) xFacUnk1 <- factor(c("A", "0", 0, "NA", "NA", intUnk, numUnk, "-", facUnk1)) xFacUnkTest <- c(0, 0, 0, 0, 0, 0, 0, 0, 1) xFacUnkTest <- as.logical(xFacUnkTest) xFacUnk1Test <- c(0, 0, 0, 1, 1, 0, 0, 0, 1) xFacUnk1Test <- as.logical(xFacUnk1Test) xFac1 <- factor(c("A", "0", 0, NA, NA, intUnk, numUnk, "-", NA)) facLev <- "A" xFacUnkLev <- factor(c("A", "0", 0, "NA", "NA", intUnk, numUnk, "-", "A")) xFacUnkLevTest <- c(1, 0, 0, 0, 0, 0, 0, 0, 1) xFacUnkLevTest <- as.logical(xFacUnkLevTest) dateUnk <- as.Date("2006-08-14") tmp <- as.Date("2006-08-15") xDate <- c(tmp, NA) xDateUnk <- c(tmp, dateUnk) xDateTest <- c(FALSE, TRUE) xDate1Unk <- c(tmp, dateUnk, NA) xDate1Test <- c(FALSE, TRUE, FALSE) POSIXltUnk <- strptime("2006-08-14", format="%Y-%m-%d") tmp <- strptime("2006-08-15", format="%Y-%m-%d") xPOSIXlt <- 
c(tmp, NA) xPOSIXltUnk <- c(tmp, POSIXltUnk) xPOSIXltTest <- c(FALSE, TRUE) xPOSIXlt1Unk <- c(tmp, POSIXltUnk, NA) xPOSIXlt1Test <- c(FALSE, TRUE, FALSE) POSIXctUnk <- as.POSIXct(strptime("2006-08-14 01:01:01", format="%Y-%m-%d %H:%M:%S")) tmp <- as.POSIXct(strptime("2006-08-15 01:01:01", format="%Y-%m-%d %H:%M:%S")) xPOSIXct <- c(tmp, NA) xPOSIXctUnk <- c(tmp, POSIXctUnk) xPOSIXctTest <- xPOSIXltTest xPOSIXct1Unk <- c(tmp, POSIXctUnk, NA) xPOSIXct1Test <- xPOSIXlt1Test ### }}} ### {{{ --- Lists and data.frames --- xList <- list(xInt, xCha, xNum, xFac) xListN <- list(int=xInt, cha=xCha, num=xNum, fac=xFac) xListUnk <- list(xIntUnk, xChaUnk, xNumUnk, xFacUnk) xListUnkTest <- list(xIntUnkTest, xChaUnkTest, xNumUnkTest, xFacUnkTest) xListNUnk <- list(int=xIntUnk, cha=xChaUnk, num=xNumUnk, fac=xFacUnk) xListNUnkTest <- list(int=xIntUnkTest, cha=xChaUnkTest, num=xNumUnkTest, fac=xFacUnkTest) xDF <- as.data.frame(xListN) xDF$cha <- as.character(xDF$cha) xDFUnk <- as.data.frame(xListNUnk) xDFUnk$cha <- as.character(xDFUnk$cha) xDFUnkTest <- as.data.frame(xListNUnkTest) unkC <- c(intUnk, chaUnk, numUnk, facUnk) unkL <- list(intUnk, chaUnk, numUnk, facUnk) unkLN <- list(num=numUnk, cha=chaUnk, fac=facUnk, int=intUnk) ## mixed as it is named unkLMN <- list(cha=chaUnk, int=intUnk, num=c(intUnk, numUnk), fac=c(chaUnk1, facUnk)) xListMNUnkF <- list(int=as.integer(c(9999, 1, 2, 9999, 5, 6, 7, 8, 9)), cha=c("A", "B", "notAvail", "C", "notAvail", "-", "7", "8", "9"), num=c(9999, 0, 1.5, 0, 5, 6, 7, 8, 9), fac=factor(c("A", "0", "0", "NA", "NA", 9999, "0", "-", "notAvail"))) xListMNUnkFTest <- list(int=c(1, 0, 0, 1, 0, 0, 0, 0, 0), cha=c(0, 0, 1, 0, 1, 0, 0, 0, 0), num=c(1, 1, 0, 1, 0, 0, 0, 0, 0), fac=c(0, 0, 0, 0, 0, 0, 0, 1, 1)) xListMNUnkFTest <- lapply(xListMNUnkFTest, as.logical) xListMNF <- list(int=as.integer(c(NA, 1, 2, NA, 5, 6, 7, 8, 9)), cha=c("A", "B", NA, "C", NA, "-", "7", "8", "9"), num=c(NA, NA, 1.5, NA, 5, 6, 7, 8, 9), fac=factor(c("A", "0", "0", "NA", "NA", "9999", "0", NA, NA))) xDFMUnkF <- as.data.frame(xListMNUnkF) xDFMUnkF$cha <- as.character(xDFMUnkF$cha) xDFMUnkFTest <- as.data.frame(xListMNUnkFTest) xDFMF <- as.data.frame(xListMNF) xDFMF$cha <- as.character(xDFMF$cha) unk1 <- 555555 xListUnk1 <- list(as.integer(c(unk1, 1, 2, unk1, 5, 6, 7, 8, 9)), c("A", "B", unk1, "C", unk1, "-", "7", "8", "9"), c(9999, unk1, 1.5, unk1, 5, 6, 7, 8, 9), factor(c("A", "0", "0", "NA", "NA", "9999", "0", "-", unk1))) xListUnk1Test <- lapply(xListUnk1, function(x) x %in% unk1) xListNUnk1 <- xListUnk1 names(xListNUnk1) <- c("int", "cha", "num", "fac") xDFUnk1 <- as.data.frame(xListNUnk1) xDFUnk1$cha <- as.character(xDFUnk1$cha) xDFUnk1Test <- as.data.frame(xListUnk1Test) names(xDFUnk1Test) <- names(xListNUnk1) unkC2 <- c(0, "notAvail") xListUnk2 <- list(as.integer(c(unkC2[1], 1, 2, unkC2[1], 5, 6, 7, 8, 9)), c("A", "B", unkC2[2], "C", unkC2[2], "-", "7", "8", "9"), c(9999, as.numeric(unkC2[1]), 1.5, as.numeric(unkC2[1]), 5, 6, 7, 8, 9), factor(c("A", "0", "0", "NA", "NA", "9999", "0", "-", unkC2[2]))) xListNUnk2 <- xListUnk2 names(xListNUnk2) <- c("int", "cha", "num", "fac") xDFUnk2 <- as.data.frame(xListNUnk2) xDFUnk2$cha <- as.character(xDFUnk2$cha) xListUnk2Test <- xListUnk2 xListUnk2Test[[1]] <- xListUnk2Test[[1]] %in% unkC2[1] xListUnk2Test[[2]] <- xListUnk2Test[[2]] %in% unkC2[2] xListUnk2Test[[3]] <- xListUnk2Test[[3]] %in% unkC2[1] xListUnk2Test[[4]] <- xListUnk2Test[[4]] %in% unkC2[2] xListNUnk2Test <- xListUnk2Test names(xListNUnk2Test) <- names(xListNUnk2) xDFUnk2Test <- 
as.data.frame(xListNUnk2Test) unkL2 <- as.list(unkC2) unkLN2 <- unkL2[c(2, 1)] names(unkLN2) <- c("cha", "int") xListUnk2a <- list(as.integer(c(NA, 1, 2, NA, 5, 6, 7, 8, 9)), c("A", "B", unkLN2[[2]], "C", unkLN2[[2]], "-", "7", "8", "9"), c(9999, NA, 1.5, NA, 5, 6, 7, 8, 9), factor(c("A", "0", "0", "NA", "NA", "9999", "0", "-", unkLN2[[2]]))) xListUnk2aTest <- xListUnk2a xListUnk2aTest[[1]] <- xListUnk2aTest[[1]] %in% unkLN2[1] xListUnk2aTest[[2]] <- xListUnk2aTest[[2]] %in% unkLN2[2] xListUnk2aTest[[3]] <- xListUnk2aTest[[3]] %in% unkLN2[1] xListUnk2aTest[[4]] <- xListUnk2aTest[[4]] %in% unkLN2[2] xList2a <- list(xListUnk2a[[1]], c("A", "B", NA, "C", NA, "-", "7", "8", "9"), xListUnk2a[[3]], factor(c("A", NA, NA, "NA", "NA", 9999, NA, "-", NA))) ### }}} ### {{{ --- Matrix --- matUnk <- 9999 mat <- matrix(1:25, nrow=5, ncol=5) mat[1, 2] <- NA; mat[1, 4] <- NA; mat[2, 2] <- NA; mat[3, 2] <- NA; mat[3, 5] <- NA; mat[5, 4] <- NA; matUnk1 <- mat matUnk1[1, 2] <- matUnk; matUnk1[1, 4] <- matUnk; matUnk1[2, 2] <- matUnk; matUnk1[3, 2] <- matUnk; matUnk1[3, 5] <- matUnk; matUnk1[5, 4] <- matUnk; matUnkTest <- matUnk1Test <- is.na(mat) matUnk2Test <- matUnkTest | mat == 1 ### }}} ### {{{ --- Use of unknown=list(.default=, ...) or similarly named vector --- D1 <- "notAvail" unkLND1 <- list(.default=D1) xListUnkD1 <- list(as.integer(c(NA, 1:2, NA, 5, 6, 7, 8, 9)), c("A", "B", D1, "C", D1, "-", "7", "8", "9"), c(9999, NA, 1.5, NA, 5, 6, 7, 8, 9), factor(c("A", "0", 0, "NA", "NA", intUnk, numUnk, "-", D1))) xListUnkD1Test <- lapply(xListUnkD1, function(x) x %in% D1) xListD1 <- xList xListNUnkD1 <- xListUnkD1 xListNUnkD1Test <- xListUnkD1Test names(xListNUnkD1) <- names(xListNUnkD1Test) <- names(xListNUnk1) xListND1 <- xListN DSO2 <- c("notAvail", 5678) unkLNDSO2 <- as.list(DSO2) names(unkLNDSO2) <- c(".default", "someOther") xListUnkDSO2 <- list(as.integer(c(NA, 1:2, NA, 5, 6, 7, 8, 9)), c("A", "B", DSO2[1], "C", DSO2[1], "-", "7", "8", "9"), c(9999, NA, 1.5, NA, 5, 6, 7, 8, 9), factor(c("A", "0", 0, "NA", "NA", intUnk, numUnk, "-", DSO2[2]))) xListUnkDSO2Test <- lapply(xListUnkDSO2, function(x) x %in% DSO2) unkLND3 <- list(.default="notAvail", num=0, int=9999) xListNUnkD3 <- list(int=as.integer(c(unkLND3[[3]], 1:2, unkLND3[[3]], 5, 6, 7, 8, 9)), cha=c("A", "B", unkLND3[[1]], "C", unkLND3[[1]], "-", "7", "8", "9"), num=c(9999, unkLND3[[2]], 1.5, unkLND3[[2]], 5, 6, 7, 8, 9), fac=factor(c("A", "0", 0, "NA", "NA", intUnk, numUnk, "-", unkLND3[[1]]))) xListNUnkD3Test <- xListNUnkD3 xListNUnkD3Test$int <- xListNUnkD3Test$int %in% unkLND3[[3]] xListNUnkD3Test$cha <- xListNUnkD3Test$cha %in% unkLND3[[1]] xListNUnkD3Test$num <- xListNUnkD3Test$num %in% unkLND3[[2]] xListNUnkD3Test$fac <- xListNUnkD3Test$fac %in% unkLND3[[1]] unkLND2E <- list(.default="notAvail", 9999) ### }}} ### }}} ### {{{ --- isUnknown --- test.isUnknown <- function() { ## --- base methods for vectors --- ## base ... 
checkIdentical(isUnknown(xIntUnk, unknown=as.integer(intUnk)), xIntUnkTest) checkIdentical(isUnknown(xIntUnk, unknown=intUnk), xIntUnkTest) checkIdentical(isUnknown(xNumUnk, unknown=numUnk), xNumUnkTest) checkIdentical(isUnknown(xNumUnk, unknown=as.integer(numUnk)), xNumUnkTest) checkIdentical(isUnknown(xChaUnk, unknown=chaUnk), xChaUnkTest) checkIdentical(isUnknown(xFacUnk, unknown=facUnk), xFacUnkTest) ## multiple values are allowed for vector methods in vector or list form checkIdentical(isUnknown(xIntUnk, unknown=unkC), xIntUnkTest) checkIdentical(isUnknown(xIntUnk, unknown=unkL), xIntUnkTest) ## NA's in factors checkIdentical(isUnknown(xFacUnk1, unknown=facUnk1), xFacUnk1Test) facNA <- factor(c("0", 1, 2, 3, NA, "NA")) facNATest <- c(FALSE, FALSE, FALSE, FALSE, TRUE, FALSE) checkIdentical(isUnknown(facNA), facNATest) ## Date-time classes checkIdentical(isUnknown(xDateUnk, unknown=dateUnk), xDateTest) checkIdentical(isUnknown(xDate1Unk, unknown=dateUnk), xDate1Test) checkIdentical(isUnknown(xPOSIXltUnk, unknown=POSIXltUnk), xPOSIXltTest) checkIdentical(isUnknown(xPOSIXlt1Unk, unknown=POSIXltUnk), xPOSIXlt1Test) checkIdentical(isUnknown(xPOSIXctUnk, unknown=POSIXctUnk), xPOSIXctTest) checkIdentical(isUnknown(xPOSIXct1Unk, unknown=POSIXctUnk), xPOSIXct1Test) ## --- lists and data.frames --- ## with vector of single unknown values checkIdentical(isUnknown(xListUnk, unknown=unkC), xListUnkTest) checkIdentical(isUnknown(xDFUnk, unknown=unkC), xDFUnkTest) ## with list of single unknown values checkIdentical(isUnknown(xListUnk, unknown=unkL), xListUnkTest) checkIdentical(isUnknown(xDFUnk, unknown=unkL), xDFUnkTest) ## with named list of single unknown values checkIdentical(isUnknown(xListNUnk, unknown=unkLN), xListNUnkTest) checkIdentical(isUnknown(xDFUnk, unknown=unkLN), xDFUnkTest) ## with named list of multiple unknown values - valid here checkIdentical(isUnknown(xListMNUnkF, unknown=unkLMN), xListMNUnkFTest) checkIdentical(isUnknown(xDFMUnkF, unknown=unkLMN), xDFMUnkFTest) ## with single unknown value - recycling checkIdentical(isUnknown(xListUnk1, unknown=unk1), xListUnk1Test) checkIdentical(isUnknown(xDFUnk1, unknown=unk1), xDFUnk1Test) ## with vector of two unknown values - recycling checkIdentical(isUnknown(xListUnk2, unknown=unkC2), xListUnk2Test) checkIdentical(isUnknown(xDFUnk2, unknown=unkC2), xDFUnk2Test) ## with list of two unknown values - recycling checkIdentical(isUnknown(xListUnk2, unknown=unkL2), xListUnk2Test) checkIdentical(isUnknown(xDFUnk2, unknown=unkL2), xDFUnk2Test) ## list(.default=) checkIdentical(isUnknown(x=xListUnkD1, unknown=unkLND1), xListUnkD1Test) ## list(.default=, someOther=) we do not know someOther, but should work ## as x is not named checkIdentical(isUnknown(x=xListUnkDSO2, unknown=unkLNDSO2), xListUnkDSO2Test) ## list(.default=) in named list checkIdentical(isUnknown(x=xListNUnkD1, unknown=unkLND1), xListNUnkD1Test) ## list(.default=, someOther=) OK if someOther is in the named list checkIdentical(isUnknown(x=xListNUnkD3, unknown=unkLND3), xListNUnkD3Test) ## list(.default=, 99) ERROR as we do not know where to apply 99 checkException(isUnknown(x=xListNUnk, unknown=unkLND2E)) ## --- matrix --- checkIdentical(isUnknown(x=mat, unknown=NA), matUnkTest) checkIdentical(isUnknown(x=matUnk1, unknown=matUnk), matUnkTest) checkIdentical(isUnknown(x=matUnk1, unknown=c(1, matUnk)), matUnk2Test) } ### }}} ### {{{ --- unknownToNA --- test.unknownToNA <- function() { ## --- base methods for vectors --- ## base ... 
checkIdentical(unknownToNA(xIntUnk, as.integer(intUnk)), xInt) checkIdentical(unknownToNA(xIntUnk, intUnk), xInt) ## with numeric checkIdentical(unknownToNA(xNumUnk, numUnk), xNum) checkIdentical(unknownToNA(xNumUnk, as.integer(numUnk)), xNum) checkIdentical(unknownToNA(xChaUnk, chaUnk), xCha) checkIdentical(unknownToNA(xChaUnk, chaUnk), xCha) checkIdentical(unknownToNA(xFacUnk, facUnk), xFac) ## multiple values are allowed for vector methods in vector or list form checkIdentical(unknownToNA(xIntUnk, unknown=unkC), xInt) checkIdentical(unknownToNA(xIntUnk, unknown=unkL), xInt) ## NA's in factors checkIdentical(unknownToNA(xFacUnk1, unknown=facUnk1), xFac1) facNA <- factor(c("0", 1, 2, 3, NA, "NA")) facNATest <- factor(c("0", 1, 2, 3, NA, NA)) checkIdentical(unknownToNA(x=facNA, unknown="NA"), facNATest) ## Date-time classes checkIdentical(unknownToNA(xDateUnk, unknown=dateUnk), xDate) checkIdentical(unknownToNA(xPOSIXctUnk, unknown=POSIXctUnk), xPOSIXct) #### ## Per Brian Ripley on 2014-01-15: ## ## On platforms where POSIXlt has a gmtoff component, it does not need to be set. So ## ## > z$gmtoff ## [1] 3600 NA ## > xPOSIXltUnk$gmtoff ## [1] 3600 3600 ## ## (or sometimes 0, not NA). ## ## So although identical() correctly reports that they differ, this ## is allowed for optional components. ## ## It would also be wrong to use identical() to compare isdst ## components: isdst = -1 means unknown. ## ## Replaced: ## checkIdentical(unknownToNA(xPOSIXltUnk, unknown=POSIXltUnk), xPOSIXlt) ## With: tmp_unknownToNA <- unknownToNA(xPOSIXltUnk, unknown=POSIXltUnk) tmp_xPOSIXlt <- xPOSIXlt ## tmp_unknownToNA$gmtoff <- NULL # Remove $gmtoff to avoid comparison tmp_xPOSIXlt$gmtoff <- NULL ## isdst.unknown <- unique( c(which(is.na(tmp_unknownToNA$isdst) | tmp_unknownToNA$isdst==-1 ) ) , c(which(is.na(tmp_xPOSIXlt$isdst) | tmp_xPOSIXlt$isdst==-1 ) ) ) ## checkIdentical(tmp_unknownToNA$isdst[!isdst.unknown], tmp_xPOSIXlt$isds[!isdst.unknown]) ## tmp_unknownToNA$isdst <- NULL # Remove $isdst to avoid comparison tmp_xPOSIXlt$isdst <- NULL # by checkIdentical ## checkIdentical(tmp_unknownToNA, tmp_xPOSIXlt) #### ## --- lists and data.frames --- ## with vector of single unknown values checkIdentical(unknownToNA(xListUnk, unknown=unkC), xList) checkIdentical(unknownToNA(xDFUnk, unknown=unkC), xDF) ## with list of single unknown values checkIdentical(unknownToNA(xListUnk, unknown=unkL), xList) checkIdentical(unknownToNA(xDFUnk, unknown=unkL), xDF) ## with named list of single unknown values checkIdentical(unknownToNA(xListNUnk, unknown=unkLN), xListN) checkIdentical(unknownToNA(xDFUnk, unknown=unkLN), xDF) ## with names list of multiple unknown values - must be an error checkIdentical(unknownToNA(xListMNUnkF, unknown=unkLMN), xListMNF) checkIdentical(unknownToNA(xDFMUnkF, unknown=unkLMN), xDFMF) ## with single unknown value - recycling checkIdentical(unknownToNA(xListUnk1, unknown=unk1), xList) checkIdentical(unknownToNA(xDFUnk1, unknown=unk1), xDF) ## with vector of two unknown values - recycling checkIdentical(unknownToNA(xListUnk2, unknown=unkC2), xList) checkIdentical(unknownToNA(xDFUnk2, unknown=unkC2), xDF) ## with list of two unknown values - recycling checkIdentical(unknownToNA(xListUnk2, unknown=unkL2), xList) checkIdentical(unknownToNA(xDFUnk2, unknown=unkL2), xDF) ## with named list of two unknown values but x is not named so named list ## does not have any effect --> error as we do not know how to recycle checkException(unknownToNA(xListUnk2a, unknown=unkLN2)) ## but we should get some results with 
named x checkIdentical(unknownToNA(xListNUnk2, unknown=unkL2), xListN) ## not also necesarilly with recycling of names lists, as it is ## not clear how to properly recycle named lists (only names that match ## can be really properly recycled) checkException(unknownToNA(xListNUnk2, unknown=unkLN2)) checkIdentical(unknownToNA(xDFUnk2, unknown=unkL2), xDF) checkException(unknownToNA(xDFUnk2, unknown=unkLN2)) ## list(.default=) checkIdentical(unknownToNA(x=xListUnkD1, unknown=unkLND1), xListD1) ## list(.default=, someOther=) we do not know someOther, but should work ## as x is not named checkIdentical(unknownToNA(x=xListUnkDSO2, unknown=unkLNDSO2), xList) ## list(.default=) in named list checkIdentical(unknownToNA(x=xListNUnkD1, unknown=unkLND1), xListND1) ## list(.default=, someOther=) OK if someOther is in the named list checkIdentical(unknownToNA(x=xListNUnkD3, unknown=unkLND3), xListN) ## list(.default=, 99) ERROR as we do not know where to apply 99 checkException(unknownToNA(x=xListNUnk, unknown=unkLND2E)) ## --- matrix --- checkEquals(unknownToNA(x=matUnk1, unknown=matUnk), mat) } ### }}} ### {{{ --- NAToUnknown --- test.NAToUnknown <- function() { ## --- base methods for vectors --- ## base ... checkIdentical(NAToUnknown(xInt, as.integer(intUnk)), xIntUnk) checkIdentical(NAToUnknown(xInt, intUnk), xIntUnk) ## with numeric checkIdentical(NAToUnknown(xNum, numUnk), xNumUnk) checkIdentical(NAToUnknown(xNum, as.integer(numUnk)), xNumUnk) checkIdentical(NAToUnknown(xCha, chaUnk), xChaUnk) checkIdentical(NAToUnknown(xCha, chaUnk), xChaUnk) checkIdentical(NAToUnknown(xFac, facUnk), xFacUnk) ## only single values are allowed for vector methods checkException(NAToUnknown(xInt, unknown=unkC)) checkException(NAToUnknown(xInt, unknown=unkL)) ## and they should not already be in x unless force=TRUE checkException(NAToUnknown(xCha, unknown=chaUnk1)) checkIdentical(NAToUnknown(xCha, unknown=chaUnk1, force=TRUE), xChaUnk1) checkException(NAToUnknown(xFac, unknown=facLev)) checkIdentical(NAToUnknown(xFac, unknown=facLev, force=TRUE), xFacUnkLev) ## NA's in factors checkIdentical(NAToUnknown(xFac, unknown=facUnk1, force=TRUE), xFacUnk1) facNA <- factor(c("0", 1, 2, 3, NA, NA)) facNATest <- factor(c("0", 1, 2, 3, "NA", "NA")) checkIdentical(NAToUnknown(x=facNA, unknown="NA"), facNATest) ## Date-time classes checkIdentical(NAToUnknown(xDate, unknown=dateUnk), xDateUnk) checkIdentical(NAToUnknown(xPOSIXct, unknown=POSIXctUnk), xPOSIXctUnk) #### ## Per Brian Ripley on 2014-01-15: ## ## On platforms where POSIXlt has a gmtoff component, it does not need to be set. So ## ## > z$gmtoff ## [1] 3600 NA ## > xPOSIXltUnk$gmtoff ## [1] 3600 3600 ## ## (or sometimes 0, not NA). ## ## So although identical() correctly reports that they differ, this ## is allowed for optional components. ## ## It would also be wrong to use identical() to compare isdst ## components: isdst = -1 means unknown. 
## ## Replaced: ## checkIdentical(NAToUnknown(xPOSIXlt, unknown=POSIXltUnk), xPOSIXltUnk) ## With: tmp_NAToUnknown <- NAToUnknown(xPOSIXlt, unknown=POSIXltUnk) tmp_xPOSIXltUnk <- xPOSIXltUnk ## tmp_NAToUnknown$gmtoff <- NULL # Remove $gmtoff to avoid comparison tmp_xPOSIXltUnk$gmtoff <- NULL ## isdst.unknown <- unique( c(which(is.na(tmp_NAToUnknown$isdst) | tmp_NAToUnknown$isdst==-1 ) ) , c(which(is.na(tmp_xPOSIXltUnk$isdst) | tmp_xPOSIXltUnk$isdst==-1 ) ) ) ## checkIdentical(tmp_NAToUnknown$isdst[!isdst.unknown], tmp_xPOSIXltUnk$isds[!isdst.unknown]) ## tmp_NAToUnknown$isdst <- NULL # Remove $isdst to avoid comparison tmp_xPOSIXltUnk$isdst <- NULL # by checkIdentical ## checkIdentical(tmp_NAToUnknown, tmp_xPOSIXltUnk) #### ## --- lists and data.frames --- ## with vector of single unknown values checkIdentical(NAToUnknown(xList, unknown=unkC), xListUnk) checkIdentical(NAToUnknown(xDF, unknown=unkC), xDFUnk) ## with list of single unknown values checkIdentical(NAToUnknown(xList, unknown=unkL), xListUnk) checkIdentical(NAToUnknown(xDF, unknown=unkL), xDFUnk) ## with named list of single unknown values checkIdentical(NAToUnknown(xListN, unknown=unkLN), xListNUnk) checkIdentical(NAToUnknown(xDF, unknown=unkLN), xDFUnk) ## with names list of multiple unknown values - must be an error checkException(NAToUnknown(xListN, unknown=unkLMN)) checkException(NAToUnknown(xDF, unknown=unkLMN)) ## with single unknown value - recycling checkIdentical(NAToUnknown(xList, unknown=unk1), xListUnk1) checkIdentical(NAToUnknown(xDF, unknown=unk1), xDFUnk1) ## with vector of two unknown values - recycling checkIdentical(NAToUnknown(xList, unknown=unkC2), xListUnk2) checkIdentical(NAToUnknown(xDF, unknown=unkC2), xDFUnk2) ## with list of two unknown values - recycling checkIdentical(NAToUnknown(xList, unknown=unkL2), xListUnk2) checkIdentical(NAToUnknown(xDF, unknown=unkL2), xDFUnk2) ## with named list of two unknown values but x is not named so named list ## does not have any effect --> error as we do not know how to recycle checkException(NAToUnknown(xList, unknown=unkLN2)) ## but we should get some results with named x checkIdentical(NAToUnknown(xListN, unknown=unkL2), xListNUnk2) ## not also necesarilly with recycling of names lists, as it is ## not clear how to properly recycle named lists (only names that match ## can be really properly recycled) checkException(NAToUnknown(xListN, unknown=unkLN2)) checkIdentical(NAToUnknown(xDF, unknown=unkL2), xDFUnk2) checkException(NAToUnknown(xDF, unknown=unkLN2)) ## list(.default=) checkIdentical(NAToUnknown(x=xList, unknown=unkLND1), xListUnkD1) ## list(.default=, someOther=) we do not know someOther, but should work ## as x is not named checkIdentical(NAToUnknown(x=xList, unknown=unkLNDSO2), xListUnkDSO2) ## list(.default=) in named list checkIdentical(NAToUnknown(x=xListN, unknown=unkLND1), xListNUnkD1) ## list(.default=, someOther=) OK if someOther is in the named list checkIdentical(NAToUnknown(x=xListN, unknown=unkLND3), xListNUnkD3) ## list(.default=, 99) ERROR as we do not know where to apply 99 checkException(NAToUnknown(x=xListN, unknown=unkLND2E)) ## --- matrix --- checkEquals(NAToUnknown(x=mat, unknown=matUnk), matUnk1) } ### }}} ### {{{ Dear Emacs ### Local variables: ### folded-file: t ### End: ### }}} ###------------------------------------------------------------------------ ### runit.unknown.R ends here gdata/tests/unitTests/runit.trimSum.R0000644000175100001440000000240713003720415017435 0ustar hornikusers### runit.trimSum.R 
###------------------------------------------------------------------------ ### What: Unit tests for trimSum ### $Id$ ### Time-stamp: <2008-12-20 11:58:50 ggorjan> ###------------------------------------------------------------------------ ### {{{ --- Test setup --- if(FALSE) { library("RUnit") library("gdata") } ### }}} ### {{{ --- trimSum --- test.trimSum <- function() { ## 'x' must be a vector - for now checkException(trimSum(matrix(1:10))) checkException(trimSum(data.frame(1:10))) checkException(trimSum(list(1:10))) ## 'x' must be numeric checkException(trimSum(letters)) ## 'n' must be smaller than the length of x checkException(trimSum(x=1:10, n=11)) checkException(trimSum(x=1, n=1)) ## Default x <- trimSum(x=1:10, n=5) x2 <- c(1:4, 45) checkEquals(x, x2) ## Left x <- trimSum(x=1:10, n=5, right=FALSE) x2 <- c(21, 7:10) checkEquals(x, x2) ## NA x <- trimSum(x=c(1:9, NA), n=5) x2 <- c(1:4, NA) checkEquals(x, x2) x <- trimSum(x=c(1:9, NA), n=5, na.rm=TRUE) x2 <- c(1:4, 35) checkEquals(x, x2) } ### }}} ### {{{ Dear Emacs ## Local variables: ## folded-file: t ## End: ### }}} ###------------------------------------------------------------------------ ### runit.trimSum.R ends here gdata/tests/unitTests/runit.bindData.R0000644000175100001440000000535313003720415017506 0ustar hornikusers### runit.bindData.R ###------------------------------------------------------------------------ ### What: Bind two data frames - unit tests ### $Id$ ### Time-stamp: <2008-12-30 11:58:50 ggorjan> ###------------------------------------------------------------------------ ### {{{ --- Test setup --- if(FALSE) { library("RUnit") library("gdata") } ### }}} ### {{{ --- bindData --- test.bindData <- function() { ## 'x'/'y' must be a data.frame checkException(bindData(x=1:10, y=1:10)) checkException(bindData(x=matrix(1:10), y=matrix(1:10))) n1 <- 6; n2 <- 12; n3 <- 4 ## Single trait 1 num <- c(5:n1, 10:13) tmp1 <- data.frame(y1=rnorm(n=n1), f1=factor(rep(c("A", "B"), n1/2)), ch=letters[num], fa=factor(letters[num]), nu=(num) + 0.5, id=factor(num), stringsAsFactors=FALSE) ## Single trait 2 with repeated records, some subjects also in tmp1 num <- 4:9 tmp2 <- data.frame(y2=rnorm(n=n2), f2=factor(rep(c("C", "D"), n2/2)), ch=letters[rep(num, times=2)], fa=factor(letters[rep(c(num), times=2)]), nu=c((num) + 0.5, (num) + 0.25), id=factor(rep(num, times=2)), stringsAsFactors=FALSE) ## Single trait 3 with completely distinct set of subjects num <- 1:4 tmp3 <- data.frame(y3=rnorm(n=n3), f3=factor(rep(c("E", "F"), n3/2)), ch=letters[num], fa=factor(letters[num]), nu=(num) + 0.5, id=factor(num), stringsAsFactors=FALSE) ## Combine all datasets tmp12 <- bindData(x=tmp1, y=tmp2, common=c("id", "nu", "ch", "fa")) tmp123 <- bindData(x=tmp12, y=tmp3, common=c("id", "nu", "ch", "fa")) checkEquals(names(tmp123), c("id", "nu", "ch", "fa", "y1", "f1", "y2", "f2", "y3", "f3")) checkEquals(rbind(tmp1["id"], tmp2["id"], tmp3["id"]), tmp123["id"]) checkEquals(rbind(tmp1["fa"], tmp2["fa"], tmp3["fa"]), tmp123["fa"]) checkEquals(is.na(tmp123$y1), c(rep(FALSE, times=n1), rep(TRUE, times=n2+n3))) checkEquals(is.na(tmp123$f1), c(rep(FALSE, times=n1), rep(TRUE, times=n2+n3))) checkEquals(is.na(tmp123$y2), c(rep(TRUE, times=n1), rep(FALSE, times=n2), rep(TRUE, times=n3))) checkEquals(is.na(tmp123$f2), c(rep(TRUE, times=n1), rep(FALSE, times=n2), rep(TRUE, times=n3))) checkEquals(is.na(tmp123$y3), c(rep(TRUE, times=n1+n2), rep(FALSE, times=n3))) checkEquals(is.na(tmp123$f3), c(rep(TRUE, times=n1+n2), rep(FALSE, times=n3))) } ### }}} ### {{{ Dear Emacs 
## Local variables: ## folded-file: t ## End: ### }}} ###------------------------------------------------------------------------ ### runit.bindData.R ends here gdata/tests/unitTests/runit.trim.R0000644000175100001440000000271513003720415016752 0ustar hornikusers### runit.trim.R ###------------------------------------------------------------------------ ### What: Tests for trim ### $Id: runit.trim.R 1784 2014-04-05 02:23:45Z warnes $ ### Time-stamp: <2006-08-29 14:21:02 ggorjan> ###------------------------------------------------------------------------ ### {{{ --- Test setup --- if(FALSE) { library("RUnit") library("gdata") } ### }}} ### {{{ --- trim --- test.trim <- function() { tmp <- Sys.getlocale(category="LC_COLLATE") Sys.setlocale(category="LC_COLLATE", locale="C") sTrim <- " this is an example string " sTrimR <- "this is an example string" fTrim <- factor(c(sTrim, sTrim, " A", " B ", " C ", "D ")) fTrimR <- factor(c(sTrimR, sTrimR, "A", "B", "C", "D")) lTrim <- list(s=rep(sTrim, times=6), f=fTrim, i=1:6) lTrimR <- list(s=rep(sTrimR, times=6), f=fTrimR, i=1:6) dfTrim <- as.data.frame(lTrim) dfTrimR <- as.data.frame(lTrimR) checkIdentical(trim(sTrim), sTrimR) checkIdentical(trim(fTrim), fTrimR) checkIdentical( levels(trim(fTrim, recode.factor=FALSE)), c("this is an example string", "C", "A", "B", "D") ) checkIdentical(trim(lTrim), lTrimR) checkIdentical(trim(dfTrim), dfTrimR) Sys.setlocale(category="LC_COLLATE", locale=tmp) } ### }}} ### {{{ Dear Emacs ## Local variables: ## folded-file: t ## End: ### }}} ###------------------------------------------------------------------------ ### runit.trim.R ends here gdata/tests/unitTests/runit.cbindX.R0000644000175100001440000000516413003720415017207 0ustar hornikusers### runit.cbindX.R ###------------------------------------------------------------------------ ### What: Unit tests for cbindX ### $Id: runit.cbindX.R 1784 2014-04-05 02:23:45Z warnes $ ### Time-stamp: <2008-08-05 13:40:49 ggorjan> ###------------------------------------------------------------------------ ### {{{ --- Test setup --- if(FALSE) { library("RUnit") library("gdata") } ### }}} ### {{{ --- cbindX --- test.cbindX <- function() { df1 <- data.frame(a=1:3, b=c("A", "B", "C")) df2 <- data.frame(c=as.character(1:5), a=5:1) ma1 <- matrix(as.character(1:4), nrow=2, ncol=2) ma2 <- matrix(1:6, nrow=3, ncol=2) df12test <- cbindX(df1, df2) df12stand <- data.frame(a=c(1:3, NA, NA), b=c("A", "B", "C", NA, NA), c=as.character(1:5), a=5:1) names(df12stand)[4] <- "a" checkEquals(df12test, df12stand) ma12test <- cbindX(ma1, ma2) ma12stand <- matrix(as.character(c(1, 3, 1, 4, 2, 4, 2, 5, NA, NA, 3, 6)), nrow=3, ncol=4, byrow=TRUE) checkEquals(ma12test, ma12stand) da11test <- cbindX(df1, ma1) da11stand <- data.frame(a=1:3, b=c("A", "B", "C"), as.character(c(1:2, NA)), as.character(c(3:4, NA))) names(da11stand)[3:4] <- c("1", "2") checkEquals(da11test, da11stand) tmpTest <- cbindX(df1, df2, ma1, ma2) tmpStand <- data.frame(a=c(1:3, NA, NA), b=c("A", "B", "C", NA, NA), c=as.character(1:5), a=5:1, as.character(c(1:2, NA, NA, NA)), as.character(c(3:4, NA, NA, NA)), c(1:3, NA, NA), c(4:6, NA, NA)) names(tmpStand)[4:8] <- c("a", "1", "2", "1", "2") checkEquals(tmpTest, tmpStand) tmpTest <- cbindX(ma1, ma2, df1, df2) tmpStand <- data.frame(as.character(c(1:2, NA, NA, NA)), as.character(c(3:4, NA, NA, NA)), as.character(c(1:3, NA, NA)), as.character(c(4:6, NA, NA)), a=c(1:3, NA, NA), b=c("A", "B", "C", NA, NA), c=as.character(1:5), a=5:1) names(tmpStand)[c(1:4, 8)] <- c("1", "2", "3", "4", "a") 
checkEquals(tmpTest, tmpStand) } ### }}} ### {{{ Dear Emacs ### Local variables: ### folded-file: t ### end: ### }}} ###------------------------------------------------------------------------ ### runit.cbindX.R ends here gdata/tests/unitTests/runit.nPairs.R0000644000175100001440000000422113003720415017225 0ustar hornikusers### runit.nPairs.R ###------------------------------------------------------------------------ ### What: Number of variable pairs - unit tests ### $Id$ ### Time-stamp: <2008-12-30 18:24:59 ggorjan> ###------------------------------------------------------------------------ ### {{{ --- Test setup --- if(FALSE) { library("RUnit") library("gdata") } ### }}} ### {{{ --- nPairs --- test.nPairs <- function() { ## 'x' must be a data.frame or a matrix x <- rpois(100, lambda=10) checkException(nPairs(x=x)) checkException(nPairs(x=table(x))) test <- data.frame(V1=c(1, 2, 3, 4, 5), V2=c(NA, 2, 3, 4, 5), V3=c(1, NA, NA, NA, NA), V4=c(1, 2, 3, NA, NA)) testCheck <- matrix(data=as.integer(c(5, 4, 1, 3, 4, 4, 0, 2, 1, 0, 1, 1, 3, 2, 1, 3)), nrow=4, ncol=4, byrow=TRUE) class(testCheck) <- c("nPairs", class(testCheck)) testCheckNames <- testCheck colnames(testCheckNames) <- rownames(testCheckNames) <- colnames(test) checkIdentical(nPairs(x=test), testCheckNames) checkIdentical(nPairs(x=test, names=FALSE), testCheck) checkIdentical(nPairs(x=as.matrix(test)), testCheckNames) checkIdentical(nPairs(x=as.matrix(test), names=FALSE), testCheck) testCheck <- cbind(testCheckNames, as.integer(c(5, 4, 0, 0))) class(testCheck) <- class(testCheckNames) colnames(testCheck) <- c(colnames(test), "all") checkIdentical(nPairs(x=test, margin=TRUE), testCheck) testCheckSumm <- matrix(data=as.integer(c(0, 1, 4, 2, 0, 0, 4, 2, 0, 1, 0, 0, 0, 1, 2, 0)), nrow=4, ncol=4, byrow=TRUE) dimnames(testCheckSumm) <- dimnames(testCheckNames) tmp <- summary(nPairs(x=test)) checkEquals(tmp, testCheckSumm) } ### }}} ### {{{ Dear Emacs ### Local variables: ### folded-file: t ### end: ### }}} ###------------------------------------------------------------------------ ### runit.nPairs.R ends here gdata/tests/unitTests/runit.write.fwf.R0000644000175100001440000001346113003720415017712 0ustar hornikusers### runit.write.fwf.R ###------------------------------------------------------------------------ ### What: Unit tests for write.fwf ### $Id: runit.write.fwf.R 1966 2015-04-25 16:23:31Z warnes $ ### Time-stamp: <2008-08-05 11:58:50 ggorjan> ###------------------------------------------------------------------------ ### {{{ --- Test setup --- if(FALSE) { library("RUnit") library("gdata") } ### }}} ### {{{ --- write.fwf --- test.write.fwf <- function() { ## 'x' must be a data.frame or matrix checkException(write.fwf(1:10)) checkException(write.fwf(list(1:10))) ## only single value is allowed in 'na' checkException(write.fwf(data.frame(1:10, letters[1:10]), na=c("", " "))) ## Example dataset num <- round(c(733070.345678, 1214213.78765456, 553823.798765678, 1085022.8876545678, 571063.88765456, 606718.3876545678, 1053686.6, 971024.187656, 631193.398765456, 879431.1), digits=3) testData <- data.frame(num1=c(1:10, NA), num2=c(NA, seq(from=1, to=5.5, by=0.5)), num3=c(NA, num), int1=c(as.integer(1:4), NA, as.integer(4:9)), fac1=factor(c(NA, letters[1:9], "hjh")), fac2=factor(c(letters[6:15], NA)), cha1=c(letters[17:26], NA), cha2=c(NA, "longer", letters[25:17]), stringsAsFactors=FALSE) levels(testData$fac1) <- c(levels(testData$fac1), "unusedLevel") testData$Date <- as.Date("1900-1-1") testData$Date[2] <- NA testData$POSIXt <- 
as.POSIXct(strptime("1900-1-1 01:01:01", format="%Y-%m-%d %H:%M:%S")) testData$POSIXt[5] <- NA ## --- output --- ## is tested with regular tests ## --- formatInfo --- ## default output formatInfoT <- data.frame(colname=c("num1", "num2"), nlevels=c(0, 0), position=c(1, 4), width=c(2, 3), digits=c(0, 1), exp=c(0, 0), stringsAsFactors=FALSE) testData1 <- testData[, c("num1", "num2")] testData1M <- as.matrix(testData1) formatInfo <- write.fwf(testData1, formatInfo=TRUE) checkEquals(formatInfo, formatInfoT) formatInfoM <- write.fwf(testData1M, formatInfo=TRUE) checkEquals(formatInfoM, formatInfoT) ## scientific notation dd <- options("digits"); options(digits = 7) testData2 <- data.frame(a=123, b=pi, c=1e8, d=1e222) formatInfo <- write.fwf(x=testData2, formatInfo=TRUE) checkEquals(formatInfo$width, c(3, 8, 5, 6)) checkEquals(formatInfo$digits, c(0, 6, 0, 0)) checkEquals(formatInfo$exp, c(0, 0, 2, 3)) options(dd) ## reset old options ## 'na' can either decrease or increase the width ## --> values of int1 have width 1 and using na="" should not increase ## the width formatInfo <- write.fwf(testData[, "int1", drop=FALSE], formatInfo=TRUE, na="") checkEquals(formatInfo$width, 1) ## --> values of int1 have width 1 and using na="1234" should increase ## the width to 4 formatInfo <- write.fwf(testData[, "int1", drop=FALSE], formatInfo=TRUE, na="1234") checkEquals(formatInfo$width, 4) ## rowCol formatInfoTR <- data.frame(colname=c("row", "num1", "num2"), nlevels=c(11, 0, 0), position=c(1, 4, 7), width=c(2, 2, 3), digits=c(0, 0, 1), exp=c(0, 0, 0), stringsAsFactors=FALSE) testData3 <- testData[, c("num1", "num2")] testData3M <- as.matrix(testData3) formatInfoR <- write.fwf(testData3, formatInfo=TRUE, rownames=TRUE, rowCol="row") checkEquals(formatInfoR, formatInfoTR) formatInfoR <- write.fwf(testData3M, formatInfo=TRUE, rownames=TRUE, rowCol="row") checkEquals(formatInfoR, formatInfoTR) ## quoteInfo alone does not have any effect formatInfoI <- write.fwf(testData3, formatInfo=TRUE, quoteInfo=TRUE) checkEquals(formatInfoI, formatInfoT) formatInfoI <- write.fwf(testData3M, formatInfo=TRUE, quoteInfo=TRUE) checkEquals(formatInfoI, formatInfoT) ## quote formatInfoTQ <- formatInfoT formatInfoTQ$position <- c(1, 6) formatInfoTQ$width <- c(4, 5) formatInfoQ <- write.fwf(testData3, formatInfo=TRUE, quote=TRUE) checkEquals(formatInfoQ, formatInfoTQ) formatInfoQ <- write.fwf(testData3M, formatInfo=TRUE, quote=TRUE) checkEquals(formatInfoQ, formatInfoTQ) ## quote without quoteInfo formatInfoTQI <- formatInfoT formatInfoTQI$position <- c(2, 6) formatInfoQI <- write.fwf(testData3, formatInfo=TRUE, quote=TRUE, quoteInfo=FALSE) checkEquals(formatInfoQI, formatInfoTQI) formatInfoQI <- write.fwf(testData3M, formatInfo=TRUE, quote=TRUE, quoteInfo=FALSE) checkEquals(formatInfoQI, formatInfoTQI) ## width ## --> default width for num1 is 2 testData4 <- testData[, "num1", drop=FALSE] testData4M <- as.matrix(testData[, "num1", drop=FALSE]) formatInfo <- write.fwf(testData4, width=10, formatInfo=TRUE) checkEquals(formatInfo$width, 10) formatInfo <- write.fwf(testData4M, width=10, formatInfo=TRUE) checkEquals(formatInfo$width, 10) ## too small value in width (this also tests recycling) ## --> proper width for num1 is 2, while for num2 it is 3 checkException(write.fwf(testData[, c("num1", "num2")], width=2)) checkException(write.fwf(testData[, c("num1", "num2")], width=c(2, 1))) ## Done cat("\nDONE.\n\n") } ### }}} ### {{{ Dear Emacs ## Local variables: ## folded-file: t ## End: ### }}} 
###------------------------------------------------------------------------ ### runit.write.fwf.R ends here gdata/tests/unitTests/runit.getDateTimeParts.R0000644000175100001440000000501213003720415021176 0ustar hornikusers### runit.getDateTimeParts.R ###------------------------------------------------------------------------ ### What: Extract date and time parts from ... - unit tests ### $Id$ ### Time-stamp: <2008-12-30 22:41:18 ggorjan> ###------------------------------------------------------------------------ ### {{{ --- Test setup --- if(FALSE) { library("RUnit") library("gdata") } num <- 1 cha <- "a" fac <- factor(c("A")) tYear <- as.character(c(2006, 1995, 1005, 3067)) tMonth <- c("01", "04", "06", "12") tDay <- c("01", "12", "22", "04") tDate <- paste( paste(tYear, tMonth, tDay, sep="-"), "GMT" ) tHour <- c("05", "16", "20", "03") tMin <- c("16", "40", "06", "52") tSec <- c("56", "34", "05", "15") tTime <- paste(tHour, tMin, tSec, sep=":") cDate <- as.Date(tDate) cDatePOSIXct <- as.POSIXct(tDate) cDatePOSIXlt <- as.POSIXlt(tDate) ### }}} ### {{{ --- getYear --- test.getYear <- function() { checkException(getYear(x=num)) checkException(getYear(x=cha)) checkException(getYear(x=fac)) checkIdentical(getYear(x=cDate), tYear) checkIdentical(getYear(x=cDatePOSIXct), tYear) checkIdentical(getYear(x=cDatePOSIXlt), tYear) } ### }}} ### {{{ --- getMonth --- test.getMonth <- function() { checkException(getMonth(x=num)) checkException(getMonth(x=cha)) checkException(getMonth(x=fac)) checkIdentical(getMonth(x=cDate), tMonth) checkIdentical(getMonth(x=cDatePOSIXct), tMonth) checkIdentical(getMonth(x=cDatePOSIXlt), tMonth) } ### }}} ### {{{ --- getDay --- test.getDay <- function() { checkException(getDay(x=num)) checkException(getDay(x=cha)) checkException(getDay(x=fac)) checkIdentical(getDay(x=cDate), tDay) checkIdentical(getDay(x=cDatePOSIXct), tDay) checkIdentical(getDay(x=cDatePOSIXlt), tDay) } ### }}} ### {{{ --- getHour --- test.getHour <- function() { checkException(getHour(x=num)) checkException(getHour(x=cha)) checkException(getHour(x=fac)) ## checkIdentical(getHour(x=cDate), tHour) } ### }}} ### {{{ --- getMin --- test.getMin <- function() { checkException(getMin(x=num)) checkException(getMin(x=cha)) checkException(getMin(x=fac)) ## checkIdentical(getMin(x=cDate), tMin) } ### }}} ### {{{ --- getSec --- test.getSec <- function() { checkException(getSec(x=num)) checkException(getSec(x=cha)) checkException(getSec(x=fac)) ## checkIdentical(getSec(x=cDate), tSec) } ### }}} ### {{{ Dear Emacs ### Local variables: ### folded-file: t ### end: ### }}} ###------------------------------------------------------------------------ ### runit.getDateTimeParts.R ends here gdata/tests/unitTests/runit.wideByFactor.R0000644000175100001440000000351113003720415020354 0ustar hornikusers### runit.wideByFactor.R ###------------------------------------------------------------------------ ### What: Reshape by factor levels - unit tests ### $Id$ ### Time-stamp: <2008-12-30 11:58:50 ggorjan> ###------------------------------------------------------------------------ ### {{{ --- Test setup --- if(FALSE) { library("RUnit") library("gdata") } ### }}} ### {{{ --- wideByFactor --- test.wideByFactor <- function() { n <- 10 f <- 2 tmp <- data.frame(y1=(1:n)/2, y2=(n:1)*2, f1=factor(rep(letters[1:f], n/2)), f2=factor(c(rep(c("M"), n/2), rep(c("F"), n/2))), c1=1:n, c2=2*(1:n)) ## 'x' must be a data.frame checkException(wideByFactor(x=1:10)) checkException(wideByFactor(x=matrix(1:10))) ## 'factor' can be only of length one 
checkException(wideByFactor(x=tmp, factor=c("f1", "f2"))) ## column defined in 'factor' must be a factor checkException(wideByFactor(x=tmp, factor="c1")) tmp2 <- wideByFactor(x=tmp, factor="f1", common=c("c1", "c2"), sort=FALSE) checkEquals(tmp2[c("c1", "c2")], tmp[c("c1", "c2")]) checkEquals(names(tmp2), c("c1", "c2", "f1", "y1.a", "y2.a", "f2.a", "y1.b", "y2.b", "f2.b")) checkEquals(tmp2$y1.a, c(0.5, NA, 1.5, NA, 2.5, NA, 3.5, NA, 4.5, NA)) checkEquals(tmp2$f2.a, factor(c("M", NA, "M", NA, "M", NA, "F", NA, "F", NA))) tmp2 <- wideByFactor(x=tmp, factor="f1", common=c("c1", "c2"), sort=TRUE, keepFactor=FALSE) checkEquals(tmp2$f2.a, factor(c("M", "M", "M", "F", "F", NA, NA, NA, NA, NA))) checkEquals(names(tmp2), c("c1", "c2", "y1.a", "y2.a", "f2.a", "y1.b", "y2.b", "f2.b")) } ### }}} ### {{{ Dear Emacs ## Local variables: ## folded-file: t ## End: ### }}} ###------------------------------------------------------------------------ ### runit.wideByFactor.R ends here gdata/tests/unitTests/runit.mapLevels.R0000644000175100001440000002405413003720415017727 0ustar hornikusers### runit.mapLevels.R ###------------------------------------------------------------------------ ### What: Unit tests for mapLevels et al. ### $Id: runit.mapLevels.R 1784 2014-04-05 02:23:45Z warnes $ ### Time-stamp: <2006-10-29 16:41:41 ggorjan> ###------------------------------------------------------------------------ ### {{{ --- Test setup --- if(FALSE) { library("RUnit") library("gdata") } ### }}} ### {{{ mapLevels, is.*, as.*, [.* test.mapLevels <- function() { ## Integer and numeric checkException(mapLevels(1:3)) # wrong class(x) checkException(mapLevels(1.5)) # wrong class(x) ## Factor f <- factor(c("B", "C", "A")) fMapInt <- list(A=as.integer(1), B=as.integer(2), C=as.integer(3)) fMapInt1 <- list(B=as.integer(1), C=as.integer(2)) fMapCha <- list(A="A", B="B", C="C") fMapInt <- as.levelsMap(fMapInt) fMapInt1 <- as.levelsMap(fMapInt1) fMapCha <- as.levelsMap(fMapCha) fMapCha1 <- fMapCha[c(1, 3)] # this will test also [.levelsMap checkIdentical(mapLevels(f), fMapInt) checkTrue(is.levelsMap(mapLevels(f))) # test for is.levelsMap checkTrue(is.levelsMap(fMapInt)) # test for as.levelsMap checkTrue(!gdata:::.isCharacterMap(fMapInt)) checkIdentical(mapLevels(f, sort=FALSE), fMapInt) # sort is not used for factors checkIdentical(mapLevels(f[1:2], drop=TRUE), fMapInt1) checkIdentical(mapLevels(f, codes=FALSE), fMapCha) checkIdentical(mapLevels(f[c(2, 3)], drop=TRUE, codes=FALSE), fMapCha1) ## Character cha <- c("Z", "M", "A") chaMapInt <- list(A=as.integer(1), M=as.integer(2), Z=as.integer(3)) chaMapIntO <- list(Z=as.integer(1), M=as.integer(2), A=as.integer(3)) chaMapInt1 <- list(M=as.integer(1), Z=as.integer(2)) chaMapCha <- list(A="A", M="M", Z="Z") chaMapInt <- as.levelsMap(chaMapInt) chaMapIntO <- as.levelsMap(chaMapIntO) chaMapInt1 <- as.levelsMap(chaMapInt1) chaMapCha <- as.levelsMap(chaMapCha) checkIdentical(mapLevels(cha), chaMapInt) checkIdentical(mapLevels(cha, sort=FALSE), chaMapIntO) # sort works for characters checkIdentical(mapLevels(cha[1:2], drop=TRUE), chaMapInt1) checkIdentical(mapLevels(cha, codes=FALSE), chaMapCha) ## List l <- list(f=f, cha=cha) l1 <- list(cha=cha, f=f) l2 <- list(cha=cha, f=f, i=1:10) lMapInt <- list(f=fMapInt, cha=chaMapInt) lMapCha <- list(f=fMapCha, cha=chaMapCha) lMapInt <- as.listLevelsMap(lMapInt) lMapCha <- as.listLevelsMap(lMapCha) lMapChaC <- as.list(sort(unique(c(cha, as.character(f))))) lMapChaCO <- as.list(unique(c(cha, as.character(f)))) names(lMapChaC) <- unlist(lMapChaC) 
names(lMapChaCO) <- unlist(lMapChaCO) lMapChaC <- as.levelsMap(lMapChaC) lMapChaCO <- as.levelsMap(lMapChaCO) checkIdentical(mapLevels(l), lMapInt) checkTrue(is.listLevelsMap(mapLevels(l))) # test for is.listLevelsMap checkTrue(is.listLevelsMap(lMapInt)) # test for as.listLevelsMap checkIdentical(mapLevels(l, codes=FALSE), lMapCha) checkException(mapLevels(l, combine=TRUE)) # can not combine integer maps checkIdentical(mapLevels(l, codes=FALSE, combine=TRUE), lMapChaC) checkIdentical(mapLevels(l1, codes=FALSE, combine=TRUE), lMapChaC) checkIdentical(mapLevels(l1, codes=FALSE, combine=TRUE, sort=FALSE), lMapChaCO) checkException(mapLevels(l2)) # only char and factor ## Data.frame df <- data.frame(f1=factor(c("G", "Abc", "Abc", "D", "F")), f2=factor(c("Abc", "Abc", "B", "D", "K")), cha=c("jkl", "A", "D", "K", "L"), int=1:5) dfMapInt <- list(f1=mapLevels(df$f1), f2=mapLevels(df$f2), cha=mapLevels(df$cha)) dfMapInt <- as.listLevelsMap(dfMapInt) dfMapInt1 <- dfMapInt[c(1, 3)] # this will test also [.listLevelsMap checkException(mapLevels(df)) # wrong class of int checkIdentical(mapLevels(df[, 1:3]), dfMapInt) checkIdentical(mapLevels(df[, c(1, 3)]), dfMapInt1) } ### }}} ### {{{ .check* test.checkLevelsMap <- function(x) { ## --- levelsMap --- ## not a list checkException(gdata:::.checkLevelsMap(x="A", method="raw")) ## list without names checkException(gdata:::.checkLevelsMap(x=list("A"), method="raw")) fMapInt <- list(A=as.integer(1), B=as.integer(2), C=as.integer(3)) ## x should be levelsMap checkException(gdata:::.checkLevelsMap(x=fMapInt, method="class")) ## --- listLevelsMap --- map <- list(as.levelsMap(fMapInt), as.levelsMap(fMapInt)) map1 <- list(fMapInt, fMapInt) class(map1) <- "listLevelsMap" ## x should be a listLevelsMap checkException(gdata:::.checkListLevelsMap(x=map, method="class")) ## x should be also a list of levelsMaps checkException(gdata:::.checkListLevelsMap(x=map1, method="class")) ## the rest is done with levelsMap tests } ### }}} ### {{{ c.* test.cLevelsMap <- function() { f1 <- factor(letters[c(2, 1)]) f2 <- factor(letters[c(3, 1, 2)]) mapCha1 <- mapLevels(f1, codes=FALSE) # get maps mapCha2 <- mapLevels(f2, codes=FALSE) mapCha1S <- mapLevels(as.character(f1), codes=FALSE, sort=FALSE) mapCha2S <- mapLevels(as.character(f2), codes=FALSE, sort=FALSE) mapChaTest <- list(a="a", b="b") mapChaTest1 <- list(a="a", b="b", c="c") mapChaTest2 <- list(c="c", a="a", b="b") class(mapChaTest) <- class(mapChaTest1) <- class(mapChaTest2) <- "levelsMap" mapChaTest3 <- list(mapChaTest, mapChaTest1, mapChaTest, mapChaTest1) class(mapChaTest3) <- "listLevelsMap" checkIdentical(c(mapCha1), mapChaTest) checkIdentical(c(mapCha2, mapCha1), mapChaTest1) checkIdentical(c(mapCha2S, mapCha1S, sort=FALSE), mapChaTest2) l <- list(f1, f2) mapCha <- mapLevels(l, codes=FALSE) checkIdentical(c(mapCha, mapCha), mapChaTest3) checkIdentical(c(mapCha, recursive=TRUE), mapChaTest1) checkException(c(mapLevels(f1))) # can not combine integer “levelsMaps” ## Example with maps of different length of components map1 <- list(A=c("a", "e", "i", "o", "u"), B="b", C="c", C="m", D=c("d", "e"), F="f") map2 <- list(A=c("a", "z", "w", "y", "x"), F="f", G=c("g", "h", "j"), i="i", k=c("k", "l"), B="B") map0Test <- list(A=c("a", "e", "i", "o", "u"), B="b", C="c", C="m", D=c("d", "e"), F="f", A=c("z", "w", "y", "x"), G=c("g", "h", "j"), i="i", k=c("k", "l"), B="B") map0Test <- as.levelsMap(map0Test) mapTest <- sort(map0Test) map1 <- as.levelsMap(map1) map2 <- as.levelsMap(map2) map <- c(map1, map2) map0 <- c(map1, map2, 
sort=FALSE) checkIdentical(map, mapTest) checkIdentical(map0, map0Test) } ### }}} ### {{{ unique test.uniqueLevelsMap <- function() { map <- list(A=c(1, 2, 1, 3), B=4, C=1, C=5, D=c(6, 8), E=7, B=4, D=c(6, 8)) map1 <- map map1[[1]] <- map[[1]][c(1, 2, 4)] map1[[7]] <- NULL # remove B=4 map1[[7]] <- NULL # remove D=c(6, 8) ## unique (used in as.levelsMap), will remove duplicates (A=1) checkIdentical(as.levelsMap(map1), as.levelsMap(map)) } ### }}} ### {{{ mapLevels<- "test.mapLevels<-" <- function() { ## Some errors checkException("mapLevels<-"(1.1, value=2)) # wrong class(x) checkException("mapLevels<-"(complex(1.1), value=2)) # wrong class(x) f <- factor(c("A", "B", "C")) fMapInt <- mapLevels(f) ## can not apply integer "levelsMap" to "character" checkException("mapLevels<-"(as.character(f), value=fMapInt)) fMapCha <- mapLevels(f, codes=FALSE) ## can not apply character levelsMap to "integer" checkException("mapLevels<-"(as.integer(f), value=chaMapCha)) fMapFuzz <- fMapInt fMapFuzz[[1]] <- "A" ## all components of 'value' must be of the same class checkException("mapLevels<-"(as.character(f), value=fMapFuzz)) checkException("mapLevels<-"(as.integer(f), value=fMapFuzz)) ## x integer, value integer levelsMap f <- factor(letters[c(10, 15, 1, 2)]) fMapInt <- mapLevels(f) fInt <- as.integer(f) mapLevels(fInt) <- fMapInt checkIdentical(fInt, f) ## x factor, value integer levelsMap fInt <- factor(as.integer(f)) mapLevels(fInt) <- fMapInt checkIdentical(fInt, f) ## above is essentially the same as levels<-.factor fInt1 <- factor(as.integer(f)) levels(fInt1) <- fMapInt checkIdentical(fInt1, f) ## x character, value character levelsMap cha <- c("B", "A", "C") chaMapCha <- as.levelsMap(list(A1="A", B2="B", C3="C")) mapLevels(cha) <- chaMapCha chaTest <- factor(c("B2", "A1", "C3")) checkIdentical(cha, chaTest) ## and a bit more for components of length > 1 cha <- c("G", "I", "B", "A", "C", "D", "Z") chaMapCha <- as.levelsMap(list(A1=c("A", "G", "I"), B2="B", C3=c("C", "D"))) mapLevels(cha) <- chaMapCha chaTest <- factor(c("A1", "A1", "B2", "A1", "C3", "C3", NA)) checkIdentical(cha, chaTest) ## x factor, value character levelsMap f <- factor(c("G", "I", "B", "A", "C", "D", "Z")) fMapCha <- as.levelsMap(list(A1=c("A", "G", "I"), B2="B", C3=c("C", "D"))) mapLevels(f) <- fMapCha fTest <- factor(c("A1", "A1", "B2", "A1", "C3", "C3", NA)) checkIdentical(f, fTest) ## Two factors and character map f1 <- factor(letters[1:10]) f2 <- factor(letters[5:14]) checkIdentical(as.integer(f1), as.integer(f2)) # the same integer codes mapCha1 <- mapLevels(f1, codes=FALSE) # get maps mapCha2 <- mapLevels(f2, codes=FALSE) mapCha <- c(mapCha1, mapCha2) # combine maps ## apply map mapLevels(f1) <- mapCha # the same as levels(f1) <- mapCha mapLevels(f2) <- mapCha # the same as levels(f2) <- mapCha checkIdentical(as.integer(f1), 1:10) # \ internal codes are now checkIdentical(as.integer(f2), 5:14) # / "consistent" among factors ## The same with list l <- list(f1=f1, f2=f2) mapCha <- mapLevels(l, codes=FALSE, combine=TRUE) mapLevels(l) <- mapCha checkIdentical(as.integer(l$f1), 1:10) # \ internal codes are now checkIdentical(as.integer(l$f2), 5:14) # / "consistent" among factors ## and data.frame df <- data.frame(f1=f1, f2=f2) mapCha <- mapLevels(df, codes=FALSE, combine=TRUE) mapLevels(df) <- mapCha checkIdentical(as.integer(df$f1), 1:10) # \ internal codes are now checkIdentical(as.integer(df$f2), 5:14) # / "consistent" among factors } ### }}} ### {{{ Dear Emacs ## Local variables: ## folded-file: t ## End: ### }}} 
###------------------------------------------------------------------------ ### runit.mapLevels.R ends here gdata/tests/unitTests/runit.drop.levels.R0000644000175100001440000000177613003720415020242 0ustar hornikusers### runit.drop.levels.R ###------------------------------------------------------------------------ ### What: Tests for drop.levels ### $Id: runit.drop.levels.R 1784 2014-04-05 02:23:45Z warnes $ ### Time-stamp: <2006-08-29 14:21:12 ggorjan> ###------------------------------------------------------------------------ ### {{{ --- Test setup --- if(FALSE) { library("RUnit") library("gdata") } ### }}} ### {{{ --- drop.levels --- test.drop.levels <- function() { f <- factor(c("A", "B", "C", "D"))[1:3] fDrop <- factor(c("A", "B", "C")) l <- list(f=f, i=1:3, c=c("A", "B", "D")) lDrop <- list(f=fDrop, i=1:3, c=c("A", "B", "D")) df <- as.data.frame(l) dfDrop <- as.data.frame(lDrop) checkIdentical(drop.levels(f), fDrop) checkIdentical(drop.levels(l), lDrop) checkIdentical(drop.levels(df), dfDrop) } ### }}} ### {{{ Dear Emacs ## Local variables: ## folded-file: t ## End: ### }}} ###------------------------------------------------------------------------ ### runit.drop.levels.R ends here gdata/tests/unitTests/runit.reorder.factor.R0000644000175100001440000000357113003720415020717 0ustar hornikusers### runit.reorder.factor.R ###------------------------------------------------------------------------ ### What: Tests for reorder.factor ### $Id: runit.reorder.factor.R 1784 2014-04-05 02:23:45Z warnes $ ### Time-stamp: <2006-10-30 18:25:05 ggorjan> ###------------------------------------------------------------------------ ### {{{ --- Test setup --- if(FALSE) { library("RUnit") library("gdata") } ### }}} ### {{{ --- reorder.factor --- test.reorder.factor <- function() { tmp <- Sys.getlocale(category="LC_COLLATE") Sys.setlocale(category="LC_COLLATE", locale="C") ## Create a 4 level example factor levs <- c("PLACEBO", "300 MG", "600 MG", "1200 MG") trt <- factor(rep(x=levs, times=c(22, 24, 28, 26))) ## Change the order to something useful ## default "mixedsort" ordering trt2 <- reorder(trt) levsTest <- c("300 MG", "600 MG", "1200 MG", "PLACEBO") checkIdentical(levels(trt2), levsTest) ## using indexes: trt3 <- reorder(trt, new.order=c(4, 2, 3, 1)) levsTest <- c("PLACEBO", "300 MG", "600 MG", "1200 MG") checkIdentical(levels(trt3), levsTest) ## using label names: trt4 <- reorder(trt, new.order=c("PLACEBO", "300 MG", "600 MG", "1200 MG")) levsTest <- c("PLACEBO", "300 MG", "600 MG", "1200 MG") checkIdentical(levels(trt4), levsTest) ## using frequency trt5 <- reorder(trt, X=as.numeric(trt), FUN=length) levsTest <- c("PLACEBO", "300 MG", "1200 MG", "600 MG") checkIdentical(levels(trt5), levsTest) ## drop out the '300 MG' level trt6 <- reorder(trt, new.order=c("PLACEBO", "600 MG", "1200 MG")) levsTest <- c("PLACEBO", "600 MG", "1200 MG") checkIdentical(levels(trt6), levsTest) Sys.setlocale(category="LC_COLLATE", locale=tmp) } ### }}} ### {{{ Dear Emacs ## Local variables: ## folded-file: t ## End: ### }}} ###------------------------------------------------------------------------ ### runit.reorder.factor.R ends here gdata/NAMESPACE0000644000175100001440000000714613115324732012600 0ustar hornikusers export( .onAttach, ans, Args, bindData, case, cbindX, centerText, combine, ConvertMedUnits, drop.levels, duplicated2, elem, env, first, 'first<-', frameApply, installXLSXsupport, interleave, is.what, keep, last, 'last<-', left, ll, ls.funs, lowerTriangle, "lowerTriangle<-", matchcols, nobs, # default method 
now provided by stats package nPairs, mv, read.xls, rename.vars, remove.vars, reorder.factor, resample, right, sheetCount, sheetNames, startsWith, trim, trimSum, unmatrix, update.list, #update.data.frame, upperTriangle, "upperTriangle<-", wideByFactor, write.fwf, xls2csv, xls2tab, xls2tsv, xls2sep, xlsFormats, ## Object size stuff object.size, as.object_sizes, is.object_sizes, humanReadable, ## getDateTime stuff getYear, getMonth, getDay, getHour, getMin, getSec, ## mapLevels stuff mapLevels, as.levelsMap, as.listLevelsMap, is.levelsMap, is.listLevelsMap, "mapLevels<-", ## unknown stuff isUnknown, unknownToNA, NAToUnknown ) importFrom(stats, reorder) importFrom(stats, nobs) importFrom(gtools, mixedsort) importFrom(methods, is) importFrom(utils, data, download.file, head, read.csv, read.delim, read.table, tail, write.table) S3method(reorder, factor) S3method(summary, nPairs) ## drop.levels stuff S3method(drop.levels, default) S3method(drop.levels, factor) S3method(drop.levels, list) S3method(drop.levels, data.frame) ## getDateTime stuff S3method(getYear, default) S3method(getYear, Date) S3method(getYear, POSIXct) S3method(getYear, POSIXlt) S3method(getMonth, default) S3method(getMonth, Date) S3method(getMonth, POSIXct) S3method(getMonth, POSIXlt) S3method(getDay, default) S3method(getDay, Date) S3method(getDay, POSIXct) S3method(getDay, POSIXlt) S3method(getHour, default) S3method(getMin, default) S3method(getSec, default) ## mapLevels stuff S3method(mapLevels, default) S3method(mapLevels, character) S3method(mapLevels, factor) S3method(mapLevels, list) S3method(mapLevels, data.frame) S3method("mapLevels<-", default) S3method("mapLevels<-", list) S3method("mapLevels<-", data.frame) S3method(print, levelsMap) S3method(print, listLevelsMap) S3method("[", levelsMap) S3method("[", listLevelsMap) S3method(c, levelsMap) S3method(c, listLevelsMap) S3method(unique, levelsMap) S3method(sort, levelsMap) ## nobs stuff S3method(nobs, data.frame) S3method(nobs, default) S3method(nobs, lm) # now provided by stats package ## Object size stuff S3method(c, object_sizes) S3method(format, object_sizes) S3method(print, object_sizes) ## unknown stuff S3method(isUnknown, default) S3method(isUnknown, POSIXlt) S3method(isUnknown, list) S3method(isUnknown, data.frame) S3method(isUnknown, matrix) S3method(unknownToNA, default) S3method(unknownToNA, factor) S3method(unknownToNA, list) S3method(unknownToNA, data.frame) S3method(NAToUnknown, default) S3method(NAToUnknown, factor) S3method(NAToUnknown, list) S3method(NAToUnknown, data.frame) ## trim stuff S3method(trim, character) S3method(trim, default) S3method(trim, factor) S3method(trim, list) S3method(trim, data.frame) ## left, right S3method(left, data.frame) S3method(left, matrix) S3method(right, data.frame) S3method(right, matrix) # update methods for list, data.frame S3method(update, list) #S3method(update, data.frame) gdata/INSTALL0000644000175100001440000000444613003720415012404 0ustar hornikusers Windows ======= The instructions below relate to Windows only and are not relevant to other platforms. On Windows, the perl based routines (read.xls, xls2sep, xls2csv, xls2tab, xls2tsv, sheetNames, sheetCount) will work with ActiveState perl but not with Rtools perl. 
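To see from within R which Excel formats the perl setup on your machine
currently supports, you can call xlsFormats(), one of the perl based
helpers in gdata, e.g.

  library(gdata)
  xlsFormats()

It reports the supported formats (for example "XLS" and "XLSX"); see
?xlsFormats if you need to point it at a particular perl interpreter.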
If you have ActiveState perl installed and the pl extension is associated with it (which the ActiveState installer associates automatically) then read.xls and other perl based routines in gdata will automatically locate ActiveState perl if you omit the perl= argument on these commands even if Rtools perl is ahead of it on your path. e.g. read.xls("mysheet.xls") Alternately you can use the perl= argument with a path to ActiveState perl to ensure it uses the right one. e.g. read.xls("mysheet.xls", perl = "C:\\Perl\\bin\\perl.exe") Also ensure that gdata was built with ActiveState Perl and not Rtools perl. This may require rebuilding gdata yourself like this: 1. Download and install the following (which all have automatic Windows installers and are therefore very easy to install): R (you likely already have R so you can likely skip this one) http://www.r-project.org Rtools http://www.murdoch-sutherland.com/Rtools/ ActiveState perl (ActivePerl) http://www.activestate.com/activeperl/ 2. If simply installing gdata in the usual way from within R results in read.xls and associated perl-based routines not working then re-install gdata yourself like this: Download gdata_*.tar.gz from: http://cran.r-project.org/web/packages/gdata/index.html and install it at the Windows console (not from within R): Rcmd INSTALL --build gdata_*.tar.gz Note: Rcmd is normally found in C:\Program Files\R\R-*\bin and if that is not on your path the above command should be written: "C:\Program Files\R\R-*\bin\Rcmd.exe" INSTALL --build gdata_*.tar.gz where the the path to Rcmd.exe should be replaced with the output of this R command: normalizePath(file.path(R.home(), "bin", "Rcmd.exe")) Note: On Vista and above you may need to run Rcmd as Administrator. Note: In the above the * in gdata_*.tar.gz should be replaced with the current version of gdata as found on: http://cran.r-project.org/web/packages/gdata/index.html gdata/NEWS0000644000175100001440000004510313003720417012047 0ustar hornikusersChanges in 2.17.0 (2015-07-02) ------------------------------ New features: - Add new argument 'byrow' to upperTriangle(), lowerTriangle(), upperTriangle<-(), and lowerTriangle<-() to specify by-row rather by-column order. This makes it simpler to copy values between the lower and upper triangular areas, e.g. to construct a symmetric matrix. Other changes: - Add inline comments to tests to alert reviewers of expected diffs on systems lacking the libraries for read.xls() to support XLSX formatted files. Changes in 2.16.1 (2015-04-28) ----------------------------- Bug fixes: - mapLevels() no longer generates warnings about conversion of lists to vectors. Other changes: - Requirement for Perl version 5.10.0 or later is specified in the package DESCRITION. - first() and last() are now simply wrappers for calls to 'head(x, n=1)' and 'tail(x, n=1)', respectively. Changes in 2.16.0 (2015-04-25) ------------------------------ New features: - New functions first() and last() to return the first or last element of an object. - New functions left() and right() to return the leftmost or rightmost n (default to 6) columns of a matrix or dataframe. - New 'scientific' argument to write.fwf(). Set 'scientific=FALSE' to prevent numeric columns from being displayed using scientific notification. 
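    For example (a sketch using made-up data):

      write.fwf(data.frame(x=c(1e-8, 12345678)), scientific=FALSE)

    prints the numeric column in fixed (positional) notation rather than
    in scientific notation.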
- The 'standard' argument to humanReadable() now accepts three values, 'SI' for base 1000 ('MB'), 'IEC' for base 1024 ('MiB'), and 'Unix' for base 1024 and single-character units ('M') - object.size() now returns objects with S3 class 'object_sizes' (note the final 's') to avoid conflicts with methods in utils for class 'object_size' provided by package 'utils' which can only handle a scalar size. - New 'units' argument to humanReadable()--and hence to print.object_sizes() and format.object_sizes()--that permits specifying the unit to use for all values. Use 'bytes' to display all values with the unit 'bytes', use 'auto' (or leave it missing) to automatically select the best unit, and use a unit from the selected standard to use that unit (i.e. 'MiB'). - The default arguments to humanReadable() have changed. The defaults are now 'width=NULL' and 'digits=1', so that the default behavior is now to show digit after the decimal for all values. Bug fixes: - reorder.factor() was ignoring the argument 'X' unless 'FUN' was supplied, making it incompatible with the behavior of stats:::reorder.default(). This has been corrected, so that calling reorder on a factor with arguments 'X' and/or 'FUN' should now return the same results whether gdata is loaded or not. (Reported by Sam Hunter.) - write.fwf() now properly supports matrix objects, including matrix objects without column names. (Reported by Carl Witthoft.) Other changes: - Replaced deprecated PERL function POSIX::isdigit in xls2csv.pl (which is used by read.xls() ) with an equivalent regular expression. (Reported by both Charles Plessy, Gerrit-jan Schutten, and Paul Johnson. Charles also provided a patch to correct the issue.) - aggregate.table(), which has been defunct gdata 2.13.3 (2014-04-04) has now been completely removed. Changes in 2.14.0 (2014-08-27) ------------------------------ Bug Fixes: - read.xls() can now properly process XLSX files with up to 16385 columns (the maximum generated by Microsoft Excel). - read.xls() now properly handles XLS/XLSX files that use 1904-01-01 as the reference value for dates instead of 1900-01-01 (the default for MS-Excel files created on the Mac). Other changes: - Updated perl libraries and code underlying read.xls() to the latest version, including switching from Spreadsheet::XLSX to Spreadsheet::ParseXLSX. Changes in 2.13.3 (2014-04-04) ------------------------------ Bug Fixes: - Unit tests were incorrectly checking for equality of optional POSIXlt components. (Bug reported by Brian Ripley). Other Changes: - 'aggregate.table' is now defunct. See '?gdata-defunct' for details. - Unit tests and vignettes now follow R standard practice. - Minor changes to clean up R CMD check warnings. Changes in 2.13.2 (2013-06-28) ------------------------------ Enhancements: - Simplify ll() by converting a passed list to an environment, avoiding the need for special casing and the use of attach/detach. - Working of deprecation warning message in aggregate.table clarified. Changes in 2.13.1 (2013-03-24) ------------------------------ Enhancements: - Replaced calls to depreciated function ".path.package" with the new public function "path.package". Changes in 2.13.0 (2012-09-20) ----------------------------- New features: - New 'duplicated2' function which returns TRUE for *all* elements that are duplicated, including the first, contributed by Liviu Andronic. This differs from 'duplicated', which only returns the second and following (second-to last and previous when 'fromLast=TRUE') duplicate elements. 
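    For example:

      duplicated(c("a", "b", "a"))    # FALSE FALSE  TRUE
      duplicated2(c("a", "b", "a"))   #  TRUE FALSE  TRUE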
- New 'ans' functon to return the value of the last evaluated top-level function (a convenience function for accessing .Last.value), contributed by Liviu Andonic. Bug Fixes: - On windows, warning messages printed to stdout by perl were being included in the return value from 'system', resulting in errors in 'sheetCount' and 'sheetNames'. Corrected. - The 'MedUnits' column names 'SIUnits' and 'ConventionalUnits' were reversed and misspelled. Changes in 2.12.0 (2012-09-12) ------------------------------ Other Changes: - 'stats::aggregate' was made into a generic on 27-Jan-2010, so that attempting to call 'aggregate' on a 'table' object will now incorrectly call 'aggregate.table'. Since 'aggregate.table' can be replaced by a call to tapply using two index vectors, e.g. aggregate.table(x, by1=a, by2=b, mean) can be replaced by tapply(x, INDEX=list(a, b), FUN=mean), the 'aggregate.table' function will now display a warning that it is depreciated and recommending the equivalent call to tapply. It will be removed entirely in a future version of gdata. Changes in 2.11.1 (2012-08-22) ------------------------------ Enhancements: - read.xls() now supports fileEncoding argument to allow non-ascii encoded data to be handled. See the manual page for an example. Bug Fixes: - The perl script utilized by read.xls() was incorrectly appending a space character at the end of each line, causing problems with character and NA entries in the final column. Changes in 2.11.0 (2012-06-18) ------------------------------ New Features: - read.xls() and supporting functions now allow blank lines to be preserved, rather than skipped, by supplying the argument "blank.lines.skip=FALSE". The underlying perl function has been extended to suppor this via an optional "-s" argument which, when present, *preserves* blank lines during the conversion. (The default behavior remains unchanged.) Other Changes: - Add SystemRequirements field specifying that perl is necessary for gdata to function fully. Changes in 2.10.6 (2012-06-12) ------------------------------ Bug fixes: - gdata::nobs.default() needs to handle logical vectors in addition to numeric vectors. Changes in 2.10.{3,4,5} (2012-06-08) ------------------------------------ Bug fixes: - Mark example for installXLSsupport() as dontrun so R CMD check won't fail on systems where PERL is not fully functional. - Correct name of installXLSsupport() in tests/test.read.xls.R. Other Changes: - Add dependency on R 2.13.0, since that is when stats::nobs appeared. Changes in 2.10.2 (2012-06-06) --------------------------------------- Bug fixes: - Fix issues in nobs.default identified in testing with the gmodels package. Changes in 2.10.1 (2012-06-06) ------------------------------ Bug fixes: - Undo removal of 'nobs' and 'nobs.lm'. Instead define aliases for 'nobs' and 'nobs.lm' to support backward compatibility for packages depending on gdata. Changes in 2.10.0 (2012-06-05) ------------------------------ New features: - New ls.funs() function to list all objects of class function in the specified environment. - New startsWith() function to determine if a string "starts with" the specified characters. Enhancements: - Add 'na.strings' argument to read.xls() to convert Excel's '#DIV/0!' to NA. Bug fixes: - Correct various R CMD check warnings Other changes: - Base S3 method for nobs() and nobs.lm() method removed since these are now provided in the stats package. 
Changes in 2.9.0 (2011-09-30) ----------------------------- New features: - Add centerText() function to center text strings for a specified width. - Add case() function, a vectorized variant of the base::switch() function, which is useful for converting numeric codes into factors. Enhancements: - Minor improvements to xls2csv() man page. CHANGES IN 2.8.1 (2011-04-15) ----------------------------- Enhancements: - nPairs() gains a summary method that shows how many times each variable is known, while the other variable of a pair is not Bug fixes: - Fix errors on windows when R or Perl install path includes spaces by properly quoting the path. CHANGES IN 2.8.1 (2010-11-12) ----------------------------- Enhancements: - Minor improvement to Args(), read.xls() man page. Bug fixes: - Modify write.fwf() to capture and pass on additional arguments for write.table(). This resolves a bug reported by Jan Wijffels. - Modify xls2sep.R to avoid use of file.access() which is unreliable on Windows network shares. CHANGES IN 2.8.0 (2010-04-03) ----------------------------- Enhancements: - When loaded, gtools (via an .onAttach() function) now checks: 1) if perl is available 2) whether the perl libraries for XLS support are available 3) whether the perl libraries for XLSX support are available If perl is not available, an appropriate warning message is displayed. If necessary perl libraries are not available, a warning message is displayed, as is a message suggesting the user run the (new) installXLSXsupport() function to attempt to install the necessary perl libraries. - The function installXLSXsupport() has been provided to install the binary perl modules that read.xls needs to support Excel 2007+ 'XLSX' files. CHANGES IN 2.7.3 (2010-04-02) ----------------------------- Enhancements: - New xlsFormats() command to determine which Excel formats are supported (XLS, XLSX). Bug Fixes: - No longer attempt to install perl modules Compress::Raw::Zlib and Spreadsheet::XLSX at build/compile time. This should resolve recent build issues, particularly on Windows. - All perl code can now operate (but generate warnings) when perl modules Compress::Raw::Zlib and Spreadsheet::XLSX when are not installed. - Also update Greg's email address. CHANGES IN 2.7.1 (2010-02-19) ----------------------------- Enhancements: - on Windows attempts to locate ActiveState perl if perl= not specified and Rtools perl would have otherwise been used in read.xls and other perl dependent functions. CHANGES IN 2.7.0 (2010-01-25) ----------------------------- Bug Fixes: - Fix building of Perl libraries on Win32 CHANGES IN 2.7.0 (2010-01-25) ----------------------------- Enhancements: - read.xls() now supports Excel 2007 'xlsx' files. - read.xls() now allows specification of worksheet by name - read.xls() now supports ftp URLs. - Improved ll() so user can limit output to specified classes New Functions: - sheetCount() and sheetNames() to determine the number and names of worksheets in an Excel file, respectively. Bug Fixes: - Fix formatting warning in frameApply(). - Resolve crash of "ll(.GlobalEnv)" - CHANGES IN 2.6.1 (2009-07-15) ----------------------------- Bug Fixes - Modify unit tests to avoid issues related to time zones. 
CHANGES IN 2.6.0 (2009-07-15) ----------------------------- Bug Fixes - Correct minor typos & issues in man pages for write.fwf(), resample() (Greg Warnes) - Correct calculation of object sizes in env() and ll() (Gregor Gorjanc) New Features - Add support for using tab for field separator during translation from xls format in read.xls (Greg Warnes) - Enhanced function object.size that returns the size of multiple objects. There is also a handy print method that can print size of an object in "human readable" format when options(humanReadable=TRUE) or print(object.size(x), humanReadable=TRUE). (Gregor Gorjanc) - New function wideByFactor that reshapes given dataset by a given factor - it creates a "multivariate" data.frame. (Gregor Gorjanc) - New function nPairs that gives the number of variable pairs in a data.frame or a matrix. (Gregor Gorjanc) - New functions getYear, getMonth, getDay, getHour, getMin, and getSec for extracting the date/time parts from objects of a date/time class. (Gregor Gorjanc) - New function bindData that binds two data frames into a multivariate data frame in a different way than merge. (Gregor Gorjanc) Other Changes - Correct Greg's email address CHANGES IN 2.5.0 ---------------- - New function .runRUnitTestsGdata that enables run of all RUnit tests during the R CMD check as well as directly from within R. - Enhanced function object.size that returns the size of multiple objects. There is also a handy print method that can print size of an object in "human readable" format when options(humanReadable=TRUE) or print(x, humanReadable=TRUE). - New function bindData that binds two data frames into a multivariate data frame in a different way than merge. - New function wideByFactor that reshapes given dataset by a given factor - it creates a "multivariate" data.frame. - New functions getYear, getMonth, getDay, getHour, getMin, and getSec for extracting the date/time parts from objects of a date/time class. - New function nPairs that gives the number of variable pairs in a data.frame or a matrix. - New function trimSum that sums trimmed values. - New function cbindX that can bind objects with different number of rows. - write.fwf gains the width argument. The value for unknown can increase or decrease the width of the columns. Additional tests and documentation fixes. CHANGES IN 2.4.2 (2008-05-11) ----------------------------- - Enhancements and bug fixes for read.xls() and xls2csv(): - More informative log messages when verbose=TRUE - File paths containing spaces or other non-traditional characters are now properly handled - Better error messages, particularly when perl fails to generate an output .csv file. - The 'shortcut' character "~" (meaning user's home directory) is now properly handled in file paths. - XLS files created by OpenOffice are now properly handled. Thanks to Robert Burns for pointing out the patch (http://rt.cpan.org/Public/Bug/Display.html?id=7206) CHANGES IN 2.4.1 (2008-03-24) ----------------------------- - Update perl libraries needed by xls2csv() and read.xls() to latest available versions on CRAN. - Add read.xls() to exported function list - Correct iris.xls example file. It didn't contain the complete & properly formatted iris data set. Fixed. - Fix typo in win32 example for read.xls() CHANGES IN 2.4.0 (2008-01-30) ----------------------------- - The keep() function now includes an 'all' argument to specify how objects with names starting with '.' are handled. 
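    For example (a sketch; see ?keep for the complete interface):

      keep(x, y, all=TRUE)   # 'all' controls whether objects whose names
                             # start with '.' are taken into account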
- keep() now shows an informative warning message when a requested object does not exist - New vignette "Mapping Levels of a Factor" describing the use of mapLevels(). - New vignette "Working with Unknown Values" describing the use of isUnknown() and unknownToNA(). - Several enhancements to read.xls() (thanks to Gabor Grothendieck): - New function xls2csv(), which handles converting an xls file to a csv file and returns a connection to the temporary csv file - xls2csv() and read.xls() both allow a file or a url to be specified - read.xls() has a new 'pattern' argument which, if supplied, will ignore everything prior to the first line in th csv file that matches the pattern. This is typically used if there are a variable number of comment lines prior to the header in which case one can specify one of the column headings as the pattern. read.xls should be compatible with the old read.xls. - Minor fixes to drop.levels(), is.what(). - Implementation of unit tests for most functions. CHANGES IN 2.3.1 (2006-10-29) ----------------------------- - Arguments as well as their position of reorder.factor have been changed to conform with reorder.factor method in stats package, due to collision bug. Argument 'make.ordered' is now 'order' and old argument 'order' is now 'new.order'! Therefore, you have to implicitly specify new.order i.e. reorder(trt, new.order=c("PLACEBO", "300 MG", "600 MG", "1200 MG")) - trim() gains ... argument. - Added "unknown" methods for matrices. - Added c() method for factors based on mapLevels() functions. - Added write.fwf, which writes file in *F*ixed *W*idth *F*ormat. CHANGES FROM 2.1.X to 2.3.0 (2006-09-19) --------------------------------------- - Added mapLevels(), which produces a map with information on levels and/or internal integer codes. Contributed by Gregor Gorjanc. - Extended dropLevels() to work on the factors contained in a data frame, as well as individual factors. - Add unknown(), which changes given unknown value to NA and vice versa. Contributed by Gregor Gorjanc. - Extended trim() to handle a variety of data types data.frames, lists, factors, etc. Code changes contributed by Gregor Gorjanc. - Added resample() command that acts like sample() except that it _always_ samples from the arguments provided, even if only a single argument is present. This differs from sample() which behaves differently in this case. - Updated my email address. CHANGES IN GDATA 2.1.2 ----------------------- - Fixed bug in interleave.R - option to covert 1-column matrices to vector (based on Andrew Burgess's suggestion) - Updated Greg and Jim's email adresses - ll.R: Suppressed warning message in attach() call. - frameApply.Rd, reorder.Rd: Remove explicit loading of gtools in examples, so that failure to import functions from gtools gets properly caught by running the examples. - upperTriangle.R, man/upperTriangle.Rd: Add functions for extracting and modifying the upper and lower trianglular components of matrices. - is.what.R: Replaced the "not.using" vector with a more robust try(get(test)) to find out whether a particular is.* function returns a logical of length one. 
- DESCRIPTION: Added Suggests field - Updated the example in frameApply CHANGES IN GDATA 2.0.8 ----------------------- - Added DESCRIPTION and removed DESCRIPTION.in - Updated ll.Rd documentation - Fixed bug in Args.R, is.what.R, ll.R gdata/data/0000755000175100001440000000000013003720415012254 5ustar hornikusersgdata/data/MedUnits.rda0000644000175100001440000000714113003720415014477 0ustar hornikuserskU7M1/ 4P$f[HMnv&Qޙޙ'D|KjRRBSDAA|Nn{;sϽ;[w;vv444464v&(ל@C;<6IеPopjoٔ n=HgTrZiӛ3vZ6x#~yd(w!,pcIZZ2'?ߙgc.:kӟ;7u,HR%KM2<兂%o5Ugu3YΤX5W逶H:TV}A'+zUuB ^rY r)RA%$/67) 1cpKhKۓ db %%](հ@IV˲C hK\4'y'*%.k4UE9YYQ c!4ZHj rE*:)KnڇQP+Hmz^ѩ`q*?3T&]Vm"L^.+"_%Ž'aD\S2gU{2m %nAx"ʊi4R%QVTI%"(-)YQx{QKSԊ*%8Icxv-J*[֡x#L=&mr;]ԖRyNw̪S"*a"sRUHӇRBW m 9m:yMPTuQa+Z~X(qCx# \#* #zd z*2X\qHLQ橭6*訇Q KзA"f b Hݻjt\}5۵mZ벆KD)h+y(cXѝV@SrQI+֪bmC2~Rh"2+Ry V"vl4 i|<ҒM;{N`&EY.0㜦FfpiB[ O2ԓ; 5nO Ʌ4rYW52bRu#tAI:\wT:5j\&Z'ϳ@7G_2V.W$c q7Զa::JZGZI8:nL҈vua;F'Hl9VW}:D,| !6;_g l! _d؍3u2Һ&Ҿj\fc@\ϸ2"8%ȡ2p6#=2kT5GJ6@*r <Λy6'sLZuoM$;zjIYi&Y姧ͪDVN]V'ad^Αf!CTePf%O(а0R\vC43dKt$k/L8 ]ɼ$RNfV=1Z%]Ȃ(VnjKhg ȪLEH4o靧cpv3&$&- ͣ„\홐I/õVM1A5p)G/1S(Sb?Ԭ+wJ<92y8̑+s͑Sm#C ~C4& -vr6vMˁU('$v8/ )B2OL #gˢ1A;.?-7#G0uHXsŎan[ֲ͡s&ې\a@_x/ F㎼Hn3AB`n^͡0 +!DfYfIw }iwfB±a ݶܾҎ-]SiVѶNh<,Qtk.|}6P+i. W1{杖G.h{xC)Z GZOZnK9G4Toy,6{Ƕ/ax #OYɜ\XY3{@xX,[)WV xNaok{qEMT;@F+~2;[2IlfzoZW>wlV,~^ΛHj"isN˷~+ҺwN3+L %~=7z%]mc#Ӯ_z>?bW7ė?7Hy`w"=={2q ᎡMΨnJvCSUk?ݿȡWqޣ &>ea=aφZb ?站OZg-Ǯ|3`/h"ÆY>xS:H4N<|a;qrqv=[~Ū{؉[w'Z/Wl' ;ZO5_>򷟟o~0lKL;r蕯|]Pc ꊶ+k 2shOPR:yV+f߷۾kikÙONm]FXOmd5'vćuk 'bK>o&qh?\'o溭'e?x\x>kם܌-S5ﶋM̳LX706`&sl쯇5ؚ˲y\s=w?wS2C>z+e+d ɋ_LMjcp_}jTP߼OZt'z~5_t۪P|Y{йcdW1{s}3E})RߌW; {ak8¸Δ u ׿ɹ35Ba_'p[e. eh"?36S3mz3湧}:o(=b|Y 5k,qe^k0o>͖+99;yNotdi7T'/ZCjtpĘfs|5'dvnsbv!n€] X a)Xa)~, ` adFFadFFadAFdDAFdDAFQdDEFQdDEFQdDCF 1dĐCF 1dĐCF qdđGFqdđGFqdđя~d#GF?2я~d c@21d cD 21Ad "cMi1DaZbc~ZEJ QZB(-Di!J QZ”0)-LiaJ SZ”E(-BiJPZ"E(-JiQJRZҢ(E)-JiQJQZb(-Fi1JQZ8)-NiqJS䊚5/r}ŽsNpA-b`2gdata/R/0000755000175100001440000000000013115327257011557 5ustar hornikusersgdata/R/is.what.R0000644000175100001440000000135013003720415013243 0ustar hornikusersis.what <- function(object, verbose=FALSE) { do.test <- function(test, object) { result <- try(get(test)(object), silent=TRUE) if(!is.logical(result) || is.na(result) || length(result)!=1) result <- NULL return(result) } ## Get all names starting with "is." is.names <- unlist(sapply(search(), function(name) ls(name,pattern="^is\\."))) ## Narrow to functions is.functions <- is.names[sapply(is.names, function(x) is.function(get(x)))] tests <- sort(unique(is.functions[is.functions!="is.what"])) results <- suppressWarnings(unlist(sapply(tests, do.test, object=object))) if(verbose) output <- data.frame(is=ifelse(results,"T",".")) else output <- names(results)[results] return(output) } gdata/R/first.R0000644000175100001440000000055413003720415013022 0ustar hornikusers# Simply call 'first' or 'last' with a different default value for 'n'. first <- function(x, n=1, ...) head(x, n=n, ...) last <- function(x, n=1, ...) tail(x, n=n, ...) "first<-" <- function(x, n=1, ..., value ) { x[1:n] <- value[1:n] x } "last<-" <- function(x, n=1, ..., value ) { index <- seq( length(x)-n+1, length(x) ) x[index] <- value[1:n] x } gdata/R/interleave.R0000644000175100001440000000214613003720415014030 0ustar hornikusers# $Id: interleave.R 789 2005-12-08 20:18:15Z warnes $ interleave <- function(..., append.source=TRUE, sep=": ", drop=FALSE) { sources <- list(...) 
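  ## Drop any NULL arguments, then coerce plain vectors to single-row matrices
  ## so that every input can be row-bound and interleaved by row index below.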
sources[sapply(sources, is.null)] <- NULL sources <- lapply(sources, function(x) if(is.matrix(x) || is.data.frame(x)) x else t(x) ) nrows <- sapply( sources, nrow ) mrows <- max(nrows) if(any(nrows!=mrows & nrows!=1 )) stop("Arguments have differening numbers of rows.") sources <- lapply(sources, function(x) if(nrow(x)==1) x[rep(1,mrows),,drop=drop] else x ) tmp <- do.call("rbind",sources) nsources <- length(sources) indexes <- outer( ( 0:(nsources-1) ) * mrows , 1:mrows, "+" ) retval <- tmp[indexes,,drop=drop] if(append.source && !is.null(names(sources) )) if(!is.null(row.names(tmp)) ) row.names(retval) <- paste(format(row.names(retval)), format(names(sources)), sep=sep) else row.names(retval) <- rep(names(sources), mrows) retval } gdata/R/mv.R0000644000175100001440000000060213003720414012306 0ustar hornikusersmv <- function(from, to, envir=parent.frame()) { if( !is.character(from) || !exists(from, envir=envir, inherits = TRUE) ) stop("`from` must be a character string specifying the name of an object.") if( !is.character(to) ) stop("`to` must be a characater string.") value <- get(from, envir=envir) assign(x=to, value=value, envir=envir) rm(list=from, envir=envir) }gdata/R/reorder.R0000644000175100001440000000226313003720415013334 0ustar hornikusersreorder.factor <- function(x, X, FUN, ..., order=is.ordered(x), new.order, sort=mixedsort) { constructor <- if (order) ordered else factor if(!missing(X) || !missing(FUN)) { if(missing(FUN)) FUN <- 'mean' ## I would prefer to call stats::reorder.default directly, ## but it exported from stats, so the relevant code is ## replicated here: ## --> scores <- tapply(X = X, INDEX = x, FUN = FUN, ...) levels <- names(base::sort(scores, na.last = TRUE)) if(order) ans <- ordered(x, levels=levels) else ans <- factor(x, levels=levels) attr(ans, "scores") <- scores ## <-- return(ans) } else if (!missing(new.order)) { if (is.numeric(new.order)) new.order <- levels(x)[new.order] else new.order <- new.order } else new.order <- sort(levels(x)) constructor(x, levels=new.order) } gdata/R/combine.R0000755000175100001440000000125113003720415013305 0ustar hornikusers# $Id: combine.R 625 2005-06-09 14:20:30Z nj7w $ combine <- function(..., names=NULL) { tmp <- list(...) if(is.null(names)) names <- names(tmp) if(is.null(names)) names <- sapply( as.list(match.call()), deparse)[-1] if( any( sapply(tmp, is.matrix) | sapply(tmp, is.data.frame) ) ) { len <- sapply(tmp, function(x) c(dim(x),1)[1] ) len[is.null(len)] <- 1 data <- rbind( ... ) } else { len <- sapply(tmp,length) data <- unlist(tmp) } namelist <- factor(rep(names, len), levels=names) return( data.frame( data, source=namelist) ) } gdata/R/read.xls.R0000644000175100001440000000426113003720414013411 0ustar hornikusers## s$Id: read.xls.R 1596 2012-08-22 15:45:22Z warnes $ read.xls <- function(xls, sheet = 1, verbose=FALSE, pattern, na.strings = c("NA","#DIV/0!"), ..., method=c("csv","tsv","tab"), perl="perl") { con <- tfn <- NULL on.exit({ err <- FALSE if (inherits(con, "connection")) { tryCatch(op <- isOpen(con), error = function(x) err <<- TRUE) if (!err && op) close(con) } if (file.exists(tfn)) file.remove(tfn) }) method <- match.arg(method) ## expand file path, translating ~ to user's home directory, etc. 
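  ## (path.expand() below does this, so that shortcuts such as '~' in the 'xls'
  ## argument are resolved before the file name is handed on to xls2sep())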
xls <- path.expand(xls) ## translate from xls to csv/tsv/tab format (returns name of created file) perl <- if (missing(perl)) findPerl(verbose = verbose) else findPerl(perl, verbose = verbose) con <- xls2sep(xls, sheet, verbose=verbose, ..., method=method, perl = perl) ## While xls2sep returns a connection, we are better off directly ## opening the file, so that R can properly handle the encoding. So, ## just grab the full file path to use later, and close the connection. tfn <- summary(con)$description close(con) if (missing(pattern)) { if(verbose) cat("Reading", method, "file ", dQuote(tfn), "...\n") if(method=="csv") retval <- read.csv(tfn, na.strings=na.strings, ...) else if (method %in% c("tsv","tab") ) retval <- read.delim(tfn, na.strings=na.strings, ...) else stop("Unknown method", method) if(verbose) cat("Done.\n") } else { if(verbose) cat("Searching for lines tfntaining pattern ", pattern, "... ") idx <- grep(pattern, readLines(tfn)) if (length(idx) == 0) { warning("pattern not found") return(NULL) } if(verbose) cat("Done.\n") if(verbose) cat("Reading", method, "file ", dQuote(tfn), "...\n") if(method=="csv") retval <- read.csv(tfn, skip = idx[1]-1, na.strings=na.strings, ...) else if (method %in% c("tsv","tab") ) retval <- read.delim(tfn, skip = idx[1]-1, na.strings=na.strings, ...) else stop("Unknown method", method) if(verbose) cat("Done.\n") } retval } gdata/R/case.R0000644000175100001440000000063713003720415012610 0ustar hornikuserscase <- function(x, ..., default=NA) { magic <- "....default...." alternatives <- c(...,"....default...."=magic) x <- as.character(x) retval <- factor( x, levels=alternatives, labels=names(alternatives) ) levels(retval)[length(alternatives)] <- as.character(default) retval[is.na(retval) & !is.na(x)] <- default retval } gdata/R/xls2sep.R0000644000175100001440000000703113003720415013270 0ustar hornikusers## s$Id: xls2sep.R 1639 2013-01-14 20:47:57Z warnes $ xls2csv <- function(xls, sheet=1, verbose=FALSE, blank.lines.skip=TRUE, ..., perl="perl") xls2sep(xls=xls, sheet=sheet, verbose=verbose, blank.lines.skip=blank.lines.skip, ..., method="csv", perl=perl) xls2tab <- function(xls, sheet=1, verbose=FALSE, blank.lines.skip=TRUE, ..., perl="perl") xls2sep(xls=xls, sheet=sheet, verbose=verbose, blank.lines.skip=blank.lines.skip, ..., method="tab", perl=perl) xls2tsv <- function(xls, sheet=1, verbose=FALSE, blank.lines.skip=TRUE, ..., perl="perl") xls2sep(xls=xls, sheet=sheet, verbose=verbose, blank.lines.skip=blank.lines.skip, ..., method="tsv", perl=perl) xls2sep <- function(xls, sheet=1, verbose=FALSE, blank.lines.skip=TRUE, ..., method=c("csv","tsv","tab"), perl = perl) { method <- match.arg(method) perl <- if (missing(perl)) findPerl(verbose = verbose) else findPerl(perl, verbose = verbose) ## ## directories package.dir <- find.package('gdata') perl.dir <- file.path(package.dir,'perl') ## ## ## filesheet tf <- NULL if ( substring(xls, 1, 7) == "http://" || substring(xls, 1, 6) == "ftp://" ) { tf <- paste(tempfile(), "xls", sep = ".") if(verbose) cat("Downloading", dQuote(xls), " to ", dQuote(tf), "...\n") download.file(xls, tf, mode = "wb") if(verbose) cat("Done.\n") xls <- tf } if(method=="csv") { script <- file.path(perl.dir,'xls2csv.pl') targetFile <- paste(tempfile(), "csv", sep = ".") } else if(method=="tab") { script <- file.path(perl.dir,'xls2tab.pl') targetFile <- paste(tempfile(), "tab", sep = ".") } else if(method=="tsv") { script <- file.path(perl.dir,'xls2tsv.pl') targetFile <- paste(tempfile(), "tsv", sep = ".") } else { stop("Unknown 
method", method) } ## ## ## ## blank.lines.skip ## if (blank.lines.skip) skipBlank="" else skipBlank="-s" ## ## execution command cmd <- paste(shQuote(perl), shQuote(script), skipBlank, # flag is not quoted shQuote(xls), shQuote(targetFile), shQuote(sheet), sep=" ") ## ## if(verbose) { cat("\n") cat("Converting xls file\n") cat(" ", dQuote(xls), "\n") cat("to", method, " file \n") cat(" ", dQuote(targetFile), "\n") cat("... \n\n") } ## ## do the translation if(verbose) cat("Executing '", cmd, "'... \n\n") results <- try(system(cmd, intern=!verbose)) if(inherits(results, "try-error")) stop( "Unable to read xls file '", xls, "':", results ) if(verbose) cat(results,"\n\n") if (verbose) cat("Done.\n\n") ## ## check that the target file was created ## if(!file.exists(targetFile)) stop( "Intermediate file '", targetFile, "' missing!" ) ## Creae a file object to hand to the next stage.. retval <- try(file(targetFile)) if(inherits(retval, "try-error")) stop("Unable to open intermediate file '", targetFile, "':", retval) return(retval) } gdata/R/duplicated2.R0000644000175100001440000000035713003720414014073 0ustar hornikusersduplicated2 <- function(x, bothWays=TRUE, ...) { if(!bothWays) { return(duplicated(x, ...)) } else if(bothWays) { return((duplicated(x, ...) | duplicated(x, fromLast=TRUE, ...))) } } gdata/R/left.R0000644000175100001440000000157313003720415012627 0ustar hornikusersleft <- function(x, n=6L) UseMethod("left") right <- function(x, n=6L) UseMethod("left") left.data.frame <- function(x, n=6) { stopifnot(length(n) == 1L) n <- if (n < 0L) max(ncol(x) + n, 0L) else min(n, ncol(x)) x[, seq_len(n), drop = FALSE] } left.matrix <- left.data.frame right.data.frame <- function (x, n = 6L, ...) { stopifnot(length(n) == 1L) ncx <- ncol(x) n <- if (n < 0L) max(ncx + n, 0L) else min(n, ncx) x[, seq.int(to = ncx, length.out = n), drop = FALSE] } right.matrix <- function (x, n = 6L, addcolnums = TRUE, ...) { stopifnot(length(n) == 1L) ncx <- ncol(x) n <- if (n < 0L) max(ncx + n, 0L) else min(n, ncx) sel <- seq.int(to = ncx, length.out = n) ans <- x[, sel, drop = FALSE] if (addcolnums && is.null(colnames(x))) colnames(ans) <- paste0("[", sel, ",]") ans } gdata/R/upperTriangle.R0000644000175100001440000000143213003720415014510 0ustar hornikusersupperTriangle <- function(x, diag=FALSE, byrow=FALSE) { if(byrow) t(x)[rev(upper.tri(x, diag=diag))] else x[upper.tri(x, diag=diag)] } "upperTriangle<-" <- function(x, diag=FALSE, byrow=FALSE, value) { if(byrow) { ret <- t(x) ret[rev(upper.tri(x, diag=diag))] <- value t(ret) } else { x[upper.tri(x, diag=diag)] <- value x } } lowerTriangle <- function(x, diag=FALSE, byrow=FALSE) { if(byrow) t(x)[rev(lower.tri(x, diag=diag))] else x[lower.tri(x, diag=diag)] } "lowerTriangle<-" <- function(x, diag=FALSE, byrow=FALSE, value) { if(byrow) { ret <- t(x) ret[rev(lower.tri(x, diag=diag))] <- value t(ret) } else { x[lower.tri(x, diag=diag)] <- value x } } gdata/R/unknown.R0000644000175100001440000001442113003720415013370 0ustar hornikusers### unknown.R ###------------------------------------------------------------------------ ### What: Change given unknown value to NA and vice versa ### $Id: unknown.R 1797 2014-04-05 18:19:49Z warnes $ ### Time-stamp: <2007-04-26 13:16:10 ggorjan> ###------------------------------------------------------------------------ ### {{{ isUnknown ###------------------------------------------------------------------------ isUnknown <- function(x, unknown=NA, ...) UseMethod("isUnknown") isUnknown.default <- function(x, unknown=NA, ...) 
{ if(is.list(unknown)) unknown <- unlist(unknown) ret <- x %in% unknown if(any(is.na(unknown))) ret <- ret | is.na(x) ret } isUnknown.POSIXlt <- function(x, unknown=NA, ...) { ## FIXME: codetools say ## isUnknown.POSIXlt: wrong number of arguments to as.character if(is.list(unknown) && !inherits(x=unknown, what="POSIXlt")) { unknown <- lapply(unknown, FUN=as.character, ...) } else { unknown <- as.character(x=unknown, ...) } if(is.list(x) && !inherits(x=x, what="POSIXlt")) { x <- lapply(x, FUN=as.character, ...) } else { x <- as.character(x=x, ...) } isUnknown.default(x=as.character(x), unknown=as.character(unknown)) } isUnknown.list <- function(x, unknown=NA, ...) { unknown <- .unknownList(x=x, unknown=unknown) x <- mapply(FUN="isUnknown", x=x, unknown=unknown, ..., SIMPLIFY=FALSE) x } isUnknown.data.frame <- function(x, unknown=NA, ...) { x[] <- isUnknown.list(x, unknown=unknown, ...) x } isUnknown.matrix <- function(x, unknown=NA, ...) apply(X=x, MARGIN=ifelse(ncol(x) > nrow(x), 1, 2), FUN=isUnknown, unknown=unknown) ### }}} ### {{{ unknownToNA ###------------------------------------------------------------------------ unknownToNA <- function(x, unknown, warning=FALSE, ...) UseMethod("unknownToNA") unknownToNA.default <- function(x, unknown, warning=FALSE, ...) { if(warning) { if(any(is.na(x))) warning("'x' already has NA") } is.na(x) <- isUnknown(x=x, unknown=unknown) x } unknownToNA.factor <- function(x, unknown, warning=FALSE, ...) { ## could put this func into default method, but I need unlisted unknown ## for levels handling if(warning) { if(any(is.na(x))) warning("'x' already has NA") } if(is.list(unknown)) unknown <- unlist(unknown) ## Levels handling - read help page on this levs <- levels(x) levs <- levs[!(levs %in% unknown)] factor(x, levels=levs) } unknownToNA.list <- function(x, unknown, warning=FALSE, ...) { unknown <- .unknownList(x=x, unknown=unknown) x <- mapply(FUN="unknownToNA", x=x, unknown=unknown, warning=warning, SIMPLIFY=FALSE) return(x) } unknownToNA.data.frame <- function(x, unknown, warning=FALSE, ...) { x[] <- unknownToNA.list(x=x, unknown=unknown, warning=warning) x } ### }}} ### {{{ NAToUnknown ###------------------------------------------------------------------------ NAToUnknown <- function(x, unknown, force=FALSE, call.=FALSE, ...) UseMethod("NAToUnknown") NAToUnknown.default <- function(x, unknown, force=FALSE, call.=FALSE, ...) { if(length(as.character(unknown)) != 1) # as.character allows also POSIXlt stop("'unknown' must be a single value") if(any(isUnknown(x, unknown=unknown)) && !force) stop(sprintf("'x' already has value %s", dQuote(unknown))) classX <- class(x)[1] classUnk <- class(unknown)[1] if(classX != classUnk) { tmp <- c("integer", "numeric") if(!(classX %in% tmp && classUnk %in% tmp)) { warning(sprintf("'unknown' should be %s for %s 'x' - will try to coerce", dQuote(classX), dQuote(classX)), call.=call.) } unknown <- do.call(paste("as.", classX, sep=""), args=list(unknown)) } x[is.na(x)] <- unknown x } NAToUnknown.factor <- function(x, unknown, force=FALSE, call.=FALSE, ...) { if(length(unknown) != 1) stop("'unknown' must be a single value") if(any(isUnknown(x, unknown=unknown))) { if(!force) stop(sprintf("'x' already has level %s", dQuote(unknown))) } else { mapLevels(x) <- c(mapLevels(x, codes=FALSE), mapLevels(as.character(unknown), codes=FALSE)) } x[is.na(x)] <- unknown if(!force) warning(sprintf("new level is introduced: %s", unknown), call.=call.) x } NAToUnknown.list <- function(x, unknown, force=FALSE, call.=FALSE, ...) 
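## List method: match/recycle 'unknown' against the elements of 'x' with the
## internal .unknownList() helper, then apply NAToUnknown() element-wise.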
{ unknown <- .unknownList(x=x, unknown=unknown) x <- mapply(FUN="NAToUnknown", x=x, unknown=unknown, force=force, call.=call., SIMPLIFY=FALSE) x } NAToUnknown.data.frame <- function(x, unknown, force=FALSE, call.=FALSE, ...) { x[] <- NAToUnknown.list(x=x, unknown=unknown, force=force, call.=call.) x } ### }}} ### {{{ .unknownList ###------------------------------------------------------------------------ .unknownList <- function(x, unknown) { ## --- Setup --- n <- length(x) unkN <- length(unknown) namesX <- names(x) namesXNullTest <- is.null(namesX) unkNames <- names(unknown) unkNamesNullTest <- is.null(unkNames) defInNames <- ".default" %in% unkNames defInd <- unkNames %in% ".default" def <- unknown[defInd] if(defInNames) { ## Remove default unkN <- unkN - 1 unkNames <- unkNames[!defInd] unknown <- unknown[!defInd] } if(!namesXNullTest) { ## Check for nonexistent name test <- !(unkNames %in% namesX) if(any(test)) stop(sprintf("name(s) %s not in names of 'x'", paste(sQuote(unkNames[test]), collapse=" "))) } ## --- Recycle --- if(unkN < n) { if(unkNamesNullTest | defInNames) { if(defInNames) { # handling .default names(def) <- NULL unknownDef <- rep(def, length=(n - unkN)) names(unknownDef) <- namesX[!(namesX %in% unkNames)] unknown <- c(unknownDef, unknown) } else { unknownDef <- unknown unknown <- rep(unknownDef, length=n) } } else { stop("can not propely recycle named 'unknown'") } } ## --- Names --- if(!namesXNullTest) { ## no need if namesX NULL if(unkNamesNullTest) { ## missing unkNames names(unknown) <- namesX } else { ## unkNames known unknown <- unknown[match(namesX, names(unknown))] } } unknown } ### }}} ### {{{ Dear Emacs ### Local variables: ### folded-file: t ### End: ### }}} ###------------------------------------------------------------------------ ### unknown.R ends here gdata/R/onAttach.R0000644000175100001440000000413413003720414013431 0ustar hornikusers.onAttach <- function(libname, pkgname) { show <- function(...) packageStartupMessage( paste( strwrap(x = list(...), prefix = "gdata: "), collapse="\n",sep="\n" ) ) try( { ## 1 - Can we access perl? hasPerl <- try( findPerl(), silent=TRUE) if(inherits(hasPerl, "try-error")) show( " Unable to locate valid perl interpreter \n \n read.xls() will be unable to read Excel XLS and XLSX files unless the 'perl=' argument is used to specify the location of a valid perl intrpreter. \n \n (To avoid display of this message in the future, please ensure perl is installed and available on the executable search path.) ") formats <- try(xlsFormats(),silent=TRUE) msg <- FALSE ## 2 - Are the libraries for XLS present? if( !("XLS" %in% formats) ) { show( "Unable to load perl libaries needed by read.xls()", " to support 'XLX' (Excel 97-2004) files." ) msg <- TRUE } else { show( "read.xls support for 'XLS' (Excel 97-2004) files ENABLED.") } show("\n") ## 3 - Are the libbaries for XLSX present? if( !("XLSX" %in% formats) ) { show( "Unable to load perl libaries needed by read.xls()", " to support 'XLSX' (Excel 2007+) files." ) msg <- TRUE } else { show( "read.xls support for 'XLSX' (Excel 2007+) files ENABLED." ) } if(msg) { show("\n") show( " Run the function 'installXLSXsupport()'", " to automatically download and install the perl", " libaries needed to support Excel XLS and XLSX formats." 
) } }) } gdata/R/ConvertMedUnits.R0000644000175100001440000000340213003720414014756 0ustar hornikusersConvertMedUnits <- function(x, measurement, abbreviation, to=c("Conventional","SI","US"), exact=!missing(abbreviation)) { MedUnits <- NULL ## Define to avoid R CMD check warning data(MedUnits,package='gdata', envir=environment()) to=match.arg(to) if(!missing(measurement) && missing(abbreviation)) { if(exact) matchUnits <- MedUnits[tolower(MedUnits$Measurement)== tolower(measurement),] else matchUnits <- MedUnits[grep(measurement, MedUnits$Measurement, ignore.case=TRUE),] } else if(missing(measurement) && !missing(abbreviation)) { if(exact) matchUnits <- MedUnits[tolower(MedUnits$Abbreviation)== tolower(abbreviation),] else matchUnits <- MedUnits[grep(match, MedUnits$Abbrevation, ignore.case=TRUE),] } else # both missing or both specified stop("One of `measurement' or `abbreviation' must be specified.") if(nrow(matchUnits)>1) stop( paste("More than one matching row. Please use 'exact=TRUE' ", "and supply one of these matching strings:", paste('\t"',matchUnits$Measurement, '"', sep='', collapse="\n\t"), sep="\n\t")) else if (nrow(matchUnits)<1) stop("No match") if (to %in% c("Convetional", "US")) { retval <- x / matchUnits$Conversion attr(retval,"units") <- matchUnits$ConventionalUnits } else { retval <- x * matchUnits$Conversion attr(retval,"units") <- matchUnits$SIUnits } retval } gdata/R/env.R0000644000175100001440000000212213003720414012453 0ustar hornikusersenv <- function(unit="KB", digits=0) { get.object.size <- function(object.name, pos) { object <- get(object.name, pos=pos) size <- try(unclass(object.size(object)), silent=TRUE) if(class(size) == "try-error") size <- 0 return(size) } get.environment.size <- function(pos) { if(search()[pos]=="Autoloads" || length(ls(pos,all.names=TRUE))==0) size <- 0 else size <- sum(sapply(ls(pos,all.names=TRUE), get.object.size, pos=pos)) return(size) } get.environment.nobjects <- function(pos) { nobjects <- length(ls(pos,all.names=TRUE)) return(nobjects) } unit <- match.arg(unit, c("bytes","KB","MB")) denominator <- switch(unit, "KB"=1024, "MB"=1024^2, 1) size.vector <- sapply(seq(along=search()), get.environment.size) size.vector <- round(size.vector/denominator, digits) nobjects.vector <- sapply(seq(along=search()), get.environment.nobjects) env.frame <- data.frame(search(), nobjects.vector, size.vector) names(env.frame) <- c("Environment", "Objects", unit) print(env.frame, right=FALSE) invisible(env.frame) } gdata/R/resample.R0000644000175100001440000000170313003720415013500 0ustar hornikusers## The S/R 'sample' function behaves differently if it is passed a ## sampling vector of length 1 than if it is passed a ## vector of length greater than 1. For the 1-element ## case it samples from the list 1:x, instead of from the contents ## of x. This function remove the special case: it always samples from ## the provided argument, no matter the length. 
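## For example (a sketch): sample(10, 3) draws three values from 1:10, whereas
## resample(10, 3, replace=TRUE) treats 10 as the single element to draw from
## and simply returns c(10, 10, 10).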
resample <- function(x, size, replace = FALSE, prob = NULL) { if(length(x)<1) if(!missing(size) && size>0) stop("Requested sample of size ", size, " from list of length 0") else x[FALSE] else if(length(x)==1) { if(missing(size) || size==1) x else if(size>=1 && replace==TRUE) rep(x, size) else if(size < 1) x[FALSE] else stop("Cannot cannot take a sample larger than the population", " when 'replace = FALSE'") } else sample(x, size, replace, prob) } gdata/R/bindData.R0000644000175100001440000000241013003720414013371 0ustar hornikusers### bindData.R ###------------------------------------------------------------------------ ### What: Bind two data frames - code ### $Id$ ### Time-stamp: <2008-12-30 22:01:00 ggorjan> ###------------------------------------------------------------------------ bindData <- function(x, y, common) { ## --- Setup --- if(!is.data.frame(x)) stop("'x' must be a data frame") if(!is.data.frame(y)) stop("'y' must be a data frame") ## --- New data frame --- ## First add common column and a dataset indicator column z <- rbind(x[common], y[common]) ## Other columns ## - remove common columns in x and y namesz <- names(z) otherx <- names(x) otherx <- otherx[!(otherx %in% namesz)] othery <- names(y) othery <- othery[!(othery %in% namesz)] ## - add all other columns but as a set for each input data frame rx <- nrow(x); cx <- length(otherx) ry <- nrow(y); cy <- length(othery) z <- cbind(z, rbind(x[otherx], matrix(rep(NA, times=(ry * cx)), nrow=ry, ncol=cx, dimnames=list(NULL, otherx)))) z <- cbind(z, rbind(matrix(rep(NA, times=(rx * cy)), nrow=rx, ncol=cy, dimnames=list(NULL, othery)), y[othery])) z } ###------------------------------------------------------------------------ ### bindData.R ends here gdata/R/drop.levels.R0000755000175100001440000000073113003720415014130 0ustar hornikusers drop.levels <- function(x, reorder=TRUE, ...) UseMethod("drop.levels") drop.levels.default <- function(x, reorder=TRUE, ...) x drop.levels.factor <- function(x, reorder=TRUE, ...) { x <- x[, drop=TRUE] if(reorder) x <- reorder(x, ...) x } drop.levels.list <- function(x, reorder=TRUE, ...) { lapply(x, drop.levels, reorder=reorder, ...) } drop.levels.data.frame <- function(x, reorder=TRUE, ...) { x[] <- drop.levels.list(x, reorder=reorder, ...) x } gdata/R/findPerl.R0000644000175100001440000000140313003720415013430 0ustar hornikusers## s$Id: read.xls.R 1342 2009-07-16 02:49:11Z warnes $ ## findPerl attempts to locate a valid perl executable. If the 'perl' argument is missing, findPerl <- function(perl, verbose = "FALSE") { errorMsg <- "perl executable not found. Use perl= argument to specify the correct path." if (missing(perl)) { perl = "perl" } perl = Sys.which(perl) if (perl=="" || perl=="perl") stop(errorMsg) if (.Platform$OS == "windows") { if (length(grep("rtools", tolower(perl))) > 0) { perl.ftype <- shell("ftype perl", intern = TRUE) if (length(grep("^perl=", perl.ftype)) > 0) { perl <- sub('^perl="([^"]*)".*', "\\1", perl.ftype) } } } if (verbose) cat("Using perl at", perl, "\n") perl } gdata/R/update.list.R0000644000175100001440000000112713115327130014125 0ustar hornikusers## this function updates the elements of list 'object' to contain all of the elements ## of 'new', overwriting elements with the same name, and (optionally) copying unnamed ## elements. update.list <- function(object, new, unnamed=FALSE, ...) 
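## For example (a sketch): update.list(list(a=1, b=2), list(b=3, c=4)) returns
## list(a=1, b=3, c=4) -- 'b' is overwritten and 'c' is appended. It is also
## registered as the 'update' method for lists, so update(object, new) works.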
{ retval <- object for(name in names(new)) retval[[name]] <- new[[name]] if(unnamed) { if(is.null(names(new))) names(new) <- rep("", length=length(new)) for(i in (1:length(new))[names(new)==""] ) retval <- append(retval, new[[i]]) } retval } gdata/R/unmatrix.R0000644000175100001440000000100513003720415013532 0ustar hornikusers# $Id: unmatrix.R 625 2005-06-09 14:20:30Z nj7w $ unmatrix <- function(x, byrow=FALSE) { rnames <- rownames(x) cnames <- colnames(x) if(is.null(rnames)) rnames <- paste("r",1:nrow(x),sep='') if(is.null(cnames)) cnames <- paste("c",1:ncol(x),sep='') nmat <- outer( rnames, cnames, paste, sep=":" ) if(byrow) { vlist <- c(t(x)) names(vlist) <- c(t(nmat)) } else { vlist <- c(x) names(vlist) <- c(nmat) } return(vlist) } gdata/R/write.fwf.R0000644000175100001440000001642113003720415013606 0ustar hornikusers### write.fwf.R ###------------------------------------------------------------------------ ### What: Write fixed width format - code ### $Id: write.fwf.R 1967 2015-04-25 16:24:41Z warnes $ ### Time-stamp: <2008-08-05 12:11:27 ggorjan> ###------------------------------------------------------------------------ write.fwf <- function(x, file="", append=FALSE, quote=FALSE, sep=" ", na="", rownames=FALSE, colnames=TRUE, rowCol=NULL, justify="left", formatInfo=FALSE, quoteInfo=TRUE, width=NULL, eol="\n", qmethod=c("escape", "double"), scientific=TRUE, ...) { ## --- Setup --- dapply <- function(x, FUN, ..., simplify=TRUE) { if(is.data.frame(x)) return(sapply(x, FUN, ..., simplify=simplify)) else if(is.matrix(x)) return(apply(x, 2, FUN, ...)) else stop("x must be a data.frame or a matrix") } if(!(is.data.frame(x) || is.matrix(x))) stop("'x' must be a data.frame or matrix") if(length(na) > 1) stop("only single value can be defined for 'na'") if(!scientific) { option.scipen <- getOption("scipen") on.exit( function() options("scipen"=option.scipen) ) options("scipen"=100) } if(rownames) { x <- as.data.frame(x) x <- cbind(rownames(x), x) rowColVal <- ifelse(!is.null(rowCol), rowCol, "row") colnames(x)[1] <- rowColVal } colnamesMy <- colnames(x) if(length(colnamesMy)==0) colnamesMy <- paste( "V", 1:ncol(x), sep="") nRow <- nrow(x) nCol <- length(colnamesMy) widthNULL <- is.null(width) if(!widthNULL && length(width) != nCol) { warning("recycling 'width'") widthOld <- width width <- integer(length=nCol) width[] <- widthOld } ## --- Format info --- retFormat <- data.frame(colname=colnamesMy, nlevels=0, position=0, width=0, digits=0, exp=0, stringsAsFactors=FALSE) ## Which columns are numeric like isNum <- dapply(x, is.numeric) ## is.numeric picks also Date and POSIXt isNum <- isNum & !(dapply(x, inherits, what="Date") | dapply(x, inherits, what="POSIXt")) ## Which columns are factors --> convert them to character isFac <- dapply(x, is.factor) if(any(isFac)) ## This conditional is necessary because if x is a matrix, even if ## all(isFAC==FALSE), this assignment will coerce it to mode ## character. This isn't a problem for dataframes. x[, isFac] <- sapply(x[, isFac, drop=FALSE], as.character) ## Collect information about how format() will format columns. 
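## (Illustrative note, not part of the original code: for a numeric column,
## format.info(c(1.5, 10.25)) returns c(5, 2, 0), i.e. field width, number of
## decimal digits, and exponent width; retFormat below is filled from these.)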
## We need to get this info now, since format will turn all columns to character tmp <- dapply(x, format.info, ..., simplify=FALSE) if(is.matrix(x)) tmp <- as.data.frame(tmp) tmp1 <- sapply(tmp, length) tmp <- t(as.data.frame(tmp)) retFormat$width <- tmp[, 1] ## Collect other details for numeric columns if(any(isNum)) { ## Numeric columns with digits test <- tmp1 > 1 if(any(test)) { retFormat[test, c("digits", "exp")] <- tmp[test, c(2, 3)] ## Numeric columns with scientific notation test2 <- tmp[test, 3] > 0 if(any(test2)) ## adding +1; see ?format.info retFormat[test, ][test2, "exp"] <- retFormat[test, ][test2, "exp"] + 1 } } ## --- Format --- ## store original object in 'y' y <- x ## Formatting (to character) for(i in 1:nCol) { if(widthNULL) { tmp <- NULL } else { tmp <- width[i] } ## Due to na.encode bug in format() in 2.7.1; na.encode=TRUE should ## return NA values and not "NA", but even then we rely on the ## following test to "fiddle" with the value in 'na' argument since - ## NA should not increase the width of column with width 1, while wider ## value for 'na' should increase the width test <- is.na(y[, i]) ## Make a copy to make sure we get character after first format() - Date class caused problems x2 <- character(length=nRow) ## Add formatted values x2[!test] <- format(y[!test, i], justify=justify, width=tmp, ...) ## Add 'na' value x2[test] <- na ## Replace the original x[, i] <- x2 ## Collect width (again) tmp2 <- format.info(x2, ...)[1] ## Reformat if 'na' value change the width of the column if(tmp2 != retFormat[i, "width"]) { retFormat[i, "width"] <- tmp2 ## ifelse() makes sure that numeric columns are justified to right x[, i] <- format(x[, i], justify=ifelse(isNum[i], "right", justify), width=tmp, ...) } ## Reformat 'na' value if it is narrower than the width of the column if(nchar(na) < retFormat[i, "width"]) { x[test, i] <- format(na, justify=ifelse(isNum[i], "right", justify), width=retFormat[i, "width"], ...) 
} } ## Number of levels for "non-numeric"" columns if(any(!isNum)) { retFormat[!isNum, "nlevels"] <- dapply(x[, !isNum, drop=FALSE], function(z) length(unique(z))) } ## Check that width was not to small if(!widthNULL) { test <- retFormat$width > width if(any(test)) { tmpCol <- paste(colnamesMy[test], collapse=", ") tmpWidth <- paste(width[test], collapse=", ") tmpNeed <- paste(retFormat$width[test], collapse=", ") stop(paste("'width' (", tmpWidth, ") was too small for columns: ", tmpCol, "\n 'width' should be at least (", tmpNeed, ")", sep="")) } } ## --- Write --- if(colnames) { if(rownames && is.null(rowCol)) colnamesMy <- colnamesMy[-1] write.table(t(as.matrix(colnamesMy)), file=file, append=append, quote=quote, sep=sep, eol=eol, na=na, row.names=FALSE, col.names=FALSE, qmethod=qmethod) } write.table(x=x, file=file, append=(colnames || append), quote=quote, sep=sep, eol=eol, na=na, row.names=FALSE, col.names=FALSE, qmethod=qmethod) ## --- Return format and fixed width information --- if(formatInfo) { ## be carefull with these ifelse constructs retFormat$position[1] <- ifelse(quote, ifelse(quoteInfo, 1, 2), 1) if(ifelse(quote, quoteInfo, FALSE)) retFormat$width <- retFormat$width + 2 N <- nrow(retFormat) if(N > 1) { for(i in 2:N) { retFormat$position[i] <- retFormat$position[i - 1] + retFormat$width[i - 1] + nchar(x=sep, type="chars") + ifelse(quote, ifelse(quoteInfo, 0, 1), 0) } } if(rownames && is.null(rowCol)) { retFormat <- retFormat[-1,] rownames(retFormat) <- 1:(N-1) } return(retFormat) } } ###------------------------------------------------------------------------ ### write.fwf.R ends here gdata/R/sheetCount.R0000644000175100001440000000326513003720414014015 0ustar hornikuserssheetCount <- function(xls, verbose = FALSE, perl = "perl") { perl <- if (missing(perl)) findPerl(verbose = verbose) else findPerl(perl, verbose = verbose) sheetCmd(xls, cmd="sheetCount.pl", verbose=verbose, perl=perl) } sheetNames <- function(xls, verbose = FALSE, perl = "perl") { perl <- if (missing(perl)) findPerl(verbose = verbose) else findPerl(perl, verbose = verbose) sheetCmd(xls, cmd="sheetNames.pl", verbose=verbose, perl=perl) } sheetCmd <- function(xls, cmd="sheetCount.pl", verbose=FALSE, perl="perl") { ## ## directories package.dir <- find.package('gdata') perl.dir <- file.path(package.dir,'perl') ## ## ## ## files tf <- NULL if ( substring(xls, 1, 7) == "http://" || substring(xls, 1, 6) == "ftp://" ) { tf <- paste(tempfile(), "xls", sep = ".") if(verbose) cat("Downloading", dQuote(xls), " to ", dQuote(tf), "...\n") else cat("Downloading...\n") download.file(xls, tf, mode = "wb") cat("Done.\n") xls <- tf } ## sc <- file.path(perl.dir, cmd) ## ## ## ## execution command cmd <- paste(shQuote(perl), shQuote(sc), shQuote(xls), sep=" ") ## ## ## ## do the translation if(verbose) { cat("\n") cat("Extracting sheet information from\n") cat(" ", dQuote(xls), "\n") cat("... 
\n\n") } ## output <- system(cmd, intern=TRUE, ignore.stderr=TRUE) if(verbose) cat("Results: ", output, "\n") ## tc <- textConnection(output) results <- read.table(tc, as.is=TRUE, header=FALSE) close(tc) results <- unlist(results) names(results) <- NULL ## if (verbose) cat("Done.\n\n") results } gdata/R/trimSum.R0000644000175100001440000000154613003720415013335 0ustar hornikusers### trimSum.R ###------------------------------------------------------------------------ ### What: Sum trimmed values - code ### $Id$ ### Time-stamp: <2008-12-20 12:11:27 ggorjan> ###------------------------------------------------------------------------ trimSum <- function(x, n, right=TRUE, na.rm=FALSE, ...) { ## --- Setup --- if(!is.vector(x) | is.list(x)) stop("'x' must be a vector - for now") if(!is.numeric(x)) stop("'x' must be numeric") if(length(x) <= n) stop("'n' must be smaller than the length of x") ## --- Trim --- N <- length(x) if(right) { x2 <- x[1:n] x2[n] <- sum(x[n:N], na.rm=na.rm) } else { k <- (N - n + 1) x2 <- x[k:N] x2[1] <- sum(x[1:k], na.rm=na.rm) } ## --- Return --- x2 } ###------------------------------------------------------------------------ ### trimSum.R ends here gdata/R/nobs.R0000644000175100001440000000112513003720415012627 0ustar hornikusers# $Id: nobs.R 1799 2014-04-05 18:38:23Z warnes $ ## Redefine here, so that the locally defined methods (particularly ## nobs.default) take precidence over the ones now defined in the ## stats package nobs <- function(object, ...) UseMethod("nobs") nobs.default <- function(object, ...) { if(is.numeric(object) || is.logical(object)) sum( !is.na(object) ) else stats::nobs(object, ...) } nobs.data.frame <- function(object, ...) sapply(object, nobs.default) ## Now provided by 'stats' package, so provide alias to satisfy ## dependencies nobs.lm <- stats:::nobs.lm gdata/R/installXLSXsupport.R0000644000175100001440000000212013003720415015504 0ustar hornikusers## s$Id: read.xls.R 1423 2010-02-21 17:12:30Z ggrothendieck2 $ installXLSXsupport <- function(perl="perl", verbose=FALSE) { ## determine proper path to perl executable perl <- if (missing(perl)) findPerl(verbose = verbose) else findPerl(perl, verbose = verbose) ## ## directories package.dir <- find.package('gdata') perl.dir <- file.path(package.dir,'perl') ## ## cmd <- "install_modules.pl" sc <- file.path(perl.dir, cmd) ## ## ## ## execution command cmd <- paste(shQuote(perl), shQuote(sc), sep=" ") ## if(verbose) { cat("\n") cat("Attempting to automaticall install Perl libraries to support XLSX (Excel 2007+) file format...\n") cat("\n") } ## output <- system(cmd, intern=TRUE) ## if(verbose) cat("Results: ", output, "\n") ## if( "XLSX" %in% xlsFormats(perl=perl, verbose=verbose) ) { cat("\nPerl XLSX support libraries successfully installed.\n\n") invisible(TRUE) } else { stop("\nUnable to install Perl XLSX support libraries.\n\n") invisible(FALSE) } } gdata/R/getDateTimeParts.R0000644000175100001440000000464713003720414015107 0ustar hornikusers### getDateTimePart.R ###------------------------------------------------------------------------ ### What: Extract date and time parts from various date and time classes ### $Id$ ### Time-stamp: <2008-12-30 22:42:58 ggorjan> ###------------------------------------------------------------------------ ### {{{ getYear ###------------------------------------------------------------------------ getYear <- function(x, format, ...) UseMethod("getYear") getYear.default <- function(x, format, ...) 
stop("'getYear' can only be used on objects of a date/time class") getYear.Date <- getYear.POSIXct <- getYear.POSIXlt <- function(x, format="%Y", ...) format(x=x, format=format, ...) ### }}} ### {{{ getMonth ###------------------------------------------------------------------------ getMonth <- function(x, format, ...) UseMethod("getMonth") getMonth.default <- function(x, format, ...) stop("'getMonth' can only be used on objects of a date/time class") getMonth.Date <- getMonth.POSIXct <- getMonth.POSIXlt <- function(x, format="%m", ...) format(x=x, format=format) ### }}} ### {{{ getDay ###------------------------------------------------------------------------ getDay <- function(x, format, ...) UseMethod("getDay") getDay.default <- function(x, format, ...) stop("'getDay' can only be used on objects of a date/time class") getDay.Date <- getDay.POSIXct <- getDay.POSIXlt <- function(x, format="%d", ...) format(x=x, format=format) ### }}} ### {{{ getHour ###------------------------------------------------------------------------ getHour <- function(x, format, ...) UseMethod("getHour") getHour.default <- function(x, format, ...) stop("'getHour' can only be used on objects of a date/time class") ### }}} ### {{{ getMin ###------------------------------------------------------------------------ getMin <- function(x, format, ...) UseMethod("getMin") getMin.default <- function(x, format, ...) stop("'getMin' can only be used on objects of a date/time class") ### }}} ### {{{ getSec ###------------------------------------------------------------------------ getSec <- function(x, format, ...) UseMethod("getSec") getSec.default <- function(x, format, ...) stop("'getSec' can only be used on objects of a date/time class") ### }}} ### {{{ Dear Emacs ## Local variables: ## folded-file: t ## End: ### }}} ###------------------------------------------------------------------------ ### getDateTimePart.R ends heregdata/R/xlsFormats.R0000644000175100001440000000152713003720414014035 0ustar hornikusers## s$Id: read.xls.R 1423 2010-02-21 17:12:30Z ggrothendieck2 $ xlsFormats <- function(perl="perl", verbose=FALSE) { ## determine proper path to perl executable perl <- if (missing(perl)) findPerl(verbose = verbose) else findPerl(perl, verbose = verbose) ## ## directories package.dir <- find.package('gdata') perl.dir <- file.path(package.dir,'perl') ## ## cmd <- "supportedFormats.pl" sc <- file.path(perl.dir, cmd) ## ## ## ## execution command cmd <- paste(shQuote(perl), shQuote(sc), sep=" ") ## if(verbose) { cat("\n") cat("Determining supported formats...\n") cat("\n") } ## output <- system(cmd, intern=TRUE) ## if(verbose) cat("Results: ", output, "\n") ## retval <- unlist( strsplit(output," ")) retval <- retval[ -c(1,2) ] return(retval) } gdata/R/frameApply.R0000644000175100001440000000322713003720414013772 0ustar hornikusers# $Id: frameApply.R 625 2005-06-09 14:20:30Z nj7w $ # frameApply <- function(x, by = NULL, on = by[1], fun = function(xi) c(Count = nrow(xi)) , subset = TRUE, simplify = TRUE, byvar.sep = "\\$\\@\\$", ...) { subset <- eval(substitute(subset), x, parent.frame()) x <- x[subset, , drop = FALSE] if(!is.null(by)) { x[by] <- drop.levels(x[by]) for(i in seq(along = by)) if(length(grep(byvar.sep, as.character(x[[by[i]]])))) stop("Choose a different value for byvar.sep.") byvars <- unique(x[by]) BYVAR <- do.call("paste", c(as.list(x[by]), sep = byvar.sep)) byvars <- byvars[order(unique(BYVAR)), , drop = FALSE] splx <- split(x[on], BYVAR) splres <- lapply(splx, fun, ...) 
if(!simplify) out <- list(by = byvars, result = splres) else { i <- 1 ; nres <- length(splres) while(inherits(splres[[i]], "try-error") & i < nres) i <- i + 1 nms <- names(splres[[i]]) # nms <- lapply(splres, function(xi) { # if(inherits(xi, "try-error")) return(NULL) # else names(xi) # }) # nms <- do.call("rbind", nms)[1, ] splres <- lapply(splres, function(xi) { if(inherits(xi, "try-error")) { return(rep(NA, length(nms))) } else xi }) res <- do.call("rbind", splres) res <- as.data.frame(res) names(res) <- nms if(length(intersect(names(byvars), names(res)))) stop("Names of \"by\" variables are also used as names of result elements. Not allowed.\n") out <- data.frame(byvars, res) } } else { out <- fun(x[on]) if(simplify) out <- as.data.frame(as.list(out)) } out } gdata/R/wideByFactor.R0000644000175100001440000000267713003720415014265 0ustar hornikusers### wideByFactor.R ###------------------------------------------------------------------------ ### What: Reshape by factor levels - code ### $Id$ ### Time-stamp: <2008-12-30 22:17:32 ggorjan> ###------------------------------------------------------------------------ wideByFactor <- function(x, factor, common, sort=TRUE, keepFactor=TRUE) { ## --- Setup --- if(!is.data.frame(x)) stop("'x' must be a data frame") if(length(factor) != 1) stop("'factor' can be only of length one") if(!is.factor(x[[factor]])) stop("column defined in 'factor' must be a factor") if(sort) x <- x[order(x[[factor]]), ] ## --- Extend by factors levels --- y <- x[common] if(keepFactor) y[factor] <- x[factor] levs <- levels(x[[factor]]) ## Remove common and factor from the list of column names other <- names(x) other <- other[!(other %in% common) & !(other %in% factor)] ## Add all other columns but as a set for each level of a factor for(level in levs) { for(col in other) { ## add a column col y[paste(col, level, sep=".")] <- x[col] ## fill with NA for other levels than level y[x[factor] != level, paste(col, level, sep=".")] <- NA ## This filling migth be inefficient if there is large number ## of levels, since there will be quite a lot of filling. } } y } ###------------------------------------------------------------------------ ### wideByFactor.R ends heregdata/R/mapLevels.R0000644000175100001440000002137013003720414013621 0ustar hornikusers### mapLevels.R ###------------------------------------------------------------------------ ### What: Mapping levels ### $Id: mapLevels.R 1991 2015-04-29 03:27:50Z warnes $ ### Time-stamp: <2007-04-26 13:16:18 ggorjan> ###------------------------------------------------------------------------ ### {{{ mapLevels ###------------------------------------------------------------------------ mapLevels <- function(x, codes=TRUE, sort=TRUE, drop=FALSE, combine=FALSE, ...) { UseMethod("mapLevels") } mapLevels.default <- function(x, codes=TRUE, sort=TRUE, drop=FALSE, combine=FALSE, ...) { stop(sprintf("mapLevels can only be used on %s and %s atomic 'x'", dQuote("factor"), dQuote("character"))) } mapLevels.character <- function(x, codes=TRUE, sort=TRUE, drop=FALSE, combine=FALSE, ...) { mapLevels.factor(x=x, codes=codes, sort=sort, drop=drop, ...) } ## Could coerce character to factor and then use factor method, but that ## is more expensive than simple unique and length used bellow in factor ## method mapLevels.factor <- function(x, codes=TRUE, sort=TRUE, drop=FALSE, combine=FALSE, ...) 
{ ## --- Argument actions ---- if(is.factor(x)) { # factor if(drop) x <- factor(x) nlevs <- nlevels(x) levs <- levels(x) } else { # character levs <- unique(x) nlevs <- length(levs) if(sort) levs <- sort(levs, ...) } ## --- Create a map --- map <- vector(mode="list", length=nlevs) names(map) <- levs if(codes) { map[1:nlevs] <- 1:nlevs } else { map[1:nlevs] <- levs } class(map) <- "levelsMap" map } mapLevels.list <- function(x, codes=TRUE, sort=TRUE, drop=FALSE, combine=FALSE, ...) { map <- lapply(x, mapLevels, codes=codes, sort=sort, drop=drop, ...) class(map) <- "listLevelsMap" if(combine) { if(!codes) { return(c(map, sort=sort, recursive=TRUE)) } else { stop(sprintf("can not combine integer %s", dQuote("levelsMaps"))) } } map } mapLevels.data.frame <- function(x, codes=TRUE, sort=TRUE, drop=FALSE, combine=FALSE, ...) { mapLevels.list(x, codes=codes, sort=sort, drop=drop, combine=combine, ...) } ### }}} ### {{{ print.* ###------------------------------------------------------------------------ .unlistLevelsMap <- function(x, ind=FALSE) { y <- unlist(x, use.names=FALSE) len <- sapply(x, FUN=length) names(y) <- rep(names(x), times=len) if(ind) { return(list(y, rep(1:length(x), times=len), len)) } else { return(y) } } print.levelsMap <- function(x, ...) { x <- .unlistLevelsMap(x) print(x, ...) } print.listLevelsMap <- function(x, ...) { class(x) <- "list" print(x, ...) } ### }}} ### {{{ [.* ###------------------------------------------------------------------------ ## We need these two since [.list method drops class "[.levelsMap" <- function(x, i) { classX <- class(x) class(x) <- "list" x <- x[i] class(x) <- classX x } "[.listLevelsMap" <- function(x, i) { classX <- class(x) class(x) <- "list" x <- x[i] class(x) <- classX x } ### }}} ### {{{ is.* ###------------------------------------------------------------------------ is.levelsMap <- function(x) inherits(x=x, what="levelsMap") is.listLevelsMap <- function(x) inherits(x=x, what="listLevelsMap") .isCharacterMap <- function(x) { if(is(x) == "levelsMap") { return(inherits(x=unlist(x), what="character")) } else { stop(sprintf("can be used only on %s", dQuote("levelsMap"))) } } ### }}} ### {{{ as.* ###------------------------------------------------------------------------ as.levelsMap <- function(x, check=TRUE, ...) { if(check) .checkLevelsMap(x, method="raw") class(x) <- "levelsMap" unique(x, ...) 
} as.listLevelsMap <- function(x, check=TRUE) { if(check) .checkListLevelsMap(x, method="raw") class(x) <- "listLevelsMap" x } ### }}} ### {{{ .check* ###------------------------------------------------------------------------ .checkLevelsMap <- function(x, method) { xLab <- deparse(substitute(x)) also <- "\b" if(method == "class") { also <- "also" if(!is.levelsMap(x)) stop(sprintf("'%s' must be a %s", xLab, dQuote("levelsMap"))) } if(!is.list(x) || is.null(names(x))) stop(sprintf("'%s' must be %s a named list", xLab, also)) ## Components can be of different length ## if(!all(sapply(x, FUN=length) == 1)) ## stop(sprintf("all components of '%s' must have length 1", xLab)) } .checkListLevelsMap <- function(x, method) { xLab <- deparse(substitute(x)) also <- "\b" if(method == "class") { also <- "also" if(!is.listLevelsMap(x)) stop(sprintf("'%s' must be a %s", xLab, dQuote("listLevelsMap"))) } if(!is.list(x) || any(!sapply(x, FUN=is.levelsMap))) stop(sprintf("'%s' must be %s a list of %s", xLab, also, dQuote("levelsMap"))) lapply(x, FUN=.checkLevelsMap, method=method) } ### }}} ### {{{ c.* ###------------------------------------------------------------------------ c.levelsMap <- function(..., sort=TRUE, recursive=FALSE) { x <- list(...) class(x) <- "listLevelsMap" ## we use recursive=TRUE here because ... is a lists of lists c(x, sort=sort, recursive=TRUE) } c.listLevelsMap <- function(..., sort=TRUE, recursive=FALSE) { x <- list(...) lapply(x, FUN=.checkListLevelsMap, method="class") x <- unlist(x, recursive=FALSE) if(!recursive) { class(x) <- "listLevelsMap" } else { if(any(!sapply(x, FUN=.isCharacterMap))) stop(sprintf("can not combine integer %s", dQuote("levelsMaps"))) if(!is.null(names(x))) names(x) <- NULL x <- unlist(x, recursive=FALSE) ## how to merge components with the same name? class(x) <- "levelsMap" if(sort) x <- sort(x) x <- unique(x) } x } ### }}} ### {{{ sort ###------------------------------------------------------------------------ sort.levelsMap <- function(x, decreasing=FALSE, na.last=TRUE, ...) x[order(names(x), na.last=na.last, decreasing=decreasing)] ### }}} ### {{{ unique ###------------------------------------------------------------------------ unique.levelsMap <- function(x, incomparables=FALSE, ...) { ## Find duplicates y <- .unlistLevelsMap(x, ind=TRUE) ## Duplicates for values and names combinations test <- duplicated(cbind(y[[1]], names(y[[1]])), incomparables=incomparables, ...) if(any(test)) { if(any(y[[3]] > 1)) { # work with the same structure as in x j <- 1 k <- y[[3]][1] empty <- NULL for(i in seq(along=x)) { # how slow is this loop? 
tmp <- !test[j:k] if(all(!tmp)) { # these components will be empty empty <- c(empty, i) } else { x[[i]] <- x[[i]][tmp] } j <- j + y[[3]][i] k <- k + y[[3]][i + 1] } if(!is.null(empty)) x[empty] <- NULL } else { # simple one-length components x <- x[!test] } } x } ### }}} ### {{{ mapLevels<- ###------------------------------------------------------------------------ "mapLevels<-" <- function(x, value) UseMethod("mapLevels<-") "mapLevels<-.default" <- function(x, value) { ## --- Checks --- classX <- c("integer", "character", "factor") if(any(!(class(x) %in% classX))) stop(sprintf("'x' must be either: %s", paste(dQuote(classX), collapse=", "))) .checkLevelsMap(x=value, method="class") ## --- Mapping levels in x --- char <- all(sapply(value, is.character)) int <- all(sapply(value, is.integer)) if(int) { # codes=TRUE if(is.integer(x)) x <- factor(x) if(is.factor(x)) levels(x) <- value if(is.character(x)) stop(sprintf("can not apply integer %s to %s", dQuote("levelsMap"), dQuote("character"))) } else { # codes=FALSE if(!char) stop("all components of 'value' must be of the same class") if(is.character(x)) x <- factor(x) if(is.factor(x)) levels(x) <- value if(is.integer(x)) stop(sprintf("can not apply character %s to %s", dQuote("levelsMap"), dQuote("integer"))) } x } "mapLevels<-.list" <- function(x, value) { if(!is.listLevelsMap(value)) { if(is.levelsMap(value)) { value <- as.listLevelsMap(list(value), check=FALSE) ## no need for check as default method does checking anyway } else { stop(sprintf("'x' must be either %s or %s", dQuote("listLevelsMap"), dQuote("levelsMap"))) } } x <- mapply(FUN="mapLevels<-", x=x, value=value, SIMPLIFY=FALSE) x } "mapLevels<-.data.frame" <- function(x, value) { x[] <- "mapLevels<-.list"(x, value) x } ### }}} ### {{{ Dear Emacs ## Local variables: ## folded-file: t ## End: ### }}} ###------------------------------------------------------------------------ ### mapLevels.R ends here gdata/R/trim.R0000644000175100001440000000140113111636156012646 0ustar hornikuserstrim <- function(s, recode.factor=TRUE, ...) UseMethod("trim", s) trim.default <- function(s, recode.factor=TRUE, ...) s trim.character <- function(s, recode.factor=TRUE, ...) { s <- sub(pattern="^[[:blank:]]+", replacement="", x=s) s <- sub(pattern="[[:blank:]]+$", replacement="", x=s) s } trim.factor <- function(s, recode.factor=TRUE, ...) { levels(s) <- trim(levels(s)) if(recode.factor) { dots <- list(x=s, ...) if(is.null(dots$sort)) dots$sort <- sort s <- do.call(what="reorder.factor", args=dots) } s } trim.list <- function(s, recode.factor=TRUE, ...) lapply(s, trim, recode.factor=recode.factor, ...) trim.data.frame <- function(s, recode.factor=TRUE, ...) { s[] <- trim.list(s, recode.factor=recode.factor, ...) s } gdata/R/ls.funs.R0000644000175100001440000000052313003720415013257 0ustar hornikusersls.funs <- function (...) { mycall <- match.call() mycall[[1]] <- as.name("ls") nameList <- eval.parent(mycall) if(length(nameList)>0) { funcFlags <- sapply( nameList, function(x) is.function(get(x)) ) return(nameList[funcFlags]) } else return( list() ) } gdata/R/keep.R0000644000175100001440000000132313003720415012612 0ustar hornikuserskeep <- function(..., list=character(0), all=FALSE, sure=FALSE) { if(missing(...) 
&& missing(list)) { warning("keep something, or use rm(list=ls()) to clear workspace - ", "nothing was removed") return(invisible(NULL)) } names <- as.character(substitute(list(...)))[-1] list <- c(list, names) keep.elements <- match(list, ls(1,all.names=all)) if(any(is.na(keep.elements))) { warning("you tried to keep \"", list[which(is.na(keep.elements))[1]], "\" which doesn't exist in workspace - nothing was removed", sep="") return(invisible(NULL)) } if(sure) rm(list=ls(1,all.names=all)[-keep.elements], pos=1) else return(ls(1,all.names=all)[-keep.elements]) } gdata/R/centerText.R0000644000175100001440000000104013003720415014007 0ustar hornikusers## Function to center text strings for display on the text console ## by prepending the necessary number of spaces to each element. centerText <- function(x, width=getOption("width")) { retval <- vector(length=length(x), mode="character") for( i in 1:length(x) ) { text <- trim(x[i]) textWidth <- nchar(text) nspaces <- floor((width - textWidth)/2) spaces <- paste( rep(" ",nspaces), sep="", collapse="") retval[i] <- paste( spaces, text, sep="", collapse="\n" ) } retval } gdata/R/ll.R0000644000175100001440000000531513003720415012302 0ustar hornikusersll <- function(pos=1, unit="KB", digits=0, dim=FALSE, sort=FALSE, class=NULL, invert=FALSE, ...) { get.object.class <- function(object.name, pos) { object <- get(object.name, pos=pos) class <- class(object)[1] return(class) } get.object.dim <- function(object.name, pos) { object <- get(object.name, pos=pos) if(class(object)[1] == "function") dim <- "" else if(!is.null(dim(object))) dim <- paste(dim(object), collapse=" x ") else dim <- length(object) return(dim) } get.object.size <- function(object.name, pos) { object <- get(object.name, pos=pos) size <- try(unclass(object.size(object)), silent=TRUE) if(class(size) == "try-error") size <- 0 return(size) } ## 1 Set unit, denominator, original.rank unit <- match.arg(unit, c("bytes","KB","MB")) denominator <- switch(unit, "KB"=1024, "MB"=1024^2, 1) original.rank <- NULL ## 2 Detect what 'pos' is like, then get class, size, dim if(is.character(pos)) # pos is an environment name pos <- match(pos, search()) if(is.list(pos)) # pos is a list-like object { if(is.null(names(pos))) stop("All elements of a list must be named") original.rank <- rank(names(pos)) pos <- as.environment(pos) } if(length(ls(pos,...)) == 0) # pos is an empty environment { object.frame <- data.frame() } else if(environmentName(as.environment(pos)) == "Autoloads") { object.frame <- data.frame(rep("function",length(ls(pos,...))), rep(0,length(ls(pos,...))), row.names=ls(pos,...)) if(dim) { object.frame <- cbind(object.frame, rep("",nrow(object.frame))) names(object.frame) <- c("Class", unit, "Dim") } else names(object.frame) <- c("Class", unit) } else { class.vector <- sapply(ls(pos,...), get.object.class, pos=pos) size.vector <- sapply(ls(pos,...), get.object.size, pos=pos) size.vector <- round(size.vector/denominator, digits) object.frame <- data.frame(class.vector=class.vector, size.vector=size.vector, row.names=names(size.vector)) names(object.frame) <- c("Class", unit) if(dim) object.frame <- cbind(object.frame, Dim=sapply(ls(pos,...),get.object.dim,pos=pos)) } ## 3 Retain original order of list elements if(!sort && !is.null(original.rank)) object.frame <- object.frame[original.rank,] ## 4 Filter results given class if(!is.null(class)) { include <- object.frame$Class %in% class if(invert) include <- !include object.frame <- object.frame[include,] } return(object.frame) } 
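## Illustrative usage of ll() (comments only, not executed; the arguments
## shown are examples):
##   ll()                                  # class and size (KB) of workspace objects
##   ll("package:gdata", class="function") # only functions in the attached gdata environment
##   ll(unit="MB", dim=TRUE)               # report sizes in MB and include dimensions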
gdata/R/elem.R0000644000175100001440000000037613003720415012617 0ustar hornikusers# $Id: elem.R 625 2005-06-09 14:20:30Z nj7w $ elem <- function(object=1, unit=c("KB","MB","bytes"), digits=0, dimensions=FALSE) { .Deprecated("ll", package="gdata") ll(pos=object, unit=unit, digits=digits, dimensions=dimensions) } gdata/R/cbindX.R0000644000175100001440000000242113003720415013075 0ustar hornikusers### cbindX.R ###------------------------------------------------------------------------ ### What: Column-bind objects with different number of rows - code ### $Id: cbindX.R 1300 2008-08-05 11:47:18Z ggorjan $ ### Time-stamp: <2008-08-05 13:39:14 ggorjan> ###------------------------------------------------------------------------ cbindX <- function(...) { ## --- Setup --- x <- list(...) ## Are all objects matrices or data.frames? test <- sapply(x, function(z) is.matrix(z) | is.data.frame(z)) if(any(!test)) stop("only matrices and data.frames can be used") ## Get maximum number of rows tmp <- sapply(x, nrow) maxi <- which.max(tmp) test <- tmp < tmp[maxi] ## --- Core --- ## Adding additional "empty" rows so that all objects have the same number of rows for(i in 1:length(tmp)) { if(test[i]) { add <- matrix(nrow=tmp[maxi] - tmp[i], ncol=ncol(x[[i]])) if(is.data.frame(x[[i]])) { add <- as.data.frame(add) } colnames(add) <- colnames(x[[i]]) x[[i]] <- rbind(x[[i]], add) } } ## Column-bind all objects ret <- x[[1]] for(i in 2:length(tmp)) { ret <- cbind(ret, x[[i]]) } ## --- Return --- ret } ###------------------------------------------------------------------------ ### cbindX.R ends here gdata/R/rename.vars.R0000644000175100001440000000257613003720414014121 0ustar hornikusers# $Id: rename.vars.R 2072 2016-02-03 20:00:57Z warnes $ rename.vars <- function(data,from='',to='',info=TRUE) { dsn <- deparse(substitute(data)) dfn <- names(data) if ( length(from) != length(to)) { cat('--------- from and to not same length ---------\n') stop() } if (length(dfn) < length(to)) { cat('--------- too many new names ---------\n') stop() } chng <- match(from,dfn) frm.in <- from %in% dfn if (!all(frm.in) ) { cat('---------- some of the from names not found in',dsn,'\n') stop() } if (length(to) != length(unique(to))) { cat('---------- New names not unique\n') stop() } dfn.new <- dfn dfn.new[chng] <- to if (info) cat('\nChanging in',dsn) tmp <- rbind(from,to) dimnames(tmp)[[1]] <- c('From:','To:') dimnames(tmp)[[2]] <- rep('',length(from)) if (info) { print(tmp,quote=FALSE) cat("\n") } names(data) <- dfn.new data } remove.vars <- function( data, names, info=TRUE) { dsn <- deparse(substitute(data)) if (info) cat('\nChanging in',dsn, "\n") flag <- names %in% colnames(data) if(any(!flag)) warning("Variable(s) not found: ", paste(names[!flag], collapse=", ") ) if(any(flag)) { if(info) cat("Dropping variables:", paste(names[flag], collapse=", "), "\n\n") for(var in names[flag]) data[[var]] <- NULL } data }gdata/R/humanReadable.R0000644000175100001440000000471713003720415014430 0ustar hornikusershumanReadable <- function(x, units="auto", standard=c("IEC", "SI", "Unix"), digits=1, width=NULL, sep=" ", justify = c("right", "left") ) { ## --- Setup --- suffix.SI <- c("B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB") suffix.IEC <- c("B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB") suffix.Unix <- c("B" , "K", "M", "G", "T", "P", "E", "Z", "Y") standard <- match.arg(standard) if(length(justify)==1) justify <- c(justify, justify) ## --- Functions --- .applyHuman <- function(x, base, suffix, digits, width, sep) { ## Which 
suffix should we use? n <- length(suffix) i <- pmax(pmin(floor(log(x, base)), n-1),0) if(!is.finite(i)) i <- 0 x <- x / base^i ## Formatting if(is.null(width)) ## the same formatting for all x <- format(round(x=x, digits=digits), nsmall=digits) else { ## similar to ls, du, and df lenX <- nchar(x) if(lenX > width) { digits <- pmax( width - nchar(round(x)) - 1, 0) } if(i == 0) digits <- 0 x <- round(x, digits=digits) } c(x, suffix[i+1]) } ## -- Work if(any(x < 0)) stop("'x' must be positive") if(standard == "SI") { suffix <- suffix.SI base <- 10^3 } else if (standard=="IEC") { suffix <- suffix.IEC base <- 2^10 } else # (standard=="Unix) { suffix <- suffix.Unix base <- 2^10 } if(!missing(units) && units=="bytes") { retval <- rbind(x, "bytes") } else if(!missing(units) && units!="auto") { units <- suffix[match( toupper(units), toupper(suffix) )] power <- match(units, suffix ) -1 X <- x/(base^power) X <- format.default(x=X, digits=digits, nsmall=digits) retval <- rbind(X, rep(units, length(X))) } else retval <- sapply(X=x, FUN=".applyHuman", base=base, suffix=suffix, digits=digits, width=width, sep=sep) if(all(justify == "none")) paste(trim(retval[1,]), trim(retval[2,]), sep=sep) else paste(format(trim(retval[1,]), justify=justify[1]), format(trim(retval[2,]), justify=justify[2]), sep=sep) } gdata/R/matchcols.R0000644000175100001440000000157213003720415013651 0ustar hornikusers# $Id: matchcols.R 625 2005-06-09 14:20:30Z nj7w $ # select the columns which match/don't match a set of include/omit patterns. matchcols <- function(object, with, without, method=c("and","or"), ...) { method <- match.arg(method) cols <- colnames(object) # include columns matching 'with' pattern(s) if(method=="and") for(i in 1:length(with)) { if(length(cols)>0) cols <- grep(with[i], cols, value=TRUE, ...) } else if(!missing(with)) if(length(cols)>0) cols <- sapply( with, grep, x=cols, value=TRUE, ...) # exclude columns matching 'without' pattern(s) if(!missing(without)) for(i in 1:length(without)) if(length(cols)>0) { omit <- grep(without[i], cols, ...) if(length(omit)>0) cols <- cols[-omit] } cols } gdata/R/nPairs.R0000644000175100001440000000307713003720414013131 0ustar hornikusers### nPairs.R ###------------------------------------------------------------------------ ### What: Number of variable pairs - code ### $Id$ ### Time-stamp: <2008-12-30 18:29:58 ggorjan> ###------------------------------------------------------------------------ nPairs <- function(x, margin=FALSE, names=TRUE, abbrev=TRUE, ...) { ## --- Setup --- if(!is.data.frame(x) & !is.matrix(x)) stop("'x' must be a data.frame or a matrix") k <- ncol(x) if(!margin) { ret <- matrix(nrow=k, ncol=k) } else { ret <- matrix(nrow=k, ncol=k + 1) } ## --- Count --- diag(ret)[1:k] <- apply(X=x, MARGIN=2, FUN=function(x) sum(!is.na(x))) for(i in 1:k) { for(j in i:k) { ret[i, j] <- ret[j, i] <- sum(!is.na(x[, i]) & !is.na(x[, j])) if(margin) { if(i == 1) { ret[i, (k + 1)] <- ret[1, 1] } else { ret[i, (k + 1)] <- sum(rowSums(!is.na(x[, c(1:i)])) == i) } } } } ## --- Names --- if(names) { tmp <- colnames(x) if(abbrev) tmp <- as.character(abbreviate(tmp, ...)) rownames(ret) <- tmp if(margin) { colnames(ret) <- c(tmp, "all") } else { colnames(ret) <- tmp } } class(ret) <- c("nPairs", class(ret)) ret } summary.nPairs <- function(object, ...) 
{ n <- nrow(object) ret <- matrix(data=0, nrow=n, ncol=n) for(i in 1:n) { tmp <- 1:n tmp <- tmp[!(tmp == i)] ret[i, tmp] <- object[i, i] - object[i, tmp] } dimnames(ret) <- dimnames(object) ret } ###------------------------------------------------------------------------ ### nPairs.R ends here gdata/R/update.data.frame.R0000644000175100001440000000301113115324412015146 0ustar hornikusers# # This function replace rows in 'object' by corresponding rows in 'new' that have # # the same value for 'by' # update.data.frame <- function(object, # new, # by, # by.object=by, # by.new=by, # append=TRUE, # verbose=TRUE, # ...) # { # retval <- object # # if(length(by.object)>1) # stop("'by.object' can specify at most one column") # # if(length(by.new)>1) # stop("'by.new' can specify at most one column") # # # object.by <- object[[by.object]] # new.by <- new [[by.new ]] # # matches.object <- match(new.by, object.by) # matches.new <- which(!is.na(matches.object)) # nomatch.new <- which(is.na(matches.object)) # matches.object <- matches.object[!is.na(matches.object)] # # if(length(matches.object)>0) # retval[matches.object, ] <- new[matches.new,] # # if(length(nomatch.new) && append) # retval <- rbind(retval, new[nomatch.new,]) # # if(verbose) # { # cat("\n") # cat("Number of rows in object:", nrow(object), "\n") # cat("Number of rows in new :", nrow(new), "\n") # cat("\n") # cat("Number of rows replaced :", length(matches.object), "\n") # cat("Number of rows appended :", length(nomatch.new), "\n") # cat("\n") # cat("Number of rows in result:", nrow(retval), "\n") # cat("\n") # } # retval # } gdata/R/startsWith.R0000644000175100001440000000045013115325605014050 0ustar hornikusersstartsWith <- function(str, pattern, trim=FALSE, ignore.case=FALSE) { if(trim) str <- trim(str) if(ignore.case) { str <- toupper(str) pattern <- toupper(pattern) } #substr(str,start=1,stop=nchar(pattern))==pattern base::startsWith(str, pattern) } gdata/R/object.size.R0000644000175100001440000000542013003720415014107 0ustar hornikusers###------------------------------------------------------------------------ ### What: Print object size in human readable format - code ###------------------------------------------------------------------------ object.size <- function(...) { structure(sapply(list(...), utils::object.size), class=c("object_sizes", "numeric")) } print.object_sizes <- function(x, quote=FALSE, humanReadable=getOption("humanReadable"), standard="IEC", units, digits=1, width=NULL, sep=" ", justify = c("right", "left"), ...) { print(format(x, humanReadable=humanReadable, standard=standard, units=units, digits=digits, width=width, sep=sep, justify=justify), quote=quote, ...) invisible(x) } format.object_sizes <- function(x, humanReadable=getOption("humanReadable"), standard="IEC", units, digits=1, width=NULL, sep=" ", justify = c("right", "left"), ...) 
{ if( !missing(units) ) { if (units=="bytes") paste(x, "bytes") else humanReadable(x, standard=standard, units=units, digits=digits, width=width, sep=sep, justify=justify ) } else if( is.null(humanReadable) || humanReadable==FALSE ) paste(x, "bytes") else humanReadable(x, standard=standard, units=units, digits=digits, width=width, sep=sep, justify=justify) } is.object_sizes <- function(x) inherits(x, what="object_sizes") as.object_sizes <- function(x) { if(!is.numeric(x) || any(x<0)) stop("'x' must be a positive numeric vector") class(x) <- c("object_sizes", "numeric") x } c.object_sizes <- function(..., recursive=FALSE) { x <- NextMethod() if(is.numeric(x)) class(x) <- c("object_sizes", "numeric") x } ###------------------------------------------------------------------------ ### object.size.R ends here gdata/R/Args.R0000644000175100001440000000121113003720414012555 0ustar hornikusersArgs <- function(name, sort=FALSE) { a <- formals(get(as.character(substitute(name)), pos=1)) if(is.null(a)) return(NULL) arg.labels <- names(a) arg.values <- as.character(a) char <- sapply(a, is.character) arg.values[char] <- paste("\"", arg.values[char], "\"", sep="") if(sort) { ord <- order(arg.labels) if(any(arg.labels == "...")) ord <- c(ord[-which(arg.labels[ord]=="...")], which(arg.labels=="...")) arg.labels <- arg.labels[ord] arg.values <- arg.values[ord] } output <- data.frame(value=I(arg.values), row.names=arg.labels) print(output, right=FALSE) invisible(output) } gdata/R/ans.R0000644000175100001440000000003613003720414012446 0ustar hornikusersans <- function() .Last.value gdata/vignettes/0000755000175100001440000000000013115346316013363 5ustar hornikusersgdata/vignettes/unknown.Rnw0000644000175100001440000002336513003720415015553 0ustar hornikusers %\VignetteIndexEntry{Working with Unknown Values} %\VignettePackage{gdata} %\VignetteKeywords{unknown, missing, manip} \documentclass[a4paper]{report} \usepackage{Rnews} \usepackage[round]{natbib} \bibliographystyle{abbrvnat} \usepackage{Sweave} \SweaveOpts{strip.white=all, keep.source=TRUE} \begin{document} \begin{article} \title{Working with Unknown Values} \subtitle{The \pkg{gdata} package} \author{by Gregor Gorjanc} \maketitle This vignette has been published as \cite{Gorjanc}. \section{Introduction} Unknown or missing values can be represented in various ways. For example SAS uses \code{.}~(dot), while \R{} uses \code{NA}, which we can read as Not Available. When we import data into \R{}, say via \code{read.table} or its derivatives, conversion of blank fields to \code{NA} (according to \code{read.table} help) is done for \code{logical}, \code{integer}, \code{numeric} and \code{complex} classes. Additionally, the \code{na.strings} argument can be used to specify values that should also be converted to \code{NA}. Inversely, there is an argument \code{na} in \code{write.table} and its derivatives to define value that will replace \code{NA} in exported data. There are also other ways to import/export data into \R{} as described in the {\emph R Data Import/Export} manual \citep{RImportExportManual}. However, all approaches lack the possibility to define unknown value(s) for some particular column. It is possible that an unknown value in one column is a valid value in another column. For example, I have seen many datasets where values such as 0, -9, 999 and specific dates are used as column specific unknown values. 
This note describes a set of functions in package \pkg{gdata}\footnote{ package version 2.3.1} \citep{WarnesGdata}: \code{isUnknown}, \code{unknownToNA} and \code{NAToUnknown}, which can help with testing for unknown values and conversions between unknown values and \code{NA}. All three functions are generic (S3) and were tested (at the time of writing) to work with: \code{integer}, \code{numeric}, \code{character}, \code{factor}, \code{Date}, \code{POSIXct}, \code{POSIXlt}, \code{list}, \code{data.frame} and \code{matrix} classes. \section{Description with examples} The following examples show simple usage of these functions on \code{numeric} and \code{factor} classes, where value \code{0} (beside \code{NA}) should be treated as an unknown value: <>= library("gdata") xNum <- c(0, 6, 0, 7, 8, 9, NA) isUnknown(x=xNum) @ The default unknown value in \code{isUnknown} is \code{NA}, which means that output is the same as \code{is.na} --- at least for atomic classes. However, we can pass the argument \code{unknown} to define which values should be treated as unknown: <>= isUnknown(x=xNum, unknown=0) @ This skipped \code{NA}, but we can get the expected answer after appropriately adding \code{NA} into the argument \code{unknown}: <>= isUnknown(x=xNum, unknown=c(0, NA)) @ Now, we can change all unknown values to \code{NA} with \code{unknownToNA}. There is clearly no need to add \code{NA} here. This step is very handy after importing data from an external source, where many different unknown values might be used. Argument \code{warning=TRUE} can be used, if there is a need to be warned about ``original'' \code{NA}s: <>= (xNum2 <- unknownToNA(x=xNum, unknown=0)) @ Prior to export from \R{}, we might want to change unknown values (\code{NA} in \R{}) to some other value. Function \code{NAToUnknown} can be used for this: <>= NAToUnknown(x=xNum2, unknown=999) @ Converting \code{NA} to a value that already exists in \code{x} issues an error, but \code{force=TRUE} can be used to overcome this if needed. But be warned that there is no way back from this step: <>= NAToUnknown(x=xNum2, unknown=7, force=TRUE) @ Examples below show all peculiarities with class \code{factor}. \code{unknownToNA} removes \code{unknown} value from levels and inversely \code{NAToUnknown} adds it with a warning. Additionally, \code{"NA"} is properly distinguished from \code{NA}. It can also be seen that the argument \code{unknown} in functions \code{isUnknown} and \code{unknownToNA} need not match the class of \code{x} (otherwise factor should be used) as the test is internally done with \code{\%in\%}, which nicely resolves coercing issues. <>= (xFac <- factor(c(0, "BA", "RA", "BA", NA, "NA"))) isUnknown(x=xFac) isUnknown(x=xFac, unknown=0) isUnknown(x=xFac, unknown=c(0, NA)) isUnknown(x=xFac, unknown=c(0, "NA")) isUnknown(x=xFac, unknown=c(0, "NA", NA)) (xFac <- unknownToNA(x=xFac, unknown=0)) (xFac <- NAToUnknown(x=xFac, unknown=0)) @ These two examples with classes \code{numeric} and \code{factor} are fairly simple and we could get the same results with one or two lines of \R{} code. The real benefit of the set of functions presented here is in \code{list} and \code{data.frame} methods, where \code{data.frame} methods are merely wrappers for \code{list} methods. We need additional flexibility for \code{list}/\code{data.frame} methods, due to possibly having multiple unknown values that can be different among \code{list} components or \code{data.frame} columns. 
For these two methods, the argument \code{unknown} can be either a \code{vector} or \code{list}, both possibly named. Of course, greater flexibility (defining multiple unknown values per component/column) can be achieved with a \code{list}. When a \code{vector}/\code{list} object passed to the argument \code{unknown} is not named, the first value/component of a \code{vector}/\code{list} matches the first component/column of a \code{list}/\code{data.frame}. This can be quite error prone, especially with \code{vectors}. Therefore, I encourage the use of a \code{list}. In case \code{vector}/\code{list} passed to argument \code{unknown} is named, names are matched to names of \code{list} or \code{data.frame}. If lengths of \code{unknown} and \code{list} or \code{data.frame} do not match, recycling occurs. The example below illustrates the application of the described functions to a list which is composed of previously defined and modified numeric (\code{xNum}) and factor (\code{xFac}) classes. First, function \code{isUnknown} is used with \code{0} as an unknown value. Note that we get \code{FALSE} for \code{NA}s as has been the case in the first example. <>= (xList <- list(a=xNum, b=xFac)) isUnknown(x=xList, unknown=0) @ We need to add \code{NA} as an unknown value. However, we do not get the expected result this way! <>= isUnknown(x=xList, unknown=c(0, NA)) @ This is due to matching of values in the argument \code{unknown} and components in a \code{list}; i.e., \code{0} is used for component \code{a} and \code{NA} for component \code{b}. Therefore, it is less error prone and more flexible to pass a \code{list} (preferably a named list) to the argument \code{unknown}, as shown below. <>= (xList1 <- unknownToNA(x=xList, unknown=list(b=c(0, "NA"), a=0))) @ Changing \code{NA}s to some other value (only one per component/column) can be accomplished as follows: <>= NAToUnknown(x=xList1, unknown=list(b="no", a=0)) @ A named component \code{.default} of a \code{list} passed to argument \code{unknown} has a special meaning as it will match a component/column with that name and any other not defined in \code{unknown}. As such it is very useful if the number of components/columns with the same unknown value(s) is large. Consider a wide \code{data.frame} named \code{df}. Now \code{.default} can be used to define unknown value for several columns: <>= df <- data.frame(col1=c(0, 1, 999, 2), col2=c("a", "b", "c", "unknown"), col3=c(0, 1, 2, 3), col4=c(0, 1, 2, 2)) @ <>= tmp <- list(.default=0, col1=999, col2="unknown") (df2 <- unknownToNA(x=df, unknown=tmp)) @ If there is a need to work only on some components/columns you can of course ``skip'' columns with standard \R{} mechanisms, i.e., by subsetting \code{list} or \code{data.frame} objects: <>= df2 <- df cols <- c("col1", "col2") tmp <- list(col1=999, col2="unknown") df2[, cols] <- unknownToNA(x=df[, cols], unknown=tmp) df2 @ \section{Summary} Functions \code{isUnknown}, \code{unknownToNA} and \code{NAToUnknown} provide a useful interface to work with various representations of unknown/missing values. Their use is meant primarily for shaping the data after importing to or before exporting from \R{}. I welcome any comments or suggestions. % \bibliography{refs} \begin{thebibliography}{1} \providecommand{\natexlab}[1]{#1} \providecommand{\url}[1]{\texttt{#1}} \expandafter\ifx\csname urlstyle\endcsname\relax \providecommand{\doi}[1]{doi: #1}\else \providecommand{\doi}{doi: \begingroup \urlstyle{rm}\Url}\fi \bibitem[Gorjanc(2007)]{Gorjanc} G.~Gorjanc. 
\newblock Working with unknown values: the gdata package. \newblock \emph{R News}, 7\penalty0 (1):\penalty0 24--26, 2007. \newblock URL \url{http://CRAN.R-project.org/doc/Rnews/Rnews_2007-1.pdf}. \bibitem[{R Development Core Team}(2006)]{RImportExportManual} {R Development Core Team}. \newblock \emph{R Data Import/Export}, 2006. \newblock URL \url{http://cran.r-project.org/manuals.html}. \newblock ISBN 3-900051-10-0. \bibitem[Warnes (2006)]{WarnesGdata} G.~R. Warnes. \newblock \emph{gdata: Various R programming tools for data manipulation}, 2006. \newblock URL \url{http://cran.r-project.org/src/contrib/Descriptions/gdata.html}. \newblock R package version 2.3.1. Includes R source code and/or documentation contributed by Ben Bolker, Gregor Gorjanc and Thomas Lumley. \end{thebibliography} \address{Gregor Gorjanc\\ University of Ljubljana, Slovenia\\ \email{gregor.gorjanc@bfro.uni-lj.si}} \end{article} \end{document} gdata/vignettes/Rnews.sty0000644000175100001440000001556413003720415015225 0ustar hornikusers%% %% This is file `Rnews.sty', %% generated with the docstrip utility. %% %% The original source files were: %% %% Rnews.dtx (with options: `package') %% %% IMPORTANT NOTICE: %% %% For the copyright see the source file. %% %% Any modified versions of this file must be renamed %% with new filenames distinct from Rnews.sty. %% %% For distribution of the original source see the terms %% for copying and modification in the file Rnews.dtx. %% %% This generated file may be distributed as long as the %% original source files, as listed above, are part of the %% same distribution. (The sources need not necessarily be %% in the same archive or directory.) \def\fileversion{v0.3.6} \def\filename{Rnews} \def\filedate{2002/06/02} \def\docdate {2001/10/31} %% %% Package `Rnews' to use with LaTeX2e %% Copyright (C) 2001--2002 by the R Core Development Team %% Please report errors to KH or FL %% %% -*- LaTeX -*- \NeedsTeXFormat{LaTeX2e}[1995/12/01] \ProvidesPackage{\filename}[\filedate\space\fileversion\space Rnews package] \typeout{Package: `\filename\space\fileversion \@spaces <\filedate>'} \typeout{English documentation as of <\docdate>} \RequirePackage{ifthen} \newboolean{Rnews@driver} \DeclareOption{driver}{\setboolean{Rnews@driver}{true}} \DeclareOption*{\PackageWarning{\filename}{Unknown option `\CurrentOption'}} \ProcessOptions\relax \ifthenelse{\boolean{Rnews@driver}}{}{ \RequirePackage{multicol,graphicx,color,fancyhdr,hyperref} \newcommand{\volume}[1]{\def\Rnews@volume{#1}} \newcommand{\volnumber}[1]{\def\Rnews@number{#1}} \renewcommand{\date}[1]{\def\Rnews@date{#1}} \setcounter{secnumdepth}{-1} \renewcommand{\author}[1]{\def\Rnews@author{#1}} \renewcommand{\title}[1]{\def\Rnews@title{#1}} \newcommand{\subtitle}[1]{\def\Rnews@subtitle{#1}} \newenvironment{article}{% \author{}\title{}\subtitle{}}{\end{multicols}} \renewcommand{\maketitle}{ \begin{multicols}{2}[\chapter{\Rnews@title}\refstepcounter{chapter}][3cm] \ifx\empty\Rnews@subtitle\else\noindent\textbf{\Rnews@subtitle} \par\nobreak\addvspace{\baselineskip}\fi \ifx\empty\Rnews@author\else\noindent\textit{\Rnews@author} \par\nobreak\addvspace{\baselineskip}\fi \@afterindentfalse\@nobreaktrue\@afterheading} \renewcommand\chapter{\secdef\Rnews@chapter\@schapter} \providecommand{\nohyphens}{% \hyphenpenalty=10000\exhyphenpenalty=10000\relax} \newcommand{\Rnews@chapter}{% \renewcommand{\@seccntformat}[1]{}% \@startsection{chapter}{0}{0mm}{% -2\baselineskip \@plus -\baselineskip \@minus -.2ex}{\p@}{% \normalfont\Huge\bfseries\raggedright}} 
\renewcommand*\l@chapter{\@dottedtocline{0}{0pt}{1em}} \def\@schapter#1{\section*#1} \renewenvironment{figure}[1][]{% \def\@captype{figure} \noindent \begin{minipage}{\columnwidth}}{% \end{minipage}\par\addvspace{\baselineskip}} \renewcommand{\theequation}{\@arabic\c@equation} \def\equation{% \let\refstepcounter\H@refstepcounter \H@equation \def\newname{\arabic{chapter}.\theequation}% \let\theHequation\newname% \hyper@makecurrent{equation}% \Hy@raisedlink{\hyper@anchorstart{\@currentHref}}% \let\refstepcounter\new@refstepcounter}% \def\endequation{\Hy@raisedlink{\hyper@anchorend}\H@endequation} \renewcommand{\thefigure}{\@arabic\c@figure} \renewcommand{\thetable}{\@arabic\c@table} \renewcommand{\contentsname}{Contents of this issue:} \renewcommand\tableofcontents{% \section*{\contentsname \@mkboth{% \MakeUppercase\contentsname}{\MakeUppercase\contentsname}}% \@starttoc{toc}} \renewcommand{\titlepage}{% \noindent \rule{\textwidth}{1pt}\\[-.8\baselineskip] \rule{\textwidth}{.5pt} \begin{center} \includegraphics[height=2cm]{Rlogo}\hspace{7mm} \fontsize{2cm}{2cm}\selectfont News \end{center} The Newsletter of the R Project\hfill Volume \Rnews@volume/\Rnews@number, \Rnews@date\\[-.5\baselineskip] \rule{\textwidth}{.5pt}\\[-.8\baselineskip] \rule{\textwidth}{1pt} \vspace{1cm} \fancyhf{} \fancyhead[L]{Vol.~\Rnews@volume/\Rnews@number, \Rnews@date} \fancyhead[R]{\thepage} \fancyfoot[L]{R News} \fancyfoot[R]{ISSN 1609-3631} \thispagestyle{empty} \begin{bottombox} \begin{multicols}{2} \setcounter{tocdepth}{0} \tableofcontents \setcounter{tocdepth}{2} \end{multicols} \end{bottombox}} \setlength{\textheight}{250mm} \setlength{\topmargin}{-10mm} \setlength{\textwidth}{17cm} \setlength{\oddsidemargin}{-6mm} \setlength{\columnseprule}{.1pt} \setlength{\columnsep}{20pt} \RequirePackage{ae,mathpple} \RequirePackage[T1]{fontenc} \renewcommand{\rmdefault}{ppl} \renewcommand{\sfdefault}{aess} \renewcommand{\ttdefault}{aett} \definecolor{Red}{rgb}{0.7,0,0} \definecolor{Blue}{rgb}{0,0,0.8} \definecolor{hellgrau}{rgb}{0.55,0.55,0.55} \newcommand{\R}{R} \newcommand{\address}[1]{\addvspace{\baselineskip}\noindent\emph{#1}} \newcommand{\email}[1]{\href{mailto:#1}{\normalfont\texttt{#1}}} \newsavebox{\Rnews@box} \newlength{\Rnews@len} \newenvironment{bottombox}{% \begin{figure*}[b] \begin{center} \noindent \begin{lrbox}{\Rnews@box} \begin{minipage}{0.99\textwidth}}{% \end{minipage} \end{lrbox} \addtolength{\Rnews@len}{\fboxsep} \addtolength{\Rnews@len}{\fboxrule} \hspace*{-\Rnews@len}\fbox{\usebox{\Rnews@box}} \end{center} \end{figure*}} \RequirePackage{verbatim} \def\boxedverbatim{% \def\verbatim@processline{% {\setbox0=\hbox{\the\verbatim@line}% \hsize=\wd0 \the\verbatim@line\par}}% \@minipagetrue \@tempswatrue \setbox0=\vbox \bgroup\small\verbatim } \def\endboxedverbatim{% \endverbatim \unskip\setbox0=\lastbox \egroup \fbox{\box0} } \pagestyle{fancy} } % \ifthenelse{\boolean{Rnews@driver}} \newcommand\code{\bgroup\@codex} \def\@codex#1{{\normalfont\ttfamily\hyphenchar\font=-1 #1}\egroup} \newcommand{\kbd}[1]{{\normalfont\texttt{#1}}} \newcommand{\key}[1]{{\normalfont\texttt{\uppercase{#1}}}} \newcommand\samp{`\bgroup\@noligs\@sampx} \def\@sampx#1{{\normalfont\texttt{#1}}\egroup'} \newcommand{\var}[1]{{\normalfont\textsl{#1}}} \let\env=\code \newcommand{\file}[1]{{`\normalfont\textsf{#1}'}} \let\command=\code \let\option=\samp \newcommand{\dfn}[1]{{\normalfont\textsl{#1}}} \newcommand{\acronym}[1]{{\normalfont\textsc{\lowercase{#1}}}} \newcommand{\strong}[1]{{\normalfont\fontseries{b}\selectfont #1}} 
\let\pkg=\strong \RequirePackage{alltt} \newenvironment{example}{\begin{alltt}}{\end{alltt}} \newenvironment{smallexample}{\begin{alltt}\small}{\end{alltt}} \newenvironment{display}{\list{}{}\item\relax}{\endlist} \newenvironment{smallverbatim}{\small\verbatim}{\endverbatim} \providecommand{\operatorname}[1]{% \mathop{\operator@font#1}\nolimits} \renewcommand{\P}{% \mathop{\operator@font I\hspace{-1.5pt}P\hspace{.13pt}}} \newcommand{\E}{% \mathop{\operator@font I\hspace{-1.5pt}E\hspace{.13pt}}} \newcommand{\VAR}{\operatorname{var}} \newcommand{\COV}{\operatorname{cov}} \newcommand{\COR}{\operatorname{cor}} \RequirePackage{amsfonts} \endinput %% %% End of file `Rnews.sty'. gdata/vignettes/mapLevels.Rnw0000644000175100001440000002020513111406422015773 0ustar hornikusers
%\VignetteIndexEntry{Mapping levels of a factor}
%\VignettePackage{gdata}
%\VignetteKeywords{levels, factor, manip}
\documentclass[a4paper]{report}
\usepackage{Rnews}
\usepackage[round]{natbib}
\bibliographystyle{abbrvnat}
\usepackage{Sweave}
\SweaveOpts{strip.white=all, keep.source=TRUE}
\begin{document}
\SweaveOpts{concordance=TRUE}
\begin{article}
\title{Mapping levels of a factor}
\subtitle{The \pkg{gdata} package}
\author{by Gregor Gorjanc}
\maketitle
\section{Introduction}
Factors use the levels attribute to store information on the mapping between internal integer codes and character values, i.e. levels. The first level is mapped to internal integer code 1, and so on. Although some users do not like factors, they are more storage-efficient than character vectors, and many functions in base \R{} provide additional value for factors. Sometimes users need to work with the internal integer codes and map them back to a factor, especially when interfacing external programs. The mapping information is also of interest if there are many factors that should share the same set of levels. This note describes \code{mapLevels}, a utility function for mapping the levels of a factor in the \pkg{gdata} package\footnote{from version 2.3.1} \citep{WarnesGdata}.
\section{Description with examples}
Function \code{mapLevels()} is an (S3) generic function that works on the \code{factor} and \code{character} atomic classes. It also works on \code{list} and \code{data.frame} objects containing the previously mentioned atomic classes. \code{mapLevels} produces a so-called ``map'' with names and values. The names are levels, while the values can be internal integer codes or (possibly other) levels; this will be clarified later on. The class of this ``map'' is \code{levelsMap} if \code{x} in \code{mapLevels()} was atomic, and \code{listLevelsMap} otherwise, i.e. for the \code{list} and \code{data.frame} classes. The following example shows the creation and printout of such a ``map''.
<<>>=
library(gdata)
(fac <- factor(c("B", "A", "Z", "D")))
(map <- mapLevels(x=fac))
@
If we have to work with the internal integer codes, we can transform the factor to integer and still get ``back the original factor'' by using the ``map'' as the value of the \code{mapLevels<-} function, as shown below. \code{mapLevels<-} is also an (S3) generic function and works on the same classes as \code{mapLevels}, plus the \code{integer} atomic class.
<<>>=
(int <- as.integer(fac))
mapLevels(x=int) <- map
int
identical(fac, int)
@
Internally, the ``map'' (\code{levelsMap} class) is a \code{list} (see below), but its print method unlists it for ease of inspection. The ``map'' from the example has all components of length 1.
This is not mandatory, as the \code{mapLevels<-} function is only a wrapper around the workhorse function \code{levels<-}, and the latter can accept a \code{list} with components of various lengths.
<<>>=
str(map)
@
Although not of primary importance, this ``map'' can also be used to remap factor levels, as shown below. Components ``later'' in the map take over the ``previous'' ones. Since this is not optimal, I would rather recommend other approaches for ``remapping'' the levels of a \code{factor}, say \code{recode} in the \pkg{car} package \citep{FoxCar}.
<<>>=
map[[2]] <- as.integer(c(1, 2))
map
int <- as.integer(fac)
mapLevels(x=int) <- map
int
@
Up to now the examples have shown a ``map'' with internal integer codes for values and levels for names. I call this an integer ``map''. A character ``map'', on the other hand, uses levels for values and (possibly other) levels for names. This feature is a bit odd at first sight, but it can be used to easily unify levels and internal integer codes across several factors. Imagine you have a factor that is for some reason split into two factors \code{f1} and \code{f2}, and that each factor does not have all the levels. This is not an uncommon situation.
<<>>=
(f1 <- factor(c("A", "D", "C")))
(f2 <- factor(c("B", "D", "C")))
@
If we work with these factors, we need to be careful, as they do not have the same set of levels. This can be solved by specifying the \code{levels} argument appropriately when creating the factors, i.e. \code{levels=c("A", "B", "C", "D")}, or by proper use of the \code{levels<-} function. I say proper, as it is very tempting to use:
<<>>=
fTest <- f1
levels(fTest) <- c("A", "B", "C", "D")
fTest
@
The above example extends the set of levels, but it also changes the levels of the 2nd and 3rd elements of \code{fTest}! Proper use of \code{levels<-} (as shown on the \code{levels} help page) would be:
<<>>=
fTest <- f1
levels(fTest) <- list(A="A", B="B", C="C", D="D")
fTest
@
Function \code{mapLevels} with a character ``map'' can help us in such scenarios to unify the levels and internal integer codes across several factors. Again, the workhorse behind this process is the \code{levels<-} function from base \R{}! Function \code{mapLevels<-} just controls the assignment of an (integer or character) ``map'' to \code{x}. Levels in \code{x} that match the ``map'' values (internal integer codes or levels) are changed to the ``map'' names (possibly other levels), as shown on the \code{levels} help page. Levels that do not match are converted to \code{NA}. An integer ``map'' can be applied to an \code{integer} or a \code{factor}, while a character ``map'' can be applied to a \code{character} or a \code{factor}. The result of \code{mapLevels<-} is always a \code{factor} with possibly ``remapped'' levels. To get one joint character ``map'' for several factors, we need to put the factors in a \code{list} or \code{data.frame} and use the arguments \code{codes=FALSE} and \code{combine=TRUE}. Such a map can then be used to unify the levels and internal integer codes.
<<>>=
(bigMap <- mapLevels(x=list(f1, f2), codes=FALSE, combine=TRUE))
mapLevels(f1) <- bigMap
mapLevels(f2) <- bigMap
f1
f2
cbind(as.character(f1), as.integer(f1), as.character(f2), as.integer(f2))
@
If we do not specify \code{combine=TRUE} (\code{combine=FALSE} is the default behaviour) and \code{x} is a \code{list} or \code{data.frame}, \code{mapLevels} returns a ``map'' of class \code{listLevelsMap}. This is internally a \code{list} of ``maps'' (\code{levelsMap} objects). Both \code{listLevelsMap} and \code{levelsMap} objects can be passed to \code{mapLevels<-} for a \code{list}/\code{data.frame}. Recycling occurs when the length of the \code{listLevelsMap} is not the same as the number of components/columns of the \code{list}/\code{data.frame}.
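The same approach can be applied directly to a \code{data.frame} of factors. The following chunk is only a minimal sketch (not evaluated here), using a small made-up data frame \code{dat}: the combined character ``map'' is assigned back to the data frame, so both columns should end up with the same levels and, consequently, the same internal integer codes.
<<eval=FALSE>>=
## A made-up two-column data frame of factors (illustration only)
dat <- data.frame(f1=factor(c("A", "D", "C")),
                  f2=factor(c("B", "D", "C")))
## One joint character "map" across all columns
(datMap <- mapLevels(x=dat, codes=FALSE, combine=TRUE))
## Assign the map back to unify levels and internal integer codes
mapLevels(dat) <- datMap
sapply(dat, levels)
sapply(dat, as.integer)
@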
Additional convenience methods are also implemented to ease the work with ``maps'':
\begin{itemize}
\item \code{is.levelsMap}, \code{is.listLevelsMap}, \code{as.levelsMap} and \code{as.listLevelsMap} for testing and coercion of user-defined ``maps'',
\item \code{"["} for subsetting,
\item \code{c} for combining \code{levelsMap} or \code{listLevelsMap} objects; the argument \code{recursive=TRUE} can be used to coerce a \code{listLevelsMap} to a \code{levelsMap}, for example \code{c(llm1, llm2, recursive=TRUE)}, and
\item \code{unique} and \code{sort} for \code{levelsMap}.
\end{itemize}
\section{Summary}
Functions \code{mapLevels} and \code{mapLevels<-} can help users map internal integer codes to factor levels and unify the levels, as well as the internal integer codes, among several factors. I welcome any comments or suggestions.
% \bibliography{refs}
\begin{thebibliography}{1}
\providecommand{\natexlab}[1]{#1}
\providecommand{\url}[1]{\texttt{#1}}
\expandafter\ifx\csname urlstyle\endcsname\relax
\providecommand{\doi}[1]{doi: #1}\else
\providecommand{\doi}{doi: \begingroup \urlstyle{rm}\Url}\fi
\bibitem[Fox(2006)]{FoxCar}
J.~Fox.
\newblock \emph{car: Companion to Applied Regression}, 2006.
\newblock URL \url{http://socserv.socsci.mcmaster.ca/jfox/}.
\newblock R package version 1.1-1.
\bibitem[Warnes(2006)]{WarnesGdata}
G.~R. Warnes.
\newblock \emph{gdata: Various R programming tools for data manipulation}, 2006.
\newblock URL \url{http://cran.r-project.org/src/contrib/Descriptions/gdata.html}.
\newblock R package version 2.3.1. Includes R source code and/or documentation contributed by Ben Bolker, Gregor Gorjanc and Thomas Lumley.
\end{thebibliography} \address{Gregor Gorjanc\\ University of Ljubljana, Slovenia\\ \email{gregor.gorjanc@bfro.uni-lj.si}} \end{article} \end{document} gdata/MD50000644000175100001440000002666413115545573011707 0ustar hornikusers0c46058204b94013b5fa456893dc4c00 *ChangeLog 58ea7fd4dc6d1eef72eeedc774865974 *DESCRIPTION 905fe9c5be6e143737163bc6317e6640 *INSTALL d9c89c6d5053540222d6360e8efc127b *NAMESPACE c449f3a8a7472e85450b46bf30440206 *NEWS 92e3ca5e31d594044b8325a441142c37 *R/Args.R 94976a0bed5195b50511e200d1711280 *R/ConvertMedUnits.R 38de1344cd5bc79f32a0b42e48b43965 *R/ans.R 78e0c21cf9e4693553a4174d9d3b3c80 *R/bindData.R df4bed53c71599dde15cbf23b4ca645a *R/case.R ea9399bf2240d9c3a536da45bcf34cd2 *R/cbindX.R d4034554742c82cdf9c7da28c5e5614c *R/centerText.R d1aa5d0677fb46d53b4e4628b18159c2 *R/combine.R 23a1a2a8bf811331e02ccd5668567cf4 *R/drop.levels.R 1e65b516ecac8ecc4d9caa8d66c6e494 *R/duplicated2.R 3896b0c9e1e81f87adb9c876f3ecfbda *R/elem.R 0388011f483caa3314a0688a376f7cc5 *R/env.R 7f43c48b182d910961f643876fefbdf9 *R/findPerl.R 3044762c27f0a3b46a5d7d43e687e407 *R/first.R 4f9c6613afb87d8360ea897bbc485fe7 *R/frameApply.R d6474a86ab986d0cbcc8712797bbb57b *R/getDateTimeParts.R 2c548ba3e816419eae43cfef033bc495 *R/humanReadable.R a4f086ff4289d532ac61ca2ecc5a8862 *R/installXLSXsupport.R 037266e58fb3a611b3a56bb09c898abd *R/interleave.R e9594e1749df0f487eaab95db4253e59 *R/is.what.R 67c1d25af3bb35db744d5534991d3b20 *R/keep.R b7b1031b972faf2efe7445fb5f396132 *R/left.R b8c1e8dbe508af1afa208883503679ec *R/ll.R b75ac1b645046c8028d8014de255bd25 *R/ls.funs.R c88c8f75673c31e0f04a8799a91d1f60 *R/mapLevels.R e7d2246f66781fe99981b2dd083dd6cb *R/matchcols.R f52e020af12751d69476bec5ec0f94c9 *R/mv.R afe62c0075aaa73974eabad0c53cf68c *R/nPairs.R 4bb20d225902c5b0a36cbb36a14f85f7 *R/nobs.R 1169ce3d329eb62aa51b036df6b24a5b *R/object.size.R c3c6e74c9238d0b99b74c09cf76b154d *R/onAttach.R 53d3ccd68f0b4b5d3e2c27d7bfaedc17 *R/read.xls.R 6d18057ee1ad1f831d325420f5578216 *R/rename.vars.R 54fa890d6dea94a2a0a70e9a568030be *R/reorder.R cef4f8eb74136397feee111cf921676f *R/resample.R f126721102aa7f6d258e74bf6215b831 *R/sheetCount.R c05127e236b0b7a350b37eda84993615 *R/startsWith.R 6ff7e70b153cf189af5f517b5df071a1 *R/trim.R 05f7763f8efeef44932e4c47cfc091ff *R/trimSum.R cccb187cbc78989f5c98633224e7d01e *R/unknown.R 0c763fce55062857b133aebac8695d8c *R/unmatrix.R 17a13b5e726e9525e45f2dc6e9c45cc4 *R/update.data.frame.R 7f861f8bb26571cc867193cc47c17c84 *R/update.list.R 24c7667dffcef1d6e327f6748a8c7bdc *R/upperTriangle.R 5a0dcdddde5d9c1ec0d215d12f323500 *R/wideByFactor.R 3aba5bc17da034f229905ab82c8a9db5 *R/write.fwf.R 41c99f4c80f3c613c8173f93ec9c3bc0 *R/xls2sep.R 84c93b4e26e20d6ff792637f41c71df3 *R/xlsFormats.R 494447a9a3b7a389e79a556a1c356caa *data/MedUnits.rda 0c46058204b94013b5fa456893dc4c00 *inst/ChangeLog c449f3a8a7472e85450b46bf30440206 *inst/NEWS ac512b1ebd4d71ac96b2a4d5288d53cb *inst/bin/xls2csv ce0b4437c51faccb3595d986e8acae80 *inst/bin/xls2csv.bat e444b0ed03b42abe356a8ad70f055189 *inst/doc/gregmisc.pdf b19ba078add1b84a300ae1adb5167567 *inst/doc/mapLevels.R a56f1085ce3ebccb98ab046ca15905e2 *inst/doc/mapLevels.Rnw 532c54d6155c3a29655c7e78c30ccb5f *inst/doc/mapLevels.pdf 788f58d8791841c0dd0a9bddfa28b8fe *inst/doc/unknown.R a968a07310286934daaea108e3f915f4 *inst/doc/unknown.Rnw 442cd7cd29a6c16ec69f16b743725683 *inst/doc/unknown.pdf 3622c5d29d09f1a179211f22acf6cdef *inst/perl/Archive/README-Archive-Zip 013677fabc8a49480cca5c10d67dd850 *inst/perl/Archive/Zip.pm da56a4326657fda95d0de93c65ed4006 *inst/perl/Archive/Zip/Archive.pm 
51456309908c0b43ec573b698764f704 *inst/perl/Archive/Zip/BufferedFileHandle.pm d3839740a8b261feac1e977eba4721b1 *inst/perl/Archive/Zip/DirectoryMember.pm 88d6a6c71e83354937b6d16526e60da9 *inst/perl/Archive/Zip/FAQ.pod 613c672c74ec59faa88e958d16f8f2ad *inst/perl/Archive/Zip/FileMember.pm d179050e910601cc9052bc4e38ebd1b5 *inst/perl/Archive/Zip/Member.pm 6c7ce09c710d370be907cf1e70134209 *inst/perl/Archive/Zip/MemberRead.pm c6ecb7b9d336fa23a94929552eba6373 *inst/perl/Archive/Zip/MockFileHandle.pm bdffc1ab114897b87d109e06ffa94068 *inst/perl/Archive/Zip/NewFileMember.pm 5b969994e19eef9b0fda0c756dd529ef *inst/perl/Archive/Zip/StringMember.pm a0680f49434e681325498f3d0ce1147f *inst/perl/Archive/Zip/Tree.pm 5ad94e7c07432859fc85cec9b215b1a1 *inst/perl/Archive/Zip/ZipFileMember.pm a12b3df60b1790a375e98cd7e11526d9 *inst/perl/Crypt/RC4.pm 050d359c44120bd9262cd61b9a773cd6 *inst/perl/Digest/Perl/MD5.pm dfc0b868e0becf87d19a52c24740e5e0 *inst/perl/Graphics/ColorUtils.pm f8109a53128f172d5199998a0774a982 *inst/perl/IO/AtomicFile.pm cb8bf30e73340e4eba233c51dd8b2f34 *inst/perl/IO/InnerFile.pm 5886a657d7e49b133d23f7b2dbe30c21 *inst/perl/IO/Lines.pm 6be2f7b5899b83a897025caf868e2b8b *inst/perl/IO/Scalar.pm aaa5b626b1467f10703f741377f48f45 *inst/perl/IO/Scalar.pm.html 520d9d810f5758f247727f8f2730d71e *inst/perl/IO/ScalarArray.pm 3e242abfa789aff62181bf299d9089e8 *inst/perl/IO/Stringy.pm 546777a943a0b90882709f2b10d317d1 *inst/perl/IO/Wrap.pm 3669bd450d4fc4e6b883fcd7ad604caf *inst/perl/IO/WrapTie.pm 75749fd752f9b91652bcc147694f00a1 *inst/perl/OLE/README-OLE-Storage_Lite bc2eb29f789cb0c16619b5d88e6f6410 *inst/perl/OLE/Storage_Lite.pm ae9cde51f8840b6299d510ddb4360591 *inst/perl/Spreadsheet/ParseExcel.pm 24a716293e90c22e90e53395a14fcb28 *inst/perl/Spreadsheet/ParseExcel/Cell.pm 1abd6d27404bd3ce1ea077bdc647956d *inst/perl/Spreadsheet/ParseExcel/Dump.pm 82f4009b80841e82942de379549527a3 *inst/perl/Spreadsheet/ParseExcel/FmtDefault.pm efa4e5809ddb4d7041093e8115ac50d0 *inst/perl/Spreadsheet/ParseExcel/FmtJapan.pm 2b155083f756a684723fe400966ca944 *inst/perl/Spreadsheet/ParseExcel/FmtJapan2.pm d6eba6e2c95f108a1aac09a42e0fb76c *inst/perl/Spreadsheet/ParseExcel/FmtUnicode.pm 6b8027d0201f9e69d2fb44a3846486cf *inst/perl/Spreadsheet/ParseExcel/Font.pm dc2fcccd7889f61bf96a462a0e26ec7c *inst/perl/Spreadsheet/ParseExcel/Format.pm 797ff88759790280da306ad8fa889c1e *inst/perl/Spreadsheet/ParseExcel/SaveParser.pm f4d6c7214ff3ac8651710960b57c992a *inst/perl/Spreadsheet/ParseExcel/SaveParser/Workbook.pm 42a270a338246bb0489cb787f538c8a4 *inst/perl/Spreadsheet/ParseExcel/SaveParser/Worksheet.pm b05f0f8e84a9ad9fb155af83f2d546e7 *inst/perl/Spreadsheet/ParseExcel/Utility.pm 066d26b4c80257a6016f931e6e8ae098 *inst/perl/Spreadsheet/ParseExcel/Workbook.pm 8b53d89dc8969e9840084fc34524bc55 *inst/perl/Spreadsheet/ParseExcel/Worksheet.pm 0209fe512b8c884afc8000c057723a2f *inst/perl/Spreadsheet/ParseXLSX.pm c9b8eab5a257c27463c22af1316e7241 *inst/perl/Spreadsheet/README-ParseExcel 359fb41631453c30b1c8341786d00195 *inst/perl/Spreadsheet/README-XLS 441b297006e72c914e99851da06d4826 *inst/perl/VERSIONS dee41d67b156f0f146020ee91157ac56 *inst/perl/XML/Twig.pm 41d922f764505fb973db5f098ece6c44 *inst/perl/XML/Twig/XPath.pm b71548d8785cc55810b5a7d903c52012 *inst/perl/install_modules.pl b0ff06837bf24d17a67e4da5431c3fb9 *inst/perl/module_tools.pl 44dd4baf6c1ef3999708143f3ab007fd *inst/perl/sheetCount.pl 44dd4baf6c1ef3999708143f3ab007fd *inst/perl/sheetNames.pl f11614dea00e704edd1582714d1792e2 *inst/perl/supportedFormats.pl 4559c6f484b4e25e5cf037efef73c930 
*inst/perl/xls2csv.pl 4559c6f484b4e25e5cf037efef73c930 *inst/perl/xls2tab.pl 4559c6f484b4e25e5cf037efef73c930 *inst/perl/xls2tsv.pl 799008428d5dab63e1bde17859cff753 *inst/xls/ExampleExcelFile.xls 88d5db30d66db593a854e441fcd79fca *inst/xls/ExampleExcelFile.xlsx beadf509a237ee40d483bd0ac09672d9 *inst/xls/ExampleExcelFile_1900.xls bdc103a6d2e0e5825c19add38ad2770b *inst/xls/ExampleExcelFile_1900.xlsx 8cfb85e7bfdc636d9b4bfb5dd251b41e *inst/xls/ExampleExcelFile_1904.xls c2cc0620d5e325d16db0fab03afdba9e *inst/xls/ExampleExcelFile_1904.xlsx 7c16d3cfd37123f3c321c12a92b9269a *inst/xls/iris.xls 8a0467a49bfb791295925cb6a372b1ff *inst/xls/latin-1.xls 36e751188a4e3d37ce3be57d2152922a *inst/xls/latin-1.xlsx a8cf07872660e85dab41367e2b4f08e5 *inst/xls/wide.xls 47183196217c8b121f88bfd588beffed *inst/xls/wide.xlsx 24a1020fb457c398c620457ad114e245 *man/Args.Rd b0dec88638d111fb8b0c75ebfd99cdfd *man/ConvertMedUnits.Rd 673f2150582676c574a306697b29ffa5 *man/MedUnits.Rd 6fcbcc90f241aa78bc0cd4ba479ac9bd *man/ans.Rd 140de526fc0a3c819dcd687e7bb3ed77 *man/bindData.Rd a4c69a81cca648bfdc2f6913229b4e0e *man/case.Rd 640626fce10a1b97ad82fd594f18b058 *man/cbindX.Rd ea80105996fa8d335490899cc37a5c35 *man/centerText.Rd 0c1246172eb620aed35dbfbc3e411b74 *man/combine.Rd fca345001344f5bfcbaac5f79e6f0762 *man/drop.levels.Rd a471e3c2a2b1ca5550a1822d5620e848 *man/duplicated2.Rd 28b102aeb6f3c62fe6b3b40fefa4c005 *man/elem.Rd e45e3959608492f8e12298d221e605e3 *man/env.Rd cf77c8066c350ae035771a0f28cccc16 *man/first.Rd 73aeb6a00e393012dfc4fb1e0b8fc15a *man/frameApply.Rd dc8a3653f3c70778ccce2d926194adce *man/gdata-defunct.Rd 34cfa7a16878f7d2032b49f350eec4a5 *man/gdata-package.Rd 38580f70b4b3af84ebfa4b952dd7021b *man/getDateTimePart.Rd 76c13487780d8ea770544678350e3f17 *man/humanReadable.Rd 6beab4e8b711110199599f8427d1d042 *man/installXLSXsupport.Rd 26bb8febce31195f8efcf071270913bc *man/interleave.Rd 0d70b8cd533a830a68103355b1054ec5 *man/is.what.Rd 8c50e81caf14aebb11d908fbcc9fe2de *man/keep.Rd c9e910926112da9792065043cec7d46d *man/left.Rd 4bfbaf0835fff3cbc3ba8f17f9823bfb *man/ll.Rd dfe069423abb32eaed74b2daad0a56d8 *man/ls.funs.Rd 3723cf974f55156ced750e3235445f1b *man/mapLevels.Rd 3ca3aeaf85340d25fc36c6a5275fce2c *man/matchcols.Rd 3e88754395ac82201e29f95c9f1b74b7 *man/mv.Rd 4789e9c9a034bc5665d93c80579729ef *man/nPairs.Rd 4e3ba1601ecb171596b609516d2e8911 *man/nobs.Rd 6604b93792d11b827aaa6c40c9a9cb62 *man/object.size.Rd 2ad3d3570252d4954bdf4d81ea3404fa *man/read.xls.Rd b73a198509b4fdcb4a24a85909309532 *man/rename.vars.Rd da64c27965060d0f35bed084a416a57a *man/reorder.Rd fc28b1b680997fd8ff2ab73478db4872 *man/resample.Rd 55d4019112a5759791610fd48c20d4b3 *man/sheetCount.Rd 1a3958cb8dfc8a3d50d73e51bd280a08 *man/startsWith.Rd 6d61fa9945832366fbdfbab74a203ee1 *man/trim.Rd 347e8e1afc135b4ce6f5f3a7face76d5 *man/trimSum.Rd 4f5e0665c2c046b93f45963387b7d44f *man/unknown.Rd 5b789bc21ee7f46f8a9b138f1ed36829 *man/unmatrix.Rd 4d464263185abe092c86553b0ab526ca *man/update.list.Rd 7427fd1c31684c2392095c99c8fc7006 *man/upperTriangle.Rd ad219282ec6913083b82073691923f9b *man/wideByFactor.Rd 33f5856b0960a91d03807b781e664704 *man/write.fwf.Rd 8a9c1fe9d0316d0b98e6d353c2b7a6cf *man/xlsFormats.Rd 64fc1cde149ab7f61c266fdb295a6404 *tests/runRUnitTests.R 89f2fc0a5b6dd6ff34fa2facd5d36127 *tests/test.humanReadable.R ccffb94b714933312996808118fea16f *tests/test.humanReadable.Rout.save 8d734967558d8d545de947ddd4ba06ac *tests/test.read.xls.R 0db606f7f501390fda8163bc90f12559 *tests/test.read.xls.Rout.save db19f080b3ed7525389cecf47481409f *tests/test.reorder.factor.R 
bd7f72149f9b88bfa6dad4255c6f3a8c *tests/test.reorder.factor.Rout.save dee3232474b92bcdf1ad75ca31080792 *tests/test.write.fwf.eol.R b32b0eb85790d71ea6025ae5eca71fb1 *tests/tests.write.fwf.R 7b4107a3201364d085d5a55e567fd3b9 *tests/tests.write.fwf.Rout.save ed871fe534197367139b63b6c8309396 *tests/unitTests/Makefile 31c57f48835f4994ee0de16334d79d5b *tests/unitTests/runit.bindData.R a0a06add3d810b0e124af7caba50e183 *tests/unitTests/runit.cbindX.R cb27bc15a8122fdc3b71bbe7e06eaf07 *tests/unitTests/runit.drop.levels.R 96281d9a32875f5c84672082985d4176 *tests/unitTests/runit.getDateTimeParts.R 0ed10955b2716673d5ea0e9c071418fb *tests/unitTests/runit.mapLevels.R 1f14c2e453df84fb51b90e2c97ea8b32 *tests/unitTests/runit.nPairs.R d79b878fa342ddbcea236ac115b24b15 *tests/unitTests/runit.reorder.factor.R 7682181a6e2ec434130b1ea987e4d5b0 *tests/unitTests/runit.trim.R 42330b4f7f6bd1cd335f59088d6bf282 *tests/unitTests/runit.trimSum.R 80c6bc219e067e6c933b61dc3b3042c1 *tests/unitTests/runit.unknown.R 07c0f9fc38612b196f2d7449c133fde2 *tests/unitTests/runit.wideByFactor.R 8bd376ab7034b5f9016caff11eecbb92 *tests/unitTests/runit.write.fwf.R a7982b90f82857e34a253df2be42d7c1 *vignettes/Rnews.sty a56f1085ce3ebccb98ab046ca15905e2 *vignettes/mapLevels.Rnw a968a07310286934daaea108e3f915f4 *vignettes/unknown.Rnw gdata/DESCRIPTION0000644000175100001440000000345113115545573013072 0ustar hornikusersPackage: gdata Title: Various R Programming Tools for Data Manipulation Description: Various R programming tools for data manipulation, including: - medical unit conversions ('ConvertMedUnits', 'MedUnits'), - combining objects ('bindData', 'cbindX', 'combine', 'interleave'), - character vector operations ('centerText', 'startsWith', 'trim'), - factor manipulation ('levels', 'reorder.factor', 'mapLevels'), - obtaining information about R objects ('object.size', 'elem', 'env', 'humanReadable', 'is.what', 'll', 'keep', 'ls.funs', 'Args','nPairs', 'nobs'), - manipulating MS-Excel formatted files ('read.xls', 'installXLSXsupport', 'sheetCount', 'xlsFormats'), - generating fixed-width format files ('write.fwf'), - extricating components of date & time objects ('getYear', 'getMonth', 'getDay', 'getHour', 'getMin', 'getSec'), - operations on columns of data frames ('matchcols', 'rename.vars'), - matrix operations ('unmatrix', 'upperTriangle', 'lowerTriangle'), - operations on vectors ('case', 'unknownToNA', 'duplicated2', 'trimSum'), - operations on data frames ('frameApply', 'wideByFactor'), - value of last evaluated expression ('ans'), and - wrapper for 'sample' that ensures consistent behavior for both scalar and vector arguments ('resample'). Depends: R (>= 2.3.0) SystemRequirements: perl (>= 5.10.0) Imports: gtools, stats, methods, utils Version: 2.18.0 Date: 2017-06-05 Author: Gregory R. Warnes, Ben Bolker, Gregor Gorjanc, Gabor Grothendieck, Ales Korosec, Thomas Lumley, Don MacQueen, Arni Magnusson, Jim Rogers, and others Maintainer: Gregory R. Warnes License: GPL-2 NeedsCompilation: no Suggests: RUnit Packaged: 2017-06-05 21:27:42 UTC; gwarnes Repository: CRAN Date/Publication: 2017-06-06 15:34:19 UTC gdata/ChangeLog0000644000175100001440000012401513115345675013137 0ustar hornikusers2017-06-05 warnes * [r2154] DESCRIPTION: Fix type in DESCRIPTION date field. * [r2153] .Rbuildignore: Specify which file patterns to ignore when building R package file. 
* [r2152] DESCRIPTION, tests/test.humanReadable.Rout.save, tests/test.read.xls.Rout.save, tests/test.reorder.factor.Rout.save, tests/tests.write.fwf.Rout.save, vignettes/mapLevels.Rnw: Update package version and stored test output. * [r2151] inst/doc/Rnews.sty: Remove obsolete Rnews.sty file from inst/doc. * [r2150] R/startsWith.R: gdata::startsWith() now uses base::startsWith() to do the actual comparison, after hanlding ignore.case and trim arguments. * [r2149] man/trim.Rd: Add reference to 'new' base function 'trimws'. * [r2148] NAMESPACE, R/update.data.frame.R, R/update.list.R, man/update.list.Rd: Drop 'update.data.frame' until there is time to work on it. 2016-08-12 warnes * [r2130] NAMESPACE: Add mv to exported namespace 2016-05-31 warnes * [r2128] R/humanReadable.R: Fix typo that forced users of humanReadable() to provide two elements to the 'justify' argument. The correction allows a single value to be provided which will be expanded to two internally. 2016-02-05 warnes * [r2077] man/update.list.Rd: Add documentation for update() data.frame method. * [r2076] R/mv.R, man/mv.Rd: Add mv() function to rename an object. 2016-02-03 warnes * [r2075] NAMESPACE: - Add update() methods list and data.frame - Add 'first<-' and 'last<-' assignment methods * [r2074] R/update.data.frame.R, R/update.list.R, man/update.list.Rd: Add update() methods for lists and data frames * [r2073] R/first.R, man/first.Rd: Add assignment versions of first() and last() * [r2072] R/rename.vars.R: Improve logging and error reporting for remove.vars() 2015-10-15 warnes * [r2068] R/installXLSXsupport.R: Remove unused call to tempdir(). 2015-07-22 warnes * [r2062] DESCRIPTION, NAMESPACE, tests/test.humanReadable.Rout.save, tests/test.read.xls.R, tests/test.read.xls.Rout.save, tests/test.reorder.factor.Rout.save, tests/tests.write.fwf.Rout.save: Renamed 'test' directory to 'tests', commented out tests for lme4 which has a changed API 2015-07-03 warnes * [r2056] DESCRIPTION, inst/ChangeLog, inst/NEWS: Update for gdata 2.17.0 2015-06-29 warnes * [r2055] inst/ChangeLog: Update ChangeLog * [r2054] tests/test.humanReadable.Rout.save, tests/test.read.xls.R, tests/test.read.xls.Rout.save, tests/test.reorder.factor.Rout.save, tests/tests.write.fwf.Rout.save: Add note for R CMD check to help reviewers not freak out when diffs occur because of absence of a PERL library needed to support XLSX files. * [r2053] R/upperTriangle.R, man/upperTriangle.Rd: Add 'byrow' argument to lowerTriangle()/upperTriangle() functions. 2015-05-02 warnes * [r2018] Rename 'trunk' to 'pkg' for compatibility with R-forge 2015-04-29 warnes * [r1993] Update ChangeLog and NEWS again. * [r1992] Apparentely read.csv() needs different combination of "fileEncoding=`latin1`" and "encoding=`latin1`" on unix and windows platforms. * [r1991] In mapLevels(), use sapply() instead of lapply() to avoid warning message. * [r1990] Displaying all the latin1 characters for diff isn't reliable across platforms. Simply summarize the latin1 data instead. * [r1989] Display read latin1 data so that diff can catch changes. 2015-04-28 warnes * [r1988] Update ChangeLog for gdata 2.16.1 * [r1987] Update NEWS for gdata 2.16.1 * [r1986] Remove no-longer defined methods. * [r1985] Summary: Minor formatting changes, use rnorm() for X in example, and use set.seed() for consistent results. * [r1984] Summary: Replace unicode single-quote characters with ASCII ones. * [r1983] Summary: Call base::sort instead of sort, which has been redefined by arguments. 
* [r1982] Update NEWS and ChangeLog. * [r1981] Bump version number. * [r1980] Remove CVS header tag. * [r1979] Update version requirement for R (>= 2.3.0) and perl (5.10.0). * [r1978] - first() and last() are now simply wrappers to utils::head() and utils::tail() with a default 'n=1' instead of 'n=6'. - Move code for left() and right() into a separate file. * [r1977] If arguments 'X' or 'FUN' is supplied to reorder.factor(), mimic the behavior of stats::reorder.default() rather than trying to call it via NextMethod. 2015-04-25 warnes * [r1974] List needs a conjuction * [r1973] Fix spelling errors & typos * [r1972] Fix typographical errors * [r1971] Update NEWS and ChangeLog (again) * [r1970] Remove aggregate.table() entirely * [r1969] 'test.humanReadable.R' needed set.seed() to make the results consistent. * [r1968] Update .save files * [r1967] Missed on commit. * [r1966] Modfy write.fwf() to properly handle matrix argument, avoiding conversion to dataframe unless rownames=TRUE. Add corresponding unit tests. * [r1965] Installing PERL modules was failing. Adding CPAN configuration option fixed the problem. * [r1964] Error message about executable name was missing one alternative * [r1963] Better describe gdata contents * [r1962] is.* and as.* aren't generics * [r1961] Add 'justify' argument to print and format object_sizes methods * [r1960] Add 'justify' argument to print and format object_sizes methods * [r1959] Remove stray call to 'browser' * [r1958] Update DESCRIPTION, ChangeLog, and NEWS * [r1957] Complete work on object.size(), object_sizes methods, and humanReadable. * [r1956] Add error message if Excel file format is too old 2015-04-23 warnes * [r1953] Update NEWS and ChangeLog * [r1952] - write.fwf() now properly supports matrix objects, including matrix objects wihtout column names. (Reported by Carl Witthoft.) * [r1951] Remove 'use POSIX' from xls2csv.pl since it is no longer needed * [r1939] Update NEWS and ChangeLog * [r1938] reorder.factor() now hands off processing to stats:::reorder.default() when either 'X' or 'FUN' is specified. 2015-04-22 warnes * [r1937] Update NEWS and ChangeLog for changes to humanReadable() * [r1936] Fix 'units' argument of humanReadable() * [r1935] Update object.size() man page to reflect change in class of return value from 'object_size' to 'object_sizes' * [r1934] Update NEWS and ChangeLog for gdata 2.16.0 * [r1933] Modify gdaata:object.size to generate S3 objects of class 'object_sizes' (note the final 's') to avoid conflicts with methods in utils for object_size. * [r1932] Correct behavior of reorder.factor() when argument 'X' is supplied by delgating to stats:::reorder.default() 2015-04-14 warnes * [r1929] Update ChangeLog * [r1928] Remove editorializing * [r1927] Update NEWS and ChangeLog for gdata 2.15.0 * [r1926] Add 'scientific' argument to write.fwf to allow control of whether numeric values can be displated using scientific notation. * [r1925] Replace depricated PERL function POSIX::isnumeric with equivalent regexp * [r1924] Add gdata ChangeLog to SVN 2015-04-10 warnes * [r1922] Update files for gdata 2.15.0 2015-04-08 warnes * [r1919] Move first/last/left/right to from gtools to gdata 2014-08-28 warnes * [r1883] Everything works now! * [r1882] Suppress annoying warnings in Spreadsheet::ParseXLS::FmtDefalt. 
* [r1881] Add tests and corresponding test files for 1900 and 1904 based XLX/XLSX files * [r1880] Complete transition from Spreadsheet::XLSX to Spreadsheet::ParseXLSX * [r1879] Handle Excel files created on the Mac, where by default Excel uses 1904-01-01 as the baseline for dates, rather than the usual 1900-01-01. * [r1878] Remove dotfiles * [r1877] Update for release * [r1876] Add test for handling fo very wide xls and xlsx files. * [r1875] Add test for handling fo very wide xls and xlsx files. * [r1874] Modify code to use latest version of Spreadsheet::ParseExcel and to replace Spreadsheet::XLSX woth Spreadsheet::ParseXLSX * [r1873] Update Spreadsheet::ParseExcel, add Spreadsheet:ParseXLSX, add dependencies 2014-04-05 warnes * [r1801] Apply same changes to NAToUnknown that were previously applied to unknownToNA for POSIXlt. * [r1800] Update NEWS with latest changes * [r1799] Call stats::nobs instead of stats:::nobs.default within gdata::nobs.default. This avoids R CMD check warning. * [r1798] Don't compare optional POSIXlt field. Explicitly compare POSIXlt, with special handling of '-1' unknown value. * [r1797] Don't use gdata::: prefix to access gdata function * [r1796] Fix syntax error in DESCRIPTION file. * [r1795] Package name needs to be defined outside of if test. * [r1794] Style file needed * [r1793] The issue Brian pointed out was an error in the isUnknown() code, not an error in the unit tests! * [r1792] Apply changes Brian recommned to NAtoUnknown as well as unknownToNA. * [r1791] Update NEWS file * [r1790] Don't need latex .dtx source file * [r1789] Move vignettes from inst/doc/ to vignettes/ * [r1788] Change 'aggregate.table' from deprecated to defunct. * [r1787] Complete changes so that the unit tests are run as part of R CMD check * [r1786] Update NEWS for gdata 2.13.4 * [r1785] Update NAMESPACE file to remove deleted function * [r1784] Move unit test files back to inst/unitTests. Fix up runRUnitTests.R to work properly in the new location * [r1783] - For unit tests, don't check for equality of optional POSIXlt components. (Bug reported by Brian Ripley). * [r1782] Move unit test code into the (now) standard location 2014-03-19 arnima * [r1777] change warning message to R standards 2013-12-18 arnima * [r1758] Retain original list order unless sort=FALSE; also stop if unnamed list 2013-12-16 warnes * [r1757] Trim will now remove all types of leading/trailing whitespace by using the [:blank:] character class. 2013-06-29 warnes * [r1692] Update NEWS for second try for gdata 2.13.2 * [r1691] Simplify ll() by stuffing list arguments into an environment, avoiding the need to use attach/detach. 2013-06-28 warnes * [r1685] Update NEWS for gdata 2.13.2 * [r1684] Minor update to tests/*.Rout.save * [r1683] Add on.exit() handler to ensure a matching detach occurs when attach is used in ll() * [r1682] Update for gdata 2.13.2 * [r1681] Improve deprecated message 2013-03-24 warnes * [r1645] Update test files for code changes * [r1644] Fix formatting in NEWS * [r1643] Replaced calls to depreciated function ".path.package" with the new public function "path.package". 2013-01-14 warnes * [r1639] Replace (obsolete) '.path.package' with 'find.package' function. 2012-09-20 warnes * [r1622] Correct .Rd file errors detected by 'R CMD check'. * [r1621] Add duplicated() and ans() to the NAMESPACE. * [r1620] Update for gdata 2.13.0. * [r1619] Fix typographic error. * [r1618] Add 'ans()' and 'duplicated()' contributed by Liviu Andronic. 2012-09-19 warnes * [r1617] Correct column names. 
Unit columns were reversed and misspelled. * [r1616] Add ignore.stderr to system command in sheetCmd() to prevent stderr messages from being included in the captured output from the perl script. 2012-09-12 warnes * [r1606] Update for gdata 2.12.0 * [r1605] 'stats::aggregate' was made into a generic on 27-Jan-2010, so that attempting to call 'aggregate' on a 'table' object will now incorrectly call 'aggregate.table'. Since 'aggregate.table' can be replaced by a call to tapply using two index vectors, e.g. aggregate.table(x, by1=a, by2=b, mean) can be replaced by tapply(x, INDEX=list(a, b), FUN=mean), the 'aggregate.table' function will now display a warning that it is depreciated and recommending the equivalent call to tapply. It will be removed entirely in a future version of gdata. * [r1604] Don't ignore .Rnw files, but do ignore .svn files. 2012-09-11 warnes * [r1603] Clarify workding of DROP argument to interleave(). * [r1602] Replace call to aggregate.table() with equivalent tapply() call since aggregate.table() is being depreciated. 2012-08-22 warnes * [r1601] Update DESCRIPTION and NEWS for gdate 2.11.1. * [r1600] Add example for read.xls() that shows how to use the fileEncoding argument to read in latin-1 encoded data. * [r1599] Add XLSX test for latin-1 characters, and look for them in their new location in inst/xls/. * [r1598] add XLSX version of latin-1.xls * [r1597] Add test file and code to ensure that read.xls() can properly handle files with alternative encodings. latin-1.xls contains each of the non-ascii latin-1 special characters in both the column headings and the body of the file. * [r1596] Change code to have R read the csv/tab data from the file rather than from the connetion we made, so that file encodings can be properly handled. * [r1595] Always close the connection. 2012-08-13 warnes * [r1594] Remove trailing space from output line. 2012-06-18 warnes * [r1567] Update NEWS for 2.11.0 release. * [r1566] Bump version number and add SystemRequirements for perl. * [r1565] read.xls() and supporting functions now allow blank lines to be preserved, rather than skipped, by supplying the argument "blank.lines.skip=FALSE". The underlying perl function has been extended to suppor this via an optional "-s" argument which, when present, *preserves* blank lines during the conversion. 2012-06-13 warnes * [r1564] - nobs.default needs to handle logical vectors in addition to numeric vectors. - update DESCRIPTION and NEWS for 2.10.6. * [r1563] nobs.default needs to handle logical as well as numeric vectors. 2012-06-08 warnes * [r1562] Update DESCRIPTION and tests * [r1561] fix incorrect function name * [r1560] Mark example for installXLSXsupport() to not be executed durin R CMD check. * [r1559] stats:::nobs.default and stats::nobs.lm require R > 2.13.0, so add this as a dependency. 2012-06-06 warnes * [r1552] Update for release 2.10.2 * [r1551] Fix bugs in nobs.default. * [r1550] Update to reflect warning on startup that 'nobs' hides 'stats::nobs'. * [r1549] Remove stray non-ASCII characters. * [r1548] The nobs() dispatch method must be defined in the gdata namespace to pick up the definition of gdata::nobs.default. * [r1547] Update DESCRIPTION and NEWS for 2.10.1 release. * [r1546] Define aliases for 'nobs' and 'nobs.lm' to support backward compatibility for packages depending on gdata. * [r1545] Update DESCRIPTION and NEWS for 2.10.0 release * [r1544] - Add manual page and NAMESPACE entry for startsWith(). - Add 'ignore.case' argument to startsWith(). 
* [r1543] Update to match new code. * [r1542] Replace non-ASCII characters. * [r1541] Add na.strings to read.xls call to convert "#DIV/0!" to NA. 2012-06-05 warnes * [r1540] Remove nobs method dispatch and lm methods since these are now provided by the stats package. * [r1539] Spell out arguments to ls() to avoid R CMD check warnings. * [r1538] Add .Rinstignore file to omit latex style and source files from distributed inst/doc directory. * [r1537] - Add NULL definition of MedUnits to avoid R CMD check warning. - Specify local environment when calling data() so that MedUnits gets defined in the function's environment rather than the global environment. * [r1536] Fix error in ls.funs() that occurs when there are no objects in the environment. * [r1535] Avoid warning by calling utils::object.size rather than Internal(object.size(x)) 2012-05-31 warnes * [r1534] - Remove dispatch function 'nobs' and method 'nobs.lm' since these are now provided by the R 'stats' package. 2012-05-04 warnes * [r1532] Update for next release * [r1531] Add ls.funs() to show functions defined in the specified environment. * [r1530] Fix enumerate syntax. 2012-04-03 warnes * [r1522] Add startsWith() function. 2011-10-05 warnes * [r1516] Fix typo 2011-09-30 warnes * [r1515] Update DESCRIPTION and README for 2.9.0 release. * [r1514] Update DESCRIPTION and README for 2.9.0 release. 2011-09-20 warnes * [r1508] Improve xls2csv() man page * [r1507] Add case() function, a vector equivalent of the switch() function * [r1506] Add case() function, a vector equivalent of the switch() function 2011-09-02 warnes * [r1500] Add 'centerText' function to center text strings for a specified width. * [r1499] Add 'centerText' function to center text strings for a specified width. 2011-04-16 warnes * [r1469] Update for release 2.8.2 2011-04-15 warnes * [r1468] Fix errors on windows when R or Perl install path includes spaces by properly quoting the path. * [r1467] Fix error in xlsFormat() on windows when R or Perl install path includes spaces by quoting the path. 2011-01-15 ggorjan * [r1465] Adding summary method for nPairs 2010-11-12 warnes * [r1462] Update NEWS for gdata 2.8.1 * [r1461] Update DEScription file for 2.8.1 release * [r1460] Update test output to match latest code * [r1459] Modify write.fwf() to capture and pass on additional arguments for write.table(). This resolves a bug reported by Jan Wijffels. 2010-11-01 arnima * [r1453] Minor improvement in Args.Rd help page 2010-10-19 warnes * [r1452] Avoid use of file.access() which is unreliable on Windows network shares. 2010-07-08 ggrothendieck2 * [r1448] findPerl call added to xls2sep 2010-07-07 ggrothendieck2 * [r1447] small improvements to read.xls.Rd 2010-05-03 warnes * [r1439] Rename installXLSXModules() to installXLSXsupport() and provide documentation for it. * [r1438] Update news for gdata 2.8.0 * [r1437] Add .onAttach function to check & inform user if perl is available, to check whether XLS and XLSX formats are avaiable, and to run the (new) installXLSXModules() functon to attempt to install the necessar libraries if not. Added installXLSXModules() function. 2010-05-02 warnes * [r1436] Correct error in xlsFormat example * [r1435] Update perl code to work (but generate warnings) when Zlib or SpreadSheet::XLXS is not instaled. Also update Greg's email address 2010-02-21 ggrothendieck2 * [r1423] isOpen problems fixed (isOpen must have changed in R since this worked in earlier versions). Also nba.xls link in read.xls.Rd disappeared. Replaced with similar link. 
2010-02-20 ggrothendieck2 * [r1422] improved INSTALL file 2010-02-19 ggrothendieck2 * [r1421] added findPerl to locate ActiveState Perl on Windows if perl= not specified and Rtools perl would have otherwise been used. Also added INSTALL file. 2010-01-28 warnes * [r1419] Update for release 2.7.1 * [r1418] xls2sep(): Show output of perl call when verbose=T * [r1417] More Win32 fixes * [r1416] More work on Win32 building * [r1415] Support building Compress::Raw::Zlib perl package under windows. 2010-01-26 warnes * [r1413] Fix typos * [r1412] Show more details in sheetCount() when verbose=TRUE 2010-01-24 warnes * [r1411] Replace two calls to 'dQuote', to 'dQuote.ascii' * [r1408] Remove auto-generated pdf files from svn * [r1407] create 'distclean' to remove perl binary dir, currently mac-only * [r1406] Make read.xls() and xls2sep() quieter when verbose=FALSE * [r1405] Add tests for read.xls, sheetCount, and sheetNames * [r1404] Modify makefile to 1) clean up after build, 2) make tar non-verbose * [r1403] Close connections when done. * [r1402] Fix typo * [r1401] Fix R CMD CHECK errors * [r1400] Use the original gz file for Compress::Raw::Zlib to avoid issues with 'non-platform-independent' filename error in R CMD CHECK * [r1399] Rename files to remove R CMD check error * [r1398] Update for 2.7.0 release * [r1397] Add new functions to NAMESPACE * [r1396] Add Compress::Raw::Zlib code * [r1395] Add/Update documentation * [r1394] Minor formatting change * [r1393] Add additional example files * [r1392] Combine sheetCount.pl and sheetNames.pl and modify to support Excel 2007 'xlsx' format * [r1391] Complete changes to handle Excel 2007 'xlsx' files * [r1390] Add additional Perl modules to support Excel 2007 'xlsx' files 2010-01-24 ggrothendieck2 * [r1389] added sheetNames.Rd (documenting sheetNames/sheetCount) and updated NAMESPACE file. * [r1388] fixed spacing problem in NEWS 2010-01-23 warnes * [r1387] Check if parsing the xls file succeeds... Current code doesn't handle new XML-based format * [r1386] Remove perl 'Spreadsheet:XLSX' module since it depends on Compress-Raw-Zlib, which probably won't be available on most machines, and I don't have time to figure out how to get R to build it properly when gdata is installed. * [r1385] Add perl 'Spreadsheet:XLSX' module to support new Excel XML format files * [r1384] Add xls2tsv() convenience wrapper to xls2sep() * [r1383] Update to match new xls2csv.pl code, allow specification of sheets by name, support CSV and TAB delimited files using the same code, other minor changes. * [r1382] Add sheetNames() function to extract the names from XLS files * [r1381] Fix xls2csv.bat * [r1380] If only one sheet is present in the file, don't insert the sheet name into the filename * [r1379] Add additional test/example Excel files * [r1378] Modify xls2csv.pl script to: - Use tab-delimiter and .tsv or .tab extension if called with the name xls2tsv.pl or xls2tab.pl, respectively. This allows a single source file and two symlinks to be used intstead of maintaining several almost-identical files. 
- Allow selection of sheets by name - Provide better error checking - Other code improvements * [r1377] Add perl scripts to extract worksheet names and sheet count from Excel files 2010-01-22 warnes * [r1376] Upgrade Perl OLE::StorageLight module to version 0.19 * [r1375] Upgrade perl Spreadsheet::ParseExcel to version 0.56 * [r1374] Add complete list of contributors 2010-01-22 arnima * [r1373] Minor improvement in help page * [r1371] Many small improvements to documentation of Arni's five functions 2010-01-22 warnes * [r1370] - Move xls2csv(), xls2tab(), xls2sep() to a separate file - Move qQuote.ascii to a separate file - Bug Fix: xls2csv(), xls2tab() failed to pass the provided 'perl' parameter to xls2sep() - New Feature: xls2sep() (and hence xls2csv, xls2tab, and read.xls) now supports ftp URLs. 2009-12-06 arnima * [r1369] Minor improvements of Args(). * [r1368] Improved ll() so user can limit output to specified classes 2009-11-16 arnima * [r1366] ll(.GlobalEnv) does not crash anymore 2009-08-20 warnes * [r1357] Replace \ldots with \dots to make the new R CMD CHECK happy. 2009-08-19 warnes * [r1355] Update for 2.6.1 release * [r1354] Modify unit tests to avoid issues related to zime zones. 2009-08-05 warnes * [r1353] Update vignettes for 2.6.0 release * [r1352] Fix formatting warning in frameApply man page 2009-07-16 ggorjan * [r1350] Reverting recent change and clarifying the meaning. 2009-07-16 warnes * [r1349] Add contents of \value section for resample() man page * [r1348] Update test output to remove R CMD check warning * [r1347] Update ChangeLog and NEWS for gdata 2.6.0 release * [r1346] Update DESCRIPTION file for gdata 2.6.0 * [r1345] Correct Greg's email address * [r1344] Correct minor typos in write.fwf() man page * [r1343] Correct page for resample() * [r1342] Add support for using tab for field separator during translation from xls format in read.xls 2009-04-19 arnima * [r1314] Changed object.size(object) to unclass(object.size(object)). 2008-12-31 ggorjan * [r1312] Documenting changes and exporting the functions. * [r1311] Enhanced function object.size that returns the size of multiple objects. There is also a handy print method that can print size of an object in "human readable" format when options(humanReadable=TRUE) or print(object.size(x), humanReadable=TRUE). * [r1310] New function wideByFactor that reshapes given dataset by a given factor - it creates a "multivariate" data.frame. * [r1309] New function nPairs that gives the number of variable pairs in a data.frame or a matrix. * [r1308] New functions getYear, getMonth, getDay, getHour, getMin, and getSec for extracting the date/time parts from objects of a date/time class. * [r1307] New function bindData that binds two data frames into a multivariate data frame in a different way than merge. * [r1306] New function .runRUnitTestsGdata that enables run of all RUnit tests during the R CMD check as well as directly from within R. 2008-12-20 ggorjan * [r1305] * [r1304] To remove some output in the R CMD check 2008-08-05 ggorjan * [r1300] - Increased version to 2.5.0 - New function cbindX that can bind objects with different number of rows. - write.fwf gains width argument. Unknown values can increase or decrease the width of the columns. Additional tests and documentation fixes. 2008-06-30 arnima * [r1299] Simplified default 'unit' argument from c("KB","MB","bytes") to "KB". 
2008-05-13 warnes * [r1270] Update NEWS file for 2.4.2 * [r1269] Use path.expand() to give proper full path to xls file to be translated by read.xls() * [r1268] Modifed read.xls() failed to return the converted data... fixed. * [r1267] Correct broken patch for open-office support * [r1266] For read.xls() and xls2csv(): - Implement more informative log messages when verbose=TRUE - Quote temporary file name to avoid errors when calling perl to do the work. - Add better error messages, particularly when perl fails to generate an output .csv file. Update version number in DESCRIPTION. 2008-05-12 warnes * [r1265] Patch to correct issue with OpenOffice-created XLS files. Thanks to Robert Burns for pointing out the patch at http://rt.cpan.org/Public/Bug/Display.html?id=7206 2008-03-25 warnes * [r1250] Update for version 2.4.1 * [r1249] Example iris.xls file didn't complete & properly formatted iris data set. Fixed. * [r1248] Update perl modules to latest versions 2008-03-24 warnes * [r1247] Fix typo in win32 example for read.xls() 2008-03-11 warnes * [r1246] Add xls2csv to exported function list 2008-01-30 warnes * [r1241] Update DESCRIPTION and NEWS for release 2.4.0 2008-01-29 arnima * [r1240] Added argument 'all'. * [r1239] Added argument 'all'. 2007-10-22 warnes * [r1196] Clarify GPL version 2007-09-10 ggorjan * [r1169] removed unmatched brace * [r1168] adding alias 2007-09-06 ggorjan * [r1162] keyword 2007-08-21 ggorjan * [r1154] package help page * [r1153] move * [r1152] move 2007-08-20 ggorjan * [r1151] clean * [r1150] a real vignette * [r1149] a real vignette * [r1148] additional keyword for searchig 2007-08-17 ggorjan * [r1147] keyword 2007-07-22 arnima * [r1103] Reverted back to as.character(substitute(x)), so user can run keep(x), keep("x"), Args(x), and Args("x"). 2007-07-21 arnima * [r1102] Changed as.character(substitute()) to deparse(substitute()), following help(substitute) recommendation. * [r1101] Changed as.character(substitute()) to deparse(substitute()), following help(substitute) recommendation. 
2007-07-10 warnes * [r1099] Update read.xls() code and docs with enhacements by Gabor Grothendieck 2007-06-06 ggorjan * [r1097] last edits from newsletter * [r1096] drop levels as suggested by Brian Ripley * [r1095] better integration of unit tests * [r1094] making codetools happy 2007-01-28 arnima * [r1042] Throw warnings rather than errors 2007-01-27 arnima * [r1041] Meaningful error message is given when requested object does not exist * [r1040] is.* tests that return NA are not reported is.what recursion is avoided 2006-11-30 ggorjan * [r1035] minor commet to the code * [r1034] description of mapLevels methods * [r1033] description of unknown methods 2006-11-16 ggorjan * [r1013] seems that c.factor was not a good idea and there were better examples posted on r-devel list 2006-11-14 ggorjan * [r1012] Removed executable property 2006-11-10 ggorjan * [r1004] just formatting 2006-11-02 ggorjan * [r1002] typos 2006-10-30 ggorjan * [r1001] some more examples for use of read.fwf after write.fwf * [r1000] ignore for report files * [r999] Id tag from source * [r998] removing unused import * [r997] Id tag * [r996] write.fwf * [r995] Id tag * [r994] added unit tests for reorder.factor * [r993] mapply keeps names in R 2.4; POSIX unit tests solved; $ should work now 2006-10-29 ggorjan * [r992] fixed problem in tests; added unknown methods and tests for matrices * [r991] sort is generic now; mapply keeps names in R 2.4.0; some codetools suggestions fixed * [r990] sort is generic from R 2.4.0 * [r989] trim() gains ... argument; version bump * [r988] Fixed collision bug with stats version of reorder.factor 2006-10-27 warnes * [r987] Add c() method for factor objects, submitted by Gregor Gorjanc 2006-09-19 warnes * [r986] Update NEWS file for 2.3.0 release * [r985] Explicitly set the local in runit.trim.R to one where leading spaces affect sort order so that the unit test works properly. 2006-09-18 warnes * [r984] Update Rnews.sty to the latest version * [r983] Integrate fixes for trim() from Gregor and myself. * [r982] Remove unneeded files. 2006-09-13 warnes * [r981] Add unknown() and unit test files * [r980] More fixes from Gregor Gorjanc * [r979] Add mapLevels functions from Gregor Gorjanc, along with associated unit tests. 2006-08-03 warnes * [r978] Add Gregor Gorjanc's mapFactor() and combineLevels() functions. 2006-08-02 warnes * [r977] Update my email address * [r976] Remove MedUnits.rda to convert to binary format * [r975] Remove MedUnits.rda to convert to binary format * [r974] Update version number * [r973] Integrate changes suggested by Gregor Gorjanc 2006-03-14 nj7w * [r940] Fixed R CMD check errors and added trim.default to NAMESPACE 2006-03-13 nj7w * [r939] Added trim.character and trim.factor as per Gregor's suggestions 2006-01-03 warnes * [r839] Add resample() function, which generates a random sample or permutation from the elements of the supplied vector, even if the vector has length 1. This avoide the problems caused by base::sample()'s special case for vectors of length 1, where it attempts to sample from 1:x. 2005-12-13 nj7w * [r806] Updated news and removed changelog 2005-12-12 nj7w * [r798] Updated version number for CRAN release 2005-12-08 warnes * [r789] Andrew Burgess reported that interleave() converts 1-column matrixes to vectors and provided a patch. A slight modification of his patch has been applied. There is now a 'drop' argument, which controls whether 'unnecessary' dimensions are dropped. The default is FALSE. 
2005-12-04 warnes * [r779] Andrew Burgess reported that interleave() converts 1-column matrixes to vectors and provided a patch. A slight modification of his patch has been applied. There is now a 'drop' argument, which controls whether 'unnecessary' dimensions are dropped. The default is FALSE. 2005-12-01 nj7w * [r775] Updated Greg's email address * [r774] Updated Jim's email address 2005-11-21 arnima * [r744] Suppressed warning message in attach() call. 2005-10-27 warnes * [r716] Bump version number again to show that I fixed a bug. * [r715] Update version number * [r714] Remove explicit loading of gtools in examples, so that failure to import functions from gtools gets properly caught by running the examples. * [r713] Add missing close-bracket * [r712] Add upperTriangle and friends * [r711] Add functions for extracting, modifying upper and lower trianglular components of matrices. 2005-10-19 arnima * [r695] Replaced the "not.using" vector with a more robust try(get(test)) to find out whether a particular is.* function returns a logical of length one. 2005-09-12 nj7w * [r671] Updated Greg's email 2005-09-06 nj7w * [r661] Added library(gtools) in the example * [r660] Removed gtools dependency from NAMESPACE, as it was being used only in an example, and was giving warning * [r659] Added Suggests field 2005-09-02 nj7w * [r658] Updated the example in frameApply * [r656] Added NEWS * [r654] ChangeLog 2005-08-31 nj7w * [r644] Added DESCRIPTION file * [r643] removed DESCRIPTION.in 2005-07-20 nj7w * [r631] updated documentation * [r630] ## Args() was using a different search path from args(), e.g. rep <- function(local) return(NULL) args() Args() ## Fixed * [r629] ## is.what() was giving needless warnings for functions, e.g. is.what(plot) ## Fixed * [r628] ## ll() was crashing if argument was a list of length zero, e.g. x <- list() ll(x) ## Fixed, and added sort.elements (see new help page) 2005-06-09 nj7w * [r625] Updating the version number, and various help files to synchronize splitting of gregmisc bundle in 4 individual components. 2005-06-07 nj7w * [r622] Reverting to the previous version of drop.levels.R by replacing sapply(...) with as.data.frame(lapply(...)) because sapply has the undesirable effect of converting the object to a matrix, which in turn coerces the factors to numeric. 2005-05-13 nj7w * [r621] 1) Using dQuote.ascii function in read.xls as the new version of dQuote doesn't work proprly with UTF-8 locale. 2) Modified CrossTable.Rd usage in gmodels 3) Modified heatmap.2 usage in gplots. 2005-04-02 warnes * [r600] Move drop.levels() from gtools to gdata. * [r598] Move frameApply() to gdata package. 2005-03-31 warnes * [r586] Comment out example to avoid R CMD check warnings 2005-03-22 warnes * [r578] Fixes to pass `R CMD check'. * [r577] Integrated fixes from Arni. * [r576] Improve documentation of 'perl' argument and give examples. 2005-03-09 warnes * [r573] - Add ConvertMedUnits() plus documentation - Add documentation for MedUnits data set. * [r572] Update MedUnits data file. * [r571] Don't need both .Rda and .tab forms of the data. * [r570] Add MedUnits data set, which provides conversions between American 'Conventional' and Standard Intertional (SI) medical units. 2005-03-01 warnes * [r566] - Remove 'elem' call from ll example. - Add note to 'elem' man page that it is depreciated and 'll' should be used instead. 2005-02-26 nj7w * [r565] *** empty log message *** 2005-02-25 warnes * [r564] Remove ll methods since the base function now handles lists and data frames. 
* [r563] Integrate changes submitted by Arni Magnusson 2005-01-31 warnes * [r529] Add ability to specify the perl executable and path. 2005-01-28 warnes * [r526] Add dependency on stats. 2005-01-12 warnes * [r515] Add dependency on R 1.9.0+ to prevent poeple from installing on old versions of R which don't support namespaces. 2004-12-27 warnes * [r509] Update usage to match code. * [r508] Replace 'F' with 'FALSE'. 2004-10-12 warneg * [r465] Add unmatrix() function 2004-09-27 warneg * [r461] Updated to pass R CMD check. 2004-09-03 warneg * [r455] added to cvs. * [r454] Checkin xls2csv.pl. Should have been in long ago, must have been an oversight * [r451] Need to look for files using the new package name. * [r449] Need to use the new package name when looking for iris.xls. * [r448] Add ll.list to the to the list of functions described * [r447] Add ll and friends to the namespace * [r446] initial bundle checkin 2004-09-02 warneg * [r442] Initial revision 2004-08-27 warnes * [r441] Fixed bug in mixedsort, and modified reorder.factor to use mixedsort. 2004-07-29 warnes * [r427] Add perl modules to CVS. 2004-07-27 warnes * [r425] Fix typos/spelling. * [r424] Add note that Perl is required for read.xls to work properly. 2004-07-16 warnes * [r420] Remove the temporary csv file if reading it in fails. 2004-06-22 warnes * [r377] Add S3 methods for data frames and lists. 2004-06-08 warnes * [r371] Moved from gregmisc/src/. * [r370] Remove the files in src, instead provide "pre-installed" perl packages in inst/perl. 2004-06-05 warnes * [r365] Fix typo. * [r364] Fix Unix makefile so that it works when invoked directly. * [r363] Fixes for Windows * [r362] Minor enhancment to read.xls example. * [r361] - Merge Makefile.win into Makefile. Makefile.win now just redirects to Makefile. - Update xls2csv.bat and xls2csv shell script to correctly obtain thier installion path and infer the location of the perl code and libraries. - The xls2csv.pl script now assumes that the libraries it needs are installed into the same directory where it is. 2004-06-04 warnes * [r360] More changes, indended to improve installation reliabilty and to make Makefile and Makefile.win as similar as possible. 2004-05-27 warnes * [r358] Clean should remove scripts from source directory. * [r357] Moved to xls2csv.pl.in. * [r354] More fixes. * [r353] Fix missing brace. * [r352] Add explicit package name to see also links. * [r351] More xls2csv perl module support changes. * [r350] More changes to fix local installation of perl modules. 2004-05-26 warnes * [r345] Escape underscores in email addresses so Latex is happy. 2004-05-25 warnes * [r339] More changes to xls2csv code. * [r337] Add Args() function contributed by Arni Magnusson . * [r335] - Change to call perl directly rather than depending on the installed shell script. This should make the code more portable to MS-Windows systes. - Add additional commants.. * [r332] Makefile now modifies xls2csv.bat xls2csv.pl and xls2csv to contain an explicit path to the perl script/libraries. * [r330] R CMD build calls the clean target to purge build files from the source tree when packaging. To get use this behavior correctly, I've renamed the clean target to cleanup and distclean target to clean. * [r329] Add read.xls(), a function to read Microsoft Excel files by translating them to csv files via the xls2csv.pl script. * [r326] More fixes. Seems to work now. 2004-05-24 warnes * [r325] Add files to enable inclusion and installation of xls2csv.pl as part of the package. 
2004-04-01 warnes * [r312] Add function remove.vars(). 2004-03-26 warnes * [r307] Contents of package 'mva' moveed to 'stats'. * [r298] - Fix is.what() for use under R 1.9.0 - is.what() now uses is.* functions found in any attached frame 2004-01-21 warnes * [r282] - Add ... argument to match generic provided in mva. 2004-01-19 warnes * [r275] - Integrated (partial) patch submitted by Arni Magnusson to clarify help text. - Modifed code to use match.arg(). 2003-12-15 warnes * [r271] - Applied patch from Arni that fixed a bug that caused env() to crash if any environment was completely empty 2003-12-03 warnes * [r253] - match function argument defaults with 'usage' 2003-12-02 warnes * [r249] Add one argument, to match code. 2003-12-01 warnes * [r244] - Apply changes submitted by Arni Magnusson 2003-11-19 warnes * [r229] Changes to pass R CMD check. 2003-11-18 warnes * [r224] - Convert from MS-Dos to Unix line endings. - Reformat to 80 columns. 2003-11-17 warnes * [r223] Replace 'T' with 'TRUE' to remove R CMD check error. * [r222] Fix syntax error. 2003-11-10 warnes * [r220] - Add files contributed by Arni Magnusson . As well as some of my own. 2003-06-07 warnes * [r198] - Fixed error in examples. Had sqrt(var(x)/(n-1)) for the standard error of the mean instead of sqrt(var(x)/n). 2003-05-23 warnes * [r197] - Fixed typos * [r196] - library() backported from 1.7-devel. This version of the function adds the "pos=" argument to specify where in the search path the library should be placed. - updated .First.lib to use library(...pos=3) for MASS to avoid the 'genotype' data set in MASS from masking the genotype funciton in genetics when it loads gregmisc - Added logit() inv.logit() matchcols() function and corresponding docs 2003-05-20 warnes * [r195] - Omit NULL variables. * [r194] - Added function trim() and assocated docs. 2003-04-22 warnes * [r188] - The mva package (which is part of recommended) now provides a generic 'reorder' function. Consequently, the 'reorder' function here has been renamed to 'reorder.factor'. - Removed check of whether the argument is a factor object. 2003-03-03 warnes * [r165] - Updated to match reorder.Rd which was exetended to handle factor label names in addition to numeric indices. * [r164] - Added handling of factor level names in addition to numeric indexes. 2002-09-23 warnes * [r118] Added inst/doc directory and contents to CVS. * [r117] - Modified all files to include CVS Id and Log tags. 2002-08-01 warnes * [r112] Added reorder() function to reorder the levels of a factor. 2002-04-09 warneg * [r109] Checkin for version 0.5.3 * [r108] - Properly handle case when some or all arguments are vectors. 2002-03-26 warneg * [r104] - Changed methods to include '...' to match the generic. - Updated for version 0.5.1 * [r102] Added ... to methods. * [r101] Updated to add ... parameter to function calls. * [r98] Initial checkin. * [r95] - Added CVS tags 2002-02-21 warneg * [r87] - Fixed bug where row and column labels didn't always correspond to the contents. This only occured when a factor was used for by1 or by2 and the factors levels weren't in the default sort order. 2002-02-20 warneg * [r86] New function. * [r85] Initial checkin. * [r84] Initial checkin. * [r83] Noted that specialized methods exist. * [r82] Incorrectly had contents of nobs.R here instead of help text. Corrected. * [r81] Minor changes, typo and formatting fixes. * [r79] - initial checkin. 2001-12-12 warneg * [r53] Added omitted documentaton for 'info' parameter. 
Changed example code not to use 'Orthodont' data set so that the nlme package is not required. 2001-12-08 warneg * [r47] Changed 'T' to 'TRUE' in parameter list. 2001-12-07 warneg * [r45] - Fixed see also link. Mis-typed 'data.frame' as 'dataframe'. * [r44] Added attribution. * [r43] Added proper attribution to Don MacQueen. * [r39] Initial checkin. Unfortunately, I've lost the email of the person who sent this to me. I'll credit him/her when I find out who it was! * [r38] Initial checkin 2001-12-05 warneg * [r34] - Renamed 'concat' function to 'combine' to avoid name conflict with an existing S-Plus function. * [r32] - Changed function name 'concat' to 'combine' and renamed concat.Rd to combine.Rd gdata/man/0000755000175100001440000000000013115327445012130 5ustar hornikusersgdata/man/getDateTimePart.Rd0000644000175100001440000000363013003720415015432 0ustar hornikusers% getDateTimeParts.Rd %-------------------------------------------------------------------------- % What: Extract date and time parts from ... - help % $Id$ % Time-stamp: <2008-12-30 22:44:20 ggorjan> %-------------------------------------------------------------------------- \name{getYear} \alias{getDateTimeParts} \alias{getYear} \alias{getYear.default} \alias{getYear.Date} \alias{getYear.POSIXct} \alias{getYear.POSIXlt} \alias{getMonth} \alias{getMonth.default} \alias{getMonth.Date} \alias{getMonth.POSIXct} \alias{getMonth.POSIXlt} \alias{getDay} \alias{getDay.default} \alias{getDay.Date} \alias{getDay.POSIXct} \alias{getDay.POSIXlt} \alias{getHour} \alias{getHour.default} \alias{getMin} \alias{getMin.default} \alias{getSec} \alias{getSec.default} \title{Get date/time parts from date and time objects} \description{get* functions provide an *experimental* approach for extracting the date/time parts from objects of a date/time class. They are designed to be intiutive and thus lowering the learning curve for work with date and time classes in \R{}.} \usage{ getYear(x, format, \dots) getMonth(x, format, \dots) getDay(x, format, \dots) getHour(x, format, \dots) getMin(x, format, \dots) getSec(x, format, \dots) } \arguments{ \item{x}{generic, date/time object} \item{format}{character, format} \item{\dots}{arguments pased to other methods} } \value{Character} \author{Gregor Gorjanc} \seealso{ \code{\link{Date}}, \code{\link{DateTimeClasses}}, \code{\link{strptime}} } \examples{ ## --- Date class --- tmp <- Sys.Date() tmp getYear(tmp) getMonth(tmp) getDay(tmp) ## --- POSIXct class --- tmp <- as.POSIXct(tmp) getYear(tmp) getMonth(tmp) getDay(tmp) ## --- POSIXlt class --- tmp <- as.POSIXlt(tmp) getYear(tmp) getMonth(tmp) getDay(tmp) } \keyword{manip} \keyword{misc} %-------------------------------------------------------------------------- % getDateTimeParts.Rd ends heregdata/man/xlsFormats.Rd0000644000175100001440000000112613003720415014547 0ustar hornikusers\name{xlsFormats} \Rdversion{1.1} \alias{xlsFormats} \title{ Check which file formats are supported by read.xls } \description{ Check which file formats are supported by read.xls } \usage{ xlsFormats(perl = "perl", verbose = FALSE) } \arguments{ \item{perl}{Path to perl interpreter (optional).} \item{verbose}{If \code{TRUE}, show additional messages during processing.} } \value{ Vector of supported formats, possible elements are 'XLS' and 'XLSX'. } \seealso{ \code{\link{read.xls}}, \code{\link{xls2csv}}. 
} \examples{ xlsFormats() } \keyword{ misc } gdata/man/ConvertMedUnits.Rd0000644000175100001440000000420713003720416015502 0ustar hornikusers\name{ConvertMedUnits} \alias{ConvertMedUnits} \title{Convert medical measurements between International Standard (SI) and US 'Conventional' Units.} \description{ Convert Medical measurements between International Standard (SI) and US 'Conventional' Units. } \usage{ ConvertMedUnits(x, measurement, abbreviation, to = c("Conventional", "SI", "US"), exact = !missing(abbreviation)) } \arguments{ \item{x}{Vector of measurement values} \item{measurement}{Name of the measurement} \item{abbreviation}{Measurement abbreviation} \item{to}{Target units} \item{exact}{Logicial indicating whether matching should be exact} } \details{ Medical laboratories and practitioners in the United States use one set of units (the so-called 'Conventional' units) for reporting the results of clinical laboratory measurements, while the rest of the world uses the International Standard (SI) units. It often becomes necessary to translate between these units when participating in international collaborations. This function converts between SI and US 'Conventional' units. If \code{exact=FALSE}, \code{grep} will be used to do a case-insensitive sub-string search for matching measurement names. If more than one match is found, an error will be generated, along with a list of the matching entries. } \value{ Returns a vector of converted values. The attribute 'units' will contain the target units converted. } \seealso{ The data set \code{\link{MedUnits}} provides the conversion factors. } \references{ \url{http://www.globalrph.com/conv_si.htm} } \author{ Gregory R. Warnes \email{greg@warnes.net} } \examples{ data(MedUnits) # show available conversions MedUnits$Measurement # Convert SI Glucose measurement to 'Conventional' units GlucoseSI = c(5, 5.4, 5, 5.1, 5.6, 5.1, 4.9, 5.2, 5.5) # in SI Units GlucoseUS = ConvertMedUnits( GlucoseSI, "Glucose", to="US" ) cbind(GlucoseSI,GlucoseUS) \dontrun{ # See what happens when there is more than one match ConvertMedUnits( 27.5, "Creatin", to="US") } # To solve the problem do: ConvertMedUnits( 27.5, "Creatinine", to="US", exact=TRUE) } \keyword{manip} gdata/man/matchcols.Rd0000644000175100001440000000542713003720415014372 0ustar hornikusers\name{matchcols} \alias{matchcols} \title{Select columns names matching certain critera} \description{ This function allows easy selection of the column names of an object using a set of inclusion and exclusion critera. } \usage{ matchcols(object, with, without, method=c("and","or"), ...) } \arguments{ \item{object}{Matrix or dataframe} \item{with, without}{Vector of regular expression patterns} \item{method}{One of "and" or "or"} \item{\dots}{Optional arguments to \code{grep}} } \value{ Vector of column names which match all (\code{method="and"}) or any (\code{method="or"}) of the patterns specified in \code{with}, but none of the patterns specified in \code{without}. } \author{Gregory R. 
Warnes \email{greg@warnes.net}} \seealso{ \code{\link[base]{grep}} } \examples{ # create a matrix with a lot of named columns x <- matrix( ncol=30, nrow=5 ) colnames(x) <- c("AffyID","Overall Group Means: Control", "Overall Group Means: Moderate", "Overall Group Means: Marked", "Overall Group Means: Severe", "Overall Group StdDev: Control", "Overall Group StdDev: Moderate", "Overall Group StdDev: Marked", "Overall Group StdDev: Severe", "Overall Group CV: Control", "Overall Group CV: Moderate", "Overall Group CV: Marked", "Overall Group CV: Severe", "Overall Model P-value", "Overall Model: (Intercept): Estimate", "Overall Model: Moderate: Estimate", "Overall Model: Marked: Estimate", "Overall Model: Severe: Estimate", "Overall Model: (Intercept): Std. Error", "Overall Model: Moderate: Std. Error", "Overall Model: Marked: Std. Error", "Overall Model: Severe: Std. Error", "Overall Model: (Intercept): t value", "Overall Model: Moderate: t value", "Overall Model: Marked: t value", "Overall Model: Severe: t value", "Overall Model: (Intercept): Pr(>|t|)", "Overall Model: Moderate: Pr(>|t|)", "Overall Model: Marked: Pr(>|t|)", "Overall Model: Severe: Pr(>|t|)") # Get the columns which give estimates or p-values # only for marked and severe groups matchcols(x, with=c("Pr", "Std. Error"), without=c("Intercept","Moderate"), method="or" ) # Get just the column which give the p-value for the intercept matchcols(x, with=c("Intercept", "Pr") ) } \keyword{manip} gdata/man/interleave.Rd0000644000175100001440000000653113003720416014551 0ustar hornikusers% $Id: interleave.Rd 1603 2012-09-11 20:41:43Z warnes $ % % $Log$ % Revision 1.8 2005/12/12 22:02:48 nj7w % Updated version number for CRAN release % % Revision 1.7 2005/12/04 03:02:06 warnes % Andrew Burgess reported that interleave() converts 1-column matrixes % to vectors and provided a patch. A slight modification of his patch % has been applied. There is now a 'drop' argument, which controls % whether 'unnecessary' dimensions are dropped. The default is FALSE. % % Revision 1.6 2005/09/12 15:42:45 nj7w % Updated Greg's email % % Revision 1.5 2005/06/09 14:20:25 nj7w % Updating the version number, and various help files to synchronize splitting of gregmisc bundle in 4 individual components. % % Revision 1.1.1.1 2005/05/25 22:07:33 nj7w % Initial entry for individual package gdata % % Revision 1.4 2003/06/07 17:58:37 warnes % % - Fixed error in examples. Had sqrt(var(x)/(n-1)) for the standard % error of the mean instead of sqrt(var(x)/n). % % Revision 1.3 2002/09/23 13:59:30 warnes % - Modified all files to include CVS Id and Log tags. % % Revision 1.2 2002/04/09 00:51:32 warneg % % Checkin for version 0.5.3 % % Revision 1.1 2002/02/20 21:41:54 warneg % Initial checkin. % % \name{interleave} \alias{interleave} \title{ Interleave Rows of Data Frames or Matrices } \description{ Interleave rows of data frames or Matrices. } \usage{ interleave(..., append.source=TRUE, sep=": ", drop=FALSE) } \arguments{ \item{\dots}{ objects to be interleaved } \item{append.source}{Boolean Flag. When \code{TRUE} (the default) the argument name will be appended to the row names to show the source of each row. } \item{sep}{Separator between the original row name and the object name.} \item{drop}{boolean flag - When TRUE, matrices containing one column will be converted to vectors.} } \details{ This function creates a new matrix or data frame from its arguments. The new object will have all of the rows from the source objects interleaved. 
IE, it will contain row 1 of object 1, followed by row 1 of object 2, .. row 1 of object 'n', row 2 of object 1, row 2 of object 2, ... row 2 of object 'n' ... } \value{ Matrix containing the interleaved rows of the function arguments. } \author{ Gregory R. Warnes \email{greg@warnes.net} } \seealso{ \code{\link{cbind}}, \code{\link{rbind}}, \code{\link{combine}} } \examples{ # Simple example a <- matrix(1:10,ncol=2,byrow=TRUE) b <- matrix(letters[1:10],ncol=2,byrow=TRUE) c <- matrix(LETTERS[1:10],ncol=2,byrow=TRUE) interleave(a,b,c) # Useful example: # # Create a 2-way table of means, standard errors, and # obs g1 <- sample(letters[1:5], 1000, replace=TRUE) g2 <- sample(LETTERS[1:3], 1000, replace=TRUE ) dat <- rnorm(1000) stderr <- function(x) sqrt( var(x,na.rm=TRUE) / nobs(x) ) means <- tapply(dat, list(g1, g2), mean ) stderrs <- tapply(dat, list(g1, g2), stderr ) ns <- tapply(dat, list(g1, g2), nobs ) blanks <- matrix( " ", nrow=5, ncol=3) tab <- interleave( "Mean"=round(means,2), "Std Err"=round(stderrs,2), "N"=ns, " " = blanks, sep=" " ) print(tab, quote=FALSE) # Using drop to control coercion to a lower dimensions: m1 <- matrix(1:4) m2 <- matrix(5:8) interleave(m1, m2, drop=TRUE) # This will be coerced to a vector interleave(m1, m2, drop=FALSE) # This will remain a matrix } \keyword{category} \keyword{array} gdata/man/write.fwf.Rd0000644000175100001440000002307213003720416014325 0ustar hornikusers% write.fwf.Rd %-------------------------------------------------------------------------- % What: Write fixed width format man page % $Id: write.fwf.Rd 1928 2015-04-14 22:02:01Z warnes $ % Time-stamp: <2008-08-05 12:40:32 ggorjan> %-------------------------------------------------------------------------- \name{write.fwf} \alias{write.fwf} \concept{data output} \concept{data export} \title{Write object in fixed width format} \description{ \code{write.fwf} writes object in *f*ixed *w*idth *f*ormat. } \usage{ write.fwf(x, file="", append=FALSE, quote=FALSE, sep=" ", na="", rownames=FALSE, colnames=TRUE, rowCol=NULL, justify="left", formatInfo=FALSE, quoteInfo=TRUE, width=NULL, eol="\n", qmethod=c("escape", "double"), scientific=TRUE, \dots) } \arguments{ \item{x}{data.frame or matrix, the object to be written} \item{file}{character, name of file or connection, look in \code{\link{write.table}} for more} \item{append}{logical, append to existing data in \code{file}} \item{quote}{logical, quote data in output} \item{na}{character, the string to use for missing values i.e. \code{NA} in the output} \item{sep}{character, separator between columns in output} \item{rownames}{logical, print row names} \item{colnames}{logical, print column names} \item{rowCol}{character, rownames column name} \item{justify}{character, alignment of character columns; see \code{\link{format}}} \item{formatInfo}{logical, return information on number of levels, widths and format} \item{quoteInfo}{logical, should \code{formatInfo} account for quotes} \item{width}{numeric, width of the columns in the output} \item{eol}{the character(s) to print at the end of each line (row). For example, 'eol="\\r\\n"' will produce Windows' line endings on a Unix-alike OS, and 'eol="\\r"' will produce files as expected by Mac OS Excel 2004.} \item{qmethod}{a character string specifying how to deal with embedded double quote characters when quoting strings. Must be one of '"escape"' (default), in which case the quote character is escaped in C style by a backslash, or '"double"', in which case it is doubled. 
You can specify just the initial letter.} \item{scientific}{logical, if TRUE, allow numeric values to be formatted using scientific notation.} \item{\dots}{further arguments to \code{\link{format.info}} and \code{\link{format}} } } \details{ While *F*ixed *w*idth *f*ormat is no longer widely used, it remains common in some disciplines. Output is similar to \code{print(x)} or \code{format(x)}. Formatting is done completely by \code{\link{format}} on a column basis. Columns in the output are by default separated with a space i.e. empty column with a width of one character, but that can be changed with \code{sep} argument as passed to \code{\link{write.table}} via \dots. As mentioned formatting is done completely by \code{\link{format}}. Arguments can be passed to \code{format} via \code{\dots} to further modify the output. However, note that the returned \code{formatInfo} might not properly account for this, since \code{\link{format.info}} (which is used to collect information about formatting) lacks the arguments of \code{\link{format}}. \code{quote} can be used to quote fields in the output. Since all columns of \code{x} are converted to character (via \code{\link{format}}) during the output, all columns will be quoted! If quotes are used, \code{\link{read.table}} can be easily used to read the data back into \R. Check examples. Do read the details about \code{quoteInfo} argument. Use only *true* character, i.e., avoid use of tabs, i.e., "\\t", or similar separators via argument \code{sep}. Width of the separator is taken as the number of characters evaluated via \code{\link{nchar}(sep)}. Use argument \code{na} to convert missing/unknown values. Only single value can be specified. Use \code{\link{NAToUnknown}} prior to export if you need greater flexibility. If \code{rowCol} is not \code{NULL} and \code{rownames=TRUE}, rownames will also have column name with \code{rowCol} value. This is mainly for flexibility with tools outside \R. Note that (at least in \R 2.4.0) it is not "easy" to import data back to \R with \code{\link{read.fwf}} if you also export rownames. This is the reason, that default is \code{rownames=FALSE}. Information about format of output will be returned if \code{formatInfo=TRUE}. Returned value is described in value section. This information is gathered by \code{\link{format.info}} and care was taken to handle numeric properly. If output contains rownames, values account for this. Additionally, if \code{rowCol} is not \code{NULL} returned values contain also information about format of rownames. If \code{quote=TRUE}, the output is of course wider due to quotes. Return value (with \code{formatInfo=TRUE}) can account for this in two ways; controlled with argument \code{quoteInfo}. However, note that there is no way to properly read the data back to \R if \code{quote=TRUE & quoteInfo=FALSE} arguments were used for export. \code{quoteInfo} applies only when \code{quote=TRUE}. Assume that there is a file with quoted data as shown bellow (column numbers in first three lines are only for demonstration of the values in the output). 
\preformatted{ 123456789 12345678 # for position 123 1234567 123456 # for width with quoteInfo=TRUE 1 12345 1234 # for width with quoteInfo=FALSE "a" "hsgdh" " 9" " " " bb" " 123" } With \code{quoteInfo=TRUE} \code{write.fwf} will return \preformatted{ colname position width V1 1 3 V2 5 7 V3 13 6 } or (with \code{quoteInfo=FALSE}) \preformatted{ colname position width V1 2 1 V2 6 5 V3 14 4 } Argument \code{width} can be used to increase the width of the columns in the output. This argument is passed to the width argument of \code{\link{format}} function. Values in \code{width} are recycled if there is less values than the number of columns. If the specified width is to short in comparison to the "width" of the data in particular column, error is issued. } \value{ Besides its effect to write/export data \code{write.fwf} can provide information on format and width. A data.frame is returned with the following columns: \item{colname}{name of the column} \item{nlevels}{number of unique values (unused levels of factors are dropped), 0 for numeric column} \item{position}{starting column number in the output} \item{width}{width of the column} \item{digits}{number of digits after the decimal point} \item{exp}{width of exponent in exponential representation; 0 means there is no exponential representation, while 1 represents exponent of length one i.e. \code{1e+6} and 2 \code{1e+06} or \code{1e+16}} } \author{Gregor Gorjanc} \seealso{ \code{\link{format.info}}, \code{\link{format}}, \code{\link{NAToUnknown}}, \code{\link{write.table}}, \code{\link{read.fwf}}, \code{\link{read.table}} and \code{\link{trim}} } \examples{ ## Some data num <- round(c(733070.345678, 1214213.78765456, 553823.798765678, 1085022.8876545678, 571063.88765456, 606718.3876545678, 1053686.6, 971024.187656, 631193.398765456, 879431.1), digits=3) testData <- data.frame(num1=c(1:10, NA), num2=c(NA, seq(from=1, to=5.5, by=0.5)), num3=c(NA, num), int1=c(as.integer(1:4), NA, as.integer(4:9)), fac1=factor(c(NA, letters[1:9], "hjh")), fac2=factor(c(letters[6:15], NA)), cha1=c(letters[17:26], NA), cha2=c(NA, "longer", letters[25:17]), stringsAsFactors=FALSE) levels(testData$fac1) <- c(levels(testData$fac1), "unusedLevel") testData$Date <- as.Date("1900-1-1") testData$Date[2] <- NA testData$POSIXt <- as.POSIXct(strptime("1900-1-1 01:01:01", format="\%Y-\%m-\%d \%H:\%M:\%S")) testData$POSIXt[5] <- NA ## Default write.fwf(x=testData) ## NA should be - write.fwf(x=testData, na="-") ## NA should be -NA- write.fwf(x=testData, na="-NA-") ## Some other separator than space write.fwf(x=testData[, 1:4], sep="-mySep-") ## Force wider columns write.fwf(x=testData[, 1:5], width=20) ## Show effect of 'scienfic' option testData$num3 <- testData$num3 * 1e8 write.fwf(testData, scientific=TRUE) write.fwf(testData, scientific=FALSE) testData$num3 <- testData$num3 / 1e8 ## Write to file and report format and fixed width information file <- tempfile() formatInfo <- write.fwf(x=testData, file=file, formatInfo=TRUE) formatInfo ## Read exported data back to R (note +1 due to separator) ## ... without header read.fwf(file=file, widths=formatInfo$width + 1, header=FALSE, skip=1, strip.white=TRUE) ## ... with header - via postimport modfication tmp <- read.fwf(file=file, widths=formatInfo$width + 1, skip=1, strip.white=TRUE) colnames(tmp) <- read.table(file=file, nrow=1, as.is=TRUE) tmp ## ... 
with header - persuading read.fwf to accept header properly ## (thanks to Marc Schwartz) read.fwf(file=file, widths=formatInfo$width + 1, strip.white=TRUE, skip=1, col.names=read.table(file=file, nrow=1, as.is=TRUE)) ## ... with header - with the use of quotes write.fwf(x=testData, file=file, quote=TRUE) read.table(file=file, header=TRUE, strip.white=TRUE) ## Tidy up unlink(file) } \keyword{print} \keyword{file} %-------------------------------------------------------------------------- % write.fwf.Rd ends here gdata/man/is.what.Rd0000644000175100001440000000223513003720415013764 0ustar hornikusers\name{is.what} \alias{is.what} \title{Run Multiple is.* Tests on a Given Object} \description{ Run multiple \code{is.*} tests on a given object: \code{is.na}, \code{is.numeric}, and many others. } \usage{ is.what(object, verbose=FALSE) } \arguments{ \item{object}{any \R object.} \item{verbose}{whether negative tests should be included in output.} } \value{ A character vector containing positive tests, or when \code{verbose} is \code{TRUE}, a data frame showing all test results. } \author{Arni Magnusson, inspired by \code{demo(is.things)}.} \note{ The following procedure is used to look for valid tests: \enumerate{ \item{}{Find all objects named \code{is.*} in all loaded environments.} \item{}{Discard objects that are not functions.} \item{}{Include test result only if it is of class \code{"logical"}, not an \code{NA}, and of length 1.} } } \seealso{ \code{\link{is.na}} and \code{\link{is.numeric}} are commonly used tests. } \examples{ is.what(pi) is.what(NA, verbose=TRUE) is.what(lm(1~1)) is.what(is.what) } % Basics \keyword{classes} \keyword{NA} % Programming \keyword{programming} \keyword{error} \keyword{utilities} gdata/man/sheetCount.Rd0000644000175100001440000000240313003720416014526 0ustar hornikusers\name{sheetCount} \Rdversion{1.1} \alias{sheetCount} \alias{sheetNames} \title{ Count or list sheet names in Excel spreadsheet files. } \description{ Count or list sheet names in Excel spreadsheet files. } \usage{ sheetCount(xls, verbose = FALSE, perl = "perl") sheetNames(xls, verbose = FALSE, perl = "perl") } \arguments{ \item{xls}{File path to spreadsheet. Supports "http://", "https://", and "ftp://" URLS.} \item{verbose}{If \code{TRUE}, show additional messages during processing.} \item{perl}{Path to perl interpreter.} } \value{ \code{sheetCount} returns the number of sheets in the spreadsheet. \code{sheetNames} returns the names of sheets in the spreadsheet. } \seealso{ \code{\link{read.xls}}, \code{\link{xls2csv}}. } \examples{ xlsfile <- system.file("xls", "iris.xls", package = "gdata") xlsfile sheetCount(xlsfile) exampleFile <- file.path(path.package('gdata'),'xls', 'ExampleExcelFile.xls') exampleFile2007 <- file.path(path.package('gdata'),'xls', 'ExampleExcelFile.xlsx') sheetCount(exampleFile) if( 'XLSX' \%in\% xlsFormats() ) # if XLSX is supported.. sheetNames(exampleFile2007) } \keyword{ misc } gdata/man/elem.Rd0000644000175100001440000000334413003720416013334 0ustar hornikusers\name{elem} \alias{elem} \title{ Display Information about Elements in a Given Object } \description{ \emph{This function is depreciated. Please use \code{\link{ll}} instead.} Display name, class, size, and dimensions of each element in a given object. 
} \usage{ elem(object=1, unit=c("KB","MB","bytes"), digits=0, dimensions=FALSE) } \arguments{ \item{object}{object containing named elements, perhaps a model or data frame.} \item{unit}{required unit for displaying element size: "KB", "MB", "bytes", or first letter.} \item{digits}{number of decimals to display when rounding element size.} \item{dimensions}{whether element dimensions should be returned.} } \details{ A verbose alternative to \code{names()}. } \value{ A data frame with named rows and the following columns: \item{Class}{element class.} \item{KB}{element size \emph{(see notes)}.} \item{Dim}{element dimensions \emph{(optional)}.} } \note{ The name of the element size column is the same as the unit used. Elements of class \code{classRepresentation}, \code{ClassUnionRepresentation}, and \code{grob} do not have a defined size, so 0 bytes are assumed for those. } \author{Arni Magnusson \email{arnima@u.washington.edu}} \seealso{ \code{\link[base]{names}}, \code{\link[utils]{str}}, and \code{\link[base]{summary}} display different information about object elements. \code{\link{ll}} and \code{\link{env}} are related to \code{elem}. } \examples{ \dontrun{ data(infert) elem(infert) model <- glm(case~spontaneous+induced, family=binomial, data=infert) elem(model, dim=TRUE) elem(model$family) } } % Basics \keyword{attribute} \keyword{classes} \keyword{list} % Programming \keyword{print} \keyword{utilities} gdata/man/nPairs.Rd0000644000175100001440000000366413003720415013652 0ustar hornikusers% nPairs.Rd %-------------------------------------------------------------------------- % What: Number of variable pairs - help % $Id$ % Time-stamp: <2008-12-30 18:30:11 ggorjan> %-------------------------------------------------------------------------- \name{nPairs} \alias{nPairs} \concept{pairs} \title{Number of variable pairs} \description{ \code{nPairs} counts the number of pairs between variables. } \usage{ nPairs(x, margin=FALSE, names=TRUE, abbrev=TRUE, ...) } \arguments{ \item{x}{data.frame or a matrix} \item{margin}{logical, calculate the cumulative number of \dQuote{pairs}} \item{names}{logical, add row/col-names to the output} \item{abbrev}{logical, abbreviate names} \item{\dots}{other arguments passed to \code{\link{abbreviate}}} } \details{ The class of returned matrix is nPairs and matrix. There is a summary method, which shows the opposite information - counts how many times each variable is known, while the other variable of a pair is not. See examples. } \value{ Matrix of order \eqn{k}, where \eqn{k} is the number of columns in \code{x}. Values in a matrix represent the number of pairs between columns/variables in \code{x}. If \code{margin=TRUE}, the number of columns is \eqn{k+1} and the last column represents the cumulative number of pairing all variables. 
} \author{Gregor Gorjanc} \seealso{\code{\link{abbreviate}}} \examples{ ## Test data test <- data.frame(V1=c(1, 2, 3, 4, 5), V2=c(NA, 2, 3, 4, 5), V3=c(1, NA, NA, NA, NA), V4=c(1, 2, 3, NA, NA)) ## Number of variable pairs nPairs(x=test) ## Without names nPairs(x=test, names=FALSE) ## Longer names colnames(test) <- c("Variable1", "Variable2", "Variable3", "Variable4") nPairs(x=test) ## Margin nPairs(x=test, margin=TRUE) ## Summary summary(object=nPairs(x=test)) } \keyword{misc} %-------------------------------------------------------------------------- % nPairs.Rd ends here gdata/man/combine.Rd0000644000175100001440000000507013003720415014023 0ustar hornikusers% $Id: combine.Rd 1435 2010-05-02 06:11:26Z warnes $ % % $Log$ % Revision 1.4 2005/12/01 16:41:14 nj7w % Updated Greg's email address % % Revision 1.3 2005/06/09 14:20:25 nj7w % Updating the version number, and various help files to synchronize splitting of gregmisc bundle in 4 individual components. % % Revision 1.1.1.1 2005/05/25 22:07:33 nj7w % Initial entry for individual package gdata % % Revision 1.2 2002/09/23 13:59:30 warnes % - Modified all files to include CVS Id and Log tags. % % \name{combine} \alias{combine} \title{ Combine R Objects With a Column Labeling the Source} \description{ Take a sequence of vectors, matrices, or data frames and combine them into rows of a common data frame with an additional column \code{source} indicating the source object. } \usage{ combine(..., names=NULL) } \arguments{ \item{\dots}{vectors or matrices to combine.} \item{names}{character vector of names to use when creating the source column.} } \details{ If there are several matrix arguments, they must all have the same number of columns. The number of columns in the result will be one larger than the number of columns in the component matrices. If all of the arguments are vectors, these are treated as single-column matrices. In this case, the column containing the combined vector data is labeled \code{data}. When the arguments consist of a mix of matrices and vectors the number of columns of the result is determined by the number of columns of the matrix arguments. Vectors are considered row vectors and have their values recycled or subsetted (if necessary) to achieve this length. The \code{source} column is created as a factor with levels corresponding to the name of the object from which each row was obtained. When the \code{names} argument is omitted, the name of each object is obtained from the specified argument name in the call (if present) or from the name of the object. See below for examples. } % \value{ % ~Describe the value returned % If it is a LIST, use % \item{comp1 }{Description of `comp1'} % \item{comp2 }{Description of `comp2'} % ... % } %\references{ ~put references to the literature/web site here ~ } \author{Gregory R. Warnes \email{greg@warnes.net}} %\note{ ~~further notes~~ } \seealso{ \code{\link{rbind}}, \code{\link{merge}}} \examples{ a <- matrix(rnorm(12),ncol=4,nrow=3) b <- 1:4 combine(a,b) combine(x=a,b) combine(x=a,y=b) combine(a,b,names=c("one","two")) c <- 1:6 combine(b,c) } \keyword{array} \keyword{manip} gdata/man/keep.Rd0000644000175100001440000000232513003720416013334 0ustar hornikusers\name{keep} \alias{keep} \title{Remove All Objects, Except Those Specified} \description{ Remove all objects from the user workspace, except those specified.
} \usage{ keep(..., list=character(0), all=FALSE, sure=FALSE) } \arguments{ \item{...}{objects to be kept, specified one by one, quoted or unquoted.} \item{list}{character vector of object names to be kept.} \item{all}{whether hidden objects (beginning with a \code{.}) should be removed, unless explicitly kept.} \item{sure}{whether to perform the removal, otherwise return names of objects that would have been removed.} } \details{ Implemented with safety caps: objects whose name starts with a \code{.} are not removed unless \code{all=TRUE}, and an explicit \code{sure=TRUE} is required to remove anything. } \value{ A character vector containing object names, or \code{NULL} when \code{sure} is \code{TRUE}. } \author{Arni Magnusson} \seealso{ \code{keep} is a convenient interface to \code{\link{rm}} when removing most objects from the user workspace. } \examples{ data(women, cars) keep(cars) ## To remove all objects except cars, run: ## keep(cars, sure=TRUE) } % Programming \keyword{data} \keyword{environment} \keyword{utilities} gdata/man/env.Rd0000644000175100001440000000166713003720415013207 0ustar hornikusers\name{env} \alias{env} \title{Describe All Loaded Environments} \description{ Display name, number of objects, and size of all loaded environments. } \usage{ env(unit="KB", digits=0) } \arguments{ \item{unit}{unit for displaying environment size: "bytes", "KB", "MB", or first letter.} \item{digits}{number of decimals to display when rounding environment size.} } \value{ A data frame with the following columns: \item{Environment}{environment name.} \item{Objects}{number of objects in environment.} \item{KB}{environment size \emph{(see notes)}.} } \note{ The name of the environment size column is the same as the unit used. } \author{Arni Magnusson} \seealso{ \code{env} is a verbose alternative to \code{\link{search}}. \code{\link{ll}} is a related function that describes objects in an environment. } \examples{ \dontrun{ env() } } % Basics \keyword{data} % Programming \keyword{environment} \keyword{utilities} gdata/man/installXLSXsupport.Rd0000644000175100001440000000540113003720416016230 0ustar hornikusers\name{installXLSXsupport} \Rdversion{1.1} \alias{installXLSXsupport} \title{ Install perl modules needed for read.xls to support Excel 2007+ XLSX format } \description{ Install perl modules needed for read.xls to support Excel 2007+ XLSX format } \usage{ installXLSXsupport(perl = "perl", verbose = FALSE) } \arguments{ \item{perl}{Path to perl interpreter (optional).} \item{verbose}{If \code{TRUE}, show additional messages during processing.} } \value{ Either \code{TRUE} indicating that the necessary perl modules have been successfully installed, or \code{FALSE} indicating that an error has occured. } \details{ This function calls the perl script 'install_modules.pl' located in the perl subdirectory of the gdata package directory (or inst/perl in the source package). This perl script attempts to use the perl 'CPAN' package, which should be included as part of most perl installations, to automatically download, compile, and install the Compress::Raw::Zlib and Spreadsheet::XLSX perl modules needed for read.xls to support support Excel 2007+ XLSX files into the gdata perl subdirectory. Since the perl modules are installed into the gdata installation directory, the perl modules will be available until the gdata package is replaced or removed. 
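For example, a minimal setup check along these lines can be run after installing or upgrading gdata (an illustration only; it assumes that perl is available on the search path): \preformatted{ ## install XLSX support only if it is not already available if (!("XLSX" \%in\% xlsFormats())) installXLSXsupport() }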
Because of this, \code{installXLSXsupport} will need to be re-run each time a new version of the gdata package is installed. Further, the binary Compress::Raw::Zlib package installed by \code{installXLSXsupport} is tied to the particular version of perl used to compile it; therefore, you will need to re-run \code{installXLSXsupport} if you install a different perl distribution. This installation process will fail if 1) perl is not available on the search path and the \code{perl} argument is not used to specify the path of the perl executable, 2) the perl installation is not properly configured for installing binary packages, 3) the CPAN module is not present, or 4) the C compiler specified by the perl installation is not present. \emph{In particular, \code{installXLSXsupport} will fail for the version of perl included with the current RTools.zip package, which is not correctly configured to allow installation of additional perl packages. (The RTools version of perl is installed in a different directory than the perl configuration files expect.)} The function \code{xlsFormats} can be used to see whether XLS and XLSX formats are supported by the currently available perl modules. } \seealso{ \code{\link{read.xls}}, \code{\link{xls2csv}}, \code{\link{xlsFormats}} } \examples{ \dontrun{ installXLSXsupport() } } \keyword{ misc } gdata/man/update.list.Rd0000644000175100001440000000530113115325266014651 0ustar hornikusers\name{update.list} %%\alias{update} \alias{update.list} %%\alias{update.data.frame} \title{ Update the elements of a list } \description{ Function to update the elements of a list to contain all of the named elements of a new list, overwriting elements with the same name, and (optionally) copying unnamed elements. } \usage{ %%update(object, %% ...) \method{update}{list}(object, new, unnamed=FALSE, ...) %%\method{update}{data.frame}(object, %% new, %% by, %% by.object=by, %% by.new=by, %% append=TRUE, %% verbose=TRUE, %% unnamed=FALSE, %% ...) } \arguments{ \item{object}{Object to be updated.} \item{new}{List or dataframe containing new elements.} \item{unnamed}{Logical. If \code{TRUE}, unnamed elements of \code{new} will be appended to \code{object}.} %% \item{by, by.object, by,new}{For dataframe objects, rows of \code{object} %% will be replaced by rows of \code{new} that have the same value %% for this \em{one} column. (See examples below for an example.) } %% \item{verbose}{Display details of changes performed.} \item{...}{optional method arguments (ignored)} } \note{ This method can be called directly, or via the S3 generic \code{update}. } \value{A new list constructed from the elements of \code{object} by merging the elements of \code{object} and \code{new}, with named items of \code{new} replacing the corresponding elements of \code{object}. Unnamed elements of \code{new} will be appended if \code{unnamed=TRUE}. %%For dataframe objects, entire columns are replaced unless \code{by}, \code{by.object}, or \code{by.new} are supplied, in which case replacement only occurs for rows with matching values for the columns listed by these parameters. The single argument \code{by} can be used to provide column names that are common to both \code{old} and \code{new} objects, while \code{by.old} and \code{by.new} specify column names specific to each object. } \author{ Gregory R.
Warnes \email{greg@warnes.net} } \seealso{ \code{\link[stats]{update}}, \code{\link[base]{merge}} } \examples{ old <- list(a=1,b="red",c=1.37) new <- list(b="green",c=2.4) update(old, new) update.list(old,new) # equivalent older <- list(a=0, b="orange", 4, 5, 6) newer <- list(b="purple", 7, 8, 9) update(older, newer) # ignores unnamed elements of newer update(older, newer, unnamed=TRUE) # appends unnamed elements of newer %%data(iris) %%iris$Species <- as.character(iris$Species) %%df.old <- cbind(iris[1:5,], letters=1:5) %%df.new <- df.old[3:5,] %%df.new$Petal.Width <- df.new$Petal.Width + 0.1 %% %%df.old %%df.new %%update(df.old, df.new, by=c(letters)) } \keyword{data} \keyword{manip} gdata/man/ls.funs.Rd0000644000175100001440000000156313003720416014003 0ustar hornikusers\name{ls.funs} \alias{ls.funs} \title{List function objects} \description{ Return a character vector giving the names of function objects in the specified environment. } \usage{ ls.funs(...) } \arguments{ \item{\dots}{Arguments passed to \code{ls}. See the help for \code{\link[base]{ls}} for details.} } \details{ This function calls \code{ls} and then returns a character vector containing only the names of only function objects. } \value{ character vector } \author{ Gregory R. Warnes \email{greg@warnes.net} } \seealso{ \code{\link[base]{ls}}, \code{\link[base]{is.function}} } \examples{ ## List functions defined in the global environment: ls.funs() ## List functions available in the base package: ls.funs("package:base") } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{misc} \keyword{environment} gdata/man/centerText.Rd0000644000175100001440000000225013003720415014531 0ustar hornikusers\name{centerText} \alias{centerText} \title{ Center Text Strings } \description{ Function to center text strings for display on the text console by prepending the necessary number of spaces to each element. } \usage{ centerText(x, width = getOption("width")) } \arguments{ \item{x}{Character vector containing text strings to be centered.} \item{width}{Desired display width. Defaults to the R display width given by \code{getOption("width")}. } } \details{ Each element will be centered individually by prepending the necessary number of spaces to center the text in the specified display width assuming a fixed width font. } \value{ Vector of character strings. } \author{ Gregory R. Warnes \email{greg@warnes.net} } \seealso{ \code{\link[base]{strwrap}} } \examples{ cat(centerText("One Line Test"), "\n\n") mText <-c("This", "is an example", " of a multiline text ", "with ", " leading", " and trailing ", "spaces.") cat("\n", centerText(mText), "\n", sep="\n") } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{manip} \keyword{character} gdata/man/nobs.Rd0000644000175100001440000000576513003720415013363 0ustar hornikusers% $Id: nobs.Rd 1549 2012-06-06 21:11:07Z warnes $ % % $Log$ % Revision 1.7 2005/09/12 15:42:45 nj7w % Updated Greg's email % % Revision 1.6 2005/06/09 14:20:25 nj7w % Updating the version number, and various help files to synchronize splitting of gregmisc bundle in 4 individual components. % % Revision 1.1.1.1 2005/05/25 22:07:33 nj7w % Initial entry for individual package gdata % % Revision 1.5 2002/09/23 13:59:30 warnes % - Modified all files to include CVS Id and Log tags. % % Revision 1.4 2002/03/26 19:29:15 warneg % % Updated to add ... parameter to function calls. 
% % Revision 1.3 2002/02/20 21:31:08 warneg % % Noted that specialized methods exist. % % Revision 1.2 2002/02/20 21:29:34 warneg % % Incorrectly had contents of nobs.R here instead of help text. Corrected. % % \name{nobs} \alias{nobs} % Now provided by stats \alias{nobs.data.frame} \alias{nobs.default} \alias{nobs.lm} % Now provided by stats %- Also NEED an `\alias' for EACH other topic documented here. \title{ Compute the Number of Non-missing Observations } \description{ Compute the number of non-missing observations. Provides a 'default' method to handle vectors, and a method for data frames. } \usage{ nobs(object, ...) \method{nobs}{default}(object, ...) \method{nobs}{data.frame}(object, ...) \method{nobs}{lm}(object, ...) } \arguments{ \item{object}{ Target Object } \item{\dots}{ Optional parameters (currently ignored)} } \details{ Calculate the number of observations in \code{object}. \itemize{ \item{For numeric vectors, this is simply the number of non-NA elements, as computed by \code{sum(!is.na(object))}. } \item{For dataframe objects, the result is a vector containing the number of non-NA elements of each column. } } The \code{nobs} and \code{nobs.lm} functions defined in gdata are simply aliases for the functions in the base R \code{stats} package, provided for backwards compatibility. } \value{ Either a single numeric value (for vectors) or a vector of numeric values (for data.frames) giving the number of non-missing values. } \note{ The base R package \code{stats} now provides an S3 dispatch function for \code{nobs}, and methods for objects of classes "lm", "glm", "nls" and "logLik", as well as a default method. Since these provide a subset of the functionality, the dispatch function (\code{nobs}) and the method for "lm" objects (\code{nobs.lm}) are, as of \code{gdata} version 2.10.1, simply aliases for the equivalent functions in the base R \code{stats} package. \code{gdata}'s default method (\code{nobs.default}) processes vectors itself and hands any other data/object types to \code{stats:::nobs.default}. } \author{ Gregory R. Warnes \email{greg@warnes.net} } \seealso{ \code{\link{is.na}}, \code{\link{length}} } \examples{ x <- c(1,2,3,5,NA,6,7,1,NA ) length(x) nobs(x) df <- data.frame(x=rnorm(100), y=rnorm(100)) df[1,1] <- NA df[1,2] <- NA df[2,1] <- NA nobs(df) fit <- lm(y ~ x, data=df) nobs(fit) } \keyword{attribute} gdata/man/first.Rd0000644000175100001440000000303613003720415013536 0ustar hornikusers\name{first} \alias{first} \alias{last} \alias{first<-} \alias{last<-} \title{Return first or last element of an object} \description{ Return first or last element of an object. These functions are convenience wrappers for \code{head(x, n=1, ...)} and \code{tail(x, n=1, ...)}. } \usage{ first(x, n=1, ...) last(x, n=1, ...) first(x, n=1, ...) <- value last(x, n=1, ...) <- value } \arguments{ \item{x}{data object} \item{n}{a single integer. If positive, size for the resulting object: number of elements for a vector (including lists), rows for a matrix or data frame or lines for a function. If negative, all but the 'n' last/first number of elements of 'x'.} \item{...}{arguments to be passed to or from other methods.} \item{value}{a vector of values to be assigned (should be of length \code{n})} } \value{ An object (usually) like 'x' but generally smaller. } \author{ Gregory R. Warnes \email{greg@warnes.net} } \seealso{ \code{\link[utils]{head}}, \code{\link[utils]{tail}}, \code{\link{left}}, \code{\link{right}} } \examples{ ## works for vectors..
v <- 1:10 first(v) last(v) first(v) <- 9 v last(v) <- 20 v ## and for lists l <- list(a=1, b=2, c=3) first(l) last(l) first(l) <- "apple" last(l) <- "bannana" l ## and data.frames df <- data.frame(a=1:2, b=3:4, c=5:6) first(df) last(df) first(df) <- factor(c("red","green")) last(df) <- list(c(20,30)) # note the enclosing list! df ## and matrixes m <- as.matrix(df) first(m) last(m) first(m) <- "z" last(m) <- "q" m } \keyword{ manip } gdata/man/ll.Rd0000644000175100001440000000401613003720416013016 0ustar hornikusers\name{ll} \alias{ll} \title{Describe Objects or Elements} \description{ Display name, class, size, and dimensions of each object in a given environment. Alternatively, if the main argument is a list-like object, its elements are listed and described. } \usage{ ll(pos=1, unit="KB", digits=0, dim=FALSE, sort=FALSE, class=NULL, invert=FALSE, ...) } \arguments{ \item{pos}{environment position number, environment name, data frame, list, model, or any object that \code{is.list}.} \item{unit}{unit for displaying object size: "bytes", "KB", "MB", or first letter.} \item{digits}{number of decimals to display when rounding object size.} \item{dim}{whether object dimensions should be returned.} \item{sort}{whether elements should be sorted by name.} \item{class}{character vector for limiting the output to specified classes.} \item{invert}{whether to invert the \code{class} filter, so specified classes are excluded.} \item{...}{passed to \code{ls}.} } \value{ A data frame with named rows and the following columns: \item{Class}{object class.} \item{KB}{object size \emph{(see note)}.} \item{Dim}{object dimensions \emph{(optional)}.} } \note{The name of the object size column is the same as the unit used.} \author{Arni Magnusson, with a contribution by Jim Rogers} \seealso{ \code{ll} is a verbose alternative to \code{\link{ls}} (objects in an environment) and \code{\link{names}} (elements in a list-like object). \code{\link{str}} and \code{\link{summary}} also describe elements in a list-like objects. \code{\link{env}} is a related function that describes all loaded environments. } \examples{ ll() ll(all=TRUE) ll("package:base") ll("package:base", class="function", invert=TRUE) data(infert) ll(infert) model <- glm(case~spontaneous+induced, family=binomial, data=infert) ll(model, dim=TRUE) ll(model, sort=TRUE) ll(model$family) } % Basics \keyword{data} \keyword{attribute} \keyword{classes} \keyword{list} % Programming \keyword{environment} \keyword{print} \keyword{utilities} gdata/man/unmatrix.Rd0000644000175100001440000000173513003720416014263 0ustar hornikusers\name{unmatrix} \alias{unmatrix} \title{Convert a matrix into a vector, with appropriate names} \description{ Convert a matrix into a vector, with element names constructed from the row and column names of the matrix. } \usage{ unmatrix(x, byrow=FALSE) } \arguments{ \item{x}{matrix} \item{byrow}{Logical. If \code{FALSE}, the elements within columns will be adjacent in the resulting vector, otherwise elements within rows will be adjacent.} } \value{ A vector with names constructed from the row and column names from the matrix. If the the row or column names are missing, ('r1', 'r2', ..,) or ('c1', 'c2', ..) will be used as appropriate. } \author{Gregory R. 
Warnes \email{greg@warnes.net} } \seealso{ \code{\link[base]{as.vector}} } \examples{ # simple, useless example m <- matrix( letters[1:10], ncol=5) m unmatrix(m) # unroll model output x <- rnorm(100) y <- rnorm(100, mean=3+5*x, sd=0.25) m <- coef( summary(lm( y ~ x )) ) unmatrix(m) } \keyword{manip} gdata/man/reorder.Rd0000644000175100001440000000607613003720416014061 0ustar hornikusers\name{reorder.factor} \alias{reorder.factor} \title{Reorder the Levels of a Factor} \description{ Reorder the levels of a factor } \usage{ \method{reorder}{factor}(x, X, FUN, ..., order=is.ordered(x), new.order, sort=mixedsort) } \arguments{ \item{x}{factor} \item{X}{auxiliary data vector} \item{FUN}{function to be applied to subsets of \code{X} determined by \code{x}, to determine factor order} \item{...}{optional parameters to \code{FUN}} \item{order}{logical value indicating whether the returned object should be an \code{\link{ordered}} factor} \item{new.order}{a vector of indexes or a vector of label names giving the order of the new factor levels} \item{sort}{function to use to sort the factor level names, used only when \code{new.order} is missing} } \details{ This function changes the order of the levels of a factor. It can do so via three different mechanisms, depending on whether \code{X} \emph{and} \code{FUN}, \code{new.order}, or \code{sort} are provided. If \code{X} \emph{and} \code{FUN} are provided: The data in \code{X} is grouped by the levels of \code{x} and \code{FUN} is applied. The groups are then sorted by this value, and the resulting order is used for the new factor level names. If \code{new.order} is a numeric vector, the new factor level names are constructed by reordering the factor levels according to the numeric values. If \code{new.order} is a character vector, \code{new.order} gives the list of new factor level names. In either case levels omitted from \code{new.order} will become missing (\code{NA}) values. If \code{sort} is provided (as it is by default): The new factor level names are generated by applying the function specified by \code{sort} to the existing factor level \emph{names}. With \code{sort=mixedsort} (the default) the factor levels are sorted so that combined numeric and character strings are sorted according to character rules on the character sections (including ignoring case), and according to numeric rules for the numeric sections. See \code{\link[gtools]{mixedsort}} for details. } \value{ A new factor with reordered levels } \author{Gregory R. Warnes \email{greg@warnes.net}} \seealso{ \code{\link{factor}} and \code{\link[stats]{reorder}} } \examples{ \dontshow{ set.seed(123456) } # Create a 4 level example factor trt <- factor( sample( c("PLACEBO", "300 MG", "600 MG", "1200 MG"), 100, replace=TRUE ) ) summary(trt) # Note that the levels are not in a meaningful order. # Change the order to something useful..
# - default "mixedsort" ordering trt2 <- reorder(trt) summary(trt2) # - using indexes: trt3 <- reorder(trt, new.order=c(4, 2, 3, 1)) summary(trt3) # - using label names: trt4 <- reorder(trt, new.order=c("PLACEBO", "300 MG", "600 MG", "1200 MG")) summary(trt4) # - using frequency trt5 <- reorder(trt, X=rnorm(100), FUN=mean) summary(trt5) # Drop out the '300 MG' level trt6 <- reorder(trt, new.order=c("PLACEBO", "600 MG", "1200 MG")) summary(trt6) } \keyword{ manip } gdata/man/case.Rd0000644000175100001440000000213013003720415013314 0ustar hornikusers\name{case} \alias{case} \title{Map elements of a vector according to the provided 'cases'} \description{ Map elements of a vector according to the provided 'cases'. This function is useful for mapping discrete values to factor labels and is the vector equivalent to the \code{switch} function. } \usage{ case(x, ..., default = NA) } \arguments{ \item{x}{Vector to be converted} \item{\dots}{Map of alternatives, specified as "name"=value} \item{default}{Value to be assigned to elements of \code{x} not matching any of the alternatives. Defaults to \code{NA}.} } \details{ This function is to \code{switch} what \code{ifelse} is to \code{if}, and is a convenience wrapper for \code{factor}. } \value{ A factor variables with each element of \code{x} mapped into the corresponding level of specified in the mapping. } \author{Gregory R. Warnes \email{greg@warnes.net}} \seealso{\code{factor}, \code{switch}, \code{ifelse}} \examples{ ## default = NA case( c(1,1,4,3), "a"=1, "b"=2, "c"=3) ## default = "foo" case( c(1,1,4,3), "a"=1, "b"=2, "c"=3, default="foo" ) } \keyword{ manip } gdata/man/mapLevels.Rd0000644000175100001440000001614013003720415014337 0ustar hornikusers% mapLevels.Rd %-------------------------------------------------------------------------- % What: Mapping levels % $Id: mapLevels.Rd,v 1.1 2006/03/29 13:47:10 ggorjan Exp ggorjan $ % Time-stamp: <2006-11-01 00:15:57 ggorjan> %-------------------------------------------------------------------------- \name{mapLevels} \alias{mapLevels} \alias{mapLevels.default} \alias{mapLevels.factor} \alias{mapLevels.character} \alias{mapLevels.list} \alias{mapLevels.data.frame} \alias{print.levelsMap} \alias{print.listLevelsMap} \alias{is.levelsMap} \alias{is.listLevelsMap} \alias{as.levelsMap} \alias{as.listLevelsMap} \alias{.checkLevelsMap} \alias{.checkListLevelsMap} \alias{"[.levelsMap"} \alias{"[.listLevelsMap"} \alias{c.levelsMap} \alias{c.listLevelsMap} \alias{unique.levelsMap} \alias{sort.levelsMap} \alias{mapLevels<-} \alias{mapLevels<-.default} \alias{mapLevels<-.factor} \alias{mapLevels<-.character} \alias{mapLevels<-.list} \alias{mapLevels<-.data.frame} \title{Mapping levels} \description{ \code{mapLevels} produces a map with information on levels and/or internal integer codes. As such can be conveniently used to store level mapping when one needs to work with internal codes of a factor and later transfrorm back to factor or when working with several factors that should have the same levels and therefore the same internal coding. 
} \usage{ mapLevels(x, codes=TRUE, sort=TRUE, drop=FALSE, combine=FALSE, \dots) mapLevels(x) <- value } \arguments{ \item{x}{object whose levels will be mapped, see Details} \item{codes}{boolean, create integer levelsMap (with internal codes) or character levelsMap (with level names)} \item{sort}{boolean, sort levels of character \code{x}, see Details} \item{drop}{boolean, drop unused levels} \item{combine}{boolean, combine levels, see Details} \item{\dots}{additional arguments for \code{sort}} \item{value}{levelsMap or listLevelsMap, output of \code{mapLevels} methods or constructed by the user, see Details} } \section{mapLevels}{ The \code{mapLevels} function was written primarily for work with \dQuote{factors}, but is generic and can also be used with \dQuote{character}, \dQuote{list} and \dQuote{data.frame}, while the \dQuote{default} method produces an error. Here the term levels is also used for unique character values. When \code{codes=TRUE}, an \bold{integer \dQuote{levelsMap}} with information on mapping internal codes to levels is produced. The output can be used to transform integer to factor or remap factor levels as described below. With \code{codes=FALSE}, a \bold{character \dQuote{levelsMap}} is produced. The latter is useful when one would like to remap factors or combine factors with some overlap in levels, as described in the \code{mapLevels<-} section and shown in the examples. The \code{sort} argument provides the possibility to sort the levels of a \dQuote{character} \code{x} and has no effect when \code{x} is a \dQuote{factor}. Argument \code{combine} has effect only in \dQuote{list} and \dQuote{data.frame} methods and when \code{codes=FALSE} i.e. with \bold{character \dQuote{levelsMaps}}. The latter condition is necessary, as it is not possible to combine maps with different mapping of level names and integer codes. It is assumed that a passed \dQuote{list} or \dQuote{data.frame} has only components for which methods exist. Otherwise an error is produced. } \section{levelsMap and listLevelsMap}{ Function \code{mapLevels} returns a map of levels. This map is of class \dQuote{levelsMap}, which is actually a list of length equal to the number of levels and with each component of length 1. Components need not be of length 1. There can be either an integer or a character \dQuote{levelsMap}. \bold{Integer \dQuote{levelsMap}} (when \code{codes=TRUE}) has names equal to levels and components equal to internal codes. \bold{Character \dQuote{levelsMap}} (when \code{codes=FALSE}) has names and components equal to levels. When \code{mapLevels} is applied to a \dQuote{list} or \dQuote{data.frame}, the result is of class \dQuote{listLevelsMap}, which is a list of \dQuote{levelsMap} components described previously. If \code{combine=TRUE}, the result is a \dQuote{levelsMap} with all levels in the \code{x} components. For ease of inspection, the print methods unlist the \dQuote{levelsMap} with proper names. \code{mapLevels<-} methods are fairly general and therefore additional convenience methods are implemented to ease the work with maps: \code{is.levelsMap} and \code{is.listLevelsMap}; \code{as.levelsMap} and \code{as.listLevelsMap} for coercion of user-defined maps; generic \code{"["} and \code{c} for both classes (argument \code{recursive} can be used in \code{c} to coerce \dQuote{listLevelsMap} to \dQuote{levelsMap}) and generic \code{unique} and \code{sort} (generic from \R 2.4) for \dQuote{levelsMap}. } \section{mapLevels<-}{ The workhorse under the \code{mapLevels<-} methods is \code{\link{levels<-}}.
\code{mapLevels<-} just control the assignment of \dQuote{levelsMap} (integer or character) or \dQuote{listLevelsMap} to \code{x}. The idea is that map values are changed to map names as indicated in \code{\link{levels}} examples. \bold{Integer \dQuote{levelsMap}} can be applied to \dQuote{integer} or \dQuote{factor}, while \bold{character \dQuote{levelsMap}} can be applied to \dQuote{character} or \dQuote{factor}. Methods for \dQuote{list} and \dQuote{data.frame} can work only on mentioned atomic components/columns and can accept either \dQuote{levelsMap} or \dQuote{listLevelsMap}. Recycling occurs, if length of \code{value} is not the same as number of components/columns of a \dQuote{list/data.frame}. } \value{ \code{mapLevels()} returns \dQuote{levelsMap} or \dQuote{listLevelsMap} objects as described in levelsMap and listLevelsMap section. Result of \code{mapLevels<-} is always a factor with remapped levels or a \dQuote{list/data.frame} with remapped factors. } \author{Gregor Gorjanc} \seealso{ \code{\link{factor}}, \code{\link{levels}} and \code{\link{unclass}} } \examples{ ## --- Integer levelsMap --- (f <- factor(sample(letters, size=20, replace=TRUE))) (mapInt <- mapLevels(f)) ## Integer to factor (int <- as.integer(f)) (mapLevels(int) <- mapInt) all.equal(int, f) ## Remap levels of a factor (fac <- factor(as.integer(f))) (mapLevels(fac) <- mapInt) # the same as levels(fac) <- mapInt all.equal(fac, f) ## --- Character levelesMap --- f1 <- factor(letters[1:10]) f2 <- factor(letters[5:14]) ## Internal codes are the same, but levels are not as.integer(f1) as.integer(f2) ## Get character levelsMaps and combine them mapCha1 <- mapLevels(f1, codes=FALSE) mapCha2 <- mapLevels(f2, codes=FALSE) (mapCha <- c(mapCha1, mapCha2)) ## Remap factors mapLevels(f1) <- mapCha # the same as levels(f1) <- mapCha mapLevels(f2) <- mapCha # the same as levels(f2) <- mapCha ## Internal codes are now "consistent" among factors as.integer(f1) as.integer(f2) ## Remap characters to get factors f1 <- as.character(f1); f2 <- as.character(f2) mapLevels(f1) <- mapCha mapLevels(f2) <- mapCha ## Internal codes are now "consistent" among factors as.integer(f1) as.integer(f2) } \keyword{misc} \keyword{manip} %-------------------------------------------------------------------------- % mapLevels.Rd ends here gdata/man/gdata-package.Rd0000644000175100001440000000146113003720416015061 0ustar hornikusers \name{gdata-package} \alias{gdata-package} \alias{gdata} \docType{package} \title{Various R programming tools for data manipulation} \description{ \pkg{gdata} package provides various \R programming tools for data manipulation. 
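As a quick orientation, the information sources listed in the details table below can be reached directly from an R session, for example:
\preformatted{
library(gdata)            # load the package
library(help = "gdata")   # DESCRIPTION file and index of help topics
vignette("mapLevels")     # open one of the package vignettes
}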
} \details{ The following are sources of information on \pkg{gdata} package: \tabular{ll}{ DESCRIPTION file\tab \code{library(help="gdata")}\cr \tab \cr This file\tab \code{package?gdata}\cr \tab \cr Vignette\tab \code{vignette("unknown")}\cr \tab \code{vignette("mapLevels")}\cr \tab \cr Some help files\tab \code{\link{read.xls}}\cr \tab \code{\link{write.fwf}}\cr \tab \cr News\tab \code{file.show(system.file("NEWS", package="gdata"))}\cr } } \keyword{package}gdata/man/read.xls.Rd0000644000175100001440000001407213003720415014131 0ustar hornikusers\name{read.xls} \alias{read.xls} \alias{xls2csv} \alias{xls2tab} \alias{xls2tsv} \alias{xls2sep} \title{Read Excel files} \description{Read a Microsoft Excel file into a data frame} \usage{ read.xls(xls, sheet=1, verbose=FALSE, pattern, na.strings=c("NA","#DIV/0!"), ..., method=c("csv","tsv","tab"), perl="perl") xls2csv(xls, sheet=1, verbose=FALSE, blank.lines.skip=TRUE, ..., perl="perl") xls2tab(xls, sheet=1, verbose=FALSE, blank.lines.skip=TRUE, ..., perl="perl") xls2tsv(xls, sheet=1, verbose=FALSE, blank.lines.skip=TRUE, ..., perl="perl") xls2sep(xls, sheet=1, verbose=FALSE, blank.lines.skip=TRUE, ..., method=c("csv","tsv","tab"), perl="perl") } \arguments{ \item{xls}{path to the Microsoft Excel file. Supports "http://", "https://", and "ftp://" URLS.} \item{sheet}{name or number of the worksheet to read} \item{verbose}{logical flag indicating whether details should be printed as the file is processed.} \item{pattern}{if specified, them skip all lines before the first containing this string} \item{perl}{name of the perl executable to be called.} \item{method}{intermediate file format, "csv" for comma-separated and "tab" for tab-separated} \item{na.strings}{a character vector of strings which are to be interpreted as 'NA' values. See \code{\link[utils]{read.table}} for details.} \item{blank.lines.skip}{logical flag indicating whether blank lines in the orginal file should be ignored.} \item{...}{additional arguments to read.table. The defaults for read.csv() are used.} } \value{ \code{"read.xls"} returns a data frame. \code{"xls2sep"} returns a temporary file in the specified format. \code{"xls2csv"} and \code{"xls2tab"} are simply wrappers for \code{"xls2sep"} specifying method as "csv" or "tab", respectively. } \details{ This function works translating the named Microsoft Excel file into a temporary .csv or .tab file, using the xls2csv or xls2tab Perl script installed as part of this (gdata) package. Caution: In the conversion to csv, strings will be quoted. This can be problem if you are trying to use the \code{comment.char} option of \code{read.table} since the first character of all lines (including comment lines) will be "\"" after conversion. If you have quotes in your data which confuse the process you may wish to use \code{read.xls(..., quote = '')}. This will cause the quotes to be regarded as data and you will have to then handle the quotes yourself after reading the file in. Caution: If you call \code{"xls2csv"} directly, is your responsibility to close and delete the file after using it. } \references{http://www.analytics.washington.edu/statcomp/downloads/xls2csv} \note{ Either a working version of Perl must be present in the executable search path, or the exact path of the perl executable must be provided via the \code{perl} argument. 
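For instance (the paths below are purely illustrative and will differ between systems, and "mydata.xls" is a hypothetical file name):
\preformatted{
## hypothetical Perl locations -- adjust to the local installation
dat <- read.xls("mydata.xls", perl = "C:/Strawberry/perl/bin/perl.exe")  # Windows
dat <- read.xls("mydata.xls", perl = "/usr/local/bin/perl")              # Unix-like
}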
See the examples below for an illustration.} \seealso{ \code{\link[utils]{read.csv}} } \examples{ # iris.xls is included in the gregmisc package for use as an example xlsfile <- file.path(path.package('gdata'),'xls','iris.xls') xlsfile iris <- read.xls(xlsfile) # defaults to csv format iris <- read.xls(xlsfile,method="csv") # specify csv format iris <- read.xls(xlsfile,method="tab") # specify tab format head(iris) # look at the top few rows \dontshow{ iris.1 <- read.xls(xlsfile) # defaults to csv format iris.2 <- read.xls(xlsfile,method="csv") # specify csv format iris.3 <- read.xls(xlsfile,method="tab") # specify tab format stopifnot(all.equal(iris.1, iris.2)) stopifnot(all.equal(iris.1, iris.3)) } \dontrun{ ## Example specifying exact Perl path for default MS-Windows install of ## ActiveState perl iris <- read.xls(xlsfile, perl="C:/perl/bin/perl.exe") } \dontrun{ ## Example specifying exact Perl path for Unix systems iris <- read.xls(xlsfile, perl="/usr/bin/perl") ## finding perl ## (read.xls automatically calls findPerl so this is rarely needed) perl <- gdata:::findPerl("perl") iris <- read.xls(xlsfile, perl=perl) } \dontrun{ ## read xls file from net nba.url <- "http://mgtclass.mgt.unm.edu/Bose/Excel/Tutorial.05/Cases/NBA.xls" nba <- read.xls(nba.url) } \dontrun{ ## read xls file ignoring all lines prior to first containing State crime.url <- "http://www.jrsainfo.org/jabg/state_data2/Tribal_Data00.xls" crime <- read.xls(crime.url, pattern = "State") ## use of xls2csv - open con, print two lines, close con con <- xls2csv(crime.url) print(readLines(con, 2)) file.remove(summary(con)$description) } ## Examples demonstrating selection of specific 'sheets' ## from the example XLS file 'ExampleExcelFile.xls' exampleFile <- file.path(path.package('gdata'),'xls', 'ExampleExcelFile.xls') exampleFile2007 <- file.path(path.package('gdata'),'xls', 'ExampleExcelFile.xlsx') ## see the number and names of sheets: sheetCount(exampleFile) if( 'XLSX' \%in\% xlsFormats() ) ## if XLSX is supported.. sheetCount(exampleFile2007) ## show names of shets in the file sheetNames(exampleFile) if( 'XLSX' \%in\% xlsFormats() ) ## if XLSX is supported.. sheetNames(exampleFile2007) data <- read.xls(exampleFile) ## default is first worksheet data <- read.xls(exampleFile, sheet=2) ## second worksheet by number data <- read.xls(exampleFile, sheet="Sheet Second",v=TRUE) ## and by name ## load the third worksheet, skipping the first two non-data lines... if( 'XLSX' \%in\% xlsFormats() ) ## if XLSX is supported.. data <- read.xls(exampleFile2007, sheet="Sheet with initial text", skip=2) ## load a file containing data and column names using latin-1 ## characters latinFile <- file.path(path.package('gdata'),'xls','latin-1.xls') latin1 <- read.xls(latinFile, fileEncoding="latin1") colnames(latin1) } \author{ Gregory R. Warnes \email{greg@warnes.net}, Jim Rogers \email{james.a.rogers@pfizer.com}, and Gabor Grothendiek \email{ggrothendieck@gmail.com}. } \keyword{file} gdata/man/drop.levels.Rd0000644000175100001440000000213513003720415014643 0ustar hornikusers% $Id: drop.levels.Rd 1096 2007-06-06 10:19:15Z ggorjan $ \name{drop.levels} \alias{drop.levels} \title{Drop unused factor levels} \description{Drop unused levels in a factor} \usage{ drop.levels(x, reorder=TRUE, ...) 
} \arguments{ \item{x}{object to be processed} \item{reorder}{should factor levels be reordered using \code{\link{reorder.factor}}?} \item{...}{additional arguments to \code{\link{reorder.factor}}} } \details{ \code{drop.levels} is a generic function, whose default method does nothing, while the method for a factor \code{x} drops all unused levels. Dropping is done with \code{x[, drop=TRUE]}. There are also convenient methods for \code{list} and \code{data.frame}, where all unused levels are dropped in all factors (one by one) in a \code{list} or a \code{data.frame}. } \value{Input object without unused levels.} \author{Jim Rogers \email{james.a.rogers@pfizer.com} and Gregor Gorjanc} \examples{ f <- factor(c("A", "B", "C", "D"))[1:3] drop.levels(f) l <- list(f=f, i=1:3, c=c("A", "B", "D")) drop.levels(l) df <- as.data.frame(l) str(df) str(drop.levels(df)) } \keyword{manip} gdata/man/gdata-defunct.Rd0000644000175100001440000000137413003720415015120 0ustar hornikusers\name{gdata-defunct} \alias{aggregate.table} \title{Defunct Functions in Package 'gdata'} \description{ The functions or variables listed here are no longer part of 'gdata'. } \usage{ aggregate.table(x, by1, by2, FUN=mean, ...) } %- maybe also `usage' for other objects documented here. \arguments{ \item{x}{ data to be summarized } \item{by1}{ first grouping factor. } \item{by2}{ second grouping factor. } \item{FUN}{ a scalar function to compute the summary statistics which can be applied to all data subsets. Defaults to \code{mean}.} \item{\dots}{ Optional arguments for \code{FUN}. } } \details{ \code{aggregate.table(x, by1, by2, FUN=mean, ...)} should be replaced by \code{tapply(X=x, INDEX=list(by1, by2), FUN=FUN, ...)}. } gdata/man/mv.Rd0000644000175100001440000000132713003720415013032 0ustar hornikusers\name{mv} \alias{mv} \title{ Rename an Object } \description{ Rename an object. } \usage{ mv(from, to, envir = parent.frame()) } \arguments{ \item{from}{Character scalar giving the source object name} \item{to}{Character scalar giving the destination object name} \item{envir}{Environment in which to do the rename} } \details{ This function renames an object: the value of the object named \code{from} is assigned to the name given by \code{to}, and the object named \code{from} is then removed. } \value{ Invisibly returns the value of the object. } \author{ Gregory R. Warnes \email{greg@warnes.net} } \seealso{ \code{\link[base]{rm}}, \code{\link[base]{assign}} } \examples{ a <- 1:10 a mv("a", "b") b exists("a") } \keyword{environment} \keyword{data} gdata/man/ans.Rd0000644000175100001440000000163513003720416013174 0ustar hornikusers\name{ans} \alias{ans} \title{Value of Last Evaluated Expression} \usage{ ans() } \description{ The function returns the value of the last evaluated \emph{top-level} expression, which is always assigned to \code{.Last.value} (in \code{package:base}). } \details{ This function retrieves \code{.Last.value}. For more details see \code{\link[base]{.Last.value}}. } \value{ \code{.Last.value} } \seealso{ \code{\link[base]{.Last.value}}, \code{\link[base]{eval}} } \author{Liviu Andronic} \examples{ 2+2 # Trivial calculation... ans() # See the answer again gamma(1:15) # Some intensive calculation... fac14 <- ans() # store the results into a variable rnorm(20) # Generate some standard normal values ans()^2 # Convert to Chi-square(1) values... 
stem(ans()) # Now show a stem-and-leaf table } \keyword{programming} gdata/man/resample.Rd0000644000175100001440000000246713003720416014227 0ustar hornikusers\name{resample} \alias{resample} \title{Consistent Random Samples and Permutations} \description{ \code{resample} takes a sample of the specified size from the elements of \code{x} using either with or without replacement. } \usage{ resample(x, size, replace = FALSE, prob = NULL) } \arguments{ \item{x}{ A numeric, complex, character or logical vector from which to choose.} \item{size}{Non-negative integer giving the number of items to choose.} \item{replace}{Should sampling be with replacement?} \item{prob}{A vector of probability weights for obtaining the elements of the vector being sampled.} } \details{ \code{resample} differs from the S/R \code{sample} function in \code{resample} always considers \code{x} to be a vector of elements to select from, while \code{sample} treats a vector of length one as a special case and samples from \code{1:x}. Otherwise, the functions have identical behavior. } \value{ Vector of the same length as the input, with the elements permuted. } \author{Gregory R. Warnes \email{greg@warnes.net} } \seealso{ \code{\link{sample}} } \examples{ ## sample behavior differs if first argument is scalar vs vector sample( c(10) ) sample( c(10,10) ) ## resample has the consistent behavior for both cases resample( c(10) ) resample( c(10,10) ) } \keyword{misc} gdata/man/Args.Rd0000644000175100001440000000163113003720415013302 0ustar hornikusers\name{Args} \alias{Args} \title{Describe Function Arguments} \description{ Display function argument names and corresponding default values, formatted in two columns for easy reading. } \usage{ Args(name, sort=FALSE) } \arguments{ \item{name}{a function or function name.} \item{sort}{whether arguments should be sorted.} } \value{ A data frame with named rows and a single column called \code{value}, containing the default value of each argument. } \note{ Primitive functions like \code{sum} and \code{all} have no formal arguments. See the \code{\link{formals}} help page. } \author{Arni Magnusson} \seealso{ \code{Args} is a verbose alternative to \code{\link{args}}, based on \code{\link{formals}}. \code{\link{help}} also describes function arguments. } \examples{ Args(glm) Args(scan) Args(legend, sort=TRUE) } % Programming \keyword{programming} \keyword{utilities} \keyword{documentation} gdata/man/unknown.Rd0000644000175100001440000001325713003720415014114 0ustar hornikusers% unknown.Rd %-------------------------------------------------------------------------- % What: Change given unknown value to NA and vice versa man page % $Id: unknown.Rd 1357 2009-08-20 14:54:44Z warnes $ % Time-stamp: <2007-08-17 20:18:54 ggorjan> %-------------------------------------------------------------------------- \name{unknownToNA} \alias{isUnknown} \alias{isUnknown.default} \alias{isUnknown.POSIXlt} \alias{isUnknown.list} \alias{isUnknown.data.frame} \alias{isUnknown.matrix} \alias{unknownToNA} \alias{unknownToNA.default} \alias{unknownToNA.factor} \alias{unknownToNA.list} \alias{unknownToNA.data.frame} \alias{NAToUnknown} \alias{NAToUnknown.default} \alias{NAToUnknown.factor} \alias{NAToUnknown.list} \alias{NAToUnknown.data.frame} \concept{missing} \title{Change unknown values to NA and vice versa} \description{ Unknown or missing values (\code{NA} in \R) can be represented in various ways (as 0, 999, etc.) in different programs. 
\code{isUnknown}, \code{unknownToNA}, and \code{NAToUnknown} can help to change unknown values to \code{NA} and vice versa. } \usage{ isUnknown(x, unknown=NA, \dots) unknownToNA(x, unknown, warning=FALSE, \dots) NAToUnknown(x, unknown, force=FALSE, call.=FALSE, \dots) } \arguments{ \item{x}{generic, object with unknown value(s)} \item{unknown}{generic, value used instead of \code{NA}} \item{warning}{logical, issue warning if \code{x} already has \code{NA}} \item{force}{logical, force to apply already existing value in \code{x}} \item{\dots}{arguments pased to other methods (as.character for POSIXlt in case of isUnknown)} \item{call.}{logical, look in \code{\link{warning}}} } \details{ This functions were written to handle different variants of \dQuote{other \code{NA}} like representations that are usually used in various external data sources. \code{unknownToNA} can help to change unknown values to \code{NA} for work in \R, while \code{NAToUnknown} is meant for the opposite and would usually be used prior to export of data from \R. \code{isUnknown} is utility function for testing for unknown values. All functions are generic and the following classes were tested to work with latest version: \dQuote{integer}, \dQuote{numeric}, \dQuote{character}, \dQuote{factor}, \dQuote{Date}, \dQuote{POSIXct}, \dQuote{POSIXlt}, \dQuote{list}, \dQuote{data.frame} and \dQuote{matrix}. For others default method might work just fine. \code{unknownToNA} and \code{isUnknown} can cope with multiple values in \code{unknown}, but those should be given as a \dQuote{vector}. If not, coercing to vector is applied. Argument \code{unknown} can be feed also with \dQuote{list} in \dQuote{list} and \dQuote{data.frame} methods. If named \dQuote{list} or \dQuote{vector} is passed to argument \code{unknown} and \code{x} is also named, matching of names will occur. Recycling occurs in all \dQuote{list} and \dQuote{data.frame} methods, when \code{unknown} argument is not of the same length as \code{x} and \code{unknown} is not named. Argument \code{unknown} in \code{NAToUnknown} should hold value that is not already present in \code{x}. If it does, error is produced and one can bypass that with \code{force=TRUE}, but be warned that there is no way to distinguish values after this action. Use at your own risk! Anyway, warning is issued about new value in \code{x}. Additionally, caution should be taken when using \code{NAToUnknown} on factors as additional level (value of \code{unknown}) is introduced. Then, as expected, \code{unknownToNA} removes defined level in \code{unknown}. If \code{unknown="NA"}, then \code{"NA"} is removed from factor levels in \code{unknownToNA} due to consistency with conversions back and forth. Unknown representation in \code{unknown} should have the same class as \code{x} in \code{NAToUnknown}, except in factors, where \code{unknown} value is coerced to character anyway. Silent coercing is also applied, when \dQuote{integer} and \dQuote{numeric} are in question. Otherwise warning is issued and coercing is tried. If that fails, \R introduces \code{NA} and the goal of \code{NAToUnknown} is not reached. \code{NAToUnknown} accepts only single value in \code{unknown} if \code{x} is atomic, while \dQuote{list} and \dQuote{data.frame} methods accept also \dQuote{vector} and \dQuote{list}. \dQuote{list/data.frame} methods can work on many components/columns. To reduce the number of needed specifications in \code{unknown} argument, default unknown value can be specified with component ".default". 
This matches component/column ".default" as well as all other undefined components/columns! Look in examples. } \value{ \code{unknownToNA} and \code{NAToUnknown} return modified \code{x}. \code{isUnknown} returns logical values for object \code{x}. } \author{Gregor Gorjanc} \seealso{\code{\link{is.na}}} \examples{ xInt <- c(0, 1, 0, 5, 6, 7, 8, 9, NA) isUnknown(x=xInt, unknown=0) isUnknown(x=xInt, unknown=c(0, NA)) (xInt <- unknownToNA(x=xInt, unknown=0)) (xInt <- NAToUnknown(x=xInt, unknown=0)) xFac <- factor(c("0", 1, 2, 3, NA, "NA")) isUnknown(x=xFac, unknown=0) isUnknown(x=xFac, unknown=c(0, NA)) isUnknown(x=xFac, unknown=c(0, "NA")) isUnknown(x=xFac, unknown=c(0, "NA", NA)) (xFac <- unknownToNA(x=xFac, unknown="NA")) (xFac <- NAToUnknown(x=xFac, unknown="NA")) xList <- list(xFac=xFac, xInt=xInt) isUnknown(xList, unknown=c("NA", 0)) isUnknown(xList, unknown=list("NA", 0)) tmp <- c(0, "NA") names(tmp) <- c(".default", "xFac") isUnknown(xList, unknown=tmp) tmp <- list(.default=0, xFac="NA") isUnknown(xList, unknown=tmp) (xList <- unknownToNA(xList, unknown=tmp)) (xList <- NAToUnknown(xList, unknown=999)) } \keyword{manip} \keyword{NA} %-------------------------------------------------------------------------- % unknown.Rd ends here gdata/man/startsWith.Rd0000644000175100001440000000275713111636047014603 0ustar hornikusers\name{startsWith} \alias{startsWith} \title{ Determine if a character string "starts with" with the specified characters. } \description{ Determine if a character string "starts with" with the specified characters. } \usage{ startsWith(str, pattern, trim=FALSE, ignore.case=FALSE) } \arguments{ \item{str}{character vector to test} \item{pattern}{characters to check for} \item{trim}{Logical flag indicating whether leading whitespace should be removed from \code{str} before testing for a match.} \item{ignore.case}{Logical flag indicating whether case should be ignored when testing for a match.} } \details{ This function returns TRUE for each element of the vector \code{str} where \code{pattern} occurs at the beginning of the string. If \code{trim} is TRUE, leading whitespace is removed from the elements of \code{str} before the test is performed. If \code{ignore.case} is TRUE, character case is ignored. } \value{ Boolean vector of the same length as \code{str}. } \author{ Gregory R. Warnes \email{greg@warnes.net} } \seealso{ \code{\link[base]{substr}}, \code{\link{trim}} } \examples{ ## simplest example: startsWith( 'Testing', 'Test') ## vector examples s <- c('Testing', ' Testing', 'testing', 'Texting') names(s) <- s startsWith(s, 'Test') # ' Testing', 'testing', and 'Texting' do not match startsWith(s, 'Test', trim=TRUE) # Now ' Testing' matches startsWith(s, 'Test', ignore.case=TRUE) # Now 'testing' matches } \keyword{character} gdata/man/bindData.Rd0000644000175100001440000000531313003720416014116 0ustar hornikusers% bindData.Rd %-------------------------------------------------------------------------- % What: Bind two data frames - help % $Id$ % Time-stamp: <2008-12-30 13:49:50 ggorjan> %-------------------------------------------------------------------------- \name{bindData} \alias{bindData} \title{Bind two data frames into a multivariate data frame} \description{ Usually data frames represent one set of variables and one needs to bind/join them for multivariate analysis. When \code{\link{merge}} is not the approriate solution, \code{bindData} might perform an appropriate binding for two data frames. 
This is especially usefull when some variables are measured once, while others are repeated. } \usage{ bindData(x, y, common) } \arguments{ \item{x}{data.frame} \item{y}{data.frame} \item{common}{character, list of column names that are common to both input data frames} } \details{ Data frames are joined in a such a way, that the new data frame has \eqn{c + (n_1 - c) + (n_2 - c)} columns, where \eqn{c} is the number of common columns, and \eqn{n_1} and \eqn{n_2} are the number of columns in the first and in the second data frame, respectively. } \value{ A data frame. } \author{Gregor Grojanc} \seealso{ \code{\link[base]{merge}}, \code{\link{wideByFactor}} } \examples{ n1 <- 6 n2 <- 12 n3 <- 4 ## Single trait 1 num <- c(5:n1, 10:13) (tmp1 <- data.frame(y1=rnorm(n=n1), f1=factor(rep(c("A", "B"), n1/2)), ch=letters[num], fa=factor(letters[num]), nu=(num) + 0.5, id=factor(num), stringsAsFactors=FALSE)) ## Single trait 2 with repeated records, some subjects also in tmp1 num <- 4:9 (tmp2 <- data.frame(y2=rnorm(n=n2), f2=factor(rep(c("C", "D"), n2/2)), ch=letters[rep(num, times=2)], fa=factor(letters[rep(c(num), times=2)]), nu=c((num) + 0.5, (num) + 0.25), id=factor(rep(num, times=2)), stringsAsFactors=FALSE)) ## Single trait 3 with completely distinct set of subjects num <- 1:4 (tmp3 <- data.frame(y3=rnorm(n=n3), f3=factor(rep(c("E", "F"), n3/2)), ch=letters[num], fa=factor(letters[num]), nu=(num) + 0.5, id=factor(num), stringsAsFactors=FALSE)) ## Combine all datasets (tmp12 <- bindData(x=tmp1, y=tmp2, common=c("id", "nu", "ch", "fa"))) (tmp123 <- bindData(x=tmp12, y=tmp3, common=c("id", "nu", "ch", "fa"))) ## Sort by subject tmp123[order(tmp123$ch), ] } \keyword{manip} \keyword{misc} %-------------------------------------------------------------------------- % bindData.Rd ends heregdata/man/rename.vars.Rd0000644000175100001440000000360213003720416014630 0ustar hornikusers% $Id: rename.vars.Rd 1435 2010-05-02 06:11:26Z warnes $ % % $Log$ % Revision 1.9 2005/09/12 15:42:45 nj7w % Updated Greg's email % % Revision 1.8 2005/06/09 14:20:25 nj7w % Updating the version number, and various help files to synchronize splitting of gregmisc bundle in 4 individual components. % % Revision 1.1.1.1 2005/05/25 22:07:33 nj7w % Initial entry for individual package gdata % % Revision 1.7 2004/04/01 20:23:15 warnes % Add function remove.vars(). % % Revision 1.6 2002/09/23 13:59:30 warnes % - Modified all files to include CVS Id and Log tags. % % \name{rename.vars} \alias{rename.vars} \alias{remove.vars} \title{Remove or rename variables in a dataframe } \description{ Remove or rename a variables in a data frame. } \usage{ rename.vars(data, from="", to="", info=TRUE) remove.vars(data, names="", info=TRUE) } \arguments{ \item{data}{ dataframe to be modified. } \item{from}{ character vector containing the current name of each variable to be renamed.} \item{to}{ character vector containing the new name of each variable to be renamed.} \item{names}{ character vector containing the names of variables to be removed.} \item{info}{ boolean value indicating whether to print details of the removal/renaming. Defaults to TRUE.} } %\details{ % ~~ If necessary, more details than the __description__ above ~~ %} \value{ The updated data frame with variables listed in \code{from} renamed to the corresponding element of \code{to}. } \author{Code by Don MacQueen \email{macq\@llnl.gov}. Documentation by Gregory R. 
Warnes \email{greg@warnes.net} } \seealso{ \code{\link{names}}, \code{\link{colnames}}, \code{\link{data.frame}} } \examples{ data <- data.frame(x=1:10,y=1:10,z=1:10) names(data) data <- rename.vars(data, c("x","y","z"), c("first","second","third")) names(data) data <- remove.vars(data, "second") names(data) } \keyword{ manip } gdata/man/trim.Rd0000644000175100001440000000331513115327432013370 0ustar hornikusers\name{trim} \alias{trim} \title{Remove leading and trailing spaces from character strings} \description{ Remove leading and trailing spaces from character strings and other related objects. } \usage{ trim(s, recode.factor=TRUE, \dots) } \arguments{ \item{s}{object to be processed} \item{recode.factor}{should levels of a factor be recoded, see below} \item{\dots}{arguments passed to other methods, currently only to \code{\link{reorder.factor}} for factors} } \details{ \code{trim} is a generic function, where default method does nothing, while method for character \code{s} trims its elements and method for factor \code{s} trims \code{\link{levels}}. There are also methods for \code{list} and \code{data.frame}. Trimming character strings can change the sort order in some locales. For factors, this can affect the coding of levels. By default, factor levels are recoded to match the trimmed sort order, but this can be disabled by setting \code{recode.factor=FALSE}. Recoding is done with \code{\link{reorder.factor}}. } \value{ \code{s} with all leading and trailing spaces removed in its elements. } \author{Gregory R. Warnes \email{greg@warnes.net} with contributions by Gregor Gorjanc} \seealso{ \code{\link[base]{trimws}}, \code{\link[base]{sub}},\code{\link[base]{gsub}} as well as argument \code{strip.white} in \code{\link{read.table}} and \code{\link{reorder.factor}} } \examples{ s <- " this is an example string " trim(s) f <- factor(c(s, s, " A", " B ", " C ", "D ")) levels(f) trim(f) levels(trim(f)) trim(f, recode.factor=FALSE) levels(trim(f, recode.factor=FALSE)) l <- list(s=rep(s, times=6), f=f, i=1:6) trim(l) df <- as.data.frame(l) trim(df) } \keyword{manip} \keyword{character} gdata/man/trimSum.Rd0000644000175100001440000000261013003720416014045 0ustar hornikusers% trimSum.Rd %-------------------------------------------------------------------------- % What: Sum trimmed values - help % $Id$ % Time-stamp: <2008-12-20 00:15:57 ggorjan> %-------------------------------------------------------------------------- \name{trimSum} \alias{trimSum} \title{Trim a vector such that the last/first value represents the sum of trimmed values} \description{\code{trimSum} trims (shortens) a vector in such a way that the last or first value represents the sum of trimmed values. User needs to specify the desired length of a trimmed vector. 
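Conceptually, and ignoring the \code{right} and \code{na.rm} options, the operation amounts to the following rough sketch:
\preformatted{
x <- 1:10
n <- 5
c(x[seq_len(n - 1)], sum(x[n:length(x)]))  # same idea as trimSum(x, n = 5)
}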
} \usage{trimSum(x, n, right=TRUE, na.rm=FALSE, \dots)} \arguments{ \item{x}{numeric, a vector of numeric values} \item{n}{numeric, desired length of the output} \item{right}{logical, trim on the right/bottom or the left/top side} \item{na.rm}{logical, remove \code{NA} values when applying a function} \item{\dots}{arguments passed to other methods - currently not used} } \value{Trimmed vector with a last/first value representing the sum of trimmed values} \author{Gregor Gorjanc} \seealso{\code{\link[gdata]{trim}}} \examples{ x <- 1:10 trimSum(x, n=5) trimSum(x, n=5, right=FALSE) x[9] <- NA trimSum(x, n=5) trimSum(x, n=5, na.rm=TRUE) } \keyword{manip} %-------------------------------------------------------------------------- % trimSum.Rd ends here gdata/man/left.Rd0000644000175100001440000000136513003720416013345 0ustar hornikusers\name{left} \alias{right} \alias{left} \title{Return the leftmost or rightmost columns of a matrix or dataframe} \description{ Return the leftmost or rightmost or columns of a matrix or dataframe } \usage{ right(x, n = 6) left(x, n=6) } \arguments{ \item{x}{Matrix or dataframe} \item{n}{Number of columns to return} } \value{ An object consisting of the leftmost or rightmost \code{n} columns of \code{x}. } \author{ Gregory R. Warnes \email{greg@warnes.net} } \seealso{ \code{\link{first}}, \code{\link{last}}, \code{\link[utils]{head}}, \code{\link[utils]{tail}} } \examples{ m <- matrix( 1:100, ncol=10 ) colnames(m) <- paste("Col",1:10, sep="_") left(m) right(m) d <- as.data.frame(m) left(d) right(d) } \keyword{ manip } gdata/man/upperTriangle.Rd0000644000175100001440000000445413003720415015235 0ustar hornikusers\name{upperTriangle} \alias{upperTriangle} \alias{upperTriangle<-} \alias{lowerTriangle} \alias{lowerTriangle<-} \title{Extract or replace the upper/lower triangular portion of a matrix} \description{ Extract or replace the upper/lower triangular portion of a matrix } \usage{ upperTriangle(x, diag=FALSE, byrow=FALSE) upperTriangle(x, diag=FALSE, byrow=FALSE) <- value lowerTriangle(x, diag=FALSE, byrow=FALSE) lowerTriangle(x, diag=FALSE, byrow=FALSE) <- value } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x}{Matrix} \item{diag}{Logical. If \code{TRUE}, include the matrix diagonal.} \item{byrow}{Logical. If \code{FALSE}, return/replace elements in column-wise order. If \code{TRUE}, return/replace elements in row-wise order.} \item{value}{Either a single value or a vector of length equal to that of the current upper/lower triangular. Should be of a mode which can be coerced to that of \code{x}.} } \value{ \code{upperTriangle(x)} and \code{lowerTriangle(x)} return the upper or lower triangle of matrix x, respectively. The assignment forms replace the upper or lower triangular area of the matrix with the provided value(s). } \note{ By default, the elements are returned/replaced in R's default column-wise order. Thus \preformatted{ lowerTriangle(x) <- upperTriangle(x)} will not yield a symmetric matrix. Instead use: \preformatted{ lowerTriangle(x) <- upperTriangle(x, byrow=TRUE)} or equivalently: \preformatted{ lowerTriangle(x, byrow=TRUE) <- upperTriangle(x)} } \author{Gregory R. 
Warnes \email{greg@warnes.net}} \seealso{ \code{\link[base]{diag}}, \code{\link[base]{lower.tri}}, \code{\link[base]{upper.tri}} } \examples{ x <- matrix( 1:25, nrow=5, ncol=5) x upperTriangle(x) upperTriangle(x, diag=TRUE) upperTriangle(x, diag=TRUE, byrow=TRUE) lowerTriangle(x) lowerTriangle(x, diag=TRUE) lowerTriangle(x, diag=TRUE, byrow=TRUE) upperTriangle(x) <- NA x upperTriangle(x, diag=TRUE) <- 1:15 x lowerTriangle(x) <- NA x lowerTriangle(x, diag=TRUE) <- 1:15 x ## Copy lower triangle into upper triangle to make ## the matrix (diagonally) symmetric x <- matrix(LETTERS[1:25], nrow=5, ncol=5, byrow=TRUE) x lowerTriangle(x) = upperTriangle(x, byrow=TRUE) x } \keyword{array} gdata/man/wideByFactor.Rd0000644000175100001440000000413713003720416014775 0ustar hornikusers% wideByFactor.Rd %-------------------------------------------------------------------------- % What: Reshape by factor levels - help % $Id$ % Time-stamp: <2008-12-30 13:49:50 ggorjan> %-------------------------------------------------------------------------- \name{wideByFactor} \alias{wideByFactor} \title{Create multivariate data by a given factor} \description{ \code{wideByFactor} modifies data.frame in such a way that variables are \dQuote{separated} into several columns by factor levels. } \usage{ wideByFactor(x, factor, common, sort=TRUE, keepFactor=TRUE) } \arguments{ \item{x}{data frame} \item{factor}{character, column name of a factor by which variables will be divided} \item{common}{character, column names of (common) columns that should not be divided} \item{sort}{logical, sort resulting data frame by factor levels} \item{keepFactor}{logical, keep the \sQuote{factor} column} } \details{ Given data frame is modified in such a way, that output represents a data frame with \eqn{c + f + n * v} columns, where \eqn{c} is a number of common columns for all levels of a factor, \eqn{f} is a factor column, \eqn{n} is a number of levels in factor \eqn{f} and \eqn{v} is a number of variables that should be divided for each level of a factor. Number of rows stays the same! } \value{ A data frame where divided variables have sort of \dQuote{diagonalized} structure } \author{Gregor Gorjanc} \seealso{ \code{\link[stats]{reshape}} in the \pkg{stats} package, \code{\link[reshape]{melt}} and \code{\link[reshape]{cast}} in the \pkg{reshape} package } \examples{ n <- 10 f <- 2 tmp <- data.frame(y1=rnorm(n=n), y2=rnorm(n=n), f1=factor(rep(letters[1:f], n/2)), f2=factor(c(rep(c("M"), n/2), rep(c("F"), n/2))), c1=1:n, c2=2*(1:n)) wideByFactor(x=tmp, factor="f1", common=c("c1", "c2", "f2")) wideByFactor(x=tmp, factor="f1", common=c("c1", "c2")) } \keyword{manip} \keyword{misc} %-------------------------------------------------------------------------- % wideByFactor.Rd ends heregdata/man/frameApply.Rd0000644000175100001440000000753213003720415014514 0ustar hornikusers% $Id% % \name{frameApply} \alias{frameApply} \title{Subset analysis on data frames} \description{Apply a function to row subsets of a data frame. } \usage{ frameApply(x, by=NULL, on=by[1], fun=function(xi) c(Count=nrow(xi)), subset=TRUE, simplify=TRUE, byvar.sep="\\\\$\\\\@\\\\$", ...) } \arguments{ \item{x}{a data frame} \item{by}{names of columns in \code{x} specifying the variables to use to form the subgroups. None of the \code{by} variables should have the name "sep" (you will get an error if one of them does; a bit of laziness in the code). Unused levels of the \code{by} variables will be dropped. 
Use \code{by = NULL} (the default) to indicate that all of the data is to be treated as a single (trivial) subgroup.} \item{on}{names of columns in \code{x} specifying columns over which \code{fun} is to be applied. These can include columns specified in \code{by}, (as with the default) although that is not usually the case.} \item{fun}{a function that can operate on data frames that are row subsets of \code{x[on]}. If \code{simplify = TRUE}, the return value of the function should always be either a try-error (see \code{\link{try}}), or a vector of fixed length (i.e. same length for every subset), preferably with named elements.} \item{subset}{logical vector (can be specified in terms of variables in data). This row subset of \code{x} is taken before doing anything else.} \item{simplify}{logical. If TRUE (the default), return value will be a data frame including the \code{by} columns and a column for each element of the return vector of \code{fun}. If FALSE, the return value will be a list, sometimes necessary for less structured output (see description of return value below).} \item{byvar.sep}{character. This can be any character string not found anywhere in the values of the \code{by} variables. The \code{by} variables will be pasted together using this as the separator, and the result will be used as the index to form the subgroups. } \item{...}{additional arguments to \code{fun}.} } \value{a data frame if \code{simplify = TRUE} (the default), assuming there is sufficiently structured output from \code{fun}. If \code{simplify = FALSE} and \code{by} is not NULL, the return value will be a list with two elements. The first element, named "by", will be a data frame with the unique rows of \code{x[by]}, and the second element, named "result" will be a list where the ith component gives the result for the ith row of the "by" element. } \details{This function accomplishes something similar to \code{\link{by}}. The main difference is that \code{frameApply} is designed to return data frames and lists instead of objects of class 'by'. Also, \code{frameApply} works only on the unique combinations of the \code{by} that are actually present in the data, not on the entire cartesian product of the \code{by} variables. In some cases this results in great gains in efficiency, although \code{frameApply} is hardly an efficient function.} \examples{ data(ELISA, package="gtools") # Default is slightly unintuitive, but commonly useful: frameApply(ELISA, by = c("PlateDay", "Read")) # Wouldn't actually recommend this model! Just a demo: frameApply(ELISA, on = c("Signal", "Concentration"), by = c("PlateDay", "Read"), fun = function(dat) coef(lm(Signal ~ Concentration, data = dat))) frameApply(ELISA, on = "Signal", by = "Concentration", fun = function(dat, ...) 
{ x <- dat[[1]] out <- c(Mean = mean(x, ...), SD = sd(x, ...), N = sum(!is.na(x))) }, na.rm = TRUE, subset = !is.na(Concentration)) } \author{Jim Rogers \email{james.a.rogers@pfizer.com}} \keyword{manip} gdata/man/object.size.Rd0000644000175100001440000001004713003720415014626 0ustar hornikusers% Come material taken from src/library/utils/man/object.size.Rd % Part of the R package, http://www.R-project.org % Copyright 1995-2007 R Core Development Team % Distributed under GPL 2 or later \name{object.size} \alias{object.size} \alias{c.object_sizes} \alias{as.object_sizes} \alias{is.object_sizes} \alias{format.object_sizes} \alias{print.object_sizes} \title{Report the Space Allocated for Objects} \description{ Provides an estimate of the memory that is being used to store \R objects. } \usage{ object.size(\dots) \method{is}{object_sizes}(x) \method{as}{object_sizes}(x) \method{c}{object_sizes}(\dots, recursive=FALSE) \method{format}{object_sizes}(x, humanReadable=getOption("humanReadable"), standard="IEC", units, digits=1, width=NULL, sep=" ", justify = c("right", "left"), \dots) \method{print}{object_sizes}(x, quote=FALSE, humanReadable=getOption("humanReadable"), standard="IEC", units, digits=1, width=NULL, sep=" ", justify = c("right", "left"), \dots) } \arguments{ \item{\dots}{\code{object.size}: \R objects; \code{print} and \code{format}: arguments to be passed to other methods. } \item{x}{output from \code{object.size}} \item{quote}{logical, indicating whether or not the result should be printed with surrounding quotes.} \item{humanReadable}{logical, use the \dQuote{human readable} format.} \item{standard,units,digits,width,sep,justify}{See the man page for \code{\link{humanReadable}}. } \item{recursive}{See the man page for \code{\link[base]{c}}. } } \details{ \emph{This is a modified copy of the man page for utils::object.size in R 2.2.1.} Exactly which parts of the memory allocation should be attributed to which object is not clear-cut. This function merely provides a rough indication: it should be reasonably accurate for atomic vectors, but does not detect if elements of a list are shared, for example. (Sharing amongst elements of a character vector is taken into account, but not that between character vectors in a single object.) The calculation is of the size of the object, and excludes the space needed to store its name in the symbol table. Associated space (e.g., the environment of a function and what the pointer in a \code{EXTPTRSXP} points to) is not included in the calculation. Object sizes are larger on 64-bit builds than 32-bit ones, but will very likely be the same on different platforms with the same word length and pointer size. % Modificitaion start \emph{Changes} Class of returned object is \code{c("object_sizes", "numeric")} with appropriate \code{print} and \code{c} methods. By default \code{object.size} outputs size in bytes, but human readable format similar to \code{ls}, \code{df} or \code{du} shell commands can be displayed by calling \code{humanReadable} directly, calling \code{print} with the argument \code{humanReadable=TRUE}, or by setting \code{options(humanReadable=TRUE)}. % Modificitaion end } \value{ A numeric vector class \code{c("object_sizes", "numeric")} containing estimated memory allocation attributable to the objects in bytes. 
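For instance (exact sizes vary between platforms and R builds):
\preformatted{
x <- rnorm(1e4)
object.size(x)                              # estimated size in bytes
print(object.size(x), humanReadable=TRUE)   # the same estimate in human readable units
}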
} \seealso{ \code{\link[utils]{object.size}} in package 'utils' for the standard version of this function, \code{\link{Memory-limits}} for the design limitations on object size, \code{\link{humanReadable}} for human readable format. } \examples{ object.size(letters) object.size(ls) ## find the 10 largest objects in the base package allObj <- sapply(ls("package:base"), function(x) object.size(get(x, envir = baseenv())) ) ( bigObj <- as.object_sizes(rev(sort(allObj))[1:10] ) ) print(bigObj, humanReadable=TRUE) as.object_sizes(14567567) \dontshow{ optionsOrig <- options("humanReadable") } options(humanReadable=TRUE) ( z <- object.size(letters, c(letters, letters), rep(letters, 100), rep(letters, 10000) ) ) is.object_sizes(z) as.object_sizes(14567567) \dontshow{ options(optionsOrig) } } \keyword{utilities} gdata/man/cbindX.Rd0000644000175100001440000000247613003720416013626 0ustar hornikusers% cbindX.Rd %-------------------------------------------------------------------------- % What: Column-bind objects with different number of rows - help % $Id: cbindX.Rd 1357 2009-08-20 14:54:44Z warnes $ % Time-stamp: <2008-08-05 13:38:45 ggorjan> %-------------------------------------------------------------------------- \name{cbindX} \alias{cbindX} \title{Column-bind objects with different number of rows} \description{ \code{cbindX} column-binds objects with different number of rows. } \usage{cbindX(...)} \arguments{ \item{\dots}{matrix and data.frame objects} } \details{ First the object with maximal number of rows is found. Other objects that have less rows get (via \code{\link{rbind}}) additional rows with \code{NA} values. Finally, all objects are column-binded (via \code{\link{cbind}}). } \value{See details} \author{Gregor Gorjanc} \seealso{Regular \code{\link{cbind}} and \code{\link{rbind}}} \examples{ df1 <- data.frame(a=1:3, b=c("A", "B", "C")) df2 <- data.frame(c=as.character(1:5), a=5:1) ma1 <- matrix(as.character(1:4), nrow=2, ncol=2) ma2 <- matrix(1:6, nrow=3, ncol=2) cbindX(df1, df2) cbindX(ma1, ma2) cbindX(df1, ma1) cbindX(df1, df2, ma1, ma2) cbindX(ma1, ma2, df1, df2) } \keyword{misc} %-------------------------------------------------------------------------- % cbindX.Rd ends here gdata/man/humanReadable.Rd0000644000175100001440000001405213003720416015140 0ustar hornikusers\name{humanReadable} \alias{humanReadable} \title{Print Byte Size in Human Readable Format} \description{ \code{humanReadable} converts integer byte sizes to a human readable units such as kB, MB, GB, etc. } \usage{ humanReadable(x, units="auto", standard=c("IEC", "SI", "Unix"), digits=1, width=NULL, sep=" ", justify=c("right", "left") ) } \arguments{ \item{x}{integer, byte size} \item{standard}{character, "IEC" for powers of 1024 ('MiB'), "SI" for powers of 1000 ('MB'), or "Unix" for powers of 1024 ('M'). See details.} \item{units}{character, unit to use for all values (optional), one of "auto", "bytes", or an appropriate unit corresponding to \code{standard}.} \item{digits}{integer, number of digits after decimal point} \item{width}{integer, width of number string} \item{sep}{character, separator between number and unit} \item{justify}{two-element vector specifiy the alignment for the number and unit components of the size. Each element should be one of "none", "left", "right", or "center"} } \details{ The basic unit used to store information in computers is a bit. Bits are represented as zeroes and ones - binary number system. 
Although, the binary number system is not the same as the decimal number system, decimal prefixes for binary multiples such as kilo and mega are often used. In the decimal system kilo represent 1000, which is close to \eqn{1024 = 2^{10}} in the binary system. This sometimes causes problems as it is not clear which powers (2 or 10) are used in a notation like 1 kB. To overcome this problem International Electrotechnical Commission (IEC) has provided the following solution to this problem: \tabular{lrcll}{ Name \tab System \tab Symbol \tab Size \tab Conversion \cr byte \tab binary \tab B \tab \eqn{2^3} \tab 8 bits \cr kilobyte \tab decimal \tab kB \tab \eqn{10^3} \tab 1000 bytes \cr kibibyte \tab binary \tab KiB \tab \eqn{2^{10}} \tab 1024 bytes \cr megabyte \tab decimal \tab MB \tab \eqn{(10^3)^2} \tab 1000 kilobytes\cr mebibyte \tab binary \tab MiB \tab \eqn{(2^{10})^2} \tab 1024 kibibytes\cr gigabyte \tab decimal \tab GB \tab \eqn{(10^3)^3} \tab 1000 megabytes\cr gibibyte \tab binary \tab GiB \tab \eqn{(2^{10})^3} \tab 1024 mebibytes\cr terabyte \tab decimal \tab TB \tab \eqn{(10^3)^4} \tab 1000 gigabytes\cr tebibyte \tab binary \tab TiB \tab \eqn{(2^{10})^4} \tab 1024 gibibytes\cr petabyte \tab decimal \tab PB \tab \eqn{(10^3)^5} \tab 1000 terabytes\cr pebibyte \tab binary \tab PiB \tab \eqn{(2^{10})^5} \tab 1024 tebibytes\cr exabyte \tab decimal \tab EB \tab \eqn{(10^3)^6} \tab 1000 petabytes\cr exbibyte \tab binary \tab EiB \tab \eqn{(2^{10})^6} \tab 1024 pebibytes\cr zettabyte \tab decimal \tab ZB \tab \eqn{(10^3)^7} \tab 1000 exabytes\cr zebibyte \tab binary \tab ZiB \tab \eqn{(2^{10})^7} \tab 1024 exbibytes\cr yottabyte \tab decimal \tab YB \tab \eqn{(10^3)^8} \tab 1000 zettabytes\cr yebibyte \tab binary \tab YiB \tab \eqn{(2^{10})^8} \tab 1024 zebibytes\cr } where Zi and Yi are GNU extensions to IEC. To get the output in the decimal system (powers of 1000) use \code{standard="SI"}. To obtain IEC standard (powers of 1024) use \code{standard="IEC"}. In addition, single-character units are provided that follow (and extend) the Unix pattern (use \code{standard="Unix"}): \tabular{lrcll}{ Name \tab System \tab Symbol \tab Size \tab Conversion \cr byte \tab binary \tab B \tab \eqn{2^3} \tab 8 bits \cr kibibyte \tab binary \tab K \tab \eqn{2^{10}} \tab 1024 bytes \cr mebibyte \tab binary \tab M \tab \eqn{(2^{10})^2} \tab 1024 kibibytes\cr gibibyte \tab binary \tab G \tab \eqn{(2^{10})^3} \tab 1024 mebibytes\cr tebibyte \tab binary \tab T \tab \eqn{(2^{10})^4} \tab 1024 gibibytes\cr pebibyte \tab binary \tab P \tab \eqn{(2^{10})^5} \tab 1024 tebibytes\cr exbibyte \tab binary \tab E \tab \eqn{(2^{10})^6} \tab 1024 pebibytes\cr zebibyte \tab binary \tab Z \tab \eqn{(2^{10})^7} \tab 1024 exbibytes\cr yottabyte \tab binary \tab Y \tab \eqn{(2^{10})^8} \tab 1024 zebibytes\cr } For printout both \code{digits} and \code{width} can be specified. If \code{width} is \code{NULL}, all values have given number of digits. If \code{width} is not \code{NULL}, output is rounded to a given width and formated similar to human readable format of the Unix \code{ls}, \code{df} or \code{du} shell commands. } \references{ Wikipedia: \url{http://en.wikipedia.org/wiki/Byte} \url{http://en.wikipedia.org/wiki/SI_prefix} \url{http://en.wikipedia.org/wiki/Binary_prefix} GNU manual for coreutils: \url{http://www.gnu.org/software/coreutils/manual/html_node/Block-size.html#Block-size} } \value{ Byte size in human readable format as character with proper unit symbols added at the end of the string. 
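For example, the same byte count renders differently under the supported standards (with the default \code{digits=1}):
\preformatted{
humanReadable(1500000)                    # IEC, powers of 1024: about "1.4 MiB"
humanReadable(1500000, standard="SI")     # SI, powers of 1000:  about "1.5 MB"
humanReadable(1500000, standard="Unix")   # Unix-style symbols:  about "1.4 M"
}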
} \author{Ales Korosec, Gregor Gorjanc, and Gregory R. Warnes \email{greg@warnes.net}} \seealso{ \code{\link{object.size}} in package 'gdata', \code{\link[utils]{object.size}} in package 'utils', \code{\link[gdata]{ll}} } \examples{ # Simple example: maximum addressible size of 32 bit pointer humanReadable(2^32-1) humanReadable(2^32-1, standard="IEC") humanReadable(2^32-1, standard="SI") humanReadable(2^32-1, standard="Unix") humanReadable(2^32-1, unit="MiB") humanReadable(2^32-1, standard="IEC", unit="MiB") humanReadable(2^32-1, standard="SI", unit="MB") humanReadable(2^32-1, standard="Unix", unit="M") # Vector of sizes matrix(humanReadable(c(60810, 124141, 124, 13412513), width=4)) matrix(humanReadable(c(60810, 124141, 124, 13412513), width=4, unit="KiB")) # Specify digits rather than width matrix(humanReadable(c(60810, 124141, 124, 13412513), width=NULL, digits=2)) # Change the justification matrix(humanReadable(c(60810, 124141, 124, 13412513), width=NULL, justify=c("right", "right") )) } \keyword{misc} %-------------------------------------------------------------------------- % humanReadable.Rd ends here gdata/man/MedUnits.Rd0000644000175100001440000000406013003720415014135 0ustar hornikusers\name{MedUnits} \alias{MedUnits} \docType{data} \title{ Table of conversions between Intertional Standard (SI) and US 'Conventional' Units for common medical measurements. } \description{ Table of conversions between Intertional Standard (SI) and US 'Conventional' Units for common medical measurements. } \usage{data(MedUnits)} \format{ A data frame with the following 5 variables. \describe{ \item{Abbreviation}{Common Abbreviation (mostly missing)} \item{Measurement}{Measurement Name} \item{ConventionalUnit}{Conventional Unit} \item{Conversion}{Conversion factor} \item{SIUnit}{SI Unit} } } \details{ Medical laboratories and practitioners in the United States use one set of units (the so-called 'Conventional' units) for reporting the results of clinical laboratory measurements, while the rest of the world uses the International Standard (SI) units. It often becomes necessary to translate between these units when participating in international collaborations. This data set provides constants for converting between SI and US 'Conventional' units. To perform the conversion from SI units to US 'Conventional' units do: Measurement in \code{ConventionalUnit} = (Measurement in \code{SIUnit}) / \code{Conversion} To perform conversion from 'Conventional' to SI units do: Measurement in \code{SIUnit} = (Measurement in \code{ConventionalUnit}) * \code{Conversion} } \source{ \url{http://www.globalrph.com/conv_si.htm} } \seealso{ The function \code{\link{ConvertMedUnits}} automates the conversion task. 
} \examples{ data(MedUnits) # show available conversions MedUnits$Measurement # utility function matchUnits <- function(X) MedUnits[ grep(X, MedUnits$Measurement),] # Convert SI Glucose measurement to 'Conventional' units GlucoseSI = c(5, 5.4, 5, 5.1, 5.6, 5.1, 4.9, 5.2, 5.5) # in SI Units GlucoseUS = GlucoseSI / matchUnits("Glucose")$Conversion cbind(GlucoseSI,GlucoseUS) # also consider using ConvertMedUnits() ConvertMedUnits( GlucoseSI, "Glucose", to="US" ) } \keyword{datasets} gdata/man/duplicated2.Rd0000644000175100001440000000362413003720415014612 0ustar hornikusers\name{duplicated2} \alias{duplicated2} \title{Determine Duplicate Elements} \description{ \code{duplicated2()} determines which elements of a vector or data frame are duplicates, and returns a logical vector indicating which elements (rows) are duplicates. } \usage{ duplicated2(x, bothWays=TRUE, ...) } \arguments{ \item{x}{a vector or a data frame or an array or \code{NULL}.} \item{bothWays}{if \code{TRUE} (the default), duplication should be considered from both sides. For more information see the argument \code{fromLast} to the function \code{\link{duplicated}}.} \item{\dots}{further arguments passed down to \code{duplicated()} and its methods.} } \details{ The standard \code{\link{duplicated}} function (in \code{package:base}) only returns \code{TRUE} for the second and following copies of each duplicated value (second-to-last and earlier when \code{fromLast=TRUE}). This function returns all duplicated elementes, including the first (last) value. When \code{bothWays} is \code{FALSE}, \code{duplicated2()} defaults to a \code{\link{duplicated}} call. When \code{bothWays} is \code{TRUE}, the following call is being executed: \code{duplicated(x, ...) | duplicated(x, fromLast=TRUE, ...)} } \value{ For a vector input, a logical vector of the same length as \code{x}. For a data frame, a logical vector with one element for each row. For a matrix or array, and when \code{MARGIN = 0}, a logical array with the same dimensions and dimnames. For more details see \code{\link{duplicated}}. } \seealso{ \code{\link[base]{duplicated}}, \code{\link[base]{unique}} } \author{Liviu Andronic} \examples{ data(iris) iris[duplicated(iris), ] # 2nd duplicated value iris[duplicated(iris, fromLast=TRUE), ] # 1st duplicated value iris[duplicated2(iris), ] # both duplicated values } \keyword{logic} \keyword{manip} gdata/.Rinstignore0000644000175100001440000000005413003720414013645 0ustar hornikusersdoc/.*\.tex$ doc/.*\.sty$ doc/.*\.dtx$ \.svn